From ae8f10b9cf35e6ea808b7ee338dbd414b91ada78 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?J=C3=B6rg=20Prante?= <joergprante@gmail.com>
Date: Mon, 8 Jul 2024 17:34:38 +0200
Subject: [PATCH] add testcontainers

---
 NOTICE.txt                                    |    8 +
 gradle.properties                             |    1 -
 gradle/test/junit5.gradle                     |    4 +-
 gradle/wrapper/gradle-wrapper.jar             |  Bin 43462 -> 43453 bytes
 gradle/wrapper/gradle-wrapper.properties      |    2 +-
 gradlew.bat                                   |   20 +-
 pgjdbc/build.gradle                           |   27 +
 .../src/main/java/org/postgresql/Driver.java  | 1372 ++-
 .../java/org/postgresql/PGConnection.java     |  639 +-
 .../java/org/postgresql/PGEnvironment.java    |  160 +-
 .../java/org/postgresql/PGNotification.java   |   46 +-
 .../main/java/org/postgresql/PGProperty.java  | 1995 ++--
 .../org/postgresql/PGRefCursorResultSet.java  |   18 +-
 .../org/postgresql/PGResultSetMetaData.java   |   76 +-
 .../main/java/org/postgresql/PGStatement.java |  150 +-
 .../main/java/org/postgresql/copy/CopyIn.java |   67 +-
 .../java/org/postgresql/copy/CopyManager.java |  437 +-
 .../org/postgresql/copy/CopyOperation.java    |   60 +-
 .../java/org/postgresql/copy/CopyOut.java     |   34 +-
 .../postgresql/copy/PGCopyInputStream.java    |  287 +-
 .../postgresql/copy/PGCopyOutputStream.java   |  343 +-
 .../postgresql/core/AsciiStringInterner.java  |  564 +-
 .../org/postgresql/core/BaseConnection.java   |  380 +-
 .../org/postgresql/core/BaseQueryKey.java     |   96 +-
 .../org/postgresql/core/BaseStatement.java    |  107 +-
 .../java/org/postgresql/core/CachedQuery.java |  104 +-
 .../core/CachedQueryCreateAction.java         |  103 +-
 .../org/postgresql/core/CallableQueryKey.java |   40 +-
 .../core/CommandCompleteParser.java           |  162 +-
 .../postgresql/core/ConnectionFactory.java    |  133 +-
 .../java/org/postgresql/core/Encoding.java    |  592 +-
 .../postgresql/core/EncodingPredictor.java    |  230 +-
 .../main/java/org/postgresql/core/Field.java  |  284 +-
 .../core/FixedLengthOutputStream.java         |   66 +-
 .../java/org/postgresql/core/JavaVersion.java |   50 +-
 .../postgresql/core/JdbcCallParseInfo.java    |   44 +-
 .../java/org/postgresql/core/NativeQuery.java |  182 +-
 .../org/postgresql/core/Notification.java     |   58 +-
 .../main/java/org/postgresql/core/Oid.java    |  247 +-
 .../org/postgresql/core/PGBindException.java  |   14 +-
 .../java/org/postgresql/core/PGStream.java    | 1486 ++-
 .../org/postgresql/core/ParameterList.java    |  327 +-
 .../main/java/org/postgresql/core/Parser.java | 2980 +++---
 .../java/org/postgresql/core/Provider.java    |   12 +-
 .../main/java/org/postgresql/core/Query.java  |  117 +-
 .../org/postgresql/core/QueryExecutor.java    | 1154 +--
 .../postgresql/core/QueryExecutorBase.java    |  906 +-
 .../core/QueryExecutorCloseAction.java        |  118 +-
 .../core/QueryWithReturningColumnsKey.java    |  118 +-
 .../postgresql/core/ReplicationProtocol.java  |   29 +-
 .../org/postgresql/core/ResultCursor.java     |   10 +-
 .../org/postgresql/core/ResultHandler.java    |  122 +-
 .../postgresql/core/ResultHandlerBase.java    |  106 +-
 .../core/ResultHandlerDelegate.java           |   96 +-
 .../org/postgresql/core/ServerVersion.java    |  299 +-
 .../org/postgresql/core/SetupQueryRunner.java |  101 +-
 .../postgresql/core/SocketFactoryFactory.java |  102 +-
 .../java/org/postgresql/core/SqlCommand.java  |  100 +-
 .../org/postgresql/core/SqlCommandType.java   |   28 +-
 .../org/postgresql/core/TransactionState.java |    6 +-
 .../main/java/org/postgresql/core/Tuple.java  |  176 +-
 .../java/org/postgresql/core/TypeInfo.java    |  217 +-
 .../main/java/org/postgresql/core/Utils.java  |  307 +-
 .../java/org/postgresql/core/Version.java     |   12 +-
 .../core/VisibleBufferedInputStream.java      |  635 +-
 .../core/v3/AuthenticationPluginManager.java  |  199 +-
 .../org/postgresql/core/v3/BatchedQuery.java  |  295 +-
 .../core/v3/CompositeParameterList.java       |  357 +-
 .../postgresql/core/v3/CompositeQuery.java    |  149 +-
 .../core/v3/ConnectionFactoryImpl.java        | 1598 ++--
 .../org/postgresql/core/v3/CopyDualImpl.java  |   91 +-
 .../org/postgresql/core/v3/CopyInImpl.java    |   51 +-
 .../postgresql/core/v3/CopyOperationImpl.java |  125 +-
 .../org/postgresql/core/v3/CopyOutImpl.java   |   37 +-
 .../postgresql/core/v3/DescribeRequest.java   |   23 +-
 .../postgresql/core/v3/ExecuteRequest.java    |   17 +-
 .../java/org/postgresql/core/v3/Portal.java   |   79 +-
 .../postgresql/core/v3/QueryExecutorImpl.java | 5897 ++++++------
 .../core/v3/SimpleParameterList.java          | 1097 ++-
 .../org/postgresql/core/v3/SimpleQuery.java   |  642 +-
 .../core/v3/TypeTransferModeRegistry.java     |   26 +-
 .../postgresql/core/v3/V3ParameterList.java   |   78 +-
 .../v3/adaptivefetch/AdaptiveFetchCache.java  |  303 +-
 .../AdaptiveFetchCacheEntry.java              |   54 +-
 .../v3/replication/V3PGReplicationStream.java |  475 +-
 .../v3/replication/V3ReplicationProtocol.java |  211 +-
 .../ds/PGConnectionPoolDataSource.java        |  120 +-
 .../org/postgresql/ds/PGPooledConnection.java |  799 +-
 .../postgresql/ds/PGPoolingDataSource.java    |  817 +-
 .../org/postgresql/ds/PGSimpleDataSource.java |   60 +-
 .../postgresql/ds/common/BaseDataSource.java  | 3494 ++++---
 .../postgresql/ds/common/PGObjectFactory.java |  136 +-
 .../org/postgresql/fastpath/Fastpath.java     |  546 +-
 .../org/postgresql/fastpath/FastpathArg.java  |  182 +-
 .../java/org/postgresql/geometric/PGbox.java  |  328 +-
 .../org/postgresql/geometric/PGcircle.java    |  210 +-
 .../java/org/postgresql/geometric/PGline.java |  360 +-
 .../java/org/postgresql/geometric/PGlseg.java |  212 +-
 .../java/org/postgresql/geometric/PGpath.java |  289 +-
 .../org/postgresql/geometric/PGpoint.java     |  334 +-
 .../org/postgresql/geometric/PGpolygon.java   |  232 +-
 .../postgresql/gss/GSSCallbackHandler.java    |   77 +-
 .../org/postgresql/gss/GSSInputStream.java    |   95 +-
 .../org/postgresql/gss/GSSOutputStream.java   |  127 +-
 .../java/org/postgresql/gss/GssAction.java    |  293 +-
 .../java/org/postgresql/gss/GssEncAction.java |  255 +-
 .../main/java/org/postgresql/gss/MakeGSS.java |  302 +-
 .../postgresql/hostchooser/CandidateHost.java |   12 +-
 .../hostchooser/GlobalHostStatusTracker.java  |  116 +-
 .../postgresql/hostchooser/HostChooser.java   |   14 +-
 .../hostchooser/HostChooserFactory.java       |   12 +-
 .../hostchooser/HostRequirement.java          |  118 +-
 .../postgresql/hostchooser/HostStatus.java    |    8 +-
 .../hostchooser/MultiHostChooser.java         |  200 +-
 .../hostchooser/SingleHostChooser.java        |   16 +-
 .../org/postgresql/jdbc/AbstractBlobClob.java |  473 +-
 .../org/postgresql/jdbc/ArrayDecoding.java    | 1429 ++-
 .../org/postgresql/jdbc/ArrayEncoding.java    | 2653 +++---
 .../java/org/postgresql/jdbc/AutoSave.java    |   26 +-
 .../postgresql/jdbc/BatchResultHandler.java   |  395 +-
 .../org/postgresql/jdbc/BooleanTypeUtil.java  |  136 +-
 .../jdbc/CallableBatchResultHandler.java      |   18 +-
 .../postgresql/jdbc/EscapeSyntaxCallMode.java |   36 +-
 .../org/postgresql/jdbc/EscapedFunctions.java | 1418 +--
 .../postgresql/jdbc/EscapedFunctions2.java    | 1343 +--
 .../org/postgresql/jdbc/FieldMetadata.java    |  131 +-
 .../java/org/postgresql/jdbc/GSSEncMode.java  |   76 +-
 .../org/postgresql/jdbc/PSQLSavepoint.java    |  106 +-
 .../postgresql/jdbc/PSQLWarningWrapper.java   |   26 +-
 .../java/org/postgresql/jdbc/PgArray.java     |  833 +-
 .../main/java/org/postgresql/jdbc/PgBlob.java |   64 +-
 .../postgresql/jdbc/PgCallableStatement.java  | 1754 ++--
 .../main/java/org/postgresql/jdbc/PgClob.java |  142 +-
 .../org/postgresql/jdbc/PgConnection.java     | 3546 ++++----
 .../jdbc/PgConnectionCleaningAction.java      |  102 +-
 .../postgresql/jdbc/PgDatabaseMetaData.java   | 6305 +++++++------
 .../postgresql/jdbc/PgParameterMetaData.java  |  176 +-
 .../postgresql/jdbc/PgPreparedStatement.java  | 3385 +++----
 .../java/org/postgresql/jdbc/PgResultSet.java | 8066 ++++++++---------
 .../postgresql/jdbc/PgResultSetMetaData.java  |  856 +-
 .../java/org/postgresql/jdbc/PgSQLXML.java    |  518 +-
 .../java/org/postgresql/jdbc/PgStatement.java | 2594 +++---
 .../org/postgresql/jdbc/PreferQueryMode.java  |   38 +-
 .../jdbc/QueryExecutorTimeZoneProvider.java   |   30 +-
 .../org/postgresql/jdbc/ResourceLock.java     |   32 +-
 .../org/postgresql/jdbc/ResultWrapper.java    |   74 +-
 .../java/org/postgresql/jdbc/SslMode.java     |  118 +-
 .../postgresql/jdbc/StatementCancelState.java |    8 +-
 .../jdbc/StatementCancelTimerTask.java        |   42 +-
 .../org/postgresql/jdbc/TimestampUtils.java   | 3147 +++----
 .../org/postgresql/jdbc/TypeInfoCache.java    | 1954 ++--
 .../postgresql/jdbc/UUIDArrayAssistant.java   |   28 +-
 .../org/postgresql/jdbc2/ArrayAssistant.java  |   44 +-
 .../jdbc2/ArrayAssistantRegistry.java         |   20 +-
 .../jdbcurlresolver/PgPassParser.java         |  442 +-
 .../jdbcurlresolver/PgServiceConfParser.java  |  416 +-
 .../largeobject/BlobInputStream.java          |  607 +-
 .../largeobject/BlobOutputStream.java         |  423 +-
 .../postgresql/largeobject/LargeObject.java   |  767 +-
 .../largeobject/LargeObjectManager.java       |  553 +-
 .../plugin/AuthenticationPlugin.java          |   36 +-
 .../replication/LogSequenceNumber.java        |  184 +-
 .../replication/PGReplicationConnection.java  |   44 +-
 .../PGReplicationConnectionImpl.java          |   50 +-
 .../replication/PGReplicationStream.java      |  222 +-
 .../replication/ReplicationSlotInfo.java      |  116 +-
 .../replication/ReplicationType.java          |    4 +-
 .../fluent/AbstractCreateSlotBuilder.java     |   52 +-
 .../fluent/AbstractStreamBuilder.java         |   46 +-
 .../ChainedCommonCreateSlotBuilder.java       |   52 +-
 .../fluent/ChainedCommonStreamBuilder.java    |   58 +-
 .../ChainedCreateReplicationSlotBuilder.java  |  135 +-
 .../fluent/ChainedStreamBuilder.java          |  122 +-
 .../replication/fluent/CommonOptions.java     |   42 +-
 .../fluent/ReplicationCreateSlotBuilder.java  |   24 +-
 .../fluent/ReplicationStreamBuilder.java      |   54 +-
 .../ChainedLogicalCreateSlotBuilder.java      |   24 +-
 .../logical/ChainedLogicalStreamBuilder.java  |   66 +-
 .../logical/LogicalCreateSlotBuilder.java     |  114 +-
 .../logical/LogicalReplicationOptions.java    |   28 +-
 .../fluent/logical/LogicalStreamBuilder.java  |  138 +-
 .../StartLogicalReplicationCallback.java      |    2 +-
 .../ChainedPhysicalCreateSlotBuilder.java     |    2 +-
 .../ChainedPhysicalStreamBuilder.java         |   16 +-
 .../physical/PhysicalCreateSlotBuilder.java   |   90 +-
 .../physical/PhysicalStreamBuilder.java       |   58 +-
 .../StartPhysicalReplicationCallback.java     |    2 +-
 .../postgresql/scram/ScramAuthenticator.java  |  306 +-
 .../ssl/DbKeyStoreSocketFactory.java          |   93 +-
 .../postgresql/ssl/DefaultJavaSSLFactory.java |    7 +-
 .../org/postgresql/ssl/LazyKeyManager.java    |  484 +-
 .../java/org/postgresql/ssl/LibPQFactory.java |  394 +-
 .../main/java/org/postgresql/ssl/MakeSSL.java |  124 +-
 .../postgresql/ssl/NonValidatingFactory.java  |   51 +-
 .../ssl/PGjdbcHostnameVerifier.java           |  447 +-
 .../org/postgresql/ssl/PKCS12KeyManager.java  |  306 +-
 .../ssl/SingleCertValidatingFactory.java      |  224 +-
 .../org/postgresql/ssl/WrappedFactory.java    |   71 +-
 .../postgresql/ssl/jdbc4/LibPQFactory.java    |  124 +-
 .../postgresql/translation/messages_bg.java   |  901 +-
 .../postgresql/translation/messages_cs.java   |  452 +-
 .../postgresql/translation/messages_de.java   |  672 +-
 .../postgresql/translation/messages_es.java   |  166 +-
 .../postgresql/translation/messages_fr.java   |  674 +-
 .../postgresql/translation/messages_it.java   |  646 +-
 .../postgresql/translation/messages_ja.java   | 1242 +--
 .../postgresql/translation/messages_nl.java   |  102 +-
 .../postgresql/translation/messages_pl.java   |  364 +-
 .../translation/messages_pt_BR.java           |  778 +-
 .../postgresql/translation/messages_ru.java   |  516 +-
 .../postgresql/translation/messages_sr.java   |  776 +-
 .../postgresql/translation/messages_tr.java   |  782 +-
 .../translation/messages_zh_CN.java           |  548 +-
 .../translation/messages_zh_TW.java           |  546 +-
 .../util/ByteBufferByteStreamWriter.java      |   58 +-
 .../util/ByteBuffersByteStreamWriter.java     |   96 +-
 .../org/postgresql/util/ByteConverter.java    | 1220 ++-
 .../org/postgresql/util/ByteStreamWriter.java |   68 +-
 .../org/postgresql/util/CanEstimateSize.java  |    2 +-
 .../java/org/postgresql/util/DriverInfo.java  |   33 +-
 .../postgresql/util/ExpressionProperties.java |  144 +-
 .../src/main/java/org/postgresql/util/GT.java |   75 +-
 .../java/org/postgresql/util/Gettable.java    |    2 +-
 .../org/postgresql/util/GettableHashMap.java  |    4 +-
 .../org/postgresql/util/HStoreConverter.java  |  268 +-
 .../java/org/postgresql/util/HostSpec.java    |  166 +-
 .../java/org/postgresql/util/IntList.java     |  114 +-
 .../org/postgresql/util/JdbcBlackHole.java    |   54 +-
 .../org/postgresql/util/KerberosTicket.java   |  106 +-
 .../java/org/postgresql/util/LazyCleaner.java |  336 +-
 .../org/postgresql/util/LogWriterHandler.java |  134 +-
 .../java/org/postgresql/util/LruCache.java    |  296 +-
 .../java/org/postgresql/util/MD5Digest.java   |   94 +-
 .../org/postgresql/util/NumberParser.java     |  146 +-
 .../main/java/org/postgresql/util/OSUtil.java |   40 +-
 .../org/postgresql/util/ObjectFactory.java    |   98 +-
 .../org/postgresql/util/PGBinaryObject.java   |   48 +-
 .../java/org/postgresql/util/PGInterval.java  | 1000 +-
 .../java/org/postgresql/util/PGJDBCMain.java  |   30 +-
 .../util/PGPropertyMaxResultBufferParser.java |  400 +-
 .../org/postgresql/util/PGPropertyUtil.java   |  196 +-
 .../main/java/org/postgresql/util/PGTime.java |  162 +-
 .../java/org/postgresql/util/PGTimestamp.java |  174 +-
 .../java/org/postgresql/util/PGbytea.java     |  274 +-
 .../java/org/postgresql/util/PGmoney.java     |  186 +-
 .../java/org/postgresql/util/PGobject.java    |  208 +-
 .../java/org/postgresql/util/PGtokenizer.java |  404 +-
 .../org/postgresql/util/PSQLException.java    |   34 +-
 .../java/org/postgresql/util/PSQLState.java   |  172 +-
 .../java/org/postgresql/util/PSQLWarning.java |   24 +-
 .../org/postgresql/util/PasswordUtil.java     |  236 +-
 .../postgresql/util/ReaderInputStream.java    |  255 +-
 .../postgresql/util/ServerErrorMessage.java   |  404 +-
 .../java/org/postgresql/util/SharedTimer.java |  120 +-
 .../org/postgresql/util/StreamWrapper.java    |  217 +-
 .../org/postgresql/util/TempFileHolder.java   |   56 +-
 .../java/org/postgresql/util/URLCoder.java    |   66 +-
 .../org/postgresql/xa/PGXAConnection.java     | 1204 ++-
 .../org/postgresql/xa/PGXADataSource.java     |   82 +-
 .../postgresql/xa/PGXADataSourceFactory.java  |   42 +-
 .../java/org/postgresql/xa/PGXAException.java |   26 +-
 .../java/org/postgresql/xa/RecoveredXid.java  |  183 +-
 .../xml/DefaultPGXmlFactoryFactory.java       |  221 +-
 .../xml/EmptyStringEntityResolver.java        |   21 +-
 .../LegacyInsecurePGXmlFactoryFactory.java    |   67 +-
 .../org/postgresql/xml/NullErrorHandler.java  |   24 +-
 .../postgresql/xml/PGXmlFactoryFactory.java   |   17 +-
 .../core/AsciiStringInternerTest.java         |  130 -
 .../CommandCompleteParserNegativeTest.java    |   44 -
 .../core/CommandCompleteParserTest.java       |   49 -
 .../org/postgresql/core/OidToStringTest.java  |   31 -
 .../org/postgresql/core/OidValueOfTest.java   |   32 -
 .../core/OidValuesCorrectnessTest.java        |  143 -
 .../java/org/postgresql/core/ParserTest.java  |  302 -
 .../postgresql/core/ReturningParserTest.java  |   55 -
 .../org/postgresql/core/UTF8EncodingTest.java |   85 -
 .../core/v3/V3ParameterListTests.java         |   64 -
 .../adaptivefetch/AdaptiveFetchCacheTest.java | 1088 ---
 .../postgresql/jdbc/AbstractArraysTest.java   | 1116 ---
 .../java/org/postgresql/jdbc/ArraysTest.java  |   45 -
 .../org/postgresql/jdbc/ArraysTestSuite.java  |   31 -
 .../jdbc/BigDecimalObjectArraysTest.java      |   26 -
 .../org/postgresql/jdbc/BitFieldTest.java     |  134 -
 .../postgresql/jdbc/BooleanArraysTest.java    |   18 -
 .../jdbc/BooleanObjectArraysTest.java         |   18 -
 .../org/postgresql/jdbc/ByteaArraysTest.java  |   60 -
 .../postgresql/jdbc/ConnectionValidTest.java  |  150 -
 .../jdbc/DeepBatchedInsertStatementTest.java  |  319 -
 .../org/postgresql/jdbc/DoubleArraysTest.java |   19 -
 .../jdbc/DoubleObjectArraysTest.java          |   19 -
 .../org/postgresql/jdbc/FloatArraysTest.java  |   19 -
 .../jdbc/FloatObjectArraysTest.java           |   19 -
 .../org/postgresql/jdbc/IntArraysTest.java    |   18 -
 .../jdbc/IntegerObjectArraysTest.java         |   19 -
 .../jdbc/LargeObjectManagerTest.java          |  171 -
 .../org/postgresql/jdbc/LongArraysTest.java   |   18 -
 .../postgresql/jdbc/LongObjectArraysTest.java |   19 -
 .../jdbc/ParameterInjectionTest.java          |  144 -
 .../org/postgresql/jdbc/PgSQLXMLTest.java     |  140 -
 .../org/postgresql/jdbc/ResourceLockTest.java |   34 -
 .../java/org/postgresql/jdbc/ScramTest.java   |  136 -
 .../org/postgresql/jdbc/ShortArraysTest.java  |   18 -
 .../jdbc/ShortObjectArraysTest.java           |   18 -
 .../org/postgresql/jdbc/StringArraysTest.java |   20 -
 .../org/postgresql/jdbc/UUIDArrayTest.java    |  139 -
 .../jdbcurlresolver/PgPassParserTest.java     |  186 -
 .../PgServiceConfParserTest.java              |  335 -
 .../replication/CopyBothResponseTest.java     |  194 -
 .../replication/LogSequenceNumberTest.java    |  147 -
 .../LogicalReplicationStatusTest.java         |  549 --
 .../replication/LogicalReplicationTest.java   |  959 --
 .../replication/PhysicalReplicationTest.java  |  291 -
 .../ReplicationConnectionTest.java            |  104 -
 .../replication/ReplicationSlotTest.java      |  395 -
 .../replication/ReplicationTestSuite.java     |   80 -
 .../java/org/postgresql/test/TestUtil.java    | 1178 +++
 .../DisabledIfServerVersionBelow.java         |   31 +
 .../test/annotations/tags/Replication.java    |   18 +
 .../postgresql/test/annotations/tags/Xa.java  |   18 +
 .../test/core/AsciiStringInternerTest.java    |  129 +
 .../CommandCompleteParserNegativeTest.java    |   42 +
 .../test/core/CommandCompleteParserTest.java  |   47 +
 .../core/FixedLengthOutputStreamTest.java     |  137 +-
 .../postgresql/test/core/JavaVersionTest.java |   24 +-
 .../core/LogServerMessagePropertyTest.java    |  237 +-
 .../test/core/NativeQueryBindLengthTest.java  |   62 +-
 .../postgresql/test/core/OidToStringTest.java |   29 +
 .../postgresql/test/core/OidValueOfTest.java  |   30 +
 .../test/core/OidValuesCorrectnessTest.java   |  141 +
 .../test/core/OptionsPropertyTest.java        |   89 +-
 .../org/postgresql/test/core/ParserTest.java  |  306 +
 .../test/core/QueryExecutorTest.java          |  106 +-
 .../test/core/ReturningParserTest.java        |   55 +
 .../test/core/UTF8EncodingTest.java           |   84 +
 .../test/core/v3/V3ParameterListTests.java    |   62 +
 .../adaptivefetch/AdaptiveFetchCacheTest.java | 1086 +++
 .../test/extensions/ExtensionsTestSuite.java  |    2 +-
 .../test/extensions/HStoreTest.java           |  167 +-
 .../test/hostchooser/MultiHostTestSuite.java  |    2 +-
 .../hostchooser/MultiHostsConnectionTest.java |  854 +-
 .../impl/AfterBeforeParameterResolver.java    |   82 +
 .../test/impl/DefaultParameterContext.java    |   64 +
 .../test/impl/ServerVersionCondition.java     |   58 +
 .../test/jdbc/AbstractArraysTest.java         | 1117 +++
 .../org/postgresql/test/jdbc/ArraysTest.java  |   43 +
 .../postgresql/test/jdbc/ArraysTestSuite.java |   31 +
 .../test/jdbc/BigDecimalObjectArraysTest.java |   24 +
 .../postgresql/test/jdbc/BitFieldTest.java    |  132 +
 .../test/jdbc/BooleanArraysTest.java          |   18 +
 .../test/jdbc/BooleanObjectArraysTest.java    |   18 +
 .../postgresql/test/jdbc/ByteaArraysTest.java |   58 +
 .../test/jdbc/ConnectionValidTest.java        |  148 +
 .../jdbc/DeepBatchedInsertStatementTest.java  |  318 +
 .../test/jdbc/DoubleArraysTest.java           |   19 +
 .../test/jdbc/DoubleObjectArraysTest.java     |   19 +
 .../postgresql/test/jdbc/FloatArraysTest.java |   19 +
 .../test/jdbc/FloatObjectArraysTest.java      |   19 +
 .../postgresql/test/jdbc/IntArraysTest.java   |   18 +
 .../test/jdbc/IntegerObjectArraysTest.java    |   19 +
 .../test/jdbc/LargeObjectManagerTest.java     |  168 +
 .../postgresql/test/jdbc/LongArraysTest.java  |   18 +
 .../test/jdbc/LongObjectArraysTest.java       |   19 +
 .../jdbc/NoColumnMetadataIssue1613Test.java   |   42 +-
 .../test/jdbc/ParameterInjectionTest.java     |  141 +
 .../postgresql/test/jdbc/PgSQLXMLTest.java    |  137 +
 .../test/jdbc/ResourceLockTest.java           |   35 +
 .../org/postgresql/test/jdbc/ScramTest.java   |  133 +
 .../postgresql/test/jdbc/ShortArraysTest.java |   18 +
 .../test/jdbc/ShortObjectArraysTest.java      |   18 +
 .../test/jdbc/StringArraysTest.java           |   20 +
 .../postgresql/test/jdbc/UUIDArrayTest.java   |  136 +
 .../org/postgresql/test/jdbc2/ArrayTest.java  | 1733 ++--
 .../test/jdbc2/AutoRollbackTestSuite.java     |  698 +-
 .../org/postgresql/test/jdbc2/BaseTest4.java  |  223 +-
 .../test/jdbc2/BatchExecuteTest.java          | 1594 ++--
 .../test/jdbc2/BatchFailureTest.java          |  440 +-
 .../BatchedInsertReWriteEnabledTest.java      |  832 +-
 .../org/postgresql/test/jdbc2/BlobTest.java   |  935 +-
 .../test/jdbc2/BlobTransactionTest.java       |  256 +-
 .../test/jdbc2/CallableStmtTest.java          |  514 +-
 .../test/jdbc2/ClientEncodingTest.java        |  108 +-
 .../jdbc2/ColumnSanitiserDisabledTest.java    |  174 +-
 .../jdbc2/ColumnSanitiserEnabledTest.java     |  162 +-
 .../test/jdbc2/ConcurrentStatementFetch.java  |  100 +-
 .../test/jdbc2/ConnectTimeoutTest.java        |   94 +-
 .../postgresql/test/jdbc2/ConnectionTest.java |  975 +-
 .../test/jdbc2/CopyLargeFileTest.java         |  174 +-
 .../org/postgresql/test/jdbc2/CopyTest.java   |  913 +-
 .../test/jdbc2/CursorFetchTest.java           |  925 +-
 .../CustomTypeWithBinaryTransferTest.java     |  390 +-
 .../test/jdbc2/DatabaseEncodingTest.java      |  400 +-
 .../test/jdbc2/DatabaseMetaDataCacheTest.java |  109 +-
 .../jdbc2/DatabaseMetaDataPropertiesTest.java |  298 +-
 .../test/jdbc2/DatabaseMetaDataTest.java      | 3414 +++----
 ...abaseMetaDataTransactionIsolationTest.java |  226 +-
 .../postgresql/test/jdbc2/DateStyleTest.java  |   82 +-
 .../org/postgresql/test/jdbc2/DateTest.java   |  525 +-
 .../org/postgresql/test/jdbc2/DriverTest.java |  931 +-
 .../postgresql/test/jdbc2/EncodingTest.java   |   67 +-
 .../org/postgresql/test/jdbc2/EnumTest.java   |  140 +-
 .../postgresql/test/jdbc2/GeometricTest.java  |  313 +-
 .../org/postgresql/test/jdbc2/GetXXXTest.java |  117 +-
 .../postgresql/test/jdbc2/IntervalTest.java   |  707 +-
 .../postgresql/test/jdbc2/JBuilderTest.java   |   85 +-
 .../test/jdbc2/LoginTimeoutTest.java          |  250 +-
 .../org/postgresql/test/jdbc2/MiscTest.java   |  205 +-
 .../org/postgresql/test/jdbc2/NotifyTest.java |  463 +-
 .../test/jdbc2/NumericTransferTest.java       |  104 +-
 .../test/jdbc2/NumericTransferTest2.java      |  154 +-
 .../test/jdbc2/OuterJoinSyntaxTest.java       |  178 +-
 .../test/jdbc2/PGObjectGetTest.java           |  201 +-
 .../test/jdbc2/PGObjectSetTest.java           |  168 +-
 .../postgresql/test/jdbc2/PGPropertyTest.java |  533 +-
 .../org/postgresql/test/jdbc2/PGTimeTest.java |  405 +-
 .../test/jdbc2/PGTimestampTest.java           |  384 +-
 .../test/jdbc2/ParameterStatusTest.java       |  302 +-
 .../test/jdbc2/PreparedStatementTest.java     | 3000 +++---
 .../postgresql/test/jdbc2/QuotationTest.java  |  238 +-
 .../test/jdbc2/RefCursorFetchTest.java        |  250 +-
 .../postgresql/test/jdbc2/RefCursorTest.java  |  239 +-
 .../test/jdbc2/ReplaceProcessingTest.java     |   50 +-
 .../test/jdbc2/ResultSetMetaDataTest.java     |  676 +-
 .../test/jdbc2/ResultSetRefreshTest.java      |   60 +-
 .../postgresql/test/jdbc2/ResultSetTest.java  | 2799 +++---
 .../test/jdbc2/SearchPathLookupTest.java      |  198 +-
 .../test/jdbc2/ServerCursorTest.java          |  136 +-
 .../test/jdbc2/ServerErrorTest.java           |  270 +-
 .../test/jdbc2/ServerPreparedStmtTest.java    |  508 +-
 .../test/jdbc2/SocketTimeoutTest.java         |   32 +-
 .../postgresql/test/jdbc2/StatementTest.java  | 2170 +++--
 .../jdbc2/StringTypeUnspecifiedArrayTest.java |   44 +-
 .../org/postgresql/test/jdbc2/TestACL.java    |   37 +-
 .../org/postgresql/test/jdbc2/TimeTest.java   |  499 +-
 .../postgresql/test/jdbc2/TimestampTest.java  | 1268 ++-
 .../test/jdbc2/TimezoneCachingTest.java       |  700 +-
 .../postgresql/test/jdbc2/TimezoneTest.java   | 1815 ++--
 .../test/jdbc2/TypeCacheDLLStressTest.java    |  134 +-
 .../test/jdbc2/UpdateableResultTest.java      | 1616 ++--
 .../org/postgresql/test/jdbc2/UpsertTest.java |  321 +-
 .../BaseDataSourceFailoverUrlsTest.java       |  152 +-
 .../jdbc2/optional/BaseDataSourceTest.java    |  382 +-
 .../optional/CaseOptimiserDataSourceTest.java |  153 +-
 .../jdbc2/optional/ConnectionPoolTest.java    |  934 +-
 .../jdbc2/optional/OptionalTestSuite.java     |   14 +-
 .../jdbc2/optional/PoolingDataSourceTest.java |  217 +-
 .../jdbc2/optional/SimpleDataSourceTest.java  |   33 +-
 .../SimpleDataSourceWithSetURLTest.java       |   71 +-
 .../optional/SimpleDataSourceWithUrlTest.java |   26 +-
 .../test/jdbc3/CompositeQueryParseTest.java   |  388 +-
 .../postgresql/test/jdbc3/CompositeTest.java  |  324 +-
 .../test/jdbc3/DatabaseMetaDataTest.java      |   52 +-
 .../jdbc3/EscapeSyntaxCallModeBaseTest.java   |   44 +-
 ...scapeSyntaxCallModeCallIfNoReturnTest.java |  114 +-
 .../jdbc3/EscapeSyntaxCallModeCallTest.java   |  132 +-
 .../jdbc3/EscapeSyntaxCallModeSelectTest.java |  106 +-
 .../test/jdbc3/GeneratedKeysTest.java         |  903 +-
 .../postgresql/test/jdbc3/Jdbc3BlobTest.java  |  500 +-
 .../jdbc3/Jdbc3CallableStatementTest.java     | 2190 +++--
 .../test/jdbc3/Jdbc3SavepointTest.java        |  342 +-
 .../postgresql/test/jdbc3/Jdbc3TestSuite.java |   34 +-
 .../test/jdbc3/ParameterMetaDataTest.java     |  178 +-
 .../test/jdbc3/ProcedureTransactionTest.java  |  272 +-
 .../postgresql/test/jdbc3/ResultSetTest.java  |   66 +-
 .../test/jdbc3/SendRecvBufferSizeTest.java    |   54 +-
 .../test/jdbc3/SqlCommandParseTest.java       |   48 +-
 .../test/jdbc3/StringTypeParameterTest.java   |  236 +-
 .../postgresql/test/jdbc3/TestReturning.java  |  157 +-
 .../org/postgresql/test/jdbc3/TypesTest.java  |  140 +-
 .../org/postgresql/test/jdbc4/ArrayTest.java  | 1304 +--
 .../test/jdbc4/BinaryStreamTest.java          |  248 +-
 .../org/postgresql/test/jdbc4/BinaryTest.java |  202 +-
 .../org/postgresql/test/jdbc4/BlobTest.java   |  272 +-
 .../test/jdbc4/CharacterStreamTest.java       |  354 +-
 .../postgresql/test/jdbc4/ClientInfoTest.java |  138 +-
 .../jdbc4/ConnectionValidTimeoutTest.java     |   84 +-
 ...seMetaDataHideUnprivilegedObjectsTest.java | 1058 +--
 .../test/jdbc4/DatabaseMetaDataTest.java      |  914 +-
 .../postgresql/test/jdbc4/IsValidTest.java    |   84 +-
 .../postgresql/test/jdbc4/Jdbc4TestSuite.java |   30 +-
 .../org/postgresql/test/jdbc4/JsonbTest.java  |  144 +-
 .../test/jdbc4/PGCopyInputStreamTest.java     |  172 +-
 .../org/postgresql/test/jdbc4/UUIDTest.java   |  150 +-
 .../postgresql/test/jdbc4/WrapperTest.java    |  212 +-
 .../org/postgresql/test/jdbc4/XmlTest.java    |  660 +-
 .../test/jdbc4/jdbc41/AbortTest.java          |  126 +-
 .../jdbc4/jdbc41/CloseOnCompletionTest.java   |  152 +-
 .../DriverSupportsClassUnloadingTest.java     |  166 +-
 .../test/jdbc4/jdbc41/GetObjectTest.java      | 1842 ++--
 .../test/jdbc4/jdbc41/Jdbc41TestSuite.java    |   10 +-
 .../test/jdbc4/jdbc41/NetworkTimeoutTest.java |   88 +-
 .../test/jdbc4/jdbc41/SchemaTest.java         |  516 +-
 .../test/jdbc42/AdaptiveFetchSizeTest.java    |  493 +-
 .../jdbc42/CustomizeDefaultFetchSizeTest.java |   88 +-
 .../test/jdbc42/DatabaseMetaDataTest.java     |  250 +-
 .../jdbc42/GetObject310InfinityTests.java     |  126 +-
 .../test/jdbc42/GetObject310Test.java         |  730 +-
 .../jdbc42/Jdbc42CallableStatementTest.java   |  144 +-
 .../test/jdbc42/Jdbc42TestSuite.java          |   18 +-
 .../test/jdbc42/LargeCountJdbc42Test.java     |  730 +-
 .../jdbc42/PreparedStatement64KBindsTest.java |  128 +-
 .../test/jdbc42/PreparedStatementTest.java    |  160 +-
 .../jdbc42/SetObject310InfinityTests.java     |  178 +-
 .../test/jdbc42/SetObject310Test.java         |  829 +-
 .../test/jdbc42/SimpleJdbc42Test.java         |   14 +-
 .../test/jdbc42/TimestampUtilsTest.java       |  204 +-
 .../jdbcurlresolver/PgPassParserTest.java     |  184 +
 .../PgServiceConfParserTest.java              |  333 +
 .../test/jre8/core/Jre8TestSuite.java         |    4 +-
 .../test/jre8/core/SocksProxyTest.java        |   53 +-
 .../postgresql/test/osgi/OsgiTestSuite.java   |   18 -
 .../test/osgi/PGDataSourceFactoryTest.java    |  112 -
 .../test/plugin/AuthenticationPluginTest.java |  106 +-
 .../test/plugin/PluginTestSuite.java          |    2 +-
 .../replication/CopyBothResponseTest.java     |  192 +
 .../replication/LogSequenceNumberTest.java    |  146 +
 .../LogicalReplicationStatusTest.java         |  548 ++
 .../replication/LogicalReplicationTest.java   |  958 ++
 .../replication/PhysicalReplicationTest.java  |  290 +
 .../ReplicationConnectionTest.java            |  101 +
 .../test/replication/ReplicationSlotTest.java |  395 +
 .../replication/ReplicationTestSuite.java     |   78 +
 .../socketfactory/CustomSocketFactory.java    |   83 +-
 .../socketfactory/SocketFactoryTestSuite.java |   59 +-
 .../test/ssl/CommonNameVerifierTest.java      |   43 +-
 .../test/ssl/LazyKeyManagerTest.java          |  158 +-
 .../test/ssl/LibPQFactoryHostNameTest.java    |   67 +-
 .../postgresql/test/ssl/PKCS12KeyTest.java    |  130 +-
 .../ssl/SingleCertValidatingFactoryTest.java  |  562 +-
 .../java/org/postgresql/test/ssl/SslTest.java |  834 +-
 .../org/postgresql/test/ssl/SslTestSuite.java |    8 +-
 .../org/postgresql/test/sspi/SSPITest.java    |  123 +-
 .../postgresql/test/sspi/SSPITestSuite.java   |    2 +-
 .../org/postgresql/{ => test}/util/Await.java |   24 +-
 .../util/BigDecimalByteConverterTest.java     |   94 +
 .../test/util/BrokenInputStream.java          |   30 +-
 .../postgresql/test/util/BufferGenerator.java |   46 +-
 .../util/ByteBufferByteStreamWriterTest.java  |  109 +-
 .../test/util/ByteStreamWriterTest.java       |  475 +-
 .../test/util/ExpressionPropertiesTest.java   |   83 +-
 .../postgresql/test/util/HostSpecTest.java    |  142 +-
 .../org/postgresql/test/util/IntListTest.java |   96 +
 .../test/util/LargeObjectVacuum.java          |   55 +
 .../postgresql/test/util/LazyCleanerTest.java |  149 +
 .../postgresql/test/util/LruCacheTest.java    |  305 +-
 .../postgresql/test/util/MiniJndiContext.java |  309 +-
 .../test/util/MiniJndiContextFactory.java     |    9 +-
 .../{ => test}/util/NullOutputStream.java     |   20 +-
 .../test/util/NumberParserTest.java           |   65 +
 .../test/util/ObjectFactoryTest.java          |  176 +-
 .../PGPropertyMaxResultBufferParserTest.java  |   85 +-
 .../test/util/PGPropertyUtilTest.java         |   62 +
 .../org/postgresql/test/util/PGbyteaTest.java |   54 +
 .../postgresql/test/util/PGtokenizerTest.java |   44 +
 .../test/util/PasswordUtilTest.java           |  308 +-
 .../test/util/ReaderInputStreamTest.java      |  251 +
 .../postgresql/test/util/RegexMatcher.java    |   52 +-
 .../test/util/ServerVersionParseTest.java     |  147 +-
 .../test/util/ServerVersionTest.java          |   44 +-
 .../test/util/StrangeInputStream.java         |   57 +
 .../test/util/StrangeOutputStream.java        |   55 +
 .../test/util/StrangeProxyServer.java         |  129 +-
 .../util/StubEnvironmentAndProperties.java    |    8 +-
 .../postgresql/test/util/TestLogHandler.java  |   42 +
 .../UnusualBigDecimalByteConverterTest.java   |   33 +
 .../EnvironmentVariableMocker.java            |  124 +
 .../systemstubs/EnvironmentVariables.java     |  161 +
 .../ProcessEnvironmentInterceptor.java        |  195 +
 .../util/systemstubs/ThrowingRunnable.java    |   48 +
 .../exception/WrappedThrowable.java           |   10 +
 .../properties/PropertiesUtils.java           |   32 +
 .../properties/SystemProperties.java          |   23 +
 .../properties/SystemPropertiesImpl.java      |  115 +
 .../util/systemstubs/resource/Executable.java |   34 +
 .../resource/NameValuePairSetter.java         |   57 +
 .../util/systemstubs/resource/Resources.java  |   71 +
 .../resource/SingularTestResource.java        |   52 +
 .../systemstubs/resource/TestResource.java    |   36 +
 .../postgresql/test/xa/XADataSourceTest.java  | 1482 ++-
 .../org/postgresql/test/xa/XATestSuite.java   |    2 +-
 .../util/BigDecimalByteConverterTest.java     |   96 -
 .../java/org/postgresql/util/IntListTest.java |   96 -
 .../org/postgresql/util/LazyCleanerTest.java  |  150 -
 .../org/postgresql/util/NumberParserTest.java |   66 -
 .../java/org/postgresql/util/OSUtilTest.java  |   36 -
 .../postgresql/util/PGPropertyUtilTest.java   |   63 -
 .../java/org/postgresql/util/PGbyteaTest.java |   55 -
 .../org/postgresql/util/PGtokenizerTest.java  |   44 -
 .../util/ReaderInputStreamTest.java           |  252 -
 .../org/postgresql/util/TestLogHandler.java   |   42 -
 .../UnusualBigDecimalByteConverterTest.java   |   33 -
 pgjdbc/src/test/resources/test-file.xml       |    6 +-
 .../scram/client/test/RfcExampleSha1.java     |   35 +
 .../client/{ => test}/ScramClientTest.java    |   16 +-
 .../client/{ => test}/ScramSessionTest.java   |   31 +-
 .../scram/common/stringprep/SaslPrepTest.java |   87 -
 .../common/{ => test}/RfcExampleSha1.java     |    2 +-
 .../common/{ => test}/RfcExampleSha256.java   |    2 +-
 .../{ => test}/ScramAttributeValueTest.java   |   15 +-
 .../common/{ => test}/ScramFunctionsTest.java |   32 +-
 .../{ => test}/ScramMechanismsTest.java       |   15 +-
 .../{ => test}/ScramStringFormattingTest.java |   12 +-
 .../gssapi/Gs2AttributeValueTest.java         |   21 +-
 .../{ => test}/gssapi/Gs2HeaderTest.java      |   18 +-
 .../message/ClientFinalMessageTest.java       |   12 +-
 .../message/ClientFirstMessageTest.java       |   23 +-
 .../message/ServerFinalMessageTest.java       |   13 +-
 .../message/ServerFirstMessageTest.java       |   11 +-
 .../stringprep/StringPreparationTest.java     |   23 +-
 .../util/AbstractCharAttributeValueTest.java  |   12 +-
 .../common/{ => test}/util/Base64Test.java    |   35 +-
 .../{ => test}/util/CryptoUtilTest.java       |   14 +-
 .../util/StringWritableCsvTest.java           |   14 +-
 .../{ => test}/util/UsAsciiUtilsTest.java     |   17 +-
 settings.gradle                               |   10 +-
 614 files changed, 100376 insertions(+), 98526 deletions(-)
 create mode 100644 NOTICE.txt
 delete mode 100644 pgjdbc/src/test/java/org/postgresql/core/AsciiStringInternerTest.java
 delete mode 100644 pgjdbc/src/test/java/org/postgresql/core/CommandCompleteParserNegativeTest.java
 delete mode 100644 pgjdbc/src/test/java/org/postgresql/core/CommandCompleteParserTest.java
 delete mode 100644 pgjdbc/src/test/java/org/postgresql/core/OidToStringTest.java
 delete mode 100644 pgjdbc/src/test/java/org/postgresql/core/OidValueOfTest.java
 delete mode 100644 pgjdbc/src/test/java/org/postgresql/core/OidValuesCorrectnessTest.java
 delete mode 100644 pgjdbc/src/test/java/org/postgresql/core/ParserTest.java
 delete mode 100644 pgjdbc/src/test/java/org/postgresql/core/ReturningParserTest.java
 delete mode 100644 pgjdbc/src/test/java/org/postgresql/core/UTF8EncodingTest.java
 delete mode 100644 pgjdbc/src/test/java/org/postgresql/core/v3/V3ParameterListTests.java
 delete mode 100644 pgjdbc/src/test/java/org/postgresql/core/v3/adaptivefetch/AdaptiveFetchCacheTest.java
 delete mode 100644 pgjdbc/src/test/java/org/postgresql/jdbc/AbstractArraysTest.java
 delete mode 100644 pgjdbc/src/test/java/org/postgresql/jdbc/ArraysTest.java
 delete mode 100644 pgjdbc/src/test/java/org/postgresql/jdbc/ArraysTestSuite.java
 delete mode 100644 pgjdbc/src/test/java/org/postgresql/jdbc/BigDecimalObjectArraysTest.java
 delete mode 100644 pgjdbc/src/test/java/org/postgresql/jdbc/BitFieldTest.java
 delete mode 100644 pgjdbc/src/test/java/org/postgresql/jdbc/BooleanArraysTest.java
 delete mode 100644 pgjdbc/src/test/java/org/postgresql/jdbc/BooleanObjectArraysTest.java
 delete mode 100644 pgjdbc/src/test/java/org/postgresql/jdbc/ByteaArraysTest.java
 delete mode 100644 pgjdbc/src/test/java/org/postgresql/jdbc/ConnectionValidTest.java
 delete mode 100644 pgjdbc/src/test/java/org/postgresql/jdbc/DeepBatchedInsertStatementTest.java
 delete mode 100644 pgjdbc/src/test/java/org/postgresql/jdbc/DoubleArraysTest.java
 delete mode 100644 pgjdbc/src/test/java/org/postgresql/jdbc/DoubleObjectArraysTest.java
 delete mode 100644 pgjdbc/src/test/java/org/postgresql/jdbc/FloatArraysTest.java
 delete mode 100644 pgjdbc/src/test/java/org/postgresql/jdbc/FloatObjectArraysTest.java
 delete mode 100644 pgjdbc/src/test/java/org/postgresql/jdbc/IntArraysTest.java
 delete mode 100644 pgjdbc/src/test/java/org/postgresql/jdbc/IntegerObjectArraysTest.java
 delete mode 100644 pgjdbc/src/test/java/org/postgresql/jdbc/LargeObjectManagerTest.java
 delete mode 100644 pgjdbc/src/test/java/org/postgresql/jdbc/LongArraysTest.java
 delete mode 100644 pgjdbc/src/test/java/org/postgresql/jdbc/LongObjectArraysTest.java
 delete mode 100644 pgjdbc/src/test/java/org/postgresql/jdbc/ParameterInjectionTest.java
 delete mode 100644 pgjdbc/src/test/java/org/postgresql/jdbc/PgSQLXMLTest.java
 delete mode 100644 pgjdbc/src/test/java/org/postgresql/jdbc/ResourceLockTest.java
 delete mode 100644 pgjdbc/src/test/java/org/postgresql/jdbc/ScramTest.java
 delete mode 100644 pgjdbc/src/test/java/org/postgresql/jdbc/ShortArraysTest.java
 delete mode 100644 pgjdbc/src/test/java/org/postgresql/jdbc/ShortObjectArraysTest.java
 delete mode 100644 pgjdbc/src/test/java/org/postgresql/jdbc/StringArraysTest.java
 delete mode 100644 pgjdbc/src/test/java/org/postgresql/jdbc/UUIDArrayTest.java
 delete mode 100644 pgjdbc/src/test/java/org/postgresql/jdbcurlresolver/PgPassParserTest.java
 delete mode 100644 pgjdbc/src/test/java/org/postgresql/jdbcurlresolver/PgServiceConfParserTest.java
 delete mode 100644 pgjdbc/src/test/java/org/postgresql/replication/CopyBothResponseTest.java
 delete mode 100644 pgjdbc/src/test/java/org/postgresql/replication/LogSequenceNumberTest.java
 delete mode 100644 pgjdbc/src/test/java/org/postgresql/replication/LogicalReplicationStatusTest.java
 delete mode 100644 pgjdbc/src/test/java/org/postgresql/replication/LogicalReplicationTest.java
 delete mode 100644 pgjdbc/src/test/java/org/postgresql/replication/PhysicalReplicationTest.java
 delete mode 100644 pgjdbc/src/test/java/org/postgresql/replication/ReplicationConnectionTest.java
 delete mode 100644 pgjdbc/src/test/java/org/postgresql/replication/ReplicationSlotTest.java
 delete mode 100644 pgjdbc/src/test/java/org/postgresql/replication/ReplicationTestSuite.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/TestUtil.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/annotations/DisabledIfServerVersionBelow.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/annotations/tags/Replication.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/annotations/tags/Xa.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/core/AsciiStringInternerTest.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/core/CommandCompleteParserNegativeTest.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/core/CommandCompleteParserTest.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/core/OidToStringTest.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/core/OidValueOfTest.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/core/OidValuesCorrectnessTest.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/core/ParserTest.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/core/ReturningParserTest.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/core/UTF8EncodingTest.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/core/v3/V3ParameterListTests.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/core/v3/adaptivefetch/AdaptiveFetchCacheTest.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/impl/AfterBeforeParameterResolver.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/impl/DefaultParameterContext.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/impl/ServerVersionCondition.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/jdbc/AbstractArraysTest.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/jdbc/ArraysTest.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/jdbc/ArraysTestSuite.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/jdbc/BigDecimalObjectArraysTest.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/jdbc/BitFieldTest.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/jdbc/BooleanArraysTest.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/jdbc/BooleanObjectArraysTest.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/jdbc/ByteaArraysTest.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/jdbc/ConnectionValidTest.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/jdbc/DeepBatchedInsertStatementTest.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/jdbc/DoubleArraysTest.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/jdbc/DoubleObjectArraysTest.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/jdbc/FloatArraysTest.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/jdbc/FloatObjectArraysTest.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/jdbc/IntArraysTest.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/jdbc/IntegerObjectArraysTest.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/jdbc/LargeObjectManagerTest.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/jdbc/LongArraysTest.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/jdbc/LongObjectArraysTest.java
 rename pgjdbc/src/test/java/org/postgresql/{ => test}/jdbc/NoColumnMetadataIssue1613Test.java (54%)
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/jdbc/ParameterInjectionTest.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/jdbc/PgSQLXMLTest.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/jdbc/ResourceLockTest.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/jdbc/ScramTest.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/jdbc/ShortArraysTest.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/jdbc/ShortObjectArraysTest.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/jdbc/StringArraysTest.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/jdbc/UUIDArrayTest.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/jdbcurlresolver/PgPassParserTest.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/jdbcurlresolver/PgServiceConfParserTest.java
 delete mode 100644 pgjdbc/src/test/java/org/postgresql/test/osgi/OsgiTestSuite.java
 delete mode 100644 pgjdbc/src/test/java/org/postgresql/test/osgi/PGDataSourceFactoryTest.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/replication/CopyBothResponseTest.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/replication/LogSequenceNumberTest.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/replication/LogicalReplicationStatusTest.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/replication/LogicalReplicationTest.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/replication/PhysicalReplicationTest.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/replication/ReplicationConnectionTest.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/replication/ReplicationSlotTest.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/replication/ReplicationTestSuite.java
 rename pgjdbc/src/test/java/org/postgresql/{ => test}/util/Await.java (61%)
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/util/BigDecimalByteConverterTest.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/util/IntListTest.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/util/LargeObjectVacuum.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/util/LazyCleanerTest.java
 rename pgjdbc/src/test/java/org/postgresql/{ => test}/util/NullOutputStream.java (55%)
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/util/NumberParserTest.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/util/PGPropertyUtilTest.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/util/PGbyteaTest.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/util/PGtokenizerTest.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/util/ReaderInputStreamTest.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/util/StrangeInputStream.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/util/StrangeOutputStream.java
 rename pgjdbc/src/test/java/org/postgresql/{ => test}/util/StubEnvironmentAndProperties.java (85%)
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/util/TestLogHandler.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/util/UnusualBigDecimalByteConverterTest.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/util/systemstubs/EnvironmentVariableMocker.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/util/systemstubs/EnvironmentVariables.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/util/systemstubs/ProcessEnvironmentInterceptor.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/util/systemstubs/ThrowingRunnable.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/util/systemstubs/exception/WrappedThrowable.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/util/systemstubs/properties/PropertiesUtils.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/util/systemstubs/properties/SystemProperties.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/util/systemstubs/properties/SystemPropertiesImpl.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/util/systemstubs/resource/Executable.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/util/systemstubs/resource/NameValuePairSetter.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/util/systemstubs/resource/Resources.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/util/systemstubs/resource/SingularTestResource.java
 create mode 100644 pgjdbc/src/test/java/org/postgresql/test/util/systemstubs/resource/TestResource.java
 delete mode 100644 pgjdbc/src/test/java/org/postgresql/util/BigDecimalByteConverterTest.java
 delete mode 100644 pgjdbc/src/test/java/org/postgresql/util/IntListTest.java
 delete mode 100644 pgjdbc/src/test/java/org/postgresql/util/LazyCleanerTest.java
 delete mode 100644 pgjdbc/src/test/java/org/postgresql/util/NumberParserTest.java
 delete mode 100644 pgjdbc/src/test/java/org/postgresql/util/OSUtilTest.java
 delete mode 100644 pgjdbc/src/test/java/org/postgresql/util/PGPropertyUtilTest.java
 delete mode 100644 pgjdbc/src/test/java/org/postgresql/util/PGbyteaTest.java
 delete mode 100644 pgjdbc/src/test/java/org/postgresql/util/PGtokenizerTest.java
 delete mode 100644 pgjdbc/src/test/java/org/postgresql/util/ReaderInputStreamTest.java
 delete mode 100644 pgjdbc/src/test/java/org/postgresql/util/TestLogHandler.java
 delete mode 100644 pgjdbc/src/test/java/org/postgresql/util/UnusualBigDecimalByteConverterTest.java
 create mode 100644 scram-client/src/test/java/com/ongres/scram/client/test/RfcExampleSha1.java
 rename scram-client/src/test/java/com/ongres/scram/client/{ => test}/ScramClientTest.java (96%)
 rename scram-client/src/test/java/com/ongres/scram/client/{ => test}/ScramSessionTest.java (75%)
 delete mode 100644 scram-common/src/test/java/com/ongres/scram/common/stringprep/SaslPrepTest.java
 rename scram-common/src/test/java/com/ongres/scram/common/{ => test}/RfcExampleSha1.java (98%)
 rename scram-common/src/test/java/com/ongres/scram/common/{ => test}/RfcExampleSha256.java (98%)
 rename scram-common/src/test/java/com/ongres/scram/common/{ => test}/ScramAttributeValueTest.java (87%)
 rename scram-common/src/test/java/com/ongres/scram/common/{ => test}/ScramFunctionsTest.java (90%)
 rename scram-common/src/test/java/com/ongres/scram/common/{ => test}/ScramMechanismsTest.java (92%)
 rename scram-common/src/test/java/com/ongres/scram/common/{ => test}/ScramStringFormattingTest.java (90%)
 rename scram-common/src/test/java/com/ongres/scram/common/{ => test}/gssapi/Gs2AttributeValueTest.java (81%)
 rename scram-common/src/test/java/com/ongres/scram/common/{ => test}/gssapi/Gs2HeaderTest.java (88%)
 rename scram-common/src/test/java/com/ongres/scram/common/{ => test}/message/ClientFinalMessageTest.java (87%)
 rename scram-common/src/test/java/com/ongres/scram/common/{ => test}/message/ClientFirstMessageTest.java (89%)
 rename scram-common/src/test/java/com/ongres/scram/common/{ => test}/message/ServerFinalMessageTest.java (89%)
 rename scram-common/src/test/java/com/ongres/scram/common/{ => test}/message/ServerFirstMessageTest.java (85%)
 rename scram-common/src/test/java/com/ongres/scram/common/{ => test}/stringprep/StringPreparationTest.java (93%)
 rename scram-common/src/test/java/com/ongres/scram/common/{ => test}/util/AbstractCharAttributeValueTest.java (90%)
 rename scram-common/src/test/java/com/ongres/scram/common/{ => test}/util/Base64Test.java (58%)
 rename scram-common/src/test/java/com/ongres/scram/common/{ => test}/util/CryptoUtilTest.java (88%)
 rename scram-common/src/test/java/com/ongres/scram/common/{ => test}/util/StringWritableCsvTest.java (93%)
 rename scram-common/src/test/java/com/ongres/scram/common/{ => test}/util/UsAsciiUtilsTest.java (89%)

diff --git a/NOTICE.txt b/NOTICE.txt
new file mode 100644
index 0000000..bb8b625
--- /dev/null
+++ b/NOTICE.txt
@@ -0,0 +1,8 @@
+This work integrates
+
+pgjdbc - https://github.com/pgjdbc/pgjdbc (as of 1 Apr 2024)
+stringprep - Stringprep (RFC 3454) Java implementation https://github.com/ongres/stringprep (as of 1 Apr 2024)
+saslprep - a profile of stringprep
+scram-client - SCRAM (RFC 5802) Java implementation  https://github.com/ongres/scram (as of 1 Apr 2024)
+
+All of those projects were modified for Java 21+ with JPMS info.
diff --git a/gradle.properties b/gradle.properties
index dbb89c5..58fc89c 100644
--- a/gradle.properties
+++ b/gradle.properties
@@ -1,4 +1,3 @@
 group = org.xbib.jdbc
 name = pgjdbc
 version = 42.7.4.0
-
diff --git a/gradle/test/junit5.gradle b/gradle/test/junit5.gradle
index 6cace6f..a974336 100644
--- a/gradle/test/junit5.gradle
+++ b/gradle/test/junit5.gradle
@@ -8,8 +8,8 @@ dependencies {
 
 test {
     useJUnitPlatform()
-    failFast = true
-    systemProperty 'java.util.logging.config.file', 'src/test/resources/logging.properties'
+    failFast = false
+    //systemProperty 'java.util.logging.config.file', 'src/test/resources/logging.properties'
     testLogging {
         events 'STARTED', 'PASSED', 'FAILED', 'SKIPPED'
         showStandardStreams = true
diff --git a/gradle/wrapper/gradle-wrapper.jar b/gradle/wrapper/gradle-wrapper.jar
index d64cd4917707c1f8861d8cb53dd15194d4248596..e6441136f3d4ba8a0da8d277868979cfbc8ad796 100644
GIT binary patch
delta 34118
zcmY(qRX`kF)3u#IAjsf0xCD212@LM;?(PI<f(@>NyAue(f;$XO2=4Cg1P$=#e%|lo
zKk1`B>Q#GH)wNd-&cJofz}3=WfYndTeo)CyX{fOHsQjGa<{e=jamMNwjda<PyWE6-
zP~B#x7leo3nD`qQ)CqpqLwL&Nv*c_uf@6ZzDxH4Kk*=_Q!1=S4w`ejm#xafRiVQ9K
zD|NMG<SxpdU<&W$m3qe4z`XCn(ZbVn07Ju^@)#`N!0VN6<~T~KDoO4tX0Wnd#9^aG
zzw_kBWvKv8AX+m002<gC-eNM-rZnBB9F6nP%6MQjX4aHBE}ky4v9DAk%X3G|ZVInC
z0sH3o9G@FVA`%%F8RyEFms&lE|8}H=IL>tD={CN3>GNchOE9OGPIqr)3v>RcKWR3Z
zF-guIMjE2UF0Wqk1)21791y#}ciBI*bAenY*B<S7Lsj7;64caBml7(9sSP$#!4bz5
zfe$Y|L&2pF$!`g2EFBw-(C>MW_)AeSuM5}vz_~`+1i!Lo?XAEq{TlK5-efNFgHr6o
zD>^vB&%3ZGEWMS>`?tu!@66|uiDvS5`?bF=gIq3rkK(j<_TybyoaDHg8;Y#`;>tXI
z=tXo~e9{U!*hqTe#nZjW4z0mP8A9UUv1}C#R*@yu9G3k;`Me0-BA2&Aw6f`{Ozan2
z8c8Cs#dA-7V)ZwcGKH}jW!Ja&VaUc@mu5a@CObzNot?b{f+~+212lwF;!QKI16FDS
zodx>XN$sk9;t;)maB^s6sr^L32EbMV(uvW%or=|0@U6cU<cnXKOZI@-_2b|FP!$}s
zAe;S8Mg2+heB}qk71p?*lsuXu#LR?_0T)ujf|bz~>kE`_!<=LHLlRGJx@gQI=B(nn
z-GEjDE}*8>3U$n(t^(b^C$q<Dz5^v_5fDC=_*60>STI;}6q&ypp?-2rGpqg7b}pyT
zOARu2x><t9J4jOACSe7Cla*>0HB{&D(d3sp`+}ka+Pca5glh|c=M)Ujn_$ly^X6&u
z%Q4Y*LtB_>i6(YR!?{Os-(^J`(70lZ&Hp1I^?t@~SFL1!m0x6j|NM!-JTDk)%Q^R<
z@e?23FD&9_W{Bgtr&CG&*Oer3Z(Bu2EbV3T9FeQ|-vo5pwzwQ%g&=zFS7b{n6T2ZQ
z*!H(=z<{D9@c`K<z${~)Mo?=i!8kgh3^kh#gZ+tgX&gTG!eX0?^?c|0?IsnpYDup`
zE@9&nF#>mHO&DbUIzpg`+r5207}4D=_P$ONIc5lsFgn)UB-oUE#{r+|uHc^hzv_df
zV`n<Uy?gf){vC8%ni$H6BLQ|&nNh_JVuc@V<M{j`fPiV3mmYOH;{D(UIYmW$_M(id
zfz>8&qry%jXQ33}Bjqcim~BY1?KZ}x453Oh7G@fA(}+m(f$)TY%7n=MeLi{jJ7LMB
zt(mE*vFnep?YpkT_&WPV9*f>uSi#n#@STJmV&SLZnlLsWYI@y+Bs=gzcqche=&<e&
zH!*BfX&4XuI(sK|M6EWNz&xj!)?<o^!;`2$ksx0>cBH2WL)dkR!a95*Ri)JH_4c*-
zl4pPLl^as5_y&6RDE@@7342DNyF&GLJez#eMJjI}#pZN{Y8io{l*D+|f_Y&RQPia@
zNDL;SBERA|B#cjlNC@V<BMj^PW+*!uB6Kia#Q_k#D<Kdw%NW0h5PShX-Ateg=^YLq
zPqOC>U{2csOvB8$HzU$01Q?y)KEfos>W46VMh>P~oQC8k=26-Ku)@C|n^zDP!hO}Y
z_tF}0@*Ds!JMt>?4y|l3?`v#5*oV-=vL7}zehMON^=s1%q+n=^^Z{^mTs7}*->#YL
z)x-~SWE{e?YCarwU$=cS>VzmUh?Q&7?#Xrcce+jeZ|%0!l|H_=D_`77hBfd4Zqk&!
zq-Dnt_?5*$Wsw8zGd@?woEtfYZ2|9L8b>TO6>oMh%`B7iBb)-aCefM~q|S2Cc0t9T
zlu-ZXmM0wd$!gd-dTtik{bqyx32%f;`XUvbUWWJmpHfk8^PQIEsByJm+@+-aj4J#D
z4#Br3pO6z1eIC>X^yKk|<Bgti^oqrbEGF$Jw^;w14UYT$*kTROuXlF!RjFMV72W^U
zoMllbg+6ow&xKMI7=%s{uAdU)(;XWPXi7!aAo)eS$$~Y>PeVwX_4B+IYJyJyc3B`4
zPrM#raacGIzVOexcVB;fcsxS=s<vwC^XKnl?x{xO@E;np^jTyr6HSfEb6qXiZr9dg
z)?Uhot!j3J`wuMIE#*geg>1e&V;Xe$tw&KQ`YaCkHTKe*Al#velxV{3wxx}`7@isG
zp6{+s)CG%HF#JBAQ_jM%zCX5X;J%-*%&jVI?6KpYyzGbq7qf;&hFprh?E5Wyo=bZ)
z8YNycvMNGp1836!-?nihm6jI`^C`EeGryoNZO1AFTQhzFJOA%Q{X(sMYlzABt!&f{
zoDENSuoJQIg5Q#@BUsNJX2h>jkdx4<+ipUymWKFr;w+s>$laIIkfP6nU}r+?J9bZg
zUIxz>RX$kX=C4m(zh-Eg$BsJ4OL&_J38PbHW&7JmR27%efAkqqdvf)Am)VF$+U3WR
z-E#I9H6^)zHLKCs7|Zs<7Bo9VCS3@CDQ;{UTczoEprCKL3ZZW!ffmZFkcWU-V|_M2
zUA9~8tE9<5`59W-UgUmDFp11YlORl3mS3*2#ZHjv{*-1#uMV_oVTy{PY(}AqZv#wF
zJVks)%N6LaHF$$<6p8S8Lqn+5&t}DmLKiC~lE{jPZ39oj{wR&fe*LX-z0m}9ZnZ{U
z>3-5Bh{KKN^n5i!M79Aw5eY=`6fG#aW1_ZG;fw7JM69qk^*(rmO{|Z6rXy?l=K=#_
zE-zd*P|(sskasO(cZ5L~_{Mz&Y@@@Q)5_8l<6vB$<SnmTz>@226O+pDvkFaK8b>%2
zfMtgJ@+cN@w>3)(_uR;s8$sGONbYvoEZ3-)zZk4!`tNzd<0lwt{RAgplo*f@Z)uO`
zzd`ljSqKfHJOLx<Pf6T=jN-Y)-G`fh^*;28D6YUz?Jn$Qe<sb8Bqa)nI_IHAx#i9Y
z$j}`a&gQX1Vaa!Ea{Z|{=AEw)&>ya4_}T`k5Ok1Mpo#MSqf~&ia3uIy{zyuaF}pV6
z)@$ZG5LYh8Gge*LqM_|GiT1*J*uKes=Oku_gMj&;FS`*sfpM+ygN&yOla-^WtIU#$
zuw(_-?DS?6DY7IbON7J)p^IM?N>7x^3)(7wR4PZJu(teex%l>zKAUSNL@~{czc}bR
z)I{Xz<Lxhu3=iQUmF>XqZBU3a;7UQ~PvAx8g-3q-9AEd}1JrlfS8NdPc+!=HJ6Bs(
zCG!0;e0z-22(Uzw>hkEmC&<fdEY_MO4!XLJs6(n80jtDO!nnCLqs#Y_I0IE~UCPp@
zM_JDqY7&S(Y&QK^-o&47T#3E|`NAlIxvHf3kny;RvvXG3GAWOcnn~MYcAb70=&uwJ
zkt37-cyED3`=cXw-}?~Jre&#~t1kA3Jz_#Bn#jD975Ham#lXmn@!Xe|z!$O^A|8Ja
zxE40Kw#qS@4?x}td&6F5(NIU<?GmP1&i-wkqanUS(~JutQ=}J2g$roe>xj?{0p|kc
zM}MMXCF%RLLa#5jG`+}{pDL3M&|%3BlwOi?dq!)KUdv5__zR>u^o|QkYiqr(m3HxF
z6J*DyN#Jpooc$ok=b7{UAVM@<lCUruKn!C)jTs?G3tF=X?02t9ti$}%_-&Apkmnno
z%fbj!k#YwEr={VibqP-VP5rAcB3fluUzRfi(z01WI?#!N!mLc);E?^+GEin57qXIO
zg8kNfR~-pWXV>nwGsr6kozSddwulf5g1{B=0#2)zv!zLXQup^BZ4sv*sEsn)+MA?t
zEL<g?Ek3W6nR6!O+b*MM+O~eGu_sDZwCnE-jqq7Mvt4JiAYMi&B_$Kb75}|i#Oq?4
z^@)_zxi_9dEszN`mdX;tu;@!Yj!NQXjGcs`B*6!}Q`Xp8$`LBVWzdw?k{MZ=kF0i6
z0u$CdmQDt$g$6JM?JyEA2oW1N)I#|wQ>)}3*R?4(J~CpeSJPM!oZ~8<cg4cY=X|VM
z=^|PC37b4k-X|34t*R-GX&L@u*U|J4q?K3(MG@qFTXx0}+s2jwB%hz+*7-k{2(QHj
zQmM#6vaQDzB)m!b_6aC`nE*p#lagJ>;8s_=@6o`IA%{aEA9!GELRvOuncE`s7sH91
zmF=+T!Q6%){?lJn3`5}oW31(^Of|$r%`~gT{eimT7R~*Mg@x+tWM3KE>=Q>nkMG$U
za7r>Yz2LEaA|PsMafvJ(Y>Xzha?=>#B!sYfVob4k5Orb$INFdL@U0(J8Hj&kgWUlO
zPm+R07E+oq^4f4#HvEPANGWLL_!uF{nkHYE&BCH%l1FL_r(Nj@M)*VOD5S42Gk-yT
z^23oAMvpA57H(fkDGMx86Z}rtQhR^L!T2iS!<mO!Z%ty;Vm%l(OCwBkU1^sK>788E
z+^${W1V}J_NwdwdxpXAW8}#6o1(Uu|vhJvubFvQIH1bDl4J4iDJ+181KuDuHwvM?`
z%1@Tnq+7>p{O&p=@QT}4wT;HCb@i)&7int<0#bj8j0sfN3s6|a(l7Bj#7$hxX@~iP
z1HF8RFH}irky&eCN4T94VyK<n?6cO6O!>qGywEGY{Gt0Xl-`|dOU&{Q;Ao;sL>C6N
zXx1y^RZSaL-pG|JN;j9ADjo^XR}gce#seM4QB1?S`L*aB&QlbBIRegMnTkTCks7JU
z<0(b+^Q?HN1&$M1l&I@>HMS;!&bb()a}hhJzsmB?I`poqTrSoO>m_JE5U4=?o;OV6
zBZjt;*%1P>%2{UL=;a4(aI>PRk|mr&F^=v6Fr&xMj8fRCXE5Z2qdre&;$_RNid5!S
zm^XiLK25G6_j4dWkFqjtU7#s;b8h?BYFxV?OE?c~&ME`n`$ix_`mb^AWr+{<eR?ie
zu^n7F<r|0}4&z!1h-oy{4Bz+F69}Q`VpCh(8k$5~F}&!BM0MdLg4qhh@bV;%4Rw1m
zzqF@FV8+w9w|_OPDgrx*NZebP^m@hq4m*)5tuF1+`%o#G^#zx@w5ZsxZaUE7jzw;N
zhD*<Wl`rn3nq7+Ag+JpPe%(D1QRTS`qM+;}+_^&I$1>M9{^^Rl;~KREplwy2q;&xe
zUR0SjHzKVYzuqQ84w$NKVPGVHL_4I)Uw<$uL2-Ml#+5r2X{LLqc*p13<F6YTV#kWf
zqG&w9QQdWL4?_d~CQnBI;?0D)Qg}{>{;w#E*Kwb*1D|v?e;(<>vl@VjnFB^^Y;;b3
z=R@(uRj6D}-h6CCOxAdqn~_SG=bN%^9(Ac?zfRkO5x2VM0+@_qk?MDXvf<z0O)%s7
zMAFU0$m3HcbC#~2Hc64da@3O%^-=j9undudUI{}zbhknzUL#akDQiL_v_tyrinjEO
zI(3QAK0&1x_y)uNn}57R#|)d${jFV)^rH-I`D9H?s`)o)uI9hKceIT0a|7|+J9G4R
z?^G4wKj6MYf*wobKriuVpvC#r;9ThcHk6^a^xn2ODvRhy6<dTeve8{i!Cu=iTXW7{
z{?H_E?=z!{hGvD8B;Y6V{)tUgB~RL}OZbPv*z!^?TUpHIl7qWjaL~c+st5S>=@q_*
z3IM@)er6-OXyE1Z4sU3{8$Y$>8NcnU-nkyWD&2ZaqX1JF_JYL8y}>@V8A5%lX#U3E
zet5PJM`z79q9u5v(OE~{by|Jzlw2<0h`hKpOefhw=fgLTY9M8h+?37k@TWpzAb2Fc
zQMf^aVf!yXlK?@5d-re}!fuAWu0t57ZKSSacwRGJ$0uC}ZgxCTw>cjRk*xCt%w&hh
zoeiIgdz__&u~8s|_TZsGvJ7sjvBW<(C@}Y%#l_ID2&C`0;Eg2Z+pk;IK}4T@W6X5H
z`s?ayU-iF+aNr5--T-^~K~p;}D<LJ~4Vm)-6?_|)BF5gW4JSY7&(G|Av&E;#AyyV$
zSunlmXPqL`WNNYmTNMzV20flD83AxG;E^Ec8R;(;vdTi$02nI6(4u&=rI7r^2x}@}
zb^h(s^Hv9AwRzcBrk1j8D%Js|d7yM)>(*GWOAYDV9JEw!w8ZYzS3;W6*_`#aZw&9J
ziXhBKU3~zd$kKzCAP-=t&cFDeQR*_e*(excIUxKuD@;-twSlP6>wWQU)$|H3Cy+`=
z-#7OW!ZlYzZxkdQpfqVDFU3V2B_-eJS)Fi{fLtRz!K{~7TR~XilNCu=Z;{GIf9KYz
zf3h=Jo+1#_s>z$lc~e)l93h&RqW1VHYN;Yjwg#Qi0yzjN^M4cuL>Ew`_-_wRhi*!f
zLK6vTpgo^Bz?8AsU%#n}^EGigkG3FXen3M;hm#C38P@Zs4{!QZPAU=m7ZV&xKI<yS
zef9C7IM#)bKApw^nm2P0xPH)?)BN&sFvRw*IK$#0AfO}b<r{uW7o8y}SN)F@&yQm%
zn~nNb7d)eJ2BHvG2FG3z@j=payQlcc`;O$D3XPT3JFukw8Xr%@g@bkhB?@#$+AOw(
z(|@nqsNM;#gS0yC1MWhDA!W&4Ru~!5ks5Q~Pml#jZyfXEak(Em<WRqk+Ush$9swq)
zmPM%H4#Ov0YvR0-8rrI^layyN^pm(_yZ9(@AnNKHRrR<^fYJvzs{G9-d<-PIeZxic
zI!fz70I#*sa*eAVT<99Vxgy-<y1w@xbY}slZ7CHAcKSW+wgZc7BMb2FfqhW}?;;BY
zP!O(w$5_aUKLF$CVvI<PnooNWjt084FOTmh7FT(=9D-a~XGZP;^5(x4>_HWNt90Ef
zxClm)ZY?S|n**2cNYy-xBlLAVZ=~+!|7y`(fh+M$#4zl&T^gV8ZaG(RBD!`3?9xcK
zp2+aD(T%QIgrLx5au&TjG1AazI;`8m{K7^!@m>uGCSR;Ut{&?t%3AsF{>0Cm(Kf)2
z?4?|J+!BUg*P~C{?m<lX{_VAm$p$7Sy5n4)MhYq@8YJpQU2G)^4Cs_N!H_NDI5wLC
zuxU!+I__)7xJYF^Pf|zP9)c{~jt|)Y(3ss;D?4|k?Aspv4X!02;`uMjy{H&7#%Q@o
z<nV(d4I5h&#m8g~O!r&@N38SW3XQ`EjZh!<u+JXC85$B2rn?)m6Y00}q3o2FWUH%+
zxkU_~@oBdV%UzT$+qN=8G&dY}bCZ=N#`=O2mzStcgw{uTG7hdoSCO(-*C<hlyRW_D
zI1Ra`6kd?oLFBA13(Fk<4zU<VtAK2o$FG2w@>wPQ#)gDMmro20YVNsVx5oWQMkzQ?
zsQ<ZZLyAH1!oNzFM}TfglR)=rWx*=M9GIjB?ZFULc>%Y>%7_wkJqnSMuZjB9lBM(o
zWut|B7w48cn}4buUBbdPBW_J@H7g=szrKEpb|aE>!4rLm+sO9K%iI75y~2HkUo^iw
zJ3se$8$|W>3}?JU@3h@M^HEFNmvCp|+$-0M?RQ8SMoZ@38%!tz8f8-Ptb@106heiJ
z^Bx!`0=I<O1KwEC{Zqa_WBh?i$W#4Fd46V<qLHAqFT^%K_=ff(6##B-Ub(l(@oz>m
z1!NUhO=9ICM*+||b3a7w*Y#5*Q}K^ar+oMMtekF0JnO>hzHqZKH0&PZ^^M(j;vwf_
z@^|VMBpcw8;4E-9J{<clOdQ-gLs}yNpT|>(u7sHSyZpQbS&N{VQ%ZCh{c1UA5;?R}
z+52*X_tkDQ(s~#-6`z4|Y}3N#a&dgP4S_^tsV=oZ<Y0d)Rr;arA(GJ}DlJV*pmcys
z*Mk;Dg{B7HD(oM1AgV$?pZ(feakO5tAV~SCs_W%F(?;(4m+NEoby{|2mc0XI>r4A1
zaSoPN1czE(UIBrC_r$0HM?RyBGe#lTBL4~JW#A`P^#0wuK)C-2$B6TvM<GJr3h=;Q
z>i@@%K@JAT_IB^T7Zfqc8?<ppV&KJMBtJ-oMFyu3aJ37qp`YqlHwSWDi``;8R3fKH
z;())3jrugRTov;`f2|96Ol0~`tdom%2YtK*BEp7dW){Z1+eUuiY^^+%IXC`kb4D<x
zHzFJB;p^zZ4`bRwAwzCEy&SFu3-$>{wHcSVG_?{(wUG%zhCm=%qP~EqeqKI$9UivF
zv+5IUOs|%@ypo6b+i=xsZ=^G1yeWe)z6IX-EC`F=(|_GCNbHbNp(CZ*lpSu5n`FRA
zhnrc4w+Vh?r>her@Ba_jv0Omp#-H7avZb=j_A~B%V0&FNi#!S8cwn0(Gg-Gi_LMI{
zCg=<XfJ{>g@m{W@u?GQ|yp^yENd;M=W2s-k7Gw2Z(tsD5fTGF{iZ%Ccgjy6O!AB4x
z%&=6jB7^}pyftW2YQpOY1w@%wZy%}-l0qJlOSKZXnN2wo3|hujU+-U~blRF!^;Tan
z0w;Srh0|Q~6*tXf!5-rCD)OYE(%S|^WTpa1KHtpHZ{!;KdcM^#g8Z^+LkbiBHt85m
z;2xv#83lWB(kplfgqv@ZNDcHizwi4-8+WHA$U-HBN<XXr0~J?$Lai#_;trL)VA8+1
zU!;TqE~}9XS7A@|JfNqKkGav_MPU#1&?e(-M~ysHg+5_(nK1v-W}9=?Zer5AMLpDA
zz5CzT^brYv;fMsP%wz!?lZWTP?W}8@vv2IuVI?tan0C9Y`-Wjik0PN~x7}xCz5j)1
zoSKG<CGs%hfuQ;WLC{xnn)TVIkPvvNhsVT7>qsZ`hKcUI3zV3d1ngJP-AMRET*A{>
zb2A>Fk|L|WYV;Eu4>{a6ESi2r3aZL7x}eRc?cf|~bP)6b7%BnsR{Sa>K^0obn?yiJ
zCVvaZ&;d_6WEk${F1SN0{_`(#TuOOH1as&#&xN~+JDzX(D-WU_nLEI}T_VaeLA=bc
zl_UZS$nu#C1yH}YV>N2^9^zye{rDrn(rS99>Fh&jtNY7PP15q%g=RGnxACdCov47=
zwf^<OLlg}xVjt0)p-rbBn7T~vS^5_Drk~dEL?fU{>9zfJaL{y`R#~tvVL#*<`=`Qe
zj_@Me$6sIK=LMFbBrJps7vdaf_HeX?eC+P^{AgSvbEn?n<}NDWiQGQG4^ZOc|GskK
z$Ve2_n8gQ-KZ=s(f`_X!+vM5)4+QmOP()2Fe#IL2toZBf+)8gTVgDSTN1CkP<}!j7
z0SEl>PBg{MnPHkj4wj$mZ?m5x!1ePVEYI(L_sb0OZ*=M%yQb?L{UL(2_*CTVbRxBe
z@{)COwTK1}!*CK0Vi4~AB;HF(MmQf|dsoy(eiQ><S&flYs}X=qI6%JhOL}c}8ReE6
z>WTKcEQlnKOri5xYsqi61Y=I4kzAjn5~{IWrz_l))|<ryzIgBfLDnKw3JSt*BE7Yk
z@<X1~%_3&{c@EWKT@&!_5{hgsv0*uWq^Ag5&!#`hO30t*Ey!ZHlK~#2TL+u093@m<
zmh@mPLF@+cbRf2&AOJqqsB1zg;bVL&+4p#cS%+M6!rPy}l*roS@+s2E+JcG2%rRTv
z2!(JA6$64oDKq`kHYO({m<UUf<`TtSFw}vw1zYukXby+c34(E)w*pi_qs3N7vw>Ls
zvq7xgQs?Xx@`N?f7+3XKLyD~6DRJw*uj*j?yvT3}a;(j_?YOe%hUFcPGWRVBXzpMJ
zM43g6DLFqS9tcTLSg=^&N-y0dXL816v&-nqC0iXdg7kV|PY+js`<tQ)jQ;9->F8dm
z2PuHw&k+8*&9SPQ6f!^5q0&AH(i+z3I7a?8O+S5`g)>}fG|BM&ZnmL;rk)|u{1!aZ
zEZHpAMmK_v$GbrrWNP|^2^s*!0waLW=-h5PZa-4jWYUt(<b#|Bsx3P3#+uA4`mmgX
z9Kz|`zO$@)@d{sIynMxFy0ta)_!*pZfTlj%)9Ee4zRt~F_6}gJArZ1%_&ds}7FnKN
zT=p-*f9G3aBCWvVUn3v-@ZXvQrHP}1zTx3P<Jhsl2HrSYxUcBQibO?|u3Wl@z}_;u
z86>Hr@EA(m3Mc3^uDxwt-me^55FMA9^>hpp26MhqjLg#^Y7OIJ5%ZLdNx&uDgIIqc
zZRZl|n6TyV)0^DDyVtw*jlWkDY&Gw4q;k!UwqSL6&sW$B*5Rc?&)dt29bDB*b6IBY
z6SY6Unsf6AOQdEf=P1inu6(6hVZ0~v-<>;LAlcQ2u?wRWj5VczBT$Op#8IhppP-1t
zfz5H59Aa~yh7EN;BXJsLyjkjqARS5iIhDVPj<=4AJb}m6M@n{xYj3qsR*Q8;h<IbJ
zd)uk&8m`?q_q1NS{$84>VxDyC4vLI;;?^eENOb5QARj#nII5l$MtBCI@5u~(ylFi$
zw6-+$<K_|-Wlyt+M0?(Bv&pcNNg0svOYzThtqKx!i*Dc5NZvH&F*N23|J~!vMN;d~
z^=H()7fOiv1)6`d`Xlx->$XQ}Ca>FWT>q{k)g{Ml(Yv=6aDfe?m<nPFf#(N8Laf1%
zeJ4>|5|kbGtWS}fKWI+})F6`x@||0oJ<?@*&=4(53;s&f1=p}VJ=CsEx0>^(g|+xi
zqlPdy5;`g*i*C=Q(aGeDw!eQg&w>UUj^{o?PrlFI=34qAU2u@BgwrBiaM8zoDTFJ<
zh7nWpv>dr?q;4ZA?}V}|7qWz4W?6#S&m>hs4IwvCBe@-C>+oohsQZ^JC*RfDRm!?y
zS4$7oxcI|##ga*y5hV>J4a%HHl^t$pjY%caL%-FlRb<$A$E!ws?8hf0@(4HdgQ!@>
zds{&g$ocr9W4I84TMa9-(&^_B*&R%^=@?Ntxi|Ejnh;z=!|uVj&3fiTngDPg=0=P2
zB)3#%HetD84ayj??qrxsd<YW&s^_fNRPJu1Lu~ooseFw$wVknfo;k}<f4``0v~+Y_
zKU_m+@|S(3^pR^^<>9nqrBem(8^_u_UY{1@R_vK-0H9N7lBX5K(^O2=0#TtUUGSz{
z%g>qU8#a$DyZ~EMa|8*@`GOhCW3%DN%xuS91T7~iXRr)SG`%=Lfu%U~Z_<B8YLfje
zenkDPv?7UMn^wh-#5zYOX60}ftn+_;r9>`1b=lSi?qpD4$vLh$?HU6t0MydaowUpb
zQr{>_${AMesCEffZo`}K0^~x>RY_ZIG{(r39MP>@=aiM@C;K)jUcfQV8#?SDvq>9D
zI{XeKM%$$XP5`7p3K0T}x;qn)VMo>2t}Ib(6zui;k}<<~KibAb%p)**e>ln<=qyWU
zrRDy|UXFi9y~PdEFIAXejLA{K)6<)Q`?;Q5!KsuEw({!#Rl8*5_F{TP?u|5(Hijv(
ztAA^I5+$A*+*e0V0R~fc{ET-RAS3suZ}TRk3r)xqj~g_hxB`qIK5z(5wxYboz%46G
zq{izIz^5xW1Vq#%lhXaZL&)FJWp0VZNO%2&ADd?+J%K$fM#T_Eke1{dQsx48dUPUY
zLS+DWMJeUSjYL453f@HpRGU6Dv)rw+-c6xB>(=p4U%}<qNSa;?l%-Qbgo6H=D;<Ac
z37*~61%G;Rk9zKyD{WraYx|9VK1aeUEagvgL(r)WImH;+hu8m^8fk3iQO!R?vp|AU
z#xX(31o5DGMHtXZ9zytcGdR#e9xm`5)!T;H26%a+**oa@WGMf=BFxg~`r&GAS7$4y
z^2|*EV<Mt4fFFPU)O>_p>z^I@Ow9`nkUG21?cMIh9}hN?R-d)*6%pr6d@mcb*ixr7
z)>Lo<&2F}~>WT1ybm^9UO{6P9;m+fU^06_$o9gBWL9_}EMZFD=rLJ~&e?fhDnJNBI
zKM=-WR6g7HY5tHf=V~6~QIQ~rakNv<yXlZw2~R->csamU8m28YE=z8+G7K=h%)l6k
zmCpiDInKL6*e#)#Pt;ANmjf`8h-nEt&d}(SBZMI_A{BI#ck-_V7nx)K9_D9K<wX2&
ztb`!H<{xu+%$XOK7#l;ig9boZT>-p@?Zh81#b@{wS?wCcJ%og)8RF*-0z+~)6f#T`
zWqF7_CBcnn=S-1QykC*F0YTsKMVG49BuKQBH%WuDkEy%E?*x&tt%0m>>5^HCOq|ux
zuvFB)JPR-<R9*qK#QvOEy<g2y->W|%$24eEC^AtG3Gp4qdK%pjRijF5Sg3X}uaKEE
z-L5p5aVR!NTM8T`4|2QA@hXiLXRcJveWZ%YeFfV%mO5q#($TJ`*U>hicS+CMj%Ip#
zivoL;dd*araeJK9EA<(tihD50FHWbITBgF9<S1F59NBB@-km2xgxh^jmVKRZHCzb@
zldh9O@Tx{jgxfxH#W4wy6M0z_-aTt2G@3|Yp>E<33A+eMr2;cgI3Gg6<-2o|_g9|>
zv5}i93<J?21Jwu60De+%9&8yD$M-==IBwP;d{BC=F44-LGWWplT_ESz4ovU#BL|G}
zcmR56^)_SYTt?AM`r&<V0_9alA|EwbVwM(BT3!TE$hsCu`Gm;Rg~8}C&9f4B^0>2(
zYf<!;V`YfO6gj*4fZkUAr?-F@aA9;z9|jbBgt=33h^ojL5J7|FUPfqa-W2^EN+IwG
zyucPVo58FAeHt$Nq*Z0Lz|^v35)f<vIMOX%n{Czoseg>TE9?4#nQhP@a|zm#9FST2
z!y+p3B;p>KkUzH!K;GkBW}bWssz)9b>Ulg^)EDca;jDl+q=243BddS$hY^fC6lbpM
z(q_bo4V8~e<Zs<b7cyyni#U}G!!KTEURDqc{bMpk?5mA1@g$Y$Wx|caEJ@!TvSwmW
zqw6$6_V9}M--vO-=^ZBsJr^(lCyvX*KI4W@0G*bBPEKAYRH$kkkjj{cPznS-d|ZUy
zJ?>VeA?0LFD6ZtKcmOH^75#q$E<Zzyhezvuj?3@d%!~X4ws-!zRzAK#UFhVU?-K@7
z2G)}Z&^k%kMrWcDJ1~9s%Y7B4b*Q5pIQJtR#E8_m=dCI`iz(ByzI1@)w#x&yV;Dmb
zBR*c4ZqzdV6%u1E<qjcp>o%a&qvE8Zsqg=$p}u^|>DSWUP5i{6)LAYF4E2DfGZuMJ
zMwxxmkxQf}Q$V3&2w|$`9_SQS^2NVbTHh;atB>=A%!}k-f4*i$X8m}Ni^ppZXk<vI
zv4b?e{$fL=4CN|)0GXH{ZZE#0qrw?r2yJZjI_c{zf+uDL9gwdN)`!v>5_oYF>Gq(&
z0wy{LjJOu}69}~#UFPc;$7ka+=gl(FZCy4xEsk);+he>Nnl>hb5Ud-lj!CNicgd^2
z_Qgr_-&S7*#nLAI7r()P$`x~fy)+y=W~6aNh_humoZr7MWGSWJPLk}$#<b0qDKzby
zfsF}6hSD25(h&SI*1V<_%OA=AVAytldFr(qeYf$b2GB9jc!!oXb+YO|A>w_1n%(@?
z3FnHf1lbxKJbQ9c&i<$(wd{tUTX6DAKs@cXIOBv~!9i{wD@*|kwfX~sjKASrNFGvN
zrFc=!0Bb^OhR2f`%hrp2ibv#KUxl)Np1aixD9{^o=)*U%n%rTHX?FSWL^UGpHpY@7
z<FJKJ-x}kSNUZ~J=(sQmm|=Hgx6NDTIwxW%&M+qtaTFkwf2nmoB#ib#d`3q#i8x4D
z8SO1)?2${?@9$aGnmIq+`mJ>74U}KoIRwxI#>)Pn4($A`nw1%-D}`sGRZD8Z#lF$6
zOeA5)+W2qvA%m^|$WluUU-O+KtMqd;Pd58?qZj})MbxYGO<{z9U&t4D{S2G>e+J9K
ztFZ?}ya>SVOLp9hpW)}G%kTrg*KXXXsLkGdgHb+R-ZXqdkdQC0_)`?6mqo8(EU#d(
zy;u&aVPe6C=YgCRPV!mJ6R6kdY*`e+VGM~`VtC>{k27!9vAZT)x2~AiX5|m1Rq}_=
z;A9LX^nd$l-9&2%4s~p5r6ad-siV`HtxKF}l&xGSYJmP=z!?Mlwmwef$EQq~7;#OE
z)U5eS6dB~~1pkj#9(}T3j!((8Uf%!W49FfUAozijoxInUE7z`~U3Y^}xc3xp){#9D
z<^Tz2xw}@o@fdUZ@hnW#dX6gDOj4R8dV}Dw`u!h@*K)-NrxT8%2`T}EvOImNF_N1S
zy?uo6_ZS>Qga4Xme3j#aX+1qdFFE{NT0Wfusa$^;eL5xGE_66!5_N8!Z~jCAH2=${
z*goHjl|z|kbmIE{cl-PloSTtD+2=CDm~ZHRgXJ8~1(g4W=1c3=2eF#3tah7ho`zm4
z05P&?nyqq$nC?iJ-nK_iBo=u5l#|Ka3H7{UZ&O`~t-=triw=SE7ynzMAE{Mv-{7E_
zViZtA(0^wD{iCCcg@c{54Ro@U5p1QZq_XlEGtdBAQ9@nT?(zLO0#)q55G8_Ug~Xnu
zR-^1~hp|cy&52iogG@o?-^AD8Jb^;@&Ea5jEicDlze6%>?u$-eE};bQ`T6@(bED0J
zKYtdc?%9*<<$2LCBzVx9CA4<AoGdF)iT9-BV}&@WMK}bKMAdQBHB9zIg~33D9XqBR
zD>YV|q-qg*-{yQ;|0=KIgI6~z0DKTtajw2Oms3<u6JfE3d!7x4=<@msnH0k~j9dW|
zZtZ80d$e)`WiYJI%5C4$o_@ZG+NLyWNO7P{Rii0d%$QNbSHy)#lwN8boUCoV3)k>L
zn{C%{P`duw!(F@*P)lFy11|Z&x`E2<=$Ln38>UR~z6~za(3r;45k<v>QK_^QTX%!s
zNzoIFFH8|Y>YVrUL5#mgA-Jh>j7)n<Zx96$&fzU>)5}iVM4%_@^GSwEIBA2g-;43*
z*)i7u*xc8jo2z8&=8t7qo|B-rsGw)b8UXnu`RgE4u!(J8yIJi(5m3~aYsADcfZ!GG
zzqa7p=sg<o&$HnFk3_PIDT7^R*l|Uy*ErkWh5p!iw%95SEqgx*(#2u}$;yHXwBY|t
z$vJDd#N!Akn_xau{*4y#!iM@tZOc&Rc)@cg)>`V_KjiqI*LA-=T;uiNRB;BZZ)~88
z`C%p8%hIev2rxS12@doqsrjgMg3{<L-IS7j<nG-26~xHYI&`%4j=)ZUR_2Nwyo&s*
zJ_0X{=sXdnBCoNzt%@LBIlEx@{+A&hsZt3nyF&Gtc)9fXv28Q&PU0uzxK5QDQ^0tT
z@ll2`vK+vydWK?7I0;u%diXOvNZ*YxV6uZ=wi~x}imLLs*Slf$SL&G~13D2Praj;n
z{D>A&N8A?%Ui5vSHh7!iC^ltF&HqG~;=16=h0{ygy^@HxixUb1XYcR36SB}}o3nxu
z_IpEmGh_CK<+sUh@2zbK9MqO!S5cao=8LSQg0Zv4?ju%ww^mvc0WU$q@!oo#2bv24
z+?c}14L2vlDn%Y0!t*z=$*a!`*|uAVu&NO!z_arim$=btpUPR5XGCG0U3YU`v>yMr
z^zmTdcEa<e${0lJ(<4$dGj^YwOU!gy-6u%trv^j}K^#M&{&D1UGEa2nS)OMS>!APX
zYF>^Q-TP11;{VgtMqC}7>B^2gN-3KYl33gS-p%f!X<_Hr?`rG8{jb9jmuQA9U;BeG
zHj6Pk(UB5c6zwX%SNi*Py*)gk^?+729$bAN-EUd*RKN7{CM4`Q65a1qF*-QWACA&m
zrT)B(M}yih{2r!Tiv5Y&O&=H_OtaHUz96Npo_k0eN|!*s2mLe!Zkuv>^E8Xa43ZwH
zO<I&Nur7C^JWb9`H||S7lnGT!zTpho2vN69_#Qs?kjYy$`Q0!HKi(a7WFX@;&MNY~
zax>I058AZznYGrRJ+`*GmZzMi6yliFmGMge6^j?|PN%ARns!Eg$ufpcLc#1Ns!1@1
zvC7N8M$mRgnixwEtX{ypBS^n`k@t2cCh#_6L6WtQb8E~*Vu+Rr)YsKZRX~hzLG*BE
zaeU#LPo?RLm(Wzltk79Jd1Y$|6<Tz#Ab3VOQL!oqnu8!3Ev#T)CDpRjo9fjjgw)N!
zErQj*AF93?w7h)pV7lh%{nq{Sh43S}4qRhV$tBKz2Q3F#YC0?apCV-cr-&g)Ta?xA
zc-})no|s-UajuvdSqa3zR2cl82_%nvGFwn23tfy0+-dPTJzZ8#!t6MWix9bj!?+yg
zn$~Be?akwbUV?z=Y{qc>aWz1)wf1K1RtqS;qyQMy@H@B805vQ%wfSJB?m&&=^m4i*
z<RSh9mC|UV(AH!)v4!3^(;(w}isKaD1$r>YVH`zTTFbFtNFkAI`Khe4e^CdGZw;O0
zqkQe2|NG_y6D%h(|EZNf&77_!NU%0y={^E=*gKGQ=)LdKPM3zUlM@otH2X07Awv8o
zY8Y7a1^&Yy%b%m{mNQ5sWNMTIq96Wtr>a(hL>Qi&F(ckgKkyvM0IH<_<ah`2Jtc4H
zzJ5Z$n<rF%>}v~Fv-GqDa<PYPnU!!je;%S69V23AFX{jR^q$cSK5eU!uzK5~_gTvx
z=xC#ygr;Z7l0l}tE!ut;$3u!G=k}WM8nnSovw+>pig=3*ZMOx!%cYY)SKzo7ECyem
z9Mj3C)tCYM?C9YIlt1?zTJXNOo&oVxu&uXKJs7i+j8p*Qvu2PAnY}b`KStdpi`trk
ztAO}T8eOC%x)mu+4ps8sYZ=vYJp16SVWEEgQyFKSfWQ@O5id6GfL`|2<}hMXLPszS
zgK><z_7D{#_0cM38i3?q!q=1kU*<f=E!5q~OGR+n-!$_a1@<av6z8(-tT0T>NWOoR
zBRyKeUPevpqKKShD|MZ`R;~#PdNMB3LWjqFKNvH9k+;(`;-pyXM55?qaji#nl~K8m
z_MifoM*W*X9CQiXAOH{cZcP0;Bn10E1)T@62Um>et2ci!J2$5-_HPy(AGif+BJpJ^
ziHWynC_%-NlrFY+(f7HyVvbDIM$5ci_i3?22ZkF>Y8RPBhgx-7k3M2>6m5R24C|~I
z&RPh9xpMGzhN4b<n_$TKj6;x$6eSY#J<6z8-<<eXD{YGtPeh(eK>ii*ryWaN^d(`0
zTOADlU)g`1p+SVMNLztd)c+;XjXox(VHQwqzu>FROvf0`s&|NEv26}(TAe;@=FpZq
zaVs6mp>W0rM3Qg*6x5f_bPJd!6dQGmh?&v0rpBNfS$DW-{4L7#_~-eA@7<2BsZV=X
zo<jFL%Bl|~iN=_%v|5Y<#Gpc-_nu`zPSa3B)eyzNRYkS_S7#Vl4Sx`|M*FHkF=-_)
z(qT@$sP8*6(;+2_2GrUtq?WVOz@%!47omewB^Cj*+)_Y7c8o#IQ%W|Wg+5PNnR*6U
zqsrE$&lE5UYNkcwa9IkWJQ`K6q;9o~nXTNNt4ypA@SdwPa*k(~yPsQD9A`rvY6hIQ
zbB3Wq3J(Ip%Spgn<@)Zq04$gB8>w){3aATmLZOQrs>uzDkXOD=IiX;Ue*B(^4RF%H
zeaZ^*MWn4tBDj(wj114r(`)P96EHq4th-;tWiHhkp2rDlrklX}I@ib-nel0slFoQO
zOeTc<hn5|jpM}{BL^>;Rh7sMIebO`1%u)=GlEj+7HU;c|Nj>2j)J-kpR)s3#+9AiB
zd$hAk6;3pu9(GCR#)#>aCGPYq%r&i02$<m+xZvsp>0L9=7Al<uiMgWi00$%C&<=p;
z&DJ&!X-+7DOQ<NGx053X=x&OTuveGw;P#7I963O-cntp%n;x3upG6$bSZdIscXKZz
zE26w-ESTe#dx<IxxgOyxd~k92x!}?e7=&}7EIUABcs@v7J^4#tE1Z2`vYo|Zfx#?M
zXxRQAUwIpnrd%x;d2(+0wHKQb5LXCF5FM|~=&cF?)N_xB4Bl|JpPd+>IGY<wi6MrB
zwuAQco(edmX)&f$!qgzbDk@I5RR!7e{<pl6;0q^D9rH)D+xQ^qUHFjiVuuZ(dNLoE
zk610d0D=8c-t>dlUO5%eH&M!ZWD&6^NBAj0Y<LF!W8hneCvn#&X!jeuMxFNL9R6gV
ziYL0E$zuPwu-dNyWbfxdM|kfI7dtb9QzzTE{}sJ=aTD#{Kb((+@c&OeMaT<*ofYbx
z;1%phK@uKkWduA*1(PK@!{WoV6$&w4_0RR0*51~cnd9Q6WRU~~+iD70xHZv5yb^q9
zSl{&Z2jJC&)zq%KFf=>9ZDcPg@r@8Y&-}e!aq0S(`}NuQ({;aigCPnq75U9cBH&Y7
ze)W0aD>muAepOKgm7uPg3Dz7G%)nEqTUm_&^^3(>+eEI;$ia`m<pPp!PnmJ$ax-Ni
zriP82FsWaes>>m0QHEkTt^<h|Kl{fGlDwG;CQ*b4lzdo-V$nC8<LA?4QJ%$e&M1jF
zj4MPd95jth&|E!zZ*d9v(}1-nH2c9uw}SWz(o)!u-C;JXs?Old?KmR?ZVapLr)~kt
z-!pxZ2T4qAqb;DH9;hiuAcN#25{6pE|B@nn@LY@;@6X}9ZRu6|3F2X?Musq<a_n+z
zJMV<b3!3d+o(r5xln5wq$mvr`E9}w25J;Qi>=cx^JsBC68#H(3zc~Z$E9I)oSrF$3
zUClHXhMBZ|^1ikm3nL$Z@v|JRhud*IhOvx!6X<(YSX(9LG#yYuZeB{=7-MyPF;?_8
zy2i3iVKG2q!=JHN>~!#Bl{cwa6-yB@b<;8LSj}`f9pw7#x3yTD>C=>1S@H)~(n_K4
z2-yr{2?|1b#lS`qG@+823j;&UE5|2+EdU4nVw5=m>o_gj#K>>(*t=xI7{R)lJhLU{
z4IO6!x@1f$aDVIE@1a0lraN9!(j~_uGlks)!&davUFRNYHflp<|ENwAxsp~4Hun$Q
z$w>@YzXp#VX~)ZP8`_b_sTg(Gt7?oXJW%^Pf0UW%YM+OGjKS}X`yO~{7WH6nX8S6Z
ztl!5AnM2Lo*_}ZLvo%?iV;D2z>#qdpMx*xY2*GGlRzmHCom`VedAoR=(A1nO)Y>;5
zCK-~a;#g5yDgf7_phlkM@)C8s!xOu)N2UnQhif-v5kL$*t=X}L9EyBRq$V(sI{90>
z=ghTPGswRVbTW@dS2H|)QYTY&I$ljbpNPTc_T|FEJkSW7MV!JM4I(ksRqQ8)V5>}v
z2Sf^Z9_v;dKSp_orZm09jb8;C(vzFFJgoYuWRc|Tt_&3k({wPKiD|*m!<X$9n>+<d
zfwbu(US!ljdxEAm@8L<A)Zh;j$1@#3i1Hba7~+=OxccBV?z(KQ^KiX-VG5F8{s<bz
zFTJ%sXJxQmw1gpd<`@dDaxW+0z@mAqX^!9bM3;Rg{vJ$y`jrFPf<taTjgOUiGo`tq
zU8^ynJ_vtmGeduy1>za$(l*!gNRo{xtmqjy1=kGzFkTH=Nc>EL@1Um0BiN1)wBO$i
z6rG={bRcT|%A3s3xh!Bw?=L&_-X+6}L9i~xRj2}-)7fsoq0|;;PS%mcn%_#oV#kAp
zGw^23c8<Fs8D%<c4(Nre;Oy43eq_*f8CVI6Y}O$7=4|A%Bh)_#+dgnF@9B?PZV+FK
z@D@b4V?}Hxb;mfT&3=fp%aQ_W<BpQFv8~7WceKD?ShRTE&*xP)Rp+|r%gOH<b#|`R
zpMk-?U*1{fo}21TS!!|lrVyo(<X*MXXY2ANSehq$FQcbg%rc;PQLx;F?K=D0V>_0~
ze}v9(p};6HM0+qF5^^>BBEI3<e#!JG>d=2<KmW5$FDhY04Ol??L%ZUPasCO+Pj|h!
zL*;#e7cVT$<F2gg5E8<=1=Y;hH6lJKfJK6P>DW&O#|(;wg}?3?uO=w+{*)+^l_-gE
zSw8GV=4_%U4*OU^hibDV38{Qb7P#Y8zh@BM9pEM_o2FuFc2LWrW2jRRB<+IE)G=Vx
zuu?cp2-`hgqlsn|$nx@I%TC!`>bX^G00_oKboOGGXLgyLKXoo$^@L7v;GWqfUFw3<
zekKMWo0LR;TaFY}T<F_}BWybmURg`OY9TP`)1kkA_VfNgEb>t4!O$3MU@pqcw!0w0
zA}SnJ6Lb597|P5W8$OsEHTku2Kw<?~p>9y4V=hx*K%iSn!#LW9W#~OiWf^dXEP$^2
zaok=UyGwy3GRp)bm6Gqr>8-4h@3=2`Eto2|JE6Sufh?%U6;ut1v1d@#EfcQP2chCt
z+mB{Bk5~()7G>wM3KYf7Xh?LGbwg1uWLotmc_}Z_o;XOUDyfU?{9atAT$={v82^w9
z(MW$gINHt4xB3{bdbhRR%T}L?M<kc`={1@bX$@c}pX`50EN?1djSoIxW|Rhd4u^_G
z>cK?!zkLK3(e>zKyei(yq%Nsijm~LV|9mll-XHavFcc$teX7v);H>=oN-+E_Q{c|!
zp<Ol>JV~-9AH}jxf6IF!PxrB9is{_9s@PYth^`pb%DkwghLdAyDREz(csf9)HcVRq
z+2Vn~>{(S&_;bq_qA{v7XbU?yR7;~JrLfo;g$Lkm#ufO1P`QW_`zWW+4+7xzQZnO$
z5&GyJs4-VGb5MEDBc5=zxZh9xEVoY(|2yRv&!T7LAlIs@tw+4n?v1T8M>;hBv}2n)
zcqi+>M*U@uY>4N3eDSAH2Rg@dsl!1py>kO39GMP#qOHipL~*cCac2_vH^6x@xmO|E
zkWeyvl@P$2Iy*mCgVF+b{&|FY*5Ygi82<omgvyGTqv|QTz=sN}TZ6>37i)9YW#Fp&
z?TJTQW+7U)xCE*`Nsx^yaiJ0KSW}}jc-ub)8Z8x(|K7G>`&l<bpFP+cS)jO`N_V-8
zyz*&criMW3Tge5_hcWP_V8=SvD0*^&JQ8c;z^ZkOK$H-b5$|gm$@;EfT;;o$;h7dL
z>{Y&~W=q#^4Gf{}aJ%6kLXsmv6cr=Hi*uB`V26;dr<aSR^xo%$9+~>4C$WrPnHO>g
zg1@A%DvIWPDtXzll39kY6#%j;aN7grYJP9AlJgs3FnC?crv$wC7S4_Z?<_s0j;MmE
z75yQGul2=bY%`l__1X3jxju2$Ws%hNv75ywfAqjgFO7wFsFDOW^)q2%VIF~WhwEW0
z45z^+r+}sJ{q+>X-w(}OiD(!*&cy4X&yM`!L0Fe+_RUfs@=J{AH#K~gArqT=#DcGE
z<spTPWq%oqdho$J0xh~6Q=^v}j=vBe*gL+|ud>!FwY(h&+&811rVCVoOuK)Z<-$EX
zp`TzcUQC256@YWZ*GkE@P_et4D@qpM92fWA6c$MV=^qTu7&g)U?O~-fUR&xFqNiY1
zRd=|zUs_rmFZhKI|H}dcKhy%Okl(#y#QuMi81zsY56Y@757xBQqDNkd+XhLQhp2BB
zBF^aJ<Yyh7pM5zBvii~t`R`{~RErSl{}-#Xjol7q<MNou@wjO9-e0+&FnPZ^6y4Ll
zv-4#qZasZI63$>__D676wLu|yYo6jNJNw^B+Ce;DYK!f$!dNs1*?D^97u^jKS++7S
z5qE%zG#HY-SMUn^_yru=T6v`)CM%K<>_Z>tPe|js`c<|y7<QPU`KuA=GRt?21}m$3
z)egm^_v+KsYdG<vt|ej|@=R$E;WS|)lq&hvDYqCWcNQVq*yoxejj)Sgjj^Yjl?NcX
zDFDlAtM@eY^eKCIx3Bn3tvI+`G-P!;moJ^4fysp(P&5s?@p7p>?qol&)C=>uLWkg5
zmzNcSAG_sL)E9or;i+O}tY^70@h7+=bG1;YDlX{<4zF_?{)K5B&?^tKZ6<$SD%@>F
zY0cl2H7)%zKeDX%Eo7`ky^mzS)s;842cP{_;dzFuyd~Npb4u!bwkkhf8-^C2e3`<?
zuCMcKb49Z{nlgrGOONk{-i&aq@>q8>MuPhgiv0VxHxvrN9_`rJv&GX0fWz-L-Jg^B
zrTsm>)-~j0F1sV=^V?UUi{L2cp%YwpvHwwLaSsCIrGI#({{QfbgDxLKsUC6w@m?y}
zg?l=7aMX-RnMxvLn_4oSB|9t;)Qf2%m-GKo_07?N1l^ahJ+W<L;$&i*6MJG?J+W=u
zwrv{|+x8@xWb=KyXLo=7Po1iIr>f8C>h5~=-o1BJzV@5HBTB-ACNpsHnGt6_ku37M
z{vIEB^tR=--4SEg{jfF=gEogtGwi&A$mwk7E+SV$$ZuU}#F3Y7t}o{!w<wD={LS;b
zZLz-;o0$Q>4LJh8v4PW%8HfUK@dta#l*z@w*9Xzz(i)r#WXi`r1D#oBPtNM7M?Hkq
zhhS1)ea5(6VY45|)tCTr*@yc$^Zc!<pn44UDZ+IBYjAJ`-ggM&;kb0pvO!s`m^2xQ
zD2Cg>zQzsNXU?aRN6mh7zVu~i=qTrX^>de+f6HYfDsW@6PBlw0CsDBcOWUmt&st>Z
zYNJEsRCP1#g0+Htb=wITvexBY@fOpAmR7<uCg}62o&UsLHP(Phe;7?`HXB?HKdlV$
z;n1V`+Y7~g)l@mcyIJ}uTJ!G<@eKpC>?szQNR~nM)?sPWIj)0)jG-EF8U@nnBaQZy
z)ImpVYQL>lBejMDjlxA$#G4%y+^_>N;}r@Zoe2|u-9-x@vvD^ZWnV>Gm=pZa7REAf
zOnomhCxBaGZgT+4kiE%aS&lH2sI1mSCM<%)Cr*Sli;#!aXcUb&@Z|H<WEuC{@x3F<
zcu=w}m*tw?_*4~yqRvV|o9ns5GNSy=_8IwkLCEw54?sk2a(<Mdh^0{u1T=9t*NInH
zFsLj&_yjHc6;)bz9kl9!kgEUg^82nn>j{VPsJyClqD%>hy`Y7<pRj7bPY9jQyVGWO
zS%<st>z(GAS<aP)Y1uSg%`b@4p69s=<%Tt{igr8{?&jG%;a(i_%-UCF>s8Mqas3!D
zSQE83*%uctlD|p%4)v`arra4y>yP5m25V*_+n)Ry1v>z_Fz!TV6t+N?x?#iH$q=m=
z8&X{uW%LVRO87dVl=$Y*>dabJVq{o|Kx`7(D2$5DVX&}XGbg|Ua(*5b=;5qzW<SoC
zzQ#?BPpCI=6I*c_i^sgOQ>9;|w>m{hIO(Tu-z(ey8H=EMluJNyK4BJmGpX~ZM2O61
zk*O7js{-MBqwq>Urf0igN+6soGGc!Y?SP6hiX<gKeOf#K(fq=b1cjQoa=0h1Uc|1x
zrs@w%$10Lm8HWHTQsEqE(JV%p5=DhOaEnmC%>uJzZ1V4WZqE*?h;PG84gvG~dd<ZH
zbY$=c6i~~+P41czFCy_EI$dg`c<Pyn!p1u1+?ktG?l^rT?wPQ2A8drLUBX$i@QPw|
zqn``t@{_q6YANM!8WD%c{DkVuIAOr;O55VmO-cSXNNJ&(*COQc4ZZm^4K%c3mN$0E
zrov1s*jjy;dRKW=-Unr5Q~DZR{;p;L#!x#C2mnbI-cg1JPiV8@_y)_H7BEWiz!+s)
z0{psz93NN}c(aYN&3g{jLeCy{0X^`+?aBhSW*V;^{S7@L=6)4l+Fy-5Vm@KCM_5?x
z(92sH+oi*?$hATGUM-)1KM)NSn*XULvnqVli`x(wEF2!vUY3O~qogY>s6~484!kPM
zMP87IP?dhdc;%|cS&LxY*Ib6P3%p|9)E3IgRmhhwtUR3eRK6iZ_6fiGW}jnL4(I|t
ze`2yLvmuY42lNwO6>I#Son3$R4NOoP*WUm1R4jl#agtSLE}fSu-Z>{+*?<Ox|0|h?
ziVAXEejmb)TNIL)WMKcfBnN%2`pTyl?}<6Yv7aXa$?4(@S45I<K83!~!85D$T^sNt
z#G2I?J>pQIn7`s3LAz<sUxT1J#To$wq)LLuvyHUwXZGnH5bA&W>F#1pSxCAo?clr9
z9PUj#REq28*ZkJnxs$aK%8^5?P<_Q!#Z?%JH0FKVF;&zH3F#J^fz|a<d2t+(J$r8k
z@gS5dPqSn!bSO@C%)Xy-d?WBwvVYS>hl$Ycs~kFij_XP;U<`Fca<h#1CHf8w@jZxi
z4E}>DYyXYPM~&jEe1Xj1n;wyRdD;lmnq&FEro=;+Z$=v-&fYM9eK*S_D&oTXFW#b0
zRY}Y7R#bLzTfg9i7{s?=P9~qjA?$-U2p5;0?gPPu`1JY|*?*8IPO!eX>oiX=O#F!A
zl`S%e5Y(csR1f)I(iKMf-;5%_rPP7h&}5Fc(<tM)r%wv3LCm#xoQU~A@B|1<`oLmM
zFJo+&#d1H1oV@`@@judX4nHzi7~-Q3%iaTZO>8byKUH1*d7?9%QC<dVX!I)~1Vf#}
zwN9HFJ-qutr_)&Ld#X9pYCw5!Ln<R5Cw|Z|F-c$v`hl=tR{-56UrGfK9_2*yyZ@fO
z8nZ1uLsgGiL>|4aADj3L8yuo6GOv#%HDgU3bN(UHw1+(99&Om%f!DY(RYSf4&Uny%
zH}*&rEXc$W5+eyeEg|I|E-HnkIO0!$1sV7Z&NXxiCZJ@`kH4eEi5}q~!Vv5qQq{MI
zi4^`GYoUN-7Q(jy^SKXL4$G4K+FQXR)B}ee=pS0RyK=YC8c2bGnMA~rrOh&jd3_AT
zxVaq37w^-;OU3+C`Kko-Z%l_2FC^maa=Ae0Fm@PEtXEg@cX*oka1Lt&h@jES<6<l#
z+$g2<Asf=9S1fdM-Zf&z(e6fWKei}lbCaFL*0(Jmb1OHeueb>?o1Oi1C9>}7+U(Ve
zQ$=8RlzcnfCd59CsJ=gG^A!2Bb_PY~K2sSau{)?Ge03G7US&qrgV!3NUi>UHWZ*lo
zS;~0--vn{ot+7UWMV{a(X3rZ8Z06Ps3$-sd|CWE(Y#l`swvcDbMjuReGsoA`rmZ`^
z=AaArdbeU0EtwnOuzq@u5P1rlZ<sl{`#t>jH#gNgh6HIhG(>dX%4m{_!&DNTQE)8=
zXD-vcpcSi|DSm3aUMnrV;DQY?svz?9*#GT$NXb~Hem=24iy>7<t7QQV*?t4jd6F$+
zoVdvlUnbR4c30FRg<e9<@PVR%rHqJUA;fJYA($o}&dnWgkF9ZP7&B+X`n^IHYt(Lf
z2F5n3wJiK|+9K0rQqBtOu&~*tu?)dBq^i>xj367(!#RjnrHtrP-Q`T2W*PEvAR-=j
ztY2|#<|JvHNVnM-tNdoS_yRSo=yFqukTZmB$|>Vclj)o=Y<JL-iyAg07VsydL3{X*
z%2mLNnQsQbJh5(AD4t{|q}=<Sep&lQi`=uoh?ZK7-3Ggm7(e$G$$!eV5RX%BIxMx_
zcZ~)&tjIxU_5Hcg)fpWZ(znGWsvo-$Kg%Yi^j*N+;M7L5ByQ~FpSX7CHVVaMYgQ$F
zLE#|7`dM0nng{OAF}Qcjup5gR2}Yw<aDD&b!H-Me0}~kt6{J6Shasi;%3^mM=_Fj;
zCJmpL_-hPyxj$4N{M|RVj$&&&ZX8z~2^&gzl4o!r2Z8|PlPVdE08_x)A!qpcu0#xY
zXnk?%F^@+%cB0BC7MsbX<)>zC9!ph8)ZOH5X=%Aq|9gNgc}^KFVLht!Lyw54v5u&D
zW%vT%z`H{Ax>Ry+bD&QjHQke_wEA;oj(&E!s4|OURButQKSc7Ar-<s}yde;WAK~$P
z^Gqnxrogbi{lG5aFCO5?K&xA8mnwDs7h<9^6YcR8q^0TexEaqnTnUG7d`^9oo$h?D
zKq5j>PzIiFa8F@ezkaY2J9&PH+VI1!G+{JgsQ7%da*_Gr!exT*<Bi+ppC%q~o7ojT
zw%-XGzS*XV$!)J(aAPAYRAt79+7J$~+w?BI2Q1s93#u0$U9&Ct=DGGT|JFamN<I30
zezX;tHfoBvUeBbp68x?km4lTPls>OgJld)b-?cd)xI+|v_C`h(Cg`N~oj0`SQPTma
z{@vc8L^D-rBXwS#00jT#@=-n1H-C3hvg61r2jx#<RRl|hqSXC@053(FHh{sXK3n-p
z<$eTP-2N28_#nt+&me>ok&cr#BV~9JdPaVihyrGq*lb>bm$H6rIoc}ifaSn6mTD9%
z$FRJxbNozOo6y}!OUci1VBv-7{TYZ4GkOM@46Y9?8%mSH9?l&lU59)T#Fjg(h%6I}
z?ib<r4n}!XkXC&NufL;-hcHF&UUVnNQHeZ=Wv<~uyVX-uR$QEL_l&SGi$i}9x_ul>
zZ(xb8Rwr+vv>@$h{WglT2lL`#V=-9tP^c)cjvnz(g|VL^h8^CPVv12dE(o}WQ@0OP
z^2-&ssBXP^#Oh`X<d4_zj?ko>5@F+~$PCB6kK-T7sFUK|>$lNDSkvAy%{y2qgq-&v
zv}^&gm`wiYztWgMS<{^qQKYNV=>CQaOeglAY~EZvr}n~tW=yg)_+fzqF%~+*V_$3h
z2hDW`e$qR;QMg?(w<b_xzz9GLD(1{PR-OZ8-*QD}x+s!v>KE>%H_6ASS@6bkOi-m-
zg6B7AzD;gBS1%OD7|47a%3BykN{w}P!Wn-nQOfpKUpx8Mk{$IO62D!%U9$kr!e%T>
zlqQih?3(U&5%r!KZFZPd<X|#ogu*zbQE07H>bwZ0laAJCj!c&pEFVzrH&_&i<N3C?
zo@%)1zi#=6wREw)wQ(6!5<Zb2Qvw`@PmV*Wp>5m68Y_*J+-Qjlnz}Q{3oAD)`d14H
zKUGmbwC|beC9Mtp>SbL~NVrlctU3WBpHz(UeIa~_{u^_4OaHs_LQt>bUwcyD`_Bbh
zC=x|1vSjL)JvVHLw|xKynEvq2m)7O-6qdmjht7pZ*z|o%NA17v$9H*(5D5(MXiNo1
z72Tv}QASqr$!mY58s_Q{hHa9MY+QZ`2zX-FT@Kd?`8pczcV^9IeOKDG4WKqiP7N|S
z+O977=VQTk8k5dafK`vd(4?_3pBdB?YG9*Z=R@y|$S+d%1sJf-Ka++I&v9hH)h#}}
zw-MjQWJ?ME<7PR(G<1#*Z-&M?%=yzhQw$Lki(R+Pq$X~Q!9BO=fP9FyCIS8zE3n04
z8ScD%XmJnIv=pMTgt6VSxBXOZucndRE@7^aU0wefJYueY(Cb%?%0rz)zWEnsNsKhQ
z+&o6d^x=R;Pt7fUa_`JVb1HPHYbXg{Jvux|atQ^bV#_|>7QZNC<m56gmQTDz)Sv34
znQxO@_?jSZ-}u2#aunxbjO?K=1UNC4B}tiiM9CZDpDyu{zkOqqjq!7q99sz6e|N}H
zPQo%$KE#d!)5wiR%<;fHk2yH4+dlwL2Rr23RY5vDJ>~P^IKUThB6{kvz2pr2*Cyxj
zy37Nri8za8J!@Iw9rbt~#^<9zOaM8LOi$kPBcAGqPq-DB^-93Qeup{9@9&=zV6KQN
zL)ic5S%n1!F(7b>MQ973$~<0|9MY-G!?wk?j-cQhMQlM2n{&7JoTBGsP;=fC6CBJn
zxlpk^%x=B16rfb-W9pYV#9IRHQL9VG4?Uh>pN>2}0-MST2AB2pQjf*rT+TLCX-+&m
z9I{ic2ogXoh=HwdI#igr(JC>>NUP|M>SA?-ux<2&>Jyx>Iko!B<3vS}{g*dKqxYW7
z0i`&U#*v)jot+keO#G&wowD!VvD(j`Z9a*-_RALKn0b(KnZ37d#Db7royLhBW~*7o
zRa`=1fo9C4dgq;;R)JpP++a9^{xd)8``^fPW9!a%MCDYJc;3yicPs8IiQM>DhUX*;
zeIrxE#JRrr|D$@bKgOm4C9D+e!_hQKj3LC`Js)|Aijx=J!rlgnpKeF>b+QlKhI^4*
zf%Of^RmkW|xU|p#Lad44Y5LvIU<E*FXDgB**mKwp2!mq`hNR)=z0QQMaupuiAd7N|
zo&AY$ODOt<Q38Y^Ii6uZyvYkO%G!LUYf+Tw(6oPEJFUo(I+rIZ?v_|(9>IR>VGH8G
zz7ZEIREG%UOy4)C!$muX6StM4@Fsh&Goa}cj10RL(#>oGtr6<bK`a)SgxmFZVL)U~
z49v!>h~7tZDDQ_J>h)VmYlKK>9ns8w4tdx6LdN5xJQ9t-A<deKX4bX$^Jb$>BtTf_
zf1dKVv!mhhQFSN=ggf(#$)FtN-okyT&o6Ms+*u72Uf$5?4)78EErTECzweDUbbU))
zc*tt+9J~Pt%!M352Y5b`Mwrjn^Orp+)L_U1ORHJ}OUsB78YPcIRh4p5jzoDB7B*fb
z4v`bouQeCAW#z9b1?4(M3dcwNn2F2plwC^RVHl#h&b-8n#5^o+Ll20OlJ^gOYiK2<
z;MQuR!t!>`i}CAOa4a+Rh5IL|@kh4EdEL*O=3oGx4asg?XCTcUOQnmHs^6nLu6WcI
zSt9q7nl*?2TIikKNb?3JZBo$cW6)b#;ZKzi+(~D-%0Ec+QW=bZZm@w|prGiThO3dy
zU#TQ;RYQ+xU~*@Zj;Rf~z~iL8Da`RT!Z)b3ILBhnIl@VX9K0PSj5owH#*FJXX3vZ=
zg_Zyn^G&l!WR6wN9GWvt)sM?g2^CA8&F#&t2z<OchUXqZRy~h=2dtIHh}9X16StjX
zE*X$O$hrKjDx*fT_Kdt~%HxH)(Q<=+n-bsLpy;hW_+>3_MiluRqvNbV{Me6yZ&X-_
zd6#Xdh%+6tCmSNTdCBusV<OLU^zAuj3oC#*62FAg&jRV{eA?56gCxRYk9nH%WWtia
z(L;^ByCp%<7za*^^B&PIjiYnY$Gom{Mk62$4uvORBiduv?$8V_-U*iYK!Is}K=K4!
zRqXL>kRwJ_A~<^Nd6~MNOvS;YDix<nmp2CFVZb7>M43`|8e_bmc*UWi7TLA})`T_F
ztk&Nd=dgFUss#Ol$LXTRzP9l1JOSvAws~^X%(`ct$?2Im?UNpXjBec_-+8YK%rq#P
zT9=h8&gCtgx?<LJXBIGvqMB)2=D#1(E#`Gft|z7B(M_Jurie!W@A&*CdPfK%1_($Q
z&41z76E^&SjZfwJd{?_|K()<UmsT?~p*FNKwP__D#d_feWy|Ge{Dz}bgKej%s3<xl
zL=(7@V%Z6Ls;(9_^N&q~-}$9{nVk@Q(D|jMzcY@#rP>=Oj$Yr2j<c`drr*4eGwnY6
zpou_hH7hY!9S)PMQgqe-l!Y{7*V&uqvM#Fq)W)d?^1IAL-DK&GVzdmu_H<-MCk_GM
zy&hTx7mWj(2fxj^o94>I3`VVuZ`lH>*N+*K11CD&>>F)?(`yr~54vHJftY*z?EorK
zm`euBK<$(!XO%6-1=m>qqp6F`S@Pe3;pK5URT$8!Dd|;`eOWdmn916Ut5;iXWQoXE
z0qtwxlH=m_NONP3EY2eW{Qwr-X1V3;5tV;g7tlL4BRilT#Y&~o_!f;*hWxWmvA;Pg
zRb^Y$#PipnVlLXQIzKCuQP9IER0Ai4jZp+STb1Xq0w(nVn<3j(<#!vuc?7eJEZC<-
zPhM7ObhgabN2`pm($tu^MaBkRLzx&jdh;>BP|^$TyD1UHt9Qvr<czlD@G+^h_RO(*
zD$4@5T5mOOg&QM%!B(9nVM{bT<4ry`1>{ZcBs^l!JI4~d-Py$P5QOYO&8eQOFe)&G
zZm+?jOJioGs7MkkQBCzJSFJV6DiCav#kmdxc@IJ9j5m#&1)dhJt`y8{T!uxpBZ>&z
zD^V~%GEaODak5qGj|@cA7HSH{#jHW;Q0KRdTp@PJO#Q1gGI=((a1o%X*{knz&_`ym
zkRLikN^fQ%Gy1|~6%h^vx>ToJ(#aJDxoD8qyOD{CPbSvR*bC>Nm+mkw>6mD0mlD0X
zGepCc<fcI8GR5U|Yh-F^k<?4h>S_x7+6X7dH;%e`aIfPr-NXSqlu&?$Br1R}3lSF2
zWOXDtG;v#EVLSQ!>4323VX-<G6zshc*}#z*<OCtlCr=7_9N%0P@aw0NW3NYMY?UTk
zv23GF46nFqpVnQb;X)T9o|+e;#c6DHFd+rvvY`*?nmy>|E#qb+x%IxzUBDI~N23x?
zXUHfTTV#_f9T$-2FPG@t)rpc9u9!@h^!4=fL<UA2Sb|t&Ay-N0cvK7#l<pL<Ge|wZ
z<{l=m(jmt{aK61fo4#z+N%`p~HEyEO)J*8(AY!addNzdFY|eI)Y^jThWjX-+2s1gI
zR-J}6pgw5eaN{`!XN-3n?FAT?R1z;uAYmO-(xK(CQ<gbE!uo-ct({xjk-*(K3=4b2
zuZ;EzSTEFKBtAl-Y8l=R*0oyp4c|8Hu<A_TRga2_IR3O)Z#Sv=f)G{j@$4)Q>^kg9
zVv%&KY3!?bU*V4X)wNT%Chr;YK()=~lc%$auOB_|oH`H)Xot@1cmk{^qdt&1C55>k
zYnIkdoiAYW41<U-q!=TqI<2ry9=m3D#)D^9-z8@sblghCY~vfX?X5t$KPF5bE#--%
zE5hxIRaHx0Xj9^l7<qXiyd@3`cU<of!XEaJ1U@*tv-q;5CI7%jj1+;qoyl(u34Kyx
zTwi$K?X7Phe&4^6RRa`$h5KJCs<~Ar*~m>zrRBfqR?9r^cpWIEqfS;|R#bIs4$cqA
zoq~$yl8h{IXTSdSdH?;`ky6i%+Oc?HvwH+IS`%_a!d#CqQob9OTNIuhUnOQsX;nl_
z;1w99qO9lAb|guQ9?p4*9TmIZ5{su!h?v-jpOuShq!{AuHUYtmZ%brpgHl$BKLK_L
z6q5vZodM$)RE^NNO>{ZWPb%Ce111V4wIX}?DHA=uzTu0$1h8zy!SID~m5t)(ov$!6
zB^@fP#vpx3enbrbX=<eanFKywoTJQhG@2(niq0oP7<R5Z?3qzMSy}E7WYo#&>vzol
zj^Bg7V$Qa53#3Lptz<6Dz=!f+FvUBVIBtYPN{(%t(EcveSuxi3DI>XQ*<e7R(p~Iw
z59+#M)uYI&a@UPC1IMUQX*f(>$HX~O{KLK5Dh{H2ir87E^!(ye{9H&2U4kFxtKHkw
zZPOTIa*29KbXx-U4hj&iH<9Z@0wh8B6+>qQJn{>F0mGnrj|0_{nwN}Vw_C!rm0!dC
z>iRlEf}<+z<vi^;1*|;$>&?Z4o3?C>QrLBhXP!MV0L#CgF{>;ydIBd5A{bd-S+VFn
zLqq4a*HD%65IqQ5BxNz~vOGU=JJv|NG{OcW%2PU~MEfy6(bl#^TfT7+az5M-I`i&l
z#g!HUfN}j#adA-21x7jbP6F;`99c8Qt|`_@u@fbhZF+Wkmr;IdVHj+F=pDb4MY?fU
znDe##Hn)<n&2RRZwLB>{D}<>vVhYL#)+6p9eAT3T$?;-~bZU%l7MpPNh_mPc(h@79
z;LPOXk>e3nmIxl9lno5cI5G@Q!pE&hQ`s{$Ae4JhTebeTsj*|!6%0;g=wH?B1-p{P
z`In#EP12q6=xXU)LiD+mLidPrYGHaKbe5%|vzApq9(PI6I5XjlGf<_uyy59iw8W;k
zdLZ|8R8RWDc`#)n2?~}@5)vvksY9UaLW`FM=2s|<qH#E;S20;V52?wL&5dX{o72+Z
z0FywvdTk{!EC;eL4ha(sv&@IG+>vyg>Remm=QGthdNL87$nR&TKB*LB%*B}|HkG64
zZ|O4=Yq?Zwl>_KgIG@<Nm24#{ff*;8vIRUT2g{<33$-hCA*Ib8=7nT4{VJ-HH4MLV
zIT25bC`h^*$P3+Yx*l|$yU`O)`Mbhv0n7XuDy|BiR*M8CJI><8i{Zw#P3q_CVT7Dt
zoMwoI)BkpQj8u(m!>1dfOwin(50}VNiLA>A2OG&TBXcP=H(3I;!Wd<ZETqW($r$}b
zQAfz<3tM=bQ%n$55j+bugZnrB#mmMWnY_~$z$Liaj`bJ38rjM<FDunrR#<^_1JENt
zu+3j^HqtsLHiNherQg9BtTZ6*`5u=2BqRzR-k~&QM}H%8Wtp~yT(x{XA+v{;Lk0T_
z8ZN}a;s?PGwtWpK@H2sUmlJ{<9{&n|P#c&W@4~v-%@c<EWQ?t#KLSmY9bDL{#7T?~
z76PxY&TkxgEcPdAIhcw*XW%cHKY+7+aX(-B-5MrXK50f+^TJP!RT6=!f<geMB`%Fd
zpZqq;(7VECXEvcw|AQDRe)aU3W}r0&1CN{y>PFe?r_e{%>bc6(Zk?6~Ew&;#ZxBJ|
zAd1(sAHqlo_*rP;nTk)kAORe3cF<p$fDY|A3iAZP$ueYIxQXJ+7w98(0U(!3@$IXC
zkArCaQGgWCTUEiE#Ljn4e2+xH8Lo+U&pRkmvSNL+lo$iGII$|wCe1zIC^v<yjtGc?
zbPEkKJ&O*&`bDbHgA@N}y7EOuEceoYIA&k#uEVgi3I3Y=_BkG)yv^|q$IgyV_I#%x
zis#68f%rr`5{z^U5Xvbb15OyZ7n3z;MKelR1ZnQZ?|9dO1LM(y5^1HBHewj8z0X6b
z5ns&>&tj>m&LsvB)`-y9#$4XU=Dd^+CzvoAz%9216#f0cS`;kERxrtjbl^7pmO;_y
zYBGOL7R1ne7%F9<i~hqkED!wXO5LBBco|+$qCFci6|!-WQ>M2~0a7Srciz=MeaMU~
zV%Y#m_KV$XReYHtsraWL<vKnB$+Zz-Psj~gMs%tLbl;djUW;H;u25)lZ~Qm%4FzzV
zfs~u@z?ywI*zMsCG+$x}k+Zt=z;A=9uoFZ0uyjL?unMSF6p`V2w|s^vq37hCdK<*M
zQG4HE?XU*W1jw^vfg9M{L@tofUxN*3g2Z~iPTJR8y3WBJ(gUv}lBzRwS|l?`_FLLt
zDW+p@*L9!&Q{&Zrmi{U_bUOaiNV1ZE_G|QIq57EotA?e^B=gl*)4mnUB{ID2*hS?K
z97*K-A4~i4U^!JB&H{YCj!coICIVY)QNpfeE&d15$&n^iL%ZPcjxr-(h$wToAxn2L
zPRdYf%644lF&Ct8opZ&DwcltfV$?3&sTTr>rdJItLtRiRo98T3J|x~(a>~)#>JHDJ
z|4j!VO^qWQfCm9-$N29SpHUqvz62%#%98;2FNIF<r)HDZh-H_}MwGugFOnYSqd%Sy
zI=YaMecE)Ud_fxgSR<n-(O`=`i9bA1ZwT6qs+cI!LFWB2FCcvPar+8o0PX}9gN%GU
z#oIy=fF~{NHiEcjJAgRPrJ{92E7!?|80I39UP!TZA$bl-0B38>*?c9hZ7GAu$q>=0
zX_igPSK8Et(fmD)V=CvbtA-V(wS?z6WV|RX2`g=w=4D)+H|F_N(^ON!jHf72<2nCJ
z^$hEygTAq7URR{Vq$)BsmFKTZ+i1i(D@SJuTGBN<J^d%g3-SsRr1Zq23^Y<XMk33u
z0YpIow(yh?q_G$W1G9T?O`wbI_&XbT=@+$4lm9L~7k=bD&%`F5G=A^k_|g(QrA7|R
zlKb%Fw^<1ppNM&?|K#tNLnJ^+TYlwL9RC;8+T<k(qy{d2CuhNoF@jMeCRv$p@)2rZ
z>3W8<ub;IgTVJz1Un@p7$E&xFXkfS;|BBLRtyYXByIi)rTxM@Bn`|z3wO{>{JpJ^J
zkF=gBTz|P;Xxo1NIypGzJq8GK^#4tl)S%8$PP6E8c|GkkQ)vZ1OiB%mH#@hO1Z%Hp
zv%2~Mlar^}7TRN-SscvQ*xVv+i1g8CwybQHCi3k;o$K@bmB%^-U8dILX)7b~#iPu@
z&D&W7YY2M3v`s(lNm2#^dCRFd;UYMUw1Rh2mto8laH1m`n0u;>okp5XmbsShOhQwo
z@EYOehg-KNab)Rieib?m&NXls+&31)MB&H-zj_WmJsGjc1sCSOz0!2Cm1vV?y@kkQ
z<1k6O$hvTQnGD*esux*aD3lEm$mUi0td0NiOtz3?7}h;Bt*vIC{tDBr@D)9rjhP^<
zY*uKu^BiuSO%)&FL>C?Ng!HYZHLy`R>`rgq+lJh<d&y2bxU%x2bp&;{EY>dXfo|df
zmkzpQf{6o9%^|7Yb<H9Djj73&=!H<l1fR%V0gHumtd=k;shCSDzmr2~L?7>5v{Tu&
zsP*Y~<#jK$S_}uEisRC;=y{zbq`4Owc@JyvB->nPzb#&vcMKi5n66PVV{Aub>*>q8
z=@u7jYA4Ziw2{fSED#t4QLD7Rt`au^y(Ggp3y(UcwIKtI(OMi@GHxs!bj$v~j(FZK
zbdcP^gExtXQqQ8^Q#rHy1&W8q!@^aL><u!}aGR8PHJwu92JD4JDlbnay+X<Ycn<ki
zD^80O2}kCVUWjex%^O<n@=mW<38)Qu_YK#S=N;)9mm7XNJJECSa=DJ$G|#yLf3~L@
z-HW4z+Cyl}+dB?P4cDZq;7|41V@~U!6UPdEGIJj(3NaGsZl6`hnAr?OX*FX_ttOXb
zsH?UN_o>g1v2R45T(KErWB)1rB@rU`#n&-?g2Ti~xXCrexrLgajgzNy=N9|A6K=RZ
zc3yk>w5sz1zsg~tO~-Ie?%Aplh#)l3`s632mi#CCl^75%i6IY;dzpuxu+2fliEjQn
z&=~U+@fV4>{Fp=kk0oQIvBdqS#yY`Z+>Z|T&K{d;v3}=JqzKx05XU3M&@D<Xziu0j
z$T_QZ!o|grC*e9F1-Dp?WgCQfrKo7wqcyX&!TI}mTs|pg4*U1gOy^7sU?zi1GJ1ue
z?TkG+pTl*xn`gl>5!uPTGydasyeZ5=1~IX-?HlM@AGB9|Mzb{{Dt@bUU8{KUPU@EX
zv0fpQNvG~nD2WiOe{Vn=hE^rQD(5m+!$rs%s{w9;yg9oxRhqi0)rwsd245)igLmv*
zJb@Xlet$+)oS1Ra#qTB@U|lix{Y4lGW-$5*4xOLY{9v9&RK<|K!fTd0wCKYZ)h&2f
zEMcTCd+bj&YVmc#>&|?F!3?br3ChoMPTA{RH@NF(jmGMB2fMyW(<0jUT=8QFYD7-%
zS0ydgp%;?W=>{V9>BOf=p$q5U511~Q0-|C!85)W0ov7eb35%XV;3mdUI@f5|x5C)R
z$t?xLFZOv}A(ZjjSbF+8&%@RChpRvo>)sy>-IO8A@>i1A+8bZd^5J#(lgNH&A=V4V
z*HUa0{zT{u-_FF$978RziwA@@*XkV{<-CE1N=Z!_!7;wq*xt3t((m+^$SZKaPim3K
zO|Gq*w5r&7iqiQ!03SY{@*LKDkzhkHe*TzQaYAkz&jNxf^&A_-40(aGs53&}$dlKz
zsel3=FvHqdeIf!UYwL&Mg3w_H?utbE_(PL9B|VAyaOo8k4qb>EvNYHrVmj^ocJQTf
zL%4vl{qgmJf#@uWL@)WiB>Lm>?ivwB%uO|)i~;#--nFx4Kr6{TruZU0N_t_zqkg`?
zwPFK|WiC4sI%o1H%$!1ANyq6_0OSPQJybh^vFriV=`S;kSsYkExZwB{68$dTODWJQ
z@N57kBhwN(y~OHW_M}rX2W13cl@*i_tjW`TMfa~Y;I}1hzApXgWqag@(*@(|EMOg-
z^qMk(s~dL#ps>>`oWZD=i1XI3(;gs7q#^Uj&L`gVu#4zn$i!BIHMoOZG!YoPO^=Gu
z5`X-(KoSsHL77c<7^Y*IM2bI!dzg5j>;I@2-EeB$LgW|;csQTM&Z|R)q>yEjk@Sw%
z6FQk*&zHWzcXalUJSoa&pgH24n`wKkg=2^ta$b1`(BBpBT2Ah9yQF&<s0t#=CvrrS
zhID}v^bURD{xYt-7G^$r)TW*tHPfwa<MygGIW`SIzn~^mpOlkddrC2QnaKRX^i|}+
ztb9<mhSg<Qxk*l=ZE}+5_uD(mzp&OPh211AXsFW}kd_5%1?CrhNsJe#*Y<ZlnvU3i
z9FSrFY?Dr^Z=A0b%B#l7!s~@FV})h2QE2dq9XU8D;k$CT$b*|~B+ozO*7MGpG-4l_
zHB>Kh+3jTaSE|=vChGz2_R^{$C;D`Ua(_=|OO11uLm;+3k%kO19EA`U065i;fRBoH
z{Hq$cgHKRFPf0#%L?$*KeS@FDD;_TfJ#dwP7zzO5F>xntH(ONK{4)#jYUDQr6N(N<
zp<gr$yEF+Bh<8VMyl!Z~38n3=ExaSY0h2rbMXGGeh?xbO6ZTOM{<7x2ScCLhXP2Ft
z)INnh8dDXY+c;~$2FxS>+fAS9l9)^c4Ss8628Zq5AzMq4zc(In_yJSXAT57Dtl}@=
zvZoD7iq0cx7*#I{{r9m{%~g6@Hdr|*njKBb_5}mobCv=&X^`D9?;x6cHwRcwnlO^h
zl;MiKr#LaoB*PEL<dy!vtQo2cB(_!Lx!l45orL*11H7TM(b>m8+8%btnC)b^E12!^
zMmVA!z>59e7n+^!P{PA?f9M^2FjKVw1%x~<`RY5FcXJE)AE}MTopGFDkyEjGiE|C6
z(ad%<3?v*?p;LJGopSEY18HPu2*}U!Nm|rfewc6(&y(&}B#j85d-5PeQ{}zg>>Rvl
zDQ3H4E%q_P&kjuAQ>!0bqgAj){vzHpnn+h(AjQ6GO9v**l0|aCsCyXVE@uh?DU;Em
zE*+7EU9tDH````D`|rM6WUlzBf1e{ht8$62#ilA6Dcw)qAzSRwu{czZJAcKv8w(Q6
zx)b$aq*=E=b5(UH-5*u)3iFlD;XQyklZrwHy}+=h6=aKtTriguHP@Inf+H@q32_LL
z2tX|+X}4dMYB;*EW9~^5bydv)_!<%q#%Ocyh=1>FwL{rtZ?#2Scp{Q55%Fd-LgLU$
zM2u#|F{%vi%+O2^<ba$kF;<;;Rt95z5<wHL5K(j+Q8k$UbpgxRp3j{Mp^w)B^HfYf
zDA0hRMhNbE2=Vj2YiL~z1)_|?;fh9jerBnuTe?P4hal2R4f7=m$Q(~VOU_C<;|0{Z
z3|u!g6zg=FOiT;#Yow7zlAZMlBeju5?i6<U#@UV0Bb~8OCe{A7Ccmu?Gyqdhjg(s#
z+lg05=w;`87GW>~uK3)?$6<Ii6i|CibK0rXDvO^#;fkTXF9V{wZfQL2cW5uAO1K21
zyr@nFcEG^iu`2#R9oGU;Lq1G%agZl2%SOI7JXztK#Z;$;)Lz*}ur422VHM7@=-v?f
zBlBe`7E*iZpFhhLUcSr4=IECR7@1F_Ml2FfW0MO&eq+fQw8MX@4v0dY1>>9cc7_}F
zWU72eFrzZ~x3ZIBH;~EMtD%51o*bnW;&QuzwWd$<Sv0&$GkytlNUOK6=cE?4aMBme
z1h#{t_Gn%>ds=O>Ev807cu%>Ac^ZK&7bCN;Ftk#eeQL4pG0p!W{Ri@tGw>nhIo`rC
zi!Z6?70nYr<u`@synlh-MYS0<B^dq282OT7OFjP<^MkKg!2Li61nIAG$0&EsmPTPr
zR)}<%a~xy;pP3@S-zf2+&bEh1;6wodY7P24J|Iev;5LQ+!5jDK@1Jw&i1XSXY*mE}
zt$!>Nf92V{Y_i(a4DG=5>RktP=?%GcHEx?aKN$@{w{uj#Cqev$bXefo?yC6KI%Rol
z%~$974WCymg;BBhd9Mv}_MeNro_8IB4!evgo*je4h?B-CAkEW-Wr-Q_V9~ef(znU&
z{f-OHnj>@lZH(EcUb2TpOkc70@1BPiY0B#++1EPY5|UU?&^Vpw|C`k4ZWiB-3oAQM
zgmG%M`2qDw5BMY|tG++34My2fE|^kvMSp(d+~P(Vk*d+RW1833i_bX^RYbg9tDtX`
zox?y^YYfs-#fX|y7i(FN7js)66jN!`p9^r7oildEU#6J1(415H3h>W*p(p9@dI|c7
z&c*Aqzksg}o`D@i+o@WIw&jjvL!(`)JglV5zwMn)praO2M05H&CDeps0Wq8(8AkuE
zPm|8MB6f0kOzg(gw}k>rzhQyo#<#sVdht~Wdk`y`=%0!jbd1&>Kxed8lS{Xq?Zw>*
zU5;dM1tt``JH+A9@>H%-9f=<q<+LVr{M#T<q^Cnc-&?xMZN&ijTkf8?0<(syaVN;&
z(C3Efq5EyyrE~N$#X}_S8c^@i6}!4ZVjtXj?-J{FtLW*bQ{65S1G|KSG!!l69P$zC
z{siN1*3l-kL=tqVq$_mWRT&v!^(EH-d>EnW)UkRJe0+e^iqm0C5Z5?iEn#lbp}Xso
ztleC}hl&*yPFcoCZ@s<vLB4vKL);?UQ8SNsN-4lT7E(h++9yfu0i+RG?zQZ`Vyu!N
z2csF4b4_|Yj8d3fGl192T|jkwza^`sMg!wyzjg4YT*3%R=hCis_KugR){yZ{69#*3
zK<)AFqpKOIQ3(`3fk3V+JNe31%APK*Q^d*1qjD!07S=F+NAlE*3L1Zbd=12fPr9W#
zjsGg@L~A0y+Dv6%JO9(Ta*Ah3uR3g(3z}G`82M)ekyTxF_;RCYSlBdkw<IaMsMQ?e
zbsMfyKpLJ)Okmnzl2(R+c-sy1Ki;aKK-sU-FR6hwy#IRr72%A4)qj-ug11pmglUA^
zU@_5Gw6!(Wdy$iHzby;~7AhGdig(Mkgk1LPjq5QFk0S4Cl+LBU^bzgEux96pNv;KN
zUR+$IJ#$~DWu^%Nz(1&3Se2cJg9EwK1ih4HlD~*P+AU9y9Gh^R$@}B0u-*gkxP!N`
zI`OEtCMx2=OXR>gvvjBA_Ew6msFml$cfLQY_(=h03WS_z+Leeh$M3#-?f9YT^Q($z
z+pgaEv$r<Ugeur|SR*p$+UFDnLL;xoN-%!T43feTdR&aK^iX*=YkAhKCcZGH6=Hc`
zK<f-)!~rlB=sV5y>Ia*9wST`WHASQio=9IaVS7l<87%;83~X*`{BX#@>>p=k`@FYo
ze!K5_h8hOc`m0mK0p}LxsguM}w=9vw6Ku8y@RNrXSRPh&S`t4UQY=e-B8~3YCt1Fc
zU$CtRW%hbcy{6K{>v0F*X<`rXVM3a{!muAeG$zBf`a(^l${EA9w3>J{<NHaZSe3Dx
zY8Dg(e$l6m2U+$Er9e(r#AcIf<kD?-fMtOH$;$Ez#?uh-p(moA2-$e?HZ?d3`gHO^
zLs#=xyRr;&%j-2s;@q4VtWbGT-(B6wwnV|+r>aPwJT?mKVN2ba+v)Mp*~gQ_+Ws6=
zy@D?85!U@VY0z9T=E9LMbe$?7_KIg)-R$tD)9NqIt84fb{B;f7C)n+B8)Cvo*F0t!
zva6LeeC}AK4gL#d#<LpOpPalt`s^?A%fPT!zdEW(FA^EFVBGD5=s#;p_o>N_HvvD&
z0;mdU3@7%d5>h(xX-NBmJAOChtb(pX-qUtR<NGf~8Y({*{Qd2}r;MLZ_vin>LF5f$
z`X<bri1=?cM=8<+71}cIK~u!^Xc=hLQi+++$ZeGh>?Kpu?ENMc88>O&ym_$Jc7LZ>
z#73|xJ|aa@l}PawS4Mpt9n)38w#q^P1w2N|rYKdcG;<tU5_YID!TW~j<gG_D0?bfZ
zAx5SYdUR0;(Qp~TWf)<@H*?|jqIg?jv>nb!_nHMZA_09L!j)pBK~e+j?tb-_A`wF8
zIyh>&%v=|n?+~h}%i1#^9UqZ?E9W!qJ0d0EHmioSt@%v7FzF`eM$X==#oaPESHBm@
zYzTXVo*y|C0~l_)|NF|F(If~YWJVkQAEMf5IbH{}#>PZpbXZU;+b^P8L<V=zh^PmD
zL`7&OcUEqxV0p9pX<02z-THUDhdvEGk97J;t-GBJ<nF(C?^w$|%4@|~DP~oZlYgIl
zuI+NRKR*sNKvHH6R=w{`+FIkv?XA>WmlmDJ%Zu)4CajvRL!g_Faph`g0hpA2)D0|h
zYy0h5+<vXcbvZ!}-Ti2$yE64n^Sfk2I--2XL&qjovG)2_V|^=LBv9V~JglT}?G{~<
z!pmh?-EZ?Hnt@nUC6p23YL!L4k^U?vrZo%<4hyajBFhPn!n$|mWN_#4En32u<1zn!
zu-nPRAo(%8b6>@4T81(s0D=crojdj|dYa{Y=<2zKp@xl&{sHO;#|!uTHtTey25f1U
z#=Nyz{rJy#@SPk3_U|aALcg%vEjwIqSO$LZI59<YMW2T5A_lwnC?F0LTE49h#h(4+
z5@#;o=aOg6Ty^S7NeNEi8BQv>^;Mu~Swb53L+>oxWiN7J{;P*(2b@ao*aU~}-_j10
z@fQiaWnb}fRrHhNKrxKmi{aC#34<Ne?5f?i(t}|X_E14g4c;<RChmG6j68+0n_%a=
z5|bnIjoCZq*qma>BRP(a#0K>-J8D+v_2!~(V-6J%M@L{s?fU5ChwFfqn)2$siOUKw
z?SmIRlbE8ot5P^z0J&G+rQ5}H=JE{FNsg`^jab7g-c}o`s{JS{-#}CRdW@hO`HfEp
z1eR0<hko%0^tlAD{Bs=wlKM5<olTH(jMXi+3-x##vBp{A3eZRJrKN8uvd%5RzMT#n
z68`b?pf9bXk0(J)&vOsl{d9K0)2Cr%Vk3VloUq}n%`ke311Pl4;%DuDh2;4Z>DsN!
zt5xmsYt{Uu;ZM`CgW)VYk=!$}N;w+Ct$Wf!*Z-7}@pA62F^1e$Ojz9O5H;TyT&rV(
zr#<BLuF>IBM8<W03YGL3%?f^4_AqtJpyj#h&p$VI3rl`I`^taW$jxAML<$(BdMwx_
zlLmr$vOaoI0IA<o)>te~-2t2;kv2xm&z%tt3pyt|s#vg2EOx1XkfsB<u=a#QAHFyw
zH^#er(Fasy7e*uyj*LRn;;cw)`8HoCa<6waLzyN|B}26Z?f#|l5{!Lo7xhOuO!}qW
zbC^9ND3VwU&(zUs<yvQCMiiM!;ElHzg2J~V^hj}-Lfwl~SrFWideTJ|+XrQuCDiS<
z)vM6eQf~V@yT)+mzn`+@eTvHTgxq*XAn*5~P%Kpv#89KZUu>*RM;D>ab$W-D6#Jdf
zJ3{yD;P4=pFNk2GL$g~+5x;f9m*U2!ovWMK^U5`mAgBRhGpu)e`?#4vsE1aofu)iT
zDm;aQIK6pNd8MMt@}h|t9c$)FT7PLDvu3e)y`otVe1SU4U=o@d!gn(DB9kC>Ac1wJ
z?`{Hq$Q!rGb9h&VL#z+BKsLciCttdLJe9EmZF)J)c1MdVCrxg~EM80<OYrqK%FS_O
zx}L#|X%K3tWqV@0vzDsnnR3Yz?SjfReMeOp#n1B7DSod$5n^}wbM_)dHEDu_nV)?R
z!c3rf)Of{=$5uKL#I@V1Ws|Z5fZ4NpoMn{Vt{-gR^QY-}n)k?MJLe+(H*DLi@ubuD
zJ>_b3k{ur=jVjrVhDK1GTjd3&t#ORvC0Q_&m|n>&TF1C_>k^8&ylR7oz#rG?mE%V|
zepj0BlD|o?p8~LK_to`GINhGyW{{jZ{xqaO*SPvH)BYy1eH22DL_Kkn28N!0z3fzj
z_+xZ3{ph_Tgkd)D$OjREak$O{F~mODA_D`5VsoobVnpxI<L#y)dT{XZ@6C1AwZkx=
z#&n}Q%H5?TjCQfym&hus3U5BHHIrCx1lCD^^tf!XhezUxy8nw6bXYs))PhzY)EbV~
z3_uarYiYvr3Pwc2$wF1I24aXT5%LN$)?(Q8ezWWgqA6SimfTwJTLp0QrC{lEbrWx)
zJ;3xJ%h9biZ#RRkBdua)Y&WiYHVQr56z{shOIS=w&x(H=87V2*HYE2h0gb!qf{Jvq
z^Y==~Gp_|z$%Yc`0s9OhhH~H)!fwAop3O+L?QQ&)<5x(4oOcNSU`gIBbPz$19h8Vi
z=x{Sq{n8pR)3(2lByb(h#PyQT+r6_RQ~eScAm}hch0nJiuE6tB&ns}cLsxs|ui(=>
zV0F_79%JB!?@jPs=cY73FhGuT!<K$a=G|53e{10$p+byZeKD{oLj89mh@LC}=%NX0
z;*KNX7R-K^!*zxK691T$=ETKR3FMLcQe#X=<6^Ubhq;0af0G`<fK*adOjW173pl<B
z01`*u%;G66RFIDk4yL`Xeck?Ec|CUlA6<`FzY$xfm|b6;?e52Vf1?xaS(4T=peR5I
z`?V%dkA|C!r{l_7!OGwBXtbID_FDLfk@nuPBO#I2YcSR_(&2p~ezLTg$*hJyS9id*
zIVqsN$5(G<j!-p(3x6BVMb~3eT}-TWte&t8Hw$UAjb%!=vt3(V#FxL>?fpVX1W=Wm
zK5}i7(Pfh4o|Z{Ur=Y>bM1BDo2OdXBB(4Y#Z!61A8C6;7`6v-(P{ou1mAETEV?Nt<
zMY&?ucJcJ$NyK0Zf@b;U#3ad?#dp`>zmN<gr`{4O%cMrKxJwRla^p0?!O?&AEgD?v
z+o$qCnnug51;!OM^et<=@e!eMXwUGFJXwg~`R((9W1ml^g2V_H^M~P?j;4e#lHGTQ
z@o$?Z;gOHRYcLSD_PBHaO);T6r9aGw>n=H1&-<Xr>H`Y+)ai-TfyZJX@O&nRB*7j$
zDQF!q#a7VHL3z#Hc?Ca!MRbg<t0CM<jNd$9g{^<cPIWj4e6fx|J;XcBe<*U<nwT?=
z|2dLWm#eObkDu9iqDM9n+dIe!&FvDUWehAEb@;_47iOsYhZJ1`$c&IWG&}fV$2)}H
z$Ag?+n{DXVCqvJ=xR_!Y(QY$HsT4GoXpk@2mNXrU7sE_1ouq{duV;<HzGQ?qpd*wp
z@ZzwS=W7<)0Hr(b4ettbWO2^GYSwO|sWaRtx#Mtwp4TWZlPkdry}8Q423of>L`daF
zW#;L$yiQP|5VvgvRLluk3>-1cS+7MQ1)DC&DpYyS9j;!Rt$HdXK1}tG3G_)ZwXvGH
zG;PB^f@CFrbEK4>3gTVj73~Tny+~k_pEHt|^<lR`mlNFIi9z2bxO*+RL86KeAWH4$
zU4VsFpeQG~WVDSof1UN^o@%PJAewV!QpQH-VHGj;=}0vP`eH{TpWLIqcJqm)jkN53
z<II)B6VMc5@{*J}uF{>eLw{?6NbG&`Ng9diB9XsMr(ztNC!{FhW8Hi!)TI`(Q|F*b
z-z;#*c1T~kN67omP(l7)ZuTlxaC_XI(K8$VPfAzj?R**AMb0*p@$^PsN!LB@RYQ4U
zA^xYY9sX4+;7gY%$i%ddfvneGfzbE4ZTJT5Vk3&1`?ULTy28&D#A&{dr5ZlZH&NTz
zdfZr%Rw*Ukmgu@$C5$}QLOyb|PMA5syQns?iN@F|VFEvFPK321mTW^uv?GGNH6rnM
zR9a2vB`}Y++T3Wumy$6<!67?cC%>`W)_c0PS*L;;0J^(T7<)`s{}lZVp`e)fM^?{$
zLbNw>N&6aw5Hlf_M)h8=)x0$*)V-w-Pw5Kh+EY{^$?#{v)_Y{9p5K{DjLnJ(Z<ay_
zQ|zCgJ7Z<@f^IyT#!w8xRb(NaD!Nm0u%yH}t?$zg)w^&IK4Z`0m)_RSq1QCUJEibT
z7kQvMAXo4U!zLzUk>Ucyk*y(6D8wHB8=>Y)fb_Pw0v)Xybk`Sw@hNEaHP$-n`DtYP
ziJyiauEXtuMpWyQjg$gdJR?e+=8w+=5GO-OT8pRaVFP1k^vI|I&agGjN-O*bJEK!M
z`kt^POhUexh+PA&@And|vk-*MirW?>qB(f<fb}iMf`M1*cE2&^ns<lAgNbe9%8FD$
zwjvM|R2me}U1H(R)#ASROdqw^rZnHr0&f97^OKn&Qj^OAr-J8;B23)_uUb<>%y{ux
z*d44UXxQOs+C`e-x4KSWhPg-!gO~kavIL8X3?!Ac2ih-dkK~Ua2qlcs1b-AIWg*8u
z0QvL~51vS$LnmJSO<hpkIr+#{?3T{F9>nV4JUCUzg&4;bSsR5r_=FD@y|)Y2R_--e
zMWJ;~*r<i!;TA2bZ)HKh8;KT9<ZHJ@A^xz#EfNXNM<Iaxvn=m=sJYOJ@7H!t#H3v1
zI%NsU5_wkr0!QqEEvXG^6jST`^S>=vJssF5_*n?wF0DO_>Mja=g+HvT=Yd^uBU|aw
zRixHUQJX0Pgt-nFV+8&|;-n<aRd<Z#DH?k4n__AwYiBH~b*I+O${aSV!V6<4WmU!Y
zsadZ+r7y_;@eome8lEH+W|l)H7<g)uB&cK=<NbeH>>!jNUj!8Y_YzH*%M!-_uWt6&
z|Ec+lAD``i^do;u_?<(RpzsYZVJ8~}|NjUFgXltofbjhf!v&208g^#0<NyCEi~Rq?
z$PwqiMoc*zc>h-x?`z8cInq!9kfVwJ|HQ;VK>p_-fn@(3q?e51Keq(=U-7C0#as-q
z8Or}Ps07>O2@AAXz_%3bTOh{tKm#uRe}Sqr=w6-Wz$FCdfF3qNabEaj`-OfipxaL-
zPh2R*l&%ZbcV?lv4C3+t2DAVSFaRo20^W_n4|0t(_*`?KmmUHG2sNZ*CRZ<Vl7Sz}
z4RnAGishdzC)X@en*4i_sx;)tVg`me7}2q6vh`wr8N?~M3=EDah6vkFwplEVbilC{
zEBNTu$^DB(nS>lCFIyZbJqLdBCj)~%if)g|4NJr(8!R!E0iBbm$;`m;1n2@(8*E%B
zH!g{hK|WK?1jUfM9zX?hlV#l%!6^p$$P+~rg}OdKg|d^Ed4WTY1$1J@WWHr$Os_(L
z;-Zu1FJqhR4LrCUl<W{I<bh|WpctYa0aa`ugIHt3z@UVpXlgD<(WK=XGQjmCkmV!5
z;MYY_v#}DQ2DB;%lGYKUcMJ>)C~E7gA!^wtA6YIh10In9rX@LGSjnTPtLp+gPGp6u
z3}{?J1!yT~?FwqT;O_-1%37f#4ek&DL){N}MX3RbNfRb-T;U^wXhx#De&QssA$lu~
mWkA_K7-+yz9tH*t6hj_Qg(_m7JaeTomk=)l!_+yTk^le-`GmOu

delta 34176
zcmX7vV`H6d(}mmEwr$(CZQE$vU^m*aZQE(=WXEZ2+l}qF_w)XN>&rEBu9;)4>7EB0
zo(HR^Mh47P)@z^^pH!4#b(O8!;$>N+S+v5K5f8RrQ+Qv0_oH#e!pI2>yt4ij>fI9l
zW&-hsVAQg%dpn3NRy$kb_vbM2sr`>bZ48b35m{D=OqX;p8A${^Dp|W&J5mXvUl#_I
zN!~GCBUzj~C%K?<7<Lh)kN@p@FGCC|S+xWQML0moHteKXeb8wJdQH-gjR&7XH4^`G
z18O**eOiwBMhV|yW}+)H1-qhO2aiyh%|2AVjN^2;YOavpiJ0<eag))8h36*<J}UmM
zh5)@`0psU<7DA@>+UZ_q|L)EGG#_*2Zzko-&Kck)Qd2%CpS3{P1co1?$|Sj1?E;PO
z7alI9$X(MDly9AIEZ-vDLhpAKd1x4U#w$OvBtaA{fW9)iD#|AkMrsSaN<NH{Yj<;r
zJR^3Ei65zvOeKyy368On7xFr|udQdRg!uvBg!o!BMrLO4`R~=WSNU)FX3DN?UiRax
zt1Nc*-cP@`?<QFC2v<*!x!PMc9!`H@mKU%%&E@IsfX5sqlj5iC9s3zUdp-4#$Pj{D
z-ud{50Mow^jMOt&t!;$ig-cEDVcZ_LuIYDHKBg^7I=ypK+jgs5kU>z(69;h1iM1#_
z?u?O_aKa>vk=j;AR&*V-p3SY`CI}Uo%eRO(Dr-Te<99WQhi>y&l%UiS%W2m(d#woD
zW?alFl75!1NiUzVqgqY98fSQNjhX3uZ&orB08Y*DFD;sjIddWoJF;S_@{Lx#SQk+9
zvSQ-620z0D7cy8-u_7u?PqYt?R0m2k%PWj%V(L|MCO(@3%l&pzEy7ijNv(VXU9byn
z@6=4zL|qk*7!@QWd9imT9i%y}1#6+%w=s%WmsHbw@{UVc^?nL*GsnACaLnTbr9A>B
zK)H-$tB`>jt9LSw<e?{;w%G1n2P1|J0yx{P*t9jy^kFBQA#W^mMH}$k)?g@xYa+r6
zlJ|^>aY+4!F1q(YO!E7@?SX3X-Ug4r($QrmJnM8m#;#LN`kE>?<{vbCZbhKOrMpux
zTU=<lXsfLv18QvxbhhA4+*COFL{lUUKwTI^?+b!W)%bL{5ICeYgxi==?r7ml!xm!7
zhdowRK-_IXtccBJOk{G;-<SoBvpq6sZLl$N`1P40zF|?WL@Z|Q9X1BGIgTLA`g8zK
z6>02hy${;n&ikcP8PqufhT9nJU>s;dyl;&~|Cs+o{9pCu{cRF+0{iyuH~6=tIZXVd
zR~pJBC3Hf-g%Y|bhTuGyd~3-sm}kaX5=T?p$V?48h4{h2;_u{<w17YEUL6d7r?`IW
zC*$~_<n<G0=2K)oe-lc+anc79OB&v^xcsx>b}8s~Jar{39PnL7DsXpxcX#3zx@f9K
zkkrw9s2*>)&=fLY{=xeIYVICff2Id5cc*~l7ztSsU@xuXYdV1(lLGZ5)?mXyIDf1-
zA7j3P{C5s?$Y-kg60&XML*y93zrir8CNq*EMx)Kw)XA(N({9t-<qoKDSToyx=0O<F
zcZOoLjIgey#`z<{D=F=|jxIUj7G)oJ<z}l*I|pAYR$!%#pP+(2spDO4`pdL|xx}1{
zoCp>XAdX;rjxk`OF%4-0x?ne@LlBQMJe5+$Ir{Oj`@#qe+_-z!g5qQ2<A}CkiPnG%
z#<9e?dl5B^C&2WW>SxKQy1ex_x^Huj%u+S@Ef<hwN)i9H)8f1HlVw`IOD9pOk3F=2
zUFdA$C*zaFwUQBueYyoochf_SCZNJv(o>EPP-70KeL@7@PBfadCUBt%`huTknOCj{
z;v?wZ2&wsL@-iBa(iFd)7duJTY8z-q5^HR-R9d*ex2m^A-~uCvz9B-1C$2xXL#>ow
z!O<5&jhbM&@m=l_aW3F>vjJyy27gY}!9PSU3kITbrbs#Gm<n97W+KiQNM{Zc)RZRJ
zO9VM|sK^tMcU~i!8>0gD?~Tub8ZFFK$X?pdv-%EeopaGB#$rDQHELW!8bVt`%?&>0
zrZUQ0!yP(uzVK?jWJ8^n915hO$v1SLV_&$-2y(iDIg}GDFRo!JzQF#gJoWu^UW0#?
z*OC-SPMEY!LYY*OO95!sv{#-t!3Z!CfomqgzFJld>~CTFKGcr^sUai5s-y^vI5K={
z)cmQthQuKS07e8nLfaIYQ5f}PJQqcmokx?%yzFH*`%k}RyXCt1C<ET#?;-nOj&O_w
z0RjAX8jLRfx+{_%VvA`D$(8(CLoDsiJS|qdlA=Gf(}R42xBwFy^Y@K2Y2B5F73lIC
z_Y!h7$sAGI9OU(?0{R3(+-fA%SuZuc8k~w~-j=n9^J*)UAglq-zi6`BA?FFqiPk`=
zFg88a>hfv5KAeMWbq^2MNft;@`hMyhWg50(!jdAn;Jyx4Yt)^^DVCSu?xRu^<p^cd
z`AfT9K@W351Yp;%_)?5A+{-Sd(?fRZxF>$*&&=O6#JVShU_N3?D)|$5pyP8A!f)`|
z>t0k&S66T*es5(_cs>0F=twYJUrQMqYa2HQvy)d+XW&rai?m;8nW9tL9Ivp9qi2-`
zOQM<}D*g`<R@=@6xNeyfv+%3f>28wJ54H~1U!+)vQh)(cpuf^&8uteU$G{9BUhOL|
zBX{5E1**;hlc0ZAi(r@)IK{Y*ro_UL8Ztf8n{Xnwn=s=qH;<JNq#i!T6Ol{k1N4b^
z$<%S3ch|aFn`rrM7xp7Akdf|09MH!w#X^*^qWGG{=N4~T_X*vr=<9Sr1u0vsR}sts
z)nFJ(24Ixno*7<(J<m!__?DA*(CbmQp|*(D0(fX(I-?~pJ$dd#K$~K_UvNXKlCI0J
zAi?sKWY{_$3e#h0k$v$pwmR{i3$N!{^!LlUzPj3g^GFVYwXlrd2LlB<R)>fxkK+uL
zY)0pvf6-iHfX+{F8&6LzG;&d%^5g`_&GEEx0GU=cJM*}RecV-AqHSK@{TMi<FO;*^
zVIJ;K`_Wr?eL=2LPxeiG(NZ1XC=-->r1jaFf&R{@?|ieOUnmb?lQxCN!GnAqcii9$
z{a!Y{Vfz)xD!m2VfPH=`bk5m6dG{LfgtA4ITT?Sckn<92rt@pG+sk>3UhTQx9ywF3
z=<e;Zhr+VTogSTRTa`0dZhDEYFlz}iYJ-Ca|IInsl^}*}ws*9OfO%7=435_R)~{jX
z|C))|=(?2={M$D}=%iUpf}~t>$|RgTN<fa5trxoGzi!H8bL1;zwAZzO0dvgcXe@7*
zY^o`jnIo&H5j7~yXg%$-pqf(9s|<SUx!UU?X^UEuJZ$s34K@~hg}#3V7Z`DSr`YnM
zvIU1teWhe~+xESB+c<W5In8tK0k4NOX6yQKGIQC&FBWX~Q<%G1#g|49BQj)83&2%|
zUs<JZ#bIxoeZa-tcILuIpp|3uGrjDI|D_t;j#v#bOj@j6furiEu;aSGahA)d$u{2o
zqNVUdWs5~-2cc><=6-B4+UbYWxfQUOe8cmEDY3QL$;mOw&X2;q9x9qNz3J97)3^jb
zdlzkDYLKm^5?3IV>t3fdWwNpq3qY;hsj<HM2+6N#zwmbwcBhBpw2~<e#bkw}$`!Kn
z1)=dUBB{1-93?8Rgm^nY*tF;B7wJ85X%C|9a*1^ara~8e6Ph?%g(rj#aA~z51%f(0
zrd%qQA0cZ0Z2^@t)yso?$#(rr$H2>=pk9;P!wVmjP|6Dw^ez7_&DH9X33$T=Q{>Nl
zv*a*QMM1-2XQ)O=3n@X+RO~S`N13QM81^ZzljPJIFBh%x<~No?@z_&LAl)ap!AflS
zb{yFXU(Uw(dw%NR_l7%eN2VVX;^Ln{I1G+yPQr1AY+0MapBnJ3k1>Zdrw^3aUig*!
z?xQe8C0LW;EDY(qe_P!Z#Q^jP3u$Z3hQpy^w7?jI;~XTz0ju$DQNc4LUyX}+S5zh>
zGkB%~XU+L?3pw&j!i|x6C+RyP+_XYNm9<?|3Dg4wCcC?VQr<bzeKrSpw8b>`rtH<m
z^q>pqxvoCdV_MXg847oHhYJqO+{t!xxdbsw4Ugn($Cwkm<z&(cEdbmmHs@&T9(Qe{
zY0mG0W~lWDbhWX{<STjlgSTP0kYxL@HoRpRDzEHe(+0b)#tmEv>^+36&goy$vkaFs
zrH6F29eMPXyoBha7X^b+N*a!>VZ<&G<WRC+Em14oermb3Ag6>f3eeE+Bgz7PB-6X7
z_%2M~{sTwC^iQVjH9#fVa3IO6E4b*S%M;#WhHa^L+=DP%arD_`eW5G0<9Tk=Ci?P@
z6tJXhej{ZWF=idj32x7dp{zmQY;;D2*11&-(~wifGXLmD6C-XR=K3c>S^_+x!3OuB
z%D&!EOk;V4Sq6eQcE{UEDsPMtED*;qgcJU^UwLwjE-Ww54d73fQ`<mR8A?eX*vOu(
z;s!AeRF7I0k*#FrPh}Ask{q+pF}CGNj<O)=(?Ka}%Tcfb=@JBbsSO=CT&lP}la&!T
zB*Gtuxukvb<;Db%3gAA$>9Sv%^H>juEKmxN+*aD=0Q+ZFH1_J(*$~9&JyUJ6!>(Nj
zi3Z6zWC%Yz0ZjX>thi~rH+lqv<9nkI3?Ghn7@!u3Ef){G(0Pvwnxc&(YeC=Kg2-7z
zr>a^@b_QClXs?Obplq@Lq-l5>W);Y^JbCYk^n8G`8PzCH^rnY5Zk-AN6|7Pn=oF(H
zxE#8LkI;;}K7I^UK55Z)c=zn7OX_XVgFlEGSO}~H^y|wd7piw*b1$kA!0*X*DQ~O`
z*vFvc5Jy7(fFMRq>XA8Tq`E>EF35{?(_;yAdbO8rrmrlb&LceV%;U3haVV}Koh9C|
zTZnR0a(*yN^Hp9u*h+eAdn)d}vPCo3k?GCz1w>OOeme(Mbo*A7)*nEmmUt?eN_vA;
z=~2}K_}BtDXJM-y5fn^v>QQo+%*FdZQFNz^j&rYhmZHgDA-TH47#Wjn_@iH4?6R{J
z%+C8LYIy>{3~A@|y4kN8YZZp72F8F@dOZWp>N0-DyVb4UQd_t^`P)zsCoygL_>>x|
z2Hyu7;n(4G&?wCB4YVUIVg0K!CALjRsb}&4aLS|}0t`C}orYqhFe7N~h9XQ_bIW*f
zGlDCIE`&wwyFX1U>}g#P0xRRn2q9%FPRfm{-M7;}6cS(V6;kn@6!$y06lO>8AE_!O
z{|W{HEA<ltk<^3&v(zsFX*bwn-7C9ar58A%@T#a2aBXBI3o8y|0%iXyV|l}sg<m!U
zT-k`jQb^8oQ(X_pliK#72^1&??md*<37!Bk(#NKKogWc3``(E}^iGHbqW0g7K=ab9
zUpQLYqhr5^vY&U1qg(xAv~ioY%|o`fES|sX{~Wc{ICx(f+xCyUQrPRZo%C)iUsb?a
zew2`T0vB@=rlln7nHw`MOepMp{ML^Ja1HQ#K5&NkV!sK<&i;~{TAs9+9gJ+;Lz$da
zS};~j46utcwL=M>bI0eD$z9tQvWth7y>qpTKQ0$EDsJkQxAaV2+gE28Al8W%t`Pbh
zP<xNDbl}BXrCZDI-ZIp5KNryWfd5bbbb`{6SD?OqGei3JO+_9Y0^=JD49vHrX%anv
zx});E3Pu1^ut&6#2_NeA4BT3`JGz8npG~rjS&&I~|7dIHW39&8ddY=MbOQ07^K~#4
zAG+`}_`B-xd@Z|;49eO;uCH?5F>l#%_S@a^6Y;lH6BfUfZNRKwS#x_keQ`;Rjg@qj
zZRwQ<VVZO-GvahB#0&T?91+}W8%P`g+CV*WTDcQ*ECFq*EHu?RdUx6LGyt0&7`Ke<
z(79}oUUG8J6UjGrhsg*-n+Rzoc4V@;e92&Fv0kzs^dCx1BLpw}k)lbrZ+!!4b9rh_
z{{2T!5K`M=B=D3khI9L_t!YX2loweH^^dUIKs*I^r|TIqcqitK3=NKDIU!(-Fg-cJ
za`ZI<4s`pPup!z94p%kUdn`lUG3}x)93kQpWvT(t+fqvi%M1$&iX<2wVFk(pY2$hw
z;e()Ntun1;jRBx1Z_weKQ&VAEOVyzxlq3KpHRdO0*!<udc458*N}Tk<yvh(lq)+w`
zFEEJtD{)&$;xqmiHT}}u4L}!;Y0mFCG2c*If`gd*;VV3Vr8aURePSQ2wwxyp-Zg;}
z2y5*0{Ns5&4yo$wW`M$Wj^j~)lpJ%p0cE61bW)~~n2lIIinNG2#<)ly#Dd$&{nJPv
zg4t&sQ>XZd-rWngbYC}r6X)VCJ-=D54A+81%(L*8?+&r7(wOxDSNn!t(U}!;5|sjq
zc5yF5$V!;%C#T+T3*AD+A({T)#p$H_<$nDd#M)KOLbd*KoW~9E19BBd-UwBX1<0h9
z8lNI&7Z_r4bx;`%5&;ky+y7PD9F^;Qk{`J@z!jJKyJ|s@lY^y!r9p^75D)_TJ6S*T
zLA7AA*m}Y|5~)-`cyB+lUE9CS_`iB;MM&0f<l?>X**f;$n($fQ1_Zo=u>|n~r$<Jr
zR^#jf&wU3t7icxNwdO00+#eaU-n3>HvkOUK(gv_L&@DE0b4#ya{HN)8bNQMl9hCva
zi~j0v&plR<c*E(e0zzsewSSCWoYNe#zBz~4gPJgFZQI3UCOBaQQRa$tLad>sp?_zR
zA}uI4n;^_Ko5`N-HCw_1BMLd#OAmmIY#ol4M^UjLL-UAat+xA+zxrFqKc@V5Zqan_
z+L<vzaOW^{oV^!V7EZrt5zgiq1K|N3u8LNf1;g77T|EW9D_#STe{m<8-*ti95##sc
zyGd7O;e|{=b{s4r=uzKM#MLq2_E;Cl(9-pAG6@1SP&CT#ClJkT<QXn*!E+nh?X8yH
zcMM9Cu5gnFv(px!)SJCU0$OR|_#*aP@{anWc1hQYPQ<vIYfp}%E>oVX-Ub2mT7Dk_
z<+_3?XWBEM84@J_F}FDe-hl@}x@v-s1AR{_YD!_fMgagH6s9uyi6pW3gdhauG>+H?
zi<5^{dp*5-9v`|m*ceT&`Hqv77oBQ+Da!=?dDO&9jo;<Pz>=JkzrQKx^o$RqAgzL{
zjK@n)JW~lzxB>(o(21ibI}i|r3e;17zT<i-A5WFI)l0=DfsHm5h*a$|nW5w2Z&g&C
zfIgxfWxL<b7<;2;0#?vaiBe<amMP~Hu(BM=D~XPJ7}ZO*rIjib5G!Hs^`n3(-k2!C
z!UiS)1Z$XWmt$B+Er?b&6mkk#-D8jLY4v(Z+xBe4-ze{3Z8c}rWsNw!vUg{(e_E!O
zGy`+{2}jJyQpV^h@Z#T~xRhsp27NBzs^L@HyvOdwuq!yc4;~zS%a~%ld&pD6w$+^X
z4e>jdEl5c`Cn-KAlR7EPp84M@!8~CywES-`mxKJ@Dsf6B18_!XMIq$Q3rTDeIgJ3X
zB1)voa#V{iY^ju>*Cdg&UCbx?d3UMArPRHZauE}c@<I9zuOvh8{QaF~!8NmT14ZcB
zXDiDmH{)Nqe}et`?!f-+>Fdk;z85OcA&Th>ZN%}=VU%3b9={Q(@M4QaeuGE(BbZ{U
z?WPD<in~-M2H?eVIA^jvfi8U5&YUg%&UM*~ACg=ZhWs!uCP(%s`pq9sl~Rn<t^myp
zyBzUD^1Z%w8~>G+sjJSz1OYFpdImKYHUa@ELn%n&PR9&I7B$<-c3e|{tPH*u@hs<V
z<H3(ufuU6=m@PPl*Z5lGM+(I8E&;iIFkSFjg3169JAkH{U;A7oj2f|AG+4C#qrq+Q
zSEMgDyQI)s?9FfgI&>)Ci>Z@5$M?lP(#d#QIz}~()P7mt`<2PT4oHH}R&#dIx4<lE
zZM@x|FQ}SFXpD?iesWRPGe4;I$q-cmdj*5H_;}S&ml}cX6{lN;Hc9cQTAqNu`75Pu
zm)lpl05-+8mc%^XjQ*+==x*CFMVtN~s|N?yyj?nb@+0b65tp1aoBu<}P|sqT#oxbu
z^9TFiuY0AD1GFwI&Vgx(N@^#!xc>uq943D8gVbaa2&Fygr<d;h^~M>Sk3*whGr~Jn
zR4QnS@83UZ_BUGw<CYZIe2@PKT3-tK{$nVwgnd*Bz&L7Z=B%q|uGZL4XMBC&BC8z`
z)4RakhXH*19!wvlHiX`r;-qnceEtmXoaf9Zp<!_s;6!03b&^hmte9c568Cp_f0FC4
z@(8kb-pvrKkck`L9ixZbpEUWNQ0z@i@*Zn<GA0aPaURQm-ybWQ)C=W4nKD;in<YK)
z$64%N3;kIt{W*W&vFr90Wr=?+S%KEmK=Dy9&+KBnRXYEkzmZq_)xy)7^gtx11&*2l
zFT&6!U^|U`oJ*VvM^5Eh;*~96yk0B!PnHB-7=?Z<!C6~0J+Dn$C3)1UuE9Wd>;?@T
zo5jA#potERcBv+dd8V$xTh)COur`TQ^^Yb&cdBcesjHlA3O8SBeKrVj!-D3+_p6%P
zP@e{|^-G-C(}g+=bAuAy8)wcS{$XB?I=|r=&=TvbqeyXiuG43RR>R72Ry7d6RS;n^
zO5J-QIc@)sz_l6%Lg5zA8cgNK^GK_b-Z+M{RLYk5=O|6c%!1u6YMm3jJg{TfS*L%2
zA<*7$@wgJ(M*gyTzz8+7{iRP_e~(CCbGB}FN-#`&1ntct@`5gB-u6oUp3#QDxyF8v
zOjxr}pS{5RpK1l7+l(bC)0>M;%7L?@<gTpQHS2vT6B!6J&3W7)M@B+Riqg<&33yLj
zDz(l!s5Pz!g$so{@5)~9fIqA36)y?X;K=l3yijdZuqQpxQ-60#B4i){*o9>6t}S&a
zx0gP8^sXi(g2_g8+8-1~hKO;9Nn%_S%9djd*;nCLadHpVx(S0tixw2{Q}vOPCWvZg
zjYc6LQ~nIZ*b0m_uN~l{&2df2*ZmBU8dv`#o+^5p>D5l%9@(Y-g%`|$%nQ|SSRm0c
zLZV)45DS8d#v(z6gj&6|ay@MP23leodS8-GWIMH8_YCScX#Xr)mbuvXqSHo*)cY9g
z#Ea+NvHIA)@`L+)T|f$Etx;-vrE3;Gk^O@IN@1{lpg&XzU5Eh3!w;6l=Q$k|%7nj^
z|HGu}c59-Ilzu^w<93il$cRf@C(4Cr2S!!E&7#)GgUH@py?O;Vl&joXrep=2A|3Vn
zH+e$Ctmdy3B^fh%12D$nQk^j|v=>_3JAdKP<DgTp;^DIt;)F-EZ+0`lzs+uJ77mXF
zKO=n36r1cJ6ZeI&O)*O1QP@8JX7{q1%3ybU`ux1R!~W&-hs|$w8=(7Htli#B52kNC
zQ^VL@u3up6CP>t2YVusbNW&CL?M*?`K1mK*!&-9Ecp~>V1w{EK(429OT>DJAV21fG
z=XP=%m+0vV4LdIi#(~XpaUY$~fQ=xA#5?V%xGRr_|5WWV=uoG_Z&{fae)`2~u{6-p
zG>E>8j({w7njU-5Lai|2HhDPn<YS^8ehQhE5I<iRyh-9mT)mWhU$mn4$2>tQ(X@yB
z9l?NGoKB5N98fWrkdN3g8ox7Vic|gfTF~jIfXkm|9Yuu-p>v3d{5&hC+ZD%mh|_=*
zD5v*u(SuLxzX~owH!mJQi%Z=ALvdjyt9U6baVY<88B>{HApAJ~>`buHVGQd%KUu(d
z5#{NEKk6Vy08_8*E(?hqZe2L?P2$>!0~26N(rVzB9KbF&JQOIaU{SumX!TsYzR%wB
z<5EgJXDJ=1L_SNCNZcBWBNeN+Y`)B%R(wEA?}Wi@mp(jcw9&^1EMSM58?68gwnXF`
zzT0_7>)e<HsEdDR$NlH-Oz4z)4YK4*R4;r~#@Bf-qNf*W7N2rb$7_0t8fk5+&(AR?
zC<bDf1Ag^T@+r>p%6hid-*DZ42eU)tFcFz7@bo=<<wtaeOw^nO{PSb9Z|&*5Akk|K
zT2kaOkz6nI=><d29ZzfLPsac!Z^b$VBXIcmh|xmM@r6)Mz5F3JrGc)HCM;QKXdFn2
zN)F<RDq^r=d;jb2Z_(C<ZvTXP1meFPC<#c8lVpTKloUq~l@!Z@3NZ9Vm&6DNgWy6`
zT}ws|4y{dC<8Z`V4@yo&7N=2Lrz)W}q0uSpoVQq?bXwW`hPgX1VxJc#-*ykw$5f~i
zC|ut1-c`N^gj;icG!hG34qg7q@S4iL%Vod%yqRXM`*vS6e0@J#RM8K8!&}P;Cm)AA
z)coSDZ19_26LgSk3ec~JtbuRjAHdkaY|=S$lB0)roUPH^c%Bj9Hk?JTOI5=(QpBAL
z3sAe_Ulf+sv37C%@|)xs+e!7Qj4K_W!%anrHBA})sm;TbBl+0B6}Z)(%yc<S4D7=h
zfb2Z7*=sa~gIe^IJ52<6s26k2%UEB?h3dcZ_+%c$(i1?60)R)50crnWc*@*MJ7j?m
z+#_q#5p|jLUHhtmm6?Sn8@r$&TZ(yj%-c?;b-Jx|^5Dlk#go9U4yb7#!fULIqQBds
z&2I{W54>~CrLXpNDM}tv*-B(ZF`(9^RiM9W4xC%@ZHv=>w(&~$Wta%)Z;d!{J;e@z
zX1Gkw^XrHOfYHR#<ZxGz&lw$T6G0Nxnpuw>hAU=G`v43E$Iq}*gwqm@-mPac0HOZ0
zVtfu7>CQYS_F@n6n#CGcC5R%4{+P4m7uVlg3axX}B(_kf((>W?EhIO&rQ{iUO$16X
zv{Abj3ZApUrc<F~?Lk!w&f2Xtq>ar7Ck}B1%RvnR%uocMlKsRxV9Qqe^Y_5<iO7aU
zq3dedVPYS67#l$e6weB_b04g?APY5;b;`8Mge$lbj+y6C?EmJGf9^Ws=8R+H6IIt7
zKe|lohcPUhO~ppHQMA6xg5WT0HL*b(&d+ITYX&Vxg_jkc$c~ekjBwTWLMPMT{$RcV
z!!#NM$Q>C$xQW@9QdCcF%W#!zj;!xWc+0#VQ*}u&rJ7)zc+{vpw+nV?{tdd&Xs`NV
zKUp|dV98WbWl*_MoyzM0xv8tTNJChwifP!9WM^GD|Mkc75$F;j$K%Y8K@7?uJjq-w
zz*|>EH5jH&oTKlIzueAN2926Uo1OryC|Cmkyo<gXO1RvR0{t_&+f8HnilyXjJ;i5c
zxvz56DxWcDNyd~<XL{%_niD)o#)a~_BvNhsPHlrS(q>QZABt#FtHz)QmQvSX35o`f
z<^*5XXxexj+Q-a#2h4(?_*|!5Pjph@?Na8Z>K%AAjNr3T!7RN;7c)1SqAJfHY|xAV
z1f;p%lSdE8I}E4~tRH(l*rK?OZ>mB4C{3e%E-bUng2ymerg8?M$rXC!D?3O}_mka?
zm*Y~JMu+_F7O4T;#nFv)?Ru6<Jo9R8v(kHoGs?{2=?i%Iyi}0!42){}rxVvL!pJk>
z92r|old*4ZB$*6M<gaMZQRWjo$?vcL>40B;V&2w->#>4DEu0;#vHSgXdEzm{+VS48
z7U1tVn<u)EUy|Q1O0Ir9Rk;P;jjZ*6(#9^$Kg9d7dC=gOZg$lTVjzn?JP?E=U!^HZ
zZX5_RsBK=U`f|HJ?<gCGcIs3j%>#AnQ3z#gP26$!dmS5&JsXsrR>~rWA}%qd{92+j
zu+wYAqrJYOA%WC9nZ>BKH&;9vMSW_59z5LtzS4Q@o5vcrWjg+<k0<>28#&$*8SMYP
z!l5=|p@x6YnmNq>23sQ(^du5K)TB&K8t{P`@T4J5cEFL@qwtsCmn~p>>*b=37y!kB
zn6x{#KjM{S9O_otGQub*K)iIjtE2NfiV~zD2x{4r)IUD(Y8%r`n;#)ujIrl8Sa+L{
z<QB-&R!kciC=u%_Pu&BK7EGV1yUp_s@Qqpd^*Cf6xOgo*V|4$hGSSKA2WpaB33(Y*
zN`4+3BzbZ%lDtqn<HlZ9zS#|E)QFj}w@z>>ixGoZJ1K@;wTUbRRFgnltN_U*^EOJS
zRo4Y+S`cP}e-zNtdl^S5#%oN#HLjmq$W^(Y6=5tM#RBK-M14RO7X(8Gliy3+&9fO;
zXn{60%0sWh1_g1Z2r0MuGwSGUE;l4TI*M!$5dm&v9pO7@KlW@j_Qboe<WL{+D=0|S
z04rq}Cn#r|Qh`OLI1_QL5U}R{geifFF9P8T)vM{2=Wygi8p}p~9owhqk+?19n*#yM
zj*#{xPp0@cJDT3i?GOww`QWntOHm1f!c}va;b?tOb2%YIGP6RX6g6=_2i^T=N30Lx
zGGxIOD~)lgV;r;otK)h-PN}|RW44>Dd1k9!7S)jIwBza-V#1)(7ht|sjY}a19sO!T
z2VEW7nB0!zP=Sx17-6S$r=A)MZikCjlQHE)%_Ka|OY4+jgGOw=I3CM`3ui^=o0p7u
z?xujpg#dRVZCg|{%!^DvoR*~;QBH8ia6%4pOh<#t+e_u!8gjuk<TWg3Zfj1VT(rT(
z+;cFX>_Aic=|*H24Yq~Wup1dTRQs0nlZOy+30f16;f7EYh*^*i9hTZ`h`015%{i|4
z?$7qC3&kt#(jI#<76B<Zc8_jzF}-jz?c}RJj?}d~o|A$peyx&2tAof6rbF8`JJ<LA
zkx}wt?p2W^anecKr7&{Wp)yE)mY79tNY^xv{LL@QzaTPd04d4;6lK5S1YUv7FN_j~
ztrXYDtR<2@iHMgRaE>iz=bl=k<lj_Eu32rlOy9O^>=&qyaH><ZvCyu=u73u5Amj3Z
zlBLffeJyS(F;wLAij_z7LBu;aGHquG4Ecd?iDly_Tmu`)-}iF7q9@!(siy|rCL-T)
zMJ=;>foM#zA7}N`Ji~<RlOT@^-5)rOA8|yZid=(^iWhCiV9#ueyabSRKG4M!ILEeK
z@}E_xM=5xQ^8{3sm^736?!4juH{#woOSkhU{pHmMs7%U%L{L7L?9}ZSTh126kqS##
z7l>)-f-t&tR4^do)-5t?Hz_Q+X~S2bZx{t+MEjwy3kGfbv(ij^@;<DrdI3T`-v~_9
zcRbkI9fdmuaS>=?H_^FIIu*HP_7mpV)NS{MY-Rr7&rvWo@Wd~{Lt!8|66rq`GdGu%
z@<(<7bYcZKCt%_RmTpAjx=TNvdh+ZiLkMN+hT;=tC?%vQQGc7WrCPIYZwYTW`;x|N
zrlEz1yf95FiloUU^(onr3A3>+96;;6aL?($@!JwiQ2hO|^<dM|7%UlhRzx6gJV9|=
z=NpG%ffNZg)Ud#-8(4E`FwI`_0dIbwuD$x#G#SyFPIa|i#MJAmCx}gJ+(iX-<$2`F
zq$b)!gO0}32UorvmcjGcs0oMXeFUsIgX?OLA?6&a$PuFr{46GnR!g|y(}h#A`6l)i
zpT!HijVb^7e>i)b4pCJ7-y&a~B#J`#FO!3uBp{5GBvM2U@K85&o0q~6#LtppE&cVY
z3Bv{xQ-;i}LN-60B2*1suMd=Fi%Y|7@52axZ|b=Wiwk^5eg{9X4}(q%4D5N5_Gm)`
zg~VyFCwfkIKW(@@ZGAlTra6CO$RA_b*yz#){B82N7AYpQ9)s<e%0{=46^?9=ZEQ-l
zAQWxGXxNdHjc*vyWIT!33B;oq*FC${iQp|GUP9ZdljM~?wy^$O5X<tj{P6elLK%T5
zC#svA*D5wmh9fQFpm0}|k!**l^Nlj<UP85>LQfhOAOMUV7$0|d$=_y&jl>va$3u-H
z_+H*|UXBPLe%N2Uk<CC2<5os`NfNoGP!Ib0m9`%)&9#>wu1*)kt!$Y>(IH3`YbEt;
znb1uB*{UgwG{pQnh>h@vyCE!6B~!k}NxEai#iY{$!_w54s5!6jG9%pr=S~3Km^EEA
z)sCnnau+ZY)(}IK#(3jGGADw8V7#v~<&y5cF=5_Ypkrs3&<giP_=TY>7{}%(4KM7)
zuSHVqo~g#1kzNwXc39%hL8atpa1Wd#V^uL<!G-fTwDKo?kirzAqPxES@VyF*HS5iQ
z6u^SQDo@?kL(sZYU;lwg?P>=W^&E)fvGivt)B!M)?)Y#Ze&zU6O_I?1wj)*M;b*dE
zqlcwgX#eVuZj2GKgBu@QB(#LHMd`qk<08i$hG1@g1;zD*#(9PHjVWl*5!;ER{Q#A9
zyQ%fu<$U?dOW=&_#~{nrq{RRyD8upRi}c-m!n)DZw9P>WGs>o1vefI}ujt_`O@l#Z
z%xnOt4&e}LlM1-0*dd?|EvrAO-$fX8i{aTP^2wsmSDd!Xc9DxJB=x1}6|yM~QQPbl
z0xrJcQNtWHgt*MdGmt<!{59BQ@6R8^J{W0F$k->j%x6SWYd?uGnrx4{m{6A9bYx`m
z$*UAs@9?3s;@Jl19%$!3TxPlCkawEk12FADYJClt0N@O@Pxxhj+Kk(1jK~<na;r+|
zm{2yx7Tja~_uQ<c+6H|8mu<-YUr>laR0*KGAc7%C4nI^v2NShTc4#?!p{0@p<m@p~
zP+-C1vUzlE>0T#HSIRndH;#Ts0YECtlSR}~{Uck+keoJq6iH)(Zc~C!fBe2~4(Wd>
zR<rpD32Yz=|MVv!jF0SQWXAJmWFDV0ka3bIedmZ8+smZ53Ho~&j~&2Ke3DNyKLA+A
zbJ10q_<tT*Npv&sy}xR;{lmk%mN;2WFCXUz0F+vcRuJB#5+NyF41aEUQ~yy+_{tj7
zrTp6j;lWAkEZhiI7QwVMiDXtF6(d<{RD?!c&lT5iMqpaD7bA4G&{!+@%C<d+c70Wi
zU;*do=_MRGv5LZbeY<RwC*Ablvqx17uLc~M%ux><4I1zMeW$<0xww(@09!l?;oDiq
zk8qjS9Lxv$<5m#j(?4VLDgLz;8b$B%XO|9i7^1M;V{aGC#JT)c+L=BgCfO5k>CTlI
zOlf~DzcopV29Dajzt*OcYvaUH{UJPaD$;spv%>{y8goE+bDD$~HQbON>W*~JD`;`-
zZEcCP<ta~{4Akw!C0ePB!7KfHnqyMC?0a{DvplTx4je+~_=&7<z9BH2!>SdlCvANe
z=?|+e{6AW$f(H;BND>uy1MvQ`pri>SafK5bK!YAE>0URAW9RS8#LWUHBOc&BNQ9T+
zJpg~Eky!u!9WBk)!$Z?!^3M~o_VPERYnk1NmzVYaGH;1h+;st==-;jzF~2LTn+x*k
zvywHZg7~=<axHmC^DNM(rrxXXZ+6q8kDUy#Wo11Iv58)OcYVE^Pyh_LpKFMxqfz^m
zX-?%QT4qvLZS`UJtdA|zc7nI&w@Ai(hyYF^v>aiJe=OhS@U>1fYGvT1+jsAaiaM;)
zay2xsMKhO+FIeK?|K{G4SJOEt*eX?!>K8jpsZWW8c!X|JR#v(1+Ey5NM^TB1n|_40
z@Db2gH}PNT+3YEyqXP8U@)`E|Xat<{K5K;eK7O0yV72m|b!o43!e-!P>iW>7-9HN7
zmmc7)JX0^lPz<DsX{e!8YOmOSO*e%?uh^R5cfwUMA1cm{WQM4>F#>$#D~nU^3f!~Q
zQWly&oZEb1847&czU;dg?=dS>z3lJkADL1innNtE(f?~OxM`%A_PBp?Lj;zDDomf$
z;|P=FTmqX|!sHO6uIfCmh4Fbgw@`DOn#`qAPEsYUi<Sw*qR|D%uSTGytX2<>BvUlw
zevH{)YWQu>FPXU$%1!h*2rtk_J}qNkkq+StX8Wc*KgG$yH#p-kcD&)%>)Yctb^JDB
zJe>=!)5nc~?6hrE_3n^_BE<^;2{}&Z>Dr)bX>H{?kK{@R)`R5lnlO6yU&UmWy=d03
z*(jJIwU3l0HRW1PvReOb|MyZT^700rg8eFp#p<3Et%9msiCxR+jefK%x81+iN0=hG
z;<`^RUVU+S)Iv-*5y^MqD@=cp{_cP4`s=z)Ti3!Bf@zCmfpZTwf|>|0t^E8R^s`ad
z5~tA?0x7OM{*D;zb6bvPu|F5XpF11`U<t~+Cx_B1`Z?9!gYQ4&!ts?51b2>5;b*$p
zNAq7E6c=aUnq>}$JAYsO&=L^`M|DdSSp5O4LA{|tO5^8%Hf1lqqo)sj=!aLNKn9(3
zvKk($N`p`f&u+8e^Z-?uc2GZ_6-HDQs@l%+pWh!|S9+y3!jrr3V%cr{FNe&U6(tYs
zLto$0D+2}K_<BSVc4q*O)uySXMPx!L{%pO}$eiT(du2M$spXfHZRz<w6X+6bVU4k9
zN@%hHFXZKrp?pIw&xB@-kL0cNJ>9kuxgFSeQ!EOXjJtZ$Pyl_|$mPQ9#fES=Sw8L%
zO7Jij9cscU)@W+$jeG<B&7|0SBAwFCf%-Zn?fuTSb+p^e@h~60vII8TXxfM|e04Bt
zy@W0cVbw}gVt?ity0Gt&-}Z-&kd+(02DzwKxR8olSYId-ycIO>px&vWP9ZN3fLDTp
zaYM$gJD8ccf&g>n?a56<DwgIK`LkiBKZbuS+^?j{+S7aD;C88gpGwnU|F*rQZag*2
zUg`I<vSR(<ms`C@QZl9<7{O`3#mCATDto?_bb8(LkEVJ4q%*CfZ#+s#!(loq^>X=y
zec%n<x7o9&6f>LN`(dVCpSl9&pJLf2BN;cR5F0Nn{(<Ebul!}s7PIg_pO;G7Fpl=0
z@*Mn6d7fm%|BL90VthiyGnR}>LjGe7RjFe7efp3R_2JmHOY#nWEc2TMhMSj5tBf-L
zlxP3<v`xK)VzhGRzi_HGgaSOoYS>sV`!?@!mRnDTac{35I7<izXnk?f{kA#xa2S3!
z{k7hS;q0Sh7b`M#(tpCk400Lc;D6?CQ^v8;AHXR5eyBE`_v<xTtKe)Mwp#-j^DsjS
zsLKM$h-e&5i>h@WTfRjRiFw*Q*aD8)n)jdkJC@)jD-&mzAdK6Kqdct8P}~dqixq;n
zjnX!pb^;5*Rr?5ycT7>AB9)RED^x+DV<PtA2YVOErZzn~r7$zWHe1`39HIxp#q<lB
zGJt15`R_`Vb@|g>DmIbHKjcDv2lHK;apZOc=O@`4nJ;k|iikKk66v4{zN#lmSn$lh
z_-Y3FC)iV$rFJH!#mNqWHF-DtSNbI)84+VLDWg$ph_tkKn_6+M1RZ!)EKaRhY={el
zG-i@H!fvpH&4~$5Q+zHU(Ub=;Lzcrc3;4Cqqbr$O`c5M#UMtslK<bCBM6b4qP%uu2
zXpMuX2`WDC1lqF`Y$CO^C&>$3r+Cuz>xKl+xW?`t2o=q`1djXC=Q6`3C${*>dm~I{
z(aQH&Qd{{X+&+-4{epSL;q%n$)NOQ7kM}ea9bA++*F+t$2$%F!U!U}(&y7Sd0jQMV
zkOhuJ$+g7^kb<`jqFiq(y1-~JjP13J&uB=hfjH5yAArMZx?VzW1~>tln~d5p<T#Fl
zEVb(Bo{e)Z!MoQ~F^-3jEd3&CO04c*(y4e(v&DYgDaSv=<a!2FuuuIn;dZ$$Ay{KO
z7sdqF?l_u#_)CM+2NYmxY}Ccl0Y%psg1B}Z_hNri;B-leZ>t$uWR~TM!lIg+D)prR
zocU0N2}_WTYpU`@Bsi1z{$le`dO{-pHFQr{M}%iEkX@0fv!AGCTcB90@e|slf#unz
z*w4Cf>(^XI64l|MmWih1g!kwMJiifdt4C<5BHtaS%Ra>~3IFwjdu;_v*7BL|fPu+c
zNp6<j(&v4l8ppxnoKOCUI~><clB|zIt>87`{}e@|%)5g4U*i=0zlSWXzz=YcZ*&Bg
zr$r(SH0V5a%oHh*t&0y%R8&jDI=6VTWS_kJ!^WN!ET@XfEHYG-T1jJsDd`yEgh!^*
z+!P62=v`R2=TBVjt=h}|JIg7N^RevZuyxyS+jsk>=iLA52Ak+7L?2$ZDUaWdi1PgB
z_;*Uae_n&7o27ewV*y(wwK~8~tU<#Np6UUIx}zW6fR&dKiPq|$A{BwG_-wVfkm+EP
zxHU@m`im3cD#fH63>_X`Il-HjZN_hqOVMG;(#7RmI13D-s_>41l|vDH1BglPsNJ+p
zTniY{Hwoief+h%C^|@Syep#722=wmcTR7awIzimAcye?@F~f|n<$%<glynGe@l_#9
z30nmky{?<JKz{){SsGIs*^?U{?(p8<zkVtCHiV|D1BtlXY&|PWhdBFPEJ=ac!<CGF
zDzeno1U+s@ohY`k^InNs;j?P(V=hKzj^=A>=rM+Jkz9<kZJd?(pl-XwbK9uEX7+62
zMb6tu%v|dH9xCO5RX}L!n!qj65W$a-v}tm=>m>PF70$)AK@|h_^(zn?!;={;9Zo7{
zBI7O?6!J2Ixxk;XzS~ScO9{K1U9swGvR_d+SkromF040|Slk%$)M;9O_8h0@WPe4=
z%iWM^ust8w$(NhO)7*8uq+9CycO$3m-l}O70sBi<4=j0CeE_&3iRUWJkDM$FIfrkR
zHG2|hVh3?Nt$fdI$W?<|Qq@#hjDijk@7eUr1&JHYI>(_Q4^3$+Zz&R)Z`WqhBIvjo
zX#EbA8P0Qla-yACvt)%oAVHa#kZi3Y8|(IOp_Z6J-t{)98*OXQ#8^>vTENsV@(M}^
z(>8BXw`{+)BfyZB!&85hT0!$>7$uLgp9hP9M7v=5@H`atsri1^{1VDxDqizj46-2^
z?&eA9udH#BD|QY2B7Zr$l;NJ-$L!u8G{MZoX)~bua5J=0p_JnM`$(D4S!uF}4smWq
zVo%<tG@hoMj@<)H-JgP<Q7M9}m_vBCyCqYX-AdJU@&LEsL`m0haD)<x4J(=(3c|s#
zbn&v-l*)`eo?|iFuk96r?UB%1QXu?v?Njkszif{O!?^mu1lpr2r-OQV&9H@hntG(^
zj|_uj(Vzk+(<<LE1b+=&nd@qy$tcs41*1E26ryDYO9tiLn1xdy13ji(ME+C8#&uuz
zBIDnqWI!p+pkz{ewa6|8Ly~HAiW$WIf_|tUyK#R2a7JuZi2J)_S$l>kQ~C~X?cWCH
zo4s#FqJ)k|D{c_ok+sZ8`m2#-Uk8*o)io`B+WTD0PDA!G`DjtibftJXhPVjLZj~g&
z=MM9nF$7}xvILx}BhM;J-Xnz0=^m1N2`Mhn6@ct+-!ijIcgi6FZ*oIPH(tGYJ2EQ0
z{;cjcc>_GkAlWEZ2zZLA_oa-(vYBp7XLPbHCBcGH$K9AK6nx}}ya%QB2=r$A;11*~
z_wfru1SkIQ0&QUqd)%eAY^FL!G;t@7-prQ|drDn#yDf%Uz8&kGtrPxKv?*TqkC(}g
zUx10<;3Vhn<P(upRheT&3q1!X(bJNsLya$_=t*6hp5+8ZJ+|fGwhh!2S54M5{Z^Ik
zEWCn6ECjbZ;0S?VE83KP;dH@N8NAfvRqx3ALd>x{<Qf_SE}JUOjTetQ0k>gpWXM8H
zKc0kkM~gIAts$E!X-?3DWG&^knj4h(q5(L;V81VWyC@_71oIpXfsb0S(^Js#N_0E}
zJ%|XX&EeVPyu<mLTv8P+%0HPi%$><S@&-Ug3j3&ZjEz64uu4u7(cVJluR3`Mvu>}?
zz~(%slTw+tcY<LfEsDqkG5n$OFpv4hT>3ZMG$+diC8zed=CTN}1fB`RXD_v2;{evY
z@MCG$l9Az+F()8*SqFyrg3jrN7k^x3?;A?L&>y{ZUi$T8!F7Dv8s}}4r9+Wo0h^m=
zAob@CnJ;IR-{|<mV&-EKHX@5=1&om;I{UO+M+*6S+1rH~%#vNH`@gg_)uUaJdy=-_
zt(r+-w@a`jChNk+<Fm9pQ;UiGUjV$D;bb|zan@bWL;-y+;<a~?cC;!wr2>_D;_w)?
zcH@~&V^(}Ag}%A90);X2AhDj(-YB>$>GrW1F4C*1S5`u@N{T|;pYX1;E?gtBbPvS*
zlv3r#rw2KCmLqX0kGT8&%#A6Sc(S>apOHtfn+UdYiN4qPawcL{Sb$>&I)Ie>Xs~ej
z7)a=-92!sv-A{-7sqiG-ysG0k&beq6^<kX#blPuBTLk4nUZLa0Rw?p{JzAbo@#y^A
zDH9Et(b$xVU^HU5S36gthHjgsck-_;Jyj@#E`~oe>nX1L!Fs$JU#fsV*CbsZqBQ|y
z{)}zvtEwO%(&mIG|L?qs2Ou1rqTZHV@H+sm8Nth(+#dp0DW4VXG;;tCh`{BpY)THY
z_10NNWpJuzCG%Q@#Aj>!v7Eq8eI6_JK3g2CsB2jz)2^bWiM{&U8clnV7<2?Qx5*k_
zl9B$P@LV7Sani>Xum{^yJ6uYxM4UHnw4zbPdM|PeppudXe<Dfw0>}+<H|lAF(>OcX
z!nr!xaUA|xYtA~jE|436iL&L={H3e}H`M1;2|pLG)Z~~Ug9X%_#D!DW>w}Es!D{=4
zxRPBf5UWm2{}D>Em;v43miQ~2{>%>O*`wA{7j;yh;*DV=C-bs;3p{AD;>VPcn>E;V
zLgtw|Y{|Beo+_ABz`lofH+cdf33LjIf!RdcW~wWgmsE%2yCQGbst4TS_t%6nS8a+m
zFEr<|9TQzQC@<(yNN9GR4S$H-SA?xiLIK2O2>*w-?cdzNPsG4D3&%$QOK{w)@Dk}W
z|3_Z>U`XBu7j6Vc=es(tz}c7k4al1$cqDW4a~|xgE9zPX(C`IsN(QwNomzsBOHqjd
zi{<R{)|=YTo!0p@cbjBET4CHtzjF)FzZS}wo@ivqa)?dg1(I9m+LTuz^9@^HgJ7x6
zruk6yLp~0QlAd9mMnv(61_Zgfu%Fglg5Jq?%opsVV1y)NRQk$pq7n0k+_>D|jYSv5
zC>6#uB~%#!!*?zXW`!yHWjbjwm!#eo3hm;>nJ!<`ZkJamE6i>>WqkoTpbm(~b%G_v
z`t3Z#ERips;EoA_0c?r@WjEP|ulD+hue5r8946Sd0kuBD$A!=dxigTZn)u3>U;Y8l
zX9j(R*(;;i&HrB&M|Xnitzf@><3#)aKy=bFCf5Hz@_);{nlL?J!U>%fL$Fk~Ocs3&
zB@-Ek%W>h9#$QIYg07&lS_CG3d~LrygXclO!Ws<oAB91#zpSK#Pi1&6uk?}<Ww;*D
zKHhb!yUGj8>-|PxMsn@n{?77wCaq?uj`dd7lllDCGd?ed&%5k{RqUhiN1u&?uz@Fq
zNkv_4xmF<uz?T3Rp6xNF5T%O}(Z;=T72xBIUa=nmY%9x|r@N@*M&qcZo<xtHF_?Oa
zF1a!vjr>cl?vs>;emR1R<$tg;*Ay<V#f!wRQz%qNBZlVHFu*nL9%5U{m0>p@rl=ik
z=x2<DREGik9u=4@Y%Eekt_pE0+QU2oAJ}tFU)uZDF1G-h(D*jZy9JCh@`s8N*i>Hk
zJqsM%++e|*+#camAiem6f;3-khtIgjYmNL0x|Mz|y{r{6<@_&a7^1XDyE>v*uo!qF
zBq^I8PiF#w<-lFvFx9xKoi&0j)4LX~rWsK$%3hr@ebDv^($$T^4m4h#Q-(u*Mbt6F
zE%y0Fvozv=WAaTj6EWZ)cX{|9=AZDvPQuq>2fUkU(!j1GmdgeYLX`B0BbGK(331ME
zu3yZ3jQ@2)WW5!C#~y}=q5Av=_;+hNi!%gmY;}~~e!S&&^{4eJuNQ2kud%Olf8TRI
zW-Dze987<Ep(e1WRKI|@Jy&6ah2?#Q>Il<^!hCO{AR5tLW{F1WLuZ>nhPjke@CSnN
zzoW{m!+PSCb7byUf-1b;`{0GU^zg7b9c!7ueJF`>L;|akVzb&IzoLNNEfxp7b7xMN
zKs9QG6v@t7X)yYN9}3d4>*ROMiK-Ig8(Do$3U<Qx6_~&x4muKM7GIi?xqFM9bm$N9
z#ze4ENG#9H&h^cus4XL3-?GKO9{iJ@&Va8SUG@C6*P0WGjNj5=YpI9rfms4rql;R`
zaDToC?<qV4voyYA2A<Z)IzS$)GMPeQLqyW*S0S%v0iytIX9PpAFOUXkH2lc|+N4iO
z1h5RtQ+&FD@|rHV+H&b20qiLt>I&E}z!vcH2t(VIk-cLyC-Y%`)~>Ce23A=dQsc<(
ziy;8MmHki+5-(CR8$=lRt{(9B9W59Pz|z0^;`C!q<^PyE$KXt!KibFH*xcB9V%xTD
zn;YlZ*tTukwr$(mWMka@|8CW-J8!zCXI{<pR8OBi-_H?FfA)@2XB?(9yoWQ__Z_lr
zeuIIdY}4-^pM-ftYI`S08*}cGCw>P1-&=<B^P5d1)YJk^PW05ogC|Bdkjfg9uR{xP
zHcZUdWCT8bPfNC#O%hKtj9d2Hc0YH&znsX_@_kUDRL2|3x^Fv*tcGwbJzmTYiqoj0
zRfIn+FRhAB9~tuITO69xq_LVntt^TU%&|bRLqEzgavO>wSvZf&%9SZ7m`1&2^nV#D
z6T*)`Mz3wGUC69Fg0Xk!hwY}ykk!TE%mr57TLX*U4ygwvM^!#G`HYKLIN>gT;?mo%
zAxGgzSnm{}vRG}K)8n(XjG#d+I<su=uH~+nz!ctFY7^qIE<|rut@=Lcgc2xFld(*9
zs|yrxP&}#v4{LU8qAk^lu@bX}_!mF={O^YR+|?QD%b%o6xX>yAFnozhk|uwiey(p@
zu>j#n4C|Mhtd=0G?Qn5OGh{{^MWR)V*geNY8d)py)@5a85G&_&OSCx4ASW8g&AEXa
zC}^ET`eORgG*$$Q1L=9_8MCUO4Mr^1IA{^nsB$>#Bi(vN$l8+p(U^0dvN_{Cu-UUm
zQyJc!8>RWp;C3*2dGp49QVW`CRR@no(t+D|@nl138lu@%c1VCy3|v4VoKZ4AwnnjF
z__8f$usTzF)TQ$sQ^|#(M}-#0^3Ag%A0%5vA=KK$37I`RY({kF-z$(P50pf3_<kT*
zu`s$>20YTr%G@w+bxE_V+Tt^YHgrlu$#wjp7igF!=o8e2rqCs|>XM9+M7~TqI&fcx
z=pcX6_MQQ{TIR6a<EerAAlkb)G^0$baC*tO(ycXzU0DmM83ruVs#2~=MFE#!!@jm(
zza|i2_2T@P<~j(R6~06cLW14iRP_SHz{-R%KQd3+A}<-iYz8nW#YK8kur2(c!C1}9
zL6X9`9Wc(<GPrs01Jd(MdV}iM6OieZ64HwmB~*qo(TfICd;&yxM3NTWUkRMO8yxq#
zG_bBGM>0*~xdgFvs<2!yaA1F*4IZgI!)xnzJCwsG&EElg_IpFbrT}nr)UQy}GiK;(
zDlG$cksync34R3J^FqJ=={_y9x_pcd%$B*u&vr7^ItxqWFIAkJgaAQiA)pioK1JQ|
zYB_6IUKc$UM*~f9{Xzw*tY$pUglV*?BDQuhsca*Fx!sm`9y`V&?lVTH%%1eJ74#D_
z7W+@8@7LAu{aq)sPys{MM~;`k>T%-wPA)E2QH7(Z4XEUrQ5YstG`Uf@w{n_Oc!wem
z7=8z;k$N{T74B*zVyJI~4d60M09FYG`33;Wxh=^Ixhs69U_SG_deO~_OUO1s9K-8p
z5{HmcXAaKqHrQ@(t?d@;63;Pnj2Kk<;Hx=kr>*Ko`F*l){%GVDj5nkohSU)B&5Vrc
zo0u%|b%|VITSB)BXTRPQC=Bv=qplloSI#iKV#~z#t#q*jcS`3s&w-z^m--CYDI7n2
z%{LHFZ*(1u4DvhES|Dc*n%JL8%8?h7boNf|qxl8D)np@5t~VORwQn)TuSI07b-T=_
zo8qh+0yf|-6=x;Ra$w&WeVZhUO%3v6Ni*}i&sby3s_(?l5Er{K9%0_dE<`7^>8mLr
zZ|~<y&-g)~txDcjmw~CDXEPUCj(!3kY~@IQaXl0)+%b5&Y?+~Wa*LtayngeKl0gR2
zWSheF+&IbkBG<*mqT`}~I@WLlB@30ju(e^JkaW9W6|<#tM`e^}3);+iAItViV&wB#
zO`W^b1H3h$Hzv(58Ribb-Xh1k%qv7UK2J5l^fgp7_sBjE)_|1fO^WL(vNm6CKEXLc
z?6h^X9|hD1FbuF+;0(pOKIu8jXEKi%ig3C)<$yC6mqD839;oCI6{f?r8h#I)q}7{z
zhdCEvja~GfE&fFAUVvbZM51*`2CP+9d3)CBN9C4Zs6P6Z9~R`mfjXnj{hP08?Qt1g
zLDpcl_8|@m5Xgk|IY;j#AY4Ij2!R9>l#Bi@5}8{<Vy?pt0J|y0{gh3bi6~2-C_tIM
zVh!Pagqv5~mhUyyW>iZ$(d9)!`}@2~#sA~?uH|EbrJQcTw|ssG)MSJJIF96-_gf&*
zy~I&$m6e0nnL<IlXZRPqNxp=jHeJQ42(Z{Y*!;`f3W#rwCyTB1h*-OJ@@e3h&}SQu
zU-t(Z{uSv>z^M2;G|IeUk?s+afSZ){10*P~9W%RtYeSg{Nv5FG<2QaWpj?d`;}<4(
z>V1i|wNTpH`jJtvTD0C3CTws410U9HS_%Ti2HaB~%^h6{+$@5`K9}T=eQL;dMZ?=Y
zX^z?B3ZU_!E^OW%Z*-+t&B-(kLmDw<gr*TZvRf2rfa=KH9`=-+9W9)d*GPjw!lWmR
zNmE-kllByCsai0Tahs#03W>ikb9+F9bj;NFq-XHRB=+L)Rew{w|7p~7ph{#fRT}}K
zWA)F7;kJBCk^aFILnk<BF~6}Ua_B}80{ftECm@?iNgT`lB?3~df1uI+ft9Cw7i;yt
z3<ShU2pmPSJS)Vf*8uEGi-}U9VKstmq^<fVRnV6yeSGxb@aM0~`8;RI!)TH{dMd$T
zS<*v=bgkcnOdnp>V^EMs=B~#qh*RG2&@F|x2$?7QTX_T6qL?i$c6J*-cNQC~E6dro
zR)CGIoz;~V?=>;(NF4dihkz~Koqu}VNPE9^R{L@e6WkL{fK84H?C*uvKkO(!H-&y(
zq|@B~juu*x#J_i3gBrS0*5U*%NDg+Ur9euL*5QaF^?-pxxieMM6k_xAP;S}sfKmIa
zj(T6o{4RfARHz25YWzv=QaJ4P!O$LHE(L~6fB89$`6+olZR!#%y?_v+Cf+g)5#!ZM
zkabT-y%v|ihYuV}Y%-B%pxL264?K%CXlbd_s<<TimgGAnDShiYe)n)%+e)ctIIBiN
zCdyp?(mnu<>GY5BG*`kYQjao$QHiC_qPk5uE~AO+F=eOtTWJ1vm*cU(D5kvs3kity
z$IYG{$L<8|&I>|WwpCWo5K3!On`)9PIx(u<N-i&JsU?~{x!<le*<Cho@L`rVa4ToR
zlqrqoBV>WAq>bSQTvSW`NqgprBIuV^V>C~?+d(w$ZXb39Vs`R=BX;4HISfN^qW!{4
z^amy@Nqw6oqqobiNlxzxU*z2>2Q;9$Cr{K;*&l!;Y??vi^)G|tefJG9utf|~4x<cZ
zX0P0O4txu3=E3GE&U2m@J>h=r3UjmRlADyLC*i`r+m;$7?7*bL!oR4=yU<8<-3XVA
z%sAb<meJ2Y3ySbt6O+x7lG@C4_*@!j6u>`xe&4RV(2vj+1*ktLs<&m~mGJ@RuJ)1c
zLxZyjg~*PfOeAm8R>7e&#FXBsfU<eMU79O%BS@_5qwkw4T7wgygMtxSp&l=ZdEk5B
zBg%Q(1EGMM#TWgrrn6WAtlk?eIn)E68`DMBOB~f=Xt5D?9G8fOPMg)3mJ~jk+$=Dw
zZPEKM1TNUI%fmqSr@^O<v~HmJAfIt9A1nh$j2K$G>_?azU=uxBm=E6z7FSr7J>{XY
z1qUT>dh`X(zHRML_H-7He^P_?148AkDqrb>;~1M-k+xHVy>;D7p!z=XBgxMGQX2{*
z-xMCOwS33&K^~3%#k`eIjKWvNe1f3y#}U4;J+#-{;=Xne^6+eH@eGJK#i|`~dgV5S
zdn%`RHBsC!=9Q=&=wNbV#pDv6rgl?k1wM03*mN`dQBT4K%uRoyoH{e=ZL5E*`~X|T
zbKG9aWI}7NGTQtjc3BYDTY3LbkgBNSHG$5xVx8gc@dEuJqT~QPBD=Scf53#kZzZ6W
zM^$vkvMx+-0$6R^{{hZ2qLju~e85Em>1nDcRN3-Mm7x;87W#@RSIW9G>TT6Q{4e~b
z8DN%n83FvXWdpr|I_8TaMv~MCqq0TA{AXYO-(~l=ug42gpMUvOjG_pWSEdDJ2Bxqz
z!em;9=7y3HW*XUtK+M^)fycd8A6Q@B<4biGAR)r%gQf>lWI%WmMbij;un)qhk$bff
zQxb{&L;`-1uv<!Unl~Y>aCE7Fm*83^0;!QA5-zeSvKY}WjbwE68)jqnOmj^CTBHaD
zvK6}Mc$a39b~Y(AoS|$%ePoHgMjIIux?;*;=Y|3zyfo)^fM=1GBbn7NCuKSxp1J|z
zC>n4!X_w*R8es1ofcPrD>%e=E*@^)7gc?+JC@mJAYsXP;10~gZv0!Egi~){3mjVzs
z^PrgddFewu>Ax_G&tj-!L=TuRl0FAh#X0gtQE#~}(dSyP<a8`o1qYX&B$5>O=@7yd
zNC6l<clAx3O3@a<LB2`9Fw(6kTH2y{(PQ8Xy4XFR0|)dgRB7oTri5>_?zs_u5&x8O
zQ|_JvKf!WHf43F0R%NQwGQi-Dy7~<x)tZ#=V~Be^Zc!Y~@6@<%snVBkQxwTo5x0`E
z#gimVo$wNZs({agM0#vX$3ivZBc#i$E%mb~I-^M+w!gE01e0%}n1qOi9A3Z<ZJe37
z2Z*yG2W+iY2P#c};mhfLc~rFVw)4Je)QnZxDw5wvLup|wd%@P4J*tGP9yF1%Nll-@
zlO1hF2n*2K@2Zu{`CQPt46~lch;D>PGZ@KRKMp?kxlaLAV=X{UkKgaTu2!qzPi8aJ
z-;n$}unR?%uzCkMHwb56T%IUV)h>qS(X<EpfLtJYhILPy8sW%(9OlK128bkhQ-#BL
zVMG_gO7R`(7jS=<1ehw;*`&(cjBT66@$i~b&l@n0xao!&{}w0SC+)|=y8jd%Zy2H@
zWBEqpyS|mllmaeA;dIAUTbBDCP@K?Ji9y)BM*@uV70w6-+JLqNGt$tJxi$dY?2_XL
zx@z%Vb(YkK^=I>i<zZShICE5HaaJv#-(t%csf^j}rnT`qq#k+2M`Xh^Wr?@u%)lO|
z4ZQQR$;A{D>uRLh3fdlr!Cri|{fZf0x9GVYUOlsKgxLA7vHrkpQddcSsg4JfibzpB
zwR!vYiL)7%u8JG7^x@^px(t-c_Xt|9Dm)C@_zGeW_3nMLZBA*9*!fLTV$Uf1a0rDt
zJI@Z6pdB9J(a|&T_&AocM2WLNB;fpLnlOFtC9yE6cb39?*1@wy8UgruTtX?@=<6YW
zF%82|(F7ANWQ`#HPyPqG6~ggFlhJW#R>%p@fzrpL^K)Kbwj(@#7s97r`)iJ{&-ToR
z$7(mQI@~;lwY+8dSKP~0G|#sjL2lS0LQP3Oe=>#NZ|JKKYd6s6qwe#<Anf0<kXN`R
z5lnAdra^vwG(T7cLsTWNRURHI4_OuFP<tc0rV%hLL$|;t?1Z|9Pw@&<N%-9r64fj<
zNinfK&F{Ul6`)k;VjEVt#9IR&P-@C1jy<1Xo=!Z!f||>_6Xz_^L4PJ5TM_|#&~zy=
zabr|kkr3Osj;bPz`B0s;c&kzzQ2C8|tC9tz;es~zr{hom8bT?t$c|t;M0t2F{xI;G
z`0`ADc_nJSdT`#PYCWu4R0Rmbk#PARx(NBfdU>8wxzE(`jA}atMEsaG6zy8^^nCu|
z9_tLj90r-&Xc~+p%1vyt>=q_hQsDYB&-hPj(-OGxFpesWm;A(Lh>UWy4SH9&+mB(A
z2jkTQ2C&o(Q4wC_>|c()M8_kF?qKhNB+PW6__;U+?ZUoDp2GNr<|*j(CC*#v0{L2E
zgVBw6|3c(~V4N*WgJsO(I3o>8)EO5;p7Xg8yU&%rZ3QSRB6Ig6MK7Wn5r+xo2V}fM
z0QpfDB9^xJEi}W*Fv6>=p4%@eP`K5k%kCE0<R2h_PEom0LB7a~l&d1S3a77Hw2%_q
zqH(%vVfZpV_?Y3fF(xKF3+bm0z%8-Lz=1SYxL60R*CBzCSqTVx-`Lb%(u0^@jiL81
z>YF2E<Ezn!7fLCH??-dr?nIFY1<!~lO!ZTqU0)76!oysNs=YTeKF$}l9H5Qod@f10
z!aQmamt|K#Z}Q{gbm7MaqZN;@^eN-4gx^Jw(=2bDOWHR7F{^JWJp+>u5L!D<x34Y+
zc>M1ZY7wh`kgh<M{Gp`d0HdTLKw%XhgHm3=&oCYCnpM*NH+F)MqI##k#3U_iO9`0q
z6eX1$A7~_~dR%;g_==*9=d|P=D;@b~;Q(b^fCex+4qDqp!@y?PN`m^!qd2mod%?QO
zAOHiBwz31qA@k|Dc#@wM_-GEebzK6rovctQS1Yr{ec1A>C^NwxrL}90dRXjQx=H>8
zOWP@<+C!tcw8EL8aCt9{|4aT+x|70i6m*LP<O&^G|2#sCbRkD2k+TMRR0ESMhh$uB
z*bNRR4O)Jlw+XIMt<^OQbKV(NlJC#wAE{$4sZ@z$2JKmkBdnoCpxU|i*#t_vdIxUR
zsrIVZW&8NDF`tgEjviFbbxqyht^v0`uctQnOV7YdieqP92j^|`{@wi!72f2@Wc-yz
zk##%1&ZM-zR~q){3am^@AFF~q)g$JlQcan7O}VDbb9Y1IVj`B__M(Fi^sL;^apGmg
zk5Yf;d|4xJO4lFRfEgmQ^SUc#36*8iZ>*lhp;kGr5f#OwRy`(60LK@rd=to5yk^%N
z6MTSk)7)#!cGDV@pbQ>$N8i2rAD$f{8T{QM+|gaj^sBt%24UJGF4ufrG1_Ag$Rn?c
z<kbZlPrl`GIaLM&F`-vvwlO8^B!`m7qu6<C&$N}X=f&Yaws1}AUo3hgy~3AAtCrm^
z)OL<F1<^A^!C5+1i7c|I+_lBiGw^yRyuztGyCOPS&q|4v2^o4e<-=~2%^f=ub~UF~
zl?&3FNjg@Ye=w{y^o#H(Cg(6t+|Fc3M*pv?vGSIi=o=jbB%1oaQk7aeJ|N#l&E`UP
zgMDC)-A1>zICg9`ICT>9N_2vqvVG#_lf9IEd%G5gJ_!j)1X#d^KUJBkE9?|K03AEe
zo>5Rql|WuUU=LhLRkd&0rH4#!!>sMg@4Wr=z2|}dpOa`4c;_DqN{3Pj`AgSnc;h%#
z{ny1lK%7?@rwZO(ZACq#8mL)|vy8tO0d1^4l;^e?hU+zuH%-8Y^5YqM9}sRzr-XC0
zPzY1l($LC-yyy*1@eoEANoTLQAZ2lVto2r7$|?;PPQX`}rbxPDH-a$8ez@J#v0R5n
z7P*qT3aHj02*cK)WzZmoXkw?e3XNu&DkElGZ0Nk~wBti%yLh+l2DYx&U1lD_NW_Yt
zGN>yOF?u%ksMW?^+~2&p@NoPzk`T)8qifG_owD>@iwI3@u^Y;Mqaa!2DGUKi{?U3d
z|Efe=CBc!_ZDoa~LzZr}%;J|I$dntN24m4|1(#&Tw0R}lP`a`?uT;>szf^0mDJx3u
z6IJvpeOpS$OV!Xw21p>Xu~<j_Of`?i&1Y`>MZ(Nas5Iim-#QSLIYSNhYgx1V!AR>b
zf5b7<W!s$(gS4VH1kyzJoZ^(hV%J?suaCZLpXgV{{xF&BNv#&x4NVE{ARYqj2iU(+
z_h&b$A{cOJBDvz}60OK+U?|+WUp3DacWx;+gK7@=d%%fT3p4_)fGEK?Uq5l2sg}-|
z4>O`ITTvW5z%X8|7>&BeEs8~J1i47l;`7Y#MUMReQ4z!IL1rh8UauKNPG?7rV_;#Y
zG*6Vrt^SsTMOpV7mkui}l_S8UNOBcYi+DzcMF>YKrs3*(q5fwVCr;_zO?gpGx*@%O
zl`KOwYMSUs4e&}<bH2!=YCfBQLA|t^M=$T-j5>eM#FhB3<C>(RIDJ9ZRn6NN{2Nf+
z2jcz%-u6IPq{n7N3wLH{9c+}4G(NyZa`UmDr5c-SPgj0Sy$VN#Vxxr;kF>-P;5k!w
zuAdrP(H+v{Dybn78xM6^*Ym@UGxx?L)m}WY#R>6M2zXnPL_M9#h($ECz^+(4HmKN7
zA>E;`AEqouHJd7pegrq4zkk>kHh`TEb`^(_ea;v{?MW3Sr^FXegkqAQPM-h^)$#Jn
z?bKbnXR@k~%*?q`TPL=sD8C+n^I#08(}d$H(@Y;3*{~nv4RLZLw`v=1M0-%j>CtT(
zTp#U03GAv{RFAtj4vln4#E4eLO<W2JC=!fExpZr3MTUIkw8`fkw=p#&Y_VE{op>vt
zs;=`m&{S@AJbcl1q^39VOtmN^Zm(*x(`(SUgF(=6#&^7oA8T_ojX>V5sJx@*cV|29
z)6_%P6}e}`58Sd;LY2cWv~w}fer&_c1&mlY0`YNNk9q=TRg@Khc5E$N`aYng=!afD
z@ewAv^jl$`U5;q4OxFM4ab%X_Jv>V!98w$8ZN*`D-)0S7Y^6xW$pQ%g3_lEmW9Ef^
zGmFsQw`E!ATjDvy@%mdcqrD-uiKB}!)ZRwpZRmyu+x|RUXS+oQ*_jIZKAD~U=3B|t
zz>9QQr91qJi<sty>hg9j9rWHww{v@+SYBzCfc0kI=4Gr{ZLcC~mft^EkJ`CMl?8fZ
z3G4ix71=2dQ`5QuTOYA0(}f`@`@U<#K?1TI(XO9c*()q!Hf}JUCaUmg#y?ffT9w1g
zc)e=JcF-9J`hK{0##K#A>m^@ZFx!$g09WSBdc8O^IdP&JE@O{i0&G!Ztvt{L4q%x&
zGE2s<zC{NKu8NO}E6}a-)2AXw*VFhsg!|B9v|+S86E+_Sc*Ud^OvwvTfLqQVdOn|@
zN!5IQG|)+M0>!RVi6ZN9)E*(c33HuMf7#X2*VPVThdmrVz-Fyqxcs<k=FMvlffDQ+
z1}^dfltdS*U4kR=<1n-YWaPlaYLEnHy;!@y#i4z>&aI4DvP#bfW={h$9>K0HsBTUf
z2&!G;(<vtQI;LqNW~Xf?xS*27tM?(|+lSBVJBc-TS<Yl{0MOO9mU;e!Mu{ZgP;SH>
z^oOVIYJv~OM=-i`6=r4Z1*hC8Fcf3rI9?;a_rL*nr@zxwKNlxf(-#Kgn@C~4?BdKk
zYvL?QcQeDwwR5_S(`sn&{PL6FYxwb-qSh_rUUo{Yi-GZz5rZotG<e?XQbfL-zzd~}
zQxfAiC<F?(dz{hU)N3muv#7R9aey2eb^k=U<DLpU?#}Y1Ht%&uL~r*Ou+8=~BLl@R
zL8Gi7Ou^b!B4$lJC{-^na|TDZ&C1LADI(lBfr27zJn42_W)bgJ&fLrWWF}nM8obDJ
z22n19%fxa9ike9*iDkC@D6gVRVu4+5RW$uB+DfHJQ`Q|1lS>4R<+!PfsGg`MVtomw
z<EW-Dg15ZwzU<kY&O~_@$Z&${pfnDa5<AY;L26n@f~n{^)0Mp-b7*n=NORAvuWEI`
zSs#}HG0nlMM(&MYTb55ET-gk^92zM(;P^4ZPMYQVg86o?Jv;@`3yG$ydNvTjjVTj_
zU$}ghm{_Gr77AUTR<b;}uu5?lgTo@fi^=YFOhuYvZH&XwqO}eSG!^}5uO8bK+maL}
zB~Ra#jpO(8s!B%@a074GD2+JQ!dWnCQc}1b#KY-lIwrF)5u4ds^|7CU;+r=oEU}z<
z8x1?k;)*8O61_FYU2Z39r7+13>5kzOZJrh(#rMR_87KeP0Q=#^5~r_?y1*kN?3Fq%
zvnzHw$r!w|Soxz8Nbx2d&{!#w$^Hua%fx!xUbc2SI-<{h>e2I;$rJL)4)hnT5cx^*
zIq#+<g_FC1I$r=}e#(WAtb)=04{PCNe37!`vjX&#WcGz#ba}ji2qD!XM{NOIYh}v|
z3H*84VgoU%V3v@1RBj98(_roA0>{3;Leun3Xo=C(XVjt_z)F#PIoAw%SqJ=~DMQeB
zNWQ={d|1qtlDS3xFik}#j*8%DG0<^6fW~|NGL#P_weHnJ(cYEdJtI9#1-Pa8M}(r{
zwnPJB_qB?IqZw5h!hRwW2WIEb?&F<52Ruxpr77O2K>=t*3&Z@=5(c^Uy&JSph}{Q^
z0Tl|}gt=&vK;Rb9Tx{{jUvhtmF>;~k$8T7kp;EV`C!~FKW|r$n^d6=thh`)^uYgBd
zydgnY9&mm$?B@pKK+_QreOm?wnl5l}-wA$RZCZukfC$slx<D0d<J+Cw;}=Cm{CH*G
z3I#Y*VHC++-6WTU13^Tb{+v0A5|5;&p98U>bqv9uKq0o^QeSID96{Rm^084kZ)*`P
zk))V~+<4-_7d6<~)PL%!+%JP`Dn23vUpH47h~xnA=B_a}rLy|7U-f0W+fH`{wnyh2
zD$JYdXuygeP5&OAqpl2)BZ|X){~G;E|7{liYf%AZFmXXyA@32qLA)tuuQz`n^iH1Y
z=)pAzxK$jw0Xq?7`M`=<A}Zu*B(WK%s9cbyOinKSRv#SgfRAEEObC$uiHc$R8I~sX
zE@F(?ZZE*)#ZciBY-TL42=MqJO{IDCL~VDpQIM1-;m<S2{d@ee(h&3E+{D^YQ?(RA
zsaHCU#>kN2<PEl==iW6b{LM*AQAaNkRc7NDXcmslS<B{l8>WeQFhz)p;QhjbKg#SB
zP~_Vqo0SGbc5Q;v4Q7vm6_#iT+p9B>%{s`8H}r|hAL5I8Q|ceJAL*eruzD8~<gkwq
zvx^l#{8v+M0LC~Y2*bE0DEUAXMgh}?JStrO9?u9X(uTZCUYB7X;Xob60c{YKpNb$A
zc;|Fq#0e5sI?RG9K%{SS)-mChed+5|5p)}#T=P?pLne#(u(j=td?xOGQ`h}}XFPh(
zlK+}c-Om4Y60f46T8zD@)L8uQ3`>_m>fg26HvLpik&#{3Zd#|1C_>l&-RW2nBBzSO
zQ3%G{nI*T}jBjr%3fjG*&G#ruH^ioD<PZ?iS!V3_Bib@z*xEykgCY!@q63$4+>M>0
zb0vSM8ML?tPU*y%aoCq;V%x%~!W*HaebuDn9qeT*vk0%X>fq-4zrrQf{Uq5zI1rEy
zjQ@V|Cp~$AoBu=VgnVl@Yiro>ZF{uB=5)~i1rZzmDTIzLBy`8Too!#Z4nE$Z{~uB(
z_=o=gK<S)17$`VnQy;G<3X(el`nw>uhVpy&`}<v<r!A(AB2s5JZ9<#XokF-4(v!Ny
z;aH`__4bm9O+%UZR^53b{n6JWE8A8wcI7wbfOxFQDV1P}HVpO_4(+LolUbk6N;bF_
z(r&2MBvBe-W}0x#>-c&f%**M&(|;2iy+nZy2Su}GOAH_GT9z`!ogwn$+Bi&1ZhtPF
z<FQ)JtUVOSX?C$RHNR}`2=&|1sQ|UQ+>VS&LO5#Bq}cew$kvE7*t8W^{{7&7<g9Jf
z53}%j>WaF{upy0mj*K&xbnXvSP9V$6m6cesHGC!&Us36ld9f*Pn8gbJb3`PPT|<Fb
z|1-Xv{%3qi_m%`Q!Iaxc8q*OBBUcCt*A&~l2e{UH-(u{U=5LEXU9{BLbzpXc&}|;r
z%5*mPUnOBaSvQ#Jb=vE7TC}ohwYt^Tcnc`M3FMOLw3sc}eY~7+-M-Jb_dH}j2*l3T
z`d%ikhFij&rqSo{xLb}U0z|w{l8nrBI&_!^0oMQ=9s!{FiR)}rwdu|bZkl^kR(>ZG
zri2?uIu09i>6Y-0-8sREOU?WaGke0+rHPb^sp;*E{Z5P7kFJ@RiLZTO`cN2mRR#Nz
zxjJ##Nk+Uy-2N-8K_@576L(kJ>$UhP+)|w!SQHkkz+e62*hpzyfmY4eQLZtZUhEdG
zIZluDOoPDlt5#iw+2epC3vEATfok^?SDT`TzBwt<L&)-&a(au{C~U;V4AtmQ>gKjY
z>ZImbO)i~T=IYAfw$3j2mF1Cj*_yqK(qw(U^r-!gcUKvWQrDG@E{lEyWDWOPtA9v{
z5($&mxw{nZWo_Ov??S#Bo1;+YwVfx%M23|o$24Hdf^&4hQeV=Cffa5MMYOu2NZLSC
zQ4UxWvn+8%YVGDg(Y*1iHbUyT^=gP*COcE~QkU|&6_3h<QOP!SXtKnFZ(t6QiNz*>
z-GOS6-@o9+Vd(D7x#N<VZFr-?w0u0<s3ZF(&RZgVV0jd(9Fm@KCZpz*28|E31YxOS
z-V4|fS*}%&$;-03ZgJ||hwgX*MLnYGa5UO=kN(8OrY^6V#T*KLm^4=wz0sdB7=;&t
zhHGp$YQD=Oi@!vD+OIFiqU?nyp>Yt{Bvx2`P&ZuCx#^l0bR89Hr6V<YJ*Pbj)-qkc
zv<0$8-MTLHJoaT@QrzeUztAl!w75-*1p{29x7L$=p8u4M6)2M>m<||c3Waq(KO0eZ
zH(|B;X}{FaZ8_4yyWLdK!G_q9AYZcoOY}Jlf3R;%oR5dwR(rk7NqyF%{r>F4s^>li
z`R~-fh>YIAC1?%!O?mxLx!dq*=%IRCj;vXX628aZ;+^M0CDFUY0Rc<1P5e(OVX8n-
z*1UOrX{J}b2N)6m5&_xw^WSN=L<b&156RUh1DwBDzk$u$#CR}Gk^gq0PISm*L(jv?
zNQiS;LWLg@`(YmKDvhG_H=puK_*;-75WG4Lab`lMg}6)Ro%(e*Z8J5!^pyiCsf(dv
z2on?K8Lxe9Aja=INHwxYl$%EAw={qj+To~1h_M=|`UdS^jCPg&39Y|K3<X{k6`Utq
zu89M{SE(>p$I$T>f8K6|J_bj%ZsIYKNs1$TFt!RuCWF48;98`7D(XPVnk+~~i=U$}
zR#;!ZRo4eVqlDxjDeE^3+8)bzG_o~VRwdxqvD^HNh#@o>1My$0*Y_`wfQ$y}az|Uz
zM47oEaYNTH?J^w9EVNnvfmmbV+GHDe)Kf;$^@6?9DrSHnk@*{PuJ>ra|9KO!qQ-Fp
zNNcZB4ZdAI>jEh@3Mt(E1Fy!^gH-Zx6&lr8%=duIgI^~gC{Q;4yoe;#F7B`w9daIe
z{(I;y)=)anc;C;)#P`8H6~iAG_q-4rPJb(6rn4pjclGi6$_L79sFAj#CTv;t@94S6
zz`Id7?k!#3JItckcwOf?sj=Xr6oKvAyt1=jiWN@XBFoW6dw_+c9O9x2i4or?*~8f&
zm<>yzc6Aw_E-gsGAa`6`cjK~k^TJt(^`E1^_h)5(8)1kzAsBxjd4+!hJ&&T!qklDN
z`?j#za=(^wRCvEI75uE^K#IBe5!5g2XW}|lUqAmdmIQb7xJtP}G9^(=!V`ZS_7#RZ
zjXq#Cekw>fE*YS-?Qea|7~H?)bbLK;G&(~%!B@H`o#LYAuu6;-c~jFfjY7GKZ|9~{
zE!`!d@@rhY_@5fDbuQ8gRI~R_vs4%fR5$?yot4hDPJ28k_Wzmc^0yzwMr#*(OXq@g
zRUgQmJA?E>3GO=5N8iWIfBP{&QM%!Oa*iwTlbd0Fbm*QCX>oRb*2XfG-=Bz1Qz0$v
zn#X!2C!LqE601LEMq;X7`P*5nurdKZAmmsI-zZ|rTH;AFxNDyZ_#hN2m4W(|YB64E
z470#yh$;8QzsdA;6vbNvc95HLvZvyT4{C>F(fwy&izvNDuvfO1<f=DficjH^Ptk=}
zvKKKL1@~P^ao{l`XvpGhd0nnNocBP}t45`-1b1UFhCUXuTeXRN_=Dn&OM_57pS&UM
z<}D_GQ2GngSG{l*c-DGs>Z;`Ss#4a_c6pm*{0t|_i9z{@84^lffQa5zG4<{(+p5-S
z^>lG-^GJR#V>;5f3~y%n=`U_jBp~WgB0cp;Lx5VZYPYCH&(evw#}AYRlGJ>vcoeVr
z3%#-QUBgeH!GB>XLw;rT&<H4u)0+b2gp`B<SS1EU2B<;6FO0V){?53`ii~RP0^+xy
z<G;h8dy{UFKk?BD<-fsiI`+URBzNO}4H3XBa**eIg@56!KOqw&qb?J_tx9{U@N@HC
ztXVt~`#<-JFs+s7Dh@v8f+c_FeOJ(jLRWyj0{dbKP-DdhC!^@mHB<K82)LGN!E9iL
z2t{`qx)ykVE}m6(@)m*bOho;W5Lh5qc_#4D%3<IjyEhf4SO-KM;W}MQ0lG+QaQd;F
z6pPpOTpLpaA)ZX3Y%3n)SqUwMw>oMI9ynP;leDwh4O2uM!oIWo&Qxk{^9#nX&^3GJ
z(U~5{S9aw@yHH^yuQGso=~*JOC9Zdi6(TFP+IddkfK5Eu9q;+F9?PPNAe-O;;P_Aa
zPJ{Dqa1gQb%dZ|0I{#B0(z|r(qq!A4CxlW92-LwXFjYfOzAT1DDK`9rm4AB~l&oVv
zi6_{)M9L1%JP}i52y@`!T9RB~!CRel53wl?amNHqcuElq%hn)|#BPvW5_m51RVb|?
zXQ&B*eAD}}QamG>o{?i~usG5<d^#sT9$FSql@}}40+?obf=&rIT|~3@_jk&b;f8tU
zg^liav>X6IDa3+Xkb8w%7;C8|Cln70biA+ZH}fxkH^Wei$vZPnuqIT!Mmy26;mLfU
z3Bbv4M^vvMlz-I+46=g>0^wWkmA!hlYj*I!%it^x9Kx(d{L|+L{rW?Y#hLHWJfd5X
z>B=Swk8=;mRtIz}Hr3NE_garb5W*!7fnNM{+m2_>!cHZZlNEeof~7M#FBEQ+f&gJ3
z^z<nug~GvKX^YC6t1KJ-As1VD8M(-W&=(T`J@+Nrc5Ym_dy|Eo70Z}P(l@7Y%dQZh
zk7p}j2&d$wI-fgROr|*m;cR+PGJKJk$+9p)oslb16vuOM*TNzmKDAhOa?M~231`L#
zMJ!VZBjBzj7We2@;Ke$w_lpX*ggj5E&yX4*GQ%4_FMjtfv^umnN?Cr7TP=wstL*YY
zQ!TAqEcvdc?V{CRnzORdu|VO6taAYr#G*V9>v*t?XV)jQi%0-Ra|ISiW-fx)DsK->
zI}Fv%uee$#-1PKJwr=lU89eh=M{>Nk7IlJ)U33U)lLW+OOU%A|9-Lf;`@c*+vX{W2
z{{?0QoP!#?8=5%yL=fP%iF+?n$0#iHz`P;1{Ra6iwr=V7v^8<sopg0Y@yZ7NO*6=q
zFFlApm-2#`(9hEp&IVc>;NoLJ5)QxIyIx>ur?lMwV=mBo0BA?28kMow8SX=<Y+)==
z;R=1CQ>Ax5L%S~x4+EQi#Ig`(ht%)D(F#Pa!)SiHy&PvUp32=VtAsR|6|NZR@jkad
zX^aEgojf9(-)rNOZ=NVA&a;6Cljkb=H-bY9m^_I)`p<N16r`XAE_(4fS-+dl!Kd9K
z>BHB16QW)sU27zF13ypefeATJc1Wzy39GrKF{UntHsIU59AdXp?j{eh2R)IbU&omd
zk6(qzvE@hve1yM6dgkbz>5HDR&MD~yi$yymQ}?b;RfL$N-#l7(u?T^Wlu+Q;fo|jd
zBe^jzGMHY(2=5<P2*tw)ntAE!i|kvrfQiOA_7D5amQ`g(2*cXve*N10PW_ErW^V8L
z1MqDEjW6kdkAOC-z}uc?m@V^a5B40*PRN+-f=MjuJ@4}mlCv);PKIt(DEf@zhUnJR
z!oyb^SGMdOXtqePw<6%)O~~F2FTY<jX!>l?bEIh+zgE$1TEQ&!p3fH;AW`P?W<ESK
zH#g7ZdfetwZ2v=9o0mn(AO?csJT~Gn*&Y}OjOpbUo}~Hm@;x|NO+0T%-0Axc&%7lF
zJ1pX`Z^AlfV3?dm5G@PwxK_i|&Of7xpxF?vju^4*EO?%y7Xv=e>5Hkj3eJnT>dqg!
zf~}A*SZU5HHDCbdywQ^l_PqssHRlrySYN=`hAv2sVrtcF!`kyEu%XeeRUTJU7vB%h
zY0*)N$mLo6d=tJfe}IPIeiH~>AKwCpkn&WEfYgl?3anq5#-F$6$v-(G_j0*S9mdsn
zg@ek_ut4(?+JP_9-n`Yqo<vHf2Ir!Nonv)J(3x#DWYX;y3-f=3t|>D(gAz+Ttm1#t
za96D}oQR(o=e8wwes19_(p4g(A1vSGwPAp~Hh3hh!fc>u{1E^+^}AzwilFVf6^vbL
zc&NnRs`u)N-P|Cu4()yTiuE{j_V&=K?iP!IUBf~ei2}<whJD5S_DJ}8lyz{d5e0m^
zXp7wURYpf){TL0trzEJmlx>~_KBvUAlXa;R#Wl`gOBtJ$Y5(L))@`riLB)v*r><aR
zw7qT#lC53m61B>9*8VfmQt<PROW2c<^h*+X1{2yJxbMATtPvxHpc+?jPI+FPnVH_&
zfj7uqK><72?+fdwP{BA@?_qo>mN7yzICUCaeG(+>Rb~8wg~6U(P<nm(X&r_f6obUi
z$dKyG&rI&@a%L*(6>)NlDLuhQgjbC}=)HuZgC}0Z-qLX4lJ7^)8~!!*qP0=~`Y_(A
z{@15*ZevZSI^s|OnpCeCwLXf#tgbq8y~R*GB5anmZ;_N!+-3>!wu@NBFCNJ$#y?{?
zMI!?s*=_xA;V&aX)ROxzVW8*de+&P#2zucA|8mksdgCXBsZ*TM=%{L1Tk5LB<Z!KJ
z;lp910!MJ8J~+1dY_Xn{p8}Y~34%%$3)5agk@iGzX6H!Anv5uUI$ciQvmGcbPG)xv
zdO@rD(K-3Ky;MGnIEtGU{Htp(S893+bn<t9@uj!@5B4gD7du9ZgQn18YO4uo06rZq
z#s2-PcRt)y(275pYB#9Hbw&B>_*^@&S?O=ot{h)1xRVSn27&Tk8>rF|6ruzYb;Nq)
z;qvlmrP^SL$mhe4Ai)xpl6Wx&y;z8o!7-+6$qj;ZLXvfR71I@w(R|6lyuP6v-lP&r
z@KK-TEmGQfMmk1c0^fd7!^si}T%b5a2%>T-Drh|^<Hr=j)=_((p;KuSfKido&LV#J
zFFfl}!<eBhy2#i=6c#ra%kZ1$j(EY7lFC*v8!P_^%qy;W8jQ+Q`Z<xkWskY<0J>Cf
z$}qxIv@zxbm<k^RE-aX~4rCWmq?oKl-EW;PQ_l{3e+jJBhWP}gKtyF&I$%ZeMK`+v
z8e^?6J-`{J<4-qj-Ad~A)|HKBJMM+itDqMD(Et|5n|aQ=z$&Sb7P;n0h5a2h`q=X8
zoL#YBVnHjcM6gYTkYUnp7Y*}nB=Og*ah7+WL}S|s!dTMo_mJp5G)1p4pQo%*M<ul@
zTl*h(Spw1r1ddb%$9LJbAQ>J#qjK6Q_aGDe{ciVT20V1lW52Xs!}x(4_j)sUXYdm4
zwYC9FOa;X*c*LxL;xE5ov?|?^7gWXyALy_D2GvDo-8%0-Y%9TkkO_Tcr2qIUg3(OC
z%3wt?<o?7ws>hyn*+e^z%(~2#!2dvMFa$mzgwk1I1X;naFMjXSbnmZ!zd%7u)=cgi
z*0&@Scrl&BDfU(9Pks8#;!~v~r7<YRA|b+cmZ<O+%qVn;SdwH~C-8G;f`4)Dnc*H4
zz^itXa><D&I>~DN{G6WE&_;7i{{a*?oiCao(l%2ruxX0fAt69e2vLgL%Mf_)!*(Tz
zNKW>sW@YB2vBfP>C&L|-pq)Uq^PsG_THu;8iEcqafO?0k$IQp1KyWyOoTxwmKvlc^
zO9$%Tt8;%qQxwy5;CsJ)V}a7I6}SvQ%0_H53Kcqx=m8<MOsI~Wzc<Q!qbFzwSB`D+
z{D9hC-4#S=>3fIzpLSGgfVe^SPdc*xPdciI5dg}#{Etv$e<)gGD=qm0v=!aN@*?$s
zLhzD%4w{vf-g6FHQjG9XyC+4=bewb?Mz%!u8%oP{G9{UJFTLTcCi3R(=Nm&t&Sl(?
zr>pj?=ECdDVa}-g%`LF^1EY@>7d}%VhYpKFS<Su*wlsyNILeDAmwC&4vcc1^J!Zw#
z1{mO|uomzhkc4n!BQAnn>DPH)D(z<fPLgY#uASJ*Dl<~1S0ksFUXxI*v2|bDYL^Mp
z+uux$8f>B+gPe1m7E}W>TiW=8L0&(D&YG=0<&7G4Bu{;-#Ud;-1%Ta9V}U6fyK1YX
z`Rq|i-X(loPZ)M$H%m@j7bGx>uj~y=0)!t#dc|c}+hT%~Sq>fefez0Ul|jOJHta~u
zx7*mV6~Jpt(FkY(pQN91>aFk7VS%Sa^oLaq$*)W?fy`xuFJgH<2s=!Rz}_(qdmdF~
zlr2f=)q_vpi8X;Jq><kv8yxSR^Y*0XEH>5^$GweJ{iS`Khw2f)fsvKpgh;U~13a+9
zfaw}UuGiBy;q10pI^Avb#X3D=k_r(T{N;-xA)OM}2Py5L##<96NU*Sr7GQqhfrPej
z?;B$Bt_sTxuSAPXfTSC{zr?@$$0iHxC@z*5F52j*PG87hh`0w3At8jPf*rjNE~_Gj
z2)fjeUFJ(#l9uWuw&5#@13|AQ1;pdA?EL4YKq0JDR5T8I?aWGxI=J9}vdyH;gQ@iE
z>+UnC2iwT0f80-VuE^bY!N@(}9?bOXyy%rTqSNDN4rO4Zt#(kZwcGgTp&3((F+nsd
ze~B<vkvAmqZQCN{VT^_3N)dA|5@JD*b^OT9w?DKQKQ;>)%K6oP4WX_w1>|QImC;9q
zy}4p+s%^Too2(gE>yo%+yY#F{)phtmNqsJPVQQ0lGR|H9q>aA&AtU4M+EZ%`xvQLb
zbigBOc`dL}&j3er?EOI`!W)N#>+uwp_!h^5FspaEylq!e(FPY-6T3~WeNmZ<$?Y6y
z-!bM1kD7ZF8xl+Pi6fiv1?)q%`aNxn#pK%)ct||L&Xnf8Gu&3g;Of{B8Pt=u`e+Mn
zA(DmU#3cF#Nr7W;X0V4ksFHMcNDAf4G&D8VjLeZ^|5-f$>_|71>P3xuu)?4NJed*w
z6GR_RB5HQLzT(h+`Y?-3esxeue{-Q%b+!&o>IJ<U{K5n=>!#=}#_&q+h<RCdh)3ad
z3u9U*q-7CzJO1;f{W)bBVeSC|{^P$QIIk-CyR3gi5P7Korq0R)C7=#L)o&^V8kh(x
zNN{4}&7CJN$8L7_)db6<z!w~!{P5^-h7S-wszcjbc^U6D3XVpe@7=4Lshiqe0Y5N7
zsFoi-9bHpq0%4HaycQW`HDSXEoRpZA$r%O8D&^MW(rU(>wJga>fkt(*(WdoN5vSta
z#$mMN6}YzYRpaBZ)j)EL91-oL1(|d(>%UclsTUOyXyWM<i=3tzRhCib5fq2Ef#*;)
z!4K=N!LPQ;#t8R?NIyV<J=&|0SM3GNVDlC9rKZ`^G%|W;P3k6I7uja571~nqmtHor
z`gh1ba^|7r%2g9p-h)}xS-a;H1mn;Lmy(6EFMmKhY12ffmjeU0ZfKOR@8x(I{qJ^q
zWjzyb+f<%IGR(jVuT@@|r|AHvml$9>&(hNqLwqtn`!E<P-d=Tz2P)hWLDtC`SMn-I
zloIV)$+_p$q6pB=3{+T$bt&h5MiRm@f))DR4ajXxL~e~d?aZ7D`BA#RIe#z5AdNuY
z4Mu~W1;T&@$^*+^zTgSBFa_%Z+#!Ex?(l(z&?%r3{UE;C#v+0r-0~JQ95Xz5N?r&y
z*034nI1U7@Q9#B{slBKMiyAtxAI4+f8Y--}xBIB$X;zz`!CSfV2z_$UfxvE>>HJM{
zh>M~xa1@*U^cwx-k5QjePr5=B6u*jpJ)C0{C?f7Yga+I^4$TleyX$x&jm9z@c!?cC
z<2kY7)p^+<iaz~w05^&?q!U1}|D10Ixrw;JZn#OjY5bsrsT0c{MzF*BVSiu=+KAdT
zKQuo-KJJbUw`(m-=2HrjxRrUZ<4T2VI}lY0Ghf>W{AXd@l1C09_yB*TG|yzb96BYk
z8Wpj81vB>zcR+qM4m~A44w1n7$fxB$-?MV}S?Fh}c_|2FXg`cZ?750i;Cdl-_nGK#
zta)h)6!<k-ithudu6LI{`^y6%$8{g&DkiXYPz8(kVsF-P5TW&F<TGk7-`qR&|E4Od
zgJr;N;#RT&P%aaBU_tTM95&<ztJNAH6Be>*AsQ-z8caSh)%5JY>_yCeJs~FpAzdY8
zF@SU_hN#~ip5I;UACFzx1v0yf{j97l&)e-=`d#1Kp6A(Kj&HC!%vK!wEdK3HFJ?|6
za;WwUczZ+&<$g!Td^48@lJtfW@doXL#jY6)dK_RDCQAZ}l&OdD+?Yl5-bqpsHZR^(
zF{u_cR(x>u(c4i5f(^8!h6CV0#ZxRFhLlunWiGDLO6yoRb(wV<(P^8=fOU7Hp{AHE
z;Yg%kg@6&tL3Z*IrbkDeQ$%rbalVP39D@LVrC2xSavnTp%PorXPf1DVzHyqjDsDnS
zL=mv0a2s60bHKGQM)ue>npH0SCp;XtZFUzm?R-x7D*(PxMmuJ4J*K2eY&ebe0yQHe
zVG&*qe{pot{PM^xQv`H_rn2FcYOrEN+I#uX^1`Id%J$;Hi2cNCU!0Hlc0TjxLzkss
zHxmC;hQBu5U4J0XflWM;{uH`_47Sg)QyZ{8D&T0;bdc3{^^<=q7P?C_2E-}PQn>*=
z2T5q^J|Q_2+x%Qt`i3m6=6V$)BxIx{2KAFkMb#q`iMCD|L>+}_dYVA$wBr1Zr}YOF
z^MMGO@PHGGh>g|^yF`PvvtDwN@kxt?ClLcG<+murHMz1Asj!$l=b)4{d}SqOJ}>Y<
zSeAyP@ZEcpx`ayIdp>{--UVLYC_cZZURh_!4u2(*#x@Tk(QJa}4BqqZ$6%LhF-HB~
zAcc?$I6KP}IxANcAteEBX$Ys?T=JB|Fnd3*UAO0mYAXCgWf~?7Z_G7G5`H4;S^QKK
zG*2l75vI@DHQC*es>6&|r^#RHKRQ5rwv_l4`!(!I3%)Z$P1f<pp$a6um{0o<xo1(S
zZ`Q0UjI755S`%%J_bkQdMJX=gQ{yGwHkEGhB^jy#Kdw`Bz|GK-!lL;4BMah}BF+x+
z?HI()zNqsZIGTqA_xWDA=-;C4;b|i|!YxXR_7sk5(v2GtT%3GTmNwkY;US<pe8iMx
z?CMVyA>nZ8N@27zyg}54ElO%<W#-o+m4iU~8Q?$4$|V+p5$&IW7v%jYE2kKCPp*(d
zFu#B%8S=B~N(!;YOU;1OEc3s-B5iV$wBx@k(n04f!GdovshST2rB@sL)o7#C00dbz
zXLs^S+aB?68q$l630l4mcvrwQA6iub28Kfn2K|9b`26o(qj%;Gb~*&bcgMHKOZ}{Y
zpH25NG-g&gW}2EiE*s0bJ7k5r6CL;J<)P?02__GA3^V)q7ZuC)FdRFi4Ui^Hu`+ke
zlP$?ecllVihlR$b$u#yp1B#i7to3FBXsOesoC<RET3TvI{~Xg=|3aNr1q=*+q#7{W
z-Y$TNBqW^r8?8@4K)pJ3Mu3uzqB%>SjQ_4uujX)4ta@Gz2)_>4b~vX|rhRIH-eqdD
zL)xaEpW3K|a>daQRRR*_$W&gtrWOsW-IE4VQl3L$3}=-PFU)s@<aWQn^ePgERyg<~
zi9>XG&9+DFivH-;2&w~$ES_nJZJH!?1mO!CnP)Jb{mW9=f`bDpo^PI6i4|YurK)Q1
z^Ys1oHRdr!$X4RuyR%kgp!a*Lz*_AAoJ$EVAdsNCoPA^VZE1pGO@D3UStACE+%vs6
z<P#Iz;tqZ&(cujrDA6_F?Rn}(1d-gA3k(F0-q!_>$io@E>DmB|3VV~GbOt2oc+K;t
zdn3gaFvYz;vRN-+2+Qk{8|O}e86nVck)fZn3sg$j#dLVham{yGkc$I#!HF7mRS%f*
z!+NdzG49K(qaO^SBlp@K@D?|^rAq;8{*@kRc4sYSNQmoy7@_RS_ksWl2T_38h2A)#
ziU2WXWD03(NqS&Mu*?0-iK8X_Z3w`}c7MPv0qZ7iM|L3xdTnR{y!7{#82$}uJCiGT
z<R=!u#|^6|?wiC#sfCopSzEKCR6U6Bw(Q;@s$H1rn`1g@5SxCRm~1fj@s^zS?<3!C
zifc616*O$TXPDdO5kx`EKMG|B6zm3|2vBezn)24W%c37snNJGF8e>qa=8<9L05hu6
z1N+2n<bYn!L1|(Gqhn$6j9K??r7Po{X_%ib8Rqb7TWs5SRq}=2dXRLIAumPFyfCrC
zDiI||Hbl~nD|ql~>7OzT{NEf?gS@eq7@buCDFe9mAxY%THo^b@BHckKK>jg6{@)>n
z43cPs%$Qi0iwyZ+{C491>FRu5+6baJ{&XXXC@Sp+b!Q<l1VW-1h>E|{7_d?lm5K=B
z)myKEcxjFm74+drF|JCYcxdY%ASig#YoRBRUV7An7f-%<D?52!XFl`Zn^^{ShT;3I
zI<T$g4}e~*gKXa&-QyT9VSr&V{tp%o7Tij&#SziJ(&k5ME%t`kBX71Cwyq<7<3_Q^
zHQr(Xa%dr@sn1-1v9SW^7YS~0`f$$GMn;WvV$Re5WJdJgCv=nq{y&U|*odda3tRIk
zzko?E=~Q~8WW~^B-1Zi0*k}NsJlmR1>rqj%PHECbxh#5476cEq@NQL?dI6gUqvS@w
zq!WmD(<!x6<aoxbqX{?AGp1;={PQ8ebhe#@zB4txI_~4KZH2Rd-MI-}g)&O50No<?
zR1OOz8&~sj1_S9zN|F=cSe`)^cpX(nsSlt>aR0{NxItAZCKDCVw=Zu{9WGDu^i?2g
zLerPiOU*HSaXg^3CdOX^F6c9MiHINP339N%)a96`^Z-c#&EogcxMSYo0Cb4{-}q1(
zRrJine`P|6WRkm8u4Ja1QRYq$AR>b7tugd#EsT-VmXN-t!TYjZy}i!uKi6$u>EJ?w
zvdHZg+hp+5ree?>fdJAX)5#Wtm#2M-{~2jfX2{G`)?D6UD1MevdeeU;;HCi}AtJr(
SGW6ptSs!X7{rG*o_g?|vpSEZK

diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties
index e6aba25..b82aa23 100644
--- a/gradle/wrapper/gradle-wrapper.properties
+++ b/gradle/wrapper/gradle-wrapper.properties
@@ -1,6 +1,6 @@
 distributionBase=GRADLE_USER_HOME
 distributionPath=wrapper/dists
-distributionUrl=https\://services.gradle.org/distributions/gradle-8.5-all.zip
+distributionUrl=https\://services.gradle.org/distributions/gradle-8.7-bin.zip
 networkTimeout=10000
 validateDistributionUrl=true
 zipStoreBase=GRADLE_USER_HOME
diff --git a/gradlew.bat b/gradlew.bat
index 6689b85..7101f8e 100644
--- a/gradlew.bat
+++ b/gradlew.bat
@@ -43,11 +43,11 @@ set JAVA_EXE=java.exe
 %JAVA_EXE% -version >NUL 2>&1
 if %ERRORLEVEL% equ 0 goto execute
 
-echo.
-echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
-echo.
-echo Please set the JAVA_HOME variable in your environment to match the
-echo location of your Java installation.
+echo. 1>&2
+echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. 1>&2
+echo. 1>&2
+echo Please set the JAVA_HOME variable in your environment to match the 1>&2
+echo location of your Java installation. 1>&2
 
 goto fail
 
@@ -57,11 +57,11 @@ set JAVA_EXE=%JAVA_HOME%/bin/java.exe
 
 if exist "%JAVA_EXE%" goto execute
 
-echo.
-echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME%
-echo.
-echo Please set the JAVA_HOME variable in your environment to match the
-echo location of your Java installation.
+echo. 1>&2
+echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% 1>&2
+echo. 1>&2
+echo Please set the JAVA_HOME variable in your environment to match the 1>&2
+echo location of your Java installation. 1>&2
 
 goto fail
 
diff --git a/pgjdbc/build.gradle b/pgjdbc/build.gradle
index a6e49ca..5e9edf8 100644
--- a/pgjdbc/build.gradle
+++ b/pgjdbc/build.gradle
@@ -1,3 +1,30 @@
 dependencies {
     api project(':scram-client')
+    testImplementation testLibs.junit.runner
+    testImplementation testLibs.junit.jupiter.engine
+    testImplementation testLibs.bytebuddy
+    testImplementation testLibs.bytebuddy.agent
+    testImplementation testLibs.classloader.leak.test
+    testImplementation testLibs.testcontainers
+    testImplementation testLibs.testcontainers.junit.jupiter
+    testImplementation testLibs.testcontainers.postgresql
+}
+
+test {
+    systemProperty 'username', 'test'
+    systemProperty 'server', 'localhost'
+    systemProperty 'port', '5432'
+    systemProperty 'secondaryServer1', 'localhost'
+    systemProperty 'secondaryPort1', '5433'
+    systemProperty 'secondaryServer2', 'localhost'
+    systemProperty 'secondaryPort2', '5434'
+    systemProperty 'database', 'test'
+    systemProperty 'username', 'test'
+    systemProperty 'password', 'test'
+    systemProperty 'privilegedUser', 'postgres'
+    systemProperty 'privilegedPassword', ''
+    systemProperty 'sspiusername', 'testsspi'
+    systemProperty 'preparethreshold', '5'
+    systemProperty 'protocolVersion', '0'
+    systemProperty 'sslpassword', 'sslpwd'
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/Driver.java b/pgjdbc/src/main/java/org/postgresql/Driver.java
index 2fac15d..e85c909 100644
--- a/pgjdbc/src/main/java/org/postgresql/Driver.java
+++ b/pgjdbc/src/main/java/org/postgresql/Driver.java
@@ -59,738 +59,736 @@ import java.util.logging.Logger;
 @SuppressWarnings("try")
 public class Driver implements java.sql.Driver {
 
-  private static Driver registeredDriver;
-  private static final Logger PARENT_LOGGER = Logger.getLogger("org.postgresql");
-  private static final Logger LOGGER = Logger.getLogger("org.postgresql.Driver");
-  private static final SharedTimer SHARED_TIMER = new SharedTimer();
+    private static final Logger PARENT_LOGGER = Logger.getLogger("org.postgresql");
+    private static final Logger LOGGER = Logger.getLogger("org.postgresql.Driver");
+    private static final SharedTimer SHARED_TIMER = new SharedTimer();
+    private static Driver registeredDriver;
 
-  static {
-    try {
-      // moved the registerDriver from the constructor to here
-      // because some clients call the driver themselves (I know, as
-      // my early jdbc work did - and that was based on other examples).
-      // Placing it here, means that the driver is registered once only.
-      register();
-    } catch (SQLException e) {
-      throw new ExceptionInInitializerError(e);
-    }
-  }
-
-  // Helper to retrieve default properties from classloader resource
-  // properties files.
-  private Properties defaultProperties;
-
-  private final ResourceLock lock = new ResourceLock();
-
-  public Driver() {
-  }
-
-  private Properties getDefaultProperties() throws IOException {
-    try (ResourceLock ignore = lock.obtain()) {
-      if (defaultProperties != null) {
-        return defaultProperties;
-      }
-
-      // Make sure we load properties with the maximum possible privileges.
-      try {
-        defaultProperties =
-            doPrivileged(new PrivilegedExceptionAction<Properties>() {
-              @Override
-              public Properties run() throws IOException {
-                return loadDefaultProperties();
-              }
-            });
-      } catch (PrivilegedActionException e) {
-        Exception ex = e.getException();
-        if (ex instanceof IOException) {
-          throw (IOException) ex;
+    static {
+        try {
+            // moved the registerDriver from the constructor to here
+            // because some clients call the driver themselves (I know, as
+            // my early jdbc work did - and that was based on other examples).
+            // Placing it here, means that the driver is registered once only.
+            register();
+        } catch (SQLException e) {
+            throw new ExceptionInInitializerError(e);
         }
-        throw new RuntimeException(e);
-      } catch (Throwable e) {
-        if (e instanceof IOException) {
-          throw (IOException) e;
-        }
-        if (e instanceof RuntimeException) {
-          throw (RuntimeException) e;
-        }
-        if (e instanceof Error) {
-          throw (Error) e;
-        }
-        throw new RuntimeException(e);
-      }
-
-      return defaultProperties;
-    }
-  }
-
-  @SuppressWarnings("unchecked")
-  private static <T> T doPrivileged(PrivilegedExceptionAction<T> action) throws Throwable {
-    try {
-      Class<?> accessControllerClass = Class.forName("java.security.AccessController");
-      Method doPrivileged = accessControllerClass.getMethod("doPrivileged",
-          PrivilegedExceptionAction.class);
-      return (T) doPrivileged.invoke(null, action);
-    } catch (ClassNotFoundException e) {
-      return action.run();
-    } catch (InvocationTargetException e) {
-      throw e.getCause();
-    }
-  }
-
-  private Properties loadDefaultProperties() throws IOException {
-    Properties merged = new Properties();
-
-    try {
-      PGProperty.USER.set(merged, System.getProperty("user.name"));
-    } catch (SecurityException se) {
-      // We're just trying to set a default, so if we can't
-      // it's not a big deal.
     }
 
-    // If we are loaded by the bootstrap classloader, getClassLoader()
-    // may return null. In that case, try to fall back to the system
-    // classloader.
-    //
-    // We should not need to catch SecurityException here as we are
-    // accessing either our own classloader, or the system classloader
-    // when our classloader is null. The ClassLoader javadoc claims
-    // neither case can throw SecurityException.
-    ClassLoader cl = getClass().getClassLoader();
-    if (cl == null) {
-      LOGGER.log(Level.FINE, "Can't find our classloader for the Driver; "
-          + "attempt to use the system class loader");
-      cl = ClassLoader.getSystemClassLoader();
-    }
-
-    if (cl == null) {
-      LOGGER.log(Level.WARNING, "Can't find a classloader for the Driver; not loading driver "
-          + "configuration from org/postgresql/driverconfig.properties");
-      return merged; // Give up on finding defaults.
-    }
-
-    LOGGER.log(Level.FINE, "Loading driver configuration via classloader {0}", cl);
-
-    // When loading the driver config files we don't want settings found
-    // in later files in the classpath to override settings specified in
-    // earlier files. To do this we've got to read the returned
-    // Enumeration into temporary storage.
-    ArrayList<URL> urls = new ArrayList<>();
-    Enumeration<URL> urlEnum = cl.getResources("org/postgresql/driverconfig.properties");
-    while (urlEnum.hasMoreElements()) {
-      urls.add(urlEnum.nextElement());
-    }
-
-    for (int i = urls.size() - 1; i >= 0; i--) {
-      URL url = urls.get(i);
-      LOGGER.log(Level.FINE, "Loading driver configuration from: {0}", url);
-      InputStream is = url.openStream();
-      merged.load(is);
-      is.close();
-    }
-
-    return merged;
-  }
-
-  /**
-   * <p>Try to make a database connection to the given URL. The driver should return "null" if it
-   * realizes it is the wrong kind of driver to connect to the given URL. This will be common, as
-   * when the JDBC driverManager is asked to connect to a given URL, it passes the URL to each
-   * loaded driver in turn.</p>
-   *
-   * <p>The driver should raise an SQLException if it is the right driver to connect to the given URL,
-   * but has trouble connecting to the database.</p>
-   *
-   * <p>The java.util.Properties argument can be used to pass arbitrary string tag/value pairs as
-   * connection arguments.</p>
-   *
-   * <ul>
-   * <li>user - (required) The user to connect as</li>
-   * <li>password - (optional) The password for the user</li>
-   * <li>ssl -(optional) Use SSL when connecting to the server</li>
-   * <li>readOnly - (optional) Set connection to read-only by default</li>
-   * <li>charSet - (optional) The character set to be used for converting to/from
-   * the database to unicode. If multibyte is enabled on the server then the character set of the
-   * database is used as the default, otherwise the jvm character encoding is used as the default.
-   * This value is only used when connecting to a 7.2 or older server.</li>
-   * <li>loglevel - (optional) Enable logging of messages from the driver. The value is an integer
-   * from 0 to 2 where: OFF = 0, INFO =1, DEBUG = 2 The output is sent to
-   * DriverManager.getPrintWriter() if set, otherwise it is sent to System.out.</li>
-   * <li>compatible - (optional) This is used to toggle between different functionality
-   * as it changes across different releases of the jdbc driver code. The values here are versions
-   * of the jdbc client and not server versions. For example in 7.1 get/setBytes worked on
-   * LargeObject values, in 7.2 these methods were changed to work on bytea values. This change in
-   * functionality could be disabled by setting the compatible level to be "7.1", in which case the
-   * driver will revert to the 7.1 functionality.</li>
-   * </ul>
-   *
-   * <p>Normally, at least "user" and "password" properties should be included in the properties. For a
-   * list of supported character encoding , see
-   * http://java.sun.com/products/jdk/1.2/docs/guide/internat/encoding.doc.html Note that you will
-   * probably want to have set up the Postgres database itself to use the same encoding, with the
-   * {@code -E <encoding>} argument to createdb.</p>
-   *
-   * <p>Our protocol takes the forms:</p>
-   *
-   * <pre>
-   *  jdbc:postgresql://host:port/database?param1=val1&amp;...
-   * </pre>
-   *
-   * @param url the URL of the database to connect to
-   * @param info a list of arbitrary tag/value pairs as connection arguments
-   * @return a connection to the URL or null if it isnt us
-   * @exception SQLException if a database access error occurs or the url is
-   *            {@code null}
-   * @see java.sql.Driver#connect
-   */
-  @Override
-  public Connection connect(String url, Properties info) throws SQLException {
-    if (url == null) {
-      throw new SQLException("url is null");
-    }
-    // get defaults
-    Properties defaults;
-
-    if (!url.startsWith("jdbc:postgresql:")) {
-      return null;
-    }
-    try {
-      defaults = getDefaultProperties();
-    } catch (IOException ioe) {
-      throw new PSQLException(GT.tr("Error loading default settings from driverconfig.properties"),
-          PSQLState.UNEXPECTED_ERROR, ioe);
-    }
-
-    // override defaults with provided properties
-    Properties props = new Properties(defaults);
-    if (info != null) {
-      Set<String> e = info.stringPropertyNames();
-      for (String propName : e) {
-        String propValue = info.getProperty(propName);
-        if (propValue == null) {
-          throw new PSQLException(
-              GT.tr("Properties for the driver contains a non-string value for the key ")
-                  + propName,
-              PSQLState.UNEXPECTED_ERROR);
-        }
-        props.setProperty(propName, propValue);
-      }
-    }
-    // parse URL and add more properties
-    if ((props = parseURL(url, props)) == null) {
-      throw new PSQLException(
-          GT.tr("Unable to parse URL {0}", url),
-          PSQLState.UNEXPECTED_ERROR);
-    }
-    try {
-
-      LOGGER.log(Level.FINE, "Connecting with URL: {0}", url);
-
-      // Enforce login timeout, if specified, by running the connection
-      // attempt in a separate thread. If we hit the timeout without the
-      // connection completing, we abandon the connection attempt in
-      // the calling thread, but the separate thread will keep trying.
-      // Eventually, the separate thread will either fail or complete
-      // the connection; at that point we clean up the connection if
-      // we managed to establish one after all. See ConnectThread for
-      // more details.
-      long timeout = timeout(props);
-      if (timeout <= 0) {
-        return makeConnection(url, props);
-      }
-
-      ConnectThread ct = new ConnectThread(url, props);
-      Thread thread = new Thread(ct, "PostgreSQL JDBC driver connection thread");
-      thread.setDaemon(true); // Don't prevent the VM from shutting down
-      thread.start();
-      return ct.getResult(timeout);
-    } catch (PSQLException ex1) {
-      LOGGER.log(Level.FINE, "Connection error: ", ex1);
-      // re-throw the exception, otherwise it will be caught next, and a
-      // org.postgresql.unusual error will be returned instead.
-      throw ex1;
-    } catch (Exception ex2) {
-      if ("java.security.AccessControlException".equals(ex2.getClass().getName())) {
-        // java.security.AccessControlException has been deprecated for removal, so compare the class name
-        throw new PSQLException(
-            GT.tr(
-                "Your security policy has prevented the connection from being attempted.  You probably need to grant the connect java.net.SocketPermission to the database server host and port that you wish to connect to."),
-            PSQLState.UNEXPECTED_ERROR, ex2);
-      }
-      LOGGER.log(Level.FINE, "Unexpected connection error: ", ex2);
-      throw new PSQLException(
-          GT.tr(
-              "Something unusual has occurred to cause the driver to fail. Please report this exception."),
-          PSQLState.UNEXPECTED_ERROR, ex2);
-    }
-  }
-
-  /**
-   *  this is an empty method left here for graalvm
-   *  we removed the ability to setup the logger from properties
-   *  due to a security issue
-   * @param props Connection Properties
-   */
-  private void setupLoggerFromProperties(final Properties props) {
-  }
-
-  /**
-   * Perform a connect in a separate thread; supports getting the results from the original thread
-   * while enforcing a login timeout.
-   */
-  private static class ConnectThread implements Runnable {
     private final ResourceLock lock = new ResourceLock();
-    private final Condition lockCondition = lock.newCondition();
+    // Helper to retrieve default properties from classloader resource
+    // properties files.
+    private Properties defaultProperties;
 
-    ConnectThread(String url, Properties props) {
-      this.url = url;
-      this.props = props;
+    public Driver() {
     }
 
-    @Override
-    public void run() {
-      Connection conn;
-      Throwable error;
-
-      try {
-        conn = makeConnection(url, props);
-        error = null;
-      } catch (Throwable t) {
-        conn = null;
-        error = t;
-      }
-
-      try (ResourceLock ignore = lock.obtain()) {
-        if (abandoned) {
-          if (conn != null) {
-            try {
-              conn.close();
-            } catch (SQLException e) {
-            }
-          }
-        } else {
-          result = conn;
-          resultException = error;
-          lockCondition.signal();
+    @SuppressWarnings("unchecked")
+    private static <T> T doPrivileged(PrivilegedExceptionAction<T> action) throws Throwable {
+        try {
+            Class<?> accessControllerClass = Class.forName("java.security.AccessController");
+            Method doPrivileged = accessControllerClass.getMethod("doPrivileged",
+                    PrivilegedExceptionAction.class);
+            return (T) doPrivileged.invoke(null, action);
+        } catch (ClassNotFoundException e) {
+            return action.run();
+        } catch (InvocationTargetException e) {
+            throw e.getCause();
         }
-      }
     }
 
     /**
-     * Get the connection result from this (assumed running) thread. If the timeout is reached
-     * without a result being available, a SQLException is thrown.
+     * Create a connection from URL and properties. Always does the connection work in the current
+     * thread without enforcing a timeout, regardless of any timeout specified in the properties.
      *
-     * @param timeout timeout in milliseconds
-     * @return the new connection, if successful
-     * @throws SQLException if a connection error occurs or the timeout is reached
+     * @param url   the original URL
+     * @param props the parsed/defaulted connection properties
+     * @return a new connection
+     * @throws SQLException if the connection could not be made
      */
-    public Connection getResult(long timeout) throws SQLException {
-      long expiry = TimeUnit.NANOSECONDS.toMillis(System.nanoTime()) + timeout;
-      try (ResourceLock ignore = lock.obtain()) {
-        while (true) {
-          if (result != null) {
-            return result;
-          }
+    private static Connection makeConnection(String url, Properties props) throws SQLException {
+        return new PgConnection(hostSpecs(props), props, url);
+    }
 
-          Throwable resultException = this.resultException;
-          if (resultException != null) {
-            if (resultException instanceof SQLException) {
-              resultException.fillInStackTrace();
-              throw (SQLException) resultException;
-            } else {
-              throw new PSQLException(
-                  GT.tr(
-                      "Something unusual has occurred to cause the driver to fail. Please report this exception."),
-                  PSQLState.UNEXPECTED_ERROR, resultException);
+    /**
+     * Returns the server version series of this driver and the specific build number.
+     *
+     * @return JDBC driver version
+     * @deprecated use {@link #getMajorVersion()} and {@link #getMinorVersion()} instead
+     */
+    @Deprecated
+    public static String getVersion() {
+        return DriverInfo.DRIVER_FULL_NAME;
+    }
+
+    /**
+     * Constructs a new DriverURL, splitting the specified URL into its component parts.
+     *
+     * @param url      JDBC URL to parse
+     * @param defaults Default properties
+     * @return Properties with elements added from the url
+     */
+    public static Properties parseURL(String url, Properties defaults) {
+        // priority 1 - URL values
+        Properties priority1Url = new Properties();
+        // priority 2 - Properties given as argument to DriverManager.getConnection()
+        // argument "defaults" EXCLUDING defaults
+        // priority 3 - Values retrieved by "service"
+        Properties priority3Service = new Properties();
+        // priority 4 - Properties loaded by Driver.loadDefaultProperties() (user, org/postgresql/driverconfig.properties)
+        // argument "defaults" INCLUDING defaults
+        // priority 5 - PGProperty defaults for PGHOST, PGPORT, PGDBNAME
+
+        String urlServer = url;
+        String urlArgs = "";
+
+        int qPos = url.indexOf('?');
+        if (qPos != -1) {
+            urlServer = url.substring(0, qPos);
+            urlArgs = url.substring(qPos + 1);
+        }
+
+        if (!urlServer.startsWith("jdbc:postgresql:")) {
+            LOGGER.log(Level.FINE, "JDBC URL must start with \"jdbc:postgresql:\" but was: {0}", url);
+            return null;
+        }
+        urlServer = urlServer.substring("jdbc:postgresql:".length());
+
+        if ("//".equals(urlServer) || "///".equals(urlServer)) {
+            urlServer = "";
+        } else if (urlServer.startsWith("//")) {
+            urlServer = urlServer.substring(2);
+            long slashCount = urlServer.chars().filter(ch -> ch == '/').count();
+            if (slashCount > 1) {
+                LOGGER.log(Level.WARNING, "JDBC URL contains too many / characters: {0}", url);
+                return null;
             }
-          }
+            int slash = urlServer.indexOf('/');
+            if (slash == -1) {
+                LOGGER.log(Level.WARNING, "JDBC URL must contain a / at the end of the host or port: {0}", url);
+                return null;
+            }
+            if (!urlServer.endsWith("/")) {
+                String value = urlDecode(urlServer.substring(slash + 1));
+                if (value == null) {
+                    return null;
+                }
+                PGProperty.PG_DBNAME.set(priority1Url, value);
+            }
+            urlServer = urlServer.substring(0, slash);
 
-          long delay = expiry - TimeUnit.NANOSECONDS.toMillis(System.nanoTime());
-          if (delay <= 0) {
-            abandoned = true;
-            throw new PSQLException(GT.tr("Connection attempt timed out."),
-                PSQLState.CONNECTION_UNABLE_TO_CONNECT);
-          }
-
-          try {
-            lockCondition.await(delay, TimeUnit.MILLISECONDS);
-          } catch (InterruptedException ie) {
-
-            // reset the interrupt flag
-            Thread.currentThread().interrupt();
-            abandoned = true;
-
-            // throw an unchecked exception which will hopefully not be ignored by the calling code
-            throw new RuntimeException(GT.tr("Interrupted while attempting to connect."));
-          }
-        }
-      }
-    }
-
-    private final String url;
-    private final Properties props;
-    private Connection result;
-    private Throwable resultException;
-    private boolean abandoned;
-  }
-
-  /**
-   * Create a connection from URL and properties. Always does the connection work in the current
-   * thread without enforcing a timeout, regardless of any timeout specified in the properties.
-   *
-   * @param url the original URL
-   * @param props the parsed/defaulted connection properties
-   * @return a new connection
-   * @throws SQLException if the connection could not be made
-   */
-  private static Connection makeConnection(String url, Properties props) throws SQLException {
-    return new PgConnection(hostSpecs(props), props, url);
-  }
-
-  /**
-   * Returns true if the driver thinks it can open a connection to the given URL. Typically, drivers
-   * will return true if they understand the subprotocol specified in the URL and false if they
-   * don't. Our protocols start with jdbc:postgresql:
-   *
-   * @param url the URL of the driver
-   * @return true if this driver accepts the given URL
-   * @see java.sql.Driver#acceptsURL
-   */
-  @Override
-  public boolean acceptsURL(String url) {
-    return parseURL(url, null) != null;
-  }
-
-  /**
-   * <p>The getPropertyInfo method is intended to allow a generic GUI tool to discover what properties
-   * it should prompt a human for in order to get enough information to connect to a database.</p>
-   *
-   * <p>Note that depending on the values the human has supplied so far, additional values may become
-   * necessary, so it may be necessary to iterate through several calls to getPropertyInfo</p>
-   *
-   * @param url the Url of the database to connect to
-   * @param info a proposed list of tag/value pairs that will be sent on connect open.
-   * @return An array of DriverPropertyInfo objects describing possible properties. This array may
-   *         be an empty array if no properties are required
-   * @see java.sql.Driver#getPropertyInfo
-   */
-  @Override
-  public DriverPropertyInfo[] getPropertyInfo(String url, Properties info) {
-    Properties copy = new Properties(info);
-    Properties parse = parseURL(url, copy);
-    if (parse != null) {
-      copy = parse;
-    }
-
-    PGProperty[] knownProperties = PGProperty.values();
-    DriverPropertyInfo[] props = new DriverPropertyInfo[knownProperties.length];
-    for (int i = 0; i < props.length; i++) {
-      props[i] = knownProperties[i].toDriverPropertyInfo(copy);
-    }
-
-    return props;
-  }
-
-  @Override
-  public int getMajorVersion() {
-    return DriverInfo.MAJOR_VERSION;
-  }
-
-  @Override
-  public int getMinorVersion() {
-    return DriverInfo.MINOR_VERSION;
-  }
-
-  /**
-   * Returns the server version series of this driver and the specific build number.
-   *
-   * @return JDBC driver version
-   * @deprecated use {@link #getMajorVersion()} and {@link #getMinorVersion()} instead
-   */
-  @Deprecated
-  public static String getVersion() {
-    return DriverInfo.DRIVER_FULL_NAME;
-  }
-
-  /**
-   * <p>Report whether the driver is a genuine JDBC compliant driver. A driver may only report "true"
-   * here if it passes the JDBC compliance tests, otherwise it is required to return false. JDBC
-   * compliance requires full support for the JDBC API and full support for SQL 92 Entry Level.</p>
-   *
-   * <p>For PostgreSQL, this is not yet possible, as we are not SQL92 compliant (yet).</p>
-   */
-  @Override
-  public boolean jdbcCompliant() {
-    return false;
-  }
-
-  /**
-   * Constructs a new DriverURL, splitting the specified URL into its component parts.
-   *
-   * @param url JDBC URL to parse
-   * @param defaults Default properties
-   * @return Properties with elements added from the url
-   */
-  public static Properties parseURL(String url, Properties defaults) {
-    // priority 1 - URL values
-    Properties priority1Url = new Properties();
-    // priority 2 - Properties given as argument to DriverManager.getConnection()
-    // argument "defaults" EXCLUDING defaults
-    // priority 3 - Values retrieved by "service"
-    Properties priority3Service = new Properties();
-    // priority 4 - Properties loaded by Driver.loadDefaultProperties() (user, org/postgresql/driverconfig.properties)
-    // argument "defaults" INCLUDING defaults
-    // priority 5 - PGProperty defaults for PGHOST, PGPORT, PGDBNAME
-
-    String urlServer = url;
-    String urlArgs = "";
-
-    int qPos = url.indexOf('?');
-    if (qPos != -1) {
-      urlServer = url.substring(0, qPos);
-      urlArgs = url.substring(qPos + 1);
-    }
-
-    if (!urlServer.startsWith("jdbc:postgresql:")) {
-      LOGGER.log(Level.FINE, "JDBC URL must start with \"jdbc:postgresql:\" but was: {0}", url);
-      return null;
-    }
-    urlServer = urlServer.substring("jdbc:postgresql:".length());
-
-    if ("//".equals(urlServer) || "///".equals(urlServer)) {
-      urlServer = "";
-    } else if (urlServer.startsWith("//")) {
-      urlServer = urlServer.substring(2);
-      long slashCount = urlServer.chars().filter(ch -> ch == '/').count();
-      if (slashCount > 1) {
-        LOGGER.log(Level.WARNING, "JDBC URL contains too many / characters: {0}", url);
-        return null;
-      }
-      int slash = urlServer.indexOf('/');
-      if (slash == -1) {
-        LOGGER.log(Level.WARNING, "JDBC URL must contain a / at the end of the host or port: {0}", url);
-        return null;
-      }
-      if (!urlServer.endsWith("/")) {
-        String value = urlDecode(urlServer.substring(slash + 1));
-        if (value == null) {
-          return null;
-        }
-        PGProperty.PG_DBNAME.set(priority1Url, value);
-      }
-      urlServer = urlServer.substring(0, slash);
-
-      String[] addresses = urlServer.split(",");
-      StringBuilder hosts = new StringBuilder();
-      StringBuilder ports = new StringBuilder();
-      for (String address : addresses) {
-        int portIdx = address.lastIndexOf(':');
-        if (portIdx != -1 && address.lastIndexOf(']') < portIdx) {
-          String portStr = address.substring(portIdx + 1);
-          ports.append(portStr);
-          CharSequence hostStr = address.subSequence(0, portIdx);
-          if (hostStr.length() == 0) {
-            hosts.append(PGProperty.PG_HOST.getDefaultValue());
-          } else {
-            hosts.append(hostStr);
-          }
+            String[] addresses = urlServer.split(",");
+            StringBuilder hosts = new StringBuilder();
+            StringBuilder ports = new StringBuilder();
+            for (String address : addresses) {
+                int portIdx = address.lastIndexOf(':');
+                if (portIdx != -1 && address.lastIndexOf(']') < portIdx) {
+                    String portStr = address.substring(portIdx + 1);
+                    ports.append(portStr);
+                    CharSequence hostStr = address.subSequence(0, portIdx);
+                    if (hostStr.length() == 0) {
+                        hosts.append(PGProperty.PG_HOST.getDefaultValue());
+                    } else {
+                        hosts.append(hostStr);
+                    }
+                } else {
+                    ports.append(PGProperty.PG_PORT.getDefaultValue());
+                    hosts.append(address);
+                }
+                ports.append(',');
+                hosts.append(',');
+            }
+            ports.setLength(ports.length() - 1);
+            hosts.setLength(hosts.length() - 1);
+            PGProperty.PG_HOST.set(priority1Url, hosts.toString());
+            PGProperty.PG_PORT.set(priority1Url, ports.toString());
+        } else if (urlServer.startsWith("/")) {
+            return null;
         } else {
-          ports.append(PGProperty.PG_PORT.getDefaultValue());
-          hosts.append(address);
+            String value = urlDecode(urlServer);
+            if (value == null) {
+                return null;
+            }
+            priority1Url.setProperty(PGProperty.PG_DBNAME.getName(), value);
+        }
+
+        // parse the args part of the url
+        String[] args = urlArgs.split("&");
+        String serviceName = null;
+        for (String token : args) {
+            if (token.isEmpty()) {
+                continue;
+            }
+            int pos = token.indexOf('=');
+            if (pos == -1) {
+                priority1Url.setProperty(token, "");
+            } else {
+                String pName = PGPropertyUtil.translatePGServiceToPGProperty(token.substring(0, pos));
+                String pValue = urlDecode(token.substring(pos + 1));
+                if (pValue == null) {
+                    return null;
+                }
+                if (PGProperty.SERVICE.getName().equals(pName)) {
+                    serviceName = pValue;
+                } else {
+                    priority1Url.setProperty(pName, pValue);
+                }
+            }
+        }
+
+        // load pg_service.conf
+        if (serviceName != null) {
+            LOGGER.log(Level.FINE, "Processing option [?service={0}]", serviceName);
+            Properties result = PgServiceConfParser.getServiceProperties(serviceName);
+            if (result == null) {
+                LOGGER.log(Level.WARNING, "Definition of service [{0}] not found", serviceName);
+                return null;
+            }
+            priority3Service.putAll(result);
+        }
+
+        // combine result based on order of priority
+        Properties result = new Properties();
+        result.putAll(priority1Url);
+        if (defaults != null) {
+            // priority 2 - forEach() returns all entries EXCEPT defaults
+            defaults.forEach(result::putIfAbsent);
+        }
+        priority3Service.forEach(result::putIfAbsent);
+        if (defaults != null) {
+            // priority 4 - stringPropertyNames() returns all entries INCLUDING defaults
+            defaults.stringPropertyNames().forEach(s -> result.putIfAbsent(s, defaults.getProperty(s)));
+        }
+        // priority 5 - PGProperty defaults for PGHOST, PGPORT, PGDBNAME
+        result.putIfAbsent(PGProperty.PG_PORT.getName(), PGProperty.PG_PORT.getDefaultValue());
+        result.putIfAbsent(PGProperty.PG_HOST.getName(), PGProperty.PG_HOST.getDefaultValue());
+        if (PGProperty.USER.getOrDefault(result) != null) {
+            result.putIfAbsent(PGProperty.PG_DBNAME.getName(), PGProperty.USER.getOrDefault(result));
+        }
+
+        // consistency check
+        if (!PGPropertyUtil.propertiesConsistencyCheck(result)) {
+            return null;
+        }
+
+        // try to load .pgpass if password is missing
+        if (PGProperty.PASSWORD.getOrDefault(result) == null) {
+            String password = PgPassParser.getPassword(
+                    PGProperty.PG_HOST.getOrDefault(result), PGProperty.PG_PORT.getOrDefault(result), PGProperty.PG_DBNAME.getOrDefault(result), PGProperty.USER.getOrDefault(result)
+            );
+            if (password != null && !password.isEmpty()) {
+                PGProperty.PASSWORD.set(result, password);
+            }
+        }
+        //
+        return result;
+    }
+
+    // decode url, on failure log and return null
+    private static String urlDecode(String url) {
+        try {
+            return URLCoder.decode(url);
+        } catch (IllegalArgumentException e) {
+            LOGGER.log(Level.FINE, "Url [{0}] parsing failed with error [{1}]", new Object[]{url, e.getMessage()});
         }
-        ports.append(',');
-        hosts.append(',');
-      }
-      ports.setLength(ports.length() - 1);
-      hosts.setLength(hosts.length() - 1);
-      PGProperty.PG_HOST.set(priority1Url, hosts.toString());
-      PGProperty.PG_PORT.set(priority1Url, ports.toString());
-    } else if (urlServer.startsWith("/")) {
-      return null;
-    } else {
-      String value = urlDecode(urlServer);
-      if (value == null) {
         return null;
-      }
-      priority1Url.setProperty(PGProperty.PG_DBNAME.getName(), value);
     }
 
-    // parse the args part of the url
-    String[] args = urlArgs.split("&");
-    String serviceName = null;
-    for (String token : args) {
-      if (token.isEmpty()) {
-        continue;
-      }
-      int pos = token.indexOf('=');
-      if (pos == -1) {
-        priority1Url.setProperty(token, "");
-      } else {
-        String pName = PGPropertyUtil.translatePGServiceToPGProperty(token.substring(0, pos));
-        String pValue = urlDecode(token.substring(pos + 1));
-        if (pValue == null) {
-          return null;
+    /**
+     * @return the address portion of the URL
+     */
+    private static HostSpec[] hostSpecs(Properties props) {
+        String[] hosts = PGProperty.PG_HOST.getOrDefault(props).split(",");
+        String[] ports = PGProperty.PG_PORT.getOrDefault(props).split(",");
+        String localSocketAddress = PGProperty.LOCAL_SOCKET_ADDRESS.getOrDefault(props);
+        HostSpec[] hostSpecs = new HostSpec[hosts.length];
+        for (int i = 0; i < hostSpecs.length; i++) {
+            hostSpecs[i] = new HostSpec(hosts[i], Integer.parseInt(ports[i]), localSocketAddress);
         }
-        if (PGProperty.SERVICE.getName().equals(pName)) {
-          serviceName = pValue;
-        } else {
-          priority1Url.setProperty(pName, pValue);
+        return hostSpecs;
+    }
+
+    /**
+     * @return the timeout from the URL, in milliseconds
+     */
+    private static long timeout(Properties props) {
+        String timeout = PGProperty.LOGIN_TIMEOUT.getOrDefault(props);
+        if (timeout != null) {
+            try {
+                return (long) (Float.parseFloat(timeout) * 1000);
+            } catch (NumberFormatException e) {
+                LOGGER.log(Level.WARNING, "Couldn't parse loginTimeout value: {0}", timeout);
+            }
         }
-      }
+        return (long) DriverManager.getLoginTimeout() * 1000;
     }
 
-    // load pg_service.conf
-    if (serviceName != null) {
-      LOGGER.log(Level.FINE, "Processing option [?service={0}]", serviceName);
-      Properties result = PgServiceConfParser.getServiceProperties(serviceName);
-      if (result == null) {
-        LOGGER.log(Level.WARNING, "Definition of service [{0}] not found", serviceName);
-        return null;
-      }
-      priority3Service.putAll(result);
+    /**
+     * This method was added in v6.5, and simply throws an SQLException for an unimplemented method. I
+     * decided to do it this way while implementing the JDBC2 extensions to JDBC, as it should help
+     * keep the overall driver size down. It now requires the call Class and the function name to help
+     * when the driver is used with closed software that don't report the stack trace
+     *
+     * @param callClass    the call Class
+     * @param functionName the name of the unimplemented function with the type of its arguments
+     * @return PSQLException with a localized message giving the complete description of the
+     * unimplemented function
+     */
+    public static SQLFeatureNotSupportedException notImplemented(Class<?> callClass,
+                                                                 String functionName) {
+        return new SQLFeatureNotSupportedException(
+                GT.tr("Method {0} is not yet implemented.", callClass.getName() + "." + functionName),
+                PSQLState.NOT_IMPLEMENTED.getState());
     }
 
-    // combine result based on order of priority
-    Properties result = new Properties();
-    result.putAll(priority1Url);
-    if (defaults != null) {
-      // priority 2 - forEach() returns all entries EXCEPT defaults
-      defaults.forEach(result::putIfAbsent);
-    }
-    priority3Service.forEach(result::putIfAbsent);
-    if (defaults != null) {
-      // priority 4 - stringPropertyNames() returns all entries INCLUDING defaults
-      defaults.stringPropertyNames().forEach(s -> result.putIfAbsent(s, defaults.getProperty(s)));
-    }
-    // priority 5 - PGProperty defaults for PGHOST, PGPORT, PGDBNAME
-    result.putIfAbsent(PGProperty.PG_PORT.getName(), PGProperty.PG_PORT.getDefaultValue());
-    result.putIfAbsent(PGProperty.PG_HOST.getName(), PGProperty.PG_HOST.getDefaultValue());
-    if (PGProperty.USER.getOrDefault(result) != null) {
-      result.putIfAbsent(PGProperty.PG_DBNAME.getName(), PGProperty.USER.getOrDefault(result));
+    public static SharedTimer getSharedTimer() {
+        return SHARED_TIMER;
     }
 
-    // consistency check
-    if (!PGPropertyUtil.propertiesConsistencyCheck(result)) {
-      return null;
+    /**
+     * Register the driver against {@link DriverManager}. This is done automatically when the class is
+     * loaded. Dropping the driver from DriverManager's list is possible using {@link #deregister()}
+     * method.
+     *
+     * @throws IllegalStateException if the driver is already registered
+     * @throws SQLException          if registering the driver fails
+     */
+    public static void register() throws SQLException {
+        if (isRegistered()) {
+            throw new IllegalStateException(
+                    "Driver is already registered. It can only be registered once.");
+        }
+        Driver registeredDriver = new Driver();
+        DriverManager.registerDriver(registeredDriver);
+        Driver.registeredDriver = registeredDriver;
     }
 
-    // try to load .pgpass if password is missing
-    if (PGProperty.PASSWORD.getOrDefault(result) == null) {
-      String password = PgPassParser.getPassword(
-          PGProperty.PG_HOST.getOrDefault(result), PGProperty.PG_PORT.getOrDefault(result), PGProperty.PG_DBNAME.getOrDefault(result), PGProperty.USER.getOrDefault(result)
-      );
-      if (password != null && !password.isEmpty()) {
-        PGProperty.PASSWORD.set(result, password);
-      }
+    /**
+     * According to JDBC specification, this driver is registered against {@link DriverManager} when
+     * the class is loaded. To avoid leaks, this method allow unregistering the driver so that the
+     * class can be gc'ed if necessary.
+     *
+     * @throws IllegalStateException if the driver is not registered
+     * @throws SQLException          if deregistering the driver fails
+     */
+    public static void deregister() throws SQLException {
+        if (registeredDriver == null) {
+            throw new IllegalStateException(
+                    "Driver is not registered (or it has not been registered using Driver.register() method)");
+        }
+        DriverManager.deregisterDriver(registeredDriver);
+        registeredDriver = null;
     }
-    //
-    return result;
-  }
 
-  // decode url, on failure log and return null
-  private static String urlDecode(String url) {
-    try {
-      return URLCoder.decode(url);
-    } catch (IllegalArgumentException e) {
-      LOGGER.log(Level.FINE, "Url [{0}] parsing failed with error [{1}]", new Object[]{url, e.getMessage()});
+    /**
+     * @return {@code true} if the driver is registered against {@link DriverManager}
+     */
+    public static boolean isRegistered() {
+        return registeredDriver != null;
     }
-    return null;
-  }
 
-  /**
-   * @return the address portion of the URL
-   */
-  private static HostSpec[] hostSpecs(Properties props) {
-    String[] hosts = PGProperty.PG_HOST.getOrDefault(props).split(",");
-    String[] ports = PGProperty.PG_PORT.getOrDefault(props).split(",");
-    String localSocketAddress = PGProperty.LOCAL_SOCKET_ADDRESS.getOrDefault(props);
-    HostSpec[] hostSpecs = new HostSpec[hosts.length];
-    for (int i = 0; i < hostSpecs.length; i++) {
-      hostSpecs[i] = new HostSpec(hosts[i], Integer.parseInt(ports[i]), localSocketAddress);
+    private Properties getDefaultProperties() throws IOException {
+        try (ResourceLock ignore = lock.obtain()) {
+            if (defaultProperties != null) {
+                return defaultProperties;
+            }
+
+            // Make sure we load properties with the maximum possible privileges.
+            try {
+                defaultProperties =
+                        doPrivileged(new PrivilegedExceptionAction<Properties>() {
+                            @Override
+                            public Properties run() throws IOException {
+                                return loadDefaultProperties();
+                            }
+                        });
+            } catch (PrivilegedActionException e) {
+                Exception ex = e.getException();
+                if (ex instanceof IOException) {
+                    throw (IOException) ex;
+                }
+                throw new RuntimeException(e);
+            } catch (Throwable e) {
+                if (e instanceof IOException) {
+                    throw (IOException) e;
+                }
+                if (e instanceof RuntimeException) {
+                    throw (RuntimeException) e;
+                }
+                if (e instanceof Error) {
+                    throw (Error) e;
+                }
+                throw new RuntimeException(e);
+            }
+
+            return defaultProperties;
+        }
     }
-    return hostSpecs;
-  }
 
-  /**
-   * @return the timeout from the URL, in milliseconds
-   */
-  private static long timeout(Properties props) {
-    String timeout = PGProperty.LOGIN_TIMEOUT.getOrDefault(props);
-    if (timeout != null) {
-      try {
-        return (long) (Float.parseFloat(timeout) * 1000);
-      } catch (NumberFormatException e) {
-        LOGGER.log(Level.WARNING, "Couldn't parse loginTimeout value: {0}", timeout);
-      }
+    private Properties loadDefaultProperties() throws IOException {
+        Properties merged = new Properties();
+
+        try {
+            PGProperty.USER.set(merged, System.getProperty("user.name"));
+        } catch (SecurityException se) {
+            // We're just trying to set a default, so if we can't
+            // it's not a big deal.
+        }
+
+        // If we are loaded by the bootstrap classloader, getClassLoader()
+        // may return null. In that case, try to fall back to the system
+        // classloader.
+        //
+        // We should not need to catch SecurityException here as we are
+        // accessing either our own classloader, or the system classloader
+        // when our classloader is null. The ClassLoader javadoc claims
+        // neither case can throw SecurityException.
+        ClassLoader cl = getClass().getClassLoader();
+        if (cl == null) {
+            LOGGER.log(Level.FINE, "Can't find our classloader for the Driver; "
+                    + "attempt to use the system class loader");
+            cl = ClassLoader.getSystemClassLoader();
+        }
+
+        if (cl == null) {
+            LOGGER.log(Level.WARNING, "Can't find a classloader for the Driver; not loading driver "
+                    + "configuration from org/postgresql/driverconfig.properties");
+            return merged; // Give up on finding defaults.
+        }
+
+        LOGGER.log(Level.FINE, "Loading driver configuration via classloader {0}", cl);
+
+        // When loading the driver config files we don't want settings found
+        // in later files in the classpath to override settings specified in
+        // earlier files. To do this we've got to read the returned
+        // Enumeration into temporary storage.
+        ArrayList<URL> urls = new ArrayList<>();
+        Enumeration<URL> urlEnum = cl.getResources("org/postgresql/driverconfig.properties");
+        while (urlEnum.hasMoreElements()) {
+            urls.add(urlEnum.nextElement());
+        }
+
+        for (int i = urls.size() - 1; i >= 0; i--) {
+            URL url = urls.get(i);
+            LOGGER.log(Level.FINE, "Loading driver configuration from: {0}", url);
+            InputStream is = url.openStream();
+            merged.load(is);
+            is.close();
+        }
+
+        return merged;
     }
-    return (long) DriverManager.getLoginTimeout() * 1000;
-  }
 
-  /**
-   * This method was added in v6.5, and simply throws an SQLException for an unimplemented method. I
-   * decided to do it this way while implementing the JDBC2 extensions to JDBC, as it should help
-   * keep the overall driver size down. It now requires the call Class and the function name to help
-   * when the driver is used with closed software that don't report the stack trace
-   *
-   * @param callClass the call Class
-   * @param functionName the name of the unimplemented function with the type of its arguments
-   * @return PSQLException with a localized message giving the complete description of the
-   *         unimplemented function
-   */
-  public static SQLFeatureNotSupportedException notImplemented(Class<?> callClass,
-      String functionName) {
-    return new SQLFeatureNotSupportedException(
-        GT.tr("Method {0} is not yet implemented.", callClass.getName() + "." + functionName),
-        PSQLState.NOT_IMPLEMENTED.getState());
-  }
+    /**
+     * <p>Try to make a database connection to the given URL. The driver should return "null" if it
+     * realizes it is the wrong kind of driver to connect to the given URL. This will be common, as
+     * when the JDBC driverManager is asked to connect to a given URL, it passes the URL to each
+     * loaded driver in turn.</p>
+     *
+     * <p>The driver should raise an SQLException if it is the right driver to connect to the given URL,
+     * but has trouble connecting to the database.</p>
+     *
+     * <p>The java.util.Properties argument can be used to pass arbitrary string tag/value pairs as
+     * connection arguments.</p>
+     *
+     * <ul>
+     * <li>user - (required) The user to connect as</li>
+     * <li>password - (optional) The password for the user</li>
+     * <li>ssl -(optional) Use SSL when connecting to the server</li>
+     * <li>readOnly - (optional) Set connection to read-only by default</li>
+     * <li>charSet - (optional) The character set to be used for converting to/from
+     * the database to unicode. If multibyte is enabled on the server then the character set of the
+     * database is used as the default, otherwise the jvm character encoding is used as the default.
+     * This value is only used when connecting to a 7.2 or older server.</li>
+     * <li>loglevel - (optional) Enable logging of messages from the driver. The value is an integer
+     * from 0 to 2 where: OFF = 0, INFO =1, DEBUG = 2 The output is sent to
+     * DriverManager.getPrintWriter() if set, otherwise it is sent to System.out.</li>
+     * <li>compatible - (optional) This is used to toggle between different functionality
+     * as it changes across different releases of the jdbc driver code. The values here are versions
+     * of the jdbc client and not server versions. For example in 7.1 get/setBytes worked on
+     * LargeObject values, in 7.2 these methods were changed to work on bytea values. This change in
+     * functionality could be disabled by setting the compatible level to be "7.1", in which case the
+     * driver will revert to the 7.1 functionality.</li>
+     * </ul>
+     *
+     * <p>Normally, at least "user" and "password" properties should be included in the properties. For a
+     * list of supported character encoding , see
+     * http://java.sun.com/products/jdk/1.2/docs/guide/internat/encoding.doc.html Note that you will
+     * probably want to have set up the Postgres database itself to use the same encoding, with the
+     * {@code -E <encoding>} argument to createdb.</p>
+     *
+     * <p>Our protocol takes the forms:</p>
+     *
+     * <pre>
+     *  jdbc:postgresql://host:port/database?param1=val1&amp;...
+     * </pre>
+     *
+     * @param url  the URL of the database to connect to
+     * @param info a list of arbitrary tag/value pairs as connection arguments
+     * @return a connection to the URL or null if it isnt us
+     * @throws SQLException if a database access error occurs or the url is
+     *                      {@code null}
+     * @see java.sql.Driver#connect
+     */
+    @Override
+    public Connection connect(String url, Properties info) throws SQLException {
+        if (url == null) {
+            throw new SQLException("url is null");
+        }
+        // get defaults
+        Properties defaults;
 
-  @Override
-  public Logger getParentLogger() {
-    return PARENT_LOGGER;
-  }
+        if (!url.startsWith("jdbc:postgresql:")) {
+            return null;
+        }
+        try {
+            defaults = getDefaultProperties();
+        } catch (IOException ioe) {
+            throw new PSQLException(GT.tr("Error loading default settings from driverconfig.properties"),
+                    PSQLState.UNEXPECTED_ERROR, ioe);
+        }
 
-  public static SharedTimer getSharedTimer() {
-    return SHARED_TIMER;
-  }
+        // override defaults with provided properties
+        Properties props = new Properties(defaults);
+        if (info != null) {
+            Set<String> e = info.stringPropertyNames();
+            for (String propName : e) {
+                String propValue = info.getProperty(propName);
+                if (propValue == null) {
+                    throw new PSQLException(
+                            GT.tr("Properties for the driver contains a non-string value for the key ")
+                                    + propName,
+                            PSQLState.UNEXPECTED_ERROR);
+                }
+                props.setProperty(propName, propValue);
+            }
+        }
+        // parse URL and add more properties
+        if ((props = parseURL(url, props)) == null) {
+            throw new PSQLException(
+                    GT.tr("Unable to parse URL {0}", url),
+                    PSQLState.UNEXPECTED_ERROR);
+        }
+        try {
 
-  /**
-   * Register the driver against {@link DriverManager}. This is done automatically when the class is
-   * loaded. Dropping the driver from DriverManager's list is possible using {@link #deregister()}
-   * method.
-   *
-   * @throws IllegalStateException if the driver is already registered
-   * @throws SQLException if registering the driver fails
-   */
-  public static void register() throws SQLException {
-    if (isRegistered()) {
-      throw new IllegalStateException(
-          "Driver is already registered. It can only be registered once.");
+            LOGGER.log(Level.FINE, "Connecting with URL: {0}", url);
+
+            // Enforce login timeout, if specified, by running the connection
+            // attempt in a separate thread. If we hit the timeout without the
+            // connection completing, we abandon the connection attempt in
+            // the calling thread, but the separate thread will keep trying.
+            // Eventually, the separate thread will either fail or complete
+            // the connection; at that point we clean up the connection if
+            // we managed to establish one after all. See ConnectThread for
+            // more details.
+            long timeout = timeout(props);
+            if (timeout <= 0) {
+                return makeConnection(url, props);
+            }
+
+            ConnectThread ct = new ConnectThread(url, props);
+            Thread thread = new Thread(ct, "PostgreSQL JDBC driver connection thread");
+            thread.setDaemon(true); // Don't prevent the VM from shutting down
+            thread.start();
+            return ct.getResult(timeout);
+        } catch (PSQLException ex1) {
+            LOGGER.log(Level.FINE, "Connection error: ", ex1);
+            // re-throw the exception; otherwise it would be caught by the generic
+            // handler below and reported as an "unusual" driver error instead.
+            throw ex1;
+        } catch (Exception ex2) {
+            if ("java.security.AccessControlException".equals(ex2.getClass().getName())) {
+                // java.security.AccessControlException has been deprecated for removal, so compare the class name
+                throw new PSQLException(
+                        GT.tr(
+                                "Your security policy has prevented the connection from being attempted.  You probably need to grant the connect java.net.SocketPermission to the database server host and port that you wish to connect to."),
+                        PSQLState.UNEXPECTED_ERROR, ex2);
+            }
+            LOGGER.log(Level.FINE, "Unexpected connection error: ", ex2);
+            throw new PSQLException(
+                    GT.tr(
+                            "Something unusual has occurred to cause the driver to fail. Please report this exception."),
+                    PSQLState.UNEXPECTED_ERROR, ex2);
+        }
     }
-    Driver registeredDriver = new Driver();
-    DriverManager.registerDriver(registeredDriver);
-    Driver.registeredDriver = registeredDriver;
-  }
 
-  /**
-   * According to JDBC specification, this driver is registered against {@link DriverManager} when
-   * the class is loaded. To avoid leaks, this method allow unregistering the driver so that the
-   * class can be gc'ed if necessary.
-   *
-   * @throws IllegalStateException if the driver is not registered
-   * @throws SQLException if deregistering the driver fails
-   */
-  public static void deregister() throws SQLException {
-    if (registeredDriver == null) {
-      throw new IllegalStateException(
-          "Driver is not registered (or it has not been registered using Driver.register() method)");
+    /**
+     * This is an intentionally empty method kept for GraalVM compatibility.
+     * The ability to set up the logger from properties was removed
+     * due to a security issue.
+     *
+     * @param props Connection Properties
+     */
+    private void setupLoggerFromProperties(final Properties props) {
     }
-    DriverManager.deregisterDriver(registeredDriver);
-    registeredDriver = null;
-  }
 
-  /**
-   * @return {@code true} if the driver is registered against {@link DriverManager}
-   */
-  public static boolean isRegistered() {
-    return registeredDriver != null;
-  }
+    /**
+     * Returns true if the driver thinks it can open a connection to the given URL. Typically, drivers
+     * will return true if they understand the subprotocol specified in the URL and false if they
+     * don't. Our protocols start with jdbc:postgresql:
+     *
+     * @param url the URL of the database to which a connection is being attempted
+     * @return true if this driver accepts the given URL
+     * @see java.sql.Driver#acceptsURL
+     */
+    @Override
+    public boolean acceptsURL(String url) {
+        return parseURL(url, null) != null;
+    }
+
+    /**
+     * <p>The getPropertyInfo method is intended to allow a generic GUI tool to discover what properties
+     * it should prompt a human for in order to get enough information to connect to a database.</p>
+     *
+     * <p>Note that depending on the values the human has supplied so far, additional values may become
+     * necessary, so it may be necessary to iterate through several calls to getPropertyInfo</p>
+     *
+     * @param url  the URL of the database to connect to
+     * @param info a proposed list of tag/value pairs that will be sent on connect open.
+     * @return An array of DriverPropertyInfo objects describing possible properties. This array may
+     * be an empty array if no properties are required
+     * @see java.sql.Driver#getPropertyInfo
+     */
+    @Override
+    public DriverPropertyInfo[] getPropertyInfo(String url, Properties info) {
+        Properties copy = new Properties(info);
+        Properties parse = parseURL(url, copy);
+        if (parse != null) {
+            copy = parse;
+        }
+
+        PGProperty[] knownProperties = PGProperty.values();
+        DriverPropertyInfo[] props = new DriverPropertyInfo[knownProperties.length];
+        for (int i = 0; i < props.length; i++) {
+            props[i] = knownProperties[i].toDriverPropertyInfo(copy);
+        }
+
+        return props;
+    }
+
+    @Override
+    public int getMajorVersion() {
+        return DriverInfo.MAJOR_VERSION;
+    }
+
+    @Override
+    public int getMinorVersion() {
+        return DriverInfo.MINOR_VERSION;
+    }
+
+    /**
+     * <p>Report whether the driver is a genuine JDBC compliant driver. A driver may only report "true"
+     * here if it passes the JDBC compliance tests, otherwise it is required to return false. JDBC
+     * compliance requires full support for the JDBC API and full support for SQL 92 Entry Level.</p>
+     *
+     * <p>For PostgreSQL, this is not yet possible, as we are not SQL92 compliant (yet).</p>
+     */
+    @Override
+    public boolean jdbcCompliant() {
+        return false;
+    }
+
+    @Override
+    public Logger getParentLogger() {
+        return PARENT_LOGGER;
+    }
+
+    /**
+     * Perform a connect in a separate thread; supports getting the results from the original thread
+     * while enforcing a login timeout.
+     */
+    private static class ConnectThread implements Runnable {
+        private final ResourceLock lock = new ResourceLock();
+        private final Condition lockCondition = lock.newCondition();
+        private final String url;
+        private final Properties props;
+        private Connection result;
+        private Throwable resultException;
+        private boolean abandoned;
+        ConnectThread(String url, Properties props) {
+            this.url = url;
+            this.props = props;
+        }
+
+        @Override
+        public void run() {
+            Connection conn;
+            Throwable error;
+
+            try {
+                conn = makeConnection(url, props);
+                error = null;
+            } catch (Throwable t) {
+                conn = null;
+                error = t;
+            }
+
+            try (ResourceLock ignore = lock.obtain()) {
+                if (abandoned) {
+                    if (conn != null) {
+                        try {
+                            conn.close();
+                        } catch (SQLException e) {
+                        }
+                    }
+                } else {
+                    result = conn;
+                    resultException = error;
+                    lockCondition.signal();
+                }
+            }
+        }
+
+        /**
+         * Get the connection result from this (assumed running) thread. If the timeout is reached
+         * without a result being available, a SQLException is thrown.
+         *
+         * @param timeout timeout in milliseconds
+         * @return the new connection, if successful
+         * @throws SQLException if a connection error occurs or the timeout is reached
+         */
+        public Connection getResult(long timeout) throws SQLException {
+            long expiry = TimeUnit.NANOSECONDS.toMillis(System.nanoTime()) + timeout;
+            try (ResourceLock ignore = lock.obtain()) {
+                while (true) {
+                    if (result != null) {
+                        return result;
+                    }
+
+                    Throwable resultException = this.resultException;
+                    if (resultException != null) {
+                        if (resultException instanceof SQLException) {
+                            resultException.fillInStackTrace();
+                            throw (SQLException) resultException;
+                        } else {
+                            throw new PSQLException(
+                                    GT.tr(
+                                            "Something unusual has occurred to cause the driver to fail. Please report this exception."),
+                                    PSQLState.UNEXPECTED_ERROR, resultException);
+                        }
+                    }
+
+                    long delay = expiry - TimeUnit.NANOSECONDS.toMillis(System.nanoTime());
+                    if (delay <= 0) {
+                        abandoned = true;
+                        throw new PSQLException(GT.tr("Connection attempt timed out."),
+                                PSQLState.CONNECTION_UNABLE_TO_CONNECT);
+                    }
+
+                    try {
+                        lockCondition.await(delay, TimeUnit.MILLISECONDS);
+                    } catch (InterruptedException ie) {
+
+                        // reset the interrupt flag
+                        Thread.currentThread().interrupt();
+                        abandoned = true;
+
+                        // throw an unchecked exception which will hopefully not be ignored by the calling code
+                        throw new RuntimeException(GT.tr("Interrupted while attempting to connect."));
+                    }
+                }
+            }
+        }
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/PGConnection.java b/pgjdbc/src/main/java/org/postgresql/PGConnection.java
index b0b438c..124cac7 100644
--- a/pgjdbc/src/main/java/org/postgresql/PGConnection.java
+++ b/pgjdbc/src/main/java/org/postgresql/PGConnection.java
@@ -32,350 +32,349 @@ import java.util.Map;
  */
 public interface PGConnection {
 
-  /**
-   * Creates an {@link Array} wrapping <i>elements</i>. This is similar to
-   * {@link java.sql.Connection#createArrayOf(String, Object[])}, but also
-   * provides support for primitive arrays.
-   *
-   * @param typeName
-   *          The SQL name of the type to map the <i>elements</i> to.
-   *          Must not be {@code null}.
-   * @param elements
-   *          The array of objects to map. A {@code null} value will result in
-   *          an {@link Array} representing {@code null}.
-   * @return An {@link Array} wrapping <i>elements</i>.
-   * @throws SQLException
-   *           If for some reason the array cannot be created.
-   * @see java.sql.Connection#createArrayOf(String, Object[])
-   */
-  Array createArrayOf(String typeName, Object elements) throws SQLException;
+    /**
+     * Creates an {@link Array} wrapping <i>elements</i>. This is similar to
+     * {@link java.sql.Connection#createArrayOf(String, Object[])}, but also
+     * provides support for primitive arrays.
+     *
+     * @param typeName The SQL name of the type to map the <i>elements</i> to.
+     *                 Must not be {@code null}.
+     * @param elements The array of objects to map. A {@code null} value will result in
+     *                 an {@link Array} representing {@code null}.
+     * @return An {@link Array} wrapping <i>elements</i>.
+     * @throws SQLException If for some reason the array cannot be created.
+     * @see java.sql.Connection#createArrayOf(String, Object[])
+     */
+    Array createArrayOf(String typeName, Object elements) throws SQLException;
 
-  /**
-   * This method returns any notifications that have been received since the last call to this
-   * method. Returns null if there have been no notifications.
-   *
-   * @return notifications that have been received
-   * @throws SQLException if something wrong happens
-   * @since 7.3
-   */
-  PGNotification[] getNotifications() throws SQLException;
+    /**
+     * This method returns any notifications that have been received since the last call to this
+     * method. Returns null if there have been no notifications.
+     *
+     * @return notifications that have been received
+     * @throws SQLException if something wrong happens
+     * @since 7.3
+     */
+    PGNotification[] getNotifications() throws SQLException;
 
-  /**
-   * This method returns any notifications that have been received since the last call to this
-   * method. Returns null if there have been no notifications. A timeout can be specified so the
-   * driver waits for notifications.
-   *
-   * @param timeoutMillis when 0, blocks forever. when &gt; 0, blocks up to the specified number of millis
-   *        or until at least one notification has been received. If more than one notification is
-   *        about to be received, these will be returned in one batch.
-   * @return notifications that have been received
-   * @throws SQLException if something wrong happens
-   * @since 43
-   */
-  PGNotification[] getNotifications(int timeoutMillis) throws SQLException;
+    /**
+     * This method returns any notifications that have been received since the last call to this
+     * method. Returns null if there have been no notifications. A timeout can be specified so the
+     * driver waits for notifications.
+     *
+     * @param timeoutMillis when 0, blocks forever. when &gt; 0, blocks up to the specified number of millis
+     *                      or until at least one notification has been received. If more than one notification is
+     *                      about to be received, these will be returned in one batch.
+     * @return notifications that have been received
+     * @throws SQLException if something wrong happens
+     * @since 43
+     */
+    PGNotification[] getNotifications(int timeoutMillis) throws SQLException;
 
-  /**
-   * This returns the COPY API for the current connection.
-   *
-   * @return COPY API for the current connection
-   * @throws SQLException if something wrong happens
-   * @since 8.4
-   */
-  CopyManager getCopyAPI() throws SQLException;
+    /**
+     * This returns the COPY API for the current connection.
+     *
+     * @return COPY API for the current connection
+     * @throws SQLException if something wrong happens
+     * @since 8.4
+     */
+    CopyManager getCopyAPI() throws SQLException;
 
-  /**
-   * This returns the LargeObject API for the current connection.
-   *
-   * @return LargeObject API for the current connection
-   * @throws SQLException if something wrong happens
-   * @since 7.3
-   */
-  LargeObjectManager getLargeObjectAPI() throws SQLException;
+    /**
+     * This returns the LargeObject API for the current connection.
+     *
+     * @return LargeObject API for the current connection
+     * @throws SQLException if something wrong happens
+     * @since 7.3
+     */
+    LargeObjectManager getLargeObjectAPI() throws SQLException;
 
-  /**
-   * This returns the Fastpath API for the current connection.
-   *
-   * @return Fastpath API for the current connection
-   * @throws SQLException if something wrong happens
-   * @since 7.3
-   * @deprecated This API is somewhat obsolete, as one may achieve similar performance
-   *         and greater functionality by setting up a prepared statement to define
-   *         the function call. Then, executing the statement with binary transmission of parameters
-   *         and results substitutes for a fast-path function call.
-   */
-  @Deprecated
-  Fastpath getFastpathAPI() throws SQLException;
+    /**
+     * This returns the Fastpath API for the current connection.
+     *
+     * @return Fastpath API for the current connection
+     * @throws SQLException if something wrong happens
+     * @since 7.3
+     * @deprecated This API is somewhat obsolete, as one may achieve similar performance
+     * and greater functionality by setting up a prepared statement to define
+     * the function call. Then, executing the statement with binary transmission of parameters
+     * and results substitutes for a fast-path function call.
+     */
+    @Deprecated
+    Fastpath getFastpathAPI() throws SQLException;
 
-  /**
-   * This allows client code to add a handler for one of org.postgresql's more unique data types. It
-   * is approximately equivalent to <code>addDataType(type, Class.forName(name))</code>.
-   *
-   * @param type JDBC type name
-   * @param className class name
-   * @throws RuntimeException if the type cannot be registered (class not found, etc).
-   * @deprecated As of 8.0, replaced by {@link #addDataType(String, Class)}. This deprecated method
-   *             does not work correctly for registering classes that cannot be directly loaded by
-   *             the JDBC driver's classloader.
-   */
-  @Deprecated
-  void addDataType(String type, String className);
+    /**
+     * This allows client code to add a handler for one of org.postgresql's more unique data types. It
+     * is approximately equivalent to <code>addDataType(type, Class.forName(name))</code>.
+     *
+     * @param type      JDBC type name
+     * @param className class name
+     * @throws RuntimeException if the type cannot be registered (class not found, etc).
+     * @deprecated As of 8.0, replaced by {@link #addDataType(String, Class)}. This deprecated method
+     * does not work correctly for registering classes that cannot be directly loaded by
+     * the JDBC driver's classloader.
+     */
+    @Deprecated
+    void addDataType(String type, String className);
 
-  /**
-   * <p>This allows client code to add a handler for one of org.postgresql's more unique data types.</p>
-   *
-   * <p><b>NOTE:</b> This is not part of JDBC, but an extension.</p>
-   *
-   * <p>The best way to use this is as follows:</p>
-   *
-   * <pre>
-   * ...
-   * ((org.postgresql.PGConnection)myconn).addDataType("mytype", my.class.name.class);
-   * ...
-   * </pre>
-   *
-   * <p>where myconn is an open Connection to org.postgresql.</p>
-   *
-   * <p>The handling class must extend org.postgresql.util.PGobject</p>
-   *
-   * @param type the PostgreSQL type to register
-   * @param klass the class implementing the Java representation of the type; this class must
-   *        implement {@link org.postgresql.util.PGobject}).
-   * @throws SQLException if <code>klass</code> does not implement
-   *         {@link org.postgresql.util.PGobject}).
-   * @see org.postgresql.util.PGobject
-   * @since 8.0
-   */
-  void addDataType(String type, Class<? extends PGobject> klass) throws SQLException;
+    /**
+     * <p>This allows client code to add a handler for one of org.postgresql's more unique data types.</p>
+     *
+     * <p><b>NOTE:</b> This is not part of JDBC, but an extension.</p>
+     *
+     * <p>The best way to use this is as follows:</p>
+     *
+     * <pre>
+     * ...
+     * ((org.postgresql.PGConnection)myconn).addDataType("mytype", my.class.name.class);
+     * ...
+     * </pre>
+     *
+     * <p>where myconn is an open Connection to org.postgresql.</p>
+     *
+     * <p>The handling class must extend org.postgresql.util.PGobject</p>
+     *
+     * @param type  the PostgreSQL type to register
+     * @param klass the class implementing the Java representation of the type; this class must
+     *              implement {@link org.postgresql.util.PGobject}).
+     * @throws SQLException if <code>klass</code> does not implement
+     *                      {@link org.postgresql.util.PGobject}).
+     * @see org.postgresql.util.PGobject
+     * @since 8.0
+     */
+    void addDataType(String type, Class<? extends PGobject> klass) throws SQLException;
 
-  /**
-   * Set the default statement reuse threshold before enabling server-side prepare. See
-   * {@link org.postgresql.PGStatement#setPrepareThreshold(int)} for details.
-   *
-   * @param threshold the new threshold
-   * @since build 302
-   */
-  void setPrepareThreshold(int threshold);
+    /**
+     * Get the default server-side prepare reuse threshold for statements created from this
+     * connection.
+     *
+     * @return the current threshold
+     * @since build 302
+     */
+    int getPrepareThreshold();
 
-  /**
-   * Get the default server-side prepare reuse threshold for statements created from this
-   * connection.
-   *
-   * @return the current threshold
-   * @since build 302
-   */
-  int getPrepareThreshold();
+    /**
+     * Set the default statement reuse threshold before enabling server-side prepare. See
+     * {@link org.postgresql.PGStatement#setPrepareThreshold(int)} for details.
+     *
+     * @param threshold the new threshold
+     * @since build 302
+     */
+    void setPrepareThreshold(int threshold);
 
-  /**
-   * Set the default fetch size for statements created from this connection.
-   *
-   * @param fetchSize new default fetch size
-   * @throws SQLException if specified negative <code>fetchSize</code> parameter
-   * @see Statement#setFetchSize(int)
-   */
-  void setDefaultFetchSize(int fetchSize) throws SQLException;
+    /**
+     * Get the default fetch size for statements created from this connection.
+     *
+     * @return current state for default fetch size
+     * @see PGProperty#DEFAULT_ROW_FETCH_SIZE
+     * @see Statement#getFetchSize()
+     */
+    int getDefaultFetchSize();
 
-  /**
-   * Get the default fetch size for statements created from this connection.
-   *
-   * @return current state for default fetch size
-   * @see PGProperty#DEFAULT_ROW_FETCH_SIZE
-   * @see Statement#getFetchSize()
-   */
-  int getDefaultFetchSize();
+    /**
+     * Set the default fetch size for statements created from this connection.
+     *
+     * @param fetchSize new default fetch size
+     * @throws SQLException if specified negative <code>fetchSize</code> parameter
+     * @see Statement#setFetchSize(int)
+     */
+    void setDefaultFetchSize(int fetchSize) throws SQLException;
 
-  /**
-   * Return the process ID (PID) of the backend server process handling this connection.
-   *
-   * @return PID of backend server process.
-   */
-  int getBackendPID();
+    /**
+     * Return the process ID (PID) of the backend server process handling this connection.
+     *
+     * @return PID of backend server process.
+     */
+    int getBackendPID();
 
-  /**
-   * Sends a query cancellation for this connection.
-   * @throws SQLException if there are problems cancelling the query
-   */
-  void cancelQuery() throws SQLException;
+    /**
+     * Sends a query cancellation for this connection.
+     *
+     * @throws SQLException if there are problems cancelling the query
+     */
+    void cancelQuery() throws SQLException;
 
-  /**
-   * Return the given string suitably quoted to be used as an identifier in an SQL statement string.
-   * Quotes are added only if necessary (i.e., if the string contains non-identifier characters or
-   * would be case-folded). Embedded quotes are properly doubled.
-   *
-   * @param identifier input identifier
-   * @return the escaped identifier
-   * @throws SQLException if something goes wrong
-   */
-  String escapeIdentifier(String identifier) throws SQLException;
+    /**
+     * Return the given string suitably quoted to be used as an identifier in an SQL statement string.
+     * Quotes are added only if necessary (i.e., if the string contains non-identifier characters or
+     * would be case-folded). Embedded quotes are properly doubled.
+     *
+     * @param identifier input identifier
+     * @return the escaped identifier
+     * @throws SQLException if something goes wrong
+     */
+    String escapeIdentifier(String identifier) throws SQLException;
 
-  /**
-   * Return the given string suitably quoted to be used as a string literal in an SQL statement
-   * string. Embedded single-quotes and backslashes are properly doubled. Note that quote_literal
-   * returns null on null input.
-   *
-   * @param literal input literal
-   * @return the quoted literal
-   * @throws SQLException if something goes wrong
-   */
-  String escapeLiteral(String literal) throws SQLException;
+    /**
+     * Return the given string suitably quoted to be used as a string literal in an SQL statement
+     * string. Embedded single-quotes and backslashes are properly doubled. Note that quote_literal
+     * returns null on null input.
+     *
+     * @param literal input literal
+     * @return the quoted literal
+     * @throws SQLException if something goes wrong
+     */
+    String escapeLiteral(String literal) throws SQLException;
 
-  /**
-   * <p>Returns the query mode for this connection.</p>
-   *
-   * <p>When running in simple query mode, certain features are not available: callable statements,
-   * partial result set fetch, bytea type, etc.</p>
-   * <p>The list of supported features is subject to change.</p>
-   *
-   * @return the preferred query mode
-   * @see PreferQueryMode
-   */
-  PreferQueryMode getPreferQueryMode();
+    /**
+     * <p>Returns the query mode for this connection.</p>
+     *
+     * <p>When running in simple query mode, certain features are not available: callable statements,
+     * partial result set fetch, bytea type, etc.</p>
+     * <p>The list of supported features is subject to change.</p>
+     *
+     * @return the preferred query mode
+     * @see PreferQueryMode
+     */
+    PreferQueryMode getPreferQueryMode();
 
-  /**
-   * Connection configuration regarding automatic per-query savepoints.
-   *
-   * @return connection configuration regarding automatic per-query savepoints
-   * @see PGProperty#AUTOSAVE
-   */
-  AutoSave getAutosave();
+    /**
+     * Connection configuration regarding automatic per-query savepoints.
+     *
+     * @return connection configuration regarding automatic per-query savepoints
+     * @see PGProperty#AUTOSAVE
+     */
+    AutoSave getAutosave();
 
-  /**
-   * Configures if connection should use automatic savepoints.
-   * @param autoSave connection configuration regarding automatic per-query savepoints
-   * @see PGProperty#AUTOSAVE
-   */
-  void setAutosave(AutoSave autoSave);
+    /**
+     * Configures if connection should use automatic savepoints.
+     *
+     * @param autoSave connection configuration regarding automatic per-query savepoints
+     * @see PGProperty#AUTOSAVE
+     */
+    void setAutosave(AutoSave autoSave);
 
-  /**
-   * @return replication API for the current connection
-   */
-  PGReplicationConnection getReplicationAPI();
+    /**
+     * @return replication API for the current connection
+     */
+    PGReplicationConnection getReplicationAPI();
 
-  /**
-   * Change a user's password to the specified new password.
-   *
-   * <p>
-   * If the specific encryption type is not specified, this method defaults to querying the database server for the server's default password_encryption.
-   * This method does not send the new password in plain text to the server.
-   * Instead, it encrypts the password locally and sends the encoded hash so that the plain text password is never sent on the wire.
-   * </p>
-   *
-   * <p>
-   * Acceptable values for encryptionType are null, "md5", or "scram-sha-256".
-   * Users should avoid "md5" unless they are explicitly targeting an older server that does not support the more secure SCRAM.
-   * </p>
-   *
-   * @param user The username of the database user
-   * @param newPassword The new password for the database user. The implementation will zero
-   *                    out the array after use
-   * @param encryptionType The type of password encryption to use or null if the database server default should be used.
-   * @throws SQLException If the password could not be altered
-   */
-  default void alterUserPassword(String user, char[] newPassword, String encryptionType) throws SQLException {
-    try (Statement stmt = ((Connection) this).createStatement()) {
-      if (encryptionType == null) {
-        try (ResultSet rs = stmt.executeQuery("SHOW password_encryption")) {
-          if (!rs.next()) {
-            throw new PSQLException(GT.tr("Expected a row when reading password_encryption but none was found"),
-                PSQLState.NO_DATA);
-          }
-          encryptionType = rs.getString(1);
-          if (encryptionType == null) {
-            throw new PSQLException(GT.tr("SHOW password_encryption returned null value"),
-                PSQLState.NO_DATA);
-          }
+    /**
+     * Change a user's password to the specified new password.
+     *
+     * <p>
+     * If the specific encryption type is not specified, this method defaults to querying the database server for the server's default password_encryption.
+     * This method does not send the new password in plain text to the server.
+     * Instead, it encrypts the password locally and sends the encoded hash so that the plain text password is never sent on the wire.
+     * </p>
+     *
+     * <p>
+     * Acceptable values for encryptionType are null, "md5", or "scram-sha-256".
+     * Users should avoid "md5" unless they are explicitly targeting an older server that does not support the more secure SCRAM.
+     * </p>
+     *
+     * @param user           The username of the database user
+     * @param newPassword    The new password for the database user. The implementation will zero
+     *                       out the array after use
+     * @param encryptionType The type of password encryption to use or null if the database server default should be used.
+     * @throws SQLException If the password could not be altered
+     */
+    default void alterUserPassword(String user, char[] newPassword, String encryptionType) throws SQLException {
+        try (Statement stmt = ((Connection) this).createStatement()) {
+            if (encryptionType == null) {
+                try (ResultSet rs = stmt.executeQuery("SHOW password_encryption")) {
+                    if (!rs.next()) {
+                        throw new PSQLException(GT.tr("Expected a row when reading password_encryption but none was found"),
+                                PSQLState.NO_DATA);
+                    }
+                    encryptionType = rs.getString(1);
+                    if (encryptionType == null) {
+                        throw new PSQLException(GT.tr("SHOW password_encryption returned null value"),
+                                PSQLState.NO_DATA);
+                    }
+                }
+            }
+            String sql = PasswordUtil.genAlterUserPasswordSQL(user, newPassword, encryptionType);
+            stmt.execute(sql);
+        } finally {
+            Arrays.fill(newPassword, (char) 0);
         }
-      }
-      String sql = PasswordUtil.genAlterUserPasswordSQL(user, newPassword, encryptionType);
-      stmt.execute(sql);
-    } finally {
-      Arrays.fill(newPassword, (char) 0);
     }
-  }
 
-  /**
-   * <p>Returns the current values of all parameters reported by the server.</p>
-   *
-   * <p>PostgreSQL reports values for a subset of parameters (GUCs) to the client
-   * at connect-time, then sends update messages whenever the values change
-   * during a session. PgJDBC records the latest values and exposes it to client
-   * applications via <code>getParameterStatuses()</code>.</p>
-   *
-   * <p>PgJDBC exposes individual accessors for some of these parameters as
-   * listed below. They are more backwards-compatible and should be preferred
-   * where possible.</p>
-   *
-   * <p>Not all parameters are reported, only those marked
-   * <code>GUC_REPORT</code> in the source code. The <code>pg_settings</code>
-   * view does not expose information about which parameters are reportable.
-   * PgJDBC's map will only contain the parameters the server reports values
-   * for, so you cannot use this method as a substitute for running a
-   * <code>SHOW paramname;</code> or <code>SELECT
-   * current_setting('paramname');</code> query for arbitrary parameters.</p>
-   *
-   * <p>Parameter names are <i>case-insensitive</i> and <i>case-preserving</i>
-   * in this map, like in PostgreSQL itself. So <code>DateStyle</code> and
-   * <code>datestyle</code> are the same key.</p>
-   *
-   * <p>
-   *  As of PostgreSQL 11 the reportable parameter list, and related PgJDBC
-   *  interfaces or assessors, are:
-   * </p>
-   *
-   * <ul>
-   *  <li>
-   *    <code>application_name</code> -
-   *    {@link java.sql.Connection#getClientInfo()},
-   *    {@link java.sql.Connection#setClientInfo(java.util.Properties)}
-   *    and <code>ApplicationName</code> connection property.
-   *  </li>
-   *  <li>
-   *    <code>client_encoding</code> - PgJDBC always sets this to <code>UTF8</code>.
-   *    See <code>allowEncodingChanges</code> connection property.
-   *  </li>
-   *  <li><code>DateStyle</code> - PgJDBC requires this to always be set to <code>ISO</code></li>
-   *  <li><code>standard_conforming_strings</code> - indirectly via {@link #escapeLiteral(String)}</li>
-   *  <li>
-   *    <code>TimeZone</code> - set from JDK timezone see {@link java.util.TimeZone#getDefault()}
-   *    and {@link java.util.TimeZone#setDefault(TimeZone)}
-   *  </li>
-   *  <li><code>integer_datetimes</code></li>
-   *  <li><code>IntervalStyle</code></li>
-   *  <li><code>server_encoding</code></li>
-   *  <li><code>server_version</code></li>
-   *  <li><code>is_superuser</code> </li>
-   *  <li><code>session_authorization</code></li>
-   * </ul>
-   *
-   * <p>Note that some PgJDBC operations will change server parameters
-   * automatically.</p>
-   *
-   * @return unmodifiable map of case-insensitive parameter names to parameter values
-   * @since 42.2.6
-   */
-  Map<String, String> getParameterStatuses();
+    /**
+     * <p>Returns the current values of all parameters reported by the server.</p>
+     *
+     * <p>PostgreSQL reports values for a subset of parameters (GUCs) to the client
+     * at connect-time, then sends update messages whenever the values change
+     * during a session. PgJDBC records the latest values and exposes them to client
+     * applications via <code>getParameterStatuses()</code>.</p>
+     *
+     * <p>PgJDBC exposes individual accessors for some of these parameters as
+     * listed below. They are more backwards-compatible and should be preferred
+     * where possible.</p>
+     *
+     * <p>Not all parameters are reported, only those marked
+     * <code>GUC_REPORT</code> in the source code. The <code>pg_settings</code>
+     * view does not expose information about which parameters are reportable.
+     * PgJDBC's map will only contain the parameters the server reports values
+     * for, so you cannot use this method as a substitute for running a
+     * <code>SHOW paramname;</code> or <code>SELECT
+     * current_setting('paramname');</code> query for arbitrary parameters.</p>
+     *
+     * <p>Parameter names are <i>case-insensitive</i> and <i>case-preserving</i>
+     * in this map, like in PostgreSQL itself. So <code>DateStyle</code> and
+     * <code>datestyle</code> are the same key.</p>
+     *
+     * <p>
+     * As of PostgreSQL 11 the reportable parameter list, and related PgJDBC
+     * interfaces or accessors, are:
+     * </p>
+     *
+     * <ul>
+     *  <li>
+     *    <code>application_name</code> -
+     *    {@link java.sql.Connection#getClientInfo()},
+     *    {@link java.sql.Connection#setClientInfo(java.util.Properties)}
+     *    and <code>ApplicationName</code> connection property.
+     *  </li>
+     *  <li>
+     *    <code>client_encoding</code> - PgJDBC always sets this to <code>UTF8</code>.
+     *    See <code>allowEncodingChanges</code> connection property.
+     *  </li>
+     *  <li><code>DateStyle</code> - PgJDBC requires this to always be set to <code>ISO</code></li>
+     *  <li><code>standard_conforming_strings</code> - indirectly via {@link #escapeLiteral(String)}</li>
+     *  <li>
+     *    <code>TimeZone</code> - set from JDK timezone see {@link java.util.TimeZone#getDefault()}
+     *    and {@link java.util.TimeZone#setDefault(TimeZone)}
+     *  </li>
+     *  <li><code>integer_datetimes</code></li>
+     *  <li><code>IntervalStyle</code></li>
+     *  <li><code>server_encoding</code></li>
+     *  <li><code>server_version</code></li>
+     *  <li><code>is_superuser</code> </li>
+     *  <li><code>session_authorization</code></li>
+     * </ul>
+     *
+     * <p>Note that some PgJDBC operations will change server parameters
+     * automatically.</p>
+     *
+     * @return unmodifiable map of case-insensitive parameter names to parameter values
+     * @since 42.2.6
+     */
+    Map<String, String> getParameterStatuses();
 
-  /**
-   * Shorthand for getParameterStatuses().get(...) .
-   *
-   * @param parameterName case-insensitive parameter name
-   * @return parameter value if defined, or null if no parameter known
-   * @see #getParameterStatuses
-   * @since 42.2.6
-   */
-  String getParameterStatus(String parameterName);
+    /**
+     * Shorthand for getParameterStatuses().get(...) .
+     *
+     * @param parameterName case-insensitive parameter name
+     * @return parameter value if defined, or null if no parameter known
+     * @see #getParameterStatuses
+     * @since 42.2.6
+     */
+    String getParameterStatus(String parameterName);
 
-  /**
-   * Turn on/off adaptive fetch for connection. Existing statements and resultSets won't be affected
-   * by change here.
-   *
-   * @param adaptiveFetch desired state of adaptive fetch.
-   */
-  void setAdaptiveFetch(boolean adaptiveFetch);
+    /**
+     * Get state of adaptive fetch for connection.
+     *
+     * @return state of adaptive fetch (turned on or off)
+     */
+    boolean getAdaptiveFetch();
 
-  /**
-   * Get state of adaptive fetch for connection.
-   *
-   * @return state of adaptive fetch (turned on or off)
-   */
-  boolean getAdaptiveFetch();
+    /**
+     * Turn on/off adaptive fetch for connection. Existing statements and resultSets won't be affected
+     * by a change made here.
+     *
+     * @param adaptiveFetch desired state of adaptive fetch.
+     */
+    void setAdaptiveFetch(boolean adaptiveFetch);
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/PGEnvironment.java b/pgjdbc/src/main/java/org/postgresql/PGEnvironment.java
index ac4e611..ae063cf 100644
--- a/pgjdbc/src/main/java/org/postgresql/PGEnvironment.java
+++ b/pgjdbc/src/main/java/org/postgresql/PGEnvironment.java
@@ -14,95 +14,95 @@ import java.util.Map;
  */
 public enum PGEnvironment {
 
-  /**
-   * Specified location of password file.
-   */
-  ORG_POSTGRESQL_PGPASSFILE(
-      "org.postgresql.pgpassfile",
-      null,
-      "Specified location of password file."),
+    /**
+     * Specified location of password file.
+     */
+    ORG_POSTGRESQL_PGPASSFILE(
+            "org.postgresql.pgpassfile",
+            null,
+            "Specified location of password file."),
 
-  /**
-   * Specified location of password file.
-   */
-  PGPASSFILE(
-      "PGPASSFILE",
-      "pgpass",
-      "Specified location of password file."),
+    /**
+     * Specified location of password file.
+     */
+    PGPASSFILE(
+            "PGPASSFILE",
+            "pgpass",
+            "Specified location of password file."),
 
-  /**
-   * The connection service resource (file, url) allows connection parameters to be associated
-   * with a single service name.
-   */
-  ORG_POSTGRESQL_PGSERVICEFILE(
-      "org.postgresql.pgservicefile",
-      null,
-      "Specifies the service resource to resolve connection properties."),
+    /**
+     * The connection service resource (file, url) allows connection parameters to be associated
+     * with a single service name.
+     */
+    ORG_POSTGRESQL_PGSERVICEFILE(
+            "org.postgresql.pgservicefile",
+            null,
+            "Specifies the service resource to resolve connection properties."),
 
-  /**
-   * The connection service resource (file, url) allows connection parameters to be associated
-   * with a single service name.
-   */
-  PGSERVICEFILE(
-      "PGSERVICEFILE",
-      "pg_service.conf",
-      "Specifies the service resource to resolve connection properties."),
+    /**
+     * The connection service resource (file, url) allows connection parameters to be associated
+     * with a single service name.
+     */
+    PGSERVICEFILE(
+            "PGSERVICEFILE",
+            "pg_service.conf",
+            "Specifies the service resource to resolve connection properties."),
 
-  /**
-   * sets the directory containing the PGSERVICEFILE file and possibly other system-wide
-   * configuration files.
-   */
-  PGSYSCONFDIR(
-      "PGSYSCONFDIR",
-      null,
-      "Specifies the directory containing the PGSERVICEFILE file"),
-  ;
+    /**
+     * Sets the directory containing the PGSERVICEFILE file and possibly other system-wide
+     * configuration files.
+     */
+    PGSYSCONFDIR(
+            "PGSYSCONFDIR",
+            null,
+            "Specifies the directory containing the PGSERVICEFILE file"),
+    ;
 
-  private final String name;
-  private final String defaultValue;
-  private final String description;
+    private static final Map<String, PGEnvironment> PROPS_BY_NAME = new HashMap<>();
 
-  PGEnvironment(String name, String defaultValue, String description) {
-    this.name = name;
-    this.defaultValue = defaultValue;
-    this.description = description;
-  }
-
-  private static final Map<String, PGEnvironment> PROPS_BY_NAME = new HashMap<>();
-
-  static {
-    for (PGEnvironment prop : PGEnvironment.values()) {
-      if (PROPS_BY_NAME.put(prop.getName(), prop) != null) {
-        throw new IllegalStateException("Duplicate PGProperty name: " + prop.getName());
-      }
+    static {
+        for (PGEnvironment prop : PGEnvironment.values()) {
+            if (PROPS_BY_NAME.put(prop.getName(), prop) != null) {
+                throw new IllegalStateException("Duplicate PGProperty name: " + prop.getName());
+            }
+        }
     }
-  }
 
-  /**
-   * Returns the name of the parameter.
-   *
-   * @return the name of the parameter
-   */
-  public String getName() {
-    return name;
-  }
+    private final String name;
+    private final String defaultValue;
+    private final String description;
 
-  /**
-   * Returns the default value for this parameter.
-   *
-   * @return the default value for this parameter or null
-   */
-  public String getDefaultValue() {
-    return defaultValue;
-  }
+    PGEnvironment(String name, String defaultValue, String description) {
+        this.name = name;
+        this.defaultValue = defaultValue;
+        this.description = description;
+    }
 
-  /**
-   * Returns the description for this parameter.
-   *
-   * @return the description for this parameter
-   */
-  public String getDescription() {
-    return description;
-  }
+    /**
+     * Returns the name of the parameter.
+     *
+     * @return the name of the parameter
+     */
+    public String getName() {
+        return name;
+    }
+
+    /**
+     * Returns the default value for this parameter.
+     *
+     * @return the default value for this parameter or null
+     */
+    public String getDefaultValue() {
+        return defaultValue;
+    }
+
+    /**
+     * Returns the description for this parameter.
+     *
+     * @return the description for this parameter
+     */
+    public String getDescription() {
+        return description;
+    }
 
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/PGNotification.java b/pgjdbc/src/main/java/org/postgresql/PGNotification.java
index 03c8bb8..322a129 100644
--- a/pgjdbc/src/main/java/org/postgresql/PGNotification.java
+++ b/pgjdbc/src/main/java/org/postgresql/PGNotification.java
@@ -9,29 +9,29 @@ package org.postgresql;
  * This interface defines the public PostgreSQL extension for Notifications.
  */
 public interface PGNotification {
-  /**
-   * Returns name of this notification.
-   *
-   * @return name of this notification
-   * @since 7.3
-   */
-  String getName();
+    /**
+     * Returns name of this notification.
+     *
+     * @return name of this notification
+     * @since 7.3
+     */
+    String getName();
 
-  /**
-   * Returns the process id of the backend process making this notification.
-   *
-   * @return process id of the backend process making this notification
-   * @since 7.3
-   */
-  int getPID();
+    /**
+     * Returns the process id of the backend process making this notification.
+     *
+     * @return process id of the backend process making this notification
+     * @since 7.3
+     */
+    int getPID();
 
-  /**
-   * Returns additional information from the notifying process. This feature has only been
-   * implemented in server versions 9.0 and later, so previous versions will always return an empty
-   * String.
-   *
-   * @return additional information from the notifying process
-   * @since 8.0
-   */
-  String getParameter();
+    /**
+     * Returns additional information from the notifying process. This feature has only been
+     * implemented in server versions 9.0 and later, so previous versions will always return an empty
+     * String.
+     *
+     * @return additional information from the notifying process
+     * @since 8.0
+     */
+    String getParameter();
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/PGProperty.java b/pgjdbc/src/main/java/org/postgresql/PGProperty.java
index 571ed36..81e6123 100644
--- a/pgjdbc/src/main/java/org/postgresql/PGProperty.java
+++ b/pgjdbc/src/main/java/org/postgresql/PGProperty.java
@@ -22,1010 +22,1015 @@ import java.util.Properties;
  */
 public enum PGProperty {
 
-  /**
-   * Specifies if number of rows, used during fetching rows of a result set, should be computed
-   * dynamically. Number of rows will be calculated by dividing maxResultBuffer size by max row size
-   * observed so far, rounded down. First fetch will have number of rows declared in
-   * defaultRowFetchSize. Number of rows can be limited by adaptiveFetchMinimum and
-   * adaptiveFetchMaximum. Requires declaring of maxResultBuffer and defaultRowFetchSize to work.
-   * Default value is false.
-   */
-  ADAPTIVE_FETCH(
-      "adaptiveFetch",
-      "false",
-      "Specifies if number of rows fetched in ResultSet should be adaptive to maxResultBuffer and max row size."),
-
-  /**
-   * Specifies the highest number of rows which can be calculated by adaptiveFetch. Requires
-   * adaptiveFetch set to true to work. Default value is -1 (used as infinity).
-   */
-  ADAPTIVE_FETCH_MAXIMUM(
-      "adaptiveFetchMaximum",
-      "-1",
-      "Specifies maximum number of rows used by adaptive fetch."),
-
-  /**
-   * Specifies the lowest number of rows which can be calculated by adaptiveFetch. Requires
-   * adaptiveFetch set to true to work. Default value is 0.
-   */
-  ADAPTIVE_FETCH_MINIMUM(
-      "adaptiveFetchMinimum",
-      "0",
-      "Specifies minimum number of rows used by adaptive fetch."),
-
-  /**
-   * When using the V3 protocol the driver monitors changes in certain server configuration
-   * parameters that should not be touched by end users. The {@code client_encoding} setting is set
-   * by the driver and should not be altered. If the driver detects a change it will abort the
-   * connection.
-   */
-  ALLOW_ENCODING_CHANGES(
-      "allowEncodingChanges",
-      "false",
-      "Allow for changes in client_encoding"),
-
-  /**
-   * The application name (require server version &gt;= 9.0).
-   */
-  APPLICATION_NAME(
-      "ApplicationName",
-      DriverInfo.DRIVER_NAME,
-      "Name of the Application (backend >= 9.0)"),
-
-  /**
-   * Assume the server is at least that version.
-   */
-  ASSUME_MIN_SERVER_VERSION(
-      "assumeMinServerVersion",
-      null,
-      "Assume the server is at least that version"),
-
-  /**
-   * AuthenticationPluginClass
-   */
-
-  AUTHENTICATION_PLUGIN_CLASS_NAME(
-      "authenticationPluginClassName",
-      null,
-      "Name of class which implements AuthenticationPlugin"
-  ),
-
-  /**
-   * Specifies what the driver should do if a query fails. In {@code autosave=always} mode, JDBC driver sets a savepoint before each query,
-   * and rolls back to that savepoint in case of failure. In {@code autosave=never} mode (default), no savepoint dance is made ever.
-   * In {@code autosave=conservative} mode, savepoint is set for each query, however the rollback is done only for rare cases
-   * like 'cached statement cannot change return type' or 'statement XXX is not valid' so JDBC driver rollsback and retries
-   */
-  AUTOSAVE(
-      "autosave",
-      "never",
-      "Specifies what the driver should do if a query fails. In autosave=always mode, JDBC driver sets a savepoint before each query, "
-          + "and rolls back to that savepoint in case of failure. In autosave=never mode (default), no savepoint dance is made ever. "
-          + "In autosave=conservative mode, safepoint is set for each query, however the rollback is done only for rare cases"
-          + " like 'cached statement cannot change return type' or 'statement XXX is not valid' so JDBC driver rollsback and retries",
-      false,
-      new String[]{"always", "never", "conservative"}),
-
-  /**
-   * Use binary format for sending and receiving data if possible.
-   */
-  BINARY_TRANSFER(
-      "binaryTransfer",
-      "true",
-      "Use binary format for sending and receiving data if possible"),
-
-  /**
-   * Comma separated list of types to disable binary transfer. Either OID numbers or names.
-   * Overrides values in the driver default set and values set with binaryTransferEnable.
-   */
-  BINARY_TRANSFER_DISABLE(
-      "binaryTransferDisable",
-      "",
-      "Comma separated list of types to disable binary transfer. Either OID numbers or names. Overrides values in the driver default set and values set with binaryTransferEnable."),
-
-  /**
-   * Comma separated list of types to enable binary transfer. Either OID numbers or names
-   */
-  BINARY_TRANSFER_ENABLE(
-      "binaryTransferEnable",
-      "",
-      "Comma separated list of types to enable binary transfer. Either OID numbers or names"),
-
-  /**
-   * Cancel command is sent out of band over its own connection, so cancel message can itself get
-   * stuck.
-   * This property controls "connect timeout" and "socket timeout" used for cancel commands.
-   * The timeout is specified in seconds. Default value is 10 seconds.
-   */
-  CANCEL_SIGNAL_TIMEOUT(
-      "cancelSignalTimeout",
-      "10",
-      "The timeout that is used for sending cancel command."),
-
-  /**
-   * Determine whether SAVEPOINTS used in AUTOSAVE will be released per query or not
-   */
-  CLEANUP_SAVEPOINTS(
-      "cleanupSavepoints",
-      "false",
-      "Determine whether SAVEPOINTS used in AUTOSAVE will be released per query or not",
-      false,
-      new String[]{"true", "false"}),
-
-  /**
-   * <p>The timeout value used for socket connect operations. If connecting to the server takes longer
-   * than this value, the connection is broken.</p>
-   *
-   * <p>The timeout is specified in seconds and a value of zero means that it is disabled.</p>
-   */
-  CONNECT_TIMEOUT(
-      "connectTimeout",
-      "10",
-      "The timeout value in seconds used for socket connect operations."),
-
-  /**
-   * Specify the schema (or several schema separated by commas) to be set in the search-path. This schema will be used to resolve
-   * unqualified object names used in statements over this connection.
-   */
-  CURRENT_SCHEMA(
-      "currentSchema",
-      null,
-      "Specify the schema (or several schema separated by commas) to be set in the search-path"),
-
-  /**
-   * Specifies the maximum number of fields to be cached per connection. A value of {@code 0} disables the cache.
-   */
-  DATABASE_METADATA_CACHE_FIELDS(
-      "databaseMetadataCacheFields",
-      "65536",
-      "Specifies the maximum number of fields to be cached per connection. A value of {@code 0} disables the cache."),
-
-  /**
-   * Specifies the maximum size (in megabytes) of fields to be cached per connection. A value of {@code 0} disables the cache.
-   */
-  DATABASE_METADATA_CACHE_FIELDS_MIB(
-      "databaseMetadataCacheFieldsMiB",
-      "5",
-      "Specifies the maximum size (in megabytes) of fields to be cached per connection. A value of {@code 0} disables the cache."),
-
-  /**
-   * Default parameter for {@link java.sql.Statement#getFetchSize()}. A value of {@code 0} means
-   * that need fetch all rows at once
-   */
-  DEFAULT_ROW_FETCH_SIZE(
-      "defaultRowFetchSize",
-      "0",
-      "Positive number of rows that should be fetched from the database when more rows are needed for ResultSet by each fetch iteration"),
-
-  /**
-   * Enable optimization that disables column name sanitiser.
-   */
-  DISABLE_COLUMN_SANITISER(
-      "disableColumnSanitiser",
-      "false",
-      "Enable optimization that disables column name sanitiser"),
-
-  /**
-   * Specifies how the driver transforms JDBC escape call syntax into underlying SQL, for invoking procedures or functions. (backend &gt;= 11)
-   * In {@code escapeSyntaxCallMode=select} mode (the default), the driver always uses a SELECT statement (allowing function invocation only).
-   * In {@code escapeSyntaxCallMode=callIfNoReturn} mode, the driver uses a CALL statement (allowing procedure invocation) if there is no return parameter specified, otherwise the driver uses a SELECT statement.
-   * In {@code escapeSyntaxCallMode=call} mode, the driver always uses a CALL statement (allowing procedure invocation only).
-   */
-  ESCAPE_SYNTAX_CALL_MODE(
-      "escapeSyntaxCallMode",
-      "select",
-      "Specifies how the driver transforms JDBC escape call syntax into underlying SQL, for invoking procedures or functions. (backend >= 11)"
-          + "In escapeSyntaxCallMode=select mode (the default), the driver always uses a SELECT statement (allowing function invocation only)."
-          + "In escapeSyntaxCallMode=callIfNoReturn mode, the driver uses a CALL statement (allowing procedure invocation) if there is no return parameter specified, otherwise the driver uses a SELECT statement."
-          + "In escapeSyntaxCallMode=call mode, the driver always uses a CALL statement (allowing procedure invocation only).",
-      false,
-      new String[]{"select", "callIfNoReturn", "call"}),
-
-  /**
-   * Group startup parameters in a transaction
-   * This is important in pool-by-transaction scenarios in order to make sure that all the statements
-   * reaches the same connection that is being initialized. All of the startup parameters will be wrapped
-   * in a transaction
-   * Note this is off by default as pgbouncer in statement mode
-   */
-  GROUP_STARTUP_PARAMETERS(
-      "groupStartupParameters",
-      "false",
-      "This is important in pool-by-transaction scenarios in order to make sure that all "
-          + "the statements reaches the same connection that is being initialized."
-  ),
-
-  GSS_ENC_MODE(
-      "gssEncMode",
-      "allow",
-      "Force Encoded GSS Mode",
-      false,
-      new String[]{"disable", "allow", "prefer", "require"}
-  ),
-
-  /**
-   * Force one of
-   * <ul>
-   * <li>SSPI (Windows transparent single-sign-on)</li>
-   * <li>GSSAPI (Kerberos, via JSSE)</li>
-   * </ul>
-   * to be used when the server requests Kerberos or SSPI authentication.
-   */
-  GSS_LIB(
-      "gsslib",
-      "auto",
-      "Force SSSPI or GSSAPI",
-      false,
-      new String[]{"auto", "sspi", "gssapi"}),
-
-  /**
-   * <p>After requesting an upgrade to SSL from the server there are reports of the server not responding due to a failover
-   * without a timeout here, the client can wait forever. The pattern for requesting a GSS encrypted connection is the same so we provide the same
-   * timeout mechanism This timeout will be set before the request and reset after </p>
-   */
-  GSS_RESPONSE_TIMEOUT(
-      "gssResponseTimeout",
-      "5000",
-      "Time in milliseconds we wait for a response from the server after requesting a GSS upgrade"),
-
-
-  /**
-   * Enable mode to filter out the names of database objects for which the current user has no privileges
-   * granted from appearing in the DatabaseMetaData returned by the driver.
-   */
-  HIDE_UNPRIVILEGED_OBJECTS(
-      "hideUnprivilegedObjects",
-      "false",
-      "Enable hiding of database objects for which the current user has no privileges granted from the DatabaseMetaData"),
-
-  HOST_RECHECK_SECONDS(
-      "hostRecheckSeconds",
-      "10",
-      "Specifies period (seconds) after which the host status is checked again in case it has changed"),
-
-  /**
-   * Specifies the name of the JAAS system or application login configuration.
-   */
-  JAAS_APPLICATION_NAME(
-      "jaasApplicationName",
-      "pgjdbc",
-      "Specifies the name of the JAAS system or application login configuration."),
-
-  /**
-   * Flag to enable/disable obtaining a GSS credential via JAAS login before authenticating.
-   * Useful if setting system property javax.security.auth.useSubjectCredsOnly=false
-   * or using native GSS with system property sun.security.jgss.native=true
-   */
-  JAAS_LOGIN(
-      "jaasLogin",
-      "true",
-      "Login with JAAS before doing GSSAPI authentication"),
-
-  /**
-   * The Kerberos service name to use when authenticating with GSSAPI. This is equivalent to libpq's
-   * PGKRBSRVNAME environment variable.
-   */
-  KERBEROS_SERVER_NAME(
-      "kerberosServerName",
-      null,
-      "The Kerberos service name to use when authenticating with GSSAPI."),
-
-  LOAD_BALANCE_HOSTS(
-      "loadBalanceHosts",
-      "false",
-      "If disabled hosts are connected in the given order. If enabled hosts are chosen randomly from the set of suitable candidates"),
-
-  /**
-   * <p>If this is set then the client side will bind to this address. This is useful if you need
-   * to choose which interface to connect to.</p>
-   */
-  LOCAL_SOCKET_ADDRESS(
-      "localSocketAddress",
-      null,
-      "Local Socket address, if set bind the client side of the socket to this address"),
-
-  /**
-   * This property is no longer used by the driver and will be ignored.
-   * @deprecated Logging is configured via java.util.logging.
-   */
-  @Deprecated
-  LOGGER_FILE(
-      "loggerFile",
-      null,
-      "File name output of the Logger"),
-
-  /**
-   * This property is no longer used by the driver and will be ignored.
-   * @deprecated Logging is configured via java.util.logging.
-   */
-  @Deprecated
-  LOGGER_LEVEL(
-      "loggerLevel",
-      null,
-      "Logger level of the driver",
-      false,
-      new String[]{"OFF", "DEBUG", "TRACE"}),
-
-  /**
-   * Specify how long to wait for establishment of a database connection. The timeout is specified
-   * in seconds.
-   */
-  LOGIN_TIMEOUT(
-      "loginTimeout",
-      "0",
-      "Specify how long in seconds to wait for establishment of a database connection."),
-
-  /**
-   * Whether to include full server error detail in exception messages.
-   */
-  LOG_SERVER_ERROR_DETAIL(
-      "logServerErrorDetail",
-      "true",
-      "Include full server error detail in exception messages. If disabled then only the error itself will be included."),
-
-  /**
-   * When connections that are not explicitly closed are garbage collected, log the stacktrace from
-   * the opening of the connection to trace the leak source.
-   */
-  LOG_UNCLOSED_CONNECTIONS(
-      "logUnclosedConnections",
-      "false",
-      "When connections that are not explicitly closed are garbage collected, log the stacktrace from the opening of the connection to trace the leak source"),
-
-  /**
-   * Specifies size of buffer during fetching result set. Can be specified as specified size or
-   * percent of heap memory.
-   */
-  MAX_RESULT_BUFFER(
-      "maxResultBuffer",
-      null,
-      "Specifies size of buffer during fetching result set. Can be specified as specified size or percent of heap memory."),
-
-  /**
-   * Specify 'options' connection initialization parameter.
-   * The value of this parameter may contain spaces and other special characters or their URL representation.
-   */
-  OPTIONS(
-      "options",
-      null,
-      "Specify 'options' connection initialization parameter."),
-
-  /**
-   * Password to use when authenticating.
-   */
-  PASSWORD(
-      "password",
-      null,
-      "Password to use when authenticating.",
-      false),
-
-  /**
-   * Database name to connect to (may be specified directly in the JDBC URL).
-   */
-  PG_DBNAME(
-      "PGDBNAME",
-      null,
-      "Database name to connect to (may be specified directly in the JDBC URL)",
-      true),
-
-  /**
-   * Hostname of the PostgreSQL server (may be specified directly in the JDBC URL).
-   */
-  PG_HOST(
-      "PGHOST",
-      "localhost",
-      "Hostname of the PostgreSQL server (may be specified directly in the JDBC URL)",
-      false),
-
-  /**
-   * Port of the PostgreSQL server (may be specified directly in the JDBC URL).
-   */
-  PG_PORT(
-      "PGPORT",
-      "5432",
-      "Port of the PostgreSQL server (may be specified directly in the JDBC URL)"),
-
-  /**
-   * <p>Specifies which mode is used to execute queries to database: simple means ('Q' execute, no parse, no bind, text mode only),
-   * extended means always use bind/execute messages, extendedForPrepared means extended for prepared statements only,
-   * extendedCacheEverything means use extended protocol and try cache every statement (including Statement.execute(String sql)) in a query cache.</p>
-   *
-   * <p>This mode is meant for debugging purposes and/or for cases when extended protocol cannot be used (e.g. logical replication protocol)</p>
-   */
-  PREFER_QUERY_MODE(
-      "preferQueryMode",
-      "extended",
-      "Specifies which mode is used to execute queries to database: simple means ('Q' execute, no parse, no bind, text mode only), "
-          + "extended means always use bind/execute messages, extendedForPrepared means extended for prepared statements only, "
-          + "extendedCacheEverything means use extended protocol and try cache every statement (including Statement.execute(String sql)) in a query cache.", false,
-      new String[]{"extended", "extendedForPrepared", "extendedCacheEverything", "simple"}),
-
-  /**
-   * Specifies the maximum number of entries in cache of prepared statements. A value of {@code 0}
-   * disables the cache.
-   */
-  PREPARED_STATEMENT_CACHE_QUERIES(
-      "preparedStatementCacheQueries",
-      "256",
-      "Specifies the maximum number of entries in per-connection cache of prepared statements. A value of {@code 0} disables the cache."),
-
-  /**
-   * Specifies the maximum size (in megabytes) of the prepared statement cache. A value of {@code 0}
-   * disables the cache.
-   */
-  PREPARED_STATEMENT_CACHE_SIZE_MIB(
-      "preparedStatementCacheSizeMiB",
-      "5",
-      "Specifies the maximum size (in megabytes) of a per-connection prepared statement cache. A value of {@code 0} disables the cache."),
-
-  /**
-   * Sets the default threshold for enabling server-side prepare. A value of {@code -1} stands for
-   * forceBinary
-   */
-  PREPARE_THRESHOLD(
-      "prepareThreshold",
-      "5",
-      "Statement prepare threshold. A value of {@code -1} stands for forceBinary"),
-
-  /**
-   * Force use of a particular protocol version when connecting, if set, disables protocol version
-   * fallback.
-   */
-  PROTOCOL_VERSION(
-      "protocolVersion",
-      null,
-      "Force use of a particular protocol version when connecting, currently only version 3 is supported.",
-      false,
-      new String[]{"3"}),
-
-  /**
-   * Quote returning columns.
-   * There are some ORM's that quote everything, including returning columns
-   * If we quote them, then we end up sending ""colname"" to the backend
-   * which will not be found
-   */
-  QUOTE_RETURNING_IDENTIFIERS(
-    "quoteReturningIdentifiers",
-    "true",
-    "Quote identifiers provided in returning array",
-      false),
-  /**
-   * Puts this connection in read-only mode.
-   */
-  READ_ONLY(
-      "readOnly",
-      "false",
-      "Puts this connection in read-only mode"),
-
-  /**
-   * Connection parameter to control behavior when
-   * {@link Connection#setReadOnly(boolean)} is set to {@code true}.
-   */
-  READ_ONLY_MODE(
-      "readOnlyMode",
-      "transaction",
-      "Controls the behavior when a connection is set to be read only, one of 'ignore', 'transaction', or 'always' "
-          + "When 'ignore', setting readOnly has no effect. "
-          + "When 'transaction' setting readOnly to 'true' will cause transactions to BEGIN READ ONLY if autocommit is 'false'. "
-          + "When 'always' setting readOnly to 'true' will set the session to READ ONLY if autoCommit is 'true' "
-          + "and the transaction to BEGIN READ ONLY if autocommit is 'false'.",
-      false,
-      new String[]{"ignore", "transaction", "always"}),
-
-  /**
-   * Socket read buffer size (SO_RECVBUF). A value of {@code -1}, which is the default, means system
-   * default.
-   */
-  RECEIVE_BUFFER_SIZE(
-      "receiveBufferSize",
-      "-1",
-      "Socket read buffer size"),
-
-  /**
-   * <p>Connection parameter passed in the startup message. This parameter accepts two values; "true"
-   * and "database". Passing "true" tells the backend to go into walsender mode, wherein a small set
-   * of replication commands can be issued instead of SQL statements. Only the simple query protocol
-   * can be used in walsender mode. Passing "database" as the value instructs walsender to connect
-   * to the database specified in the dbname parameter, which will allow the connection to be used
-   * for logical replication from that database.</p>
-   * <p>Parameter should be use together with {@link PGProperty#ASSUME_MIN_SERVER_VERSION} with
-   * parameter &gt;= 9.4 (backend &gt;= 9.4)</p>
-   */
-  REPLICATION(
-      "replication",
-      null,
-      "Connection parameter passed in startup message, one of 'true' or 'database' "
-          + "Passing 'true' tells the backend to go into walsender mode, "
-          + "wherein a small set of replication commands can be issued instead of SQL statements. "
-          + "Only the simple query protocol can be used in walsender mode. "
-          + "Passing 'database' as the value instructs walsender to connect "
-          + "to the database specified in the dbname parameter, "
-          + "which will allow the connection to be used for logical replication "
-          + "from that database. "
-          + "(backend >= 9.4)"),
-
-  /**
-   * Configure optimization to enable batch insert re-writing.
-   */
-  REWRITE_BATCHED_INSERTS(
-      "reWriteBatchedInserts",
-      "false",
-      "Enable optimization to rewrite and collapse compatible INSERT statements that are batched."),
-
-  /**
-   * Socket write buffer size (SO_SNDBUF). A value of {@code -1}, which is the default, means system
-   * default.
-   */
-  SEND_BUFFER_SIZE(
-      "sendBufferSize",
-      "-1",
-      "Socket write buffer size"),
-
-  /**
-   * Service name to use for additional parameters. It specifies a service name in "pg_service
-   * .conf" that holds additional connection parameters. This allows applications to specify only
-   * a service name so connection parameters can be centrally maintained.
-   */
-  SERVICE(
-      "service",
-      null,
-      "Service name to be searched in pg_service.conf resource"),
-
-  /**
-   * Socket factory used to create socket. A null value, which is the default, means system default.
-   */
-  SOCKET_FACTORY(
-      "socketFactory",
-      null,
-      "Specify a socket factory for socket creation"),
-
-  /**
-   * The String argument to give to the constructor of the Socket Factory.
-   */
-  SOCKET_FACTORY_ARG(
-      "socketFactoryArg",
-      null,
-      "Argument forwarded to constructor of SocketFactory class."),
-
-  /**
-   * The timeout value used for socket read operations. If reading from the server takes longer than
-   * this value, the connection is closed. This can be used as both a brute force global query
-   * timeout and a method of detecting network problems. The timeout is specified in seconds and a
-   * value of zero means that it is disabled.
-   */
-  SOCKET_TIMEOUT(
-      "socketTimeout",
-      "0",
-      "The timeout value in seconds max(2147484) used for socket read operations."),
-
-  /**
-   * Control use of SSL: empty or {@code true} values imply {@code sslmode==verify-full}
-   */
-  SSL(
-      "ssl",
-      null,
-      "Control use of SSL (any non-null value causes SSL to be required)"),
-
-  /**
-   * File containing the SSL Certificate. Default will be the file {@code postgresql.crt} in {@code
-   * $HOME/.postgresql} (*nix) or {@code %APPDATA%\postgresql} (windows).
-   */
-  SSL_CERT(
-      "sslcert",
-      null,
-      "The location of the client's SSL certificate"),
-
-  /**
-   * Classname of the SSL Factory to use (instance of {@link javax.net.ssl.SSLSocketFactory}).
-   */
-  SSL_FACTORY(
-      "sslfactory",
-      "org.postgresql.ssl.LibPQFactory",
-      "Provide a SSLSocketFactory class when using SSL."),
-
-  /**
-   * The String argument to give to the constructor of the SSL Factory.
-   * @deprecated use {@code ..Factory(Properties)} constructor.
-   */
-  @Deprecated
-  SSL_FACTORY_ARG(
-      "sslfactoryarg",
-      null,
-      "Argument forwarded to constructor of SSLSocketFactory class."),
-
-  /**
-   * Classname of the SSL HostnameVerifier to use (instance of {@link javax.net.ssl.HostnameVerifier}).
-   */
-  SSL_HOSTNAME_VERIFIER(
-      "sslhostnameverifier",
-      null,
-      "A class, implementing javax.net.ssl.HostnameVerifier that can verify the server"),
-
-  /**
-   * File containing the SSL Key. Default will be the file {@code postgresql.pk8} in {@code $HOME/.postgresql} (*nix)
-   * or {@code %APPDATA%\postgresql} (windows).
-   */
-  SSL_KEY(
-      "sslkey",
-      null,
-      "The location of the client's PKCS#8 SSL key"),
-
-  /**
-   * Parameter governing the use of SSL. The allowed values are {@code disable}, {@code allow},
-   * {@code prefer}, {@code require}, {@code verify-ca}, {@code verify-full}.
-   * If {@code ssl} property is empty or set to {@code true} it implies {@code verify-full}.
-   * Default mode is "require"
-   */
-  SSL_MODE(
-      "sslmode",
-      null,
-      "Parameter governing the use of SSL",
-      false,
-      new String[]{"disable", "allow", "prefer", "require", "verify-ca", "verify-full"}),
-
-  /**
-   * The SSL password to use in the default CallbackHandler.
-   */
-  SSL_PASSWORD(
-      "sslpassword",
-      null,
-      "The password for the client's ssl key (ignored if sslpasswordcallback is set)"),
-
-
-  /**
-   * The classname instantiating {@link javax.security.auth.callback.CallbackHandler} to use.
-   */
-  SSL_PASSWORD_CALLBACK(
-      "sslpasswordcallback",
-      null,
-      "A class, implementing javax.security.auth.callback.CallbackHandler that can handle PasswordCallback for the ssl password."),
-
-  /**
-   * <p>After requesting an upgrade to SSL from the server there are reports of the server not responding due to a failover
-   * without a timeout here, the client can wait forever. This timeout will be set before the request and reset after </p>
-   */
-  SSL_RESPONSE_TIMEOUT(
-      "sslResponseTimeout",
-      "5000",
-      "Time in milliseconds we wait for a response from the server after requesting SSL upgrade"),
-
-  /**
-   * File containing the root certificate when validating server ({@code sslmode} = {@code
-   * verify-ca} or {@code verify-full}). Default will be the file {@code root.crt} in {@code
-   * $HOME/.postgresql} (*nix) or {@code %APPDATA%\postgresql} (windows).
-   */
-  SSL_ROOT_CERT(
-      "sslrootcert",
-      null,
-      "The location of the root certificate for authenticating the server."),
-
-  /**
-   * Specifies the name of the SSPI service class that forms the service class part of the SPN. The
-   * default, {@code POSTGRES}, is almost always correct.
-   */
-  SSPI_SERVICE_CLASS(
-      "sspiServiceClass",
-      "POSTGRES",
-      "The Windows SSPI service class for SPN"),
-
-  /**
-   * Bind String to either {@code unspecified} or {@code varchar}. Default is {@code varchar} for
-   * 8.0+ backends.
-   */
-  STRING_TYPE(
-      "stringtype",
-      null,
-      "The type to bind String parameters as (usually 'varchar', 'unspecified' allows implicit casting to other types)",
-      false,
-      new String[]{"unspecified", "varchar"}),
-
-  TARGET_SERVER_TYPE(
-      "targetServerType",
-      "any",
-      "Specifies what kind of server to connect",
-      false,
-      new String []{"any", "primary", "master", "slave", "secondary", "preferSlave", "preferSecondary", "preferPrimary"}),
-
-  /**
-   * Enable or disable TCP keep-alive. The default is {@code false}.
-   */
-  TCP_KEEP_ALIVE(
-      "tcpKeepAlive",
-      "false",
-      "Enable or disable TCP keep-alive. The default is {@code false}."),
-
-  TCP_NO_DELAY(
-      "tcpNoDelay",
-      "true",
-      "Enable or disable TCP no delay. The default is (@code true}."
-  ),
-  /**
-   * Specifies the length to return for types of unknown length.
-   */
-  UNKNOWN_LENGTH(
-      "unknownLength",
-      Integer.toString(Integer.MAX_VALUE),
-      "Specifies the length to return for types of unknown length"),
-
-  /**
-   * Username to connect to the database as.
-   */
-  USER(
-      "user",
-      null,
-      "Username to connect to the database as.",
-      true),
-
-  /**
-   * Use SPNEGO in SSPI authentication requests.
-   */
-  USE_SPNEGO(
-      "useSpnego",
-      "false",
-      "Use SPNEGO in SSPI authentication requests"),
-
-  /**
-   * Factory class to instantiate factories for XML processing.
-   * The default factory disables external entity processing.
-   * Legacy behavior with external entity processing can be enabled by specifying a value of LEGACY_INSECURE.
-   * Or specify a custom class that implements {@link org.postgresql.xml.PGXmlFactoryFactory}.
-   */
-  XML_FACTORY_FACTORY(
-      "xmlFactoryFactory",
-      "",
-      "Factory class to instantiate factories for XML processing"),
-
-  ;
-
-  private final String name;
-  private final String defaultValue;
-  private final boolean required;
-  private final String description;
-  private final String [] choices;
-
-  PGProperty(String name, String defaultValue, String description) {
-    this(name, defaultValue, description, false);
-  }
-
-  PGProperty(String name, String defaultValue, String description, boolean required) {
-    this(name, defaultValue, description, required, (String[]) null);
-  }
-
-  PGProperty(String name, String defaultValue, String description, boolean required,
-      String [] choices) {
-    this.name = name;
-    this.defaultValue = defaultValue;
-    this.required = required;
-    this.description = description;
-    this.choices = choices;
-  }
-
-  private static final Map<String, PGProperty> PROPS_BY_NAME = new HashMap<>();
-
-  static {
-    for (PGProperty prop : PGProperty.values()) {
-      if (PROPS_BY_NAME.put(prop.getName(), prop) != null) {
-        throw new IllegalStateException("Duplicate PGProperty name: " + prop.getName());
-      }
+    /**
+     * Specifies if number of rows, used during fetching rows of a result set, should be computed
+     * dynamically. Number of rows will be calculated by dividing maxResultBuffer size by max row size
+     * observed so far, rounded down. First fetch will have number of rows declared in
+     * defaultRowFetchSize. Number of rows can be limited by adaptiveFetchMinimum and
+     * adaptiveFetchMaximum. Requires declaring of maxResultBuffer and defaultRowFetchSize to work.
+     * Default value is false.
+     */
+    ADAPTIVE_FETCH(
+            "adaptiveFetch",
+            "false",
+            "Specifies if number of rows fetched in ResultSet should be adaptive to maxResultBuffer and max row size."),
+
+    /**
+     * Specifies the highest number of rows which can be calculated by adaptiveFetch. Requires
+     * adaptiveFetch set to true to work. Default value is -1 (used as infinity).
+     */
+    ADAPTIVE_FETCH_MAXIMUM(
+            "adaptiveFetchMaximum",
+            "-1",
+            "Specifies maximum number of rows used by adaptive fetch."),
+
+    /**
+     * Specifies the lowest number of rows which can be calculated by adaptiveFetch. Requires
+     * adaptiveFetch set to true to work. Default value is 0.
+     */
+    ADAPTIVE_FETCH_MINIMUM(
+            "adaptiveFetchMinimum",
+            "0",
+            "Specifies minimum number of rows used by adaptive fetch."),
+
+    /**
+     * When using the V3 protocol the driver monitors changes in certain server configuration
+     * parameters that should not be touched by end users. The {@code client_encoding} setting is set
+     * by the driver and should not be altered. If the driver detects a change it will abort the
+     * connection.
+     */
+    ALLOW_ENCODING_CHANGES(
+            "allowEncodingChanges",
+            "false",
+            "Allow for changes in client_encoding"),
+
+    /**
+     * The application name (require server version &gt;= 9.0).
+     */
+    APPLICATION_NAME(
+            "ApplicationName",
+            DriverInfo.DRIVER_NAME,
+            "Name of the Application (backend >= 9.0)"),
+
+    /**
+     * Assume the server is at least that version.
+     */
+    ASSUME_MIN_SERVER_VERSION(
+            "assumeMinServerVersion",
+            null,
+            "Assume the server is at least that version"),
+
+    /**
+     * AuthenticationPluginClass
+     */
+
+    AUTHENTICATION_PLUGIN_CLASS_NAME(
+            "authenticationPluginClassName",
+            null,
+            "Name of class which implements AuthenticationPlugin"
+    ),
+
+    /**
+     * Specifies what the driver should do if a query fails. In {@code autosave=always} mode, JDBC driver sets a savepoint before each query,
+     * and rolls back to that savepoint in case of failure. In {@code autosave=never} mode (default), no savepoint dance is made ever.
+     * In {@code autosave=conservative} mode, savepoint is set for each query, however the rollback is done only for rare cases
+     * like 'cached statement cannot change return type' or 'statement XXX is not valid', so the JDBC driver rolls back and retries
+     */
+    AUTOSAVE(
+            "autosave",
+            "never",
+            "Specifies what the driver should do if a query fails. In autosave=always mode, JDBC driver sets a savepoint before each query, "
+                    + "and rolls back to that savepoint in case of failure. In autosave=never mode (default), no savepoint dance is made ever. "
+                    + "In autosave=conservative mode, safepoint is set for each query, however the rollback is done only for rare cases"
+                    + " like 'cached statement cannot change return type' or 'statement XXX is not valid' so JDBC driver rollsback and retries",
+            false,
+            new String[]{"always", "never", "conservative"}),
+
+    /**
+     * Use binary format for sending and receiving data if possible.
+     */
+    BINARY_TRANSFER(
+            "binaryTransfer",
+            "true",
+            "Use binary format for sending and receiving data if possible"),
+
+    /**
+     * Comma separated list of types to disable binary transfer. Either OID numbers or names.
+     * Overrides values in the driver default set and values set with binaryTransferEnable.
+     */
+    BINARY_TRANSFER_DISABLE(
+            "binaryTransferDisable",
+            "",
+            "Comma separated list of types to disable binary transfer. Either OID numbers or names. Overrides values in the driver default set and values set with binaryTransferEnable."),
+
+    /**
+     * Comma separated list of types to enable binary transfer. Either OID numbers or names
+     */
+    BINARY_TRANSFER_ENABLE(
+            "binaryTransferEnable",
+            "",
+            "Comma separated list of types to enable binary transfer. Either OID numbers or names"),
+
+    /**
+     * Cancel command is sent out of band over its own connection, so cancel message can itself get
+     * stuck.
+     * This property controls "connect timeout" and "socket timeout" used for cancel commands.
+     * The timeout is specified in seconds. Default value is 10 seconds.
+     */
+    CANCEL_SIGNAL_TIMEOUT(
+            "cancelSignalTimeout",
+            "10",
+            "The timeout that is used for sending cancel command."),
+
+    /**
+     * Determine whether SAVEPOINTS used in AUTOSAVE will be released per query or not
+     */
+    CLEANUP_SAVEPOINTS(
+            "cleanupSavepoints",
+            "false",
+            "Determine whether SAVEPOINTS used in AUTOSAVE will be released per query or not",
+            false,
+            new String[]{"true", "false"}),
+
+    /**
+     * <p>The timeout value used for socket connect operations. If connecting to the server takes longer
+     * than this value, the connection is broken.</p>
+     *
+     * <p>The timeout is specified in seconds and a value of zero means that it is disabled.</p>
+     */
+    CONNECT_TIMEOUT(
+            "connectTimeout",
+            "10",
+            "The timeout value in seconds used for socket connect operations."),
+
+    /**
+     * Specify the schema (or several schema separated by commas) to be set in the search-path. This schema will be used to resolve
+     * unqualified object names used in statements over this connection.
+     */
+    CURRENT_SCHEMA(
+            "currentSchema",
+            null,
+            "Specify the schema (or several schema separated by commas) to be set in the search-path"),
+
+    /**
+     * Specifies the maximum number of fields to be cached per connection. A value of {@code 0} disables the cache.
+     */
+    DATABASE_METADATA_CACHE_FIELDS(
+            "databaseMetadataCacheFields",
+            "65536",
+            "Specifies the maximum number of fields to be cached per connection. A value of {@code 0} disables the cache."),
+
+    /**
+     * Specifies the maximum size (in megabytes) of fields to be cached per connection. A value of {@code 0} disables the cache.
+     */
+    DATABASE_METADATA_CACHE_FIELDS_MIB(
+            "databaseMetadataCacheFieldsMiB",
+            "5",
+            "Specifies the maximum size (in megabytes) of fields to be cached per connection. A value of {@code 0} disables the cache."),
+
+    /**
+     * Default parameter for {@link java.sql.Statement#getFetchSize()}. A value of {@code 0} means
+     * that all rows will be fetched at once.
+     */
+    DEFAULT_ROW_FETCH_SIZE(
+            "defaultRowFetchSize",
+            "0",
+            "Positive number of rows that should be fetched from the database when more rows are needed for ResultSet by each fetch iteration"),
+
+    /**
+     * Enable optimization that disables column name sanitiser.
+     */
+    DISABLE_COLUMN_SANITISER(
+            "disableColumnSanitiser",
+            "false",
+            "Enable optimization that disables column name sanitiser"),
+
+    /**
+     * Specifies how the driver transforms JDBC escape call syntax into underlying SQL, for invoking procedures or functions. (backend &gt;= 11)
+     * In {@code escapeSyntaxCallMode=select} mode (the default), the driver always uses a SELECT statement (allowing function invocation only).
+     * In {@code escapeSyntaxCallMode=callIfNoReturn} mode, the driver uses a CALL statement (allowing procedure invocation) if there is no return parameter specified, otherwise the driver uses a SELECT statement.
+     * In {@code escapeSyntaxCallMode=call} mode, the driver always uses a CALL statement (allowing procedure invocation only).
+     */
+    ESCAPE_SYNTAX_CALL_MODE(
+            "escapeSyntaxCallMode",
+            "select",
+            "Specifies how the driver transforms JDBC escape call syntax into underlying SQL, for invoking procedures or functions. (backend >= 11)"
+                    + "In escapeSyntaxCallMode=select mode (the default), the driver always uses a SELECT statement (allowing function invocation only)."
+                    + "In escapeSyntaxCallMode=callIfNoReturn mode, the driver uses a CALL statement (allowing procedure invocation) if there is no return parameter specified, otherwise the driver uses a SELECT statement."
+                    + "In escapeSyntaxCallMode=call mode, the driver always uses a CALL statement (allowing procedure invocation only).",
+            false,
+            new String[]{"select", "callIfNoReturn", "call"}),
+
+    /**
+     * Group startup parameters in a transaction
+     * This is important in pool-by-transaction scenarios in order to make sure that all the statements
+     * reach the same connection that is being initialized. All of the startup parameters will be wrapped
+     * in a transaction
+     * Note this is off by default, as pgbouncer in statement mode does not support it.
+     */
+    GROUP_STARTUP_PARAMETERS(
+            "groupStartupParameters",
+            "false",
+            "This is important in pool-by-transaction scenarios in order to make sure that all "
+                    + "the statements reaches the same connection that is being initialized."
+    ),
+
+    GSS_ENC_MODE(
+            "gssEncMode",
+            "allow",
+            "Force Encoded GSS Mode",
+            false,
+            new String[]{"disable", "allow", "prefer", "require"}
+    ),
+
+    /**
+     * Force one of
+     * <ul>
+     * <li>SSPI (Windows transparent single-sign-on)</li>
+     * <li>GSSAPI (Kerberos, via JSSE)</li>
+     * </ul>
+     * to be used when the server requests Kerberos or SSPI authentication.
+     */
+    GSS_LIB(
+            "gsslib",
+            "auto",
+            "Force SSSPI or GSSAPI",
+            false,
+            new String[]{"auto", "sspi", "gssapi"}),
+
+    /**
+     * <p>After requesting an upgrade to SSL from the server, there are reports of the server not responding due to a failover;
+     * without a timeout here, the client can wait forever. The pattern for requesting a GSS-encrypted connection is the same, so we provide the same
+     * timeout mechanism. This timeout will be set before the request and reset after.</p>
+     */
+    GSS_RESPONSE_TIMEOUT(
+            "gssResponseTimeout",
+            "5000",
+            "Time in milliseconds we wait for a response from the server after requesting a GSS upgrade"),
+
+
+    /**
+     * Enable mode to filter out the names of database objects for which the current user has no privileges
+     * granted from appearing in the DatabaseMetaData returned by the driver.
+     */
+    HIDE_UNPRIVILEGED_OBJECTS(
+            "hideUnprivilegedObjects",
+            "false",
+            "Enable hiding of database objects for which the current user has no privileges granted from the DatabaseMetaData"),
+
+    HOST_RECHECK_SECONDS(
+            "hostRecheckSeconds",
+            "10",
+            "Specifies period (seconds) after which the host status is checked again in case it has changed"),
+
+    /**
+     * Specifies the name of the JAAS system or application login configuration.
+     */
+    JAAS_APPLICATION_NAME(
+            "jaasApplicationName",
+            "pgjdbc",
+            "Specifies the name of the JAAS system or application login configuration."),
+
+    /**
+     * Flag to enable/disable obtaining a GSS credential via JAAS login before authenticating.
+     * Useful if setting system property javax.security.auth.useSubjectCredsOnly=false
+     * or using native GSS with system property sun.security.jgss.native=true
+     */
+    JAAS_LOGIN(
+            "jaasLogin",
+            "true",
+            "Login with JAAS before doing GSSAPI authentication"),
+
+    /**
+     * The Kerberos service name to use when authenticating with GSSAPI. This is equivalent to libpq's
+     * PGKRBSRVNAME environment variable.
+     */
+    KERBEROS_SERVER_NAME(
+            "kerberosServerName",
+            null,
+            "The Kerberos service name to use when authenticating with GSSAPI."),
+
+    LOAD_BALANCE_HOSTS(
+            "loadBalanceHosts",
+            "false",
+            "If disabled hosts are connected in the given order. If enabled hosts are chosen randomly from the set of suitable candidates"),
+
+    /**
+     * <p>If this is set then the client side will bind to this address. This is useful if you need
+     * to choose which interface to connect to.</p>
+     */
+    LOCAL_SOCKET_ADDRESS(
+            "localSocketAddress",
+            null,
+            "Local Socket address, if set bind the client side of the socket to this address"),
+
+    /**
+     * This property is no longer used by the driver and will be ignored.
+     *
+     * @deprecated Logging is configured via java.util.logging.
+     */
+    @Deprecated
+    LOGGER_FILE(
+            "loggerFile",
+            null,
+            "File name output of the Logger"),
+
+    /**
+     * This property is no longer used by the driver and will be ignored.
+     *
+     * @deprecated Logging is configured via java.util.logging.
+     */
+    @Deprecated
+    LOGGER_LEVEL(
+            "loggerLevel",
+            null,
+            "Logger level of the driver",
+            false,
+            new String[]{"OFF", "DEBUG", "TRACE"}),
+
+    /**
+     * Specify how long to wait for establishment of a database connection. The timeout is specified
+     * in seconds.
+     */
+    LOGIN_TIMEOUT(
+            "loginTimeout",
+            "0",
+            "Specify how long in seconds to wait for establishment of a database connection."),
+
+    /**
+     * Whether to include full server error detail in exception messages.
+     */
+    LOG_SERVER_ERROR_DETAIL(
+            "logServerErrorDetail",
+            "true",
+            "Include full server error detail in exception messages. If disabled then only the error itself will be included."),
+
+    /**
+     * When connections that are not explicitly closed are garbage collected, log the stacktrace from
+     * the opening of the connection to trace the leak source.
+     */
+    LOG_UNCLOSED_CONNECTIONS(
+            "logUnclosedConnections",
+            "false",
+            "When connections that are not explicitly closed are garbage collected, log the stacktrace from the opening of the connection to trace the leak source"),
+
+    /**
+     * Specifies size of buffer during fetching result set. Can be specified as specified size or
+     * percent of heap memory.
+     */
+    MAX_RESULT_BUFFER(
+            "maxResultBuffer",
+            null,
+            "Specifies size of buffer during fetching result set. Can be specified as specified size or percent of heap memory."),
+
+    /**
+     * Specify 'options' connection initialization parameter.
+     * The value of this parameter may contain spaces and other special characters or their URL representation.
+     */
+    OPTIONS(
+            "options",
+            null,
+            "Specify 'options' connection initialization parameter."),
+
+    /**
+     * Password to use when authenticating.
+     */
+    PASSWORD(
+            "password",
+            null,
+            "Password to use when authenticating.",
+            false),
+
+    /**
+     * Database name to connect to (may be specified directly in the JDBC URL).
+     */
+    PG_DBNAME(
+            "PGDBNAME",
+            null,
+            "Database name to connect to (may be specified directly in the JDBC URL)",
+            true),
+
+    /**
+     * Hostname of the PostgreSQL server (may be specified directly in the JDBC URL).
+     */
+    PG_HOST(
+            "PGHOST",
+            "localhost",
+            "Hostname of the PostgreSQL server (may be specified directly in the JDBC URL)",
+            false),
+
+    /**
+     * Port of the PostgreSQL server (may be specified directly in the JDBC URL).
+     */
+    PG_PORT(
+            "PGPORT",
+            "5432",
+            "Port of the PostgreSQL server (may be specified directly in the JDBC URL)"),
+
+    /**
+     * <p>Specifies which mode is used to execute queries to database: simple means ('Q' execute, no parse, no bind, text mode only),
+     * extended means always use bind/execute messages, extendedForPrepared means extended for prepared statements only,
+     * extendedCacheEverything means use extended protocol and try cache every statement (including Statement.execute(String sql)) in a query cache.</p>
+     *
+     * <p>This mode is meant for debugging purposes and/or for cases when extended protocol cannot be used (e.g. logical replication protocol)</p>
+     */
+    PREFER_QUERY_MODE(
+            "preferQueryMode",
+            "extended",
+            "Specifies which mode is used to execute queries to database: simple means ('Q' execute, no parse, no bind, text mode only), "
+                    + "extended means always use bind/execute messages, extendedForPrepared means extended for prepared statements only, "
+                    + "extendedCacheEverything means use extended protocol and try cache every statement (including Statement.execute(String sql)) in a query cache.", false,
+            new String[]{"extended", "extendedForPrepared", "extendedCacheEverything", "simple"}),
+
+    /**
+     * Specifies the maximum number of entries in cache of prepared statements. A value of {@code 0}
+     * disables the cache.
+     */
+    PREPARED_STATEMENT_CACHE_QUERIES(
+            "preparedStatementCacheQueries",
+            "256",
+            "Specifies the maximum number of entries in per-connection cache of prepared statements. A value of {@code 0} disables the cache."),
+
+    /**
+     * Specifies the maximum size (in megabytes) of the prepared statement cache. A value of {@code 0}
+     * disables the cache.
+     */
+    PREPARED_STATEMENT_CACHE_SIZE_MIB(
+            "preparedStatementCacheSizeMiB",
+            "5",
+            "Specifies the maximum size (in megabytes) of a per-connection prepared statement cache. A value of {@code 0} disables the cache."),
+
+    /**
+     * Sets the default threshold for enabling server-side prepare. A value of {@code -1} stands for
+     * forceBinary
+     */
+    PREPARE_THRESHOLD(
+            "prepareThreshold",
+            "5",
+            "Statement prepare threshold. A value of {@code -1} stands for forceBinary"),
+
+    /**
+     * Force use of a particular protocol version when connecting, if set, disables protocol version
+     * fallback.
+     */
+    PROTOCOL_VERSION(
+            "protocolVersion",
+            null,
+            "Force use of a particular protocol version when connecting, currently only version 3 is supported.",
+            false,
+            new String[]{"3"}),
+
+    /**
+     * Quote returning columns.
+     * There are some ORM's that quote everything, including returning columns
+     * If we quote them, then we end up sending ""colname"" to the backend
+     * which will not be found
+     */
+    QUOTE_RETURNING_IDENTIFIERS(
+            "quoteReturningIdentifiers",
+            "true",
+            "Quote identifiers provided in returning array",
+            false),
+    /**
+     * Puts this connection in read-only mode.
+     */
+    READ_ONLY(
+            "readOnly",
+            "false",
+            "Puts this connection in read-only mode"),
+
+    /**
+     * Connection parameter to control behavior when
+     * {@link Connection#setReadOnly(boolean)} is set to {@code true}.
+     */
+    READ_ONLY_MODE(
+            "readOnlyMode",
+            "transaction",
+            "Controls the behavior when a connection is set to be read only, one of 'ignore', 'transaction', or 'always' "
+                    + "When 'ignore', setting readOnly has no effect. "
+                    + "When 'transaction' setting readOnly to 'true' will cause transactions to BEGIN READ ONLY if autocommit is 'false'. "
+                    + "When 'always' setting readOnly to 'true' will set the session to READ ONLY if autoCommit is 'true' "
+                    + "and the transaction to BEGIN READ ONLY if autocommit is 'false'.",
+            false,
+            new String[]{"ignore", "transaction", "always"}),
+
+    /**
+     * Socket read buffer size (SO_RECVBUF). A value of {@code -1}, which is the default, means system
+     * default.
+     */
+    RECEIVE_BUFFER_SIZE(
+            "receiveBufferSize",
+            "-1",
+            "Socket read buffer size"),
+
+    /**
+     * <p>Connection parameter passed in the startup message. This parameter accepts two values; "true"
+     * and "database". Passing "true" tells the backend to go into walsender mode, wherein a small set
+     * of replication commands can be issued instead of SQL statements. Only the simple query protocol
+     * can be used in walsender mode. Passing "database" as the value instructs walsender to connect
+     * to the database specified in the dbname parameter, which will allow the connection to be used
+     * for logical replication from that database.</p>
+     * <p>Parameter should be used together with {@link PGProperty#ASSUME_MIN_SERVER_VERSION} with
+     * parameter &gt;= 9.4 (backend &gt;= 9.4)</p>
+     */
+    REPLICATION(
+            "replication",
+            null,
+            "Connection parameter passed in startup message, one of 'true' or 'database' "
+                    + "Passing 'true' tells the backend to go into walsender mode, "
+                    + "wherein a small set of replication commands can be issued instead of SQL statements. "
+                    + "Only the simple query protocol can be used in walsender mode. "
+                    + "Passing 'database' as the value instructs walsender to connect "
+                    + "to the database specified in the dbname parameter, "
+                    + "which will allow the connection to be used for logical replication "
+                    + "from that database. "
+                    + "(backend >= 9.4)"),
+
+    /**
+     * Configure optimization to enable batch insert re-writing.
+     */
+    REWRITE_BATCHED_INSERTS(
+            "reWriteBatchedInserts",
+            "false",
+            "Enable optimization to rewrite and collapse compatible INSERT statements that are batched."),
+
+    /**
+     * Socket write buffer size (SO_SNDBUF). A value of {@code -1}, which is the default, means system
+     * default.
+     */
+    SEND_BUFFER_SIZE(
+            "sendBufferSize",
+            "-1",
+            "Socket write buffer size"),
+
+    /**
+     * Service name to use for additional parameters. It specifies a service name in "pg_service
+     * .conf" that holds additional connection parameters. This allows applications to specify only
+     * a service name so connection parameters can be centrally maintained.
+     */
+    SERVICE(
+            "service",
+            null,
+            "Service name to be searched in pg_service.conf resource"),
+
+    /**
+     * Socket factory used to create socket. A null value, which is the default, means system default.
+     */
+    SOCKET_FACTORY(
+            "socketFactory",
+            null,
+            "Specify a socket factory for socket creation"),
+
+    /**
+     * The String argument to give to the constructor of the Socket Factory.
+     */
+    SOCKET_FACTORY_ARG(
+            "socketFactoryArg",
+            null,
+            "Argument forwarded to constructor of SocketFactory class."),
+
+    /**
+     * The timeout value used for socket read operations. If reading from the server takes longer than
+     * this value, the connection is closed. This can be used as both a brute force global query
+     * timeout and a method of detecting network problems. The timeout is specified in seconds and a
+     * value of zero means that it is disabled.
+     */
+    SOCKET_TIMEOUT(
+            "socketTimeout",
+            "0",
+            "The timeout value in seconds max(2147484) used for socket read operations."),
+
+    /**
+     * Control use of SSL: empty or {@code true} values imply {@code sslmode==verify-full}
+     */
+    SSL(
+            "ssl",
+            null,
+            "Control use of SSL (any non-null value causes SSL to be required)"),
+
+    /**
+     * File containing the SSL Certificate. Default will be the file {@code postgresql.crt} in {@code
+     * $HOME/.postgresql} (*nix) or {@code %APPDATA%\postgresql} (windows).
+     */
+    SSL_CERT(
+            "sslcert",
+            null,
+            "The location of the client's SSL certificate"),
+
+    /**
+     * Classname of the SSL Factory to use (instance of {@link javax.net.ssl.SSLSocketFactory}).
+     */
+    SSL_FACTORY(
+            "sslfactory",
+            "org.postgresql.ssl.LibPQFactory",
+            "Provide a SSLSocketFactory class when using SSL."),
+
+    /**
+     * The String argument to give to the constructor of the SSL Factory.
+     *
+     * @deprecated use {@code ..Factory(Properties)} constructor.
+     */
+    @Deprecated
+    SSL_FACTORY_ARG(
+            "sslfactoryarg",
+            null,
+            "Argument forwarded to constructor of SSLSocketFactory class."),
+
+    /**
+     * Classname of the SSL HostnameVerifier to use (instance of {@link javax.net.ssl.HostnameVerifier}).
+     */
+    SSL_HOSTNAME_VERIFIER(
+            "sslhostnameverifier",
+            null,
+            "A class, implementing javax.net.ssl.HostnameVerifier that can verify the server"),
+
+    /**
+     * File containing the SSL Key. Default will be the file {@code postgresql.pk8} in {@code $HOME/.postgresql} (*nix)
+     * or {@code %APPDATA%\postgresql} (windows).
+     */
+    SSL_KEY(
+            "sslkey",
+            null,
+            "The location of the client's PKCS#8 SSL key"),
+
+    /**
+     * Parameter governing the use of SSL. The allowed values are {@code disable}, {@code allow},
+     * {@code prefer}, {@code require}, {@code verify-ca}, {@code verify-full}.
+     * If {@code ssl} property is empty or set to {@code true} it implies {@code verify-full}.
+     * Default mode is "require"
+     */
+    SSL_MODE(
+            "sslmode",
+            null,
+            "Parameter governing the use of SSL",
+            false,
+            new String[]{"disable", "allow", "prefer", "require", "verify-ca", "verify-full"}),
+
+    /**
+     * The SSL password to use in the default CallbackHandler.
+     */
+    SSL_PASSWORD(
+            "sslpassword",
+            null,
+            "The password for the client's ssl key (ignored if sslpasswordcallback is set)"),
+
+
+    /**
+     * The classname instantiating {@link javax.security.auth.callback.CallbackHandler} to use.
+     */
+    SSL_PASSWORD_CALLBACK(
+            "sslpasswordcallback",
+            null,
+            "A class, implementing javax.security.auth.callback.CallbackHandler that can handle PasswordCallback for the ssl password."),
+
+    /**
+     * <p>After requesting an upgrade to SSL from the server there are reports of the server not responding due to a failover
+     * without a timeout here, the client can wait forever. This timeout will be set before the request and reset after </p>
+     */
+    SSL_RESPONSE_TIMEOUT(
+            "sslResponseTimeout",
+            "5000",
+            "Time in milliseconds we wait for a response from the server after requesting SSL upgrade"),
+
+    /**
+     * File containing the root certificate when validating server ({@code sslmode} = {@code
+     * verify-ca} or {@code verify-full}). Default will be the file {@code root.crt} in {@code
+     * $HOME/.postgresql} (*nix) or {@code %APPDATA%\postgresql} (windows).
+     */
+    SSL_ROOT_CERT(
+            "sslrootcert",
+            null,
+            "The location of the root certificate for authenticating the server."),
+
+    /**
+     * Specifies the name of the SSPI service class that forms the service class part of the SPN. The
+     * default, {@code POSTGRES}, is almost always correct.
+     */
+    SSPI_SERVICE_CLASS(
+            "sspiServiceClass",
+            "POSTGRES",
+            "The Windows SSPI service class for SPN"),
+
+    /**
+     * Bind String to either {@code unspecified} or {@code varchar}. Default is {@code varchar} for
+     * 8.0+ backends.
+     */
+    STRING_TYPE(
+            "stringtype",
+            null,
+            "The type to bind String parameters as (usually 'varchar', 'unspecified' allows implicit casting to other types)",
+            false,
+            new String[]{"unspecified", "varchar"}),
+
+    TARGET_SERVER_TYPE(
+            "targetServerType",
+            "any",
+            "Specifies what kind of server to connect",
+            false,
+            new String[]{"any", "primary", "master", "slave", "secondary", "preferSlave", "preferSecondary", "preferPrimary"}),
+
+    /**
+     * Enable or disable TCP keep-alive. The default is {@code false}.
+     */
+    TCP_KEEP_ALIVE(
+            "tcpKeepAlive",
+            "false",
+            "Enable or disable TCP keep-alive. The default is {@code false}."),
+
+    TCP_NO_DELAY(
+            "tcpNoDelay",
+            "true",
+            "Enable or disable TCP no delay. The default is {@code true}."
+    ),
+    /**
+     * Specifies the length to return for types of unknown length.
+     */
+    UNKNOWN_LENGTH(
+            "unknownLength",
+            Integer.toString(Integer.MAX_VALUE),
+            "Specifies the length to return for types of unknown length"),
+
+    /**
+     * Username to connect to the database as.
+     */
+    USER(
+            "user",
+            null,
+            "Username to connect to the database as.",
+            true),
+
+    /**
+     * Use SPNEGO in SSPI authentication requests.
+     */
+    USE_SPNEGO(
+            "useSpnego",
+            "false",
+            "Use SPNEGO in SSPI authentication requests"),
+
+    /**
+     * Factory class to instantiate factories for XML processing.
+     * The default factory disables external entity processing.
+     * Legacy behavior with external entity processing can be enabled by specifying a value of LEGACY_INSECURE.
+     * Or specify a custom class that implements {@link org.postgresql.xml.PGXmlFactoryFactory}.
+     */
+    XML_FACTORY_FACTORY(
+            "xmlFactoryFactory",
+            "",
+            "Factory class to instantiate factories for XML processing"),
+
+    ;
+
+    private static final Map<String, PGProperty> PROPS_BY_NAME = new HashMap<>();
+
+    static {
+        for (PGProperty prop : PGProperty.values()) {
+            if (PROPS_BY_NAME.put(prop.getName(), prop) != null) {
+                throw new IllegalStateException("Duplicate PGProperty name: " + prop.getName());
+            }
+        }
     }
-  }
 
-  /**
-   * Returns the name of the connection parameter. The name is the key that must be used in JDBC URL
-   * or in Driver properties
-   *
-   * @return the name of the connection parameter
-   */
-  public String getName() {
-    return name;
-  }
+    private final String name;
+    private final String defaultValue;
+    private final boolean required;
+    private final String description;
+    private final String[] choices;
 
-  /**
-   * Returns the default value for this connection parameter.
-   *
-   * @return the default value for this connection parameter or null
-   */
-  public String getDefaultValue() {
-    return defaultValue;
-  }
-
-  /**
-   * Returns whether this parameter is required.
-   *
-   * @return whether this parameter is required
-   */
-  public boolean isRequired() {
-    return required;
-  }
-
-  /**
-   * Returns the description for this connection parameter.
-   *
-   * @return the description for this connection parameter
-   */
-  public String getDescription() {
-    return description;
-  }
-
-  /**
-   * Returns the available values for this connection parameter.
-   *
-   * @return the available values for this connection parameter or null
-   */
-  public String [] getChoices() {
-    return choices;
-  }
-
-  /**
-   * Returns the value of the connection parameter from the given {@link Properties} or the
-   * default value.
-   *
-   * @param properties properties to take actual value from
-   * @return evaluated value for this connection parameter
-   */
-  public String getOrDefault(Properties properties) {
-    return properties.getProperty(name, defaultValue);
-  }
-
-  /**
-   * Returns the value of the connection parameter from the given {@link Properties} or the
-   * default value
-   * @param properties properties to take actual value from
-   * @return evaluated value for this connection parameter or null
-   * @deprecated use {@link #getOrDefault(Properties)} instead
-   */
-  @Deprecated
-  public String get(Properties properties) {
-    return getOrDefault(properties);
-  }
-
-  /**
-   * Returns the value of the connection parameter from the given {@link Properties} or null if there
-   * is no default value
-   * @param properties properties object to get value from
-   * @return evaluated value for this connection parameter
-   */
-  public String getOrNull(Properties properties) {
-    return properties.getProperty(name);
-  }
-
-  /**
-   * Set the value for this connection parameter in the given {@link Properties}.
-   *
-   * @param properties properties in which the value should be set
-   * @param value value for this connection parameter
-   */
-  public void set(Properties properties, String value) {
-    if (value == null) {
-      properties.remove(name);
-    } else {
-      properties.setProperty(name, value);
+    PGProperty(String name, String defaultValue, String description) {
+        this(name, defaultValue, description, false);
     }
-  }
 
-  /**
-   * Return the boolean value for this connection parameter in the given {@link Properties}.
-   *
-   * @param properties properties to take actual value from
-   * @return evaluated value for this connection parameter converted to boolean
-   */
-  public boolean getBoolean(Properties properties) {
-    return Boolean.parseBoolean(getOrDefault(properties));
-  }
-
-  /**
-   * Return the int value for this connection parameter in the given {@link Properties}. Prefer the
-   * use of {@link #getInt(Properties)} anywhere you can throw an {@link java.sql.SQLException}.
-   *
-   * @param properties properties to take actual value from
-   * @return evaluated value for this connection parameter converted to int
-   * @throws NumberFormatException if it cannot be converted to int.
-   */
-  @SuppressWarnings("nullness:argument")
-  public int getIntNoCheck(Properties properties) {
-    String value = getOrDefault(properties);
-    return Integer.parseInt(value);
-  }
-
-  /**
-   * Return the int value for this connection parameter in the given {@link Properties}.
-   *
-   * @param properties properties to take actual value from
-   * @return evaluated value for this connection parameter converted to int
-   * @throws PSQLException if it cannot be converted to int.
-   */
-  @SuppressWarnings("nullness:argument")
-  public int getInt(Properties properties) throws PSQLException {
-    String value = getOrDefault(properties);
-    try {
-      return Integer.parseInt(value);
-    } catch (NumberFormatException nfe) {
-      throw new PSQLException(GT.tr("{0} parameter value must be an integer but was: {1}",
-          getName(), value), PSQLState.INVALID_PARAMETER_VALUE, nfe);
+    PGProperty(String name, String defaultValue, String description, boolean required) {
+        this(name, defaultValue, description, required, (String[]) null);
     }
-  }
 
-  /**
-   * Return the {@link Integer} value for this connection parameter in the given {@link Properties}.
-   *
-   * @param properties properties to take actual value from
-   * @return evaluated value for this connection parameter converted to Integer or null
-   * @throws PSQLException if unable to parse property as integer
-   */
-  public Integer getInteger(Properties properties) throws PSQLException {
-    String value = getOrDefault(properties);
-    if (value == null) {
-      return null;
+    PGProperty(String name, String defaultValue, String description, boolean required,
+               String[] choices) {
+        this.name = name;
+        this.defaultValue = defaultValue;
+        this.required = required;
+        this.description = description;
+        this.choices = choices;
     }
-    try {
-      return Integer.parseInt(value);
-    } catch (NumberFormatException nfe) {
-      throw new PSQLException(GT.tr("{0} parameter value must be an integer but was: {1}",
-          getName(), value), PSQLState.INVALID_PARAMETER_VALUE, nfe);
+
+    public static PGProperty forName(String name) {
+        return PROPS_BY_NAME.get(name);
     }
-  }
 
-  /**
-   * Set the boolean value for this connection parameter in the given {@link Properties}.
-   *
-   * @param properties properties in which the value should be set
-   * @param value boolean value for this connection parameter
-   */
-  public void set(Properties properties, boolean value) {
-    properties.setProperty(name, Boolean.toString(value));
-  }
-
-  /**
-   * Set the int value for this connection parameter in the given {@link Properties}.
-   *
-   * @param properties properties in which the value should be set
-   * @param value int value for this connection parameter
-   */
-  public void set(Properties properties, int value) {
-    properties.setProperty(name, Integer.toString(value));
-  }
-
-  /**
-   * Test whether this property is present in the given {@link Properties}.
-   *
-   * @param properties set of properties to check current in
-   * @return true if the parameter is specified in the given properties
-   */
-  public boolean isPresent(Properties properties) {
-    return getSetString(properties) != null;
-  }
-
-  /**
-   * Convert this connection parameter and the value read from the given {@link Properties} into a
-   * {@link DriverPropertyInfo}.
-   *
-   * @param properties properties to take actual value from
-   * @return a DriverPropertyInfo representing this connection parameter
-   */
-  public DriverPropertyInfo toDriverPropertyInfo(Properties properties) {
-    DriverPropertyInfo propertyInfo = new DriverPropertyInfo(name, getOrDefault(properties));
-    propertyInfo.required = required;
-    propertyInfo.description = description;
-    propertyInfo.choices = choices;
-    return propertyInfo;
-  }
-
-  public static PGProperty forName(String name) {
-    return PROPS_BY_NAME.get(name);
-  }
-
-  /**
-   * Return the property if exists but avoiding the default. Allowing the caller to detect the lack
-   * of a property.
-   *
-   * @param properties properties bundle
-   * @return the value of a set property
-   */
-  public String getSetString(Properties properties) {
-    Object o = properties.get(name);
-    if (o instanceof String) {
-      return (String) o;
+    /**
+     * Returns the name of the connection parameter. The name is the key that must be used in JDBC URL
+     * or in Driver properties
+     *
+     * @return the name of the connection parameter
+     */
+    public String getName() {
+        return name;
+    }
+
+    /**
+     * Returns the default value for this connection parameter.
+     *
+     * @return the default value for this connection parameter or null
+     */
+    public String getDefaultValue() {
+        return defaultValue;
+    }
+
+    /**
+     * Returns whether this parameter is required.
+     *
+     * @return whether this parameter is required
+     */
+    public boolean isRequired() {
+        return required;
+    }
+
+    /**
+     * Returns the description for this connection parameter.
+     *
+     * @return the description for this connection parameter
+     */
+    public String getDescription() {
+        return description;
+    }
+
+    /**
+     * Returns the available values for this connection parameter.
+     *
+     * @return the available values for this connection parameter or null
+     */
+    public String[] getChoices() {
+        return choices;
+    }
+
+    /**
+     * Returns the value of the connection parameter from the given {@link Properties} or the
+     * default value.
+     *
+     * @param properties properties to take actual value from
+     * @return evaluated value for this connection parameter
+     */
+    public String getOrDefault(Properties properties) {
+        return properties.getProperty(name, defaultValue);
+    }
+
+    /**
+     * Returns the value of the connection parameter from the given {@link Properties} or the
+     * default value
+     *
+     * @param properties properties to take actual value from
+     * @return evaluated value for this connection parameter or null
+     * @deprecated use {@link #getOrDefault(Properties)} instead
+     */
+    @Deprecated
+    public String get(Properties properties) {
+        return getOrDefault(properties);
+    }
+
+    /**
+     * Returns the value of the connection parameter from the given {@link Properties} or null if there
+     * is no default value
+     *
+     * @param properties properties object to get value from
+     * @return evaluated value for this connection parameter
+     */
+    public String getOrNull(Properties properties) {
+        return properties.getProperty(name);
+    }
+
+    /**
+     * Set the value for this connection parameter in the given {@link Properties}.
+     *
+     * @param properties properties in which the value should be set
+     * @param value      value for this connection parameter
+     */
+    public void set(Properties properties, String value) {
+        if (value == null) {
+            properties.remove(name);
+        } else {
+            properties.setProperty(name, value);
+        }
+    }
+
+    /**
+     * Return the boolean value for this connection parameter in the given {@link Properties}.
+     *
+     * @param properties properties to take actual value from
+     * @return evaluated value for this connection parameter converted to boolean
+     */
+    public boolean getBoolean(Properties properties) {
+        return Boolean.parseBoolean(getOrDefault(properties));
+    }
+
+    /**
+     * Return the int value for this connection parameter in the given {@link Properties}. Prefer the
+     * use of {@link #getInt(Properties)} anywhere you can throw an {@link java.sql.SQLException}.
+     *
+     * @param properties properties to take actual value from
+     * @return evaluated value for this connection parameter converted to int
+     * @throws NumberFormatException if it cannot be converted to int.
+     */
+    @SuppressWarnings("nullness:argument")
+    public int getIntNoCheck(Properties properties) {
+        String value = getOrDefault(properties);
+        return Integer.parseInt(value);
+    }
+
+    /**
+     * Return the int value for this connection parameter in the given {@link Properties}.
+     *
+     * @param properties properties to take actual value from
+     * @return evaluated value for this connection parameter converted to int
+     * @throws PSQLException if it cannot be converted to int.
+     */
+    @SuppressWarnings("nullness:argument")
+    public int getInt(Properties properties) throws PSQLException {
+        String value = getOrDefault(properties);
+        try {
+            return Integer.parseInt(value);
+        } catch (NumberFormatException nfe) {
+            throw new PSQLException(GT.tr("{0} parameter value must be an integer but was: {1}",
+                    getName(), value), PSQLState.INVALID_PARAMETER_VALUE, nfe);
+        }
+    }
+
+    /**
+     * Return the {@link Integer} value for this connection parameter in the given {@link Properties}.
+     *
+     * @param properties properties to take actual value from
+     * @return evaluated value for this connection parameter converted to Integer or null
+     * @throws PSQLException if unable to parse property as integer
+     */
+    public Integer getInteger(Properties properties) throws PSQLException {
+        String value = getOrDefault(properties);
+        if (value == null) {
+            return null;
+        }
+        try {
+            return Integer.parseInt(value);
+        } catch (NumberFormatException nfe) {
+            throw new PSQLException(GT.tr("{0} parameter value must be an integer but was: {1}",
+                    getName(), value), PSQLState.INVALID_PARAMETER_VALUE, nfe);
+        }
+    }
+
+    /**
+     * Set the boolean value for this connection parameter in the given {@link Properties}.
+     *
+     * @param properties properties in which the value should be set
+     * @param value      boolean value for this connection parameter
+     */
+    public void set(Properties properties, boolean value) {
+        properties.setProperty(name, Boolean.toString(value));
+    }
+
+    /**
+     * Set the int value for this connection parameter in the given {@link Properties}.
+     *
+     * @param properties properties in which the value should be set
+     * @param value      int value for this connection parameter
+     */
+    public void set(Properties properties, int value) {
+        properties.setProperty(name, Integer.toString(value));
+    }
+
+    /**
+     * Test whether this property is present in the given {@link Properties}.
+     *
+     * @param properties set of properties to check current in
+     * @return true if the parameter is specified in the given properties
+     */
+    public boolean isPresent(Properties properties) {
+        return getSetString(properties) != null;
+    }
+
+    /**
+     * Convert this connection parameter and the value read from the given {@link Properties} into a
+     * {@link DriverPropertyInfo}.
+     *
+     * @param properties properties to take actual value from
+     * @return a DriverPropertyInfo representing this connection parameter
+     */
+    public DriverPropertyInfo toDriverPropertyInfo(Properties properties) {
+        DriverPropertyInfo propertyInfo = new DriverPropertyInfo(name, getOrDefault(properties));
+        propertyInfo.required = required;
+        propertyInfo.description = description;
+        propertyInfo.choices = choices;
+        return propertyInfo;
+    }
+
+    /**
+     * Return the property if exists but avoiding the default. Allowing the caller to detect the lack
+     * of a property.
+     *
+     * @param properties properties bundle
+     * @return the value of a set property
+     */
+    public String getSetString(Properties properties) {
+        Object o = properties.get(name);
+        if (o instanceof String) {
+            return (String) o;
+        }
+        return null;
     }
-    return null;
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/PGRefCursorResultSet.java b/pgjdbc/src/main/java/org/postgresql/PGRefCursorResultSet.java
index 8fc678b..e1692a2 100644
--- a/pgjdbc/src/main/java/org/postgresql/PGRefCursorResultSet.java
+++ b/pgjdbc/src/main/java/org/postgresql/PGRefCursorResultSet.java
@@ -9,17 +9,17 @@ package org.postgresql;
  * A ref cursor based result set.
  *
  * @deprecated As of 8.0, this interface is only present for backwards- compatibility purposes. New
- *             code should call getString() on the ResultSet that contains the refcursor to obtain
- *             the underlying cursor name.
+ * code should call getString() on the ResultSet that contains the refcursor to obtain
+ * the underlying cursor name.
  */
 @Deprecated
 public interface PGRefCursorResultSet {
 
-  /**
-   * @return the name of the cursor.
-   * @deprecated As of 8.0, replaced with calling getString() on the ResultSet that this ResultSet
-   *             was obtained from.
-   */
-  @Deprecated
-  String getRefCursor();
+    /**
+     * @return the name of the cursor.
+     * @deprecated As of 8.0, replaced with calling getString() on the ResultSet that this ResultSet
+     * was obtained from.
+     */
+    @Deprecated
+    String getRefCursor();
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/PGResultSetMetaData.java b/pgjdbc/src/main/java/org/postgresql/PGResultSetMetaData.java
index b0575cc..bd51047 100644
--- a/pgjdbc/src/main/java/org/postgresql/PGResultSetMetaData.java
+++ b/pgjdbc/src/main/java/org/postgresql/PGResultSetMetaData.java
@@ -11,45 +11,45 @@ import java.sql.SQLException;
 
 public interface PGResultSetMetaData {
 
-  /**
-   * Returns the underlying column name of a query result, or "" if it is unable to be determined.
-   *
-   * @param column column position (1-based)
-   * @return underlying column name of a query result
-   * @throws SQLException if something wrong happens
-   * @since 8.0
-   */
-  String getBaseColumnName(int column) throws SQLException;
+    /**
+     * Returns the underlying column name of a query result, or "" if it is unable to be determined.
+     *
+     * @param column column position (1-based)
+     * @return underlying column name of a query result
+     * @throws SQLException if something wrong happens
+     * @since 8.0
+     */
+    String getBaseColumnName(int column) throws SQLException;
 
-  /**
-   * Returns the underlying table name of query result, or "" if it is unable to be determined.
-   *
-   * @param column column position (1-based)
-   * @return underlying table name of query result
-   * @throws SQLException if something wrong happens
-   * @since 8.0
-   */
-  String getBaseTableName(int column) throws SQLException;
+    /**
+     * Returns the underlying table name of query result, or "" if it is unable to be determined.
+     *
+     * @param column column position (1-based)
+     * @return underlying table name of query result
+     * @throws SQLException if something wrong happens
+     * @since 8.0
+     */
+    String getBaseTableName(int column) throws SQLException;
 
-  /**
-   * Returns the underlying schema name of query result, or "" if it is unable to be determined.
-   *
-   * @param column column position (1-based)
-   * @return underlying schema name of query result
-   * @throws SQLException if something wrong happens
-   * @since 8.0
-   */
-  String getBaseSchemaName(int column) throws SQLException;
+    /**
+     * Returns the underlying schema name of query result, or "" if it is unable to be determined.
+     *
+     * @param column column position (1-based)
+     * @return underlying schema name of query result
+     * @throws SQLException if something wrong happens
+     * @since 8.0
+     */
+    String getBaseSchemaName(int column) throws SQLException;
 
-  /**
-   * Is a column Text or Binary?
-   *
-   * @param column column position (1-based)
-   * @return 0 if column data format is TEXT, or 1 if BINARY
-   * @throws SQLException if something wrong happens
-   * @see Field#BINARY_FORMAT
-   * @see Field#TEXT_FORMAT
-   * @since 9.4
-   */
-  int getFormat(int column) throws SQLException;
+    /**
+     * Is a column Text or Binary?
+     *
+     * @param column column position (1-based)
+     * @return 0 if column data format is TEXT, or 1 if BINARY
+     * @throws SQLException if something wrong happens
+     * @see Field#BINARY_FORMAT
+     * @see Field#TEXT_FORMAT
+     * @since 9.4
+     */
+    int getFormat(int column) throws SQLException;
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/PGStatement.java b/pgjdbc/src/main/java/org/postgresql/PGStatement.java
index 8a79ba9..521125a 100644
--- a/pgjdbc/src/main/java/org/postgresql/PGStatement.java
+++ b/pgjdbc/src/main/java/org/postgresql/PGStatement.java
@@ -12,86 +12,86 @@ import java.sql.SQLException;
  * constructed by the PostgreSQL driver implement PGStatement.
  */
 public interface PGStatement {
-  // We can't use Long.MAX_VALUE or Long.MIN_VALUE for java.sql.date
-  // because this would break the 'normalization contract' of the
-  // java.sql.Date API.
-  // The follow values are the nearest MAX/MIN values with hour,
-  // minute, second, millisecond set to 0 - this is used for
-  // -infinity / infinity representation in Java
-  long DATE_POSITIVE_INFINITY = 9223372036825200000L;
-  long DATE_NEGATIVE_INFINITY = -9223372036832400000L;
-  long DATE_POSITIVE_SMALLER_INFINITY = 185543533774800000L;
-  long DATE_NEGATIVE_SMALLER_INFINITY = -185543533774800000L;
+    // We can't use Long.MAX_VALUE or Long.MIN_VALUE for java.sql.date
+    // because this would break the 'normalization contract' of the
+    // java.sql.Date API.
+    // The following values are the nearest MAX/MIN values with hour,
+    // minute, second, millisecond set to 0 - this is used for
+    // -infinity / infinity representation in Java
+    long DATE_POSITIVE_INFINITY = 9223372036825200000L;
+    long DATE_NEGATIVE_INFINITY = -9223372036832400000L;
+    long DATE_POSITIVE_SMALLER_INFINITY = 185543533774800000L;
+    long DATE_NEGATIVE_SMALLER_INFINITY = -185543533774800000L;
 
-  /**
-   * Returns the Last inserted/updated oid.
-   *
-   * @return OID of last insert
-   * @throws SQLException if something goes wrong
-   * @since 7.3
-   */
-  long getLastOID() throws SQLException;
+    /**
+     * Returns the Last inserted/updated oid.
+     *
+     * @return OID of last insert
+     * @throws SQLException if something goes wrong
+     * @since 7.3
+     */
+    long getLastOID() throws SQLException;
 
-  /**
-   * Turn on the use of prepared statements in the server (server side prepared statements are
-   * unrelated to jdbc PreparedStatements) As of build 302, this method is equivalent to
-   * <code>setPrepareThreshold(1)</code>.
-   *
-   * @param flag use server prepare
-   * @throws SQLException if something goes wrong
-   * @since 7.3
-   * @deprecated As of build 302, replaced by {@link #setPrepareThreshold(int)}
-   */
-  @Deprecated
-  void setUseServerPrepare(boolean flag) throws SQLException;
+    /**
+     * Checks if this statement will be executed as a server-prepared statement. A return value of
+     * <code>true</code> indicates that the next execution of the statement will be done as a
+     * server-prepared statement, assuming the underlying protocol supports it.
+     *
+     * @return true if the next reuse of this statement will use a server-prepared statement
+     */
+    boolean isUseServerPrepare();
 
-  /**
-   * Checks if this statement will be executed as a server-prepared statement. A return value of
-   * <code>true</code> indicates that the next execution of the statement will be done as a
-   * server-prepared statement, assuming the underlying protocol supports it.
-   *
-   * @return true if the next reuse of this statement will use a server-prepared statement
-   */
-  boolean isUseServerPrepare();
+    /**
+     * Turn on the use of prepared statements in the server (server side prepared statements are
+     * unrelated to jdbc PreparedStatements) As of build 302, this method is equivalent to
+     * <code>setPrepareThreshold(1)</code>.
+     *
+     * @param flag use server prepare
+     * @throws SQLException if something goes wrong
+     * @since 7.3
+     * @deprecated As of build 302, replaced by {@link #setPrepareThreshold(int)}
+     */
+    @Deprecated
+    void setUseServerPrepare(boolean flag) throws SQLException;
 
-  /**
-   * <p>Sets the reuse threshold for using server-prepared statements.</p>
-   *
-   * <p>If <code>threshold</code> is a non-zero value N, the Nth and subsequent reuses of a
-   * PreparedStatement will use server-side prepare.</p>
-   *
-   * <p>If <code>threshold</code> is zero, server-side prepare will not be used.</p>
-   *
-   * <p>The reuse threshold is only used by PreparedStatement and CallableStatement objects; it is
-   * ignored for plain Statements.</p>
-   *
-   * @param threshold the new threshold for this statement
-   * @throws SQLException if an exception occurs while changing the threshold
-   * @since build 302
-   */
-  void setPrepareThreshold(int threshold) throws SQLException;
+    /**
+     * Gets the server-side prepare reuse threshold in use for this statement.
+     *
+     * @return the current threshold
+     * @see #setPrepareThreshold(int)
+     * @since build 302
+     */
+    int getPrepareThreshold();
 
-  /**
-   * Gets the server-side prepare reuse threshold in use for this statement.
-   *
-   * @return the current threshold
-   * @see #setPrepareThreshold(int)
-   * @since build 302
-   */
-  int getPrepareThreshold();
+    /**
+     * <p>Sets the reuse threshold for using server-prepared statements.</p>
+     *
+     * <p>If <code>threshold</code> is a non-zero value N, the Nth and subsequent reuses of a
+     * PreparedStatement will use server-side prepare.</p>
+     *
+     * <p>If <code>threshold</code> is zero, server-side prepare will not be used.</p>
+     *
+     * <p>The reuse threshold is only used by PreparedStatement and CallableStatement objects; it is
+     * ignored for plain Statements.</p>
+     *
+     * @param threshold the new threshold for this statement
+     * @throws SQLException if an exception occurs while changing the threshold
+     * @since build 302
+     */
+    void setPrepareThreshold(int threshold) throws SQLException;
 
-  /**
-   * Turn on/off adaptive fetch for statement. Existing resultSets won't be affected by change
-   * here.
-   *
-   * @param adaptiveFetch desired state of adaptive fetch.
-   */
-  void setAdaptiveFetch(boolean adaptiveFetch);
+    /**
+     * Get state of adaptive fetch for statement.
+     *
+     * @return state of adaptive fetch (turned on or off)
+     */
+    boolean getAdaptiveFetch();
 
-  /**
-   * Get state of adaptive fetch for statement.
-   *
-   * @return state of adaptive fetch (turned on or off)
-   */
-  boolean getAdaptiveFetch();
+    /**
+     * Turn on/off adaptive fetch for statement. Existing resultSets won't be affected by change
+     * here.
+     *
+     * @param adaptiveFetch desired state of adaptive fetch.
+     */
+    void setAdaptiveFetch(boolean adaptiveFetch);
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/copy/CopyIn.java b/pgjdbc/src/main/java/org/postgresql/copy/CopyIn.java
index b0cd5b4..bd17831 100644
--- a/pgjdbc/src/main/java/org/postgresql/copy/CopyIn.java
+++ b/pgjdbc/src/main/java/org/postgresql/copy/CopyIn.java
@@ -5,48 +5,47 @@
 
 package org.postgresql.copy;
 
-import org.postgresql.util.ByteStreamWriter;
-
 import java.sql.SQLException;
+import org.postgresql.util.ByteStreamWriter;
 
 /**
  * Copy bulk data from client into a PostgreSQL table very fast.
  */
 public interface CopyIn extends CopyOperation {
 
-  /**
-   * Writes specified part of given byte array to an open and writable copy operation.
-   *
-   * @param buf array of bytes to write
-   * @param off offset of first byte to write (normally zero)
-   * @param siz number of bytes to write (normally buf.length)
-   * @throws SQLException if the operation fails
-   */
-  void writeToCopy(byte[] buf, int off, int siz) throws SQLException;
+    /**
+     * Writes specified part of given byte array to an open and writable copy operation.
+     *
+     * @param buf array of bytes to write
+     * @param off offset of first byte to write (normally zero)
+     * @param siz number of bytes to write (normally buf.length)
+     * @throws SQLException if the operation fails
+     */
+    void writeToCopy(byte[] buf, int off, int siz) throws SQLException;
 
-  /**
-   * Writes a ByteStreamWriter to an open and writable copy operation.
-   *
-   * @param from the source of bytes, e.g. a ByteBufferByteStreamWriter
-   * @throws SQLException if the operation fails
-   */
-  void writeToCopy(ByteStreamWriter from) throws SQLException;
+    /**
+     * Writes a ByteStreamWriter to an open and writable copy operation.
+     *
+     * @param from the source of bytes, e.g. a ByteBufferByteStreamWriter
+     * @throws SQLException if the operation fails
+     */
+    void writeToCopy(ByteStreamWriter from) throws SQLException;
 
-  /**
-   * Force any buffered output to be sent over the network to the backend. In general this is a
-   * useless operation as it will get pushed over in due time or when endCopy is called. Some
-   * specific modified server versions (Truviso) want this data sooner. If you are unsure if you
-   * need to use this method, don't.
-   *
-   * @throws SQLException if the operation fails.
-   */
-  void flushCopy() throws SQLException;
+    /**
+     * Force any buffered output to be sent over the network to the backend. In general this is a
+     * useless operation as it will get pushed over in due time or when endCopy is called. Some
+     * specific modified server versions (Truviso) want this data sooner. If you are unsure if you
+     * need to use this method, don't.
+     *
+     * @throws SQLException if the operation fails.
+     */
+    void flushCopy() throws SQLException;
 
-  /**
-   * Finishes copy operation successfully.
-   *
-   * @return number of updated rows for server 8.2 or newer (see getHandledRowCount())
-   * @throws SQLException if the operation fails.
-   */
-  long endCopy() throws SQLException;
+    /**
+     * Finishes copy operation successfully.
+     *
+     * @return number of updated rows for server 8.2 or newer (see getHandledRowCount())
+     * @throws SQLException if the operation fails.
+     */
+    long endCopy() throws SQLException;
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/copy/CopyManager.java b/pgjdbc/src/main/java/org/postgresql/copy/CopyManager.java
index 8849f19..ea1a4d7 100644
--- a/pgjdbc/src/main/java/org/postgresql/copy/CopyManager.java
+++ b/pgjdbc/src/main/java/org/postgresql/copy/CopyManager.java
@@ -5,6 +5,12 @@
 
 package org.postgresql.copy;
 
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.io.Reader;
+import java.io.Writer;
+import java.sql.SQLException;
 import org.postgresql.core.BaseConnection;
 import org.postgresql.core.Encoding;
 import org.postgresql.core.QueryExecutor;
@@ -13,244 +19,237 @@ import org.postgresql.util.GT;
 import org.postgresql.util.PSQLException;
 import org.postgresql.util.PSQLState;
 
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.io.Reader;
-import java.io.Writer;
-import java.sql.SQLException;
-
 /**
  * API for PostgreSQL COPY bulk data transfer.
  */
 public class CopyManager {
-  // I don't know what the best buffer size is, so we let people specify it if
-  // they want, and if they don't know, we don't make them guess, so that if we
-  // do figure it out we can just set it here and they reap the rewards.
-  // Note that this is currently being used for both a number of bytes and a number
-  // of characters.
-  static final int DEFAULT_BUFFER_SIZE = 65536;
+    // I don't know what the best buffer size is, so we let people specify it if
+    // they want, and if they don't know, we don't make them guess, so that if we
+    // do figure it out we can just set it here and they reap the rewards.
+    // Note that this is currently being used for both a number of bytes and a number
+    // of characters.
+    static final int DEFAULT_BUFFER_SIZE = 65536;
 
-  private final Encoding encoding;
-  private final QueryExecutor queryExecutor;
-  private final BaseConnection connection;
+    private final Encoding encoding;
+    private final QueryExecutor queryExecutor;
+    private final BaseConnection connection;
 
-  public CopyManager(BaseConnection connection) throws SQLException {
-    this.encoding = connection.getEncoding();
-    this.queryExecutor = connection.getQueryExecutor();
-    this.connection = connection;
-  }
-
-  public CopyIn copyIn(String sql) throws SQLException {
-    CopyOperation op = queryExecutor.startCopy(sql, connection.getAutoCommit());
-    if (op == null || op instanceof CopyIn) {
-      return (CopyIn) op;
-    } else {
-      op.cancelCopy();
-      throw new PSQLException(GT.tr("Requested CopyIn but got {0}", op.getClass().getName()),
-              PSQLState.WRONG_OBJECT_TYPE);
+    public CopyManager(BaseConnection connection) throws SQLException {
+        this.encoding = connection.getEncoding();
+        this.queryExecutor = connection.getQueryExecutor();
+        this.connection = connection;
     }
-  }
 
-  public CopyOut copyOut(String sql) throws SQLException {
-    CopyOperation op = queryExecutor.startCopy(sql, connection.getAutoCommit());
-    if (op == null || op instanceof CopyOut) {
-      return (CopyOut) op;
-    } else {
-      op.cancelCopy();
-      throw new PSQLException(GT.tr("Requested CopyOut but got {0}", op.getClass().getName()),
-              PSQLState.WRONG_OBJECT_TYPE);
-    }
-  }
-
-  public CopyDual copyDual(String sql) throws SQLException {
-    CopyOperation op = queryExecutor.startCopy(sql, connection.getAutoCommit());
-    if (op == null || op instanceof CopyDual) {
-      return (CopyDual) op;
-    } else {
-      op.cancelCopy();
-      throw new PSQLException(GT.tr("Requested CopyDual but got {0}", op.getClass().getName()),
-          PSQLState.WRONG_OBJECT_TYPE);
-    }
-  }
-
-  /**
-   * Pass results of a COPY TO STDOUT query from database into a Writer.
-   *
-   * @param sql COPY TO STDOUT statement
-   * @param to the Writer to write the results to (row by row).
-   *           The Writer is not closed at the end of the Copy Out operation.
-   * @return number of rows updated for server 8.2 or newer; -1 for older
-   * @throws SQLException on database usage errors
-   * @throws IOException upon writer or database connection failure
-   */
-  public long copyOut(final String sql, Writer to) throws SQLException, IOException {
-    byte[] buf;
-    CopyOut cp = copyOut(sql);
-    try {
-      while ((buf = cp.readFromCopy()) != null) {
-        to.write(encoding.decode(buf));
-      }
-      return cp.getHandledRowCount();
-    } catch (IOException ioEX) {
-      // if not handled this way the close call will hang, at least in 8.2
-      if (cp.isActive()) {
-        cp.cancelCopy();
-      }
-      try { // read until exhausted or operation cancelled SQLException
-        while ((buf = cp.readFromCopy()) != null) {
+    public CopyIn copyIn(String sql) throws SQLException {
+        CopyOperation op = queryExecutor.startCopy(sql, connection.getAutoCommit());
+        if (op == null || op instanceof CopyIn) {
+            return (CopyIn) op;
+        } else {
+            op.cancelCopy();
+            throw new PSQLException(GT.tr("Requested CopyIn but got {0}", op.getClass().getName()),
+                    PSQLState.WRONG_OBJECT_TYPE);
         }
-      } catch (SQLException sqlEx) {
-      } // typically after several kB
-      throw ioEX;
-    } finally { // see to it that we do not leave the connection locked
-      if (cp.isActive()) {
-        cp.cancelCopy();
-      }
     }
-  }
 
-  /**
-   * Pass results of a COPY TO STDOUT query from database into an OutputStream.
-   *
-   * @param sql COPY TO STDOUT statement
-   * @param to the stream to write the results to (row by row)
-   *           The stream is not closed at the end of the operation. This is intentional so the
-   *           caller can continue to write to the output stream
-   * @return number of rows updated for server 8.2 or newer; -1 for older
-   * @throws SQLException on database usage errors
-   * @throws IOException upon output stream or database connection failure
-   */
-  public long copyOut(final String sql, OutputStream to) throws SQLException, IOException {
-    byte[] buf;
-    CopyOut cp = copyOut(sql);
-    try {
-      while ((buf = cp.readFromCopy()) != null) {
-        to.write(buf);
-      }
-      return cp.getHandledRowCount();
-    } catch (IOException ioEX) {
-      // if not handled this way the close call will hang, at least in 8.2
-      if (cp.isActive()) {
-        cp.cancelCopy();
-      }
-      try { // read until exhausted or operation cancelled SQLException
-        while ((buf = cp.readFromCopy()) != null) {
+    public CopyOut copyOut(String sql) throws SQLException {
+        CopyOperation op = queryExecutor.startCopy(sql, connection.getAutoCommit());
+        if (op == null || op instanceof CopyOut) {
+            return (CopyOut) op;
+        } else {
+            op.cancelCopy();
+            throw new PSQLException(GT.tr("Requested CopyOut but got {0}", op.getClass().getName()),
+                    PSQLState.WRONG_OBJECT_TYPE);
         }
-      } catch (SQLException sqlEx) {
-      } // typically after several kB
-      throw ioEX;
-    } finally { // see to it that we do not leave the connection locked
-      if (cp.isActive()) {
-        cp.cancelCopy();
-      }
     }
-  }
 
-  /**
-   * Use COPY FROM STDIN for very fast copying from a Reader into a database table.
-   *
-   * @param sql COPY FROM STDIN statement
-   * @param from a CSV file or such
-   * @return number of rows updated for server 8.2 or newer; -1 for older
-   * @throws SQLException on database usage issues
-   * @throws IOException upon reader or database connection failure
-   */
-  public long copyIn(final String sql, Reader from) throws SQLException, IOException {
-    return copyIn(sql, from, DEFAULT_BUFFER_SIZE);
-  }
-
-  /**
-   * Use COPY FROM STDIN for very fast copying from a Reader into a database table.
-   *
-   * @param sql COPY FROM STDIN statement
-   * @param from a CSV file or such
-   * @param bufferSize number of characters to buffer and push over network to server at once
-   * @return number of rows updated for server 8.2 or newer; -1 for older
-   * @throws SQLException on database usage issues
-   * @throws IOException upon reader or database connection failure
-   */
-  public long copyIn(final String sql, Reader from, int bufferSize)
-      throws SQLException, IOException {
-    char[] cbuf = new char[bufferSize];
-    int len;
-    CopyIn cp = copyIn(sql);
-    try {
-      while ((len = from.read(cbuf)) >= 0) {
-        if (len > 0) {
-          byte[] buf = encoding.encode(new String(cbuf, 0, len));
-          cp.writeToCopy(buf, 0, buf.length);
+    public CopyDual copyDual(String sql) throws SQLException {
+        CopyOperation op = queryExecutor.startCopy(sql, connection.getAutoCommit());
+        if (op == null || op instanceof CopyDual) {
+            return (CopyDual) op;
+        } else {
+            op.cancelCopy();
+            throw new PSQLException(GT.tr("Requested CopyDual but got {0}", op.getClass().getName()),
+                    PSQLState.WRONG_OBJECT_TYPE);
         }
-      }
-      return cp.endCopy();
-    } finally { // see to it that we do not leave the connection locked
-      if (cp.isActive()) {
-        cp.cancelCopy();
-      }
     }
-  }
 
-  /**
-   * Use COPY FROM STDIN for very fast copying from an InputStream into a database table.
-   *
-   * @param sql COPY FROM STDIN statement
-   * @param from a CSV file or such
-   * @return number of rows updated for server 8.2 or newer; -1 for older
-   * @throws SQLException on database usage issues
-   * @throws IOException upon input stream or database connection failure
-   */
-  public long copyIn(final String sql, InputStream from) throws SQLException, IOException {
-    return copyIn(sql, from, DEFAULT_BUFFER_SIZE);
-  }
-
-  /**
-   * Use COPY FROM STDIN for very fast copying from an InputStream into a database table.
-   *
-   * @param sql COPY FROM STDIN statement
-   * @param from a CSV file or such
-   * @param bufferSize number of bytes to buffer and push over network to server at once
-   * @return number of rows updated for server 8.2 or newer; -1 for older
-   * @throws SQLException on database usage issues
-   * @throws IOException upon input stream or database connection failure
-   */
-  public long copyIn(final String sql, InputStream from, int bufferSize)
-      throws SQLException, IOException {
-    byte[] buf = new byte[bufferSize];
-    int len;
-    CopyIn cp = copyIn(sql);
-    try {
-      while ((len = from.read(buf)) >= 0) {
-        if (len > 0) {
-          cp.writeToCopy(buf, 0, len);
+    /**
+     * Pass results of a COPY TO STDOUT query from database into a Writer.
+     *
+     * @param sql COPY TO STDOUT statement
+     * @param to  the Writer to write the results to (row by row).
+     *            The Writer is not closed at the end of the Copy Out operation.
+     * @return number of rows updated for server 8.2 or newer; -1 for older
+     * @throws SQLException on database usage errors
+     * @throws IOException  upon writer or database connection failure
+     */
+    public long copyOut(final String sql, Writer to) throws SQLException, IOException {
+        byte[] buf;
+        CopyOut cp = copyOut(sql);
+        try {
+            while ((buf = cp.readFromCopy()) != null) {
+                to.write(encoding.decode(buf));
+            }
+            return cp.getHandledRowCount();
+        } catch (IOException ioEX) {
+            // if not handled this way the close call will hang, at least in 8.2
+            if (cp.isActive()) {
+                cp.cancelCopy();
+            }
+            try { // read until exhausted or operation cancelled SQLException
+                while ((buf = cp.readFromCopy()) != null) {
+                }
+            } catch (SQLException sqlEx) {
+            } // typically after several kB
+            throw ioEX;
+        } finally { // see to it that we do not leave the connection locked
+            if (cp.isActive()) {
+                cp.cancelCopy();
+            }
         }
-      }
-      return cp.endCopy();
-    } finally { // see to it that we do not leave the connection locked
-      if (cp.isActive()) {
-        cp.cancelCopy();
-      }
     }
-  }
 
-  /**
-   * Use COPY FROM STDIN for very fast copying from an ByteStreamWriter into a database table.
-   *
-   * @param sql  COPY FROM STDIN statement
-   * @param from the source of bytes, e.g. a ByteBufferByteStreamWriter
-   * @return number of rows updated for server 8.2 or newer; -1 for older
-   * @throws SQLException on database usage issues
-   * @throws IOException  upon input stream or database connection failure
-   */
-  public long copyIn(String sql, ByteStreamWriter from)
-      throws SQLException, IOException {
-    CopyIn cp = copyIn(sql);
-    try {
-      cp.writeToCopy(from);
-      return cp.endCopy();
-    } finally { // see to it that we do not leave the connection locked
-      if (cp.isActive()) {
-        cp.cancelCopy();
-      }
+    /**
+     * Pass results of a COPY TO STDOUT query from database into an OutputStream.
+     *
+     * @param sql COPY TO STDOUT statement
+     * @param to  the stream to write the results to (row by row)
+     *            The stream is not closed at the end of the operation. This is intentional so the
+     *            caller can continue to write to the output stream
+     * @return number of rows updated for server 8.2 or newer; -1 for older
+     * @throws SQLException on database usage errors
+     * @throws IOException  upon output stream or database connection failure
+     */
+    public long copyOut(final String sql, OutputStream to) throws SQLException, IOException {
+        byte[] buf;
+        CopyOut cp = copyOut(sql);
+        try {
+            while ((buf = cp.readFromCopy()) != null) {
+                to.write(buf);
+            }
+            return cp.getHandledRowCount();
+        } catch (IOException ioEX) {
+            // if not handled this way the close call will hang, at least in 8.2
+            if (cp.isActive()) {
+                cp.cancelCopy();
+            }
+            try { // read until exhausted or operation cancelled SQLException
+                while ((buf = cp.readFromCopy()) != null) {
+                }
+            } catch (SQLException sqlEx) {
+            } // typically after several kB
+            throw ioEX;
+        } finally { // see to it that we do not leave the connection locked
+            if (cp.isActive()) {
+                cp.cancelCopy();
+            }
+        }
+    }
+
+    /**
+     * Use COPY FROM STDIN for very fast copying from a Reader into a database table.
+     *
+     * @param sql  COPY FROM STDIN statement
+     * @param from a CSV file or such
+     * @return number of rows updated for server 8.2 or newer; -1 for older
+     * @throws SQLException on database usage issues
+     * @throws IOException  upon reader or database connection failure
+     */
+    public long copyIn(final String sql, Reader from) throws SQLException, IOException {
+        return copyIn(sql, from, DEFAULT_BUFFER_SIZE);
+    }
+
+    /**
+     * Use COPY FROM STDIN for very fast copying from a Reader into a database table.
+     *
+     * @param sql        COPY FROM STDIN statement
+     * @param from       a CSV file or such
+     * @param bufferSize number of characters to buffer and push over network to server at once
+     * @return number of rows updated for server 8.2 or newer; -1 for older
+     * @throws SQLException on database usage issues
+     * @throws IOException  upon reader or database connection failure
+     */
+    public long copyIn(final String sql, Reader from, int bufferSize)
+            throws SQLException, IOException {
+        char[] cbuf = new char[bufferSize];
+        int len;
+        CopyIn cp = copyIn(sql);
+        try {
+            while ((len = from.read(cbuf)) >= 0) {
+                if (len > 0) {
+                    byte[] buf = encoding.encode(new String(cbuf, 0, len));
+                    cp.writeToCopy(buf, 0, buf.length);
+                }
+            }
+            return cp.endCopy();
+        } finally { // see to it that we do not leave the connection locked
+            if (cp.isActive()) {
+                cp.cancelCopy();
+            }
+        }
+    }
+
+    /**
+     * Use COPY FROM STDIN for very fast copying from an InputStream into a database table.
+     *
+     * @param sql  COPY FROM STDIN statement
+     * @param from a CSV file or such
+     * @return number of rows updated for server 8.2 or newer; -1 for older
+     * @throws SQLException on database usage issues
+     * @throws IOException  upon input stream or database connection failure
+     */
+    public long copyIn(final String sql, InputStream from) throws SQLException, IOException {
+        return copyIn(sql, from, DEFAULT_BUFFER_SIZE);
+    }
+
+    /**
+     * Use COPY FROM STDIN for very fast copying from an InputStream into a database table.
+     *
+     * @param sql        COPY FROM STDIN statement
+     * @param from       a CSV file or such
+     * @param bufferSize number of bytes to buffer and push over network to server at once
+     * @return number of rows updated for server 8.2 or newer; -1 for older
+     * @throws SQLException on database usage issues
+     * @throws IOException  upon input stream or database connection failure
+     */
+    public long copyIn(final String sql, InputStream from, int bufferSize)
+            throws SQLException, IOException {
+        byte[] buf = new byte[bufferSize];
+        int len;
+        CopyIn cp = copyIn(sql);
+        try {
+            while ((len = from.read(buf)) >= 0) {
+                if (len > 0) {
+                    cp.writeToCopy(buf, 0, len);
+                }
+            }
+            return cp.endCopy();
+        } finally { // see to it that we do not leave the connection locked
+            if (cp.isActive()) {
+                cp.cancelCopy();
+            }
+        }
+    }
+
+    /**
+     * Use COPY FROM STDIN for very fast copying from a ByteStreamWriter into a database table.
+     *
+     * @param sql  COPY FROM STDIN statement
+     * @param from the source of bytes, e.g. a ByteBufferByteStreamWriter
+     * @return number of rows updated for server 8.2 or newer; -1 for older
+     * @throws SQLException on database usage issues
+     * @throws IOException  upon input stream or database connection failure
+     */
+    public long copyIn(String sql, ByteStreamWriter from)
+            throws SQLException, IOException {
+        CopyIn cp = copyIn(sql);
+        try {
+            cp.writeToCopy(from);
+            return cp.endCopy();
+        } finally { // see to it that we do not leave the connection locked
+            if (cp.isActive()) {
+                cp.cancelCopy();
+            }
+        }
     }
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/copy/CopyOperation.java b/pgjdbc/src/main/java/org/postgresql/copy/CopyOperation.java
index 239c629..a7c485b 100644
--- a/pgjdbc/src/main/java/org/postgresql/copy/CopyOperation.java
+++ b/pgjdbc/src/main/java/org/postgresql/copy/CopyOperation.java
@@ -13,39 +13,39 @@ import java.sql.SQLException;
  */
 public interface CopyOperation {
 
-  /**
-   * @return number of fields in each row for this operation
-   */
-  int getFieldCount();
+    /**
+     * @return number of fields in each row for this operation
+     */
+    int getFieldCount();
 
-  /**
-   * @return overall format of each row: 0 = textual, 1 = binary
-   */
-  int getFormat();
+    /**
+     * @return overall format of each row: 0 = textual, 1 = binary
+     */
+    int getFormat();
 
-  /**
-   * @param field number of field (0..fieldCount()-1)
-   * @return format of requested field: 0 = textual, 1 = binary
-   */
-  int getFieldFormat(int field);
+    /**
+     * @param field number of field (0..fieldCount()-1)
+     * @return format of requested field: 0 = textual, 1 = binary
+     */
+    int getFieldFormat(int field);
 
-  /**
-   * @return is connection reserved for this Copy operation?
-   */
-  boolean isActive();
+    /**
+     * @return is connection reserved for this Copy operation?
+     */
+    boolean isActive();
 
-  /**
-   * Cancels this copy operation, discarding any exchanged data.
-   *
-   * @throws SQLException if cancelling fails
-   */
-  void cancelCopy() throws SQLException;
+    /**
+     * Cancels this copy operation, discarding any exchanged data.
+     *
+     * @throws SQLException if cancelling fails
+     */
+    void cancelCopy() throws SQLException;
 
-  /**
-   * After successful end of copy, returns the number of database records handled in that operation.
-   * Only implemented in PostgreSQL server version 8.2 and up. Otherwise, returns -1.
-   *
-   * @return number of handled rows or -1
-   */
-  long getHandledRowCount();
+    /**
+     * After successful end of copy, returns the number of database records handled in that operation.
+     * Only implemented in PostgreSQL server version 8.2 and up. Otherwise, returns -1.
+     *
+     * @return number of handled rows or -1
+     */
+    long getHandledRowCount();
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/copy/CopyOut.java b/pgjdbc/src/main/java/org/postgresql/copy/CopyOut.java
index e7918e1..73e09c7 100644
--- a/pgjdbc/src/main/java/org/postgresql/copy/CopyOut.java
+++ b/pgjdbc/src/main/java/org/postgresql/copy/CopyOut.java
@@ -8,22 +8,22 @@ package org.postgresql.copy;
 import java.sql.SQLException;
 
 public interface CopyOut extends CopyOperation {
-  /**
-   * Blocks wait for a row of data to be received from server on an active copy operation.
-   *
-   * @return byte array received from server, null if server complete copy operation
-   * @throws SQLException if something goes wrong for example socket timeout
-   */
-  byte [] readFromCopy() throws SQLException;
+    /**
+     * Blocks waiting for a row of data to be received from the server on an active copy operation.
+     *
+     * @return byte array received from the server, or null if the server completed the copy operation
+     * @throws SQLException if something goes wrong, for example a socket timeout
+     */
+    byte[] readFromCopy() throws SQLException;
 
-  /**
-   * Wait for a row of data to be received from server on an active copy operation.
-   *
-   * @param block {@code true} if need wait data from server otherwise {@code false} and will read
-   *              pending message from server
-   * @return byte array received from server, if pending message from server absent and use no
-   *         blocking mode return null
-   * @throws SQLException if something goes wrong for example socket timeout
-   */
-  byte [] readFromCopy(boolean block) throws SQLException;
+    /**
+     * Waits for a row of data to be received from the server on an active copy operation.
+     *
+     * @param block {@code true} to block until data arrives from the server; {@code false} to only
+     *              read messages already pending from the server without waiting
+     * @return byte array received from the server, or null if no message is pending and
+     * non-blocking mode is used
+     * @throws SQLException if something goes wrong, for example a socket timeout
+     */
+    byte[] readFromCopy(boolean block) throws SQLException;
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/copy/PGCopyInputStream.java b/pgjdbc/src/main/java/org/postgresql/copy/PGCopyInputStream.java
index aefd13a..eea37a9 100644
--- a/pgjdbc/src/main/java/org/postgresql/copy/PGCopyInputStream.java
+++ b/pgjdbc/src/main/java/org/postgresql/copy/PGCopyInputStream.java
@@ -5,174 +5,173 @@
 
 package org.postgresql.copy;
 
+import java.io.IOException;
+import java.io.InputStream;
+import java.sql.SQLException;
+import java.util.Arrays;
 import org.postgresql.PGConnection;
 import org.postgresql.util.GT;
 import org.postgresql.util.PSQLException;
 import org.postgresql.util.PSQLState;
 
-import java.io.IOException;
-import java.io.InputStream;
-import java.sql.SQLException;
-import java.util.Arrays;
-
 /**
  * InputStream for reading from a PostgreSQL COPY TO STDOUT operation.
  */
 public class PGCopyInputStream extends InputStream implements CopyOut {
-  private CopyOut op;
-  private byte [] buf;
-  private int at;
-  private int len;
+    private CopyOut op;
+    private byte[] buf;
+    private int at;
+    private int len;
 
-  /**
-   * Uses given connection for specified COPY TO STDOUT operation.
-   *
-   * @param connection database connection to use for copying (protocol version 3 required)
-   * @param sql COPY TO STDOUT statement
-   * @throws SQLException if initializing the operation fails
-   */
-  public PGCopyInputStream(PGConnection connection, String sql) throws SQLException {
-    this(connection.getCopyAPI().copyOut(sql));
-  }
-
-  /**
-   * Use given CopyOut operation for reading.
-   *
-   * @param op COPY TO STDOUT operation
-   */
-  public PGCopyInputStream(CopyOut op) {
-    this.op = op;
-  }
-
-  private CopyOut getOp() {
-    return op;
-  }
-
-  private byte [] fillBuffer() throws IOException {
-    if (at >= len) {
-      try {
-        buf = getOp().readFromCopy();
-      } catch (SQLException sqle) {
-        throw new IOException(GT.tr("Copying from database failed: {0}", sqle.getMessage()), sqle);
-      }
-      if (buf == null) {
-        at = -1;
-      } else {
-        at = 0;
-        len = buf.length;
-      }
+    /**
+     * Uses given connection for specified COPY TO STDOUT operation.
+     *
+     * @param connection database connection to use for copying (protocol version 3 required)
+     * @param sql        COPY TO STDOUT statement
+     * @throws SQLException if initializing the operation fails
+     */
+    public PGCopyInputStream(PGConnection connection, String sql) throws SQLException {
+        this(connection.getCopyAPI().copyOut(sql));
     }
-    return buf;
-  }
 
-  private void checkClosed() throws IOException {
-    if (op == null) {
-      throw new IOException(GT.tr("This copy stream is closed."));
+    /**
+     * Use given CopyOut operation for reading.
+     *
+     * @param op COPY TO STDOUT operation
+     */
+    public PGCopyInputStream(CopyOut op) {
+        this.op = op;
     }
-  }
 
-  @Override
-  public int available() throws IOException {
-    checkClosed();
-    return buf != null ? len - at : 0;
-  }
-
-  @Override
-  public int read() throws IOException {
-    checkClosed();
-    byte[] buf = fillBuffer();
-    return buf != null ? (buf[at++] & 0xFF)  : -1;
-  }
-
-  @Override
-  public int read(byte[] buf) throws IOException {
-    return read(buf, 0, buf.length);
-  }
-
-  @Override
-  public int read(byte[] buf, int off, int siz) throws IOException {
-    checkClosed();
-    int got = 0;
-    byte[] data = fillBuffer();
-    for (; got < siz && data != null; data = fillBuffer()) {
-      int length = Math.min(siz - got, len - at);
-      System.arraycopy(data, at, buf, off + got, length);
-      at += length;
-      got += length;
+    private CopyOut getOp() {
+        return op;
     }
-    return got == 0 && data == null ? -1 : got;
-  }
 
-  @Override
-  public byte [] readFromCopy() throws SQLException {
-    byte[] result = null;
-    try {
-      byte[] buf = fillBuffer();
-      if (buf != null) {
-        if (at > 0 || len < buf.length) {
-          result = Arrays.copyOfRange(buf, at, len);
-        } else {
-          result = buf;
+    private byte[] fillBuffer() throws IOException {
+        if (at >= len) {
+            try {
+                buf = getOp().readFromCopy();
+            } catch (SQLException sqle) {
+                throw new IOException(GT.tr("Copying from database failed: {0}", sqle.getMessage()), sqle);
+            }
+            if (buf == null) {
+                at = -1;
+            } else {
+                at = 0;
+                len = buf.length;
+            }
         }
-        // Mark the buffer as fully read
-        at = len;
-      }
-    } catch (IOException ioe) {
-      throw new PSQLException(GT.tr("Read from copy failed."), PSQLState.CONNECTION_FAILURE, ioe);
-    }
-    return result;
-  }
-
-  @Override
-  public byte [] readFromCopy(boolean block) throws SQLException {
-    return readFromCopy();
-  }
-
-  @Override
-  public void close() throws IOException {
-    // Don't complain about a double close.
-    CopyOut op = this.op;
-    if (op == null) {
-      return;
+        return buf;
     }
 
-    if (op.isActive()) {
-      try {
-        op.cancelCopy();
-      } catch (SQLException se) {
-        throw new IOException("Failed to close copy reader.", se);
-      }
+    private void checkClosed() throws IOException {
+        if (op == null) {
+            throw new IOException(GT.tr("This copy stream is closed."));
+        }
     }
-    this.op = null;
-  }
 
-  @Override
-  public void cancelCopy() throws SQLException {
-    getOp().cancelCopy();
-  }
+    @Override
+    public int available() throws IOException {
+        checkClosed();
+        return buf != null ? len - at : 0;
+    }
 
-  @Override
-  public int getFormat() {
-    return getOp().getFormat();
-  }
+    @Override
+    public int read() throws IOException {
+        checkClosed();
+        byte[] buf = fillBuffer();
+        return buf != null ? (buf[at++] & 0xFF) : -1;
+    }
 
-  @Override
-  public int getFieldFormat(int field) {
-    return getOp().getFieldFormat(field);
-  }
+    @Override
+    public int read(byte[] buf) throws IOException {
+        return read(buf, 0, buf.length);
+    }
 
-  @Override
-  public int getFieldCount() {
-    return getOp().getFieldCount();
-  }
+    @Override
+    public int read(byte[] buf, int off, int siz) throws IOException {
+        checkClosed();
+        int got = 0;
+        byte[] data = fillBuffer();
+        for (; got < siz && data != null; data = fillBuffer()) {
+            int length = Math.min(siz - got, len - at);
+            System.arraycopy(data, at, buf, off + got, length);
+            at += length;
+            got += length;
+        }
+        return got == 0 && data == null ? -1 : got;
+    }
 
-  @Override
-  public boolean isActive() {
-    return op != null && op.isActive();
-  }
+    @Override
+    public byte[] readFromCopy() throws SQLException {
+        byte[] result = null;
+        try {
+            byte[] buf = fillBuffer();
+            if (buf != null) {
+                if (at > 0 || len < buf.length) {
+                    result = Arrays.copyOfRange(buf, at, len);
+                } else {
+                    result = buf;
+                }
+                // Mark the buffer as fully read
+                at = len;
+            }
+        } catch (IOException ioe) {
+            throw new PSQLException(GT.tr("Read from copy failed."), PSQLState.CONNECTION_FAILURE, ioe);
+        }
+        return result;
+    }
 
-  @Override
-  public long getHandledRowCount() {
-    return getOp().getHandledRowCount();
-  }
+    @Override
+    public byte[] readFromCopy(boolean block) throws SQLException {
+        return readFromCopy();
+    }
+
+    @Override
+    public void close() throws IOException {
+        // Don't complain about a double close.
+        CopyOut op = this.op;
+        if (op == null) {
+            return;
+        }
+
+        if (op.isActive()) {
+            try {
+                op.cancelCopy();
+            } catch (SQLException se) {
+                throw new IOException("Failed to close copy reader.", se);
+            }
+        }
+        this.op = null;
+    }
+
+    @Override
+    public void cancelCopy() throws SQLException {
+        getOp().cancelCopy();
+    }
+
+    @Override
+    public int getFormat() {
+        return getOp().getFormat();
+    }
+
+    @Override
+    public int getFieldFormat(int field) {
+        return getOp().getFieldFormat(field);
+    }
+
+    @Override
+    public int getFieldCount() {
+        return getOp().getFieldCount();
+    }
+
+    @Override
+    public boolean isActive() {
+        return op != null && op.isActive();
+    }
+
+    @Override
+    public long getHandledRowCount() {
+        return getOp().getHandledRowCount();
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/copy/PGCopyOutputStream.java b/pgjdbc/src/main/java/org/postgresql/copy/PGCopyOutputStream.java
index 322a5a9..c05f6b9 100644
--- a/pgjdbc/src/main/java/org/postgresql/copy/PGCopyOutputStream.java
+++ b/pgjdbc/src/main/java/org/postgresql/copy/PGCopyOutputStream.java
@@ -5,199 +5,198 @@
 
 package org.postgresql.copy;
 
-import org.postgresql.PGConnection;
-import org.postgresql.util.ByteStreamWriter;
-import org.postgresql.util.GT;
-
 import java.io.IOException;
 import java.io.OutputStream;
 import java.sql.SQLException;
+import org.postgresql.PGConnection;
+import org.postgresql.util.ByteStreamWriter;
+import org.postgresql.util.GT;
 
 /**
  * OutputStream for buffered input into a PostgreSQL COPY FROM STDIN operation.
  */
 public class PGCopyOutputStream extends OutputStream implements CopyIn {
-  private CopyIn op;
-  private final byte[] copyBuffer;
-  private final byte[] singleByteBuffer = new byte[1];
-  private int at;
+    private final byte[] copyBuffer;
+    private final byte[] singleByteBuffer = new byte[1];
+    private CopyIn op;
+    private int at;
 
-  /**
-   * Uses given connection for specified COPY FROM STDIN operation.
-   *
-   * @param connection database connection to use for copying (protocol version 3 required)
-   * @param sql        COPY FROM STDIN statement
-   * @throws SQLException if initializing the operation fails
-   */
-  public PGCopyOutputStream(PGConnection connection, String sql) throws SQLException {
-    this(connection, sql, CopyManager.DEFAULT_BUFFER_SIZE);
-  }
-
-  /**
-   * Uses given connection for specified COPY FROM STDIN operation.
-   *
-   * @param connection database connection to use for copying (protocol version 3 required)
-   * @param sql        COPY FROM STDIN statement
-   * @param bufferSize try to send this many bytes at a time
-   * @throws SQLException if initializing the operation fails
-   */
-  public PGCopyOutputStream(PGConnection connection, String sql, int bufferSize)
-      throws SQLException {
-    this(connection.getCopyAPI().copyIn(sql), bufferSize);
-  }
-
-  /**
-   * Use given CopyIn operation for writing.
-   *
-   * @param op COPY FROM STDIN operation
-   */
-  public PGCopyOutputStream(CopyIn op) {
-    this(op, CopyManager.DEFAULT_BUFFER_SIZE);
-  }
-
-  /**
-   * Use given CopyIn operation for writing.
-   *
-   * @param op         COPY FROM STDIN operation
-   * @param bufferSize try to send this many bytes at a time
-   */
-  public PGCopyOutputStream(CopyIn op, int bufferSize) {
-    this.op = op;
-    copyBuffer = new byte[bufferSize];
-  }
-
-  private CopyIn getOp() {
-    return op;
-  }
-
-  @Override
-  public void write(int b) throws IOException {
-    checkClosed();
-    if (b < 0 || b > 255) {
-      throw new IOException(GT.tr("Cannot write to copy a byte of value {0}", b));
-    }
-    singleByteBuffer[0] = (byte) b;
-    write(singleByteBuffer, 0, 1);
-  }
-
-  @Override
-  public void write(byte[] buf) throws IOException {
-    write(buf, 0, buf.length);
-  }
-
-  @Override
-  public void write(byte[] buf, int off, int siz) throws IOException {
-    checkClosed();
-    try {
-      writeToCopy(buf, off, siz);
-    } catch (SQLException se) {
-      throw new IOException("Write to copy failed.", se);
-    }
-  }
-
-  private void checkClosed() throws IOException {
-    if (op == null) {
-      throw new IOException(GT.tr("This copy stream is closed."));
-    }
-  }
-
-  @Override
-  public void close() throws IOException {
-    // Don't complain about a double close.
-    CopyIn op = this.op;
-    if (op == null) {
-      return;
+    /**
+     * Uses given connection for specified COPY FROM STDIN operation.
+     *
+     * @param connection database connection to use for copying (protocol version 3 required)
+     * @param sql        COPY FROM STDIN statement
+     * @throws SQLException if initializing the operation fails
+     */
+    public PGCopyOutputStream(PGConnection connection, String sql) throws SQLException {
+        this(connection, sql, CopyManager.DEFAULT_BUFFER_SIZE);
     }
 
-    if (op.isActive()) {
-      try {
-        endCopy();
-      } catch (SQLException se) {
-        throw new IOException("Ending write to copy failed.", se);
-      }
+    /**
+     * Uses given connection for specified COPY FROM STDIN operation.
+     *
+     * @param connection database connection to use for copying (protocol version 3 required)
+     * @param sql        COPY FROM STDIN statement
+     * @param bufferSize try to send this many bytes at a time
+     * @throws SQLException if initializing the operation fails
+     */
+    public PGCopyOutputStream(PGConnection connection, String sql, int bufferSize)
+            throws SQLException {
+        this(connection.getCopyAPI().copyIn(sql), bufferSize);
     }
-    this.op = null;
-  }
 
-  @Override
-  public void flush() throws IOException {
-    checkClosed();
-    try {
-      getOp().writeToCopy(copyBuffer, 0, at);
-      at = 0;
-      getOp().flushCopy();
-    } catch (SQLException e) {
-      throw new IOException("Unable to flush stream", e);
+    /**
+     * Use given CopyIn operation for writing.
+     *
+     * @param op COPY FROM STDIN operation
+     */
+    public PGCopyOutputStream(CopyIn op) {
+        this(op, CopyManager.DEFAULT_BUFFER_SIZE);
     }
-  }
 
-  @Override
-  public void writeToCopy(byte[] buf, int off, int siz) throws SQLException {
-    if (at > 0
-        && siz > copyBuffer.length - at) { // would not fit into rest of our buf, so flush buf
-      getOp().writeToCopy(copyBuffer, 0, at);
-      at = 0;
+    /**
+     * Use given CopyIn operation for writing.
+     *
+     * @param op         COPY FROM STDIN operation
+     * @param bufferSize try to send this many bytes at a time
+     */
+    public PGCopyOutputStream(CopyIn op, int bufferSize) {
+        this.op = op;
+        copyBuffer = new byte[bufferSize];
     }
-    if (siz > copyBuffer.length) { // would still not fit into buf, so just pass it through
-      getOp().writeToCopy(buf, off, siz);
-    } else { // fits into our buf, so save it there
-      System.arraycopy(buf, off, copyBuffer, at, siz);
-      at += siz;
+
+    private CopyIn getOp() {
+        return op;
     }
-  }
 
-  @Override
-  public void writeToCopy(ByteStreamWriter from) throws SQLException {
-    if (at > 0) {
-      // flush existing buffer so order is preserved
-      getOp().writeToCopy(copyBuffer, 0, at);
-      at = 0;
+    @Override
+    public void write(int b) throws IOException {
+        checkClosed();
+        if (b < 0 || b > 255) {
+            throw new IOException(GT.tr("Cannot write to copy a byte of value {0}", b));
+        }
+        singleByteBuffer[0] = (byte) b;
+        write(singleByteBuffer, 0, 1);
     }
-    getOp().writeToCopy(from);
-  }
 
-  @Override
-  public int getFormat() {
-    return getOp().getFormat();
-  }
-
-  @Override
-  public int getFieldFormat(int field) {
-    return getOp().getFieldFormat(field);
-  }
-
-  @Override
-  public void cancelCopy() throws SQLException {
-    getOp().cancelCopy();
-  }
-
-  @Override
-  public int getFieldCount() {
-    return getOp().getFieldCount();
-  }
-
-  @Override
-  public boolean isActive() {
-    return op != null && getOp().isActive();
-  }
-
-  @Override
-  public void flushCopy() throws SQLException {
-    getOp().flushCopy();
-  }
-
-  @Override
-  public long endCopy() throws SQLException {
-    if (at > 0) {
-      getOp().writeToCopy(copyBuffer, 0, at);
+    @Override
+    public void write(byte[] buf) throws IOException {
+        write(buf, 0, buf.length);
     }
-    getOp().endCopy();
-    return getHandledRowCount();
-  }
 
-  @Override
-  public long getHandledRowCount() {
-    return getOp().getHandledRowCount();
-  }
+    @Override
+    public void write(byte[] buf, int off, int siz) throws IOException {
+        checkClosed();
+        try {
+            writeToCopy(buf, off, siz);
+        } catch (SQLException se) {
+            throw new IOException("Write to copy failed.", se);
+        }
+    }
+
+    private void checkClosed() throws IOException {
+        if (op == null) {
+            throw new IOException(GT.tr("This copy stream is closed."));
+        }
+    }
+
+    @Override
+    public void close() throws IOException {
+        // Don't complain about a double close.
+        CopyIn op = this.op;
+        if (op == null) {
+            return;
+        }
+
+        if (op.isActive()) {
+            try {
+                endCopy();
+            } catch (SQLException se) {
+                throw new IOException("Ending write to copy failed.", se);
+            }
+        }
+        this.op = null;
+    }
+
+    @Override
+    public void flush() throws IOException {
+        checkClosed();
+        try {
+            getOp().writeToCopy(copyBuffer, 0, at);
+            at = 0;
+            getOp().flushCopy();
+        } catch (SQLException e) {
+            throw new IOException("Unable to flush stream", e);
+        }
+    }
+
+    @Override
+    public void writeToCopy(byte[] buf, int off, int siz) throws SQLException {
+        if (at > 0
+                && siz > copyBuffer.length - at) { // would not fit into rest of our buf, so flush buf
+            getOp().writeToCopy(copyBuffer, 0, at);
+            at = 0;
+        }
+        if (siz > copyBuffer.length) { // would still not fit into buf, so just pass it through
+            getOp().writeToCopy(buf, off, siz);
+        } else { // fits into our buf, so save it there
+            System.arraycopy(buf, off, copyBuffer, at, siz);
+            at += siz;
+        }
+    }
+
+    @Override
+    public void writeToCopy(ByteStreamWriter from) throws SQLException {
+        if (at > 0) {
+            // flush existing buffer so order is preserved
+            getOp().writeToCopy(copyBuffer, 0, at);
+            at = 0;
+        }
+        getOp().writeToCopy(from);
+    }
+
+    @Override
+    public int getFormat() {
+        return getOp().getFormat();
+    }
+
+    @Override
+    public int getFieldFormat(int field) {
+        return getOp().getFieldFormat(field);
+    }
+
+    @Override
+    public void cancelCopy() throws SQLException {
+        getOp().cancelCopy();
+    }
+
+    @Override
+    public int getFieldCount() {
+        return getOp().getFieldCount();
+    }
+
+    @Override
+    public boolean isActive() {
+        return op != null && getOp().isActive();
+    }
+
+    @Override
+    public void flushCopy() throws SQLException {
+        getOp().flushCopy();
+    }
+
+    @Override
+    public long endCopy() throws SQLException {
+        if (at > 0) {
+            getOp().writeToCopy(copyBuffer, 0, at);
+        }
+        getOp().endCopy();
+        return getHandledRowCount();
+    }
+
+    @Override
+    public long getHandledRowCount() {
+        return getOp().getHandledRowCount();
+    }
 
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/core/AsciiStringInterner.java b/pgjdbc/src/main/java/org/postgresql/core/AsciiStringInterner.java
index 3aed133..3ee5531 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/AsciiStringInterner.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/AsciiStringInterner.java
@@ -29,326 +29,330 @@ import java.util.concurrent.ConcurrentMap;
  *
  * @author Brett Okken
  */
-final class AsciiStringInterner {
+public final class AsciiStringInterner {
 
-  private abstract static class BaseKey {
-    private final int hash;
+    /**
+     * Contains the canonicalized values, keyed by the ascii {@code byte[]}.
+     */
+    final ConcurrentMap<BaseKey, SoftReference<String>> cache = new ConcurrentHashMap<>(128);
+    /**
+     * Used for {@link Reference} as values in {@code cache}.
+     */
+    final ReferenceQueue<String> refQueue = new ReferenceQueue<>();
 
-    BaseKey(int hash) {
-      this.hash = hash;
+    public AsciiStringInterner() {
     }
 
-    @Override
-    public final int hashCode() {
-      return hash;
+    /**
+     * Generates a hash value for the relevant entries in <i>bytes</i> as long as all values are ascii ({@code >= 0}).
+     *
+     * @return hash code for relevant bytes, or {@code 0} if non-ascii bytes present.
+     */
+    private static int hashKey(byte[] bytes, int offset, int length) {
+        int result = 1;
+        for (int i = offset, j = offset + length; i < j; i++) {
+            final byte b = bytes[i];
+            // bytes are signed values. all ascii values are positive
+            if (b < 0) {
+                return 0;
+            }
+            result = 31 * result + b;
+        }
+        return result;
     }
 
-    @Override
-    public final boolean equals(Object obj) {
-      if (obj == this) {
+    /**
+     * Performs equality check between <i>a</i> and <i>b</i> (with corresponding offset/length values).
+     * <p>
+     * The {@code static boolean equals(byte[].class, int, int, byte[], int, int} method in {@link java.util.Arrays}
+     * is optimized for longer {@code byte[]} instances than is expected to be seen here.
+     * </p>
+     */
+    static boolean arrayEquals(byte[] a, int aOffset, int aLength, byte[] b, int bOffset, int bLength) {
+        if (aLength != bLength) {
+            return false;
+        }
+        //TODO: in jdk9, could use VarHandle to read 4 bytes at a time as an int for comparison
+        // or 8 bytes as a long - though we likely expect short values here
+        for (int i = 0; i < aLength; i++) {
+            if (a[aOffset + i] != b[bOffset + i]) {
+                return false;
+            }
+        }
         return true;
-      }
-      if (!(obj instanceof BaseKey)) {
-        return false;
-      }
-      final BaseKey other = (BaseKey) obj;
-      return equalsBytes(other);
     }
 
-    abstract boolean equalsBytes(BaseKey other);
-
-    abstract boolean equals(byte[] other, int offset, int length);
-
-    abstract void appendString(StringBuilder sb);
-  }
-
-  /**
-   * Only used for lookups, never to actually store entries.
-   */
-  private static class TempKey extends BaseKey {
-    final byte[] bytes;
-    final int offset;
-    final int length;
-
-    TempKey(int hash, byte[] bytes, int offset, int length) {
-      super(hash);
-      this.bytes = bytes;
-      this.offset = offset;
-      this.length = length;
+    /**
+     * Preemptively populates a value into the cache. This is intended to be used with {@code String} constants
+     * which are frequently used. While this can work with other {@code String} values, if <i>val</i> is ever
+     * garbage collected, it will not be actively removed from this instance.
+     *
+     * @param val The value to intern. Must not be {@code null}.
+     * @return Indication if <i>val</i> is an ascii String and placed into cache.
+     */
+    public boolean putString(String val) {
+        //ask for utf-8 so that we can detect if any of the characters are not ascii
+        final byte[] copy = val.getBytes(StandardCharsets.UTF_8);
+        final int hash = hashKey(copy, 0, copy.length);
+        if (hash == 0) {
+            return false;
+        }
+        final Key key = new Key(copy, hash);
+    //we are assuming this is a java interned string, so this is unlikely to ever be
+        //reclaimed. so there is no value in using the custom StringReference or hand off to
+        //the refQueue.
+        //on the outside chance it actually does get reclaimed, it will just hang around as an
+        //empty reference in the map unless/until attempted to be retrieved
+        cache.put(key, new SoftReference<String>(val));
+        return true;
     }
 
-    @Override
-    boolean equalsBytes(BaseKey other) {
-      return other.equals(bytes, offset, length);
+    /**
+     * Produces a {@link String} instance for the given <i>bytes</i>. If all are valid ascii (i.e. {@code >= 0})
+     * either an existing value will be returned, or the newly created {@code String} will be stored before being
+     * returned.
+     *
+     * <p>
+     * If non-ascii bytes are discovered, the <i>encoding</i> will be used to
+     * {@link Encoding#decode(byte[], int, int) decode} and that value will be returned (but not stored).
+     * </p>
+     *
+     * @param bytes    The bytes of the String. Must not be {@code null}.
+     * @param offset   Offset into <i>bytes</i> to start.
+     * @param length   The number of bytes in <i>bytes</i> which are relevant.
+     * @param encoding To use if non-ascii bytes seen.
+     * @return Decoded {@code String} from <i>bytes</i>.
+     * @throws IOException If error decoding from <i>Encoding</i>.
+     */
+    public String getString(byte[] bytes, int offset, int length, Encoding encoding) throws IOException {
+        if (length == 0) {
+            return "";
+        }
+
+        final int hash = hashKey(bytes, offset, length);
+        // 0 indicates the presence of a non-ascii character - defer to encoding to create the string
+        if (hash == 0) {
+            return encoding.decode(bytes, offset, length);
+        }
+        cleanQueue();
+        // create a TempKey with the byte[] given
+        final TempKey tempKey = new TempKey(hash, bytes, offset, length);
+        SoftReference<String> ref = cache.get(tempKey);
+        if (ref != null) {
+            final String val = ref.get();
+            if (val != null) {
+                return val;
+            }
+        }
+        // in order to insert we need to create a "real" key with copy of bytes that will not be changed
+        final byte[] copy = Arrays.copyOfRange(bytes, offset, offset + length);
+        final Key key = new Key(copy, hash);
+        final String value = new String(copy, StandardCharsets.US_ASCII);
+
+        // handle case where a concurrent thread has populated the map or existing value has cleared reference
+        ref = cache.compute(key, (k, v) -> {
+            if (v == null) {
+                return new StringReference(key, value);
+            }
+            final String val = v.get();
+            return val != null ? v : new StringReference(key, value);
+        });
+
+        return ref.get();
     }
 
-    @Override
-    public boolean equals(byte[] other, int offset, int length) {
-      return arrayEquals(this.bytes, this.offset, this.length, other, offset, length);
+    /**
+     * Produces a {@link String} instance for the given <i>bytes</i>.
+     *
+     * <p>
+     * If all are valid ascii (i.e. {@code >= 0}) and a corresponding {@code String} value exists, it
+     * will be returned. If no value exists, a {@code String} will be created, but not stored.
+     * </p>
+     *
+     * <p>
+     * If non-ascii bytes are discovered, the <i>encoding</i> will be used to
+     * {@link Encoding#decode(byte[], int, int) decode} and that value will be returned (but not stored).
+     * </p>
+     *
+     * @param bytes    The bytes of the String. Must not be {@code null}.
+     * @param offset   Offset into <i>bytes</i> to start.
+     * @param length   The number of bytes in <i>bytes</i> which are relevant.
+     * @param encoding To use if non-ascii bytes seen.
+     * @return Decoded {@code String} from <i>bytes</i>.
+     * @throws IOException If error decoding from <i>Encoding</i>.
+     */
+    public String getStringIfPresent(byte[] bytes, int offset, int length, Encoding encoding) throws IOException {
+        if (length == 0) {
+            return "";
+        }
+
+        final int hash = hashKey(bytes, offset, length);
+        // 0 indicates the presence of a non-ascii character - defer to encoding to create the string
+        if (hash == 0) {
+            return encoding.decode(bytes, offset, length);
+        }
+        cleanQueue();
+        // create a TempKey with the byte[] given
+        final TempKey tempKey = new TempKey(hash, bytes, offset, length);
+        SoftReference<String> ref = cache.get(tempKey);
+        if (ref != null) {
+            final String val = ref.get();
+            if (val != null) {
+                return val;
+            }
+        }
+
+        return new String(bytes, offset, length, StandardCharsets.US_ASCII);
     }
 
-    @Override
-    void appendString(StringBuilder sb) {
-      for (int i = offset, j = offset + length; i < j; i++) {
-        sb.append((char) bytes[i]);
-      }
-    }
-  }
-
-  /**
-   * Instance used for inserting values into the cache. The {@code byte[]} must be a copy
-   * that will never be mutated.
-   */
-  private static final class Key extends BaseKey {
-    final byte[] key;
-
-    Key(byte[] key, int hash) {
-      super(hash);
-      this.key = key;
+    /**
+     * Process any entries in {@link #refQueue} to purge from the {@link #cache}.
+     *
+     * @see StringReference#dispose()
+     */
+    private void cleanQueue() {
+        Reference<?> ref;
+        while ((ref = refQueue.poll()) != null) {
+            ((StringReference) ref).dispose();
+        }
     }
 
     /**
      * {@inheritDoc}
      */
     @Override
-    boolean equalsBytes(BaseKey other) {
-      return other.equals(key, 0, key.length);
+    public String toString() {
+        final StringBuilder sb = new StringBuilder(32 + (8 * cache.size()));
+        sb.append("AsciiStringInterner [");
+        cache.forEach((k, v) -> {
+            sb.append('\'');
+            k.appendString(sb);
+            sb.append("', ");
+        });
+        // replace trailing ', ' with ']'
+        final int length = sb.length();
+        if (length > 21) {
+            sb.setLength(sb.length() - 2);
+        }
+        sb.append(']');
+        return sb.toString();
     }
 
-    @Override
-    public boolean equals(byte[] other, int offset, int length) {
-      return arrayEquals(this.key, 0, this.key.length, other, offset, length);
+    private abstract static class BaseKey {
+        private final int hash;
+
+        BaseKey(int hash) {
+            this.hash = hash;
+        }
+
+        @Override
+        public final int hashCode() {
+            return hash;
+        }
+
+        @Override
+        public final boolean equals(Object obj) {
+            if (obj == this) {
+                return true;
+            }
+            if (!(obj instanceof BaseKey)) {
+                return false;
+            }
+            final BaseKey other = (BaseKey) obj;
+            return equalsBytes(other);
+        }
+
+        abstract boolean equalsBytes(BaseKey other);
+
+        abstract boolean equals(byte[] other, int offset, int length);
+
+        abstract void appendString(StringBuilder sb);
     }
 
     /**
-     * {@inheritDoc}
+     * Only used for lookups, never to actually store entries.
      */
-    @Override
-    void appendString(StringBuilder sb) {
-      for (int i = 0; i < key.length; i++) {
-        sb.append((char) key[i]);
-      }
-    }
-  }
+    private static class TempKey extends BaseKey {
+        final byte[] bytes;
+        final int offset;
+        final int length;
 
-  /**
-   * Custom {@link SoftReference} implementation which maintains a reference to the key in the cache,
-   * which allows aggressive cleaning when garbage collector collects the {@code String} instance.
-   */
-  private final class StringReference extends SoftReference<String> {
+        TempKey(int hash, byte[] bytes, int offset, int length) {
+            super(hash);
+            this.bytes = bytes;
+            this.offset = offset;
+            this.length = length;
+        }
 
-    private final BaseKey key;
+        @Override
+        boolean equalsBytes(BaseKey other) {
+            return other.equals(bytes, offset, length);
+        }
 
-    StringReference(BaseKey key, String referent) {
-      super(referent, refQueue);
-      this.key = key;
+        @Override
+        public boolean equals(byte[] other, int offset, int length) {
+            return arrayEquals(this.bytes, this.offset, this.length, other, offset, length);
+        }
+
+        @Override
+        void appendString(StringBuilder sb) {
+            for (int i = offset, j = offset + length; i < j; i++) {
+                sb.append((char) bytes[i]);
+            }
+        }
     }
 
-    void dispose() {
-      cache.remove(key, this);
-    }
-  }
+    /**
+     * Instance used for inserting values into the cache. The {@code byte[]} must be a copy
+     * that will never be mutated.
+     */
+    private static final class Key extends BaseKey {
+        final byte[] key;
 
-  /**
-   * Contains the canonicalized values, keyed by the ascii {@code byte[]}.
-   */
-  final ConcurrentMap<BaseKey, SoftReference<String>> cache = new ConcurrentHashMap<>(128);
+        Key(byte[] key, int hash) {
+            super(hash);
+            this.key = key;
+        }
 
-  /**
-   * Used for {@link Reference} as values in {@code cache}.
-   */
-  final ReferenceQueue<String> refQueue = new ReferenceQueue<>();
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        boolean equalsBytes(BaseKey other) {
+            return other.equals(key, 0, key.length);
+        }
 
-  /**
-   * Preemptively populates a value into the cache. This is intended to be used with {@code String} constants
-   * which are frequently used. While this can work with other {@code String} values, if <i>val</i> is ever
-   * garbage collected, it will not be actively removed from this instance.
-   *
-   * @param val The value to intern. Must not be {@code null}.
-   * @return Indication if <i>val</i> is an ascii String and placed into cache.
-   */
-  public boolean putString(String val) {
-    //ask for utf-8 so that we can detect if any of the characters are not ascii
-    final byte[] copy = val.getBytes(StandardCharsets.UTF_8);
-    final int hash = hashKey(copy, 0, copy.length);
-    if (hash == 0) {
-      return false;
-    }
-    final Key key = new Key(copy, hash);
-    //we are assuming this is a java interned string from , so this is unlikely to ever be
-    //reclaimed. so there is no value in using the custom StringReference or hand off to
-    //the refQueue.
-    //on the outside chance it actually does get reclaimed, it will just hang around as an
-    //empty reference in the map unless/until attempted to be retrieved
-    cache.put(key, new SoftReference<String>(val));
-    return true;
-  }
+        @Override
+        public boolean equals(byte[] other, int offset, int length) {
+            return arrayEquals(this.key, 0, this.key.length, other, offset, length);
+        }
 
-  /**
-   * Produces a {@link String} instance for the given <i>bytes</i>. If all are valid ascii (i.e. {@code >= 0})
-   * either an existing value will be returned, or the newly created {@code String} will be stored before being
-   * returned.
-   *
-   * <p>
-   * If non-ascii bytes are discovered, the <i>encoding</i> will be used to
-   * {@link Encoding#decode(byte[], int, int) decode} and that value will be returned (but not stored).
-   * </p>
-   *
-   * @param bytes The bytes of the String. Must not be {@code null}.
-   * @param offset Offset into <i>bytes</i> to start.
-   * @param length The number of bytes in <i>bytes</i> which are relevant.
-   * @param encoding To use if non-ascii bytes seen.
-   * @return Decoded {@code String} from <i>bytes</i>.
-   * @throws IOException If error decoding from <i>Encoding</i>.
-   */
-  public String getString(byte[] bytes, int offset, int length, Encoding encoding) throws IOException {
-    if (length == 0) {
-      return "";
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        void appendString(StringBuilder sb) {
+            for (int i = 0; i < key.length; i++) {
+                sb.append((char) key[i]);
+            }
+        }
     }
 
-    final int hash = hashKey(bytes, offset, length);
-    // 0 indicates the presence of a non-ascii character - defer to encoding to create the string
-    if (hash == 0) {
-      return encoding.decode(bytes, offset, length);
-    }
-    cleanQueue();
-    // create a TempKey with the byte[] given
-    final TempKey tempKey = new TempKey(hash, bytes, offset, length);
-    SoftReference<String> ref = cache.get(tempKey);
-    if (ref != null) {
-      final String val = ref.get();
-      if (val != null) {
-        return val;
-      }
-    }
-    // in order to insert we need to create a "real" key with copy of bytes that will not be changed
-    final byte[] copy = Arrays.copyOfRange(bytes, offset, offset + length);
-    final Key key = new Key(copy, hash);
-    final String value = new String(copy, StandardCharsets.US_ASCII);
+    /**
+     * Custom {@link SoftReference} implementation which maintains a reference to the key in the cache,
+     * which allows aggressive cleaning when garbage collector collects the {@code String} instance.
+     */
+    private final class StringReference extends SoftReference<String> {
 
-    // handle case where a concurrent thread has populated the map or existing value has cleared reference
-    ref = cache.compute(key, (k, v) -> {
-      if (v == null) {
-        return new StringReference(key, value);
-      }
-      final String val = v.get();
-      return val != null ? v : new StringReference(key, value);
-    });
+        private final BaseKey key;
 
-    return ref.get();
-  }
+        StringReference(BaseKey key, String referent) {
+            super(referent, refQueue);
+            this.key = key;
+        }
 
-  /**
-   * Produces a {@link String} instance for the given <i>bytes</i>.
-   *
-   * <p>
-   * If all are valid ascii (i.e. {@code >= 0}) and a corresponding {@code String} value exists, it
-   * will be returned. If no value exists, a {@code String} will be created, but not stored.
-   * </p>
-   *
-   * <p>
-   * If non-ascii bytes are discovered, the <i>encoding</i> will be used to
-   * {@link Encoding#decode(byte[], int, int) decode} and that value will be returned (but not stored).
-   * </p>
-   *
-   * @param bytes The bytes of the String. Must not be {@code null}.
-   * @param offset Offset into <i>bytes</i> to start.
-   * @param length The number of bytes in <i>bytes</i> which are relevant.
-   * @param encoding To use if non-ascii bytes seen.
-   * @return Decoded {@code String} from <i>bytes</i>.
-   * @throws IOException If error decoding from <i>Encoding</i>.
-   */
-  public String getStringIfPresent(byte[] bytes, int offset, int length, Encoding encoding) throws IOException {
-    if (length == 0) {
-      return "";
+        void dispose() {
+            cache.remove(key, this);
+        }
     }
-
-    final int hash = hashKey(bytes, offset, length);
-    // 0 indicates the presence of a non-ascii character - defer to encoding to create the string
-    if (hash == 0) {
-      return encoding.decode(bytes, offset, length);
-    }
-    cleanQueue();
-    // create a TempKey with the byte[] given
-    final TempKey tempKey = new TempKey(hash, bytes, offset, length);
-    SoftReference<String> ref = cache.get(tempKey);
-    if (ref != null) {
-      final String val = ref.get();
-      if (val != null) {
-        return val;
-      }
-    }
-
-    return new String(bytes, offset, length, StandardCharsets.US_ASCII);
-  }
-
-  /**
-   * Process any entries in {@link #refQueue} to purge from the {@link #cache}.
-   * @see StringReference#dispose()
-   */
-  private void cleanQueue() {
-    Reference<?> ref;
-    while ((ref = refQueue.poll()) != null) {
-      ((StringReference) ref).dispose();
-    }
-  }
-
-  /**
-   * Generates a hash value for the relevant entries in <i>bytes</i> as long as all values are ascii ({@code >= 0}).
-   * @return hash code for relevant bytes, or {@code 0} if non-ascii bytes present.
-   */
-  private static int hashKey(byte[] bytes, int offset, int length) {
-    int result = 1;
-    for (int i = offset, j = offset + length; i < j; i++) {
-      final byte b = bytes[i];
-      // bytes are signed values. all ascii values are positive
-      if (b < 0) {
-        return 0;
-      }
-      result = 31 * result + b;
-    }
-    return result;
-  }
-
-  /**
-   * Performs equality check between <i>a</i> and <i>b</i> (with corresponding offset/length values).
-   * <p>
-   * The {@code static boolean equals(byte[].class, int, int, byte[], int, int} method in {@link java.util.Arrays}
-   * is optimized for longer {@code byte[]} instances than is expected to be seen here.
-   * </p>
-   */
-  static boolean arrayEquals(byte[] a, int aOffset, int aLength, byte[] b, int bOffset, int bLength) {
-    if (aLength != bLength) {
-      return false;
-    }
-    //TODO: in jdk9, could use VarHandle to read 4 bytes at a time as an int for comparison
-    // or 8 bytes as a long - though we likely expect short values here
-    for (int i = 0; i < aLength; i++) {
-      if (a[aOffset + i] != b[bOffset + i]) {
-        return false;
-      }
-    }
-    return true;
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public String toString() {
-    final StringBuilder sb = new StringBuilder(32 + (8 * cache.size()));
-    sb.append("AsciiStringInterner [");
-    cache.forEach((k, v) -> {
-      sb.append('\'');
-      k.appendString(sb);
-      sb.append("', ");
-    });
-    //replace trailing ', ' with ']';
-    final int length = sb.length();
-    if (length > 21) {
-      sb.setLength(sb.length() - 2);
-    }
-    sb.append(']');
-    return sb.toString();
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/core/BaseConnection.java b/pgjdbc/src/main/java/org/postgresql/core/BaseConnection.java
index 35dcb79..f696efb 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/BaseConnection.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/BaseConnection.java
@@ -5,6 +5,11 @@
 
 package org.postgresql.core;
 
+import java.sql.Connection;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.TimerTask;
+import java.util.logging.Logger;
 import org.postgresql.PGConnection;
 import org.postgresql.PGProperty;
 import org.postgresql.jdbc.FieldMetadata;
@@ -12,225 +17,220 @@ import org.postgresql.jdbc.TimestampUtils;
 import org.postgresql.util.LruCache;
 import org.postgresql.xml.PGXmlFactoryFactory;
 
-import java.sql.Connection;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.util.TimerTask;
-import java.util.logging.Logger;
-
 /**
  * Driver-internal connection interface. Application code should not use this interface.
  */
 public interface BaseConnection extends PGConnection, Connection {
-  /**
-   * Cancel the current query executing on this connection.
-   *
-   * @throws SQLException if something goes wrong.
-   */
-  @Override
-  void cancelQuery() throws SQLException;
+    /**
+     * Cancel the current query executing on this connection.
+     *
+     * @throws SQLException if something goes wrong.
+     */
+    @Override
+    void cancelQuery() throws SQLException;
 
-  /**
-   * Execute a SQL query that returns a single resultset. Never causes a new transaction to be
-   * started regardless of the autocommit setting.
-   *
-   * @param s the query to execute
-   * @return the (non-null) returned resultset
-   * @throws SQLException if something goes wrong.
-   */
-  ResultSet execSQLQuery(String s) throws SQLException;
+    /**
+     * Execute a SQL query that returns a single resultset. Never causes a new transaction to be
+     * started regardless of the autocommit setting.
+     *
+     * @param s the query to execute
+     * @return the (non-null) returned resultset
+     * @throws SQLException if something goes wrong.
+     */
+    ResultSet execSQLQuery(String s) throws SQLException;
 
-  ResultSet execSQLQuery(String s, int resultSetType, int resultSetConcurrency)
-      throws SQLException;
+    ResultSet execSQLQuery(String s, int resultSetType, int resultSetConcurrency)
+            throws SQLException;
 
-  /**
-   * Execute a SQL query that does not return results. Never causes a new transaction to be started
-   * regardless of the autocommit setting.
-   *
-   * @param s the query to execute
-   * @throws SQLException if something goes wrong.
-   */
-  void execSQLUpdate(String s) throws SQLException;
+    /**
+     * Execute a SQL query that does not return results. Never causes a new transaction to be started
+     * regardless of the autocommit setting.
+     *
+     * @param s the query to execute
+     * @throws SQLException if something goes wrong.
+     */
+    void execSQLUpdate(String s) throws SQLException;
 
-  /**
-   * Get the QueryExecutor implementation for this connection.
-   *
-   * @return the (non-null) executor
-   */
-  QueryExecutor getQueryExecutor();
+    /**
+     * Get the QueryExecutor implementation for this connection.
+     *
+     * @return the (non-null) executor
+     */
+    QueryExecutor getQueryExecutor();
 
-  /**
-   * Internal protocol for work with physical and logical replication. Physical replication available
-   * only since PostgreSQL version 9.1. Logical replication available only since PostgreSQL version 9.4.
-   *
-   * @return not null replication protocol
-   */
-  ReplicationProtocol getReplicationProtocol();
+    /**
+     * Internal protocol for work with physical and logical replication. Physical replication available
+     * only since PostgreSQL version 9.1. Logical replication available only since PostgreSQL version 9.4.
+     *
+     * @return not null replication protocol
+     */
+    ReplicationProtocol getReplicationProtocol();
 
-  /**
-   * <p>Construct and return an appropriate object for the given type and value. This only considers
-   * the types registered via {@link org.postgresql.PGConnection#addDataType(String, Class)} and
-   * {@link org.postgresql.PGConnection#addDataType(String, String)}.</p>
-   *
-   * <p>If no class is registered as handling the given type, then a generic
-   * {@link org.postgresql.util.PGobject} instance is returned.</p>
-   *
-   * <p>value or byteValue must be non-null</p>
-   * @param type the backend typename
-   * @param value the type-specific string representation of the value
-   * @param byteValue the type-specific binary representation of the value
-   * @return an appropriate object; never null.
-   * @throws SQLException if something goes wrong
-   */
-  Object getObject(String type, String value, byte [] byteValue)
-      throws SQLException;
+    /**
+     * <p>Construct and return an appropriate object for the given type and value. This only considers
+     * the types registered via {@link org.postgresql.PGConnection#addDataType(String, Class)} and
+     * {@link org.postgresql.PGConnection#addDataType(String, String)}.</p>
+     *
+     * <p>If no class is registered as handling the given type, then a generic
+     * {@link org.postgresql.util.PGobject} instance is returned.</p>
+     *
+     * <p>value or byteValue must be non-null</p>
+     *
+     * @param type      the backend typename
+     * @param value     the type-specific string representation of the value
+     * @param byteValue the type-specific binary representation of the value
+     * @return an appropriate object; never null.
+     * @throws SQLException if something goes wrong
+     */
+    Object getObject(String type, String value, byte[] byteValue)
+            throws SQLException;
 
-  Encoding getEncoding() throws SQLException;
+    Encoding getEncoding() throws SQLException;
 
-  TypeInfo getTypeInfo();
+    TypeInfo getTypeInfo();
 
-  /**
-   * <p>Check if we have at least a particular server version.</p>
-   *
-   * <p>The input version is of the form xxyyzz, matching a PostgreSQL version like xx.yy.zz. So 9.0.12
-   * is 90012.</p>
-   *
-   * @param ver the server version to check, of the form xxyyzz eg 90401
-   * @return true if the server version is at least "ver".
-   */
-  boolean haveMinimumServerVersion(int ver);
+    /**
+     * <p>Check if we have at least a particular server version.</p>
+     *
+     * <p>The input version is of the form xxyyzz, matching a PostgreSQL version like xx.yy.zz. So 9.0.12
+     * is 90012.</p>
+     *
+     * @param ver the server version to check, of the form xxyyzz eg 90401
+     * @return true if the server version is at least "ver".
+     */
+    boolean haveMinimumServerVersion(int ver);
 
-  /**
-   * <p>Check if we have at least a particular server version.</p>
-   *
-   * <p>The input version is of the form xxyyzz, matching a PostgreSQL version like xx.yy.zz. So 9.0.12
-   * is 90012.</p>
-   *
-   * @param ver the server version to check
-   * @return true if the server version is at least "ver".
-   */
-  boolean haveMinimumServerVersion(Version ver);
+    /**
+     * <p>Check if we have at least a particular server version.</p>
+     *
+     * <p>The input version is of the form xxyyzz, matching a PostgreSQL version like xx.yy.zz. So 9.0.12
+     * is 90012.</p>
+     *
+     * @param ver the server version to check
+     * @return true if the server version is at least "ver".
+     */
+    boolean haveMinimumServerVersion(Version ver);
 
-  /**
-   * Encode a string using the database's client_encoding (usually UTF8, but can vary on older
-   * server versions). This is used when constructing synthetic resultsets (for example, in metadata
-   * methods).
-   *
-   * @param str the string to encode
-   * @return an encoded representation of the string
-   * @throws SQLException if something goes wrong.
-   */
-  byte[] encodeString(String str) throws SQLException;
+    /**
+     * Encode a string using the database's client_encoding (usually UTF8, but can vary on older
+     * server versions). This is used when constructing synthetic resultsets (for example, in metadata
+     * methods).
+     *
+     * @param str the string to encode
+     * @return an encoded representation of the string
+     * @throws SQLException if something goes wrong.
+     */
+    byte[] encodeString(String str) throws SQLException;
 
-  /**
-   * Escapes a string for use as string-literal within an SQL command. The method chooses the
-   * applicable escaping rules based on the value of {@link #getStandardConformingStrings()}.
-   *
-   * @param str a string value
-   * @return the escaped representation of the string
-   * @throws SQLException if the string contains a {@code \0} character
-   */
-  String escapeString(String str) throws SQLException;
+    /**
+     * Escapes a string for use as string-literal within an SQL command. The method chooses the
+     * applicable escaping rules based on the value of {@link #getStandardConformingStrings()}.
+     *
+     * @param str a string value
+     * @return the escaped representation of the string
+     * @throws SQLException if the string contains a {@code \0} character
+     */
+    String escapeString(String str) throws SQLException;
 
-  /**
-   * Returns whether the server treats string-literals according to the SQL standard or if it uses
-   * traditional PostgreSQL escaping rules. Versions up to 8.1 always treated backslashes as escape
-   * characters in string-literals. Since 8.2, this depends on the value of the
-   * {@code standard_conforming_strings} server variable.
-   *
-   * @return true if the server treats string literals according to the SQL standard
-   * @see QueryExecutor#getStandardConformingStrings()
-   */
-  boolean getStandardConformingStrings();
+    /**
+     * Returns whether the server treats string-literals according to the SQL standard or if it uses
+     * traditional PostgreSQL escaping rules. Versions up to 8.1 always treated backslashes as escape
+     * characters in string-literals. Since 8.2, this depends on the value of the
+     * {@code standard_conforming_strings} server variable.
+     *
+     * @return true if the server treats string literals according to the SQL standard
+     * @see QueryExecutor#getStandardConformingStrings()
+     */
+    boolean getStandardConformingStrings();
 
-  // Ew. Quick hack to give access to the connection-specific utils implementation.
-  @Deprecated
-  TimestampUtils getTimestampUtils();
+    // Ew. Quick hack to give access to the connection-specific utils implementation.
+    @Deprecated
+    TimestampUtils getTimestampUtils();
 
-  // Get the per-connection logger.
-  Logger getLogger();
+    // Get the per-connection logger.
+    Logger getLogger();
 
-  // Get the bind-string-as-varchar config flag
-  boolean getStringVarcharFlag();
+    // Get the bind-string-as-varchar config flag
+    boolean getStringVarcharFlag();
 
-  /**
-   * Get the current transaction state of this connection.
-   *
-   * @return current transaction state of this connection
-   */
-  TransactionState getTransactionState();
+    /**
+     * Get the current transaction state of this connection.
+     *
+     * @return current transaction state of this connection
+     */
+    TransactionState getTransactionState();
 
-  /**
-   * Returns true if value for the given oid should be sent using binary transfer. False if value
-   * should be sent using text transfer.
-   *
-   * @param oid The oid to check.
-   * @return True for binary transfer, false for text transfer.
-   */
-  boolean binaryTransferSend(int oid);
+    /**
+     * Returns true if value for the given oid should be sent using binary transfer. False if value
+     * should be sent using text transfer.
+     *
+     * @param oid The oid to check.
+     * @return True for binary transfer, false for text transfer.
+     */
+    boolean binaryTransferSend(int oid);
 
-  /**
-   * Return whether to disable column name sanitation.
-   *
-   * @return true column sanitizer is disabled
-   */
-  boolean isColumnSanitiserDisabled();
+    /**
+     * Return whether to disable column name sanitation.
+     *
+     * @return true column sanitizer is disabled
+     */
+    boolean isColumnSanitiserDisabled();
 
-  /**
-   * Schedule a TimerTask for later execution. The task will be scheduled with the shared Timer for
-   * this connection.
-   *
-   * @param timerTask timer task to schedule
-   * @param milliSeconds delay in milliseconds
-   */
-  void addTimerTask(TimerTask timerTask, long milliSeconds);
+    /**
+     * Schedule a TimerTask for later execution. The task will be scheduled with the shared Timer for
+     * this connection.
+     *
+     * @param timerTask    timer task to schedule
+     * @param milliSeconds delay in milliseconds
+     */
+    void addTimerTask(TimerTask timerTask, long milliSeconds);
 
-  /**
-   * Invoke purge() on the underlying shared Timer so that internal resources will be released.
-   */
-  void purgeTimerTasks();
+    /**
+     * Invoke purge() on the underlying shared Timer so that internal resources will be released.
+     */
+    void purgeTimerTasks();
 
-  /**
-   * Return metadata cache for given connection.
-   *
-   * @return metadata cache
-   */
-  LruCache<FieldMetadata.Key, FieldMetadata> getFieldMetadataCache();
+    /**
+     * Return metadata cache for given connection.
+     *
+     * @return metadata cache
+     */
+    LruCache<FieldMetadata.Key, FieldMetadata> getFieldMetadataCache();
 
-  CachedQuery createQuery(String sql, boolean escapeProcessing, boolean isParameterized,
-      String... columnNames)
-      throws SQLException;
+    CachedQuery createQuery(String sql, boolean escapeProcessing, boolean isParameterized,
+                            String... columnNames)
+            throws SQLException;
 
-  /**
-   * By default, the connection resets statement cache in case deallocate all/discard all
-   * message is observed.
-   * This API allows to disable that feature for testing purposes.
-   *
-   * @param flushCacheOnDeallocate true if statement cache should be reset when "deallocate/discard" message observed
-   */
-  void setFlushCacheOnDeallocate(boolean flushCacheOnDeallocate);
+    /**
+     * By default, the connection resets statement cache in case deallocate all/discard all
+     * message is observed.
+     * This API allows disabling that feature for testing purposes.
+     *
+     * @param flushCacheOnDeallocate true if statement cache should be reset when "deallocate/discard" message observed
+     */
+    void setFlushCacheOnDeallocate(boolean flushCacheOnDeallocate);
 
-  /**
-   * Indicates if statements to backend should be hinted as read only.
-   *
-   * @return Indication if hints to backend (such as when transaction begins)
-   *         should be read only.
-   * @see PGProperty#READ_ONLY_MODE
-   */
-  boolean hintReadOnly();
+    /**
+     * Indicates if statements to backend should be hinted as read only.
+     *
+     * @return Indication if hints to backend (such as when transaction begins)
+     * should be read only.
+     * @see PGProperty#READ_ONLY_MODE
+     */
+    boolean hintReadOnly();
 
-  /**
-   * Retrieve the factory to instantiate XML processing factories.
-   *
-   * @return The factory to use to instantiate XML processing factories
-   * @throws SQLException if the class cannot be found or instantiated.
-   */
-  PGXmlFactoryFactory getXmlFactoryFactory() throws SQLException;
+    /**
+     * Retrieve the factory to instantiate XML processing factories.
+     *
+     * @return The factory to use to instantiate XML processing factories
+     * @throws SQLException if the class cannot be found or instantiated.
+     */
+    PGXmlFactoryFactory getXmlFactoryFactory() throws SQLException;
 
-  /**
-   * Indicates if error details from server used in included in logging and exceptions.
-   *
-   * @return true if should be included and passed on to other exceptions
-   */
-  boolean getLogServerErrorDetail();
+    /**
+     * Indicates if error details from the server are included in logging and exceptions.
+     *
+     * @return true if should be included and passed on to other exceptions
+     */
+    boolean getLogServerErrorDetail();
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/core/BaseQueryKey.java b/pgjdbc/src/main/java/org/postgresql/core/BaseQueryKey.java
index d9d4aea..798295d 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/BaseQueryKey.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/BaseQueryKey.java
@@ -14,59 +14,59 @@ import org.postgresql.util.CanEstimateSize;
  * as a cache key.
  */
 class BaseQueryKey implements CanEstimateSize {
-  public final String sql;
-  public final boolean isParameterized;
-  public final boolean escapeProcessing;
+    public final String sql;
+    public final boolean isParameterized;
+    public final boolean escapeProcessing;
 
-  BaseQueryKey(String sql, boolean isParameterized, boolean escapeProcessing) {
-    this.sql = sql;
-    this.isParameterized = isParameterized;
-    this.escapeProcessing = escapeProcessing;
-  }
-
-  @Override
-  public String toString() {
-    return "BaseQueryKey{"
-        + "sql='" + sql + '\''
-        + ", isParameterized=" + isParameterized
-        + ", escapeProcessing=" + escapeProcessing
-        + '}';
-  }
-
-  @Override
-  public long getSize() {
-    if (sql == null) { // just in case
-      return 16;
-    }
-    return 16 + sql.length() * 2L; // 2 bytes per char, revise with Java 9's compact strings
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) {
-      return true;
-    }
-    if (o == null || getClass() != o.getClass()) {
-      return false;
+    BaseQueryKey(String sql, boolean isParameterized, boolean escapeProcessing) {
+        this.sql = sql;
+        this.isParameterized = isParameterized;
+        this.escapeProcessing = escapeProcessing;
     }
 
-    BaseQueryKey that = (BaseQueryKey) o;
-
-    if (isParameterized != that.isParameterized) {
-      return false;
+    @Override
+    public String toString() {
+        return "BaseQueryKey{"
+                + "sql='" + sql + '\''
+                + ", isParameterized=" + isParameterized
+                + ", escapeProcessing=" + escapeProcessing
+                + '}';
     }
-    if (escapeProcessing != that.escapeProcessing) {
-      return false;
+
+    @Override
+    public long getSize() {
+        if (sql == null) { // just in case
+            return 16;
+        }
+        return 16 + sql.length() * 2L; // 2 bytes per char, revise with Java 9's compact strings
     }
-    return sql != null ? sql.equals(that.sql) : that.sql == null;
 
-  }
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
 
-  @Override
-  public int hashCode() {
-    int result = sql != null ? sql.hashCode() : 0;
-    result = 31 * result + (isParameterized ? 1 : 0);
-    result = 31 * result + (escapeProcessing ? 1 : 0);
-    return result;
-  }
+        BaseQueryKey that = (BaseQueryKey) o;
+
+        if (isParameterized != that.isParameterized) {
+            return false;
+        }
+        if (escapeProcessing != that.escapeProcessing) {
+            return false;
+        }
+        return sql != null ? sql.equals(that.sql) : that.sql == null;
+
+    }
+
+    @Override
+    public int hashCode() {
+        int result = sql != null ? sql.hashCode() : 0;
+        result = 31 * result + (isParameterized ? 1 : 0);
+        result = 31 * result + (escapeProcessing ? 1 : 0);
+        return result;
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/core/BaseStatement.java b/pgjdbc/src/main/java/org/postgresql/core/BaseStatement.java
index d7f8a66..f171442 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/BaseStatement.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/BaseStatement.java
@@ -5,71 +5,70 @@
 
 package org.postgresql.core;
 
-import org.postgresql.PGStatement;
-
 import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.sql.Statement;
 import java.util.List;
+import org.postgresql.PGStatement;
 
 /**
  * Driver-internal statement interface. Application code should not use this interface.
  */
 public interface BaseStatement extends PGStatement, Statement {
-  /**
-   * Create a synthetic resultset from data provided by the driver.
-   *
-   * @param fields the column metadata for the resultset
-   * @param tuples the resultset data
-   * @return the new ResultSet
-   * @throws SQLException if something goes wrong
-   */
-  ResultSet createDriverResultSet(Field[] fields, List<Tuple> tuples) throws SQLException;
+    /**
+     * Create a synthetic resultset from data provided by the driver.
+     *
+     * @param fields the column metadata for the resultset
+     * @param tuples the resultset data
+     * @return the new ResultSet
+     * @throws SQLException if something goes wrong
+     */
+    ResultSet createDriverResultSet(Field[] fields, List<Tuple> tuples) throws SQLException;
 
-  /**
-   * Create a resultset from data retrieved from the server.
-   *
-   * @param originalQuery the query that generated this resultset; used when dealing with updateable
-   *        resultsets
-   * @param fields the column metadata for the resultset
-   * @param tuples the resultset data
-   * @param cursor the cursor to use to retrieve more data from the server; if null, no additional
-   *        data is present.
-   * @return the new ResultSet
-   * @throws SQLException if something goes wrong
-   */
-  ResultSet createResultSet(Query originalQuery, Field[] fields, List<Tuple> tuples,
-      ResultCursor cursor) throws SQLException;
+    /**
+     * Create a resultset from data retrieved from the server.
+     *
+     * @param originalQuery the query that generated this resultset; used when dealing with updateable
+     *                      resultsets
+     * @param fields        the column metadata for the resultset
+     * @param tuples        the resultset data
+     * @param cursor        the cursor to use to retrieve more data from the server; if null, no additional
+     *                      data is present.
+     * @return the new ResultSet
+     * @throws SQLException if something goes wrong
+     */
+    ResultSet createResultSet(Query originalQuery, Field[] fields, List<Tuple> tuples,
+                              ResultCursor cursor) throws SQLException;
 
-  /**
-   * Execute a query, passing additional query flags.
-   *
-   * @param sql the query to execute (JDBC-style query)
-   * @param flags additional {@link QueryExecutor} flags for execution; these are bitwise-ORed into
-   *        the default flags.
-   * @return true if there is a result set
-   * @throws SQLException if something goes wrong.
-   */
-  boolean executeWithFlags(String sql, int flags) throws SQLException;
+    /**
+     * Execute a query, passing additional query flags.
+     *
+     * @param sql   the query to execute (JDBC-style query)
+     * @param flags additional {@link QueryExecutor} flags for execution; these are bitwise-ORed into
+     *              the default flags.
+     * @return true if there is a result set
+     * @throws SQLException if something goes wrong.
+     */
+    boolean executeWithFlags(String sql, int flags) throws SQLException;
 
-  /**
-   * Execute a query, passing additional query flags.
-   *
-   * @param cachedQuery the query to execute (native to PostgreSQL)
-   * @param flags additional {@link QueryExecutor} flags for execution; these are bitwise-ORed into
-   *        the default flags.
-   * @return true if there is a result set
-   * @throws SQLException if something goes wrong.
-   */
-  boolean executeWithFlags(CachedQuery cachedQuery, int flags) throws SQLException;
+    /**
+     * Execute a query, passing additional query flags.
+     *
+     * @param cachedQuery the query to execute (native to PostgreSQL)
+     * @param flags       additional {@link QueryExecutor} flags for execution; these are bitwise-ORed into
+     *                    the default flags.
+     * @return true if there is a result set
+     * @throws SQLException if something goes wrong.
+     */
+    boolean executeWithFlags(CachedQuery cachedQuery, int flags) throws SQLException;
 
-  /**
-   * Execute a prepared query, passing additional query flags.
-   *
-   * @param flags additional {@link QueryExecutor} flags for execution; these are bitwise-ORed into
-   *        the default flags.
-   * @return true if there is a result set
-   * @throws SQLException if something goes wrong.
-   */
-  boolean executeWithFlags(int flags) throws SQLException;
+    /**
+     * Execute a prepared query, passing additional query flags.
+     *
+     * @param flags additional {@link QueryExecutor} flags for execution; these are bitwise-ORed into
+     *              the default flags.
+     * @return true if there is a result set
+     * @throws SQLException if something goes wrong.
+     */
+    boolean executeWithFlags(int flags) throws SQLException;
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/core/CachedQuery.java b/pgjdbc/src/main/java/org/postgresql/core/CachedQuery.java
index 23ac4cd..3a50f10 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/CachedQuery.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/CachedQuery.java
@@ -12,64 +12,64 @@ import org.postgresql.util.CanEstimateSize;
  * the same query through {@link java.sql.Connection#prepareStatement(String)}.
  */
 public class CachedQuery implements CanEstimateSize {
-  /**
-   * Cache key. {@link String} or {@code org.postgresql.util.CanEstimateSize}.
-   */
-  public final Object key;
-  public final Query query;
-  public final boolean isFunction;
+    /**
+     * Cache key. {@link String} or {@code org.postgresql.util.CanEstimateSize}.
+     */
+    public final Object key;
+    public final Query query;
+    public final boolean isFunction;
 
-  private int executeCount;
+    private int executeCount;
 
-  public CachedQuery(Object key, Query query, boolean isFunction) {
-    assert key instanceof String || key instanceof CanEstimateSize
-        : "CachedQuery.key should either be String or implement CanEstimateSize."
-        + " Actual class is " + key.getClass();
-    this.key = key;
-    this.query = query;
-    this.isFunction = isFunction;
-  }
-
-  public void increaseExecuteCount() {
-    if (executeCount < Integer.MAX_VALUE) {
-      executeCount++;
+    public CachedQuery(Object key, Query query, boolean isFunction) {
+        assert key instanceof String || key instanceof CanEstimateSize
+                : "CachedQuery.key should either be String or implement CanEstimateSize."
+                + " Actual class is " + key.getClass();
+        this.key = key;
+        this.query = query;
+        this.isFunction = isFunction;
     }
-  }
 
-  public void increaseExecuteCount(int inc) {
-    int newValue = executeCount + inc;
-    if (newValue > 0) { // if overflows, just ignore the update
-      executeCount = newValue;
+    public void increaseExecuteCount() {
+        if (executeCount < Integer.MAX_VALUE) {
+            executeCount++;
+        }
     }
-  }
 
-  /**
-   * Number of times this statement has been used.
-   *
-   * @return number of times this statement has been used
-   */
-  public int getExecuteCount() {
-    return executeCount;
-  }
-
-  @Override
-  public long getSize() {
-    long queryLength;
-    if (key instanceof String) {
-      queryLength = ((String) key).length() * 2L; // 2 bytes per char, revise with Java 9's compact strings
-    } else {
-      queryLength = ((CanEstimateSize) key).getSize();
+    public void increaseExecuteCount(int inc) {
+        int newValue = executeCount + inc;
+        if (newValue > 0) { // if overflows, just ignore the update
+            executeCount = newValue;
+        }
     }
-    return queryLength * 2 /* original query and native sql */
-        + 100L /* entry in hash map, CachedQuery wrapper, etc */;
-  }
 
-  @Override
-  public String toString() {
-    return "CachedQuery{"
-        + "executeCount=" + executeCount
-        + ", query=" + query
-        + ", isFunction=" + isFunction
-        + '}';
-  }
+    /**
+     * Number of times this statement has been used.
+     *
+     * @return number of times this statement has been used
+     */
+    public int getExecuteCount() {
+        return executeCount;
+    }
+
+    @Override
+    public long getSize() {
+        long queryLength;
+        if (key instanceof String) {
+            queryLength = ((String) key).length() * 2L; // 2 bytes per char, revise with Java 9's compact strings
+        } else {
+            queryLength = ((CanEstimateSize) key).getSize();
+        }
+        return queryLength * 2 /* original query and native sql */
+                + 100L /* entry in hash map, CachedQuery wrapper, etc */;
+    }
+
+    @Override
+    public String toString() {
+        return "CachedQuery{"
+                + "executeCount=" + executeCount
+                + ", query=" + query
+                + ", isFunction=" + isFunction
+                + '}';
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/core/CachedQueryCreateAction.java b/pgjdbc/src/main/java/org/postgresql/core/CachedQueryCreateAction.java
index 90af15d..c1181a7 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/CachedQueryCreateAction.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/CachedQueryCreateAction.java
@@ -5,68 +5,67 @@
 
 package org.postgresql.core;
 
-import org.postgresql.jdbc.PreferQueryMode;
-import org.postgresql.util.LruCache;
-
 import java.sql.SQLException;
 import java.util.List;
+import org.postgresql.jdbc.PreferQueryMode;
+import org.postgresql.util.LruCache;
 
 /**
  * Creates an instance of {@link CachedQuery} for a given connection.
  */
 class CachedQueryCreateAction implements LruCache.CreateAction<Object, CachedQuery> {
-  private static final String[] EMPTY_RETURNING = new String[0];
-  private final QueryExecutor queryExecutor;
+    private static final String[] EMPTY_RETURNING = new String[0];
+    private final QueryExecutor queryExecutor;
 
-  CachedQueryCreateAction(QueryExecutor queryExecutor) {
-    this.queryExecutor = queryExecutor;
-  }
-
-  @Override
-  public CachedQuery create(Object key) throws SQLException {
-    assert key instanceof String || key instanceof BaseQueryKey
-        : "Query key should be String or BaseQueryKey. Given " + key.getClass() + ", sql: "
-        + key;
-    BaseQueryKey queryKey;
-    String parsedSql;
-    if (key instanceof BaseQueryKey) {
-      queryKey = (BaseQueryKey) key;
-      parsedSql = queryKey.sql;
-    } else {
-      queryKey = null;
-      parsedSql = (String) key;
-    }
-    if (key instanceof String || queryKey.escapeProcessing) {
-      parsedSql =
-          Parser.replaceProcessing(parsedSql, true, queryExecutor.getStandardConformingStrings());
-    }
-    boolean isFunction;
-    if (key instanceof CallableQueryKey) {
-      JdbcCallParseInfo callInfo =
-          Parser.modifyJdbcCall(parsedSql, queryExecutor.getStandardConformingStrings(),
-              queryExecutor.getServerVersionNum(), queryExecutor.getProtocolVersion(), queryExecutor.getEscapeSyntaxCallMode());
-      parsedSql = callInfo.getSql();
-      isFunction = callInfo.isFunction();
-    } else {
-      isFunction = false;
-    }
-    boolean isParameterized = key instanceof String || queryKey.isParameterized;
-    boolean splitStatements = isParameterized || queryExecutor.getPreferQueryMode().compareTo(PreferQueryMode.EXTENDED) >= 0;
-
-    String[] returningColumns;
-    if (key instanceof QueryWithReturningColumnsKey) {
-      returningColumns = ((QueryWithReturningColumnsKey) key).columnNames;
-    } else {
-      returningColumns = EMPTY_RETURNING;
+    CachedQueryCreateAction(QueryExecutor queryExecutor) {
+        this.queryExecutor = queryExecutor;
     }
 
-    List<NativeQuery> queries = Parser.parseJdbcSql(parsedSql,
-        queryExecutor.getStandardConformingStrings(), isParameterized, splitStatements,
-        queryExecutor.isReWriteBatchedInsertsEnabled(), queryExecutor.getQuoteReturningIdentifiers(),
-        returningColumns
+    @Override
+    public CachedQuery create(Object key) throws SQLException {
+        assert key instanceof String || key instanceof BaseQueryKey
+                : "Query key should be String or BaseQueryKey. Given " + key.getClass() + ", sql: "
+                + key;
+        BaseQueryKey queryKey;
+        String parsedSql;
+        if (key instanceof BaseQueryKey) {
+            queryKey = (BaseQueryKey) key;
+            parsedSql = queryKey.sql;
+        } else {
+            queryKey = null;
+            parsedSql = (String) key;
+        }
+        if (key instanceof String || queryKey.escapeProcessing) {
+            parsedSql =
+                    Parser.replaceProcessing(parsedSql, true, queryExecutor.getStandardConformingStrings());
+        }
+        boolean isFunction;
+        if (key instanceof CallableQueryKey) {
+            JdbcCallParseInfo callInfo =
+                    Parser.modifyJdbcCall(parsedSql, queryExecutor.getStandardConformingStrings(),
+                            queryExecutor.getServerVersionNum(), queryExecutor.getProtocolVersion(), queryExecutor.getEscapeSyntaxCallMode());
+            parsedSql = callInfo.getSql();
+            isFunction = callInfo.isFunction();
+        } else {
+            isFunction = false;
+        }
+        boolean isParameterized = key instanceof String || queryKey.isParameterized;
+        boolean splitStatements = isParameterized || queryExecutor.getPreferQueryMode().compareTo(PreferQueryMode.EXTENDED) >= 0;
+
+        String[] returningColumns;
+        if (key instanceof QueryWithReturningColumnsKey) {
+            returningColumns = ((QueryWithReturningColumnsKey) key).columnNames;
+        } else {
+            returningColumns = EMPTY_RETURNING;
+        }
+
+        List<NativeQuery> queries = Parser.parseJdbcSql(parsedSql,
+                queryExecutor.getStandardConformingStrings(), isParameterized, splitStatements,
+                queryExecutor.isReWriteBatchedInsertsEnabled(), queryExecutor.getQuoteReturningIdentifiers(),
+                returningColumns
         );
 
-    Query query = queryExecutor.wrap(queries);
-    return new CachedQuery(key, query, isFunction);
-  }
+        Query query = queryExecutor.wrap(queries);
+        return new CachedQuery(key, query, isFunction);
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/core/CallableQueryKey.java b/pgjdbc/src/main/java/org/postgresql/core/CallableQueryKey.java
index d65ab18..6a0aa3e 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/CallableQueryKey.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/CallableQueryKey.java
@@ -12,27 +12,27 @@ package org.postgresql.core;
  */
 class CallableQueryKey extends BaseQueryKey {
 
-  CallableQueryKey(String sql) {
-    super(sql, true, true);
-  }
+    CallableQueryKey(String sql) {
+        super(sql, true, true);
+    }
 
-  @Override
-  public String toString() {
-    return "CallableQueryKey{"
-        + "sql='" + sql + '\''
-        + ", isParameterized=" + isParameterized
-        + ", escapeProcessing=" + escapeProcessing
-        + '}';
-  }
+    @Override
+    public String toString() {
+        return "CallableQueryKey{"
+                + "sql='" + sql + '\''
+                + ", isParameterized=" + isParameterized
+                + ", escapeProcessing=" + escapeProcessing
+                + '}';
+    }
 
-  @Override
-  public int hashCode() {
-    return super.hashCode() * 31;
-  }
+    @Override
+    public int hashCode() {
+        return super.hashCode() * 31;
+    }
 
-  @Override
-  public boolean equals(Object o) {
-    // Nothing interesting here, overriding equals to make hashCode and equals paired
-    return super.equals(o);
-  }
+    @Override
+    public boolean equals(Object o) {
+        // Nothing interesting here, overriding equals to make hashCode and equals paired
+        return super.equals(o);
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/core/CommandCompleteParser.java b/pgjdbc/src/main/java/org/postgresql/core/CommandCompleteParser.java
index a4e52b3..e696387 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/CommandCompleteParser.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/CommandCompleteParser.java
@@ -13,96 +13,96 @@ import org.postgresql.util.PSQLState;
  * Parses {@code oid} and {@code rows} from a {@code CommandComplete (B)} message (end of Execute).
  */
 public final class CommandCompleteParser {
-  private long oid;
-  private long rows;
+    private long oid;
+    private long rows;
 
-  public CommandCompleteParser() {
-  }
-
-  public long getOid() {
-    return oid;
-  }
-
-  public long getRows() {
-    return rows;
-  }
-
-  void set(long oid, long rows) {
-    this.oid = oid;
-    this.rows = rows;
-  }
-
-  /**
-   * Parses {@code CommandComplete (B)} message.
-   * Status is in the format of "COMMAND OID ROWS" where both 'OID' and 'ROWS' are optional
-   * and COMMAND can have spaces within it, like CREATE TABLE.
-   *
-   * @param status COMMAND OID ROWS message
-   * @throws PSQLException in case the status cannot be parsed
-   */
-  public void parse(String status) throws PSQLException {
-    // Assumption: command neither starts nor ends with a digit
-    if (!Parser.isDigitAt(status, status.length() - 1)) {
-      set(0, 0);
-      return;
+    public CommandCompleteParser() {
     }
 
-    // Scan backwards, while searching for a maximum of two number groups
-    //   COMMAND OID ROWS
-    //   COMMAND ROWS
-    long oid = 0;
-    long rows = 0;
-    try {
-      int lastSpace = status.lastIndexOf(' ');
-      // Status ends with a digit => it is ROWS
-      if (Parser.isDigitAt(status, lastSpace + 1)) {
-        rows = Parser.parseLong(status, lastSpace + 1, status.length());
+    public long getOid() {
+        return oid;
+    }
 
-        if (Parser.isDigitAt(status, lastSpace - 1)) {
-          int penultimateSpace = status.lastIndexOf(' ', lastSpace - 1);
-          if (Parser.isDigitAt(status, penultimateSpace + 1)) {
-            oid = Parser.parseLong(status, penultimateSpace + 1, lastSpace);
-          }
+    public long getRows() {
+        return rows;
+    }
+
+    public void set(long oid, long rows) {
+        this.oid = oid;
+        this.rows = rows;
+    }
+
+    /**
+     * Parses {@code CommandComplete (B)} message.
+     * Status is in the format of "COMMAND OID ROWS" where both 'OID' and 'ROWS' are optional
+     * and COMMAND can have spaces within it, like CREATE TABLE.
+     *
+     * @param status COMMAND OID ROWS message
+     * @throws PSQLException in case the status cannot be parsed
+     */
+    public void parse(String status) throws PSQLException {
+        // Assumption: command neither starts nor ends with a digit
+        if (!Parser.isDigitAt(status, status.length() - 1)) {
+            set(0, 0);
+            return;
         }
-      }
-    } catch (NumberFormatException e) {
-      // This should only occur if the oid or rows are out of 0..Long.MAX_VALUE range
-      throw new PSQLException(
-          GT.tr("Unable to parse the count in command completion tag: {0}.", status),
-          PSQLState.CONNECTION_FAILURE, e);
-    }
-    set(oid, rows);
-  }
 
-  @Override
-  public String toString() {
-    return "CommandStatus{"
-        + "oid=" + oid
-        + ", rows=" + rows
-        + '}';
-  }
+        // Scan backwards, while searching for a maximum of two number groups
+        //   COMMAND OID ROWS
+        //   COMMAND ROWS
+        long oid = 0;
+        long rows = 0;
+        try {
+            int lastSpace = status.lastIndexOf(' ');
+            // Status ends with a digit => it is ROWS
+            if (Parser.isDigitAt(status, lastSpace + 1)) {
+                rows = Parser.parseLong(status, lastSpace + 1, status.length());
 
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) {
-      return true;
-    }
-    if (o == null || getClass() != o.getClass()) {
-      return false;
+                if (Parser.isDigitAt(status, lastSpace - 1)) {
+                    int penultimateSpace = status.lastIndexOf(' ', lastSpace - 1);
+                    if (Parser.isDigitAt(status, penultimateSpace + 1)) {
+                        oid = Parser.parseLong(status, penultimateSpace + 1, lastSpace);
+                    }
+                }
+            }
+        } catch (NumberFormatException e) {
+            // This should only occur if the oid or rows are out of 0..Long.MAX_VALUE range
+            throw new PSQLException(
+                    GT.tr("Unable to parse the count in command completion tag: {0}.", status),
+                    PSQLState.CONNECTION_FAILURE, e);
+        }
+        set(oid, rows);
     }
 
-    CommandCompleteParser that = (CommandCompleteParser) o;
-
-    if (oid != that.oid) {
-      return false;
+    @Override
+    public String toString() {
+        return "CommandStatus{"
+                + "oid=" + oid
+                + ", rows=" + rows
+                + '}';
     }
-    return rows == that.rows;
-  }
 
-  @Override
-  public int hashCode() {
-    int result = (int) (oid ^ (oid >>> 32));
-    result = 31 * result + (int) (rows ^ (rows >>> 32));
-    return result;
-  }
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+
+        CommandCompleteParser that = (CommandCompleteParser) o;
+
+        if (oid != that.oid) {
+            return false;
+        }
+        return rows == that.rows;
+    }
+
+    @Override
+    public int hashCode() {
+        int result = (int) (oid ^ (oid >>> 32));
+        result = 31 * result + (int) (rows ^ (rows >>> 32));
+        return result;
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/core/ConnectionFactory.java b/pgjdbc/src/main/java/org/postgresql/core/ConnectionFactory.java
index 45a0008..32d386c 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/ConnectionFactory.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/ConnectionFactory.java
@@ -6,6 +6,11 @@
 
 package org.postgresql.core;
 
+import java.io.IOException;
+import java.sql.SQLException;
+import java.util.Properties;
+import java.util.logging.Level;
+import java.util.logging.Logger;
 import org.postgresql.PGProperty;
 import org.postgresql.core.v3.ConnectionFactoryImpl;
 import org.postgresql.util.GT;
@@ -13,12 +18,6 @@ import org.postgresql.util.HostSpec;
 import org.postgresql.util.PSQLException;
 import org.postgresql.util.PSQLState;
 
-import java.io.IOException;
-import java.sql.SQLException;
-import java.util.Properties;
-import java.util.logging.Level;
-import java.util.logging.Logger;
-
 /**
  * Handles protocol-specific connection setup.
  *
@@ -26,71 +25,71 @@ import java.util.logging.Logger;
  */
 public abstract class ConnectionFactory {
 
-  private static final Logger LOGGER = Logger.getLogger(ConnectionFactory.class.getName());
+    private static final Logger LOGGER = Logger.getLogger(ConnectionFactory.class.getName());
 
-  public ConnectionFactory() {
-  }
-
-  /**
-   * <p>Establishes and initializes a new connection.</p>
-   *
-   * <p>If the "protocolVersion" property is specified, only that protocol version is tried. Otherwise,
-   * all protocols are tried in order, falling back to older protocols as necessary.</p>
-   *
-   * <p>Currently, protocol versions 3 (7.4+) is supported.</p>
-   *
-   * @param hostSpecs at least one host and port to connect to; multiple elements for round-robin
-   *        failover
-   * @param info extra properties controlling the connection; notably, "password" if present
-   *        supplies the password to authenticate with.
-   * @return the new, initialized, connection
-   * @throws SQLException if the connection could not be established.
-   */
-  public static QueryExecutor openConnection(HostSpec[] hostSpecs,
-      Properties info) throws SQLException {
-    String protoName = PGProperty.PROTOCOL_VERSION.getOrDefault(info);
-
-    if (protoName == null || protoName.isEmpty() || "3".equals(protoName)) {
-      ConnectionFactory connectionFactory = new ConnectionFactoryImpl();
-      QueryExecutor queryExecutor = connectionFactory.openConnectionImpl(
-          hostSpecs, info);
-      if (queryExecutor != null) {
-        return queryExecutor;
-      }
+    public ConnectionFactory() {
     }
 
-    throw new PSQLException(
-        GT.tr("A connection could not be made using the requested protocol {0}.", protoName),
-        PSQLState.CONNECTION_UNABLE_TO_CONNECT);
-  }
+    /**
+     * <p>Establishes and initializes a new connection.</p>
+     *
+     * <p>If the "protocolVersion" property is specified, only that protocol version is tried. Otherwise,
+     * all protocols are tried in order, falling back to older protocols as necessary.</p>
+     *
+     * <p>Currently, protocol version 3 (7.4+) is supported.</p>
+     *
+     * @param hostSpecs at least one host and port to connect to; multiple elements for round-robin
+     *                  failover
+     * @param info      extra properties controlling the connection; notably, "password" if present
+     *                  supplies the password to authenticate with.
+     * @return the new, initialized, connection
+     * @throws SQLException if the connection could not be established.
+     */
+    public static QueryExecutor openConnection(HostSpec[] hostSpecs,
+                                               Properties info) throws SQLException {
+        String protoName = PGProperty.PROTOCOL_VERSION.getOrDefault(info);
 
-  /**
-   * Implementation of {@link #openConnection} for a particular protocol version. Implemented by
-   * subclasses of {@link ConnectionFactory}.
-   *
-   * @param hostSpecs at least one host and port to connect to; multiple elements for round-robin
-   *        failover
-   * @param info extra properties controlling the connection; notably, "password" if present
-   *        supplies the password to authenticate with.
-   * @return the new, initialized, connection, or <code>null</code> if this protocol version is not
-   *         supported by the server.
-   * @throws SQLException if the connection could not be established for a reason other than
-   *         protocol version incompatibility.
-   */
-  public abstract QueryExecutor openConnectionImpl(HostSpec[] hostSpecs, Properties info) throws SQLException;
+        if (protoName == null || protoName.isEmpty() || "3".equals(protoName)) {
+            ConnectionFactory connectionFactory = new ConnectionFactoryImpl();
+            QueryExecutor queryExecutor = connectionFactory.openConnectionImpl(
+                    hostSpecs, info);
+            if (queryExecutor != null) {
+                return queryExecutor;
+            }
+        }
 
-  /**
-   * Safely close the given stream.
-   *
-   * @param newStream The stream to close.
-   */
-  protected void closeStream(PGStream newStream) {
-    if (newStream != null) {
-      try {
-        newStream.close();
-      } catch (IOException e) {
-        LOGGER.log(Level.WARNING, "Failed to closed stream with error: {0}", e);
-      }
+        throw new PSQLException(
+                GT.tr("A connection could not be made using the requested protocol {0}.", protoName),
+                PSQLState.CONNECTION_UNABLE_TO_CONNECT);
+    }
+
+    /**
+     * Implementation of {@link #openConnection} for a particular protocol version. Implemented by
+     * subclasses of {@link ConnectionFactory}.
+     *
+     * @param hostSpecs at least one host and port to connect to; multiple elements for round-robin
+     *                  failover
+     * @param info      extra properties controlling the connection; notably, "password" if present
+     *                  supplies the password to authenticate with.
+     * @return the new, initialized, connection, or <code>null</code> if this protocol version is not
+     * supported by the server.
+     * @throws SQLException if the connection could not be established for a reason other than
+     *                      protocol version incompatibility.
+     */
+    public abstract QueryExecutor openConnectionImpl(HostSpec[] hostSpecs, Properties info) throws SQLException;
+
+    /**
+     * Safely close the given stream.
+     *
+     * @param newStream The stream to close.
+     */
+    protected void closeStream(PGStream newStream) {
+        if (newStream != null) {
+            try {
+                newStream.close();
+            } catch (IOException e) {
+                LOGGER.log(Level.WARNING, "Failed to closed stream with error: {0}", e);
+            }
+        }
     }
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/core/Encoding.java b/pgjdbc/src/main/java/org/postgresql/core/Encoding.java
index 0afc258..09ec2b5 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/Encoding.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/Encoding.java
@@ -23,326 +23,322 @@ import java.util.logging.Logger;
  */
 public class Encoding {
 
-  private static final Logger LOGGER = Logger.getLogger(Encoding.class.getName());
+    static final AsciiStringInterner INTERNER = new AsciiStringInterner();
+    private static final Logger LOGGER = Logger.getLogger(Encoding.class.getName());
+    private static final Encoding DEFAULT_ENCODING = new Encoding();
+    private static final Encoding UTF8_ENCODING = new Encoding(StandardCharsets.UTF_8, true);
+    /*
+     * Preferred JVM encodings for backend encodings.
+     */
+    private static final HashMap<String, String[]> encodings = new HashMap<>();
 
-  private static final Encoding DEFAULT_ENCODING = new Encoding();
-
-  private static final Encoding UTF8_ENCODING = new Encoding(StandardCharsets.UTF_8, true);
-
-  /*
-   * Preferred JVM encodings for backend encodings.
-   */
-  private static final HashMap<String, String[]> encodings = new HashMap<>();
-
-  static {
-    //Note: this list should match the set of supported server
-    // encodings found in backend/util/mb/encnames.c
-    encodings.put("SQL_ASCII", new String[]{"ASCII", "US-ASCII"});
-    encodings.put("UNICODE", new String[]{"UTF-8", "UTF8"});
-    encodings.put("UTF8", new String[]{"UTF-8", "UTF8"});
-    encodings.put("LATIN1", new String[]{"ISO8859_1"});
-    encodings.put("LATIN2", new String[]{"ISO8859_2"});
-    encodings.put("LATIN3", new String[]{"ISO8859_3"});
-    encodings.put("LATIN4", new String[]{"ISO8859_4"});
-    encodings.put("ISO_8859_5", new String[]{"ISO8859_5"});
-    encodings.put("ISO_8859_6", new String[]{"ISO8859_6"});
-    encodings.put("ISO_8859_7", new String[]{"ISO8859_7"});
-    encodings.put("ISO_8859_8", new String[]{"ISO8859_8"});
-    encodings.put("LATIN5", new String[]{"ISO8859_9"});
-    encodings.put("LATIN7", new String[]{"ISO8859_13"});
-    encodings.put("LATIN9", new String[]{"ISO8859_15_FDIS"});
-    encodings.put("EUC_JP", new String[]{"EUC_JP"});
-    encodings.put("EUC_CN", new String[]{"EUC_CN"});
-    encodings.put("EUC_KR", new String[]{"EUC_KR"});
-    encodings.put("JOHAB", new String[]{"Johab"});
-    encodings.put("EUC_TW", new String[]{"EUC_TW"});
-    encodings.put("SJIS", new String[]{"MS932", "SJIS"});
-    encodings.put("BIG5", new String[]{"Big5", "MS950", "Cp950"});
-    encodings.put("GBK", new String[]{"GBK", "MS936"});
-    encodings.put("UHC", new String[]{"MS949", "Cp949", "Cp949C"});
-    encodings.put("TCVN", new String[]{"Cp1258"});
-    encodings.put("WIN1256", new String[]{"Cp1256"});
-    encodings.put("WIN1250", new String[]{"Cp1250"});
-    encodings.put("WIN874", new String[]{"MS874", "Cp874"});
-    encodings.put("WIN", new String[]{"Cp1251"});
-    encodings.put("ALT", new String[]{"Cp866"});
-    // We prefer KOI8-U, since it is a superset of KOI8-R.
-    encodings.put("KOI8", new String[]{"KOI8_U", "KOI8_R"});
-    // If the database isn't encoding-aware then we can't have
-    // any preferred encodings.
-    encodings.put("UNKNOWN", new String[0]);
-    // The following encodings do not have a java equivalent
-    encodings.put("MULE_INTERNAL", new String[0]);
-    encodings.put("LATIN6", new String[0]);
-    encodings.put("LATIN8", new String[0]);
-    encodings.put("LATIN10", new String[0]);
-  }
-
-  static final AsciiStringInterner INTERNER = new AsciiStringInterner();
-
-  private final Charset encoding;
-  private final boolean fastASCIINumbers;
-
-  /**
-   * Uses the default charset of the JVM.
-   */
-  private Encoding() {
-    this(Charset.defaultCharset());
-  }
-
-  /**
-   * Subclasses may use this constructor if they know in advance of their ASCII number
-   * compatibility.
-   *
-   * @param encoding charset to use
-   * @param fastASCIINumbers whether this encoding is compatible with ASCII numbers.
-   */
-  protected Encoding(Charset encoding, boolean fastASCIINumbers) {
-    if (encoding == null) {
-      throw new NullPointerException("Null encoding charset not supported");
+    static {
+        //Note: this list should match the set of supported server
+        // encodings found in backend/util/mb/encnames.c
+        encodings.put("SQL_ASCII", new String[]{"ASCII", "US-ASCII"});
+        encodings.put("UNICODE", new String[]{"UTF-8", "UTF8"});
+        encodings.put("UTF8", new String[]{"UTF-8", "UTF8"});
+        encodings.put("LATIN1", new String[]{"ISO8859_1"});
+        encodings.put("LATIN2", new String[]{"ISO8859_2"});
+        encodings.put("LATIN3", new String[]{"ISO8859_3"});
+        encodings.put("LATIN4", new String[]{"ISO8859_4"});
+        encodings.put("ISO_8859_5", new String[]{"ISO8859_5"});
+        encodings.put("ISO_8859_6", new String[]{"ISO8859_6"});
+        encodings.put("ISO_8859_7", new String[]{"ISO8859_7"});
+        encodings.put("ISO_8859_8", new String[]{"ISO8859_8"});
+        encodings.put("LATIN5", new String[]{"ISO8859_9"});
+        encodings.put("LATIN7", new String[]{"ISO8859_13"});
+        encodings.put("LATIN9", new String[]{"ISO8859_15_FDIS"});
+        encodings.put("EUC_JP", new String[]{"EUC_JP"});
+        encodings.put("EUC_CN", new String[]{"EUC_CN"});
+        encodings.put("EUC_KR", new String[]{"EUC_KR"});
+        encodings.put("JOHAB", new String[]{"Johab"});
+        encodings.put("EUC_TW", new String[]{"EUC_TW"});
+        encodings.put("SJIS", new String[]{"MS932", "SJIS"});
+        encodings.put("BIG5", new String[]{"Big5", "MS950", "Cp950"});
+        encodings.put("GBK", new String[]{"GBK", "MS936"});
+        encodings.put("UHC", new String[]{"MS949", "Cp949", "Cp949C"});
+        encodings.put("TCVN", new String[]{"Cp1258"});
+        encodings.put("WIN1256", new String[]{"Cp1256"});
+        encodings.put("WIN1250", new String[]{"Cp1250"});
+        encodings.put("WIN874", new String[]{"MS874", "Cp874"});
+        encodings.put("WIN", new String[]{"Cp1251"});
+        encodings.put("ALT", new String[]{"Cp866"});
+        // We prefer KOI8-U, since it is a superset of KOI8-R.
+        encodings.put("KOI8", new String[]{"KOI8_U", "KOI8_R"});
+        // If the database isn't encoding-aware then we can't have
+        // any preferred encodings.
+        encodings.put("UNKNOWN", new String[0]);
+        // The following encodings do not have a java equivalent
+        encodings.put("MULE_INTERNAL", new String[0]);
+        encodings.put("LATIN6", new String[0]);
+        encodings.put("LATIN8", new String[0]);
+        encodings.put("LATIN10", new String[0]);
     }
-    this.encoding = encoding;
-    this.fastASCIINumbers = fastASCIINumbers;
-    if (LOGGER.isLoggable(Level.FINEST)) {
-      LOGGER.log(Level.FINEST, "Creating new Encoding {0} with fastASCIINumbers {1}",
-          new Object[]{encoding, fastASCIINumbers});
-    }
-  }
 
-  /**
-   * Use the charset passed as parameter and tests at creation time whether the specified encoding
-   * is compatible with ASCII numbers.
-   *
-   * @param encoding charset to use
-   */
-  protected Encoding(Charset encoding) {
-    this(encoding, testAsciiNumbers(encoding));
-  }
+    private final Charset encoding;
+    private final boolean fastASCIINumbers;
 
-  /**
-   * Returns true if this encoding has characters '-' and '0'..'9' in exactly same position as
-   * ascii.
-   *
-   * @return true if the bytes can be scanned directly for ascii numbers.
-   */
-  public boolean hasAsciiNumbers() {
-    return fastASCIINumbers;
-  }
+    /**
+     * Uses the default charset of the JVM.
+     */
+    private Encoding() {
+        this(Charset.defaultCharset());
+    }
 
-  /**
-   * Construct an Encoding for a given JVM encoding.
-   *
-   * @param jvmEncoding the name of the JVM encoding
-   * @return an Encoding instance for the specified encoding, or an Encoding instance for the
-   *     default JVM encoding if the specified encoding is unavailable.
-   */
-  public static Encoding getJVMEncoding(String jvmEncoding) {
-    if ("UTF-8".equals(jvmEncoding)) {
-      return UTF8_ENCODING;
-    }
-    if (Charset.isSupported(jvmEncoding)) {
-      return new Encoding(Charset.forName(jvmEncoding));
-    }
-    return DEFAULT_ENCODING;
-  }
-
-  /**
-   * Construct an Encoding for a given database encoding.
-   *
-   * @param databaseEncoding the name of the database encoding
-   * @return an Encoding instance for the specified encoding, or an Encoding instance for the
-   *     default JVM encoding if the specified encoding is unavailable.
-   */
-  public static Encoding getDatabaseEncoding(String databaseEncoding) {
-    if ("UTF8".equals(databaseEncoding) || "UNICODE".equals(databaseEncoding)) {
-      return UTF8_ENCODING;
-    }
-    // If the backend encoding is known and there is a suitable
-    // encoding in the JVM we use that. Otherwise we fall back
-    // to the default encoding of the JVM.
-    String[] candidates = encodings.get(databaseEncoding);
-    if (candidates != null) {
-      for (String candidate : candidates) {
-        LOGGER.log(Level.FINEST, "Search encoding candidate {0}", candidate);
-        if (Charset.isSupported(candidate)) {
-          return new Encoding(Charset.forName(candidate));
+    /**
+     * Subclasses may use this constructor if they know their ASCII number
+     * compatibility in advance.
+     *
+     * @param encoding         charset to use
+     * @param fastASCIINumbers whether this encoding is compatible with ASCII numbers.
+     */
+    protected Encoding(Charset encoding, boolean fastASCIINumbers) {
+        if (encoding == null) {
+            throw new NullPointerException("Null encoding charset not supported");
+        }
+        this.encoding = encoding;
+        this.fastASCIINumbers = fastASCIINumbers;
+        if (LOGGER.isLoggable(Level.FINEST)) {
+            LOGGER.log(Level.FINEST, "Creating new Encoding {0} with fastASCIINumbers {1}",
+                    new Object[]{encoding, fastASCIINumbers});
         }
-      }
     }
 
-    // Try the encoding name directly -- maybe the charset has been
-    // provided by the user.
-    if (Charset.isSupported(databaseEncoding)) {
-      return new Encoding(Charset.forName(databaseEncoding));
+    /**
+     * Use the charset passed as parameter and tests at creation time whether the specified encoding
+     * is compatible with ASCII numbers.
+     *
+     * @param encoding charset to use
+     */
+    protected Encoding(Charset encoding) {
+        this(encoding, testAsciiNumbers(encoding));
     }
 
-    // Fall back to default JVM encoding.
-    LOGGER.log(Level.FINEST, "{0} encoding not found, returning default encoding", databaseEncoding);
-    return DEFAULT_ENCODING;
-  }
-
-  /**
-   * Indicates that <i>string</i> should be staged as a canonicalized value.
-   *
-   * <p>
-   * This is intended for use with {@code String} constants.
-   * </p>
-   *
-   * @param string The string to maintain canonicalized reference to. Must not be {@code null}.
-   * @see Encoding#decodeCanonicalized(byte[], int, int)
-   */
-  public static void canonicalize(String string) {
-    INTERNER.putString(string);
-  }
-
-  /**
-   * Get the name of the (JVM) encoding used.
-   *
-   * @return the JVM encoding name used by this instance.
-   */
-  public String name() {
-    return encoding.name();
-  }
-
-  /**
-   * Encode a string to an array of bytes.
-   *
-   * @param s the string to encode
-   * @return a bytearray containing the encoded string
-   * @throws IOException if something goes wrong
-   */
-  public byte [] encode(String s) throws IOException {
-    if (s == null) {
-      return null;
+    /**
+     * Construct an Encoding for a given JVM encoding.
+     *
+     * @param jvmEncoding the name of the JVM encoding
+     * @return an Encoding instance for the specified encoding, or an Encoding instance for the
+     * default JVM encoding if the specified encoding is unavailable.
+     */
+    public static Encoding getJVMEncoding(String jvmEncoding) {
+        if ("UTF-8".equals(jvmEncoding)) {
+            return UTF8_ENCODING;
+        }
+        if (Charset.isSupported(jvmEncoding)) {
+            return new Encoding(Charset.forName(jvmEncoding));
+        }
+        return DEFAULT_ENCODING;
     }
 
-    return s.getBytes(encoding);
-  }
+    /**
+     * Construct an Encoding for a given database encoding.
+     *
+     * @param databaseEncoding the name of the database encoding
+     * @return an Encoding instance for the specified encoding, or an Encoding instance for the
+     * default JVM encoding if the specified encoding is unavailable.
+     */
+    public static Encoding getDatabaseEncoding(String databaseEncoding) {
+        if ("UTF8".equals(databaseEncoding) || "UNICODE".equals(databaseEncoding)) {
+            return UTF8_ENCODING;
+        }
+        // If the backend encoding is known and there is a suitable
+        // encoding in the JVM we use that. Otherwise we fall back
+        // to the default encoding of the JVM.
+        String[] candidates = encodings.get(databaseEncoding);
+        if (candidates != null) {
+            for (String candidate : candidates) {
+                LOGGER.log(Level.FINEST, "Search encoding candidate {0}", candidate);
+                if (Charset.isSupported(candidate)) {
+                    return new Encoding(Charset.forName(candidate));
+                }
+            }
+        }
 
-  /**
-   * Decode an array of bytes possibly into a canonicalized string.
-   *
-   * <p>
-   * Only ascii compatible encoding support canonicalization and only ascii {@code String} values are eligible
-   * to be canonicalized.
-   * </p>
-   *
-   * @param encodedString a byte array containing the string to decode
-   * @param offset        the offset in <code>encodedString</code> of the first byte of the encoded
-   *                      representation
-   * @param length        the length, in bytes, of the encoded representation
-   * @return the decoded string
-   * @throws IOException if something goes wrong
-   */
-  public String decodeCanonicalized(byte[] encodedString, int offset, int length) throws IOException {
-    if (length == 0) {
-      return "";
+        // Try the encoding name directly -- maybe the charset has been
+        // provided by the user.
+        if (Charset.isSupported(databaseEncoding)) {
+            return new Encoding(Charset.forName(databaseEncoding));
+        }
+
+        // Fall back to default JVM encoding.
+        LOGGER.log(Level.FINEST, "{0} encoding not found, returning default encoding", databaseEncoding);
+        return DEFAULT_ENCODING;
     }
-    // if fastASCIINumbers is false, then no chance of the byte[] being ascii compatible characters
-    return fastASCIINumbers ? INTERNER.getString(encodedString, offset, length, this)
-                            : decode(encodedString, offset, length);
-  }
 
-  public String decodeCanonicalizedIfPresent(byte[] encodedString, int offset, int length) throws IOException {
-    if (length == 0) {
-      return "";
+    /**
+     * Indicates that <i>string</i> should be staged as a canonicalized value.
+     *
+     * <p>
+     * This is intended for use with {@code String} constants.
+     * </p>
+     *
+     * @param string The string to maintain canonicalized reference to. Must not be {@code null}.
+     * @see Encoding#decodeCanonicalized(byte[], int, int)
+     */
+    public static void canonicalize(String string) {
+        INTERNER.putString(string);
     }
-    // if fastASCIINumbers is false, then no chance of the byte[] being ascii compatible characters
-    return fastASCIINumbers ? INTERNER.getStringIfPresent(encodedString, offset, length, this)
-                            : decode(encodedString, offset, length);
-  }
 
-  /**
-   * Decode an array of bytes possibly into a canonicalized string.
-   *
-   * <p>
-   * Only ascii compatible encoding support canonicalization and only ascii {@code String} values are eligible
-   * to be canonicalized.
-   * </p>
-   *
-   * @param encodedString a byte array containing the string to decode
-   * @return the decoded string
-   * @throws IOException if something goes wrong
-   */
-  public String decodeCanonicalized(byte[] encodedString) throws IOException {
-    return decodeCanonicalized(encodedString, 0, encodedString.length);
-  }
+    /**
+     * Get an Encoding using the default encoding for the JVM.
+     *
+     * @return an Encoding instance
+     */
+    public static Encoding defaultEncoding() {
+        return DEFAULT_ENCODING;
+    }
 
-  /**
-   * Decode an array of bytes into a string.
-   *
-   * @param encodedString a byte array containing the string to decode
-   * @param offset        the offset in <code>encodedString</code> of the first byte of the encoded
-   *                      representation
-   * @param length        the length, in bytes, of the encoded representation
-   * @return the decoded string
-   * @throws IOException if something goes wrong
-   */
-  public String decode(byte[] encodedString, int offset, int length) throws IOException {
-    return new String(encodedString, offset, length, encoding);
-  }
+    /**
+     * Checks whether this encoding is compatible with ASCII for the number characters '-' and
+     * '0'..'9'. Here, compatible means that they are encoded with exactly the same values.
+     *
+     * @return If faster ASCII number parsing can be used with this encoding.
+     */
+    private static boolean testAsciiNumbers(Charset encoding) {
+        // TODO: test all postgres supported encoding to see if there are
+        // any which do _not_ have ascii numbers in same location
+        // at least all the encoding listed in the encodings hashmap have
+        // working ascii numbers
+        String test = "-0123456789";
+        byte[] bytes = test.getBytes(encoding);
+        String res = new String(bytes, StandardCharsets.US_ASCII);
+        return test.equals(res);
+    }
 
-  /**
-   * Decode an array of bytes into a string.
-   *
-   * @param encodedString a byte array containing the string to decode
-   * @return the decoded string
-   * @throws IOException if something goes wrong
-   */
-  public String decode(byte[] encodedString) throws IOException {
-    return decode(encodedString, 0, encodedString.length);
-  }
+    /**
+     * Returns true if this encoding has characters '-' and '0'..'9' in exactly same position as
+     * ascii.
+     *
+     * @return true if the bytes can be scanned directly for ascii numbers.
+     */
+    public boolean hasAsciiNumbers() {
+        return fastASCIINumbers;
+    }
 
-  /**
-   * Get a Reader that decodes the given InputStream using this encoding.
-   *
-   * @param in the underlying stream to decode from
-   * @return a non-null Reader implementation.
-   * @throws IOException if something goes wrong
-   */
-  public Reader getDecodingReader(InputStream in) throws IOException {
-    return new InputStreamReader(in, encoding);
-  }
+    /**
+     * Get the name of the (JVM) encoding used.
+     *
+     * @return the JVM encoding name used by this instance.
+     */
+    public String name() {
+        return encoding.name();
+    }
 
-  /**
-   * Get a Writer that encodes to the given OutputStream using this encoding.
-   *
-   * @param out the underlying stream to encode to
-   * @return a non-null Writer implementation.
-   * @throws IOException if something goes wrong
-   */
-  public Writer getEncodingWriter(OutputStream out) throws IOException {
-    return new OutputStreamWriter(out, encoding);
-  }
+    /**
+     * Encode a string to an array of bytes.
+     *
+     * @param s the string to encode
+     * @return a bytearray containing the encoded string
+     * @throws IOException if something goes wrong
+     */
+    public byte[] encode(String s) throws IOException {
+        if (s == null) {
+            return null;
+        }
 
-  /**
-   * Get an Encoding using the default encoding for the JVM.
-   *
-   * @return an Encoding instance
-   */
-  public static Encoding defaultEncoding() {
-    return DEFAULT_ENCODING;
-  }
+        return s.getBytes(encoding);
+    }
 
-  @Override
-  public String toString() {
-    return encoding.name();
-  }
+    /**
+     * Decode an array of bytes possibly into a canonicalized string.
+     *
+     * <p>
+     * Only ascii compatible encoding support canonicalization and only ascii {@code String} values are eligible
+     * to be canonicalized.
+     * </p>
+     *
+     * @param encodedString a byte array containing the string to decode
+     * @param offset        the offset in <code>encodedString</code> of the first byte of the encoded
+     *                      representation
+     * @param length        the length, in bytes, of the encoded representation
+     * @return the decoded string
+     * @throws IOException if something goes wrong
+     */
+    public String decodeCanonicalized(byte[] encodedString, int offset, int length) throws IOException {
+        if (length == 0) {
+            return "";
+        }
+        // if fastASCIINumbers is false, then no chance of the byte[] being ascii compatible characters
+        return fastASCIINumbers ? INTERNER.getString(encodedString, offset, length, this)
+                : decode(encodedString, offset, length);
+    }
 
-  /**
-   * Checks whether this encoding is compatible with ASCII for the number characters '-' and
-   * '0'..'9'. Where compatible means that they are encoded with exactly same values.
-   *
-   * @return If faster ASCII number parsing can be used with this encoding.
-   */
-  private static boolean testAsciiNumbers(Charset encoding) {
-    // TODO: test all postgres supported encoding to see if there are
-    // any which do _not_ have ascii numbers in same location
-    // at least all the encoding listed in the encodings hashmap have
-    // working ascii numbers
-    String test = "-0123456789";
-    byte[] bytes = test.getBytes(encoding);
-    String res = new String(bytes, StandardCharsets.US_ASCII);
-    return test.equals(res);
-  }
+    public String decodeCanonicalizedIfPresent(byte[] encodedString, int offset, int length) throws IOException {
+        if (length == 0) {
+            return "";
+        }
+        // if fastASCIINumbers is false, then no chance of the byte[] being ascii compatible characters
+        return fastASCIINumbers ? INTERNER.getStringIfPresent(encodedString, offset, length, this)
+                : decode(encodedString, offset, length);
+    }
+
+    /**
+     * Decode an array of bytes possibly into a canonicalized string.
+     *
+     * <p>
+     * Only ascii compatible encoding support canonicalization and only ascii {@code String} values are eligible
+     * to be canonicalized.
+     * </p>
+     *
+     * @param encodedString a byte array containing the string to decode
+     * @return the decoded string
+     * @throws IOException if something goes wrong
+     */
+    public String decodeCanonicalized(byte[] encodedString) throws IOException {
+        return decodeCanonicalized(encodedString, 0, encodedString.length);
+    }
+
+    /**
+     * Decode an array of bytes into a string.
+     *
+     * @param encodedString a byte array containing the string to decode
+     * @param offset        the offset in <code>encodedString</code> of the first byte of the encoded
+     *                      representation
+     * @param length        the length, in bytes, of the encoded representation
+     * @return the decoded string
+     * @throws IOException if something goes wrong
+     */
+    public String decode(byte[] encodedString, int offset, int length) throws IOException {
+        return new String(encodedString, offset, length, encoding);
+    }
+
+    /**
+     * Decode an array of bytes into a string.
+     *
+     * @param encodedString a byte array containing the string to decode
+     * @return the decoded string
+     * @throws IOException if something goes wrong
+     */
+    public String decode(byte[] encodedString) throws IOException {
+        return decode(encodedString, 0, encodedString.length);
+    }
+
+    /**
+     * Get a Reader that decodes the given InputStream using this encoding.
+     *
+     * @param in the underlying stream to decode from
+     * @return a non-null Reader implementation.
+     * @throws IOException if something goes wrong
+     */
+    public Reader getDecodingReader(InputStream in) throws IOException {
+        return new InputStreamReader(in, encoding);
+    }
+
+    /**
+     * Get a Writer that encodes to the given OutputStream using this encoding.
+     *
+     * @param out the underlying stream to encode to
+     * @return a non-null Writer implementation.
+     * @throws IOException if something goes wrong
+     */
+    public Writer getEncodingWriter(OutputStream out) throws IOException {
+        return new OutputStreamWriter(out, encoding);
+    }
+
+    @Override
+    public String toString() {
+        return encoding.name();
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/core/EncodingPredictor.java b/pgjdbc/src/main/java/org/postgresql/core/EncodingPredictor.java
index 9116bee..62dbbe0 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/EncodingPredictor.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/EncodingPredictor.java
@@ -18,134 +18,134 @@ import java.io.IOException;
  */
 public class EncodingPredictor {
 
-  public EncodingPredictor() {
-  }
+    private static final Translation[] FATAL_TRANSLATIONS =
+            new Translation[]{
+                    new Translation("ВАЖНО", null, "ru", "WIN", "ALT", "KOI8"),
+                    new Translation("致命错误", null, "zh_CN", "EUC_CN", "GBK", "BIG5"),
+                    new Translation("KATASTROFALNY", null, "pl", "LATIN2"),
+                    new Translation("FATALE", null, "it", "LATIN1", "LATIN9"),
+                    new Translation("FATAL", new String[]{"は存在しません" /* ~ does not exist */,
+                            "ロール" /* ~ role */, "ユーザ" /* ~ user */}, "ja", "EUC_JP", "SJIS"),
+                    new Translation(null, null, "fr/de/es/pt_BR", "LATIN1", "LATIN3", "LATIN4", "LATIN5",
+                            "LATIN7", "LATIN9"),
+            };
 
-  /**
-   * In certain cases the encoding is not known for sure (e.g. before authentication).
-   * In such cases, backend might send messages in "native to database" encoding,
-   * thus pgjdbc has to guess the encoding nad
-   */
-  public static class DecodeResult {
-    public final String result;
-    public final String encoding; // JVM name
-
-    DecodeResult(String result, String encoding) {
-      this.result = result;
-      this.encoding = encoding;
+    public EncodingPredictor() {
     }
-  }
 
-  static class Translation {
-    public final String fatalText;
-    private final String [] texts;
-    public final String language;
-    public final String[] encodings;
+    public static DecodeResult decode(byte[] bytes, int offset, int length) {
+        Encoding defaultEncoding = Encoding.defaultEncoding();
+        for (Translation tr : FATAL_TRANSLATIONS) {
+            for (String encoding : tr.encodings) {
+                Encoding encoder = Encoding.getDatabaseEncoding(encoding);
+                if (encoder == defaultEncoding) {
+                    continue;
+                }
 
-    Translation(String fatalText, String [] texts,
-        String language, String... encodings) {
-      this.fatalText = fatalText;
-      this.texts = texts;
-      this.language = language;
-      this.encodings = encodings;
-    }
-  }
+                // If there is a translation for "FATAL", then try typical encodings for that language
+                if (tr.fatalText != null) {
+                    byte[] encoded;
+                    try {
+                        byte[] tmp = encoder.encode(tr.fatalText);
+                        encoded = new byte[tmp.length + 2];
+                        encoded[0] = 'S';
+                        encoded[encoded.length - 1] = 0;
+                        System.arraycopy(tmp, 0, encoded, 1, tmp.length);
+                    } catch (IOException e) {
+                        continue;// should not happen
+                    }
 
-  private static final Translation[] FATAL_TRANSLATIONS =
-      new Translation[]{
-          new Translation("ВАЖНО", null, "ru", "WIN", "ALT", "KOI8"),
-          new Translation("致命错误", null, "zh_CN", "EUC_CN", "GBK", "BIG5"),
-          new Translation("KATASTROFALNY", null, "pl", "LATIN2"),
-          new Translation("FATALE", null, "it", "LATIN1", "LATIN9"),
-          new Translation("FATAL", new String[]{"は存在しません" /* ~ does not exist */,
-              "ロール" /* ~ role */, "ユーザ" /* ~ user */}, "ja", "EUC_JP", "SJIS"),
-          new Translation(null, null, "fr/de/es/pt_BR", "LATIN1", "LATIN3", "LATIN4", "LATIN5",
-              "LATIN7", "LATIN9"),
-      };
+                    if (!arrayContains(bytes, offset, length, encoded, 0, encoded.length)) {
+                        continue;
+                    }
+                }
 
-  public static DecodeResult decode(byte[] bytes, int offset, int length) {
-    Encoding defaultEncoding = Encoding.defaultEncoding();
-    for (Translation tr : FATAL_TRANSLATIONS) {
-      for (String encoding : tr.encodings) {
-        Encoding encoder = Encoding.getDatabaseEncoding(encoding);
-        if (encoder == defaultEncoding) {
-          continue;
-        }
+                // No idea how to tell Japanese from Latin languages, thus just hard-code certain Japanese words
+                if (tr.texts != null) {
+                    boolean foundOne = false;
+                    for (String text : tr.texts) {
+                        try {
+                            byte[] textBytes = encoder.encode(text);
+                            if (arrayContains(bytes, offset, length, textBytes, 0, textBytes.length)) {
+                                foundOne = true;
+                                break;
+                            }
+                        } catch (IOException e) {
+                            // do not care, will try other encodings
+                        }
+                    }
+                    if (!foundOne) {
+                        // Error message does not have key parts, will try other encodings
+                        continue;
+                    }
+                }
 
-        // If there is a translation for "FATAL", then try typical encodings for that language
-        if (tr.fatalText != null) {
-          byte[] encoded;
-          try {
-            byte[] tmp = encoder.encode(tr.fatalText);
-            encoded = new byte[tmp.length + 2];
-            encoded[0] = 'S';
-            encoded[encoded.length - 1] = 0;
-            System.arraycopy(tmp, 0, encoded, 1, tmp.length);
-          } catch (IOException e) {
-            continue;// should not happen
-          }
-
-          if (!arrayContains(bytes, offset, length, encoded, 0, encoded.length)) {
-            continue;
-          }
-        }
-
-        // No idea how to tell Japanese from Latin languages, thus just hard-code certain Japanese words
-        if (tr.texts != null) {
-          boolean foundOne = false;
-          for (String text : tr.texts) {
-            try {
-              byte[] textBytes = encoder.encode(text);
-              if (arrayContains(bytes, offset, length, textBytes, 0, textBytes.length)) {
-                foundOne = true;
-                break;
-              }
-            } catch (IOException e) {
-              // do not care, will try other encodings
+                try {
+                    String decoded = encoder.decode(bytes, offset, length);
+                    if (decoded.indexOf(65533) != -1) {
+                        // bad character in string, try another encoding
+                        continue;
+                    }
+                    return new DecodeResult(decoded, encoder.name());
+                } catch (IOException e) {
+                    // do not care
+                }
             }
-          }
-          if (!foundOne) {
-            // Error message does not have key parts, will try other encodings
-            continue;
-          }
+        }
+        return null;
+    }
+
+    private static boolean arrayContains(
+            byte[] first, int firstOffset, int firstLength,
+            byte[] second, int secondOffset, int secondLength
+    ) {
+        if (firstLength < secondLength) {
+            return false;
         }
 
-        try {
-          String decoded = encoder.decode(bytes, offset, length);
-          if (decoded.indexOf(65533) != -1) {
-            // bad character in string, try another encoding
-            continue;
-          }
-          return new DecodeResult(decoded, encoder.name());
-        } catch (IOException e) {
-          // do not care
+        for (int i = 0; i < firstLength; i++) {
+            for (; i < firstLength && first[firstOffset + i] != second[secondOffset]; i++) {
+                // find the first matching byte
+            }
+
+            int j = 1;
+            for (; j < secondLength && first[firstOffset + i + j] == second[secondOffset + j]; j++) {
+                // compare arrays
+            }
+            if (j == secondLength) {
+                return true;
+            }
         }
-      }
-    }
-    return null;
-  }
-
-  private static boolean arrayContains(
-      byte[] first, int firstOffset, int firstLength,
-      byte[] second, int secondOffset, int secondLength
-  ) {
-    if (firstLength < secondLength) {
-      return false;
+        return false;
     }
 
-    for (int i = 0; i < firstLength; i++) {
-      for (; i < firstLength && first[firstOffset + i] != second[secondOffset]; i++) {
-        // find the first matching byte
-      }
+    /**
+     * In certain cases the encoding is not known for sure (e.g. before authentication).
+     * In such cases, backend might send messages in "native to database" encoding,
+     * thus pgjdbc has to guess the encoding and decode the message accordingly.
+     */
+    public static class DecodeResult {
+        public final String result;
+        public final String encoding; // JVM name
 
-      int j = 1;
-      for (; j < secondLength && first[firstOffset + i + j] == second[secondOffset + j]; j++) {
-        // compare arrays
-      }
-      if (j == secondLength) {
-        return true;
-      }
+        DecodeResult(String result, String encoding) {
+            this.result = result;
+            this.encoding = encoding;
+        }
+    }
+
+    static class Translation {
+        public final String fatalText;
+        public final String language;
+        public final String[] encodings;
+        private final String[] texts;
+
+        Translation(String fatalText, String[] texts,
+                    String language, String... encodings) {
+            this.fatalText = fatalText;
+            this.texts = texts;
+            this.language = language;
+            this.encodings = encodings;
+        }
     }
-    return false;
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/core/Field.java b/pgjdbc/src/main/java/org/postgresql/core/Field.java
index 987f743..31d69ff 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/Field.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/Field.java
@@ -5,172 +5,166 @@
 
 package org.postgresql.core;
 
+import java.util.Locale;
 import org.postgresql.jdbc.FieldMetadata;
 
-import java.util.Locale;
-
 public class Field {
-  // The V3 protocol defines two constants for the format of data
-  public static final int TEXT_FORMAT = 0;
-  public static final int BINARY_FORMAT = 1;
+    // The V3 protocol defines two constants for the format of data
+    public static final int TEXT_FORMAT = 0;
+    public static final int BINARY_FORMAT = 1;
+    // New string to avoid clashes with other strings
+    private static final String NOT_YET_LOADED = new String("pgType is not yet loaded");
+    private final int length; // Internal Length of this field
+    private final int oid; // OID of the type
+    private final int mod; // type modifier of this field
+    private final int tableOid; // OID of table ( zero if no table )
+    // 0 = text, 1 = binary
+    // In the V2 protocol all fields in a
+    // binary cursor are binary and all
+    // others are text
+    private final int positionInTable;
+    private String columnLabel; // Column label
+    private int format = TEXT_FORMAT; // In the V3 protocol each field has a format
+    // Cache fields filled in by AbstractJdbc2ResultSetMetaData.fetchFieldMetaData.
+    // Don't use unless that has been called.
+    private FieldMetadata metadata;
+    private int sqlType;
+    private String pgType = NOT_YET_LOADED;
 
-  private final int length; // Internal Length of this field
-  private final int oid; // OID of the type
-  private final int mod; // type modifier of this field
-  private String columnLabel; // Column label
+    /**
+     * Construct a field based on the information fed to it.
+     *
+     * @param name   the name (column name and label) of the field
+     * @param oid    the OID of the field
+     * @param length the length of the field
+     * @param mod    modifier
+     */
+    public Field(String name, int oid, int length, int mod) {
+        this(name, oid, length, mod, 0, 0);
+    }
 
-  private int format = TEXT_FORMAT; // In the V3 protocol each field has a format
-  // 0 = text, 1 = binary
-  // In the V2 protocol all fields in a
-  // binary cursor are binary and all
-  // others are text
+    /**
+     * Constructor without mod parameter.
+     *
+     * @param name the name (column name and label) of the field
+     * @param oid  the OID of the field
+     */
+    public Field(String name, int oid) {
+        this(name, oid, 0, -1);
+    }
 
-  private final int tableOid; // OID of table ( zero if no table )
-  private final int positionInTable;
+    /**
+     * Construct a field based on the information fed to it.
+     *
+     * @param columnLabel     the column label of the field
+     * @param oid             the OID of the field
+     * @param length          the length of the field
+     * @param mod             modifier
+     * @param tableOid        the OID of the columns' table
+     * @param positionInTable the position of column in the table (first column is 1, second column is 2, etc...)
+     */
+    public Field(String columnLabel, int oid, int length, int mod, int tableOid,
+                 int positionInTable) {
+        this.columnLabel = columnLabel;
+        this.oid = oid;
+        this.length = length;
+        this.mod = mod;
+        this.tableOid = tableOid;
+        this.positionInTable = positionInTable;
+        this.metadata = tableOid == 0 ? new FieldMetadata(columnLabel) : null;
+    }
 
-  // Cache fields filled in by AbstractJdbc2ResultSetMetaData.fetchFieldMetaData.
-  // Don't use unless that has been called.
-  private FieldMetadata metadata;
+    /**
+     * @return the oid of this Field's data type
+     */
+    public int getOID() {
+        return oid;
+    }
 
-  private int sqlType;
-  private String pgType = NOT_YET_LOADED;
+    /**
+     * @return the mod of this Field's data type
+     */
+    public int getMod() {
+        return mod;
+    }
 
-  // New string to avoid clashes with other strings
-  private static final String NOT_YET_LOADED = new String("pgType is not yet loaded");
+    /**
+     * @return the column label of this Field's data type
+     */
+    public String getColumnLabel() {
+        return columnLabel;
+    }
 
-  /**
-   * Construct a field based on the information fed to it.
-   *
-   * @param name the name (column name and label) of the field
-   * @param oid the OID of the field
-   * @param length the length of the field
-   * @param mod modifier
-   */
-  public Field(String name, int oid, int length, int mod) {
-    this(name, oid, length, mod, 0, 0);
-  }
+    /**
+     * @return the length of this Field's data type
+     */
+    public int getLength() {
+        return length;
+    }
 
-  /**
-   * Constructor without mod parameter.
-   *
-   * @param name the name (column name and label) of the field
-   * @param oid the OID of the field
-   */
-  public Field(String name, int oid) {
-    this(name, oid, 0, -1);
-  }
+    /**
+     * @return the format of this Field's data (text=0, binary=1)
+     */
+    public int getFormat() {
+        return format;
+    }
 
-  /**
-   * Construct a field based on the information fed to it.
-   * @param columnLabel the column label of the field
-   * @param oid the OID of the field
-   * @param length the length of the field
-   * @param mod modifier
-   * @param tableOid the OID of the columns' table
-   * @param positionInTable the position of column in the table (first column is 1, second column is 2, etc...)
-   */
-  public Field(String columnLabel, int oid, int length, int mod, int tableOid,
-      int positionInTable) {
-    this.columnLabel = columnLabel;
-    this.oid = oid;
-    this.length = length;
-    this.mod = mod;
-    this.tableOid = tableOid;
-    this.positionInTable = positionInTable;
-    this.metadata = tableOid == 0 ? new FieldMetadata(columnLabel) : null;
-  }
+    /**
+     * @param format the format of this Field's data (text=0, binary=1)
+     */
+    public void setFormat(int format) {
+        this.format = format;
+    }
 
-  /**
-   * @return the oid of this Field's data type
-   */
-  public int getOID() {
-    return oid;
-  }
+    /**
+     * @return the columns' table oid, zero if no oid available
+     */
+    public int getTableOid() {
+        return tableOid;
+    }
 
-  /**
-   * @return the mod of this Field's data type
-   */
-  public int getMod() {
-    return mod;
-  }
+    public int getPositionInTable() {
+        return positionInTable;
+    }
 
-  /**
-   * @return the column label of this Field's data type
-   */
-  public String getColumnLabel() {
-    return columnLabel;
-  }
+    public FieldMetadata getMetadata() {
+        return metadata;
+    }
 
-  /**
-   * @return the length of this Field's data type
-   */
-  public int getLength() {
-    return length;
-  }
+    public void setMetadata(FieldMetadata metadata) {
+        this.metadata = metadata;
+    }
 
-  /**
-   * @return the format of this Field's data (text=0, binary=1)
-   */
-  public int getFormat() {
-    return format;
-  }
+    @Override
+    public String toString() {
+        return "Field(" + (columnLabel != null ? columnLabel : "")
+                + "," + Oid.toString(oid)
+                + "," + length
+                + "," + (format == TEXT_FORMAT ? 'T' : 'B')
+                + ")";
+    }
 
-  /**
-   * @param format the format of this Field's data (text=0, binary=1)
-   */
-  public void setFormat(int format) {
-    this.format = format;
-  }
+    public int getSQLType() {
+        return sqlType;
+    }
 
-  /**
-   * @return the columns' table oid, zero if no oid available
-   */
-  public int getTableOid() {
-    return tableOid;
-  }
+    public void setSQLType(int sqlType) {
+        this.sqlType = sqlType;
+    }
 
-  public int getPositionInTable() {
-    return positionInTable;
-  }
+    public String getPGType() {
+        return pgType;
+    }
 
-  public FieldMetadata getMetadata() {
-    return metadata;
-  }
+    public void setPGType(String pgType) {
+        this.pgType = pgType;
+    }
 
-  public void setMetadata(FieldMetadata metadata) {
-    this.metadata = metadata;
-  }
+    public boolean isTypeInitialized() {
+        return pgType != NOT_YET_LOADED;
+    }
 
-  @Override
-  public String toString() {
-    return "Field(" + (columnLabel != null ? columnLabel : "")
-        + "," + Oid.toString(oid)
-        + "," + length
-        + "," + (format == TEXT_FORMAT ? 'T' : 'B')
-        + ")";
-  }
-
-  public void setSQLType(int sqlType) {
-    this.sqlType = sqlType;
-  }
-
-  public int getSQLType() {
-    return sqlType;
-  }
-
-  public void setPGType(String pgType) {
-    this.pgType = pgType;
-  }
-
-  public String getPGType() {
-    return pgType;
-  }
-
-  public boolean isTypeInitialized() {
-    return pgType != NOT_YET_LOADED;
-  }
-
-  public void upperCaseLabel() {
-    columnLabel = columnLabel.toUpperCase(Locale.ROOT);
-  }
+    public void upperCaseLabel() {
+        columnLabel = columnLabel.toUpperCase(Locale.ROOT);
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/core/FixedLengthOutputStream.java b/pgjdbc/src/main/java/org/postgresql/core/FixedLengthOutputStream.java
index 7e7b4ff..20b0ec6 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/FixedLengthOutputStream.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/FixedLengthOutputStream.java
@@ -13,41 +13,41 @@ import java.io.OutputStream;
  */
 public class FixedLengthOutputStream extends OutputStream {
 
-  private final int size;
-  private final OutputStream target;
-  private int written;
+    private final int size;
+    private final OutputStream target;
+    private int written;
 
-  public FixedLengthOutputStream(int size, OutputStream target) {
-    this.size = size;
-    this.target = target;
-  }
-
-  @Override
-  public void write(int b) throws IOException {
-    verifyAllowed(1);
-    written++;
-    target.write(b);
-  }
-
-  @Override
-  public void write(byte[] buf, int offset, int len) throws IOException {
-    if ((offset < 0) || (len < 0) || ((offset + len) > buf.length)) {
-      throw new IndexOutOfBoundsException();
-    } else if (len == 0) {
-      return;
+    public FixedLengthOutputStream(int size, OutputStream target) {
+        this.size = size;
+        this.target = target;
     }
-    verifyAllowed(len);
-    target.write(buf, offset, len);
-    written += len;
-  }
 
-  public int remaining() {
-    return size - written;
-  }
-
-  private void verifyAllowed(int wanted) throws IOException {
-    if (remaining() < wanted) {
-      throw new IOException("Attempt to write more than the specified " + size + " bytes");
+    @Override
+    public void write(int b) throws IOException {
+        verifyAllowed(1);
+        written++;
+        target.write(b);
+    }
+
+    @Override
+    public void write(byte[] buf, int offset, int len) throws IOException {
+        if ((offset < 0) || (len < 0) || ((offset + len) > buf.length)) {
+            throw new IndexOutOfBoundsException();
+        } else if (len == 0) {
+            return;
+        }
+        verifyAllowed(len);
+        target.write(buf, offset, len);
+        written += len;
+    }
+
+    public int remaining() {
+        return size - written;
+    }
+
+    private void verifyAllowed(int wanted) throws IOException {
+        if (remaining() < wanted) {
+            throw new IOException("Attempt to write more than the specified " + size + " bytes");
+        }
     }
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/core/JavaVersion.java b/pgjdbc/src/main/java/org/postgresql/core/JavaVersion.java
index 43d52cc..d5ce9d6 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/JavaVersion.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/JavaVersion.java
@@ -6,32 +6,32 @@
 package org.postgresql.core;
 
 public enum JavaVersion {
-  // Note: order is important,
-  v1_8,
-  other;
+    // Note: order is important,
+    v1_8,
+    other;
 
-  private static final JavaVersion RUNTIME_VERSION = from(System.getProperty("java.version"));
+    private static final JavaVersion RUNTIME_VERSION = from(System.getProperty("java.version"));
 
-  /**
-   * Returns enum value that represents current runtime. For instance, when using -jre7.jar via Java
+     * 8, this would return v1_8.
-   *
-   * @return enum value that represents current runtime.
-   */
-  public static JavaVersion getRuntimeVersion() {
-    return RUNTIME_VERSION;
-  }
-
-  /**
-   * Java version string like in {@code "java.version"} property.
-   *
-   * @param version string like 1.6, 1.7, etc
-   * @return JavaVersion enum
-   */
-  public static JavaVersion from(String version) {
-    if (version.startsWith("1.8")) {
-      return v1_8;
+    /**
+     * Returns enum value that represents current runtime. For instance, when using -jre7.jar via Java
+     * 8, this would return v18
+     *
+     * @return enum value that represents current runtime.
+     */
+    public static JavaVersion getRuntimeVersion() {
+        return RUNTIME_VERSION;
+    }
+
+    /**
+     * Java version string like in {@code "java.version"} property.
+     *
+     * @param version string like 1.6, 1.7, etc
+     * @return JavaVersion enum
+     */
+    public static JavaVersion from(String version) {
+        if (version.startsWith("1.8")) {
+            return v1_8;
+        }
+        return other;
     }
-    return other;
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/core/JdbcCallParseInfo.java b/pgjdbc/src/main/java/org/postgresql/core/JdbcCallParseInfo.java
index d7f7028..09a4edd 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/JdbcCallParseInfo.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/JdbcCallParseInfo.java
@@ -9,30 +9,30 @@ package org.postgresql.core;
  * Contains parse flags from {@link Parser#modifyJdbcCall(String, boolean, int, int, EscapeSyntaxCallMode)}.
  */
 public class JdbcCallParseInfo {
-  private final String sql;
-  private final boolean isFunction;
+    private final String sql;
+    private final boolean isFunction;
 
-  public JdbcCallParseInfo(String sql, boolean isFunction) {
-    this.sql = sql;
-    this.isFunction = isFunction;
-  }
+    public JdbcCallParseInfo(String sql, boolean isFunction) {
+        this.sql = sql;
+        this.isFunction = isFunction;
+    }
 
-  /**
-   * SQL in a native for certain backend version.
-   *
-   * @return SQL in a native for certain backend version
-   */
-  public String getSql() {
-    return sql;
-  }
+    /**
+     * SQL in a native form for a certain backend version.
+     *
+     * @return SQL in a native form for a certain backend version
+     */
+    public String getSql() {
+        return sql;
+    }
 
-  /**
-   * Returns if given SQL is a function.
-   *
-   * @return {@code true} if given SQL is a function
-   */
-  public boolean isFunction() {
-    return isFunction;
-  }
+    /**
+     * Returns if given SQL is a function.
+     *
+     * @return {@code true} if given SQL is a function
+     */
+    public boolean isFunction() {
+        return isFunction;
+    }
 
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/core/NativeQuery.java b/pgjdbc/src/main/java/org/postgresql/core/NativeQuery.java
index 3e56cc6..ca0b3a1 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/NativeQuery.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/NativeQuery.java
@@ -11,106 +11,106 @@ package org.postgresql.core;
  * replaced with $1, $2, etc.
  */
 public class NativeQuery {
-  private static final String[] BIND_NAMES = new String[128 * 10];
-  private static final int[] NO_BINDS = new int[0];
+    private static final String[] BIND_NAMES = new String[128 * 10];
+    private static final int[] NO_BINDS = new int[0];
 
-  public final String nativeSql;
-  public final int[] bindPositions;
-  public final SqlCommand command;
-  public final boolean multiStatement;
-
-  static {
-    for (int i = 1; i < BIND_NAMES.length; i++) {
-      BIND_NAMES[i] = "$" + i;
-    }
-  }
-
-  public NativeQuery(String nativeSql, SqlCommand dml) {
-    this(nativeSql, NO_BINDS, true, dml);
-  }
-
-  public NativeQuery(String nativeSql, int [] bindPositions, boolean multiStatement, SqlCommand dml) {
-    this.nativeSql = nativeSql;
-    this.bindPositions =
-        bindPositions == null || bindPositions.length == 0 ? NO_BINDS : bindPositions;
-    this.multiStatement = multiStatement;
-    this.command = dml;
-  }
-
-  /**
-   * Stringize this query to a human-readable form, substituting particular parameter values for
-   * parameter placeholders.
-   *
-   * @param parameters a ParameterList returned by this Query's {@link Query#createParameterList}
-   *        method, or {@code null} to leave the parameter placeholders unsubstituted.
-   * @return a human-readable representation of this query
-   */
-  public String toString(ParameterList parameters) {
-    if (bindPositions.length == 0) {
-      return nativeSql;
+    static {
+        for (int i = 1; i < BIND_NAMES.length; i++) {
+            BIND_NAMES[i] = "$" + i;
+        }
     }
 
-    int queryLength = nativeSql.length();
-    String[] params = new String[bindPositions.length];
-    for (int i = 1; i <= bindPositions.length; i++) {
-      String param = parameters == null ? "?" : parameters.toString(i, true);
-      params[i - 1] = param;
-      queryLength += param.length() - bindName(i).length();
+    public final String nativeSql;
+    public final int[] bindPositions;
+    public final SqlCommand command;
+    public final boolean multiStatement;
+
+    public NativeQuery(String nativeSql, SqlCommand dml) {
+        this(nativeSql, NO_BINDS, true, dml);
     }
 
-    StringBuilder sbuf = new StringBuilder(queryLength);
-    sbuf.append(nativeSql, 0, bindPositions[0]);
-    for (int i = 1; i <= bindPositions.length; i++) {
-      sbuf.append(params[i - 1]);
-      int nextBind = i < bindPositions.length ? bindPositions[i] : nativeSql.length();
-      sbuf.append(nativeSql, bindPositions[i - 1] + bindName(i).length(), nextBind);
+    public NativeQuery(String nativeSql, int[] bindPositions, boolean multiStatement, SqlCommand dml) {
+        this.nativeSql = nativeSql;
+        this.bindPositions =
+                bindPositions == null || bindPositions.length == 0 ? NO_BINDS : bindPositions;
+        this.multiStatement = multiStatement;
+        this.command = dml;
     }
-    return sbuf.toString();
-  }
 
-  /**
-   * Returns $1, $2, etc names of bind variables used by backend.
-   *
-   * @param index index of a bind variable
-   * @return bind variable name
-   */
-  public static String bindName(int index) {
-    return index < BIND_NAMES.length ? BIND_NAMES[index] : "$" + index;
-  }
-
-  public static StringBuilder appendBindName(StringBuilder sb, int index) {
-    if (index < BIND_NAMES.length) {
-      return sb.append(bindName(index));
+    /**
+     * Returns $1, $2, etc names of bind variables used by backend.
+     *
+     * @param index index of a bind variable
+     * @return bind variable name
+     */
+    public static String bindName(int index) {
+        return index < BIND_NAMES.length ? BIND_NAMES[index] : "$" + index;
     }
-    sb.append('$');
-    sb.append(index);
-    return sb;
-  }
 
-  /**
-   * Calculate the text length required for the given number of bind variables
-   * including dollars.
-   * Do this to avoid repeated calls to
-   * AbstractStringBuilder.expandCapacity(...) and Arrays.copyOf
-   *
-   * @param bindCount total number of parameters in a query
-   * @return int total character length for $xyz kind of binds
-   */
-  public static int calculateBindLength(int bindCount) {
-    int res = 0;
-    int bindLen = 2; // $1
-    int maxBindsOfLen = 9; // $0 .. $9
-    while (bindCount > 0) {
-      int numBinds = Math.min(maxBindsOfLen, bindCount);
-      bindCount -= numBinds;
-      res += bindLen * numBinds;
-      bindLen++;
-      maxBindsOfLen *= 10; // $0..$9 (9 items) -> $10..$99 (90 items)
+    public static StringBuilder appendBindName(StringBuilder sb, int index) {
+        if (index < BIND_NAMES.length) {
+            return sb.append(bindName(index));
+        }
+        sb.append('$');
+        sb.append(index);
+        return sb;
     }
-    return res;
-  }
 
-  public SqlCommand getCommand() {
-    return command;
-  }
+    /**
+     * Calculate the text length required for the given number of bind variables
+     * including dollars.
+     * Do this to avoid repeated calls to
+     * AbstractStringBuilder.expandCapacity(...) and Arrays.copyOf
+     *
+     * @param bindCount total number of parameters in a query
+     * @return int total character length for $xyz kind of binds
+     */
+    public static int calculateBindLength(int bindCount) {
+        int res = 0;
+        int bindLen = 2; // $1
+        int maxBindsOfLen = 9; // $0 .. $9
+        while (bindCount > 0) {
+            int numBinds = Math.min(maxBindsOfLen, bindCount);
+            bindCount -= numBinds;
+            res += bindLen * numBinds;
+            bindLen++;
+            maxBindsOfLen *= 10; // $0..$9 (9 items) -> $10..$99 (90 items)
+        }
+        return res;
+    }
+
+    /**
+     * Stringize this query to a human-readable form, substituting particular parameter values for
+     * parameter placeholders.
+     *
+     * @param parameters a ParameterList returned by this Query's {@link Query#createParameterList}
+     *                   method, or {@code null} to leave the parameter placeholders unsubstituted.
+     * @return a human-readable representation of this query
+     */
+    public String toString(ParameterList parameters) {
+        if (bindPositions.length == 0) {
+            return nativeSql;
+        }
+
+        int queryLength = nativeSql.length();
+        String[] params = new String[bindPositions.length];
+        for (int i = 1; i <= bindPositions.length; i++) {
+            String param = parameters == null ? "?" : parameters.toString(i, true);
+            params[i - 1] = param;
+            queryLength += param.length() - bindName(i).length();
+        }
+
+        StringBuilder sbuf = new StringBuilder(queryLength);
+        sbuf.append(nativeSql, 0, bindPositions[0]);
+        for (int i = 1; i <= bindPositions.length; i++) {
+            sbuf.append(params[i - 1]);
+            int nextBind = i < bindPositions.length ? bindPositions[i] : nativeSql.length();
+            sbuf.append(nativeSql, bindPositions[i - 1] + bindName(i).length(), nextBind);
+        }
+        return sbuf.toString();
+    }
+
+    public SqlCommand getCommand() {
+        return command;
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/core/Notification.java b/pgjdbc/src/main/java/org/postgresql/core/Notification.java
index 793a274..cb7102e 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/Notification.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/Notification.java
@@ -9,39 +9,39 @@ import org.postgresql.PGNotification;
 
 public class Notification implements PGNotification {
 
-  private final String name;
-  private final String parameter;
-  private final int pid;
+    private final String name;
+    private final String parameter;
+    private final int pid;
 
-  public Notification(String name, int pid) {
-    this(name, pid, "");
-  }
+    public Notification(String name, int pid) {
+        this(name, pid, "");
+    }
 
-  public Notification(String name, int pid, String parameter) {
-    this.name = name;
-    this.pid = pid;
-    this.parameter = parameter;
-  }
+    public Notification(String name, int pid, String parameter) {
+        this.name = name;
+        this.pid = pid;
+        this.parameter = parameter;
+    }
 
-  /*
-   * Returns name of this notification
-   */
-  @Override
-  public String getName() {
-    return name;
-  }
+    /*
+     * Returns name of this notification
+     */
+    @Override
+    public String getName() {
+        return name;
+    }
 
-  /*
-   * Returns the process id of the backend process making this notification
-   */
-  @Override
-  public int getPID() {
-    return pid;
-  }
+    /*
+     * Returns the process id of the backend process making this notification
+     */
+    @Override
+    public int getPID() {
+        return pid;
+    }
 
-  @Override
-  public String getParameter() {
-    return parameter;
-  }
+    @Override
+    public String getParameter() {
+        return parameter;
+    }
 
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/core/Oid.java b/pgjdbc/src/main/java/org/postgresql/core/Oid.java
index 9fbd267..67649b8 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/Oid.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/Oid.java
@@ -5,144 +5,143 @@
 
 package org.postgresql.core;
 
-import org.postgresql.util.GT;
-import org.postgresql.util.PSQLException;
-import org.postgresql.util.PSQLState;
-
 import java.lang.reflect.Field;
 import java.util.HashMap;
 import java.util.Locale;
 import java.util.Map;
+import org.postgresql.util.GT;
+import org.postgresql.util.PSQLException;
+import org.postgresql.util.PSQLState;
 
 /**
  * Provides constants for well-known backend OIDs for the types we commonly use.
  */
 public class Oid {
-  public static final int UNSPECIFIED = 0;
-  public static final int INT2 = 21;
-  public static final int INT2_ARRAY = 1005;
-  public static final int INT4 = 23;
-  public static final int INT4_ARRAY = 1007;
-  public static final int INT8 = 20;
-  public static final int INT8_ARRAY = 1016;
-  public static final int TEXT = 25;
-  public static final int TEXT_ARRAY = 1009;
-  public static final int NUMERIC = 1700;
-  public static final int NUMERIC_ARRAY = 1231;
-  public static final int FLOAT4 = 700;
-  public static final int FLOAT4_ARRAY = 1021;
-  public static final int FLOAT8 = 701;
-  public static final int FLOAT8_ARRAY = 1022;
-  public static final int BOOL = 16;
-  public static final int BOOL_ARRAY = 1000;
-  public static final int DATE = 1082;
-  public static final int DATE_ARRAY = 1182;
-  public static final int TIME = 1083;
-  public static final int TIME_ARRAY = 1183;
-  public static final int TIMETZ = 1266;
-  public static final int TIMETZ_ARRAY = 1270;
-  public static final int TIMESTAMP = 1114;
-  public static final int TIMESTAMP_ARRAY = 1115;
-  public static final int TIMESTAMPTZ = 1184;
-  public static final int TIMESTAMPTZ_ARRAY = 1185;
-  public static final int BYTEA = 17;
-  public static final int BYTEA_ARRAY = 1001;
-  public static final int VARCHAR = 1043;
-  public static final int VARCHAR_ARRAY = 1015;
-  public static final int OID = 26;
-  public static final int OID_ARRAY = 1028;
-  public static final int BPCHAR = 1042;
-  public static final int BPCHAR_ARRAY = 1014;
-  public static final int MONEY = 790;
-  public static final int MONEY_ARRAY = 791;
-  public static final int NAME = 19;
-  public static final int NAME_ARRAY = 1003;
-  public static final int BIT = 1560;
-  public static final int BIT_ARRAY = 1561;
-  public static final int VOID = 2278;
-  public static final int INTERVAL = 1186;
-  public static final int INTERVAL_ARRAY = 1187;
-  public static final int CHAR = 18; // This is not char(N), this is "char" a single byte type.
-  public static final int CHAR_ARRAY = 1002;
-  public static final int VARBIT = 1562;
-  public static final int VARBIT_ARRAY = 1563;
-  public static final int UUID = 2950;
-  public static final int UUID_ARRAY = 2951;
-  public static final int XML = 142;
-  public static final int XML_ARRAY = 143;
-  public static final int POINT = 600;
-  public static final int POINT_ARRAY = 1017;
-  public static final int BOX = 603;
-  public static final int BOX_ARRAY = 1020;
-  public static final int JSONB = 3802;
-  public static final int JSONB_ARRAY = 3807;
-  public static final int JSON = 114;
-  public static final int JSON_ARRAY = 199;
-  public static final int REF_CURSOR = 1790;
-  public static final int REF_CURSOR_ARRAY = 2201;
-  public static final int LINE = 628;
-  public static final int LSEG = 601;
-  public static final int PATH = 602;
-  public static final int POLYGON = 604;
-  public static final int CIRCLE = 718;
-  public static final int CIDR = 650;
-  public static final int INET = 869;
-  public static final int MACADDR = 829;
-  public static final int MACADDR8 = 774;
-  public static final int TSVECTOR = 3614;
-  public static final int TSQUERY = 3615;
+    public static final int UNSPECIFIED = 0;
+    public static final int INT2 = 21;
+    public static final int INT2_ARRAY = 1005;
+    public static final int INT4 = 23;
+    public static final int INT4_ARRAY = 1007;
+    public static final int INT8 = 20;
+    public static final int INT8_ARRAY = 1016;
+    public static final int TEXT = 25;
+    public static final int TEXT_ARRAY = 1009;
+    public static final int NUMERIC = 1700;
+    public static final int NUMERIC_ARRAY = 1231;
+    public static final int FLOAT4 = 700;
+    public static final int FLOAT4_ARRAY = 1021;
+    public static final int FLOAT8 = 701;
+    public static final int FLOAT8_ARRAY = 1022;
+    public static final int BOOL = 16;
+    public static final int BOOL_ARRAY = 1000;
+    public static final int DATE = 1082;
+    public static final int DATE_ARRAY = 1182;
+    public static final int TIME = 1083;
+    public static final int TIME_ARRAY = 1183;
+    public static final int TIMETZ = 1266;
+    public static final int TIMETZ_ARRAY = 1270;
+    public static final int TIMESTAMP = 1114;
+    public static final int TIMESTAMP_ARRAY = 1115;
+    public static final int TIMESTAMPTZ = 1184;
+    public static final int TIMESTAMPTZ_ARRAY = 1185;
+    public static final int BYTEA = 17;
+    public static final int BYTEA_ARRAY = 1001;
+    public static final int VARCHAR = 1043;
+    public static final int VARCHAR_ARRAY = 1015;
+    public static final int OID = 26;
+    public static final int OID_ARRAY = 1028;
+    public static final int BPCHAR = 1042;
+    public static final int BPCHAR_ARRAY = 1014;
+    public static final int MONEY = 790;
+    public static final int MONEY_ARRAY = 791;
+    public static final int NAME = 19;
+    public static final int NAME_ARRAY = 1003;
+    public static final int BIT = 1560;
+    public static final int BIT_ARRAY = 1561;
+    public static final int VOID = 2278;
+    public static final int INTERVAL = 1186;
+    public static final int INTERVAL_ARRAY = 1187;
+    public static final int CHAR = 18; // This is not char(N), this is "char" a single byte type.
+    public static final int CHAR_ARRAY = 1002;
+    public static final int VARBIT = 1562;
+    public static final int VARBIT_ARRAY = 1563;
+    public static final int UUID = 2950;
+    public static final int UUID_ARRAY = 2951;
+    public static final int XML = 142;
+    public static final int XML_ARRAY = 143;
+    public static final int POINT = 600;
+    public static final int POINT_ARRAY = 1017;
+    public static final int BOX = 603;
+    public static final int BOX_ARRAY = 1020;
+    public static final int JSONB = 3802;
+    public static final int JSONB_ARRAY = 3807;
+    public static final int JSON = 114;
+    public static final int JSON_ARRAY = 199;
+    public static final int REF_CURSOR = 1790;
+    public static final int REF_CURSOR_ARRAY = 2201;
+    public static final int LINE = 628;
+    public static final int LSEG = 601;
+    public static final int PATH = 602;
+    public static final int POLYGON = 604;
+    public static final int CIRCLE = 718;
+    public static final int CIDR = 650;
+    public static final int INET = 869;
+    public static final int MACADDR = 829;
+    public static final int MACADDR8 = 774;
+    public static final int TSVECTOR = 3614;
+    public static final int TSQUERY = 3615;
 
-  private static final Map<Integer, String> OID_TO_NAME = new HashMap<>(100);
-  private static final Map<String, Integer> NAME_TO_OID = new HashMap<>(100);
+    private static final Map<Integer, String> OID_TO_NAME = new HashMap<>(100);
+    private static final Map<String, Integer> NAME_TO_OID = new HashMap<>(100);
 
-  static {
-    for (Field field : Oid.class.getFields()) {
-      try {
-        int oid = field.getInt(null);
-        String name = field.getName().toUpperCase(Locale.ROOT);
-        OID_TO_NAME.put(oid, name);
-        NAME_TO_OID.put(name, oid);
-      } catch (IllegalAccessException e) {
-        // ignore
-      }
+    static {
+        for (Field field : Oid.class.getFields()) {
+            try {
+                int oid = field.getInt(null);
+                String name = field.getName().toUpperCase(Locale.ROOT);
+                OID_TO_NAME.put(oid, name);
+                NAME_TO_OID.put(name, oid);
+            } catch (IllegalAccessException e) {
+                // ignore
+            }
+        }
     }
-  }
 
-  public Oid() {
-  }
-
-  /**
-   * Returns the name of the oid as string.
-   *
-   * @param oid The oid to convert to name.
-   * @return The name of the oid or {@code "<unknown>"} if oid no constant for oid value has been
-   *         defined.
-   */
-  public static String toString(int oid) {
-    String name = OID_TO_NAME.get(oid);
-    if (name == null) {
-      name = "<unknown:" + oid + ">";
+    public Oid() {
     }
-    return name;
-  }
 
-  public static int valueOf(String oid) throws PSQLException {
-    if (oid.length() > 0 && !Character.isDigit(oid.charAt(0))) {
-      Integer id = NAME_TO_OID.get(oid);
-      if (id == null) {
-        id = NAME_TO_OID.get(oid.toUpperCase(Locale.ROOT));
-      }
-      if (id != null) {
-        return id;
-      }
-    } else {
-      try {
-        // OID are unsigned 32bit integers, so Integer.parseInt is not enough
-        return (int) Long.parseLong(oid);
-      } catch (NumberFormatException ex) {
-      }
+    /**
+     * Returns the name of the oid as string.
+     *
+     * @param oid The oid to convert to name.
+     * @return The name of the oid or {@code "<unknown>"} if no constant for the oid value has been
+     * defined.
+     */
+    public static String toString(int oid) {
+        String name = OID_TO_NAME.get(oid);
+        if (name == null) {
+            name = "<unknown:" + oid + ">";
+        }
+        return name;
+    }
+
+    public static int valueOf(String oid) throws PSQLException {
+        if (oid.length() > 0 && !Character.isDigit(oid.charAt(0))) {
+            Integer id = NAME_TO_OID.get(oid);
+            if (id == null) {
+                id = NAME_TO_OID.get(oid.toUpperCase(Locale.ROOT));
+            }
+            if (id != null) {
+                return id;
+            }
+        } else {
+            try {
+                // OID are unsigned 32bit integers, so Integer.parseInt is not enough
+                return (int) Long.parseLong(oid);
+            } catch (NumberFormatException ex) {
+            }
+        }
+        throw new PSQLException(GT.tr("oid type {0} not known and not a number", oid),
+                PSQLState.INVALID_PARAMETER_VALUE);
     }
-    throw new PSQLException(GT.tr("oid type {0} not known and not a number", oid),
-        PSQLState.INVALID_PARAMETER_VALUE);
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/core/PGBindException.java b/pgjdbc/src/main/java/org/postgresql/core/PGBindException.java
index 1a91133..2bc1ee3 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/PGBindException.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/PGBindException.java
@@ -10,13 +10,13 @@ import java.io.IOException;
 @SuppressWarnings("serial")
 public class PGBindException extends IOException {
 
-  private final IOException ioe;
+    private final IOException ioe;
 
-  public PGBindException(IOException ioe) {
-    this.ioe = ioe;
-  }
+    public PGBindException(IOException ioe) {
+        this.ioe = ioe;
+    }
 
-  public IOException getIOException() {
-    return ioe;
-  }
+    public IOException getIOException() {
+        return ioe;
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/core/PGStream.java b/pgjdbc/src/main/java/org/postgresql/core/PGStream.java
index b914b07..43caa2a 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/PGStream.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/PGStream.java
@@ -5,18 +5,6 @@
 
 package org.postgresql.core;
 
-import org.postgresql.gss.GSSInputStream;
-import org.postgresql.gss.GSSOutputStream;
-import org.postgresql.util.ByteStreamWriter;
-import org.postgresql.util.GT;
-import org.postgresql.util.HostSpec;
-import org.postgresql.util.PGPropertyMaxResultBufferParser;
-import org.postgresql.util.PSQLException;
-import org.postgresql.util.PSQLState;
-
-import org.ietf.jgss.GSSContext;
-import org.ietf.jgss.MessageProp;
-
 import java.io.BufferedOutputStream;
 import java.io.Closeable;
 import java.io.EOFException;
@@ -32,8 +20,17 @@ import java.net.Socket;
 import java.net.SocketException;
 import java.net.SocketTimeoutException;
 import java.sql.SQLException;
-
 import javax.net.SocketFactory;
+import org.ietf.jgss.GSSContext;
+import org.ietf.jgss.MessageProp;
+import org.postgresql.gss.GSSInputStream;
+import org.postgresql.gss.GSSOutputStream;
+import org.postgresql.util.ByteStreamWriter;
+import org.postgresql.util.GT;
+import org.postgresql.util.HostSpec;
+import org.postgresql.util.PGPropertyMaxResultBufferParser;
+import org.postgresql.util.PSQLException;
+import org.postgresql.util.PSQLState;
 
 /**
  * <p>Wrapper around the raw connection to the server that implements some basic primitives
@@ -43,804 +40,797 @@ import javax.net.SocketFactory;
  * at a time is accessing a particular PGStream instance.</p>
  */
 public class PGStream implements Closeable, Flushable {
-  private final SocketFactory socketFactory;
-  private final HostSpec hostSpec;
+    private final SocketFactory socketFactory;
+    private final HostSpec hostSpec;
 
-  private final byte[] int4Buf;
-  private final byte[] int2Buf;
+    private final byte[] int4Buf;
+    private final byte[] int2Buf;
+    boolean gssEncrypted;
+    private Socket connection;
+    private VisibleBufferedInputStream pgInput;
+    private OutputStream pgOutput;
+    private byte[] streamBuffer;
+    private long nextStreamAvailableCheckTime;
+    // This is a workaround for SSL sockets: sslInputStream.available() might return 0
+    // so we perform "1ms reads" once in a while
+    private int minStreamAvailableCheckDelay = 1000;
+    private Encoding encoding;
+    private Writer encodingWriter;
+    private long maxResultBuffer = -1;
+    private long resultBufferByteCount;
+    private int maxRowSizeBytes = -1;
+    /**
+     * Constructor: Connect to the PostgreSQL back end and return a stream connection.
+     *
+     * @param socketFactory socket factory to use when creating sockets
+     * @param hostSpec      the host and port to connect to
+     * @param timeout       timeout in milliseconds, or 0 if no timeout set
+     * @throws IOException if an IOException occurs below it.
+     */
+    @SuppressWarnings("this-escape")
+    public PGStream(SocketFactory socketFactory, HostSpec hostSpec, int timeout) throws IOException {
+        this.socketFactory = socketFactory;
+        this.hostSpec = hostSpec;
 
-  private Socket connection;
-  private VisibleBufferedInputStream pgInput;
-  private OutputStream pgOutput;
-  private byte [] streamBuffer;
+        Socket socket = createSocket(timeout);
+        changeSocket(socket);
+        setEncoding(Encoding.getJVMEncoding("UTF-8"));
 
-  public boolean isGssEncrypted() {
-    return gssEncrypted;
-  }
+        int2Buf = new byte[2];
+        int4Buf = new byte[4];
+    }
 
-  boolean gssEncrypted;
-
-  public void setSecContext(GSSContext secContext) {
-    MessageProp messageProp =  new MessageProp(0, true);
-    pgInput = new VisibleBufferedInputStream(new GSSInputStream(pgInput.getWrapped(), secContext, messageProp ), 8192);
-    pgOutput = new GSSOutputStream(pgOutput, secContext, messageProp, 16384);
-    gssEncrypted = true;
-
-  }
-
-  private long nextStreamAvailableCheckTime;
-  // This is a workaround for SSL sockets: sslInputStream.available() might return 0
-  // so we perform "1ms reads" once in a while
-  private int minStreamAvailableCheckDelay = 1000;
-
-  private Encoding encoding;
-  private Writer encodingWriter;
-
-  private long maxResultBuffer = -1;
-  private long resultBufferByteCount;
-
-  private int maxRowSizeBytes = -1;
-
-  /**
-   * Constructor: Connect to the PostgreSQL back end and return a stream connection.
-   *
-   * @param socketFactory socket factory to use when creating sockets
-   * @param hostSpec the host and port to connect to
-   * @param timeout timeout in milliseconds, or 0 if no timeout set
-   * @throws IOException if an IOException occurs below it.
-   */
-  @SuppressWarnings("this-escape")
-  public PGStream(SocketFactory socketFactory, HostSpec hostSpec, int timeout) throws IOException {
-    this.socketFactory = socketFactory;
-    this.hostSpec = hostSpec;
-
-    Socket socket = createSocket(timeout);
-    changeSocket(socket);
-    setEncoding(Encoding.getJVMEncoding("UTF-8"));
-
-    int2Buf = new byte[2];
-    int4Buf = new byte[4];
-  }
-
-  @SuppressWarnings("this-escape")
-  public PGStream(PGStream pgStream, int timeout) throws IOException {
+    @SuppressWarnings("this-escape")
+    public PGStream(PGStream pgStream, int timeout) throws IOException {
 
     /*
     Some defaults
      */
-    int sendBufferSize = 1024;
-    int receiveBufferSize = 1024;
-    int soTimeout = 0;
-    boolean keepAlive = false;
-    boolean tcpNoDelay = true;
+        int sendBufferSize = 1024;
+        int receiveBufferSize = 1024;
+        int soTimeout = 0;
+        boolean keepAlive = false;
+        boolean tcpNoDelay = true;
 
     /*
     Get the existing values before closing the stream
      */
-    try {
-      sendBufferSize = pgStream.getSocket().getSendBufferSize();
-      receiveBufferSize = pgStream.getSocket().getReceiveBufferSize();
-      soTimeout = pgStream.getSocket().getSoTimeout();
-      keepAlive = pgStream.getSocket().getKeepAlive();
-      tcpNoDelay = pgStream.getSocket().getTcpNoDelay();
+        try {
+            sendBufferSize = pgStream.getSocket().getSendBufferSize();
+            receiveBufferSize = pgStream.getSocket().getReceiveBufferSize();
+            soTimeout = pgStream.getSocket().getSoTimeout();
+            keepAlive = pgStream.getSocket().getKeepAlive();
+            tcpNoDelay = pgStream.getSocket().getTcpNoDelay();
 
-    } catch ( SocketException ex ) {
-      // ignore it
-    }
-    //close the existing stream
-    pgStream.close();
+        } catch (SocketException ex) {
+            // ignore it
+        }
+        //close the existing stream
+        pgStream.close();
 
-    this.socketFactory = pgStream.socketFactory;
-    this.hostSpec = pgStream.hostSpec;
+        this.socketFactory = pgStream.socketFactory;
+        this.hostSpec = pgStream.hostSpec;
 
-    Socket socket = createSocket(timeout);
-    changeSocket(socket);
-    setEncoding(Encoding.getJVMEncoding("UTF-8"));
-    // set the buffer sizes and timeout
-    socket.setReceiveBufferSize(receiveBufferSize);
-    socket.setSendBufferSize(sendBufferSize);
-    setNetworkTimeout(soTimeout);
-    socket.setKeepAlive(keepAlive);
-    socket.setTcpNoDelay(tcpNoDelay);
+        Socket socket = createSocket(timeout);
+        changeSocket(socket);
+        setEncoding(Encoding.getJVMEncoding("UTF-8"));
+        // set the buffer sizes and timeout
+        socket.setReceiveBufferSize(receiveBufferSize);
+        socket.setSendBufferSize(sendBufferSize);
+        setNetworkTimeout(soTimeout);
+        socket.setKeepAlive(keepAlive);
+        socket.setTcpNoDelay(tcpNoDelay);
 
-    int2Buf = new byte[2];
-    int4Buf = new byte[4];
+        int2Buf = new byte[2];
+        int4Buf = new byte[4];
 
-  }
-
-  /**
-   * Constructor: Connect to the PostgreSQL back end and return a stream connection.
-   *
-   * @param socketFactory socket factory
-   * @param hostSpec the host and port to connect to
-   * @throws IOException if an IOException occurs below it.
-   * @deprecated use {@link #PGStream(SocketFactory, org.postgresql.util.HostSpec, int)}
-   */
-  @Deprecated
-  public PGStream(SocketFactory socketFactory, HostSpec hostSpec) throws IOException {
-    this(socketFactory, hostSpec, 0);
-  }
-
-  public HostSpec getHostSpec() {
-    return hostSpec;
-  }
-
-  public Socket getSocket() {
-    return connection;
-  }
-
-  public SocketFactory getSocketFactory() {
-    return socketFactory;
-  }
-
-  /**
-   * Check for pending backend messages without blocking. Might return false when there actually are
-   * messages waiting, depending on the characteristics of the underlying socket. This is used to
-   * detect asynchronous notifies from the backend, when available.
-   *
-   * @return true if there is a pending backend message
-   * @throws IOException if something wrong happens
-   */
-  public boolean hasMessagePending() throws IOException {
-
-    boolean available = false;
-
-    // In certain cases, available returns 0, yet there are bytes
-    if (pgInput.available() > 0) {
-      return true;
-    }
-    long now = System.nanoTime() / 1000000;
-
-    if (now < nextStreamAvailableCheckTime && minStreamAvailableCheckDelay != 0) {
-      // Do not use ".peek" too often
-      return false;
     }
 
-    int soTimeout = getNetworkTimeout();
-    connection.setSoTimeout(1);
-    try {
-      if (!pgInput.ensureBytes(1, false)) {
-        return false;
-      }
-      available = pgInput.peek() != -1;
-    } catch (SocketTimeoutException e) {
-      return false;
-    } finally {
-      connection.setSoTimeout(soTimeout);
+    /**
+     * Constructor: Connect to the PostgreSQL back end and return a stream connection.
+     *
+     * @param socketFactory socket factory
+     * @param hostSpec      the host and port to connect to
+     * @throws IOException if an IOException occurs below it.
+     * @deprecated use {@link #PGStream(SocketFactory, org.postgresql.util.HostSpec, int)}
+     */
+    @Deprecated
+    public PGStream(SocketFactory socketFactory, HostSpec hostSpec) throws IOException {
+        this(socketFactory, hostSpec, 0);
     }
 
+    public boolean isGssEncrypted() {
+        return gssEncrypted;
+    }
+
+    public void setSecContext(GSSContext secContext) {
+        MessageProp messageProp = new MessageProp(0, true);
+        pgInput = new VisibleBufferedInputStream(new GSSInputStream(pgInput.getWrapped(), secContext, messageProp), 8192);
+        pgOutput = new GSSOutputStream(pgOutput, secContext, messageProp, 16384);
+        gssEncrypted = true;
+
+    }
+
+    public HostSpec getHostSpec() {
+        return hostSpec;
+    }
+
+    public Socket getSocket() {
+        return connection;
+    }
+
+    public SocketFactory getSocketFactory() {
+        return socketFactory;
+    }
+
+    /**
+     * Check for pending backend messages without blocking. Might return false when there actually are
+     * messages waiting, depending on the characteristics of the underlying socket. This is used to
+     * detect asynchronous notifies from the backend, when available.
+     *
+     * @return true if there is a pending backend message
+     * @throws IOException if something wrong happens
+     */
+    public boolean hasMessagePending() throws IOException {
+
+        boolean available = false;
+
+        // In certain cases, available returns 0, yet there are bytes
+        if (pgInput.available() > 0) {
+            return true;
+        }
+        long now = System.nanoTime() / 1000000;
+
+        if (now < nextStreamAvailableCheckTime && minStreamAvailableCheckDelay != 0) {
+            // Do not use ".peek" too often
+            return false;
+        }
+
+        int soTimeout = getNetworkTimeout();
+        connection.setSoTimeout(1);
+        try {
+            if (!pgInput.ensureBytes(1, false)) {
+                return false;
+            }
+            available = pgInput.peek() != -1;
+        } catch (SocketTimeoutException e) {
+            return false;
+        } finally {
+            connection.setSoTimeout(soTimeout);
+        }
+
     /*
     If none available then set the next check time
     In the event that there more async bytes available we will continue to get them all
     see issue 1547 https://github.com/pgjdbc/pgjdbc/issues/1547
      */
-    if (!available) {
-      nextStreamAvailableCheckTime = now + minStreamAvailableCheckDelay;
+        if (!available) {
+            nextStreamAvailableCheckTime = now + minStreamAvailableCheckDelay;
+        }
+        return available;
     }
-    return available;
-  }
 
-  public void setMinStreamAvailableCheckDelay(int delay) {
-    this.minStreamAvailableCheckDelay = delay;
-  }
+    public void setMinStreamAvailableCheckDelay(int delay) {
+        this.minStreamAvailableCheckDelay = delay;
+    }
 
-  private Socket createSocket(int timeout) throws IOException {
-    Socket socket = null;
-    try {
-      socket = socketFactory.createSocket();
-      String localSocketAddress = hostSpec.getLocalSocketAddress();
-      if (localSocketAddress != null) {
-        socket.bind(new InetSocketAddress(InetAddress.getByName(localSocketAddress), 0));
-      }
-      if (!socket.isConnected()) {
-        // When using a SOCKS proxy, the host might not be resolvable locally,
-        // thus we defer resolution until the traffic reaches the proxy. If there
-        // is no proxy, we must resolve the host to an IP to connect the socket.
-        InetSocketAddress address = hostSpec.shouldResolve()
-            ? new InetSocketAddress(hostSpec.getHost(), hostSpec.getPort())
-            : InetSocketAddress.createUnresolved(hostSpec.getHost(), hostSpec.getPort());
-        socket.connect(address, timeout);
-      }
-      return socket;
-    } catch ( Exception ex ) {
-      if (socket != null) {
+    private Socket createSocket(int timeout) throws IOException {
+        Socket socket = null;
         try {
-          socket.close();
-        } catch ( Exception ex1 ) {
-          ex.addSuppressed(ex1);
+            socket = socketFactory.createSocket();
+            String localSocketAddress = hostSpec.getLocalSocketAddress();
+            if (localSocketAddress != null) {
+                socket.bind(new InetSocketAddress(InetAddress.getByName(localSocketAddress), 0));
+            }
+            if (!socket.isConnected()) {
+                // When using a SOCKS proxy, the host might not be resolvable locally,
+                // thus we defer resolution until the traffic reaches the proxy. If there
+                // is no proxy, we must resolve the host to an IP to connect the socket.
+                InetSocketAddress address = hostSpec.shouldResolve()
+                        ? new InetSocketAddress(hostSpec.getHost(), hostSpec.getPort())
+                        : InetSocketAddress.createUnresolved(hostSpec.getHost(), hostSpec.getPort());
+                socket.connect(address, timeout);
+            }
+            return socket;
+        } catch (Exception ex) {
+            if (socket != null) {
+                try {
+                    socket.close();
+                } catch (Exception ex1) {
+                    ex.addSuppressed(ex1);
+                }
+            }
+            throw ex;
         }
-      }
-      throw ex;
-    }
-  }
-
-  /**
-   * Switch this stream to using a new socket. Any existing socket is <em>not</em> closed; it's
-   * assumed that we are changing to a new socket that delegates to the original socket (e.g. SSL).
-   *
-   * @param socket the new socket to change to
-   * @throws IOException if something goes wrong
-   */
-  public void changeSocket(Socket socket) throws IOException {
-    assert connection != socket : "changeSocket is called with the current socket as argument."
-        + " This is a no-op, however, it re-allocates buffered streams, so refrain from"
-        + " excessive changeSocket calls";
-
-    this.connection = socket;
-
-    // Submitted by Jason Venner <jason@idiom.com>. Disable Nagle
-    // as we are selective about flushing output only when we
-    // really need to.
-    connection.setTcpNoDelay(true);
-
-    // Buffer sizes submitted by Sverre H Huseby <sverrehu@online.no>
-    pgInput = new VisibleBufferedInputStream(connection.getInputStream(), 8192);
-    pgOutput = new BufferedOutputStream(connection.getOutputStream(), 8192);
-
-    if (encoding != null) {
-      setEncoding(encoding);
-    }
-  }
-
-  public Encoding getEncoding() {
-    return encoding;
-  }
-
-  /**
-   * Change the encoding used by this connection.
-   *
-   * @param encoding the new encoding to use
-   * @throws IOException if something goes wrong
-   */
-  public void setEncoding(Encoding encoding) throws IOException {
-    if (this.encoding != null && this.encoding.name().equals(encoding.name())) {
-      return;
-    }
-    // Close down any old writer.
-    if (encodingWriter != null) {
-      encodingWriter.close();
     }
 
-    this.encoding = encoding;
+    /**
+     * Switch this stream to using a new socket. Any existing socket is <em>not</em> closed; it's
+     * assumed that we are changing to a new socket that delegates to the original socket (e.g. SSL).
+     *
+     * @param socket the new socket to change to
+     * @throws IOException if something goes wrong
+     */
+    public void changeSocket(Socket socket) throws IOException {
+        assert connection != socket : "changeSocket is called with the current socket as argument."
+                + " This is a no-op, however, it re-allocates buffered streams, so refrain from"
+                + " excessive changeSocket calls";
 
-    // Intercept flush() downcalls from the writer; our caller
-    // will call PGStream.flush() as needed.
-    OutputStream interceptor = new FilterOutputStream(pgOutput) {
-      @Override
-      public void flush() throws IOException {
-      }
+        this.connection = socket;
 
-      @Override
-      public void close() throws IOException {
-        super.flush();
-      }
-    };
+        // Submitted by Jason Venner <jason@idiom.com>. Disable Nagle
+        // as we are selective about flushing output only when we
+        // really need to.
+        connection.setTcpNoDelay(true);
 
-    encodingWriter = encoding.getEncodingWriter(interceptor);
-  }
+        // Buffer sizes submitted by Sverre H Huseby <sverrehu@online.no>
+        pgInput = new VisibleBufferedInputStream(connection.getInputStream(), 8192);
+        pgOutput = new BufferedOutputStream(connection.getOutputStream(), 8192);
 
-  /**
-   * <p>Get a Writer instance that encodes directly onto the underlying stream.</p>
-   *
-   * <p>The returned Writer should not be closed, as it's a shared object. Writer.flush needs to be
-   * called when switching between use of the Writer and use of the PGStream write methods, but it
-   * won't actually flush output all the way out -- call {@link #flush} to actually ensure all
-   * output has been pushed to the server.</p>
-   *
-   * @return the shared Writer instance
-   * @throws IOException if something goes wrong.
-   */
-  public Writer getEncodingWriter() throws IOException {
-    if (encodingWriter == null) {
-      throw new IOException("No encoding has been set on this connection");
-    }
-    return encodingWriter;
-  }
-
-  /**
-   * Sends a single character to the back end.
-   *
-   * @param val the character to be sent
-   * @throws IOException if an I/O error occurs
-   */
-  public void sendChar(int val) throws IOException {
-    pgOutput.write(val);
-  }
-
-  /**
-   * Sends a 4-byte integer to the back end.
-   *
-   * @param val the integer to be sent
-   * @throws IOException if an I/O error occurs
-   */
-  public void sendInteger4(int val) throws IOException {
-    int4Buf[0] = (byte) (val >>> 24);
-    int4Buf[1] = (byte) (val >>> 16);
-    int4Buf[2] = (byte) (val >>> 8);
-    int4Buf[3] = (byte) (val);
-    pgOutput.write(int4Buf);
-  }
-
-  /**
-   * Sends a 2-byte integer (short) to the back end.
-   *
-   * @param val the integer to be sent
-   * @throws IOException if an I/O error occurs or {@code val} cannot be encoded in 2 bytes
-   */
-  public void sendInteger2(int val) throws IOException {
-    if (val < 0 || val > 65535) {
-      throw new IllegalArgumentException("Tried to send an out-of-range integer as a 2-byte unsigned int value: " + val);
-    }
-    int2Buf[0] = (byte) (val >>> 8);
-    int2Buf[1] = (byte) val;
-    pgOutput.write(int2Buf);
-  }
-
-  /**
-   * Send an array of bytes to the backend.
-   *
-   * @param buf The array of bytes to be sent
-   * @throws IOException if an I/O error occurs
-   */
-  public void send(byte[] buf) throws IOException {
-    pgOutput.write(buf);
-  }
-
-  /**
-   * Send a fixed-size array of bytes to the backend. If {@code buf.length < siz}, pad with zeros.
-   * If {@code buf.length > siz}, truncate the array.
-   *
-   * @param buf the array of bytes to be sent
-   * @param siz the number of bytes to be sent
-   * @throws IOException if an I/O error occurs
-   */
-  public void send(byte[] buf, int siz) throws IOException {
-    send(buf, 0, siz);
-  }
-
-  /**
-   * Send a fixed-size array of bytes to the backend. If {@code length < siz}, pad with zeros. If
-   * {@code length > siz}, truncate the array.
-   *
-   * @param buf the array of bytes to be sent
-   * @param off offset in the array to start sending from
-   * @param siz the number of bytes to be sent
-   * @throws IOException if an I/O error occurs
-   */
-  public void send(byte[] buf, int off, int siz) throws IOException {
-    int bufamt = buf.length - off;
-    pgOutput.write(buf, off, bufamt < siz ? bufamt : siz);
-    for (int i = bufamt; i < siz; i++) {
-      pgOutput.write(0);
-    }
-  }
-
-  /**
-   * Send a fixed-size array of bytes to the backend. If {@code length < siz}, pad with zeros. If
-   * {@code length > siz}, truncate the array.
-   *
-   * @param writer the stream writer to invoke to send the bytes
-   * @throws IOException if an I/O error occurs
-   */
-  public void send(ByteStreamWriter writer) throws IOException {
-    final FixedLengthOutputStream fixedLengthStream = new FixedLengthOutputStream(writer.getLength(), pgOutput);
-    try {
-      writer.writeTo(new ByteStreamWriter.ByteStreamTarget() {
-        @Override
-        public OutputStream getOutputStream() {
-          return fixedLengthStream;
+        if (encoding != null) {
+            setEncoding(encoding);
         }
-      });
-    } catch (IOException ioe) {
-      throw ioe;
-    } catch (Exception re) {
-      throw new IOException("Error writing bytes to stream", re);
-    }
-    for (int i = fixedLengthStream.remaining(); i > 0; i--) {
-      pgOutput.write(0);
-    }
-  }
-
-  /**
-   * Receives a single character from the backend, without advancing the current protocol stream
-   * position.
-   *
-   * @return the character received
-   * @throws IOException if an I/O Error occurs
-   */
-  public int peekChar() throws IOException {
-    int c = pgInput.peek();
-    if (c < 0) {
-      throw new EOFException();
-    }
-    return c;
-  }
-
-  /**
-   * Receives a single character from the backend.
-   *
-   * @return the character received
-   * @throws IOException if an I/O Error occurs
-   */
-  public int receiveChar() throws IOException {
-    int c = pgInput.read();
-    if (c < 0) {
-      throw new EOFException();
-    }
-    return c;
-  }
-
-  /**
-   * Receives a four byte integer from the backend.
-   *
-   * @return the integer received from the backend
-   * @throws IOException if an I/O error occurs
-   */
-  public int receiveInteger4() throws IOException {
-    if (pgInput.read(int4Buf) != 4) {
-      throw new EOFException();
     }
 
-    return (int4Buf[0] & 0xFF) << 24 | (int4Buf[1] & 0xFF) << 16 | (int4Buf[2] & 0xFF) << 8
-        | int4Buf[3] & 0xFF;
-  }
-
-  /**
-   * Receives a two byte integer from the backend.
-   *
-   * @return the integer received from the backend
-   * @throws IOException if an I/O error occurs
-   */
-  public int receiveInteger2() throws IOException {
-    if (pgInput.read(int2Buf) != 2) {
-      throw new EOFException();
+    public Encoding getEncoding() {
+        return encoding;
     }
 
-    return (int2Buf[0] & 0xFF) << 8 | int2Buf[1] & 0xFF;
-  }
+    /**
+     * Change the encoding used by this connection.
+     *
+     * @param encoding the new encoding to use
+     * @throws IOException if something goes wrong
+     */
+    public void setEncoding(Encoding encoding) throws IOException {
+        if (this.encoding != null && this.encoding.name().equals(encoding.name())) {
+            return;
+        }
+        // Close down any old writer.
+        if (encodingWriter != null) {
+            encodingWriter.close();
+        }
 
-  /**
-   * Receives a fixed-size string from the backend.
-   *
-   * @param len the length of the string to receive, in bytes.
-   * @return the decoded string
-   * @throws IOException if something wrong happens
-   */
-  public String receiveString(int len) throws IOException {
-    if (!pgInput.ensureBytes(len)) {
-      throw new EOFException();
+        this.encoding = encoding;
+
+        // Intercept flush() downcalls from the writer; our caller
+        // will call PGStream.flush() as needed.
+        OutputStream interceptor = new FilterOutputStream(pgOutput) {
+            @Override
+            public void flush() throws IOException {
+            }
+
+            @Override
+            public void close() throws IOException {
+                super.flush();
+            }
+        };
+
+        encodingWriter = encoding.getEncodingWriter(interceptor);
     }
 
-    String res = encoding.decode(pgInput.getBuffer(), pgInput.getIndex(), len);
-    pgInput.skip(len);
-    return res;
-  }
-
-  /**
-   * Receives a fixed-size string from the backend, and tries to avoid "UTF-8 decode failed"
-   * errors.
-   *
-   * @param len the length of the string to receive, in bytes.
-   * @return the decoded string
-   * @throws IOException if something wrong happens
-   */
-  public EncodingPredictor.DecodeResult receiveErrorString(int len) throws IOException {
-    if (!pgInput.ensureBytes(len)) {
-      throw new EOFException();
+    /**
+     * <p>Get a Writer instance that encodes directly onto the underlying stream.</p>
+     *
+     * <p>The returned Writer should not be closed, as it's a shared object. Writer.flush needs to be
+     * called when switching between use of the Writer and use of the PGStream write methods, but it
+     * won't actually flush output all the way out -- call {@link #flush} to actually ensure all
+     * output has been pushed to the server.</p>
+     *
+     * @return the shared Writer instance
+     * @throws IOException if something goes wrong.
+     */
+    public Writer getEncodingWriter() throws IOException {
+        if (encodingWriter == null) {
+            throw new IOException("No encoding has been set on this connection");
+        }
+        return encodingWriter;
     }
 
-    EncodingPredictor.DecodeResult res;
-    try {
-      String value = encoding.decode(pgInput.getBuffer(), pgInput.getIndex(), len);
-      // no autodetect warning as the message was converted on its own
-      res = new EncodingPredictor.DecodeResult(value, null);
-    } catch (IOException e) {
-      res = EncodingPredictor.decode(pgInput.getBuffer(), pgInput.getIndex(), len);
-      if (res == null) {
-        Encoding enc = Encoding.defaultEncoding();
-        String value = enc.decode(pgInput.getBuffer(), pgInput.getIndex(), len);
-        res = new EncodingPredictor.DecodeResult(value, enc.name());
-      }
+    /**
+     * Sends a single character to the back end.
+     *
+     * @param val the character to be sent
+     * @throws IOException if an I/O error occurs
+     */
+    public void sendChar(int val) throws IOException {
+        pgOutput.write(val);
     }
-    pgInput.skip(len);
-    return res;
-  }
 
-  /**
-   * Receives a null-terminated string from the backend. If we don't see a null, then we assume
-   * something has gone wrong.
-   *
-   * @return string from back end
-   * @throws IOException if an I/O error occurs, or end of file
-   */
-  public String receiveString() throws IOException {
-    int len = pgInput.scanCStringLength();
-    String res = encoding.decode(pgInput.getBuffer(), pgInput.getIndex(), len - 1);
-    pgInput.skip(len);
-    return res;
-  }
+    /**
+     * Sends a 4-byte integer to the back end.
+     *
+     * @param val the integer to be sent
+     * @throws IOException if an I/O error occurs
+     */
+    public void sendInteger4(int val) throws IOException {
+        int4Buf[0] = (byte) (val >>> 24);
+        int4Buf[1] = (byte) (val >>> 16);
+        int4Buf[2] = (byte) (val >>> 8);
+        int4Buf[3] = (byte) (val);
+        pgOutput.write(int4Buf);
+    }
 
-  /**
-   * Receives a null-terminated string from the backend and attempts to decode to a
-   * {@link Encoding#decodeCanonicalized(byte[], int, int) canonical} {@code String}.
-   * If we don't see a null, then we assume something has gone wrong.
-   *
-   * @return string from back end
-   * @throws IOException if an I/O error occurs, or end of file
-   * @see Encoding#decodeCanonicalized(byte[], int, int)
-   */
-  public String receiveCanonicalString() throws IOException {
-    int len = pgInput.scanCStringLength();
-    String res = encoding.decodeCanonicalized(pgInput.getBuffer(), pgInput.getIndex(), len - 1);
-    pgInput.skip(len);
-    return res;
-  }
+    /**
+     * Sends a 2-byte integer (short) to the back end.
+     *
+     * @param val the integer to be sent
+     * @throws IOException if an I/O error occurs or {@code val} cannot be encoded in 2 bytes
+     */
+    public void sendInteger2(int val) throws IOException {
+        if (val < 0 || val > 65535) {
+            throw new IllegalArgumentException("Tried to send an out-of-range integer as a 2-byte unsigned int value: " + val);
+        }
+        int2Buf[0] = (byte) (val >>> 8);
+        int2Buf[1] = (byte) val;
+        pgOutput.write(int2Buf);
+    }
 
-  /**
-   * Receives a null-terminated string from the backend and attempts to decode to a
-   * {@link Encoding#decodeCanonicalizedIfPresent(byte[], int, int) canonical} {@code String}.
-   * If we don't see a null, then we assume something has gone wrong.
-   *
-   * @return string from back end
-   * @throws IOException if an I/O error occurs, or end of file
-   * @see Encoding#decodeCanonicalizedIfPresent(byte[], int, int)
-   */
-  public String receiveCanonicalStringIfPresent() throws IOException {
-    int len = pgInput.scanCStringLength();
-    String res = encoding.decodeCanonicalizedIfPresent(pgInput.getBuffer(), pgInput.getIndex(), len - 1);
-    pgInput.skip(len);
-    return res;
-  }
+    /**
+     * Send an array of bytes to the backend.
+     *
+     * @param buf The array of bytes to be sent
+     * @throws IOException if an I/O error occurs
+     */
+    public void send(byte[] buf) throws IOException {
+        pgOutput.write(buf);
+    }
 
-  /**
-   * Read a tuple from the back end. A tuple is a two dimensional array of bytes. This variant reads
-   * the V3 protocol's tuple representation.
-   *
-   * @return tuple from the back end
-   * @throws IOException if a data I/O error occurs
-   * @throws SQLException if read more bytes than set maxResultBuffer
-   */
-  public Tuple receiveTupleV3() throws IOException, OutOfMemoryError, SQLException {
-    int messageSize = receiveInteger4(); // MESSAGE SIZE
-    int nf = receiveInteger2();
-    //size = messageSize - 4 bytes of message size - 2 bytes of field count - 4 bytes for each column length
-    int dataToReadSize = messageSize - 4 - 2 - 4 * nf;
-    setMaxRowSizeBytes(dataToReadSize);
+    /**
+     * Writes exactly {@code siz} bytes to the backend, drawing them from the start of
+     * {@code buf}. Shorter arrays are zero-padded; longer arrays are truncated.
+     *
+     * @param buf the array of bytes to be sent
+     * @param siz the number of bytes to be sent
+     * @throws IOException if an I/O error occurs
+     */
+    public void send(byte[] buf, int siz) throws IOException {
+        this.send(buf, 0, siz);
+    }
 
-    byte[][] answer = new byte[nf][];
+    /**
+     * Writes exactly {@code siz} bytes to the backend, drawing them from {@code buf}
+     * starting at {@code off}. If fewer than {@code siz} bytes are available the output
+     * is zero-padded; surplus bytes in the array are ignored.
+     *
+     * @param buf the array of bytes to be sent
+     * @param off offset in the array to start sending from
+     * @param siz the number of bytes to be sent
+     * @throws IOException if an I/O error occurs
+     */
+    public void send(byte[] buf, int off, int siz) throws IOException {
+        final int available = buf.length - off;
+        pgOutput.write(buf, off, Math.min(available, siz));
+        // Pad with zeros when the array holds fewer than siz bytes past off.
+        int padding = siz - available;
+        while (padding > 0) {
+            pgOutput.write(0);
+            padding--;
+        }
+    }
 
-    increaseByteCounter(dataToReadSize);
-    OutOfMemoryError oom = null;
-    for (int i = 0; i < nf; i++) {
-      int size = receiveInteger4();
-      if (size != -1) {
+    /**
+     * Send a fixed-size array of bytes to the backend. If {@code length < siz}, pad with zeros. If
+     * {@code length > siz}, truncate the array.
+     *
+     * @param writer the stream writer to invoke to send the bytes
+     * @throws IOException if an I/O error occurs
+     */
+    public void send(ByteStreamWriter writer) throws IOException {
+        final FixedLengthOutputStream fixedLengthStream = new FixedLengthOutputStream(writer.getLength(), pgOutput);
         try {
-          answer[i] = new byte[size];
-          receive(answer[i], 0, size);
-        } catch (OutOfMemoryError oome) {
-          oom = oome;
-          skip(size);
+            writer.writeTo(new ByteStreamWriter.ByteStreamTarget() {
+                @Override
+                public OutputStream getOutputStream() {
+                    return fixedLengthStream;
+                }
+            });
+        } catch (IOException ioe) {
+            throw ioe;
+        } catch (Exception re) {
+            throw new IOException("Error writing bytes to stream", re);
         }
-      }
-    }
-
-    if (oom != null) {
-      throw oom;
-    }
-
-    return new Tuple(answer);
-  }
-
-  /**
-   * Reads in a given number of bytes from the backend.
-   *
-   * @param siz number of bytes to read
-   * @return array of bytes received
-   * @throws IOException if a data I/O error occurs
-   */
-  public byte[] receive(int siz) throws IOException {
-    byte[] answer = new byte[siz];
-    receive(answer, 0, siz);
-    return answer;
-  }
-
-  /**
-   * Reads in a given number of bytes from the backend.
-   *
-   * @param buf buffer to store result
-   * @param off offset in buffer
-   * @param siz number of bytes to read
-   * @throws IOException if a data I/O error occurs
-   */
-  public void receive(byte[] buf, int off, int siz) throws IOException {
-    int s = 0;
-
-    while (s < siz) {
-      int w = pgInput.read(buf, off + s, siz - s);
-      if (w < 0) {
-        throw new EOFException();
-      }
-      s += w;
-    }
-  }
-
-  public void skip(int size) throws IOException {
-    long s = 0;
-    while (s < size) {
-      s += pgInput.skip(size - s);
-    }
-  }
-
-  /**
-   * Copy data from an input stream to the connection.
-   *
-   * @param inStream the stream to read data from
-   * @param remaining the number of bytes to copy
-   * @throws IOException if a data I/O error occurs
-   */
-  public void sendStream(InputStream inStream, int remaining) throws IOException {
-    int expectedLength = remaining;
-    byte[] streamBuffer = this.streamBuffer;
-    if (streamBuffer == null) {
-      this.streamBuffer = streamBuffer = new byte[8192];
-    }
-
-    while (remaining > 0) {
-      int count = remaining > streamBuffer.length ? streamBuffer.length : remaining;
-      int readCount;
-
-      try {
-        readCount = inStream.read(streamBuffer, 0, count);
-        if (readCount < 0) {
-          throw new EOFException(
-              GT.tr("Premature end of input stream, expected {0} bytes, but only read {1}.",
-                  expectedLength, expectedLength - remaining));
+        for (int i = fixedLengthStream.remaining(); i > 0; i--) {
+            pgOutput.write(0);
         }
-      } catch (IOException ioe) {
+    }
+
+    /**
+     * Looks at the next character from the backend without consuming it; the protocol
+     * stream position is left unchanged.
+     *
+     * @return the character received
+     * @throws IOException if an I/O Error occurs or the stream has ended
+     */
+    public int peekChar() throws IOException {
+        final int ch = pgInput.peek();
+        if (ch < 0) {
+            throw new EOFException();
+        }
+        return ch;
+    }
+
+    /**
+     * Reads and consumes a single character from the backend.
+     *
+     * @return the character received
+     * @throws IOException if an I/O Error occurs or the stream has ended
+     */
+    public int receiveChar() throws IOException {
+        final int ch = pgInput.read();
+        if (ch < 0) {
+            throw new EOFException();
+        }
+        return ch;
+    }
+
+    /**
+     * Receives a four byte integer from the backend.
+     *
+     * @return the integer received from the backend
+     * @throws IOException if an I/O error occurs
+     */
+    public int receiveInteger4() throws IOException {
+        if (pgInput.read(int4Buf) != 4) {
+            throw new EOFException();
+        }
+
+        return (int4Buf[0] & 0xFF) << 24 | (int4Buf[1] & 0xFF) << 16 | (int4Buf[2] & 0xFF) << 8
+                | int4Buf[3] & 0xFF;
+    }
+
+    /**
+     * Reads a 2-byte big-endian integer from the backend.
+     *
+     * @return the integer received from the backend
+     * @throws IOException if an I/O error occurs or fewer than 2 bytes are available
+     */
+    public int receiveInteger2() throws IOException {
+        if (pgInput.read(int2Buf) != 2) {
+            throw new EOFException();
+        }
+        // Assemble the value most-significant byte first.
+        return ((int2Buf[0] & 0xFF) << 8) + (int2Buf[1] & 0xFF);
+    }
+
+    /**
+     * Reads a fixed-size string from the backend, decoded with the connection encoding.
+     *
+     * @param len the length of the string to receive, in bytes.
+     * @return the decoded string
+     * @throws IOException if something wrong happens
+     */
+    public String receiveString(int len) throws IOException {
+        if (!pgInput.ensureBytes(len)) {
+            throw new EOFException();
+        }
+
+        // Decode directly out of the input buffer, then consume the bytes.
+        final String decoded = encoding.decode(pgInput.getBuffer(), pgInput.getIndex(), len);
+        pgInput.skip(len);
+        return decoded;
+    }
+
+    /**
+     * Receives a fixed-size string from the backend, and tries to avoid "UTF-8 decode failed"
+     * errors. Used for server error messages, whose encoding may not match the connection
+     * encoding (e.g. errors raised before encoding negotiation completes).
+     *
+     * @param len the length of the string to receive, in bytes.
+     * @return the decoded string
+     * @throws IOException if something wrong happens
+     */
+    public EncodingPredictor.DecodeResult receiveErrorString(int len) throws IOException {
+        if (!pgInput.ensureBytes(len)) {
+            throw new EOFException();
+        }
+
+        EncodingPredictor.DecodeResult res;
+        try {
+            String value = encoding.decode(pgInput.getBuffer(), pgInput.getIndex(), len);
+            // no autodetect warning as the message was converted on its own
+            res = new EncodingPredictor.DecodeResult(value, null);
+        } catch (IOException e) {
+            // The configured encoding failed to decode: let EncodingPredictor try to
+            // guess the message's charset instead of surfacing a decode failure.
+            res = EncodingPredictor.decode(pgInput.getBuffer(), pgInput.getIndex(), len);
+            if (res == null) {
+                // Last resort: decode with the JVM default encoding, and record which
+                // encoding was actually used alongside the decoded value.
+                Encoding enc = Encoding.defaultEncoding();
+                String value = enc.decode(pgInput.getBuffer(), pgInput.getIndex(), len);
+                res = new EncodingPredictor.DecodeResult(value, enc.name());
+            }
+        }
+        // Consume the bytes regardless of which decoding path produced the result.
+        pgInput.skip(len);
+        return res;
+    }
+
+    /**
+     * Reads a null-terminated string from the backend. If we don't see a null, then we assume
+     * something has gone wrong.
+     *
+     * @return string from back end
+     * @throws IOException if an I/O error occurs, or end of file
+     */
+    public String receiveString() throws IOException {
+        final int len = pgInput.scanCStringLength();
+        // len includes the trailing NUL; decode everything before it, then skip past it.
+        final String decoded = encoding.decode(pgInput.getBuffer(), pgInput.getIndex(), len - 1);
+        pgInput.skip(len);
+        return decoded;
+    }
+
+    /**
+     * Reads a null-terminated string from the backend and decodes it to a
+     * {@link Encoding#decodeCanonicalized(byte[], int, int) canonical} {@code String}.
+     * If we don't see a null, then we assume something has gone wrong.
+     *
+     * @return string from back end
+     * @throws IOException if an I/O error occurs, or end of file
+     * @see Encoding#decodeCanonicalized(byte[], int, int)
+     */
+    public String receiveCanonicalString() throws IOException {
+        final int len = pgInput.scanCStringLength();
+        // len includes the trailing NUL; decode everything before it, then skip past it.
+        final String decoded = encoding.decodeCanonicalized(pgInput.getBuffer(), pgInput.getIndex(), len - 1);
+        pgInput.skip(len);
+        return decoded;
+    }
+
+    /**
+     * Reads a null-terminated string from the backend and attempts to decode to a
+     * {@link Encoding#decodeCanonicalizedIfPresent(byte[], int, int) canonical} {@code String}.
+     * If we don't see a null, then we assume something has gone wrong.
+     *
+     * @return string from back end
+     * @throws IOException if an I/O error occurs, or end of file
+     * @see Encoding#decodeCanonicalizedIfPresent(byte[], int, int)
+     */
+    public String receiveCanonicalStringIfPresent() throws IOException {
+        final int len = pgInput.scanCStringLength();
+        // len includes the trailing NUL; decode everything before it, then skip past it.
+        final String decoded = encoding.decodeCanonicalizedIfPresent(pgInput.getBuffer(), pgInput.getIndex(), len - 1);
+        pgInput.skip(len);
+        return decoded;
+    }
+
+    /**
+     * Read a tuple from the back end. A tuple is a two dimensional array of bytes. This variant reads
+     * the V3 protocol's tuple representation.
+     *
+     * @return tuple from the back end
+     * @throws IOException  if a data I/O error occurs
+     * @throws SQLException if read more bytes than set maxResultBuffer
+     */
+    public Tuple receiveTupleV3() throws IOException, OutOfMemoryError, SQLException {
+        int messageSize = receiveInteger4(); // MESSAGE SIZE
+        int nf = receiveInteger2();
+        //size = messageSize - 4 bytes of message size - 2 bytes of field count - 4 bytes for each column length
+        int dataToReadSize = messageSize - 4 - 2 - 4 * nf;
+        // Track the largest row seen, used for adaptive fetch sizing.
+        setMaxRowSizeBytes(dataToReadSize);
+
+        byte[][] answer = new byte[nf][];
+
+        // May throw if this row pushes the accumulated result past maxResultBuffer.
+        increaseByteCounter(dataToReadSize);
+        OutOfMemoryError oom = null;
+        for (int i = 0; i < nf; i++) {
+            int size = receiveInteger4();
+            // A length of -1 denotes SQL NULL; answer[i] stays null.
+            if (size != -1) {
+                try {
+                    answer[i] = new byte[size];
+                    receive(answer[i], 0, size);
+                } catch (OutOfMemoryError oome) {
+                    // Remember the OOM but keep draining the remaining columns so the
+                    // protocol stream stays in sync; rethrow once the row is consumed.
+                    oom = oome;
+                    skip(size);
+                }
+            }
+        }
+
+        if (oom != null) {
+            throw oom;
+        }
+
+        return new Tuple(answer);
+    }
+
+    /**
+     * Reads exactly {@code siz} bytes from the backend into a freshly allocated array.
+     *
+     * @param siz number of bytes to read
+     * @return array of bytes received
+     * @throws IOException if a data I/O error occurs
+     */
+    public byte[] receive(int siz) throws IOException {
+        final byte[] result = new byte[siz];
+        receive(result, 0, siz);
+        return result;
+    }
+
+    /**
+     * Reads exactly {@code siz} bytes from the backend into {@code buf} at {@code off},
+     * looping until the request is satisfied.
+     *
+     * @param buf buffer to store result
+     * @param off offset in buffer
+     * @param siz number of bytes to read
+     * @throws IOException if a data I/O error occurs or the stream ends early
+     */
+    public void receive(byte[] buf, int off, int siz) throws IOException {
+        int filled = 0;
+        while (filled < siz) {
+            final int n = pgInput.read(buf, off + filled, siz - filled);
+            if (n < 0) {
+                throw new EOFException();
+            }
+            filled += n;
+        }
+    }
+
+    /**
+     * Discards exactly {@code size} bytes from the backend stream.
+     *
+     * @param size number of bytes to skip
+     * @throws IOException if an I/O error occurs, or if the stream ends before
+     *                     {@code size} bytes could be skipped
+     */
+    public void skip(int size) throws IOException {
+        long skipped = 0;
+        while (skipped < size) {
+            long n = pgInput.skip(size - skipped);
+            if (n <= 0) {
+                // InputStream.skip may return 0 without reaching the requested count
+                // (e.g. at end of stream); without this guard the loop spins forever.
+                throw new EOFException();
+            }
+            skipped += n;
+        }
+    }
+
+    /**
+     * Copy data from an input stream to the connection.
+     *
+     * @param inStream  the stream to read data from
+     * @param remaining the number of bytes to copy
+     * @throws IOException if a data I/O error occurs
+     */
+    public void sendStream(InputStream inStream, int remaining) throws IOException {
+        int expectedLength = remaining;
+        byte[] streamBuffer = this.streamBuffer;
+        if (streamBuffer == null) {
+            this.streamBuffer = streamBuffer = new byte[8192];
+        }
+
         while (remaining > 0) {
-          send(streamBuffer, count);
-          remaining -= count;
-          count = remaining > streamBuffer.length ? streamBuffer.length : remaining;
+            int count = remaining > streamBuffer.length ? streamBuffer.length : remaining;
+            int readCount;
+
+            try {
+                readCount = inStream.read(streamBuffer, 0, count);
+                if (readCount < 0) {
+                    throw new EOFException(
+                            GT.tr("Premature end of input stream, expected {0} bytes, but only read {1}.",
+                                    expectedLength, expectedLength - remaining));
+                }
+            } catch (IOException ioe) {
+                while (remaining > 0) {
+                    send(streamBuffer, count);
+                    remaining -= count;
+                    count = remaining > streamBuffer.length ? streamBuffer.length : remaining;
+                }
+                throw new PGBindException(ioe);
+            }
+
+            send(streamBuffer, readCount);
+            remaining -= readCount;
         }
-        throw new PGBindException(ioe);
-      }
-
-      send(streamBuffer, readCount);
-      remaining -= readCount;
-    }
-  }
-
-  /**
-   * Flush any pending output to the backend.
-   *
-   * @throws IOException if an I/O error occurs
-   */
-  @Override
-  public void flush() throws IOException {
-    if (encodingWriter != null) {
-      encodingWriter.flush();
-    }
-    pgOutput.flush();
-  }
-
-  /**
-   * Consume an expected EOF from the backend.
-   *
-   * @throws IOException if an I/O error occurs
-   * @throws SQLException if we get something other than an EOF
-   */
-  public void receiveEOF() throws SQLException, IOException {
-    int c = pgInput.read();
-    if (c < 0) {
-      return;
-    }
-    throw new PSQLException(GT.tr("Expected an EOF from server, got: {0}", c),
-        PSQLState.COMMUNICATION_ERROR);
-  }
-
-  /**
-   * Closes the connection.
-   *
-   * @throws IOException if an I/O Error occurs
-   */
-  @Override
-  public void close() throws IOException {
-    if (encodingWriter != null) {
-      encodingWriter.close();
     }
 
-    pgOutput.close();
-    pgInput.close();
-    connection.close();
-  }
-
-  public void setNetworkTimeout(int milliseconds) throws IOException {
-    connection.setSoTimeout(milliseconds);
-    pgInput.setTimeoutRequested(milliseconds != 0);
-  }
-
-  public int getNetworkTimeout() throws IOException {
-    return connection.getSoTimeout();
-  }
-
-  /**
-   * Method to set MaxResultBuffer inside PGStream.
-   *
-   * @param value value of new max result buffer as string (cause we can expect % or chars to use
-   *              multiplier)
-   * @throws PSQLException exception returned when occurred parsing problem.
-   */
-  public void setMaxResultBuffer(String value) throws PSQLException {
-    maxResultBuffer = PGPropertyMaxResultBufferParser.parseProperty(value);
-  }
-
-  /**
-   * Get MaxResultBuffer from PGStream.
-   *
-   * @return size of MaxResultBuffer
-   */
-  public long getMaxResultBuffer() {
-    return maxResultBuffer;
-  }
-
-  /**
-   * The idea behind this method is to keep in maxRowSize the size of biggest read data row. As
-   * there may be many data rows send after each other for a query, then value in maxRowSize would
-   * contain value noticed so far, because next data rows and their sizes are not read for that
-   * moment. We want it increasing, because the size of the biggest among data rows will be used
-   * during computing new adaptive fetch size for the query.
-   *
-   * @param rowSizeBytes new value to be set as maxRowSizeBytes
-   */
-  public void setMaxRowSizeBytes(int rowSizeBytes) {
-    if (rowSizeBytes > maxRowSizeBytes) {
-      maxRowSizeBytes = rowSizeBytes;
+    /**
+     * Flush any pending output to the backend.
+     *
+     * @throws IOException if an I/O error occurs
+     */
+    @Override
+    public void flush() throws IOException {
+        if (encodingWriter != null) {
+            encodingWriter.flush();
+        }
+        pgOutput.flush();
     }
-  }
 
-  /**
-   * Get actual max row size noticed so far.
-   *
-   * @return value of max row size
-   */
-  public int getMaxRowSizeBytes() {
-    return maxRowSizeBytes;
-  }
-
-  /**
-   * Clear value of max row size noticed so far.
-   */
-  public void clearMaxRowSizeBytes() {
-    maxRowSizeBytes = -1;
-  }
-
-  /**
-   * Clear count of byte buffer.
-   */
-  public void clearResultBufferCount() {
-    resultBufferByteCount = 0;
-  }
-
-  /**
-   * Increase actual count of buffer. If buffer count is bigger than max result buffer limit, then
-   * gonna return an exception.
-   *
-   * @param value size of bytes to add to byte buffer.
-   * @throws SQLException exception returned when result buffer count is bigger than max result
-   *                      buffer.
-   */
-  private void increaseByteCounter(long value) throws SQLException {
-    if (maxResultBuffer != -1) {
-      resultBufferByteCount += value;
-      if (resultBufferByteCount > maxResultBuffer) {
-        throw new PSQLException(GT.tr(
-          "Result set exceeded maxResultBuffer limit. Received:  {0}; Current limit: {1}",
-          String.valueOf(resultBufferByteCount), String.valueOf(maxResultBuffer)), PSQLState.COMMUNICATION_ERROR);
-      }
+    /**
+     * Consume an expected EOF from the backend.
+     *
+     * @throws IOException  if an I/O error occurs
+     * @throws SQLException if we get something other than an EOF
+     */
+    public void receiveEOF() throws SQLException, IOException {
+        int c = pgInput.read();
+        if (c < 0) {
+            return;
+        }
+        throw new PSQLException(GT.tr("Expected an EOF from server, got: {0}", c),
+                PSQLState.COMMUNICATION_ERROR);
     }
-  }
 
-  public boolean isClosed() {
-    return connection.isClosed();
-  }
+    /**
+     * Closes the connection. All underlying resources are closed even if an earlier
+     * close fails: previously an exception from {@code encodingWriter.close()} or
+     * {@code pgOutput.close()} left the input stream and socket open (resource leak).
+     *
+     * @throws IOException if an I/O Error occurs
+     */
+    @Override
+    public void close() throws IOException {
+        try {
+            if (encodingWriter != null) {
+                encodingWriter.close();
+            }
+        } finally {
+            try {
+                pgOutput.close();
+            } finally {
+                try {
+                    pgInput.close();
+                } finally {
+                    // Always release the underlying connection last.
+                    connection.close();
+                }
+            }
+        }
+    }
+
+    /**
+     * Returns the current socket read timeout in milliseconds (0 means no timeout).
+     *
+     * @return the SO_TIMEOUT value of the underlying connection
+     * @throws IOException if the timeout cannot be read from the connection
+     */
+    public int getNetworkTimeout() throws IOException {
+        return connection.getSoTimeout();
+    }
+
+    /**
+     * Sets the socket read timeout.
+     *
+     * @param milliseconds timeout in milliseconds; 0 disables the timeout
+     * @throws IOException if the timeout cannot be applied to the connection
+     */
+    public void setNetworkTimeout(int milliseconds) throws IOException {
+        connection.setSoTimeout(milliseconds);
+        // NOTE(review): presumably lets the buffered input distinguish timeout-induced
+        // short reads from a real end of stream — confirm in the pgInput implementation.
+        pgInput.setTimeoutRequested(milliseconds != 0);
+    }
+
+    /**
+     * Gets the current MaxResultBuffer limit of this PGStream.
+     *
+     * @return size of MaxResultBuffer in bytes, or -1 when no limit is set
+     */
+    public long getMaxResultBuffer() {
+        return maxResultBuffer;
+    }
+
+    /**
+     * Sets the MaxResultBuffer limit of this PGStream from its string representation.
+     *
+     * @param value the new max result buffer size as a string (the value may carry a
+     *              percent sign or a size-multiplier suffix, hence the string form)
+     * @throws PSQLException if the value cannot be parsed
+     */
+    public void setMaxResultBuffer(String value) throws PSQLException {
+        maxResultBuffer = PGPropertyMaxResultBufferParser.parseProperty(value);
+    }
+
+    /**
+     * Gets the size of the largest data row noticed so far.
+     *
+     * @return value of max row size in bytes
+     */
+    public int getMaxRowSizeBytes() {
+        return maxRowSizeBytes;
+    }
+
+    /**
+     * Records the size of the largest data row seen so far. Many data rows may arrive
+     * back to back for a query, so this keeps a running maximum: the value only ever
+     * grows, and the largest row size feeds the adaptive fetch size computation for
+     * the query.
+     *
+     * @param rowSizeBytes candidate value for maxRowSizeBytes
+     */
+    public void setMaxRowSizeBytes(int rowSizeBytes) {
+        maxRowSizeBytes = Math.max(maxRowSizeBytes, rowSizeBytes);
+    }
+
+    /**
+     * Clears the max row size noticed so far, resetting it to the -1 sentinel.
+     */
+    public void clearMaxRowSizeBytes() {
+        maxRowSizeBytes = -1;
+    }
+
+    /**
+     * Resets the running count of result bytes checked against maxResultBuffer.
+     */
+    public void clearResultBufferCount() {
+        resultBufferByteCount = 0;
+    }
+
+    /**
+     * Adds {@code value} to the running result-buffer byte count and fails if the total
+     * exceeds the configured maxResultBuffer limit.
+     *
+     * @param value number of bytes to add to the counter
+     * @throws SQLException if the accumulated count exceeds maxResultBuffer
+     */
+    private void increaseByteCounter(long value) throws SQLException {
+        // -1 means no limit is configured; nothing to track in that case.
+        if (maxResultBuffer == -1) {
+            return;
+        }
+        resultBufferByteCount += value;
+        if (resultBufferByteCount > maxResultBuffer) {
+            throw new PSQLException(GT.tr(
+                    "Result set exceeded maxResultBuffer limit. Received:  {0}; Current limit: {1}",
+                    String.valueOf(resultBufferByteCount), String.valueOf(maxResultBuffer)), PSQLState.COMMUNICATION_ERROR);
+        }
+    }
+
+    /**
+     * Reports whether the underlying connection has been closed.
+     *
+     * @return {@code true} if the underlying connection reports itself closed
+     */
+    public boolean isClosed() {
+        return connection.isClosed();
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/core/ParameterList.java b/pgjdbc/src/main/java/org/postgresql/core/ParameterList.java
index 5288184..e71a8e1 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/ParameterList.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/ParameterList.java
@@ -6,10 +6,9 @@
 
 package org.postgresql.core;
 
-import org.postgresql.util.ByteStreamWriter;
-
 import java.io.InputStream;
 import java.sql.SQLException;
+import org.postgresql.util.ByteStreamWriter;
 
 /**
  * <p>Abstraction of a list of parameters to be substituted into a Query. The protocol-specific details
@@ -25,184 +24,186 @@ import java.sql.SQLException;
  * @author Oliver Jowett (oliver@opencloud.com)
  */
 public interface ParameterList {
-  void registerOutParameter(int index, int sqlType) throws SQLException;
+    void registerOutParameter(int index, int sqlType) throws SQLException;
 
-  /**
-   * Get the number of parameters in this list. This value never changes for a particular instance,
-   * and might be zero.
-   *
-   * @return the number of parameters in this list.
-   */
-  int getParameterCount();
+    /**
+     * Get the number of parameters in this list. This value never changes for a particular instance,
+     * and might be zero.
+     *
+     * @return the number of parameters in this list.
+     */
+    int getParameterCount();
 
-  /**
-   * Get the number of IN parameters in this list.
-   *
-   * @return the number of IN parameters in this list
-   */
-  int getInParameterCount();
+    /**
+     * Get the number of IN parameters in this list.
+     *
+     * @return the number of IN parameters in this list
+     */
+    int getInParameterCount();
 
-  /**
-   * Get the number of OUT parameters in this list.
-   *
-   * @return the number of OUT parameters in this list
-   */
-  int getOutParameterCount();
+    /**
+     * Get the number of OUT parameters in this list.
+     *
+     * @return the number of OUT parameters in this list
+     */
+    int getOutParameterCount();
 
-  /**
-   * Return the oids of the parameters in this list. May be null for a ParameterList that does not
-   * support typing of parameters.
-   *
-   * @return oids of the parameters
-   */
-  int[] getTypeOIDs();
+    /**
+     * Return the oids of the parameters in this list. May be null for a ParameterList that does not
+     * support typing of parameters.
+     *
+     * @return oids of the parameters
+     */
+    int[] getTypeOIDs();
 
-  /**
-   * Binds an integer value to a parameter. The type of the parameter is implicitly 'int4'.
-   *
-   * @param index the 1-based parameter index to bind.
-   * @param value the integer value to use.
-   * @throws SQLException on error or if <code>index</code> is out of range
-   */
-  void setIntParameter(int index, int value) throws SQLException;
+    /**
+     * Binds an integer value to a parameter. The type of the parameter is implicitly 'int4'.
+     *
+     * @param index the 1-based parameter index to bind.
+     * @param value the integer value to use.
+     * @throws SQLException on error or if <code>index</code> is out of range
+     */
+    void setIntParameter(int index, int value) throws SQLException;
 
-  /**
-   * Binds a String value that is an unquoted literal to the server's query parser (for example, a
-   * bare integer) to a parameter. Associated with the parameter is a typename for the parameter
-   * that should correspond to an entry in pg_types.
-   *
-   * @param index the 1-based parameter index to bind.
-   * @param value the unquoted literal string to use.
-   * @param oid the type OID of the parameter, or <code>0</code> to infer the type.
-   * @throws SQLException on error or if <code>index</code> is out of range
-   */
-  void setLiteralParameter(int index,
-      String value, int oid) throws SQLException;
+    /**
+     * Binds a String value that is an unquoted literal to the server's query parser (for example, a
+     * bare integer) to a parameter. Associated with the parameter is a typename for the parameter
+     * that should correspond to an entry in pg_types.
+     *
+     * @param index the 1-based parameter index to bind.
+     * @param value the unquoted literal string to use.
+     * @param oid   the type OID of the parameter, or <code>0</code> to infer the type.
+     * @throws SQLException on error or if <code>index</code> is out of range
+     */
+    void setLiteralParameter(int index,
+                             String value, int oid) throws SQLException;
 
-  /**
-   * Binds a String value that needs to be quoted for the server's parser to understand (for
-   * example, a timestamp) to a parameter. Associated with the parameter is a typename for the
-   * parameter that should correspond to an entry in pg_types.
-   *
-   * @param index the 1-based parameter index to bind.
-   * @param value the quoted string to use.
-   * @param oid the type OID of the parameter, or <code>0</code> to infer the type.
-   * @throws SQLException on error or if <code>index</code> is out of range
-   */
-  void setStringParameter(int index, String value, int oid) throws SQLException;
+    /**
+     * Binds a String value that needs to be quoted for the server's parser to understand (for
+     * example, a timestamp) to a parameter. Associated with the parameter is a typename for the
+     * parameter that should correspond to an entry in pg_types.
+     *
+     * @param index the 1-based parameter index to bind.
+     * @param value the quoted string to use.
+     * @param oid   the type OID of the parameter, or <code>0</code> to infer the type.
+     * @throws SQLException on error or if <code>index</code> is out of range
+     */
+    void setStringParameter(int index, String value, int oid) throws SQLException;
 
-  /**
-   * Binds a binary bytea value stored as a bytearray to a parameter. The parameter's type is
-   * implicitly set to 'bytea'. The bytearray's contains should remain unchanged until query
-   * execution has completed.
-   *
-   * @param index the 1-based parameter index to bind.
-   * @param data an array containing the raw data value
-   * @param offset the offset within <code>data</code> of the start of the parameter data.
-   * @param length the number of bytes of parameter data within <code>data</code> to use.
-   * @throws SQLException on error or if <code>index</code> is out of range
-   */
-  void setBytea(int index, byte[] data,
-      int offset, int length) throws SQLException;
+    /**
+     * Binds a binary bytea value stored as a bytearray to a parameter. The parameter's type is
+     * implicitly set to 'bytea'. The bytearray's contains should remain unchanged until query
+     * execution has completed.
+     *
+     * @param index  the 1-based parameter index to bind.
+     * @param data   an array containing the raw data value
+     * @param offset the offset within <code>data</code> of the start of the parameter data.
+     * @param length the number of bytes of parameter data within <code>data</code> to use.
+     * @throws SQLException on error or if <code>index</code> is out of range
+     */
+    void setBytea(int index, byte[] data,
+                  int offset, int length) throws SQLException;
 
-  /**
-   * Binds a binary bytea value stored as an InputStream. The parameter's type is implicitly set to
-   * 'bytea'. The stream should remain valid until query execution has completed.
-   *
-   * @param index the 1-based parameter index to bind.
-   * @param stream a stream containing the parameter data.
-   * @param length the number of bytes of parameter data to read from <code>stream</code>.
-   * @throws SQLException on error or if <code>index</code> is out of range
-   */
-  void setBytea(int index, InputStream stream, int length) throws SQLException;
+    /**
+     * Binds a binary bytea value stored as an InputStream. The parameter's type is implicitly set to
+     * 'bytea'. The stream should remain valid until query execution has completed.
+     *
+     * @param index  the 1-based parameter index to bind.
+     * @param stream a stream containing the parameter data.
+     * @param length the number of bytes of parameter data to read from <code>stream</code>.
+     * @throws SQLException on error or if <code>index</code> is out of range
+     */
+    void setBytea(int index, InputStream stream, int length) throws SQLException;
 
-  /**
-   * Binds a binary bytea value stored as an InputStream. The parameter's type is implicitly set to
-   * 'bytea'. The stream should remain valid until query execution has completed.
-   *
-   * @param index the 1-based parameter index to bind.
-   * @param stream a stream containing the parameter data.
-   * @throws SQLException on error or if <code>index</code> is out of range
-   */
-  void setBytea(int index, InputStream stream) throws SQLException;
+    /**
+     * Binds a binary bytea value stored as an InputStream. The parameter's type is implicitly set to
+     * 'bytea'. The stream should remain valid until query execution has completed.
+     *
+     * @param index  the 1-based parameter index to bind.
+     * @param stream a stream containing the parameter data.
+     * @throws SQLException on error or if <code>index</code> is out of range
+     */
+    void setBytea(int index, InputStream stream) throws SQLException;
 
-  /**
-   * Binds a binary bytea value stored as a ByteStreamWriter. The parameter's type is implicitly set to
-   * 'bytea'. The stream should remain valid until query execution has completed.
-   *
-   * @param index the 1-based parameter index to bind.
-   * @param writer a writer that can write the bytes for the parameter
-   * @throws SQLException on error or if <code>index</code> is out of range
-   */
-  void setBytea(int index, ByteStreamWriter writer) throws SQLException;
+    /**
+     * Binds a binary bytea value stored as a ByteStreamWriter. The parameter's type is implicitly set to
+     * 'bytea'. The writer should remain valid until query execution has completed.
+     *
+     * @param index  the 1-based parameter index to bind.
+     * @param writer a writer that can write the bytes for the parameter
+     * @throws SQLException on error or if <code>index</code> is out of range
+     */
+    void setBytea(int index, ByteStreamWriter writer) throws SQLException;
 
-  /**
-   * Binds a text value stored as an InputStream that is a valid UTF-8 byte stream.
-   * Any byte-order marks (BOM) in the stream are passed to the backend.
-   * The parameter's type is implicitly set to 'text'.
-   * The stream should remain valid until query execution has completed.
-   *
-   * @param index the 1-based parameter index to bind.
-   * @param stream a stream containing the parameter data.
-   * @throws SQLException on error or if <code>index</code> is out of range
-   */
-  void setText(int index, InputStream stream) throws SQLException;
+    /**
+     * Binds a text value stored as an InputStream that is a valid UTF-8 byte stream.
+     * Any byte-order marks (BOM) in the stream are passed to the backend.
+     * The parameter's type is implicitly set to 'text'.
+     * The stream should remain valid until query execution has completed.
+     *
+     * @param index  the 1-based parameter index to bind.
+     * @param stream a stream containing the parameter data.
+     * @throws SQLException on error or if <code>index</code> is out of range
+     */
+    void setText(int index, InputStream stream) throws SQLException;
 
-  /**
-   * Binds given byte[] value to a parameter. The bytes must already be in correct format matching
-   * the OID.
-   *
-   * @param index the 1-based parameter index to bind.
-   * @param value the bytes to send.
-   * @param oid the type OID of the parameter.
-   * @throws SQLException on error or if <code>index</code> is out of range
-   */
-  void setBinaryParameter(int index, byte[] value, int oid) throws SQLException;
+    /**
+     * Binds given byte[] value to a parameter. The bytes must already be in correct format matching
+     * the OID.
+     *
+     * @param index the 1-based parameter index to bind.
+     * @param value the bytes to send.
+     * @param oid   the type OID of the parameter.
+     * @throws SQLException on error or if <code>index</code> is out of range
+     */
+    void setBinaryParameter(int index, byte[] value, int oid) throws SQLException;
 
-  /**
-   * Binds a SQL NULL value to a parameter. Associated with the parameter is a typename for the
-   * parameter that should correspond to an entry in pg_types.
-   *
-   * @param index the 1-based parameter index to bind.
-   * @param oid the type OID of the parameter, or <code>0</code> to infer the type.
-   * @throws SQLException on error or if <code>index</code> is out of range
-   */
-  void setNull(int index, int oid) throws SQLException;
+    /**
+     * Binds a SQL NULL value to a parameter. Associated with the parameter is a typename for the
+     * parameter that should correspond to an entry in pg_types.
+     *
+     * @param index the 1-based parameter index to bind.
+     * @param oid   the type OID of the parameter, or <code>0</code> to infer the type.
+     * @throws SQLException on error or if <code>index</code> is out of range
+     */
+    void setNull(int index, int oid) throws SQLException;
 
-  /**
-   * Perform a shallow copy of this ParameterList, returning a new instance (still suitable for
-   * passing to the owning Query). If this ParameterList is immutable, copy() may return the same
-   * immutable object.
-   *
-   * @return a new ParameterList instance
-   */
-  ParameterList copy();
+    /**
+     * Perform a shallow copy of this ParameterList, returning a new instance (still suitable for
+     * passing to the owning Query). If this ParameterList is immutable, copy() may return the same
+     * immutable object.
+     *
+     * @return a new ParameterList instance
+     */
+    ParameterList copy();
 
-  /**
-   * Unbind all parameter values bound in this list.
-   */
-  void clear();
+    /**
+     * Unbind all parameter values bound in this list.
+     */
+    void clear();
 
-  /**
-   * Return a human-readable representation of a particular parameter in this ParameterList. If the
-   * parameter is not bound, returns "?".
-   *
-   * @param index the 1-based parameter index to bind.
-   * @param standardConformingStrings true if \ is not an escape character in strings literals
-   * @return a string representation of the parameter.
-   */
-  String toString(int index, boolean standardConformingStrings);
+    /**
+     * Return a human-readable representation of a particular parameter in this ParameterList. If the
+     * parameter is not bound, returns "?".
+     *
+     * @param index                     the 1-based parameter index to bind.
+     * @param standardConformingStrings true if \ is not an escape character in string literals
+     * @return a string representation of the parameter.
+     */
+    String toString(int index, boolean standardConformingStrings);
 
-  /**
-   * Use this operation to append more parameters to the current list.
-   * @param list of parameters to append with.
-   * @throws SQLException fault raised if driver or back end throw an exception
-   */
-  void appendAll(ParameterList list) throws SQLException ;
+    /**
+     * Use this operation to append more parameters to the current list.
+     *
+     * @param list the list of parameters to append.
+     * @throws SQLException fault raised if driver or back end throw an exception
+     */
+    void appendAll(ParameterList list) throws SQLException;
 
-  /**
-   * Returns the bound parameter values.
-   * @return Object array containing the parameter values.
-   */
-  Object [] getValues();
+    /**
+     * Returns the bound parameter values.
+     *
+     * @return Object array containing the parameter values.
+     */
+    Object[] getValues();
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/core/Parser.java b/pgjdbc/src/main/java/org/postgresql/core/Parser.java
index ba0a105..c4322fa 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/Parser.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/Parser.java
@@ -5,19 +5,18 @@
 
 package org.postgresql.core;
 
-import org.postgresql.jdbc.EscapeSyntaxCallMode;
-import org.postgresql.jdbc.EscapedFunctions2;
-import org.postgresql.util.GT;
-import org.postgresql.util.IntList;
-import org.postgresql.util.PSQLException;
-import org.postgresql.util.PSQLState;
-
 import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Method;
 import java.sql.SQLException;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
+import org.postgresql.jdbc.EscapeSyntaxCallMode;
+import org.postgresql.jdbc.EscapedFunctions2;
+import org.postgresql.util.GT;
+import org.postgresql.util.IntList;
+import org.postgresql.util.PSQLException;
+import org.postgresql.util.PSQLState;
 
 /**
  * Basic query parser infrastructure.
@@ -28,1554 +27,1555 @@ import java.util.List;
  */
 public class Parser {
 
-  public Parser() {
-  }
+    private static final char[] QUOTE_OR_ALPHABETIC_MARKER = {'\"', '0'};
+    private static final char[] QUOTE_OR_ALPHABETIC_MARKER_OR_PARENTHESIS = {'\"', '0', '('};
+    private static final char[] SINGLE_QUOTE = {'\''};
 
-  /**
-   * Parses JDBC query into PostgreSQL's native format. Several queries might be given if separated
-   * by semicolon.
-   *
-   * @param query                     jdbc query to parse
-   * @param standardConformingStrings whether to allow backslashes to be used as escape characters
-   *                                  in single quote literals
-   * @param withParameters            whether to replace ?, ? with $1, $2, etc
-   * @param splitStatements           whether to split statements by semicolon
-   * @param isBatchedReWriteConfigured whether re-write optimization is enabled
-   * @param quoteReturningIdentifiers whether to quote identifiers returned using returning clause
-   * @param returningColumnNames      for simple insert, update, delete add returning with given column names
-   * @return list of native queries
-   * @throws SQLException if unable to add returning clause (invalid column names)
-   */
-  public static List<NativeQuery> parseJdbcSql(String query, boolean standardConformingStrings,
-      boolean withParameters, boolean splitStatements,
-      boolean isBatchedReWriteConfigured,
-      boolean quoteReturningIdentifiers,
-      String... returningColumnNames) throws SQLException {
-    if (!withParameters && !splitStatements
-        && returningColumnNames != null && returningColumnNames.length == 0) {
-      return Collections.singletonList(new NativeQuery(query,
-        SqlCommand.createStatementTypeInfo(SqlCommandType.BLANK)));
+    public Parser() {
     }
 
-    int fragmentStart = 0;
-    int inParen = 0;
+    /**
+     * Parses JDBC query into PostgreSQL's native format. Several queries might be given if separated
+     * by semicolon.
+     *
+     * @param query                      jdbc query to parse
+     * @param standardConformingStrings  whether to allow backslashes to be used as escape characters
+     *                                   in single quote literals
+     * @param withParameters             whether to replace ?, ? with $1, $2, etc
+     * @param splitStatements            whether to split statements by semicolon
+     * @param isBatchedReWriteConfigured whether re-write optimization is enabled
+     * @param quoteReturningIdentifiers  whether to quote identifiers returned using returning clause
+     * @param returningColumnNames       for simple insert, update, delete add returning with given column names
+     * @return list of native queries
+     * @throws SQLException if unable to add returning clause (invalid column names)
+     */
+    public static List<NativeQuery> parseJdbcSql(String query, boolean standardConformingStrings,
+                                                 boolean withParameters, boolean splitStatements,
+                                                 boolean isBatchedReWriteConfigured,
+                                                 boolean quoteReturningIdentifiers,
+                                                 String... returningColumnNames) throws SQLException {
+        if (!withParameters && !splitStatements
+                && returningColumnNames != null && returningColumnNames.length == 0) {
+            return Collections.singletonList(new NativeQuery(query,
+                    SqlCommand.createStatementTypeInfo(SqlCommandType.BLANK)));
+        }
 
-    char[] aChars = query.toCharArray();
+        int fragmentStart = 0;
+        int inParen = 0;
 
-    StringBuilder nativeSql = new StringBuilder(query.length() + 10);
-    IntList bindPositions = null; // initialized on demand
-    List<NativeQuery> nativeQueries = null;
-    boolean isCurrentReWriteCompatible = false;
-    boolean isValuesFound = false;
-    int valuesParenthesisOpenPosition = -1;
-    int valuesParenthesisClosePosition = -1;
-    boolean valuesParenthesisCloseFound = false;
-    boolean isInsertPresent = false;
-    boolean isReturningPresent = false;
-    boolean isReturningPresentPrev = false;
-    boolean isBeginPresent = false;
-    boolean isBeginAtomicPresent = false;
-    SqlCommandType currentCommandType = SqlCommandType.BLANK;
-    SqlCommandType prevCommandType = SqlCommandType.BLANK;
-    int numberOfStatements = 0;
+        char[] aChars = query.toCharArray();
 
-    boolean whitespaceOnly = true;
-    int keyWordCount = 0;
-    int keywordStart = -1;
-    int keywordEnd = -1;
+        StringBuilder nativeSql = new StringBuilder(query.length() + 10);
+        IntList bindPositions = null; // initialized on demand
+        List<NativeQuery> nativeQueries = null;
+        boolean isCurrentReWriteCompatible = false;
+        boolean isValuesFound = false;
+        int valuesParenthesisOpenPosition = -1;
+        int valuesParenthesisClosePosition = -1;
+        boolean valuesParenthesisCloseFound = false;
+        boolean isInsertPresent = false;
+        boolean isReturningPresent = false;
+        boolean isReturningPresentPrev = false;
+        boolean isBeginPresent = false;
+        boolean isBeginAtomicPresent = false;
+        SqlCommandType currentCommandType = SqlCommandType.BLANK;
+        SqlCommandType prevCommandType = SqlCommandType.BLANK;
+        int numberOfStatements = 0;
+
+        boolean whitespaceOnly = true;
+        int keyWordCount = 0;
+        int keywordStart = -1;
+        int keywordEnd = -1;
     /*
     loop through looking for keywords, single quotes, double quotes, comments, dollar quotes,
     parenthesis, ? and ;
     for single/double/dollar quotes, and comments we just want to move the index
      */
-    for (int i = 0; i < aChars.length; i++) {
-      char aChar = aChars[i];
-      boolean isKeyWordChar = false;
-      // ';' is ignored as it splits the queries. We do have to deal with ; in BEGIN ATOMIC functions
-      whitespaceOnly &= aChar == ';' || Character.isWhitespace(aChar);
-      keywordEnd = i; // parseSingleQuotes, parseDoubleQuotes, etc move index so we keep old value
-      switch (aChar) {
-        case '\'': // single-quotes
-          i = Parser.parseSingleQuotes(aChars, i, standardConformingStrings);
-          break;
+        for (int i = 0; i < aChars.length; i++) {
+            char aChar = aChars[i];
+            boolean isKeyWordChar = false;
+            // ';' is ignored as it splits the queries. We do have to deal with ; in BEGIN ATOMIC functions
+            whitespaceOnly &= aChar == ';' || Character.isWhitespace(aChar);
+            keywordEnd = i; // parseSingleQuotes, parseDoubleQuotes, etc move index so we keep old value
+            switch (aChar) {
+                case '\'': // single-quotes
+                    i = Parser.parseSingleQuotes(aChars, i, standardConformingStrings);
+                    break;
 
-        case '"': // double-quotes
-          i = Parser.parseDoubleQuotes(aChars, i);
-          break;
+                case '"': // double-quotes
+                    i = Parser.parseDoubleQuotes(aChars, i);
+                    break;
 
-        case '-': // possibly -- style comment
-          i = Parser.parseLineComment(aChars, i);
-          break;
+                case '-': // possibly -- style comment
+                    i = Parser.parseLineComment(aChars, i);
+                    break;
 
-        case '/': // possibly /* */ style comment
-          i = Parser.parseBlockComment(aChars, i);
-          break;
+                case '/': // possibly /* */ style comment
+                    i = Parser.parseBlockComment(aChars, i);
+                    break;
 
-        case '$': // possibly dollar quote start
-          i = Parser.parseDollarQuotes(aChars, i);
-          break;
+                case '$': // possibly dollar quote start
+                    i = Parser.parseDollarQuotes(aChars, i);
+                    break;
 
-        // case '(' moved below to parse "values(" properly
+                // case '(' moved below to parse "values(" properly
 
-        case ')':
-          inParen--;
-          if (inParen == 0 && isValuesFound && !valuesParenthesisCloseFound) {
-            // If original statement is multi-values like VALUES (...), (...), ... then
-            // search for the latest closing paren
-            valuesParenthesisClosePosition = nativeSql.length() + i - fragmentStart;
-          }
-          break;
+                case ')':
+                    inParen--;
+                    if (inParen == 0 && isValuesFound && !valuesParenthesisCloseFound) {
+                        // If original statement is multi-values like VALUES (...), (...), ... then
+                        // search for the latest closing paren
+                        valuesParenthesisClosePosition = nativeSql.length() + i - fragmentStart;
+                    }
+                    break;
 
-        case '?':
-          nativeSql.append(aChars, fragmentStart, i - fragmentStart);
-          if (i + 1 < aChars.length && aChars[i + 1] == '?') /* replace ?? with ? */ {
-            nativeSql.append('?');
-            i++; // make sure the coming ? is not treated as a bind
-          } else {
-            if (!withParameters) {
-              nativeSql.append('?');
-            } else {
-              if (bindPositions == null) {
-                bindPositions = new IntList();
-              }
-              bindPositions.add(nativeSql.length());
-              int bindIndex = bindPositions.size();
-              nativeSql.append(NativeQuery.bindName(bindIndex));
+                case '?':
+                    nativeSql.append(aChars, fragmentStart, i - fragmentStart);
+                    if (i + 1 < aChars.length && aChars[i + 1] == '?') /* replace ?? with ? */ {
+                        nativeSql.append('?');
+                        i++; // make sure the coming ? is not treated as a bind
+                    } else {
+                        if (!withParameters) {
+                            nativeSql.append('?');
+                        } else {
+                            if (bindPositions == null) {
+                                bindPositions = new IntList();
+                            }
+                            bindPositions.add(nativeSql.length());
+                            int bindIndex = bindPositions.size();
+                            nativeSql.append(NativeQuery.bindName(bindIndex));
+                        }
+                    }
+                    fragmentStart = i + 1;
+                    break;
+
+                case ';':
+                    // we don't split the queries if BEGIN ATOMIC is present
+                    if (!isBeginAtomicPresent && inParen == 0) {
+                        if (!whitespaceOnly) {
+                            numberOfStatements++;
+                            nativeSql.append(aChars, fragmentStart, i - fragmentStart);
+                            whitespaceOnly = true;
+                        }
+                        fragmentStart = i + 1;
+                        if (nativeSql.length() > 0) {
+                            if (addReturning(nativeSql, currentCommandType, returningColumnNames, isReturningPresent, quoteReturningIdentifiers)) {
+                                isReturningPresent = true;
+                            }
+
+                            if (splitStatements) {
+                                if (nativeQueries == null) {
+                                    nativeQueries = new ArrayList<>();
+                                }
+
+                                if (!isValuesFound || !isCurrentReWriteCompatible || valuesParenthesisClosePosition == -1
+                                        || (bindPositions != null
+                                        && valuesParenthesisClosePosition < bindPositions.get(bindPositions.size() - 1))) {
+                                    valuesParenthesisOpenPosition = -1;
+                                    valuesParenthesisClosePosition = -1;
+                                }
+
+                                nativeQueries.add(new NativeQuery(nativeSql.toString(),
+                                        toIntArray(bindPositions), false,
+                                        SqlCommand.createStatementTypeInfo(
+                                                currentCommandType, isBatchedReWriteConfigured, valuesParenthesisOpenPosition,
+                                                valuesParenthesisClosePosition,
+                                                isReturningPresent, nativeQueries.size())));
+                            }
+                        }
+                        prevCommandType = currentCommandType;
+                        isReturningPresentPrev = isReturningPresent;
+                        currentCommandType = SqlCommandType.BLANK;
+                        isReturningPresent = false;
+                        if (splitStatements) {
+                            // Prepare for next query
+                            if (bindPositions != null) {
+                                bindPositions.clear();
+                            }
+                            nativeSql.setLength(0);
+                            isValuesFound = false;
+                            isCurrentReWriteCompatible = false;
+                            valuesParenthesisOpenPosition = -1;
+                            valuesParenthesisClosePosition = -1;
+                            valuesParenthesisCloseFound = false;
+                        }
+                    }
+                    break;
+
+                default:
+                    if (keywordStart >= 0) {
+                        // When we are inside a keyword, we need to detect keyword end boundary
+                        // Note that isKeyWordChar is initialized to false before the switch, so
+                        // all other characters would result in isKeyWordChar=false
+                        isKeyWordChar = isIdentifierContChar(aChar);
+                        break;
+                    }
+                    // Not in keyword, so just detect next keyword start
+                    isKeyWordChar = isIdentifierStartChar(aChar);
+                    if (isKeyWordChar) {
+                        keywordStart = i;
+                        if (valuesParenthesisOpenPosition != -1 && inParen == 0) {
+                            // When the statement already has multi-values, stop looking for more of them
+                            // Since values(?,?),(?,?),... should not contain keywords in the middle
+                            valuesParenthesisCloseFound = true;
+                        }
+                    }
+                    break;
             }
-          }
-          fragmentStart = i + 1;
-          break;
+            if (keywordStart >= 0 && (i == aChars.length - 1 || !isKeyWordChar)) {
+                int wordLength = (isKeyWordChar ? i + 1 : keywordEnd) - keywordStart;
+                if (currentCommandType == SqlCommandType.BLANK) {
+                    if (wordLength == 6 && parseCreateKeyword(aChars, keywordStart)) {
+                        currentCommandType = SqlCommandType.CREATE;
+                    } else if (wordLength == 5 && parseAlterKeyword(aChars, keywordStart)) {
+                        currentCommandType = SqlCommandType.ALTER;
+                    } else if (wordLength == 6 && parseUpdateKeyword(aChars, keywordStart)) {
+                        currentCommandType = SqlCommandType.UPDATE;
+                    } else if (wordLength == 6 && parseDeleteKeyword(aChars, keywordStart)) {
+                        currentCommandType = SqlCommandType.DELETE;
+                    } else if (wordLength == 4 && parseMoveKeyword(aChars, keywordStart)) {
+                        currentCommandType = SqlCommandType.MOVE;
+                    } else if (wordLength == 6 && parseSelectKeyword(aChars, keywordStart)) {
+                        currentCommandType = SqlCommandType.SELECT;
+                    } else if (wordLength == 4 && parseWithKeyword(aChars, keywordStart)) {
+                        currentCommandType = SqlCommandType.WITH;
+                    } else if (wordLength == 6 && parseInsertKeyword(aChars, keywordStart)) {
+                        if (!isInsertPresent && (nativeQueries == null || nativeQueries.isEmpty())) {
+                            // Only allow rewrite for insert command starting with the insert keyword.
+                            // Else, too many risks of wrong interpretation.
+                            isCurrentReWriteCompatible = keyWordCount == 0;
+                            isInsertPresent = true;
+                            currentCommandType = SqlCommandType.INSERT;
+                        } else {
+                            isCurrentReWriteCompatible = false;
+                        }
+                    }
 
-        case ';':
-          // we don't split the queries if BEGIN ATOMIC is present
-          if (!isBeginAtomicPresent && inParen == 0) {
-            if (!whitespaceOnly) {
-              numberOfStatements++;
-              nativeSql.append(aChars, fragmentStart, i - fragmentStart);
-              whitespaceOnly = true;
-            }
-            fragmentStart = i + 1;
-            if (nativeSql.length() > 0) {
-              if (addReturning(nativeSql, currentCommandType, returningColumnNames, isReturningPresent, quoteReturningIdentifiers)) {
-                isReturningPresent = true;
-              }
-
-              if (splitStatements) {
-                if (nativeQueries == null) {
-                  nativeQueries = new ArrayList<>();
-                }
-
-                if (!isValuesFound || !isCurrentReWriteCompatible || valuesParenthesisClosePosition == -1
-                    || (bindPositions != null
-                    && valuesParenthesisClosePosition < bindPositions.get(bindPositions.size() - 1))) {
-                  valuesParenthesisOpenPosition = -1;
-                  valuesParenthesisClosePosition = -1;
-                }
-
-                nativeQueries.add(new NativeQuery(nativeSql.toString(),
-                    toIntArray(bindPositions), false,
-                    SqlCommand.createStatementTypeInfo(
-                        currentCommandType, isBatchedReWriteConfigured, valuesParenthesisOpenPosition,
-                        valuesParenthesisClosePosition,
-                        isReturningPresent, nativeQueries.size())));
-              }
-            }
-            prevCommandType = currentCommandType;
-            isReturningPresentPrev = isReturningPresent;
-            currentCommandType = SqlCommandType.BLANK;
-            isReturningPresent = false;
-            if (splitStatements) {
-              // Prepare for next query
-              if (bindPositions != null) {
-                bindPositions.clear();
-              }
-              nativeSql.setLength(0);
-              isValuesFound = false;
-              isCurrentReWriteCompatible = false;
-              valuesParenthesisOpenPosition = -1;
-              valuesParenthesisClosePosition = -1;
-              valuesParenthesisCloseFound = false;
-            }
-          }
-          break;
-
-        default:
-          if (keywordStart >= 0) {
-            // When we are inside a keyword, we need to detect keyword end boundary
-            // Note that isKeyWordChar is initialized to false before the switch, so
-            // all other characters would result in isKeyWordChar=false
-            isKeyWordChar = isIdentifierContChar(aChar);
-            break;
-          }
-          // Not in keyword, so just detect next keyword start
-          isKeyWordChar = isIdentifierStartChar(aChar);
-          if (isKeyWordChar) {
-            keywordStart = i;
-            if (valuesParenthesisOpenPosition != -1 && inParen == 0) {
-              // When the statement already has multi-values, stop looking for more of them
-              // Since values(?,?),(?,?),... should not contain keywords in the middle
-              valuesParenthesisCloseFound = true;
-            }
-          }
-          break;
-      }
-      if (keywordStart >= 0 && (i == aChars.length - 1 || !isKeyWordChar)) {
-        int wordLength = (isKeyWordChar ? i + 1 : keywordEnd) - keywordStart;
-        if (currentCommandType == SqlCommandType.BLANK) {
-          if (wordLength == 6 && parseCreateKeyword(aChars, keywordStart)) {
-            currentCommandType = SqlCommandType.CREATE;
-          } else if (wordLength == 5 && parseAlterKeyword(aChars, keywordStart)) {
-            currentCommandType = SqlCommandType.ALTER;
-          } else if (wordLength == 6 && parseUpdateKeyword(aChars, keywordStart)) {
-            currentCommandType = SqlCommandType.UPDATE;
-          } else if (wordLength == 6 && parseDeleteKeyword(aChars, keywordStart)) {
-            currentCommandType = SqlCommandType.DELETE;
-          } else if (wordLength == 4 && parseMoveKeyword(aChars, keywordStart)) {
-            currentCommandType = SqlCommandType.MOVE;
-          } else if (wordLength == 6 && parseSelectKeyword(aChars, keywordStart)) {
-            currentCommandType = SqlCommandType.SELECT;
-          } else if (wordLength == 4 && parseWithKeyword(aChars, keywordStart)) {
-            currentCommandType = SqlCommandType.WITH;
-          } else if (wordLength == 6 && parseInsertKeyword(aChars, keywordStart)) {
-            if (!isInsertPresent && (nativeQueries == null || nativeQueries.isEmpty())) {
-              // Only allow rewrite for insert command starting with the insert keyword.
-              // Else, too many risks of wrong interpretation.
-              isCurrentReWriteCompatible = keyWordCount == 0;
-              isInsertPresent = true;
-              currentCommandType = SqlCommandType.INSERT;
-            } else {
-              isCurrentReWriteCompatible = false;
-            }
-          }
-
-        } else if (currentCommandType == SqlCommandType.WITH
-            && inParen == 0) {
-          SqlCommandType command = parseWithCommandType(aChars, i, keywordStart, wordLength);
-          if (command != null) {
-            currentCommandType = command;
-          }
-        } else if (currentCommandType == SqlCommandType.CREATE) {
+                } else if (currentCommandType == SqlCommandType.WITH
+                        && inParen == 0) {
+                    SqlCommandType command = parseWithCommandType(aChars, i, keywordStart, wordLength);
+                    if (command != null) {
+                        currentCommandType = command;
+                    }
+                } else if (currentCommandType == SqlCommandType.CREATE) {
           /*
           We are looking for BEGIN ATOMIC
            */
-          if (wordLength == 5 && parseBeginKeyword(aChars, keywordStart)) {
-            isBeginPresent = true;
-          } else {
-            // found begin, now look for atomic
-            if (isBeginPresent) {
-              if (wordLength == 6 && parseAtomicKeyword(aChars, keywordStart)) {
-                isBeginAtomicPresent = true;
-              }
-              // either way we reset beginFound
-              isBeginPresent = false;
+                    if (wordLength == 5 && parseBeginKeyword(aChars, keywordStart)) {
+                        isBeginPresent = true;
+                    } else {
+                        // found begin, now look for atomic
+                        if (isBeginPresent) {
+                            if (wordLength == 6 && parseAtomicKeyword(aChars, keywordStart)) {
+                                isBeginAtomicPresent = true;
+                            }
+                            // either way we reset beginFound
+                            isBeginPresent = false;
+                        }
+                    }
+                }
+                if (inParen != 0 || aChar == ')') {
+                    // RETURNING and VALUES cannot be present in parentheses
+                } else if (wordLength == 9 && parseReturningKeyword(aChars, keywordStart)) {
+                    isReturningPresent = true;
+                } else if (wordLength == 6 && parseValuesKeyword(aChars, keywordStart)) {
+                    isValuesFound = true;
+                }
+                keywordStart = -1;
+                keyWordCount++;
+            }
+            if (aChar == '(') {
+                inParen++;
+                if (inParen == 1 && isValuesFound && valuesParenthesisOpenPosition == -1) {
+                    valuesParenthesisOpenPosition = nativeSql.length() + i - fragmentStart;
+                }
             }
-          }
         }
-        if (inParen != 0 || aChar == ')') {
-          // RETURNING and VALUES cannot be present in parentheses
-        } else if (wordLength == 9 && parseReturningKeyword(aChars, keywordStart)) {
-          isReturningPresent = true;
-        } else if (wordLength == 6 && parseValuesKeyword(aChars, keywordStart)) {
-          isValuesFound = true;
+
+        if (!isValuesFound || !isCurrentReWriteCompatible || valuesParenthesisClosePosition == -1
+                || (bindPositions != null
+                && valuesParenthesisClosePosition < bindPositions.get(bindPositions.size() - 1))) {
+            valuesParenthesisOpenPosition = -1;
+            valuesParenthesisClosePosition = -1;
         }
-        keywordStart = -1;
-        keyWordCount++;
-      }
-      if (aChar == '(') {
-        inParen++;
-        if (inParen == 1 && isValuesFound && valuesParenthesisOpenPosition == -1) {
-          valuesParenthesisOpenPosition = nativeSql.length() + i - fragmentStart;
+
+        if (fragmentStart < aChars.length && !whitespaceOnly) {
+            nativeSql.append(aChars, fragmentStart, aChars.length - fragmentStart);
+        } else {
+            if (numberOfStatements > 1) {
+                isReturningPresent = false;
+                currentCommandType = SqlCommandType.BLANK;
+            } else if (numberOfStatements == 1) {
+                isReturningPresent = isReturningPresentPrev;
+                currentCommandType = prevCommandType;
+            }
         }
-      }
+
+        if (nativeSql.length() == 0) {
+            return nativeQueries != null ? nativeQueries : Collections.emptyList();
+        }
+
+        if (addReturning(nativeSql, currentCommandType, returningColumnNames, isReturningPresent, quoteReturningIdentifiers)) {
+            isReturningPresent = true;
+        }
+
+        NativeQuery lastQuery = new NativeQuery(nativeSql.toString(),
+                toIntArray(bindPositions), !splitStatements,
+                SqlCommand.createStatementTypeInfo(currentCommandType,
+                        isBatchedReWriteConfigured, valuesParenthesisOpenPosition, valuesParenthesisClosePosition,
+                        isReturningPresent, (nativeQueries == null ? 0 : nativeQueries.size())));
+
+        if (nativeQueries == null) {
+            return Collections.singletonList(lastQuery);
+        }
+
+        if (!whitespaceOnly) {
+            nativeQueries.add(lastQuery);
+        }
+        return nativeQueries;
     }
 
-    if (!isValuesFound || !isCurrentReWriteCompatible || valuesParenthesisClosePosition == -1
-        || (bindPositions != null
-        && valuesParenthesisClosePosition < bindPositions.get(bindPositions.size() - 1))) {
-      valuesParenthesisOpenPosition = -1;
-      valuesParenthesisClosePosition = -1;
+    private static SqlCommandType parseWithCommandType(char[] aChars, int i, int keywordStart,
+                                                       int wordLength) {
+        // This parses `with x as (...) ...`
+        // Corner case is `with select as (insert ..) select * from select
+        SqlCommandType command;
+        if (wordLength == 6 && parseUpdateKeyword(aChars, keywordStart)) {
+            command = SqlCommandType.UPDATE;
+        } else if (wordLength == 6 && parseDeleteKeyword(aChars, keywordStart)) {
+            command = SqlCommandType.DELETE;
+        } else if (wordLength == 6 && parseInsertKeyword(aChars, keywordStart)) {
+            command = SqlCommandType.INSERT;
+        } else if (wordLength == 6 && parseSelectKeyword(aChars, keywordStart)) {
+            command = SqlCommandType.SELECT;
+        } else {
+            return null;
+        }
+        // update/delete/insert/select keyword detected
+        // Check if `AS` follows
+        int nextInd = i;
+        // The loop should skip whitespace and comments
+        for (; nextInd < aChars.length; nextInd++) {
+            char nextChar = aChars[nextInd];
+            if (nextChar == '-') {
+                nextInd = Parser.parseLineComment(aChars, nextInd);
+            } else if (nextChar == '/') {
+                nextInd = Parser.parseBlockComment(aChars, nextInd);
+            } else if (Character.isWhitespace(nextChar)) {
+                // Skip whitespace
+                continue;
+            } else {
+                break;
+            }
+        }
+        if (nextInd + 2 >= aChars.length
+                || (!parseAsKeyword(aChars, nextInd)
+                || isIdentifierContChar(aChars[nextInd + 2]))) {
+            return command;
+        }
+        return null;
     }
 
-    if (fragmentStart < aChars.length && !whitespaceOnly) {
-      nativeSql.append(aChars, fragmentStart, aChars.length - fragmentStart);
-    } else {
-      if (numberOfStatements > 1) {
-        isReturningPresent = false;
-        currentCommandType = SqlCommandType.BLANK;
-      } else if (numberOfStatements == 1) {
-        isReturningPresent = isReturningPresentPrev;
-        currentCommandType = prevCommandType;
-      }
-    }
+    private static boolean addReturning(StringBuilder nativeSql, SqlCommandType currentCommandType,
+                                        String[] returningColumnNames, boolean isReturningPresent, boolean quoteReturningIdentifiers) throws SQLException {
+        if (isReturningPresent || returningColumnNames.length == 0) {
+            return false;
+        }
+        if (currentCommandType != SqlCommandType.INSERT
+                && currentCommandType != SqlCommandType.UPDATE
+                && currentCommandType != SqlCommandType.DELETE
+                && currentCommandType != SqlCommandType.WITH) {
+            return false;
+        }
 
-    if (nativeSql.length() == 0) {
-      return nativeQueries != null ? nativeQueries : Collections.emptyList();
-    }
-
-    if (addReturning(nativeSql, currentCommandType, returningColumnNames, isReturningPresent, quoteReturningIdentifiers)) {
-      isReturningPresent = true;
-    }
-
-    NativeQuery lastQuery = new NativeQuery(nativeSql.toString(),
-        toIntArray(bindPositions), !splitStatements,
-        SqlCommand.createStatementTypeInfo(currentCommandType,
-            isBatchedReWriteConfigured, valuesParenthesisOpenPosition, valuesParenthesisClosePosition,
-            isReturningPresent, (nativeQueries == null ? 0 : nativeQueries.size())));
-
-    if (nativeQueries == null) {
-      return Collections.singletonList(lastQuery);
-    }
-
-    if (!whitespaceOnly) {
-      nativeQueries.add(lastQuery);
-    }
-    return nativeQueries;
-  }
-
-  private static SqlCommandType parseWithCommandType(char[] aChars, int i, int keywordStart,
-      int wordLength) {
-    // This parses `with x as (...) ...`
-    // Corner case is `with select as (insert ..) select * from select
-    SqlCommandType command;
-    if (wordLength == 6 && parseUpdateKeyword(aChars, keywordStart)) {
-      command = SqlCommandType.UPDATE;
-    } else if (wordLength == 6 && parseDeleteKeyword(aChars, keywordStart)) {
-      command = SqlCommandType.DELETE;
-    } else if (wordLength == 6 && parseInsertKeyword(aChars, keywordStart)) {
-      command = SqlCommandType.INSERT;
-    } else if (wordLength == 6 && parseSelectKeyword(aChars, keywordStart)) {
-      command = SqlCommandType.SELECT;
-    } else {
-      return null;
-    }
-    // update/delete/insert/select keyword detected
-    // Check if `AS` follows
-    int nextInd = i;
-    // The loop should skip whitespace and comments
-    for (; nextInd < aChars.length; nextInd++) {
-      char nextChar = aChars[nextInd];
-      if (nextChar == '-') {
-        nextInd = Parser.parseLineComment(aChars, nextInd);
-      } else if (nextChar == '/') {
-        nextInd = Parser.parseBlockComment(aChars, nextInd);
-      } else if (Character.isWhitespace(nextChar)) {
-        // Skip whitespace
-        continue;
-      } else {
-        break;
-      }
-    }
-    if (nextInd + 2 >= aChars.length
-        || (!parseAsKeyword(aChars, nextInd)
-        || isIdentifierContChar(aChars[nextInd + 2]))) {
-      return command;
-    }
-    return null;
-  }
-
-  private static boolean addReturning(StringBuilder nativeSql, SqlCommandType currentCommandType,
-      String[] returningColumnNames, boolean isReturningPresent, boolean quoteReturningIdentifiers) throws SQLException {
-    if (isReturningPresent || returningColumnNames.length == 0) {
-      return false;
-    }
-    if (currentCommandType != SqlCommandType.INSERT
-        && currentCommandType != SqlCommandType.UPDATE
-        && currentCommandType != SqlCommandType.DELETE
-        && currentCommandType != SqlCommandType.WITH) {
-      return false;
-    }
-
-    nativeSql.append("\nRETURNING ");
-    if (returningColumnNames.length == 1 && returningColumnNames[0].charAt(0) == '*') {
-      nativeSql.append('*');
-      return true;
-    }
-    for (int col = 0; col < returningColumnNames.length; col++) {
-      String columnName = returningColumnNames[col];
-      if (col > 0) {
-        nativeSql.append(", ");
-      }
+        nativeSql.append("\nRETURNING ");
+        if (returningColumnNames.length == 1 && returningColumnNames[0].charAt(0) == '*') {
+            nativeSql.append('*');
+            return true;
+        }
+        for (int col = 0; col < returningColumnNames.length; col++) {
+            String columnName = returningColumnNames[col];
+            if (col > 0) {
+                nativeSql.append(", ");
+            }
       /*
       If the client quotes identifiers then doing so again would create an error
        */
-      if (quoteReturningIdentifiers) {
-        Utils.escapeIdentifier(nativeSql, columnName);
-      } else {
-        nativeSql.append(columnName);
-      }
-    }
-    return true;
-  }
-
-  /**
-   * Converts {@link IntList} to {@code int[]}. A {@code null} collection is converted to
-   * {@code null} array.
-   *
-   * @param list input list
-   * @return output array
-   */
-  private static int [] toIntArray(IntList list) {
-    if (list == null) {
-      return null;
-    }
-    return list.toArray();
-  }
-
-  /**
-   * <p>Find the end of the single-quoted string starting at the given offset.</p>
-   *
-   * <p>Note: for {@code 'single '' quote in string'}, this method currently returns the offset of
-   * first {@code '} character after the initial one. The caller must call the method a second time
-   * for the second part of the quoted string.</p>
-   *
-   * @param query                     query
-   * @param offset                    start offset
-   * @param standardConformingStrings standard conforming strings
-   * @return position of the end of the single-quoted string
-   */
-  public static int parseSingleQuotes(final char[] query, int offset,
-      boolean standardConformingStrings) {
-    // check for escape string syntax (E'')
-    if (standardConformingStrings
-        && offset >= 2
-        && (query[offset - 1] == 'e' || query[offset - 1] == 'E')
-        && charTerminatesIdentifier(query[offset - 2])) {
-      standardConformingStrings = false;
-    }
-
-    if (standardConformingStrings) {
-      // do NOT treat backslashes as escape characters
-      while (++offset < query.length) {
-        if (query[offset] == '\'') {
-          return offset;
-        }
-      }
-    } else {
-      // treat backslashes as escape characters
-      while (++offset < query.length) {
-        switch (query[offset]) {
-          case '\\':
-            ++offset;
-            break;
-          case '\'':
-            return offset;
-          default:
-            break;
-        }
-      }
-    }
-
-    return query.length;
-  }
-
-  /**
-   * <p>Find the end of the double-quoted string starting at the given offset.</p>
-   *
-   * <p>Note: for {@code "double "" quote in string"}, this method currently
-   * returns the offset of first {@code &quot;} character after the initial one. The caller must
-   * call the method a second time for the second part of the quoted string.</p>
-   *
-   * @param query  query
-   * @param offset start offset
-   * @return position of the end of the double-quoted string
-   */
-  public static int parseDoubleQuotes(final char[] query, int offset) {
-    while (++offset < query.length && query[offset] != '"') {
-      // do nothing
-    }
-    return offset;
-  }
-
-  /**
-   * Test if the dollar character ({@code $}) at the given offset starts a dollar-quoted string and
-   * return the offset of the ending dollar character.
-   *
-   * @param query  query
-   * @param offset start offset
-   * @return offset of the ending dollar character
-   */
-  public static int parseDollarQuotes(final char[] query, int offset) {
-    if (offset + 1 < query.length
-        && (offset == 0 || !isIdentifierContChar(query[offset - 1]))) {
-      int endIdx = -1;
-      if (query[offset + 1] == '$') {
-        endIdx = offset + 1;
-      } else if (isDollarQuoteStartChar(query[offset + 1])) {
-        for (int d = offset + 2; d < query.length; d++) {
-          if (query[d] == '$') {
-            endIdx = d;
-            break;
-          } else if (!isDollarQuoteContChar(query[d])) {
-            break;
-          }
-        }
-      }
-      if (endIdx > 0) {
-        // found; note: tag includes start and end $ character
-        int tagIdx = offset;
-        int tagLen = endIdx - offset + 1;
-        offset = endIdx; // loop continues at endIdx + 1
-        for (++offset; offset < query.length; offset++) {
-          if (query[offset] == '$'
-              && subArraysEqual(query, tagIdx, offset, tagLen)) {
-            offset += tagLen - 1;
-            break;
-          }
-        }
-      }
-    }
-    return offset;
-  }
-
-  /**
-   * Test if the {@code -} character at {@code offset} starts a {@code --} style line comment,
-   * and return the position of the first {@code \r} or {@code \n} character.
-   *
-   * @param query  query
-   * @param offset start offset
-   * @return position of the first {@code \r} or {@code \n} character
-   */
-  public static int parseLineComment(final char[] query, int offset) {
-    if (offset + 1 < query.length && query[offset + 1] == '-') {
-      while (offset + 1 < query.length) {
-        offset++;
-        if (query[offset] == '\r' || query[offset] == '\n') {
-          break;
-        }
-      }
-    }
-    return offset;
-  }
-
-  /**
-   * Test if the {@code /} character at {@code offset} starts a block comment, and return the
-   * position of the last {@code /} character.
-   *
-   * @param query  query
-   * @param offset start offset
-   * @return position of the last {@code /} character
-   */
-  public static int parseBlockComment(final char[] query, int offset) {
-    if (offset + 1 < query.length && query[offset + 1] == '*') {
-      // /* /* */ */ nest, according to SQL spec
-      int level = 1;
-      for (offset += 2; offset < query.length; offset++) {
-        switch (query[offset - 1]) {
-          case '*':
-            if (query[offset] == '/') {
-              --level;
-              ++offset; // don't parse / in */* twice
-            }
-            break;
-          case '/':
-            if (query[offset] == '*') {
-              ++level;
-              ++offset; // don't parse * in /*/ twice
-            }
-            break;
-          default:
-            break;
-        }
-
-        if (level == 0) {
-          --offset; // reset position to last '/' char
-          break;
-        }
-      }
-    }
-    return offset;
-  }
-
-  /**
-   * Parse string to check presence of DELETE keyword regardless of case. The initial character is
-   * assumed to have been matched.
-   *
-   * @param query char[] of the query statement
-   * @param offset position of query to start checking
-   * @return boolean indicates presence of word
-   */
-  public static boolean parseDeleteKeyword(final char[] query, int offset) {
-    if (query.length < (offset + 6)) {
-      return false;
-    }
-
-    return (query[offset] | 32) == 'd'
-        && (query[offset + 1] | 32) == 'e'
-        && (query[offset + 2] | 32) == 'l'
-        && (query[offset + 3] | 32) == 'e'
-        && (query[offset + 4] | 32) == 't'
-        && (query[offset + 5] | 32) == 'e';
-  }
-
-  /**
-   * Parse string to check presence of INSERT keyword regardless of case.
-   *
-   * @param query char[] of the query statement
-   * @param offset position of query to start checking
-   * @return boolean indicates presence of word
-   */
-  public static boolean parseInsertKeyword(final char[] query, int offset) {
-    if (query.length < (offset + 7)) {
-      return false;
-    }
-
-    return (query[offset] | 32) == 'i'
-        && (query[offset + 1] | 32) == 'n'
-        && (query[offset + 2] | 32) == 's'
-        && (query[offset + 3] | 32) == 'e'
-        && (query[offset + 4] | 32) == 'r'
-        && (query[offset + 5] | 32) == 't';
-  }
-
-  /**
-   Parse string to check presence of BEGIN keyword regardless of case.
-   *
-   * @param query char[] of the query statement
-   * @param offset position of query to start checking
-   * @return boolean indicates presence of word
-   */
-
-  public static boolean parseBeginKeyword(final char[] query, int offset) {
-    if (query.length < (offset + 6)) {
-      return false;
-    }
-    return (query[offset] | 32) == 'b'
-        && (query[offset + 1] | 32) == 'e'
-        && (query[offset + 2] | 32) == 'g'
-        && (query[offset + 3] | 32) == 'i'
-        && (query[offset + 4] | 32) == 'n';
-  }
-
-  /**
-   Parse string to check presence of ATOMIC keyword regardless of case.
-   *
-   * @param query char[] of the query statement
-   * @param offset position of query to start checking
-   * @return boolean indicates presence of word
-   */
-  public static boolean parseAtomicKeyword(final char[] query, int offset) {
-    if (query.length < (offset + 7)) {
-      return false;
-    }
-    return (query[offset] | 32) == 'a'
-        && (query[offset + 1] | 32) == 't'
-        && (query[offset + 2] | 32) == 'o'
-        && (query[offset + 3] | 32) == 'm'
-        && (query[offset + 4] | 32) == 'i'
-        && (query[offset + 5] | 32) == 'c';
-  }
-
-  /**
-   * Parse string to check presence of MOVE keyword regardless of case.
-   *
-   * @param query char[] of the query statement
-   * @param offset position of query to start checking
-   * @return boolean indicates presence of word
-   */
-  public static boolean parseMoveKeyword(final char[] query, int offset) {
-    if (query.length < (offset + 4)) {
-      return false;
-    }
-
-    return (query[offset] | 32) == 'm'
-        && (query[offset + 1] | 32) == 'o'
-        && (query[offset + 2] | 32) == 'v'
-        && (query[offset + 3] | 32) == 'e';
-  }
-
-  /**
-   * Parse string to check presence of RETURNING keyword regardless of case.
-   *
-   * @param query char[] of the query statement
-   * @param offset position of query to start checking
-   * @return boolean indicates presence of word
-   */
-  public static boolean parseReturningKeyword(final char[] query, int offset) {
-    if (query.length < (offset + 9)) {
-      return false;
-    }
-
-    return (query[offset] | 32) == 'r'
-        && (query[offset + 1] | 32) == 'e'
-        && (query[offset + 2] | 32) == 't'
-        && (query[offset + 3] | 32) == 'u'
-        && (query[offset + 4] | 32) == 'r'
-        && (query[offset + 5] | 32) == 'n'
-        && (query[offset + 6] | 32) == 'i'
-        && (query[offset + 7] | 32) == 'n'
-        && (query[offset + 8] | 32) == 'g';
-  }
-
-  /**
-   * Parse string to check presence of SELECT keyword regardless of case.
-   *
-   * @param query char[] of the query statement
-   * @param offset position of query to start checking
-   * @return boolean indicates presence of word
-   */
-  public static boolean parseSelectKeyword(final char[] query, int offset) {
-    if (query.length < (offset + 6)) {
-      return false;
-    }
-
-    return (query[offset] | 32) == 's'
-        && (query[offset + 1] | 32) == 'e'
-        && (query[offset + 2] | 32) == 'l'
-        && (query[offset + 3] | 32) == 'e'
-        && (query[offset + 4] | 32) == 'c'
-        && (query[offset + 5] | 32) == 't';
-  }
-
-  /**
-   * Parse string to check presence of CREATE keyword regardless of case.
-   *
-   * @param query char[] of the query statement
-   * @param offset position of query to start checking
-   * @return boolean indicates presence of word
-   */
-  public static boolean parseAlterKeyword(final char[] query, int offset) {
-    if (query.length < (offset + 5)) {
-      return false;
-    }
-
-    return (query[offset] | 32) == 'a'
-        && (query[offset + 1] | 32) == 'l'
-        && (query[offset + 2] | 32) == 't'
-        && (query[offset + 3] | 32) == 'e'
-        && (query[offset + 4] | 32) == 'r';
-  }
-
-  /**
-   * Parse string to check presence of CREATE keyword regardless of case.
-   *
-   * @param query char[] of the query statement
-   * @param offset position of query to start checking
-   * @return boolean indicates presence of word
-   */
-  public static boolean parseCreateKeyword(final char[] query, int offset) {
-    if (query.length < (offset + 6)) {
-      return false;
-    }
-
-    return (query[offset] | 32) == 'c'
-        && (query[offset + 1] | 32) == 'r'
-        && (query[offset + 2] | 32) == 'e'
-        && (query[offset + 3] | 32) == 'a'
-        && (query[offset + 4] | 32) == 't'
-        && (query[offset + 5] | 32) == 'e';
-  }
-
-  /**
-   * Parse string to check presence of UPDATE keyword regardless of case.
-   *
-   * @param query char[] of the query statement
-   * @param offset position of query to start checking
-   * @return boolean indicates presence of word
-   */
-  public static boolean parseUpdateKeyword(final char[] query, int offset) {
-    if (query.length < (offset + 6)) {
-      return false;
-    }
-
-    return (query[offset] | 32) == 'u'
-        && (query[offset + 1] | 32) == 'p'
-        && (query[offset + 2] | 32) == 'd'
-        && (query[offset + 3] | 32) == 'a'
-        && (query[offset + 4] | 32) == 't'
-        && (query[offset + 5] | 32) == 'e';
-  }
-
-  /**
-   * Parse string to check presence of VALUES keyword regardless of case.
-   *
-   * @param query char[] of the query statement
-   * @param offset position of query to start checking
-   * @return boolean indicates presence of word
-   */
-  public static boolean parseValuesKeyword(final char[] query, int offset) {
-    if (query.length < (offset + 6)) {
-      return false;
-    }
-
-    return (query[offset] | 32) == 'v'
-        && (query[offset + 1] | 32) == 'a'
-        && (query[offset + 2] | 32) == 'l'
-        && (query[offset + 3] | 32) == 'u'
-        && (query[offset + 4] | 32) == 'e'
-        && (query[offset + 5] | 32) == 's';
-  }
-
-  /**
-   * Faster version of {@link Long#parseLong(String)} when parsing a substring is required
-   *
-   * @param s string to parse
-   * @param beginIndex begin index
-   * @param endIndex end index
-   * @return long value
-   */
-  public static long parseLong(String s, int beginIndex, int endIndex) {
-    // Fallback to default implementation in case the string is long
-    if (endIndex - beginIndex > 16) {
-      return Long.parseLong(s.substring(beginIndex, endIndex));
-    }
-    long res = digitAt(s, beginIndex);
-    for (beginIndex++; beginIndex < endIndex; beginIndex++) {
-      res = res * 10 + digitAt(s, beginIndex);
-    }
-    return res;
-  }
-
-  /**
-   * Parse string to check presence of WITH keyword regardless of case.
-   *
-   * @param query char[] of the query statement
-   * @param offset position of query to start checking
-   * @return boolean indicates presence of word
-   */
-  public static boolean parseWithKeyword(final char[] query, int offset) {
-    if (query.length < (offset + 4)) {
-      return false;
-    }
-
-    return (query[offset] | 32) == 'w'
-        && (query[offset + 1] | 32) == 'i'
-        && (query[offset + 2] | 32) == 't'
-        && (query[offset + 3] | 32) == 'h';
-  }
-
-  /**
-   * Parse string to check presence of AS keyword regardless of case.
-   *
-   * @param query char[] of the query statement
-   * @param offset position of query to start checking
-   * @return boolean indicates presence of word
-   */
-  public static boolean parseAsKeyword(final char[] query, int offset) {
-    if (query.length < (offset + 2)) {
-      return false;
-    }
-
-    return (query[offset] | 32) == 'a'
-        && (query[offset + 1] | 32) == 's';
-  }
-
-  /**
-   * Returns true if a given string {@code s} has digit at position {@code pos}.
-   * @param s input string
-   * @param pos position (0-based)
-   * @return true if input string s has digit at position pos
-   */
-  public static boolean isDigitAt(String s, int pos) {
-    return pos > 0 && pos < s.length() && Character.isDigit(s.charAt(pos));
-  }
-
-  /**
-   * Converts digit at position {@code pos} in string {@code s} to integer or throws.
-   * @param s input string
-   * @param pos position (0-based)
-   * @return integer value of a digit at position pos
-   * @throws NumberFormatException if character at position pos is not an integer
-   */
-  public static int digitAt(String s, int pos) {
-    int c = s.charAt(pos) - '0';
-    if (c < 0 || c > 9) {
-      throw new NumberFormatException("Input string: \"" + s + "\", position: " + pos);
-    }
-    return c;
-  }
-
-  /**
-   * Identifies characters which the backend scanner considers to be whitespace.
-   *
-   * <p>
-   * https://github.com/postgres/postgres/blob/17bb62501787c56e0518e61db13a523d47afd724/src/backend/parser/scan.l#L194-L198
-   * </p>
-   *
-   * @param c character
-   * @return true if the character is a whitespace character as defined in the backend's parser
-   */
-  public static boolean isSpace(char c) {
-    return c == ' ' || c == '\t' || c == '\n' || c == '\r' || c == '\f';
-  }
-
-  /**
-   * Identifies white space characters which the backend uses to determine if a
-   * {@code String} value needs to be quoted in array representation.
-   *
-   * <p>
-   * https://github.com/postgres/postgres/blob/f2c587067a8eb9cf1c8f009262381a6576ba3dd0/src/backend/utils/adt/arrayfuncs.c#L421-L438
-   * </p>
-   *
-   * @param c
-   *          Character to examine.
-   * @return Indication if the character is a whitespace which back end will
-   *         escape.
-   */
-  public static boolean isArrayWhiteSpace(char c) {
-    return c == ' ' || c == '\t' || c == '\n' || c == '\r' || c == '\f' || c == 0x0B;
-  }
-
-  /**
-   * @param c character
-   * @return true if the given character is a valid character for an operator in the backend's
-   *     parser
-   */
-  public static boolean isOperatorChar(char c) {
-    /*
-     * Extracted from operators defined by {self} and {op_chars}
-     * in pgsql/src/backend/parser/scan.l.
-     */
-    return ",()[].;:+-*/%^<>=~!@#&|`?".indexOf(c) != -1;
-  }
-
-  /**
-   * Checks if a character is valid as the start of an identifier.
-   * PostgreSQL 9.4 allows column names like _, ‿, ⁀, ⁔, ︳, ︴, ﹍, ﹎, ﹏, _, so
-   * it is assumed isJavaIdentifierPart is good enough for PostgreSQL.
-   *
-   * @param c the character to check
-   * @return true if valid as first character of an identifier; false if not
-   * @see <a href="https://www.postgresql.org/docs/9.6/static/sql-syntax-lexical.html#SQL-SYNTAX-IDENTIFIERS">Identifiers and Key Words</a>
-   */
-  public static boolean isIdentifierStartChar(char c) {
-    /*
-     * PostgreSQL's implementation is located in
-     * pgsql/src/backend/parser/scan.l:
-     * ident_start    [A-Za-z\200-\377_]
-     * ident_cont     [A-Za-z\200-\377_0-9\$]
-     * however it is not clear how that interacts with unicode, so we just use Java's implementation.
-     */
-    return Character.isJavaIdentifierStart(c);
-  }
-
-  /**
-   * Checks if a character is valid as the second or later character of an identifier.
-   *
-   * @param c the character to check
-   * @return true if valid as second or later character of an identifier; false if not
-   */
-  public static boolean isIdentifierContChar(char c) {
-    return Character.isJavaIdentifierPart(c);
-  }
-
-  /**
-   * @param c character
-   * @return true if the character terminates an identifier
-   */
-  public static boolean charTerminatesIdentifier(char c) {
-    return c == '"' || isSpace(c) || isOperatorChar(c);
-  }
-
-  /**
-   * Checks if a character is valid as the start of a dollar quoting tag.
-   *
-   * @param c the character to check
-   * @return true if valid as first character of a dollar quoting tag; false if not
-   */
-  public static boolean isDollarQuoteStartChar(char c) {
-    /*
-     * The allowed dollar quote start and continuation characters
-     * must stay in sync with what the backend defines in
-     * pgsql/src/backend/parser/scan.l
-     *
-     * The quoted string starts with $foo$ where "foo" is an optional string
-     * in the form of an identifier, except that it may not contain "$",
-     * and extends to the first occurrence of an identical string.
-     * There is *no* processing of the quoted text.
-     */
-    return c != '$' && isIdentifierStartChar(c);
-  }
-
-  /**
-   * Checks if a character is valid as the second or later character of a dollar quoting tag.
-   *
-   * @param c the character to check
-   * @return true if valid as second or later character of a dollar quoting tag; false if not
-   */
-  public static boolean isDollarQuoteContChar(char c) {
-    return c != '$' && isIdentifierContChar(c);
-  }
-
-  /**
-   * Compares two sub-arrays of the given character array for equalness. If the length is zero, the
-   * result is true if and only if the offsets are within the bounds of the array.
-   *
-   * @param arr  a char array
-   * @param offA first sub-array start offset
-   * @param offB second sub-array start offset
-   * @param len  length of the sub arrays to compare
-   * @return true if the sub-arrays are equal; false if not
-   */
-  private static boolean subArraysEqual(final char[] arr,
-      final int offA, final int offB,
-      final int len) {
-    if (offA < 0 || offB < 0
-        || offA >= arr.length || offB >= arr.length
-        || offA + len > arr.length || offB + len > arr.length) {
-      return false;
-    }
-
-    for (int i = 0; i < len; i++) {
-      if (arr[offA + i] != arr[offB + i]) {
-        return false;
-      }
-    }
-
-    return true;
-  }
-
-  /**
-   * Converts JDBC-specific callable statement escapes {@code { [? =] call <some_function> [(?,
-   * [?,..])] }} into the PostgreSQL format which is {@code select <some_function> (?, [?, ...]) as
-   * result} or {@code select * from <some_function> (?, [?, ...]) as result} (7.3)
-   *
-   * @param jdbcSql              sql text with JDBC escapes
-   * @param stdStrings           if backslash in single quotes should be regular character or escape one
-   * @param serverVersion        server version
-   * @param protocolVersion      protocol version
-   * @param escapeSyntaxCallMode mode specifying whether JDBC escape call syntax is transformed into a CALL/SELECT statement
-   * @return SQL in appropriate for given server format
-   * @throws SQLException if given SQL is malformed
-   */
-  public static JdbcCallParseInfo modifyJdbcCall(String jdbcSql, boolean stdStrings,
-      int serverVersion, int protocolVersion, EscapeSyntaxCallMode escapeSyntaxCallMode) throws SQLException {
-    // Mini-parser for JDBC function-call syntax (only)
-    // TODO: Merge with escape processing (and parameter parsing?) so we only parse each query once.
-    // RE: frequently used statements are cached (see {@link org.postgresql.jdbc.PgConnection#borrowQuery}), so this "merge" is not that important.
-    String sql = jdbcSql;
-    boolean isFunction = false;
-    boolean outParamBeforeFunc = false;
-
-    int len = jdbcSql.length();
-    int state = 1;
-    boolean inQuotes = false;
-    boolean inEscape = false;
-    int startIndex = -1;
-    int endIndex = -1;
-    boolean syntaxError = false;
-    int i = 0;
-
-    while (i < len && !syntaxError) {
-      char ch = jdbcSql.charAt(i);
-
-      switch (state) {
-        case 1:  // Looking for { at start of query
-          if (ch == '{') {
-            ++i;
-            ++state;
-          } else if (Character.isWhitespace(ch)) {
-            ++i;
-          } else {
-            // Not function-call syntax. Skip the rest of the string.
-            i = len;
-          }
-          break;
-
-        case 2:  // After {, looking for ? or =, skipping whitespace
-          if (ch == '?') {
-            outParamBeforeFunc =
-                isFunction = true;   // { ? = call ... }  -- function with one out parameter
-            ++i;
-            ++state;
-          } else if (ch == 'c' || ch == 'C') {  // { call ... }      -- proc with no out parameters
-            state += 3; // Don't increase 'i'
-          } else if (Character.isWhitespace(ch)) {
-            ++i;
-          } else {
-            // "{ foo ...", doesn't make sense, complain.
-            syntaxError = true;
-          }
-          break;
-
-        case 3:  // Looking for = after ?, skipping whitespace
-          if (ch == '=') {
-            ++i;
-            ++state;
-          } else if (Character.isWhitespace(ch)) {
-            ++i;
-          } else {
-            syntaxError = true;
-          }
-          break;
-
-        case 4:  // Looking for 'call' after '? =' skipping whitespace
-          if (ch == 'c' || ch == 'C') {
-            ++state; // Don't increase 'i'.
-          } else if (Character.isWhitespace(ch)) {
-            ++i;
-          } else {
-            syntaxError = true;
-          }
-          break;
-
-        case 5:  // Should be at 'call ' either at start of string or after ?=
-          if ((ch == 'c' || ch == 'C') && i + 4 <= len && "call"
-              .equalsIgnoreCase(jdbcSql.substring(i, i + 4))) {
-            isFunction = true;
-            i += 4;
-            ++state;
-          } else if (Character.isWhitespace(ch)) {
-            ++i;
-          } else {
-            syntaxError = true;
-          }
-          break;
-
-        case 6:  // Looking for whitespace char after 'call'
-          if (Character.isWhitespace(ch)) {
-            // Ok, we found the start of the real call.
-            ++i;
-            ++state;
-            startIndex = i;
-          } else {
-            syntaxError = true;
-          }
-          break;
-
-        case 7:  // In "body" of the query (after "{ [? =] call ")
-          if (ch == '\'') {
-            inQuotes = !inQuotes;
-            ++i;
-          } else if (inQuotes && ch == '\\' && !stdStrings) {
-            // Backslash in string constant, skip next character.
-            i += 2;
-          } else if (!inQuotes && ch == '{') {
-            inEscape = !inEscape;
-            ++i;
-          } else if (!inQuotes && ch == '}') {
-            if (!inEscape) {
-              // Should be end of string.
-              endIndex = i;
-              ++i;
-              ++state;
+            if (quoteReturningIdentifiers) {
+                Utils.escapeIdentifier(nativeSql, columnName);
             } else {
-              inEscape = false;
+                nativeSql.append(columnName);
             }
-          } else if (!inQuotes && ch == ';') {
-            syntaxError = true;
-          } else {
-            // Everything else is ok.
-            ++i;
-          }
-          break;
-
-        case 8:  // At trailing end of query, eating whitespace
-          if (Character.isWhitespace(ch)) {
-            ++i;
-          } else {
-            syntaxError = true;
-          }
-          break;
-
-        default:
-          throw new IllegalStateException("somehow got into bad state " + state);
-      }
+        }
+        return true;
     }
 
-    // We can only legally end in a couple of states here.
-    if (i == len && !syntaxError) {
-      if (state == 1) {
-        // Not an escaped syntax.
-
-        // Detect PostgreSQL native CALL.
-        // (OUT parameter registration, needed for stored procedures with INOUT arguments, will fail without this)
-        i = 0;
-        while (i < len && Character.isWhitespace(jdbcSql.charAt(i))) {
-          i++; // skip any preceding whitespace
+    /**
+     * Converts {@link IntList} to {@code int[]}. A {@code null} collection is converted to
+     * {@code null} array.
+     *
+     * @param list input list
+     * @return output array
+     */
+    private static int[] toIntArray(IntList list) {
+        if (list == null) {
+            return null;
         }
-        if (i < len - 5) { // 5 == length of "call" + 1 whitespace
-          //Check for CALL followed by whitespace
-          char ch = jdbcSql.charAt(i);
-          if ((ch == 'c' || ch == 'C') && "call".equalsIgnoreCase(jdbcSql.substring(i, i + 4))
-               && Character.isWhitespace(jdbcSql.charAt(i + 4))) {
-            isFunction = true;
-          }
+        return list.toArray();
+    }
+
+    /**
+     * <p>Find the end of the single-quoted string starting at the given offset.</p>
+     *
+     * <p>Note: for {@code 'single '' quote in string'}, this method currently returns the offset of
+     * first {@code '} character after the initial one. The caller must call the method a second time
+     * for the second part of the quoted string.</p>
+     *
+     * @param query                     query
+     * @param offset                    start offset
+     * @param standardConformingStrings standard conforming strings
+     * @return position of the end of the single-quoted string
+     */
+    public static int parseSingleQuotes(final char[] query, int offset,
+                                        boolean standardConformingStrings) {
+        // check for escape string syntax (E'')
+        if (standardConformingStrings
+                && offset >= 2
+                && (query[offset - 1] == 'e' || query[offset - 1] == 'E')
+                && charTerminatesIdentifier(query[offset - 2])) {
+            standardConformingStrings = false;
+        }
+
+        if (standardConformingStrings) {
+            // do NOT treat backslashes as escape characters
+            while (++offset < query.length) {
+                if (query[offset] == '\'') {
+                    return offset;
+                }
+            }
+        } else {
+            // treat backslashes as escape characters
+            while (++offset < query.length) {
+                switch (query[offset]) {
+                    case '\\':
+                        ++offset;
+                        break;
+                    case '\'':
+                        return offset;
+                    default:
+                        break;
+                }
+            }
+        }
+
+        return query.length;
+    }
+
+    /**
+     * <p>Find the end of the double-quoted string starting at the given offset.</p>
+     *
+     * <p>Note: for {@code "double "" quote in string"}, this method currently
+     * returns the offset of first {@code &quot;} character after the initial one. The caller must
+     * call the method a second time for the second part of the quoted string.</p>
+     *
+     * @param query  query
+     * @param offset start offset
+     * @return position of the end of the double-quoted string
+     */
+    public static int parseDoubleQuotes(final char[] query, int offset) {
+        while (++offset < query.length && query[offset] != '"') {
+            // do nothing
+        }
+        return offset;
+    }
+
    /**
     * Test if the dollar character ({@code $}) at the given offset starts a dollar-quoted string and
     * return the offset of the ending dollar character.
     *
     * <p>Returns the original {@code offset} unchanged when the {@code $} does not start a valid
     * dollar-quote tag (e.g. when it is part of a preceding identifier).</p>
     *
     * @param query  query
     * @param offset start offset
     * @return offset of the ending dollar character
     */
    public static int parseDollarQuotes(final char[] query, int offset) {
        // A '$' can only open a dollar quote when it is not a continuation of an identifier.
        if (offset + 1 < query.length
                && (offset == 0 || !isIdentifierContChar(query[offset - 1]))) {
            int endIdx = -1;
            if (query[offset + 1] == '$') {
                // anonymous tag: $$
                endIdx = offset + 1;
            } else if (isDollarQuoteStartChar(query[offset + 1])) {
                // named tag: $foo$ -- scan forward for the '$' that closes the tag
                for (int d = offset + 2; d < query.length; d++) {
                    if (query[d] == '$') {
                        endIdx = d;
                        break;
                    } else if (!isDollarQuoteContChar(query[d])) {
                        // invalid tag character: not a dollar quote after all
                        break;
                    }
                }
            }
            if (endIdx > 0) {
                // found; note: tag includes start and end $ character
                int tagIdx = offset;
                int tagLen = endIdx - offset + 1;
                offset = endIdx; // loop continues at endIdx + 1
                // scan the quoted body for a '$' that starts an identical closing tag
                for (++offset; offset < query.length; offset++) {
                    if (query[offset] == '$'
                            && subArraysEqual(query, tagIdx, offset, tagLen)) {
                        // advance to the last character of the closing tag
                        offset += tagLen - 1;
                        break;
                    }
                }
            }
        }
        return offset;
    }
+
+    /**
+     * Test if the {@code -} character at {@code offset} starts a {@code --} style line comment,
+     * and return the position of the first {@code \r} or {@code \n} character.
+     *
+     * @param query  query
+     * @param offset start offset
+     * @return position of the first {@code \r} or {@code \n} character
+     */
+    public static int parseLineComment(final char[] query, int offset) {
+        if (offset + 1 < query.length && query[offset + 1] == '-') {
+            while (offset + 1 < query.length) {
+                offset++;
+                if (query[offset] == '\r' || query[offset] == '\n') {
+                    break;
+                }
+            }
+        }
+        return offset;
+    }
+
+    /**
+     * Test if the {@code /} character at {@code offset} starts a block comment, and return the
+     * position of the last {@code /} character.
+     *
+     * @param query  query
+     * @param offset start offset
+     * @return position of the last {@code /} character
+     */
+    public static int parseBlockComment(final char[] query, int offset) {
+        if (offset + 1 < query.length && query[offset + 1] == '*') {
+            // /* /* */ */ nest, according to SQL spec
+            int level = 1;
+            for (offset += 2; offset < query.length; offset++) {
+                switch (query[offset - 1]) {
+                    case '*':
+                        if (query[offset] == '/') {
+                            --level;
+                            ++offset; // don't parse / in */* twice
+                        }
+                        break;
+                    case '/':
+                        if (query[offset] == '*') {
+                            ++level;
+                            ++offset; // don't parse * in /*/ twice
+                        }
+                        break;
+                    default:
+                        break;
+                }
+
+                if (level == 0) {
+                    --offset; // reset position to last '/' char
+                    break;
+                }
+            }
+        }
+        return offset;
+    }
+
+    /**
+     * Parse string to check presence of DELETE keyword regardless of case. The initial character is
+     * assumed to have been matched.
+     *
+     * @param query  char[] of the query statement
+     * @param offset position of query to start checking
+     * @return boolean indicates presence of word
+     */
+    public static boolean parseDeleteKeyword(final char[] query, int offset) {
+        if (query.length < (offset + 6)) {
+            return false;
+        }
+
+        return (query[offset] | 32) == 'd'
+                && (query[offset + 1] | 32) == 'e'
+                && (query[offset + 2] | 32) == 'l'
+                && (query[offset + 3] | 32) == 'e'
+                && (query[offset + 4] | 32) == 't'
+                && (query[offset + 5] | 32) == 'e';
+    }
+
+    /**
+     * Parse string to check presence of INSERT keyword regardless of case.
+     *
+     * @param query  char[] of the query statement
+     * @param offset position of query to start checking
+     * @return boolean indicates presence of word
+     */
+    public static boolean parseInsertKeyword(final char[] query, int offset) {
+        if (query.length < (offset + 7)) {
+            return false;
+        }
+
+        return (query[offset] | 32) == 'i'
+                && (query[offset + 1] | 32) == 'n'
+                && (query[offset + 2] | 32) == 's'
+                && (query[offset + 3] | 32) == 'e'
+                && (query[offset + 4] | 32) == 'r'
+                && (query[offset + 5] | 32) == 't';
+    }
+
+    /**
+     * Parse string to check presence of BEGIN keyword regardless of case.
+     *
+     * @param query  char[] of the query statement
+     * @param offset position of query to start checking
+     * @return boolean indicates presence of word
+     */
+
+    public static boolean parseBeginKeyword(final char[] query, int offset) {
+        if (query.length < (offset + 6)) {
+            return false;
+        }
+        return (query[offset] | 32) == 'b'
+                && (query[offset + 1] | 32) == 'e'
+                && (query[offset + 2] | 32) == 'g'
+                && (query[offset + 3] | 32) == 'i'
+                && (query[offset + 4] | 32) == 'n';
+    }
+
+    /**
+     * Parse string to check presence of ATOMIC keyword regardless of case.
+     *
+     * @param query  char[] of the query statement
+     * @param offset position of query to start checking
+     * @return boolean indicates presence of word
+     */
+    public static boolean parseAtomicKeyword(final char[] query, int offset) {
+        if (query.length < (offset + 7)) {
+            return false;
+        }
+        return (query[offset] | 32) == 'a'
+                && (query[offset + 1] | 32) == 't'
+                && (query[offset + 2] | 32) == 'o'
+                && (query[offset + 3] | 32) == 'm'
+                && (query[offset + 4] | 32) == 'i'
+                && (query[offset + 5] | 32) == 'c';
+    }
+
+    /**
+     * Parse string to check presence of MOVE keyword regardless of case.
+     *
+     * @param query  char[] of the query statement
+     * @param offset position of query to start checking
+     * @return boolean indicates presence of word
+     */
+    public static boolean parseMoveKeyword(final char[] query, int offset) {
+        if (query.length < (offset + 4)) {
+            return false;
+        }
+
+        return (query[offset] | 32) == 'm'
+                && (query[offset + 1] | 32) == 'o'
+                && (query[offset + 2] | 32) == 'v'
+                && (query[offset + 3] | 32) == 'e';
+    }
+
+    /**
+     * Parse string to check presence of RETURNING keyword regardless of case.
+     *
+     * @param query  char[] of the query statement
+     * @param offset position of query to start checking
+     * @return boolean indicates presence of word
+     */
+    public static boolean parseReturningKeyword(final char[] query, int offset) {
+        if (query.length < (offset + 9)) {
+            return false;
+        }
+
+        return (query[offset] | 32) == 'r'
+                && (query[offset + 1] | 32) == 'e'
+                && (query[offset + 2] | 32) == 't'
+                && (query[offset + 3] | 32) == 'u'
+                && (query[offset + 4] | 32) == 'r'
+                && (query[offset + 5] | 32) == 'n'
+                && (query[offset + 6] | 32) == 'i'
+                && (query[offset + 7] | 32) == 'n'
+                && (query[offset + 8] | 32) == 'g';
+    }
+
+    /**
+     * Parse string to check presence of SELECT keyword regardless of case.
+     *
+     * @param query  char[] of the query statement
+     * @param offset position of query to start checking
+     * @return boolean indicates presence of word
+     */
+    public static boolean parseSelectKeyword(final char[] query, int offset) {
+        if (query.length < (offset + 6)) {
+            return false;
+        }
+
+        return (query[offset] | 32) == 's'
+                && (query[offset + 1] | 32) == 'e'
+                && (query[offset + 2] | 32) == 'l'
+                && (query[offset + 3] | 32) == 'e'
+                && (query[offset + 4] | 32) == 'c'
+                && (query[offset + 5] | 32) == 't';
+    }
+
+    /**
+     * Parse string to check presence of CREATE keyword regardless of case.
+     *
+     * @param query  char[] of the query statement
+     * @param offset position of query to start checking
+     * @return boolean indicates presence of word
+     */
+    public static boolean parseAlterKeyword(final char[] query, int offset) {
+        if (query.length < (offset + 5)) {
+            return false;
+        }
+
+        return (query[offset] | 32) == 'a'
+                && (query[offset + 1] | 32) == 'l'
+                && (query[offset + 2] | 32) == 't'
+                && (query[offset + 3] | 32) == 'e'
+                && (query[offset + 4] | 32) == 'r';
+    }
+
+    /**
+     * Parse string to check presence of CREATE keyword regardless of case.
+     *
+     * @param query  char[] of the query statement
+     * @param offset position of query to start checking
+     * @return boolean indicates presence of word
+     */
+    public static boolean parseCreateKeyword(final char[] query, int offset) {
+        if (query.length < (offset + 6)) {
+            return false;
+        }
+
+        return (query[offset] | 32) == 'c'
+                && (query[offset + 1] | 32) == 'r'
+                && (query[offset + 2] | 32) == 'e'
+                && (query[offset + 3] | 32) == 'a'
+                && (query[offset + 4] | 32) == 't'
+                && (query[offset + 5] | 32) == 'e';
+    }
+
+    /**
+     * Parse string to check presence of UPDATE keyword regardless of case.
+     *
+     * @param query  char[] of the query statement
+     * @param offset position of query to start checking
+     * @return boolean indicates presence of word
+     */
+    public static boolean parseUpdateKeyword(final char[] query, int offset) {
+        if (query.length < (offset + 6)) {
+            return false;
+        }
+
+        return (query[offset] | 32) == 'u'
+                && (query[offset + 1] | 32) == 'p'
+                && (query[offset + 2] | 32) == 'd'
+                && (query[offset + 3] | 32) == 'a'
+                && (query[offset + 4] | 32) == 't'
+                && (query[offset + 5] | 32) == 'e';
+    }
+
+    /**
+     * Parse string to check presence of VALUES keyword regardless of case.
+     *
+     * @param query  char[] of the query statement
+     * @param offset position of query to start checking
+     * @return boolean indicates presence of word
+     */
+    public static boolean parseValuesKeyword(final char[] query, int offset) {
+        if (query.length < (offset + 6)) {
+            return false;
+        }
+
+        return (query[offset] | 32) == 'v'
+                && (query[offset + 1] | 32) == 'a'
+                && (query[offset + 2] | 32) == 'l'
+                && (query[offset + 3] | 32) == 'u'
+                && (query[offset + 4] | 32) == 'e'
+                && (query[offset + 5] | 32) == 's';
+    }
+
+    /**
+     * Faster version of {@link Long#parseLong(String)} when parsing a substring is required
+     *
+     * @param s          string to parse
+     * @param beginIndex begin index
+     * @param endIndex   end index
+     * @return long value
+     */
+    public static long parseLong(String s, int beginIndex, int endIndex) {
+        // Fallback to default implementation in case the string is long
+        if (endIndex - beginIndex > 16) {
+            return Long.parseLong(s.substring(beginIndex, endIndex));
+        }
+        long res = digitAt(s, beginIndex);
+        for (beginIndex++; beginIndex < endIndex; beginIndex++) {
+            res = res * 10 + digitAt(s, beginIndex);
+        }
+        return res;
+    }
+
+    /**
+     * Parse string to check presence of WITH keyword regardless of case.
+     *
+     * @param query  char[] of the query statement
+     * @param offset position of query to start checking
+     * @return boolean indicates presence of word
+     */
+    public static boolean parseWithKeyword(final char[] query, int offset) {
+        if (query.length < (offset + 4)) {
+            return false;
+        }
+
+        return (query[offset] | 32) == 'w'
+                && (query[offset + 1] | 32) == 'i'
+                && (query[offset + 2] | 32) == 't'
+                && (query[offset + 3] | 32) == 'h';
+    }
+
+    /**
+     * Parse string to check presence of AS keyword regardless of case.
+     *
+     * @param query  char[] of the query statement
+     * @param offset position of query to start checking
+     * @return boolean indicates presence of word
+     */
+    public static boolean parseAsKeyword(final char[] query, int offset) {
+        if (query.length < (offset + 2)) {
+            return false;
+        }
+
+        return (query[offset] | 32) == 'a'
+                && (query[offset + 1] | 32) == 's';
+    }
+
+    /**
+     * Returns true if a given string {@code s} has digit at position {@code pos}.
+     *
+     * @param s   input string
+     * @param pos position (0-based)
+     * @return true if input string s has digit at position pos
+     */
+    public static boolean isDigitAt(String s, int pos) {
+        return pos > 0 && pos < s.length() && Character.isDigit(s.charAt(pos));
+    }
+
+    /**
+     * Converts digit at position {@code pos} in string {@code s} to integer or throws.
+     *
+     * @param s   input string
+     * @param pos position (0-based)
+     * @return integer value of a digit at position pos
+     * @throws NumberFormatException if character at position pos is not an integer
+     */
+    public static int digitAt(String s, int pos) {
+        int c = s.charAt(pos) - '0';
+        if (c < 0 || c > 9) {
+            throw new NumberFormatException("Input string: \"" + s + "\", position: " + pos);
+        }
+        return c;
+    }
+
+    /**
+     * Identifies characters which the backend scanner considers to be whitespace.
+     *
+     * <p>
+     * https://github.com/postgres/postgres/blob/17bb62501787c56e0518e61db13a523d47afd724/src/backend/parser/scan.l#L194-L198
+     * </p>
+     *
+     * @param c character
+     * @return true if the character is a whitespace character as defined in the backend's parser
+     */
+    public static boolean isSpace(char c) {
+        return c == ' ' || c == '\t' || c == '\n' || c == '\r' || c == '\f';
+    }
+
+    /**
+     * Identifies white space characters which the backend uses to determine if a
+     * {@code String} value needs to be quoted in array representation.
+     *
+     * <p>
+     * https://github.com/postgres/postgres/blob/f2c587067a8eb9cf1c8f009262381a6576ba3dd0/src/backend/utils/adt/arrayfuncs.c#L421-L438
+     * </p>
+     *
+     * @param c Character to examine.
+     * @return Indication if the character is a whitespace which back end will
+     * escape.
+     */
+    public static boolean isArrayWhiteSpace(char c) {
+        return c == ' ' || c == '\t' || c == '\n' || c == '\r' || c == '\f' || c == 0x0B;
+    }
+
+    /**
+     * @param c character
+     * @return true if the given character is a valid character for an operator in the backend's
+     * parser
+     */
+    public static boolean isOperatorChar(char c) {
+        /*
+         * Extracted from operators defined by {self} and {op_chars}
+         * in pgsql/src/backend/parser/scan.l.
+         */
+        return ",()[].;:+-*/%^<>=~!@#&|`?".indexOf(c) != -1;
+    }
+
+    /**
+     * Checks if a character is valid as the start of an identifier.
+     * PostgreSQL 9.4 allows column names like _, ‿, ⁀, ⁔, ︳, ︴, ﹍, ﹎, ﹏, _, so
+     * it is assumed isJavaIdentifierPart is good enough for PostgreSQL.
+     *
+     * @param c the character to check
+     * @return true if valid as first character of an identifier; false if not
+     * @see <a href="https://www.postgresql.org/docs/9.6/static/sql-syntax-lexical.html#SQL-SYNTAX-IDENTIFIERS">Identifiers and Key Words</a>
+     */
+    public static boolean isIdentifierStartChar(char c) {
+        /*
+         * PostgreSQL's implementation is located in
+         * pgsql/src/backend/parser/scan.l:
+         * ident_start    [A-Za-z\200-\377_]
+         * ident_cont     [A-Za-z\200-\377_0-9\$]
+         * however it is not clear how that interacts with unicode, so we just use Java's implementation.
+         */
+        return Character.isJavaIdentifierStart(c);
+    }
+
+    /**
+     * Checks if a character is valid as the second or later character of an identifier.
+     *
+     * @param c the character to check
+     * @return true if valid as second or later character of an identifier; false if not
+     */
+    public static boolean isIdentifierContChar(char c) {
+        return Character.isJavaIdentifierPart(c);
+    }
+
+    /**
+     * @param c character
+     * @return true if the character terminates an identifier
+     */
+    public static boolean charTerminatesIdentifier(char c) {
+        return c == '"' || isSpace(c) || isOperatorChar(c);
+    }
+
+    /**
+     * Checks if a character is valid as the start of a dollar quoting tag.
+     *
+     * @param c the character to check
+     * @return true if valid as first character of a dollar quoting tag; false if not
+     */
+    public static boolean isDollarQuoteStartChar(char c) {
+        /*
+         * The allowed dollar quote start and continuation characters
+         * must stay in sync with what the backend defines in
+         * pgsql/src/backend/parser/scan.l
+         *
+         * The quoted string starts with $foo$ where "foo" is an optional string
+         * in the form of an identifier, except that it may not contain "$",
+         * and extends to the first occurrence of an identical string.
+         * There is *no* processing of the quoted text.
+         */
+        return c != '$' && isIdentifierStartChar(c);
+    }
+
+    /**
+     * Checks if a character is valid as the second or later character of a dollar quoting tag.
+     *
+     * @param c the character to check
+     * @return true if valid as second or later character of a dollar quoting tag; false if not
+     */
+    public static boolean isDollarQuoteContChar(char c) {
+        return c != '$' && isIdentifierContChar(c);
+    }
+
+    /**
+     * Compares two sub-arrays of the given character array for equalness. If the length is zero, the
+     * result is true if and only if the offsets are within the bounds of the array.
+     *
+     * @param arr  a char array
+     * @param offA first sub-array start offset
+     * @param offB second sub-array start offset
+     * @param len  length of the sub arrays to compare
+     * @return true if the sub-arrays are equal; false if not
+     */
+    private static boolean subArraysEqual(final char[] arr,
+                                          final int offA, final int offB,
+                                          final int len) {
+        if (offA < 0 || offB < 0
+                || offA >= arr.length || offB >= arr.length
+                || offA + len > arr.length || offB + len > arr.length) {
+            return false;
+        }
+
+        for (int i = 0; i < len; i++) {
+            if (arr[offA + i] != arr[offB + i]) {
+                return false;
+            }
+        }
+
+        return true;
+    }
+
+    /**
+     * Converts JDBC-specific callable statement escapes {@code { [? =] call <some_function> [(?,
+     * [?,..])] }} into the PostgreSQL format which is {@code select <some_function> (?, [?, ...]) as
+     * result} or {@code select * from <some_function> (?, [?, ...]) as result} (7.3)
+     *
+     * @param jdbcSql              sql text with JDBC escapes
+     * @param stdStrings           if backslash in single quotes should be regular character or escape one
+     * @param serverVersion        server version
+     * @param protocolVersion      protocol version
+     * @param escapeSyntaxCallMode mode specifying whether JDBC escape call syntax is transformed into a CALL/SELECT statement
+     * @return SQL in appropriate for given server format
+     * @throws SQLException if given SQL is malformed
+     */
+    public static JdbcCallParseInfo modifyJdbcCall(String jdbcSql, boolean stdStrings,
+                                                   int serverVersion, int protocolVersion, EscapeSyntaxCallMode escapeSyntaxCallMode) throws SQLException {
+        // Mini-parser for JDBC function-call syntax (only)
+        // TODO: Merge with escape processing (and parameter parsing?) so we only parse each query once.
+        // RE: frequently used statements are cached (see {@link org.postgresql.jdbc.PgConnection#borrowQuery}), so this "merge" is not that important.
+        String sql = jdbcSql;
+        boolean isFunction = false;
+        boolean outParamBeforeFunc = false;
+
+        int len = jdbcSql.length();
+        int state = 1;
+        boolean inQuotes = false;
+        boolean inEscape = false;
+        int startIndex = -1;
+        int endIndex = -1;
+        boolean syntaxError = false;
+        int i = 0;
+
+        while (i < len && !syntaxError) {
+            char ch = jdbcSql.charAt(i);
+
+            switch (state) {
+                case 1:  // Looking for { at start of query
+                    if (ch == '{') {
+                        ++i;
+                        ++state;
+                    } else if (Character.isWhitespace(ch)) {
+                        ++i;
+                    } else {
+                        // Not function-call syntax. Skip the rest of the string.
+                        i = len;
+                    }
+                    break;
+
+                case 2:  // After {, looking for ? or =, skipping whitespace
+                    if (ch == '?') {
+                        outParamBeforeFunc =
+                                isFunction = true;   // { ? = call ... }  -- function with one out parameter
+                        ++i;
+                        ++state;
+                    } else if (ch == 'c' || ch == 'C') {  // { call ... }      -- proc with no out parameters
+                        state += 3; // Don't increase 'i'
+                    } else if (Character.isWhitespace(ch)) {
+                        ++i;
+                    } else {
+                        // "{ foo ...", doesn't make sense, complain.
+                        syntaxError = true;
+                    }
+                    break;
+
+                case 3:  // Looking for = after ?, skipping whitespace
+                    if (ch == '=') {
+                        ++i;
+                        ++state;
+                    } else if (Character.isWhitespace(ch)) {
+                        ++i;
+                    } else {
+                        syntaxError = true;
+                    }
+                    break;
+
+                case 4:  // Looking for 'call' after '? =' skipping whitespace
+                    if (ch == 'c' || ch == 'C') {
+                        ++state; // Don't increase 'i'.
+                    } else if (Character.isWhitespace(ch)) {
+                        ++i;
+                    } else {
+                        syntaxError = true;
+                    }
+                    break;
+
+                case 5:  // Should be at 'call ' either at start of string or after ?=
+                    if ((ch == 'c' || ch == 'C') && i + 4 <= len && "call"
+                            .equalsIgnoreCase(jdbcSql.substring(i, i + 4))) {
+                        isFunction = true;
+                        i += 4;
+                        ++state;
+                    } else if (Character.isWhitespace(ch)) {
+                        ++i;
+                    } else {
+                        syntaxError = true;
+                    }
+                    break;
+
+                case 6:  // Looking for whitespace char after 'call'
+                    if (Character.isWhitespace(ch)) {
+                        // Ok, we found the start of the real call.
+                        ++i;
+                        ++state;
+                        startIndex = i;
+                    } else {
+                        syntaxError = true;
+                    }
+                    break;
+
+                case 7:  // In "body" of the query (after "{ [? =] call ")
+                    if (ch == '\'') {
+                        inQuotes = !inQuotes;
+                        ++i;
+                    } else if (inQuotes && ch == '\\' && !stdStrings) {
+                        // Backslash in string constant, skip next character.
+                        i += 2;
+                    } else if (!inQuotes && ch == '{') {
+                        inEscape = !inEscape;
+                        ++i;
+                    } else if (!inQuotes && ch == '}') {
+                        if (!inEscape) {
+                            // Should be end of string.
+                            endIndex = i;
+                            ++i;
+                            ++state;
+                        } else {
+                            inEscape = false;
+                        }
+                    } else if (!inQuotes && ch == ';') {
+                        syntaxError = true;
+                    } else {
+                        // Everything else is ok.
+                        ++i;
+                    }
+                    break;
+
+                case 8:  // At trailing end of query, eating whitespace
+                    if (Character.isWhitespace(ch)) {
+                        ++i;
+                    } else {
+                        syntaxError = true;
+                    }
+                    break;
+
+                default:
+                    throw new IllegalStateException("somehow got into bad state " + state);
+            }
+        }
+
+        // We can only legally end in a couple of states here.
+        if (i == len && !syntaxError) {
+            if (state == 1) {
+                // Not an escaped syntax.
+
+                // Detect PostgreSQL native CALL.
+                // (OUT parameter registration, needed for stored procedures with INOUT arguments, will fail without this)
+                i = 0;
+                while (i < len && Character.isWhitespace(jdbcSql.charAt(i))) {
+                    i++; // skip any preceding whitespace
+                }
+                if (i < len - 5) { // 5 == length of "call" + 1 whitespace
+                    // Check for CALL followed by whitespace
+                    char ch = jdbcSql.charAt(i);
+                    if ((ch == 'c' || ch == 'C') && "call".equalsIgnoreCase(jdbcSql.substring(i, i + 4))
+                            && Character.isWhitespace(jdbcSql.charAt(i + 4))) {
+                        isFunction = true;
+                    }
+                }
+                return new JdbcCallParseInfo(sql, isFunction);
+            }
+            if (state != 8) {
+                syntaxError = true; // Ran out of query while still parsing
+            }
+        }
+
+        if (syntaxError) {
+            throw new PSQLException(
+                    GT.tr("Malformed function or procedure escape syntax at offset {0}.", i),
+                    PSQLState.STATEMENT_NOT_ALLOWED_IN_FUNCTION_CALL);
+        }
+
+        String prefix;
+        String suffix;
+        if (escapeSyntaxCallMode == EscapeSyntaxCallMode.SELECT || serverVersion < 110000
+                || (outParamBeforeFunc && escapeSyntaxCallMode == EscapeSyntaxCallMode.CALL_IF_NO_RETURN)) {
+            prefix = "select * from ";
+            suffix = " as result";
+        } else {
+            prefix = "call ";
+            suffix = "";
+        }
+
+        String s = jdbcSql.substring(startIndex, endIndex);
+        int prefixLength = prefix.length();
+        StringBuilder sb = new StringBuilder(prefixLength + jdbcSql.length() + suffix.length() + 10);
+        sb.append(prefix);
+        sb.append(s);
+
+        int opening = s.indexOf('(') + 1;
+        if (opening == 0) {
+            // here the function call has no parameter declaration, e.g. "{ ? = call pack_getValue}"
+            sb.append(outParamBeforeFunc ? "(?)" : "()");
+        } else if (outParamBeforeFunc) {
+            // move the single out parameter into the function call
+            // so that it can be treated like all other parameters
+            boolean needComma = false;
+
+            // the following loop will check if the function call has parameters
+            // e.g. "{ ? = call pack_getValue(?) }" vs "{ ? = call pack_getValue() }"
+            for (int j = opening + prefixLength; j < sb.length(); j++) {
+                char c = sb.charAt(j);
+                if (c == ')') {
+                    break;
+                }
+
+                if (!Character.isWhitespace(c)) {
+                    needComma = true;
+                    break;
+                }
+            }
+
+            // insert the return parameter as the first parameter of the function call
+            if (needComma) {
+                sb.insert(opening + prefixLength, "?,");
+            } else {
+                sb.insert(opening + prefixLength, "?");
+            }
+        }
+
+        if (!suffix.isEmpty()) {
+            sql = sb.append(suffix).toString();
+        } else {
+            sql = sb.toString();
         }
         return new JdbcCallParseInfo(sql, isFunction);
-      }
-      if (state != 8) {
-        syntaxError = true; // Ran out of query while still parsing
-      }
     }
 
-    if (syntaxError) {
-      throw new PSQLException(
-          GT.tr("Malformed function or procedure escape syntax at offset {0}.", i),
-          PSQLState.STATEMENT_NOT_ALLOWED_IN_FUNCTION_CALL);
-    }
-
-    String prefix;
-    String suffix;
-    if (escapeSyntaxCallMode == EscapeSyntaxCallMode.SELECT || serverVersion < 110000
-        || (outParamBeforeFunc && escapeSyntaxCallMode == EscapeSyntaxCallMode.CALL_IF_NO_RETURN)) {
-      prefix = "select * from ";
-      suffix = " as result";
-    } else {
-      prefix = "call ";
-      suffix = "";
-    }
-
-    String s = jdbcSql.substring(startIndex, endIndex);
-    int prefixLength = prefix.length();
-    StringBuilder sb = new StringBuilder(prefixLength + jdbcSql.length() + suffix.length() + 10);
-    sb.append(prefix);
-    sb.append(s);
-
-    int opening = s.indexOf('(') + 1;
-    if (opening == 0) {
-      // here the function call has no parameters declaration eg : "{ ? = call pack_getValue}"
-      sb.append(outParamBeforeFunc ? "(?)" : "()");
-    } else if (outParamBeforeFunc) {
-      // move the single out parameter into the function call
-      // so that it can be treated like all other parameters
-      boolean needComma = false;
-
-      // the following loop will check if the function call has parameters
-      // eg "{ ? = call pack_getValue(?) }" vs "{ ? = call pack_getValue() }
-      for (int j = opening + prefixLength; j < sb.length(); j++) {
-        char c = sb.charAt(j);
-        if (c == ')') {
-          break;
+    /**
+     * <p>Filter the SQL string of Java SQL Escape clauses.</p>
+     *
+     * <p>Currently implemented Escape clauses are those mentioned in 11.3 in the specification.
+     * Basically we look through the sql string for {d xxx}, {t xxx}, {ts xxx}, {oj xxx} or {fn xxx}
+     * in non-string sql code. When we find them, we just strip the escape part leaving only the xxx
+     * part. So, something like "select * from x where d={d '2001-10-09'}" would return "select * from
+     * x where d= '2001-10-09'".</p>
+     *
+     * @param sql                       the original query text
+     * @param replaceProcessingEnabled  whether replace_processing_enabled is on
+     * @param standardConformingStrings whether standard_conforming_strings is on
+     * @return PostgreSQL-compatible SQL
+     * @throws SQLException if given SQL is wrong
+     */
+    public static String replaceProcessing(String sql, boolean replaceProcessingEnabled,
+                                           boolean standardConformingStrings) throws SQLException {
+        if (replaceProcessingEnabled) {
+            // Since escape codes can only appear in SQL CODE, we keep track
+            // of if we enter a string or not.
+            int len = sql.length();
+            char[] chars = sql.toCharArray();
+            StringBuilder newsql = new StringBuilder(len);
+            int i = 0;
+            while (i < len) {
+                i = parseSql(chars, i, newsql, false, standardConformingStrings);
+                // We need to loop here in case we encounter invalid
+                // SQL, consider: SELECT a FROM t WHERE (1 > 0)) ORDER BY a
+                // We can't end the replacement after the extra closing paren
+                // because that changes a syntax error to a valid query
+                // that isn't what the user specified.
+                if (i < len) {
+                    newsql.append(chars[i]);
+                    i++;
+                }
+            }
+            return newsql.toString();
+        } else {
+            return sql;
         }
+    }
 
-        if (!Character.isWhitespace(c)) {
-          needComma = true;
-          break;
+    /**
+     * parse the given sql from index i, appending it to the given buffer until we hit an unmatched
+     * right parentheses or end of string. When the stopOnComma flag is set we also stop processing
+     * when a comma is found in sql text that isn't inside nested parenthesis.
+     *
+     * @param sql         the original query text
+     * @param i           starting position for replacing
+     * @param newsql      where to write the replaced output
+     * @param stopOnComma should we stop after hitting the first comma in sql text?
+     * @param stdStrings  whether standard_conforming_strings is on
+     * @return the position we stopped processing at
+     * @throws SQLException if given SQL is wrong
+     */
+    private static int parseSql(char[] sql, int i, StringBuilder newsql, boolean stopOnComma,
+                                boolean stdStrings) throws SQLException {
+        SqlParseState state = SqlParseState.IN_SQLCODE;
+        int len = sql.length;
+        int nestedParenthesis = 0;
+        boolean endOfNested = false;
+
+        // because of the ++i loop
+        i--;
+        while (!endOfNested && ++i < len) {
+            char c = sql[i];
+
+            state_switch:
+            switch (state) {
+                case IN_SQLCODE:
+                    if (c == '$') {
+                        int i0 = i;
+                        i = parseDollarQuotes(sql, i);
+                        checkParsePosition(i, len, i0, sql,
+                                "Unterminated dollar quote started at position {0} in SQL {1}. Expected terminating $$");
+                        newsql.append(sql, i0, i - i0 + 1);
+                        break;
+                    } else if (c == '\'') {
+                        // start of a string?
+                        int i0 = i;
+                        i = parseSingleQuotes(sql, i, stdStrings);
+                        checkParsePosition(i, len, i0, sql,
+                                "Unterminated string literal started at position {0} in SQL {1}. Expected ' char");
+                        newsql.append(sql, i0, i - i0 + 1);
+                        break;
+                    } else if (c == '"') {
+                        // start of an identifier?
+                        int i0 = i;
+                        i = parseDoubleQuotes(sql, i);
+                        checkParsePosition(i, len, i0, sql,
+                                "Unterminated identifier started at position {0} in SQL {1}. Expected \" char");
+                        newsql.append(sql, i0, i - i0 + 1);
+                        break;
+                    } else if (c == '/') {
+                        int i0 = i;
+                        i = parseBlockComment(sql, i);
+                        checkParsePosition(i, len, i0, sql,
+                                "Unterminated block comment started at position {0} in SQL {1}. Expected */ sequence");
+                        newsql.append(sql, i0, i - i0 + 1);
+                        break;
+                    } else if (c == '-') {
+                        int i0 = i;
+                        i = parseLineComment(sql, i);
+                        newsql.append(sql, i0, i - i0 + 1);
+                        break;
+                    } else if (c == '(') { // begin nested sql
+                        nestedParenthesis++;
+                    } else if (c == ')') { // end of nested sql
+                        nestedParenthesis--;
+                        if (nestedParenthesis < 0) {
+                            endOfNested = true;
+                            break;
+                        }
+                    } else if (stopOnComma && c == ',' && nestedParenthesis == 0) {
+                        endOfNested = true;
+                        break;
+                    } else if (c == '{') { // start of an escape code?
+                        if (i + 1 < len) {
+                            SqlParseState[] availableStates = SqlParseState.VALUES;
+                            // skip the first state, it's not an escape code state
+                            for (int j = 1; j < availableStates.length; j++) {
+                                SqlParseState availableState = availableStates[j];
+                                int matchedPosition = availableState.getMatchedPosition(sql, i + 1);
+                                if (matchedPosition == 0) {
+                                    continue;
+                                }
+                                i += matchedPosition;
+                                if (availableState.replacementKeyword != null) {
+                                    newsql.append(availableState.replacementKeyword);
+                                }
+                                state = availableState;
+                                break state_switch;
+                            }
+                        }
+                    }
+                    newsql.append(c);
+                    break;
+
+                case ESC_FUNCTION:
+                    // extract function name
+                    i = escapeFunction(sql, i, newsql, stdStrings);
+                    state = SqlParseState.IN_SQLCODE; // end of escaped function (or query)
+                    break;
+                case ESC_DATE:
+                case ESC_TIME:
+                case ESC_TIMESTAMP:
+                case ESC_OUTERJOIN:
+                case ESC_ESCAPECHAR:
+                    if (c == '}') {
+                        state = SqlParseState.IN_SQLCODE; // end of escape code.
+                    } else {
+                        newsql.append(c);
+                    }
+                    break;
+            } // end switch
         }
-      }
-
-      // insert the return parameter as the first parameter of the function call
-      if (needComma) {
-        sb.insert(opening + prefixLength, "?,");
-      } else {
-        sb.insert(opening + prefixLength, "?");
-      }
+        return i;
     }
 
-    if (!suffix.isEmpty()) {
-      sql = sb.append(suffix).toString();
-    } else {
-      sql = sb.toString();
+    private static int findOpenParenthesis(char[] sql, int i) {
+        int posArgs = i;
+        while (posArgs < sql.length && sql[posArgs] != '(') {
+            posArgs++;
+        }
+        return posArgs;
     }
-    return new JdbcCallParseInfo(sql, isFunction);
-  }
 
-  /**
-   * <p>Filter the SQL string of Java SQL Escape clauses.</p>
-   *
-   * <p>Currently implemented Escape clauses are those mentioned in 11.3 in the specification.
-   * Basically we look through the sql string for {d xxx}, {t xxx}, {ts xxx}, {oj xxx} or {fn xxx}
-   * in non-string sql code. When we find them, we just strip the escape part leaving only the xxx
-   * part. So, something like "select * from x where d={d '2001-10-09'}" would return "select * from
-   * x where d= '2001-10-09'".</p>
-   *
-   * @param sql                       the original query text
-   * @param replaceProcessingEnabled  whether replace_processing_enabled is on
-   * @param standardConformingStrings whether standard_conforming_strings is on
-   * @return PostgreSQL-compatible SQL
-   * @throws SQLException if given SQL is wrong
-   */
-  public static String replaceProcessing(String sql, boolean replaceProcessingEnabled,
-      boolean standardConformingStrings) throws SQLException {
-    if (replaceProcessingEnabled) {
-      // Since escape codes can only appear in SQL CODE, we keep track
-      // of if we enter a string or not.
-      int len = sql.length();
-      char[] chars = sql.toCharArray();
-      StringBuilder newsql = new StringBuilder(len);
-      int i = 0;
-      while (i < len) {
-        i = parseSql(chars, i, newsql, false, standardConformingStrings);
-        // We need to loop here in case we encounter invalid
-        // SQL, consider: SELECT a FROM t WHERE (1 > 0)) ORDER BY a
-        // We can't ending replacing after the extra closing paren
-        // because that changes a syntax error to a valid query
-        // that isn't what the user specified.
+    private static void checkParsePosition(int i, int len, int i0, char[] sql,
+                                           String message)
+            throws PSQLException {
         if (i < len) {
-          newsql.append(chars[i]);
-          i++;
+            return;
         }
-      }
-      return newsql.toString();
-    } else {
-      return sql;
+        throw new PSQLException(
+                GT.tr(message, i0, new String(sql)),
+                PSQLState.SYNTAX_ERROR);
     }
-  }
 
-  /**
-   * parse the given sql from index i, appending it to the given buffer until we hit an unmatched
-   * right parentheses or end of string. When the stopOnComma flag is set we also stop processing
-   * when a comma is found in sql text that isn't inside nested parenthesis.
-   *
-   * @param sql the original query text
-   * @param i starting position for replacing
-   * @param newsql where to write the replaced output
-   * @param stopOnComma should we stop after hitting the first comma in sql text?
-   * @param stdStrings whether standard_conforming_strings is on
-   * @return the position we stopped processing at
-   * @throws SQLException if given SQL is wrong
-   */
-  private static int parseSql(char[] sql, int i, StringBuilder newsql, boolean stopOnComma,
-      boolean stdStrings) throws SQLException {
-    SqlParseState state = SqlParseState.IN_SQLCODE;
-    int len = sql.length;
-    int nestedParenthesis = 0;
-    boolean endOfNested = false;
+    private static int escapeFunction(char[] sql, int i, StringBuilder newsql, boolean stdStrings) throws SQLException {
+        String functionName;
+        int argPos = findOpenParenthesis(sql, i);
+        if (argPos < sql.length) {
+            functionName = new String(sql, i, argPos - i).trim();
+            // extract arguments
+            i = argPos + 1; // we start the scan after the first (
+            i = escapeFunctionArguments(newsql, functionName, sql, i, stdStrings);
+        }
+        // go to the end of the function copying anything found
+        i++;
+        while (i < sql.length && sql[i] != '}') {
+            newsql.append(sql[i++]);
+        }
+        return i;
+    }
 
-    // because of the ++i loop
-    i--;
-    while (!endOfNested && ++i < len) {
-      char c = sql[i];
-
-      state_switch:
-      switch (state) {
-        case IN_SQLCODE:
-          if (c == '$') {
-            int i0 = i;
-            i = parseDollarQuotes(sql, i);
-            checkParsePosition(i, len, i0, sql,
-                "Unterminated dollar quote started at position {0} in SQL {1}. Expected terminating $$");
-            newsql.append(sql, i0, i - i0 + 1);
-            break;
-          } else if (c == '\'') {
-            // start of a string?
-            int i0 = i;
-            i = parseSingleQuotes(sql, i, stdStrings);
-            checkParsePosition(i, len, i0, sql,
-                "Unterminated string literal started at position {0} in SQL {1}. Expected ' char");
-            newsql.append(sql, i0, i - i0 + 1);
-            break;
-          } else if (c == '"') {
-            // start of a identifier?
-            int i0 = i;
-            i = parseDoubleQuotes(sql, i);
-            checkParsePosition(i, len, i0, sql,
-                "Unterminated identifier started at position {0} in SQL {1}. Expected \" char");
-            newsql.append(sql, i0, i - i0 + 1);
-            break;
-          } else if (c == '/') {
-            int i0 = i;
-            i = parseBlockComment(sql, i);
-            checkParsePosition(i, len, i0, sql,
-                "Unterminated block comment started at position {0} in SQL {1}. Expected */ sequence");
-            newsql.append(sql, i0, i - i0 + 1);
-            break;
-          } else if (c == '-') {
-            int i0 = i;
-            i = parseLineComment(sql, i);
-            newsql.append(sql, i0, i - i0 + 1);
-            break;
-          } else if (c == '(') { // begin nested sql
-            nestedParenthesis++;
-          } else if (c == ')') { // end of nested sql
-            nestedParenthesis--;
-            if (nestedParenthesis < 0) {
-              endOfNested = true;
-              break;
+    /**
+     * Generate sql for escaped functions.
+     *
+     * @param newsql       destination StringBuilder
+     * @param functionName the escaped function name
+     * @param sql          input SQL text (containing arguments of a function call with possible JDBC escapes)
+     * @param i            position in the input SQL
+     * @param stdStrings   whether standard_conforming_strings is on
+     * @return the right PostgreSQL sql
+     * @throws SQLException if something goes wrong
+     */
+    private static int escapeFunctionArguments(StringBuilder newsql, String functionName, char[] sql, int i,
+                                               boolean stdStrings)
+            throws SQLException {
+        // Maximum arity of functions in EscapedFunctions is 3
+        List<CharSequence> parsedArgs = new ArrayList<>(3);
+        while (true) {
+            StringBuilder arg = new StringBuilder();
+            int lastPos = i;
+            i = parseSql(sql, i, arg, true, stdStrings);
+            if (i != lastPos) {
+                parsedArgs.add(arg);
             }
-          } else if (stopOnComma && c == ',' && nestedParenthesis == 0) {
-            endOfNested = true;
-            break;
-          } else if (c == '{') { // start of an escape code?
-            if (i + 1 < len) {
-              SqlParseState[] availableStates = SqlParseState.VALUES;
-              // skip first state, it's not a escape code state
-              for (int j = 1; j < availableStates.length; j++) {
-                SqlParseState availableState = availableStates[j];
-                int matchedPosition = availableState.getMatchedPosition(sql, i + 1);
-                if (matchedPosition == 0) {
-                  continue;
-                }
-                i += matchedPosition;
-                if (availableState.replacementKeyword != null) {
-                  newsql.append(availableState.replacementKeyword);
-                }
-                state = availableState;
-                break state_switch;
-              }
+            if (i >= sql.length // should not happen
+                    || sql[i] != ',') {
+                break;
             }
-          }
-          newsql.append(c);
-          break;
-
-        case ESC_FUNCTION:
-          // extract function name
-          i = escapeFunction(sql, i, newsql, stdStrings);
-          state = SqlParseState.IN_SQLCODE; // end of escaped function (or query)
-          break;
-        case ESC_DATE:
-        case ESC_TIME:
-        case ESC_TIMESTAMP:
-        case ESC_OUTERJOIN:
-        case ESC_ESCAPECHAR:
-          if (c == '}') {
-            state = SqlParseState.IN_SQLCODE; // end of escape code.
-          } else {
-            newsql.append(c);
-          }
-          break;
-      } // end switch
-    }
-    return i;
-  }
-
-  private static int findOpenParenthesis(char[] sql, int i) {
-    int posArgs = i;
-    while (posArgs < sql.length && sql[posArgs] != '(') {
-      posArgs++;
-    }
-    return posArgs;
-  }
-
-  private static void checkParsePosition(int i, int len, int i0, char[] sql,
-      String message)
-      throws PSQLException {
-    if (i < len) {
-      return;
-    }
-    throw new PSQLException(
-        GT.tr(message, i0, new String(sql)),
-        PSQLState.SYNTAX_ERROR);
-  }
-
-  private static int escapeFunction(char[] sql, int i, StringBuilder newsql, boolean stdStrings) throws SQLException {
-    String functionName;
-    int argPos = findOpenParenthesis(sql, i);
-    if (argPos < sql.length) {
-      functionName = new String(sql, i, argPos - i).trim();
-      // extract arguments
-      i = argPos + 1;// we start the scan after the first (
-      i = escapeFunctionArguments(newsql, functionName, sql, i, stdStrings);
-    }
-    // go to the end of the function copying anything found
-    i++;
-    while (i < sql.length && sql[i] != '}') {
-      newsql.append(sql[i++]);
-    }
-    return i;
-  }
-
-  /**
-   * Generate sql for escaped functions.
-   *
-   * @param newsql destination StringBuilder
-   * @param functionName the escaped function name
-   * @param sql input SQL text (containing arguments of a function call with possible JDBC escapes)
-   * @param i position in the input SQL
-   * @param stdStrings whether standard_conforming_strings is on
-   * @return the right PostgreSQL sql
-   * @throws SQLException if something goes wrong
-   */
-  private static int escapeFunctionArguments(StringBuilder newsql, String functionName, char[] sql, int i,
-      boolean stdStrings)
-      throws SQLException {
-    // Maximum arity of functions in EscapedFunctions is 3
-    List<CharSequence> parsedArgs = new ArrayList<>(3);
-    while (true) {
-      StringBuilder arg = new StringBuilder();
-      int lastPos = i;
-      i = parseSql(sql, i, arg, true, stdStrings);
-      if (i != lastPos) {
-        parsedArgs.add(arg);
-      }
-      if (i >= sql.length // should not happen
-          || sql[i] != ',') {
-        break;
-      }
-      i++;
-    }
-    Method method = EscapedFunctions2.getFunction(functionName);
-    if (method == null) {
-      newsql.append(functionName);
-      EscapedFunctions2.appendCall(newsql, "(", ",", ")", parsedArgs);
-      return i;
-    }
-    try {
-      method.invoke(null, newsql, parsedArgs);
-    } catch (InvocationTargetException e) {
-      Throwable targetException = e.getTargetException();
-      if (targetException instanceof SQLException) {
-        throw (SQLException) targetException;
-      } else {
-        String message = targetException == null ? "no message" : targetException.getMessage();
-        throw new PSQLException(message, PSQLState.SYSTEM_ERROR);
-      }
-    } catch (IllegalAccessException e) {
-      throw new PSQLException(e.getMessage(), PSQLState.SYSTEM_ERROR);
-    }
-    return i;
-  }
-
-  private static final char[] QUOTE_OR_ALPHABETIC_MARKER = {'\"', '0'};
-  private static final char[] QUOTE_OR_ALPHABETIC_MARKER_OR_PARENTHESIS = {'\"', '0', '('};
-  private static final char[] SINGLE_QUOTE = {'\''};
-
-  // Static variables for parsing SQL when replaceProcessing is true.
-  private enum SqlParseState {
-    IN_SQLCODE,
-    ESC_DATE("d", SINGLE_QUOTE, "DATE "),
-    ESC_TIME("t", SINGLE_QUOTE, "TIME "),
-
-    ESC_TIMESTAMP("ts", SINGLE_QUOTE, "TIMESTAMP "),
-    ESC_FUNCTION("fn", QUOTE_OR_ALPHABETIC_MARKER, null),
-    ESC_OUTERJOIN("oj", QUOTE_OR_ALPHABETIC_MARKER_OR_PARENTHESIS, null),
-    ESC_ESCAPECHAR("escape", SINGLE_QUOTE, "ESCAPE ");
-
-    private static final SqlParseState[] VALUES = values();
-
-    private final char[] escapeKeyword;
-    private final char[] allowedValues;
-    private final String replacementKeyword;
-
-    SqlParseState() {
-      this("", new char[0], null);
-    }
-
-    SqlParseState(String escapeKeyword, char[] allowedValues,
-        String replacementKeyword) {
-      this.escapeKeyword = escapeKeyword.toCharArray();
-      this.allowedValues = allowedValues;
-      this.replacementKeyword = replacementKeyword;
-    }
-
-    private boolean startMatches(char[] sql, int pos) {
-      // check for the keyword
-      for (char c : escapeKeyword) {
-        if (pos >= sql.length) {
-          return false;
+            i++;
         }
-        char curr = sql[pos++];
-        if (curr != c && curr != Character.toUpperCase(c)) {
-          return false;
+        Method method = EscapedFunctions2.getFunction(functionName);
+        if (method == null) {
+            newsql.append(functionName);
+            EscapedFunctions2.appendCall(newsql, "(", ",", ")", parsedArgs);
+            return i;
         }
-      }
-      return pos < sql.length;
+        try {
+            method.invoke(null, newsql, parsedArgs);
+        } catch (InvocationTargetException e) {
+            Throwable targetException = e.getTargetException();
+            if (targetException instanceof SQLException) {
+                throw (SQLException) targetException;
+            } else {
+                String message = targetException == null ? "no message" : targetException.getMessage();
+                throw new PSQLException(message, PSQLState.SYSTEM_ERROR);
+            }
+        } catch (IllegalAccessException e) {
+            throw new PSQLException(e.getMessage(), PSQLState.SYSTEM_ERROR);
+        }
+        return i;
     }
 
-    private int getMatchedPosition(char[] sql, int pos) {
-      // check for the keyword
-      if (!startMatches(sql, pos)) {
-        return 0;
-      }
+    // Static variables for parsing SQL when replaceProcessing is true.
+    private enum SqlParseState {
+        IN_SQLCODE,
+        ESC_DATE("d", SINGLE_QUOTE, "DATE "),
+        ESC_TIME("t", SINGLE_QUOTE, "TIME "),
 
-      int newPos = pos + escapeKeyword.length;
+        ESC_TIMESTAMP("ts", SINGLE_QUOTE, "TIMESTAMP "),
+        ESC_FUNCTION("fn", QUOTE_OR_ALPHABETIC_MARKER, null),
+        ESC_OUTERJOIN("oj", QUOTE_OR_ALPHABETIC_MARKER_OR_PARENTHESIS, null),
+        ESC_ESCAPECHAR("escape", SINGLE_QUOTE, "ESCAPE ");
 
-      // check for the beginning of the value
-      char curr = sql[newPos];
-      // ignore any in-between whitespace
-      while (curr == ' ') {
-        newPos++;
-        if (newPos >= sql.length) {
-          return 0;
+        private static final SqlParseState[] VALUES = values();
+
+        private final char[] escapeKeyword;
+        private final char[] allowedValues;
+        private final String replacementKeyword;
+
+        SqlParseState() {
+            this("", new char[0], null);
         }
-        curr = sql[newPos];
-      }
-      for (char c : allowedValues) {
-        if (curr == c || (c == '0' && Character.isLetter(curr))) {
-          return newPos - pos;
+
+        SqlParseState(String escapeKeyword, char[] allowedValues,
+                      String replacementKeyword) {
+            this.escapeKeyword = escapeKeyword.toCharArray();
+            this.allowedValues = allowedValues;
+            this.replacementKeyword = replacementKeyword;
+        }
+
+        private boolean startMatches(char[] sql, int pos) {
+            // check for the keyword
+            for (char c : escapeKeyword) {
+                if (pos >= sql.length) {
+                    return false;
+                }
+                char curr = sql[pos++];
+                if (curr != c && curr != Character.toUpperCase(c)) {
+                    return false;
+                }
+            }
+            return pos < sql.length;
+        }
+
+        private int getMatchedPosition(char[] sql, int pos) {
+            // check for the keyword
+            if (!startMatches(sql, pos)) {
+                return 0;
+            }
+
+            int newPos = pos + escapeKeyword.length;
+
+            // check for the beginning of the value
+            char curr = sql[newPos];
+            // ignore any in-between whitespace
+            while (curr == ' ') {
+                newPos++;
+                if (newPos >= sql.length) {
+                    return 0;
+                }
+                curr = sql[newPos];
+            }
+            for (char c : allowedValues) {
+                if (curr == c || (c == '0' && Character.isLetter(curr))) {
+                    return newPos - pos;
+                }
+            }
+            return 0;
         }
-      }
-      return 0;
     }
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/core/Provider.java b/pgjdbc/src/main/java/org/postgresql/core/Provider.java
index 94c44e9..d56c6b6 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/Provider.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/Provider.java
@@ -12,10 +12,10 @@ package org.postgresql.core;
  */
 public interface Provider<T> {
 
-  /**
-   * Gets a result.
-   *
-   * @return a result
-   */
-  T get();
+    /**
+     * Gets a result.
+     *
+     * @return a result
+     */
+    T get();
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/core/Query.java b/pgjdbc/src/main/java/org/postgresql/core/Query.java
index 5322a09..fd00ec5 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/Query.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/Query.java
@@ -18,70 +18,73 @@ import java.util.Map;
  * @author Oliver Jowett (oliver@opencloud.com)
  */
 public interface Query {
-  /**
-   * <p>Create a ParameterList suitable for storing parameters associated with this Query.</p>
-   *
-   * <p>If this query has no parameters, a ParameterList will be returned, but it may be a shared
-   * immutable object. If this query does have parameters, the returned ParameterList is a new list,
-   * unshared by other callers.</p>
-   *
-   * @return a suitable ParameterList instance for this query
-   */
-  ParameterList createParameterList();
+    /**
+     * <p>Create a ParameterList suitable for storing parameters associated with this Query.</p>
+     *
+     * <p>If this query has no parameters, a ParameterList will be returned, but it may be a shared
+     * immutable object. If this query does have parameters, the returned ParameterList is a new list,
+     * unshared by other callers.</p>
+     *
+     * @return a suitable ParameterList instance for this query
+     */
+    ParameterList createParameterList();
 
-  /**
-   * Stringize this query to a human-readable form, substituting particular parameter values for
-   * parameter placeholders.
-   *
-   * @param parameters a ParameterList returned by this Query's {@link #createParameterList} method,
-   *        or <code>null</code> to leave the parameter placeholders unsubstituted.
-   * @return a human-readable representation of this query
-   */
-  String toString(ParameterList parameters);
+    /**
+     * Stringize this query to a human-readable form, substituting particular parameter values for
+     * parameter placeholders.
+     *
+     * @param parameters a ParameterList returned by this Query's {@link #createParameterList} method,
+     *                   or <code>null</code> to leave the parameter placeholders unsubstituted.
+     * @return a human-readable representation of this query
+     */
+    String toString(ParameterList parameters);
 
-  /**
-   * Returns SQL in native for database format.
-   * @return SQL in native for database format
-   */
-  String getNativeSql();
+    /**
+     * Returns SQL in native database format.
+     *
+     * @return SQL in native database format
+     */
+    String getNativeSql();
 
-  /**
-   * Returns properties of the query (sql keyword, and some other parsing info).
-   * @return returns properties of the query (sql keyword, and some other parsing info) or null if not applicable
-   */
-  SqlCommand getSqlCommand();
+    /**
+     * Returns properties of the query (sql keyword, and some other parsing info).
+     *
+     * @return properties of the query (sql keyword, and some other parsing info), or null if not applicable
+     */
+    SqlCommand getSqlCommand();
 
-  /**
-   * <p>Close this query and free any server-side resources associated with it. The resources may not
-   * be immediately deallocated, but closing a Query may make the deallocation more prompt.</p>
-   *
-   * <p>A closed Query should not be executed.</p>
-   */
-  void close();
+    /**
+     * <p>Close this query and free any server-side resources associated with it. The resources may not
+     * be immediately deallocated, but closing a Query may make the deallocation more prompt.</p>
+     *
+     * <p>A closed Query should not be executed.</p>
+     */
+    void close();
 
-  boolean isStatementDescribed();
+    boolean isStatementDescribed();
 
-  boolean isEmpty();
+    boolean isEmpty();
 
-  /**
-   * Get the number of times this Query has been batched.
-   * @return number of times <code>addBatch()</code> has been called.
-   */
-  int getBatchSize();
+    /**
+     * Get the number of times this Query has been batched.
+     *
+     * @return number of times <code>addBatch()</code> has been called.
+     */
+    int getBatchSize();
 
-  /**
-   * Get a map that a result set can use to find the index associated to a name.
-   *
-   * @return null if the query implementation does not support this method.
-   */
-  Map<String, Integer> getResultSetColumnNameIndexMap();
+    /**
+     * Get a map that a result set can use to find the index associated to a name.
+     *
+     * @return null if the query implementation does not support this method.
+     */
+    Map<String, Integer> getResultSetColumnNameIndexMap();
 
-  /**
-   * Return a list of the Query objects that make up this query. If this object is already a
-   * SimpleQuery, returns null (avoids an extra array construction in the common case).
-   *
-   * @return an array of single-statement queries, or <code>null</code> if this object is already a
-   *         single-statement query.
-   */
-  Query [] getSubqueries();
+    /**
+     * Return a list of the Query objects that make up this query. If this object is already a
+     * SimpleQuery, returns null (avoids an extra array construction in the common case).
+     *
+     * @return an array of single-statement queries, or <code>null</code> if this object is already a
+     * single-statement query.
+     */
+    Query[] getSubqueries();
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/core/QueryExecutor.java b/pgjdbc/src/main/java/org/postgresql/core/QueryExecutor.java
index b5d96d7..80ebcba 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/QueryExecutor.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/QueryExecutor.java
@@ -6,15 +6,6 @@
 
 package org.postgresql.core;
 
-import org.postgresql.PGNotification;
-import org.postgresql.copy.CopyOperation;
-import org.postgresql.core.v3.TypeTransferModeRegistry;
-import org.postgresql.jdbc.AutoSave;
-import org.postgresql.jdbc.BatchResultHandler;
-import org.postgresql.jdbc.EscapeSyntaxCallMode;
-import org.postgresql.jdbc.PreferQueryMode;
-import org.postgresql.util.HostSpec;
-
 import java.io.Closeable;
 import java.io.IOException;
 import java.sql.SQLException;
@@ -23,6 +14,14 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.TimeZone;
+import org.postgresql.PGNotification;
+import org.postgresql.copy.CopyOperation;
+import org.postgresql.core.v3.TypeTransferModeRegistry;
+import org.postgresql.jdbc.AutoSave;
+import org.postgresql.jdbc.BatchResultHandler;
+import org.postgresql.jdbc.EscapeSyntaxCallMode;
+import org.postgresql.jdbc.PreferQueryMode;
+import org.postgresql.util.HostSpec;
 
 /**
  * <p>Abstracts the protocol-specific details of executing a query.</p>
@@ -53,571 +52,574 @@ import java.util.TimeZone;
  * @author Oliver Jowett (oliver@opencloud.com)
  */
 public interface QueryExecutor extends TypeTransferModeRegistry {
-  /**
-   * Flag for query execution that indicates the given Query object is unlikely to be reused.
-   */
-  int QUERY_ONESHOT = 1;
-
-  /**
-   * Flag for query execution that indicates that resultset metadata isn't needed and can be safely
-   * omitted.
-   */
-  int QUERY_NO_METADATA = 2;
-
-  /**
-   * Flag for query execution that indicates that a resultset isn't expected and the query executor
-   * can safely discard any rows (although the resultset should still appear to be from a
-   * resultset-returning query).
-   */
-  int QUERY_NO_RESULTS = 4;
-
-  /**
-   * Flag for query execution that indicates a forward-fetch-capable cursor should be used if
-   * possible.
-   */
-  int QUERY_FORWARD_CURSOR = 8;
-
-  /**
-   * Flag for query execution that indicates the automatic BEGIN on the first statement when outside
-   * a transaction should not be done.
-   */
-  int QUERY_SUPPRESS_BEGIN = 16;
-
-  /**
-   * Flag for query execution when we don't really want to execute, we just want to get the
-   * parameter metadata for the statement.
-   */
-  int QUERY_DESCRIBE_ONLY = 32;
-
-  /**
-   * Flag for query execution used by generated keys where we want to receive both the ResultSet and
-   * associated update count from the command status.
-   */
-  int QUERY_BOTH_ROWS_AND_STATUS = 64;
-
-  /**
-   * Force this query to be described at each execution. This is done in pipelined batches where we
-   * might need to detect mismatched result types.
-   */
-  int QUERY_FORCE_DESCRIBE_PORTAL = 512;
-
-  /**
-   * Flag to disable batch execution when we expect results (generated keys) from a statement.
-   *
-   * @deprecated in PgJDBC 9.4 as we now auto-size batches.
-   */
-  @Deprecated
-  int QUERY_DISALLOW_BATCHING = 128;
-
-  /**
-   * Flag for query execution to avoid using binary transfer.
-   */
-  int QUERY_NO_BINARY_TRANSFER = 256;
-
-  /**
-   * Execute the query via simple 'Q' command (not parse, bind, exec, but simple execute).
-   * This sends query text on each execution, however it supports sending multiple queries
-   * separated with ';' as a single command.
-   */
-  int QUERY_EXECUTE_AS_SIMPLE = 1024;
-
-  int MAX_SAVE_POINTS = 1000;
-
-  /**
-   * Flag indicating that when beginning a transaction, it should be read only.
-   */
-  int QUERY_READ_ONLY_HINT = 2048;
-
-  /**
-   * Execute a Query, passing results to a provided ResultHandler.
-   *
-   * @param query the query to execute; must be a query returned from calling
-   *        {@link #wrap(List)} on this QueryExecutor object.
-   * @param parameters the parameters for the query. Must be non-<code>null</code> if the query
-   *        takes parameters. Must be a parameter object returned by
-   *        {@link org.postgresql.core.Query#createParameterList()}.
-   * @param handler a ResultHandler responsible for handling results generated by this query
-   * @param maxRows the maximum number of rows to retrieve
-   * @param fetchSize if QUERY_FORWARD_CURSOR is set, the preferred number of rows to retrieve
-   *        before suspending
-   * @param flags a combination of QUERY_* flags indicating how to handle the query.
-   * @throws SQLException if query execution fails
-   */
-  void execute(Query query, ParameterList parameters, ResultHandler handler, int maxRows,
-      int fetchSize, int flags) throws SQLException;
-
-  /**
-   * Execute a Query with adaptive fetch, passing results to a provided ResultHandler.
-   *
-   * @param query the query to execute; must be a query returned from calling
-   *        {@link #wrap(List)} on this QueryExecutor object.
-   * @param parameters the parameters for the query. Must be non-<code>null</code> if the query
-   *        takes parameters. Must be a parameter object returned by
-   *        {@link org.postgresql.core.Query#createParameterList()}.
-   * @param handler a ResultHandler responsible for handling results generated by this query
-   * @param maxRows the maximum number of rows to retrieve
-   * @param fetchSize if QUERY_FORWARD_CURSOR is set, the preferred number of rows to retrieve
-   *        before suspending
-   * @param flags a combination of QUERY_* flags indicating how to handle the query.
-   * @param adaptiveFetch state of adaptiveFetch to use during execution
-   * @throws SQLException if query execution fails
-   */
-  void execute(Query query, ParameterList parameters, ResultHandler handler, int maxRows,
-      int fetchSize, int flags, boolean adaptiveFetch) throws SQLException;
-
-  /**
-   * Execute several Query, passing results to a provided ResultHandler.
-   *
-   * @param queries the queries to execute; each must be a query returned from calling
-   *        {@link #wrap(List)} on this QueryExecutor object.
-   * @param parameterLists the parameter lists for the queries. The parameter lists correspond 1:1
-   *        to the queries passed in the <code>queries</code> array. Each must be non-
-   *        <code>null</code> if the corresponding query takes parameters, and must be a parameter
-   *        object returned by {@link Query#createParameterList()} created by
-   *        the corresponding query.
-   * @param handler a ResultHandler responsible for handling results generated by this query
-   * @param maxRows the maximum number of rows to retrieve
-   * @param fetchSize if QUERY_FORWARD_CURSOR is set, the preferred number of rows to retrieve
-   *        before suspending
-   * @param flags a combination of QUERY_* flags indicating how to handle the query.
-   * @throws SQLException if query execution fails
-   */
-  void execute(Query[] queries, ParameterList[] parameterLists,
-      BatchResultHandler handler, int maxRows,
-      int fetchSize, int flags) throws SQLException;
-
-  /**
-   * Execute several Query with adaptive fetch, passing results to a provided ResultHandler.
-   *
-   * @param queries the queries to execute; each must be a query returned from calling
-   *        {@link #wrap(List)} on this QueryExecutor object.
-   * @param parameterLists the parameter lists for the queries. The parameter lists correspond 1:1
-   *        to the queries passed in the <code>queries</code> array. Each must be non-
-   *        <code>null</code> if the corresponding query takes parameters, and must be a parameter
-   *        object returned by {@link Query#createParameterList()} created by
-   *        the corresponding query.
-   * @param handler a ResultHandler responsible for handling results generated by this query
-   * @param maxRows the maximum number of rows to retrieve
-   * @param fetchSize if QUERY_FORWARD_CURSOR is set, the preferred number of rows to retrieve
-   *        before suspending
-   * @param flags a combination of QUERY_* flags indicating how to handle the query.
-   * @param adaptiveFetch state of adaptiveFetch to use during execution
-   * @throws SQLException if query execution fails
-   */
-  void execute(Query[] queries, ParameterList[] parameterLists,
-      BatchResultHandler handler, int maxRows,
-      int fetchSize, int flags, boolean adaptiveFetch) throws SQLException;
-
-  /**
-   * Fetch additional rows from a cursor.
-   *
-   * @param cursor the cursor to fetch from
-   * @param handler the handler to feed results to
-   * @param fetchSize the preferred number of rows to retrieve before suspending
-   * @param adaptiveFetch state of adaptiveFetch to use during fetching
-   * @throws SQLException if query execution fails
-   */
-  void fetch(ResultCursor cursor, ResultHandler handler, int fetchSize, boolean adaptiveFetch) throws SQLException;
-
-  /**
-   * Create an unparameterized Query object suitable for execution by this QueryExecutor. The
-   * provided query string is not parsed for parameter placeholders ('?' characters), and the
-   * {@link Query#createParameterList} of the returned object will always return an empty
-   * ParameterList.
-   *
-   * @param sql the SQL for the query to create
-   * @return a new Query object
-   * @throws SQLException if something goes wrong
-   */
-  Query createSimpleQuery(String sql) throws SQLException;
-
-  boolean isReWriteBatchedInsertsEnabled();
-
-  CachedQuery createQuery(String sql, boolean escapeProcessing, boolean isParameterized,
-      String ... columnNames)
-      throws SQLException;
-
-  Object createQueryKey(String sql, boolean escapeProcessing, boolean isParameterized,
-      String ... columnNames);
-
-  CachedQuery createQueryByKey(Object key) throws SQLException;
-
-  CachedQuery borrowQueryByKey(Object key) throws SQLException;
-
-  CachedQuery borrowQuery(String sql) throws SQLException;
-
-  CachedQuery borrowCallableQuery(String sql) throws SQLException;
-
-  CachedQuery borrowReturningQuery(String sql, String [] columnNames) throws SQLException;
-
-  void releaseQuery(CachedQuery cachedQuery);
-
-  /**
-   * Wrap given native query into a ready for execution format.
-   * @param queries list of queries in native to database syntax
-   * @return query object ready for execution by this query executor
-   */
-  Query wrap(List<NativeQuery> queries);
-
-  /**
-   * Prior to attempting to retrieve notifications, we need to pull any recently received
-   * notifications off of the network buffers. The notification retrieval in ProtocolConnection
-   * cannot do this as it is prone to deadlock, so the higher level caller must be responsible which
-   * requires exposing this method.
-   *
-   * @throws SQLException if and error occurs while fetching notifications
-   */
-  void processNotifies() throws SQLException;
-
-  /**
-   * Prior to attempting to retrieve notifications, we need to pull any recently received
-   * notifications off of the network buffers. The notification retrieval in ProtocolConnection
-   * cannot do this as it is prone to deadlock, so the higher level caller must be responsible which
-   * requires exposing this method. This variant supports blocking for the given time in millis.
-   *
-   * @param timeoutMillis number of milliseconds to block for
-   * @throws SQLException if and error occurs while fetching notifications
-   */
-  void processNotifies(int timeoutMillis) throws SQLException;
-
-  //
-  // Fastpath interface.
-  //
-
-  /**
-   * Create a new ParameterList implementation suitable for invoking a fastpath function via
-   * {@link #fastpathCall}.
-   *
-   * @param count the number of parameters the fastpath call will take
-   * @return a ParameterList suitable for passing to {@link #fastpathCall}.
-   * @deprecated This API is somewhat obsolete, as one may achieve similar performance
-   *         and greater functionality by setting up a prepared statement to define
-   *         the function call. Then, executing the statement with binary transmission of parameters
-   *         and results substitutes for a fast-path function call.
-   */
-  @Deprecated
-  ParameterList createFastpathParameters(int count);
-
-  /**
-   * Invoke a backend function via the fastpath interface.
-   *
-   * @param fnid the OID of the backend function to invoke
-   * @param params a ParameterList returned from {@link #createFastpathParameters} containing the
-   *        parameters to pass to the backend function
-   * @param suppressBegin if begin should be suppressed
-   * @return the binary-format result of the fastpath call, or <code>null</code> if a void result
-   *         was returned
-   * @throws SQLException if an error occurs while executing the fastpath call
-   * @deprecated This API is somewhat obsolete, as one may achieve similar performance
-   *         and greater functionality by setting up a prepared statement to define
-   *         the function call. Then, executing the statement with binary transmission of parameters
-   *         and results substitutes for a fast-path function call.
-   */
-  @Deprecated
-  byte [] fastpathCall(int fnid, ParameterList params, boolean suppressBegin)
-      throws SQLException;
-
-  /**
-   * Issues a COPY FROM STDIN / COPY TO STDOUT statement and returns handler for associated
-   * operation. Until the copy operation completes, no other database operation may be performed.
-   * Implemented for protocol version 3 only.
-   *
-   * @param sql input sql
-   * @param suppressBegin if begin should be suppressed
-   * @return handler for associated operation
-   * @throws SQLException when initializing the given query fails
-   */
-  CopyOperation startCopy(String sql, boolean suppressBegin) throws SQLException;
-
-  /**
-   * @return the version of the implementation
-   */
-  int getProtocolVersion();
-
-  /**
-   * Adds a single oid that should be received using binary encoding.
-   *
-   * @param oid The oid to request with binary encoding.
-   */
-  void addBinaryReceiveOid(int oid);
-
-  /**
-   * Remove given oid from the list of oids for binary receive encoding.
-   * <p>Note: the binary receive for the oid can be re-activated later.</p>
-   *
-   * @param oid The oid to request with binary encoding.
-   */
-  void removeBinaryReceiveOid(int oid);
-
-  /**
-   * Gets the oids that should be received using binary encoding.
-   * <p>Note: this returns an unmodifiable set, and its contents might not reflect the current state.</p>
-   *
-   * @return The oids to request with binary encoding.
-   * @deprecated the method returns a copy of the set, so it is not efficient. Use {@link #useBinaryForReceive(int)}
-   */
-  @Deprecated
-  Set<? extends Integer> getBinaryReceiveOids();
-
-  /**
-   * Sets the oids that should be received using binary encoding.
-   *
-   * @param useBinaryForOids The oids to request with binary encoding.
-   */
-  void setBinaryReceiveOids(Set<Integer> useBinaryForOids);
-
-  /**
-   * Adds a single oid that should be sent using binary encoding.
-   *
-   * @param oid The oid to send with binary encoding.
-   */
-  void addBinarySendOid(int oid);
-
-  /**
-   * Remove given oid from the list of oids for binary send encoding.
-   * <p>Note: the binary send for the oid can be re-activated later.</p>
-   *
-   * @param oid The oid to send with binary encoding.
-   */
-  void removeBinarySendOid(int oid);
-
-  /**
-   * Gets the oids that should be sent using binary encoding.
-   * <p>Note: this returns an unmodifiable set, and its contents might not reflect the current state.</p>
-   *
-   * @return useBinaryForOids The oids to send with binary encoding.
-   * @deprecated the method returns a copy of the set, so it is not efficient. Use {@link #useBinaryForSend(int)}
-   */
-  @Deprecated
-  Set<? extends Integer> getBinarySendOids();
-
-  /**
-   * Sets the oids that should be sent using binary encoding.
-   *
-   * @param useBinaryForOids The oids to send with binary encoding.
-   */
-  void setBinarySendOids(Set<Integer> useBinaryForOids);
-
-  /**
-   * Returns true if server uses integer instead of double for binary date and time encodings.
-   *
-   * @return the server integer_datetime setting.
-   */
-  boolean getIntegerDateTimes();
-
-  /**
-   * @return the host and port this connection is connected to.
-   */
-  HostSpec getHostSpec();
-
-  /**
-   * @return the user this connection authenticated as.
-   */
-  String getUser();
-
-  /**
-   * @return the database this connection is connected to.
-   */
-  String getDatabase();
-
-  /**
-   * Sends a query cancellation for this connection.
-   *
-   * @throws SQLException if something goes wrong.
-   */
-  void sendQueryCancel() throws SQLException;
-
-  /**
-   * Return the process ID (PID) of the backend server process handling this connection.
-   *
-   * @return process ID (PID) of the backend server process handling this connection
-   */
-  int getBackendPID();
-
-  /**
-   * Abort at network level without sending the Terminate message to the backend.
-   */
-  void abort();
-
-  /**
-   * Close this connection cleanly.
-   */
-  void close();
-
-  /**
-   * Returns an action that would close the connection cleanly.
-   * The returned object should refer only the minimum subset of objects required
-   * for proper resource cleanup. For instance, it should better not hold a strong reference to
-   * {@link QueryExecutor}.
-   * @return action that would close the connection cleanly.
-   */
-  Closeable getCloseAction();
-
-  /**
-   * Check if this connection is closed.
-   *
-   * @return true iff the connection is closed.
-   */
-  boolean isClosed();
-
-  /**
-   * <p>Return the server version from the server_version GUC.</p>
-   *
-   * <p>Note that there's no requirement for this to be numeric or of the form x.y.z. PostgreSQL
-   * development releases usually have the format x.ydevel e.g. 9.4devel; betas usually x.ybetan
-   * e.g. 9.4beta1. The --with-extra-version configure option may add an arbitrary string to this.</p>
-   *
-   * <p>Don't use this string for logic, only use it when displaying the server version to the user.
-   * Prefer getServerVersionNum() for all logic purposes.</p>
-   *
-   * @return the server version string from the server_version GUC
-   */
-  String getServerVersion();
-
-  /**
-   * Retrieve and clear the set of asynchronous notifications pending on this connection.
-   *
-   * @return an array of notifications; if there are no notifications, an empty array is returned.
-   * @throws SQLException if and error occurs while fetching notifications
-   */
-  PGNotification[] getNotifications() throws SQLException;
-
-  /**
-   * Retrieve and clear the chain of warnings accumulated on this connection.
-   *
-   * @return the first SQLWarning in the chain; subsequent warnings can be found via
-   *         SQLWarning.getNextWarning().
-   */
-  SQLWarning getWarnings();
-
-  /**
-   * <p>Get a machine-readable server version.</p>
-   *
-   * <p>This returns the value of the server_version_num GUC. If no such GUC exists, it falls back on
-   * attempting to parse the text server version for the major version. If there's no minor version
-   * (e.g. a devel or beta release) then the minor version is set to zero. If the version could not
-   * be parsed, zero is returned.</p>
-   *
-   * @return the server version in numeric XXYYZZ form, eg 090401, from server_version_num
-   */
-  int getServerVersionNum();
-
-  /**
-   * Get the current transaction state of this connection.
-   *
-   * @return a ProtocolConnection.TRANSACTION_* constant.
-   */
-  TransactionState getTransactionState();
-
-  /**
-   * Returns whether the server treats string-literals according to the SQL standard or if it uses
-   * traditional PostgreSQL escaping rules. Versions up to 8.1 always treated backslashes as escape
-   * characters in string-literals. Since 8.2, this depends on the value of the
-   * {@code standard_conforming_strings} server variable.
-   *
-   * @return true if the server treats string literals according to the SQL standard
-   */
-  boolean getStandardConformingStrings();
-
-  /**
-   *
-   * @return true if we are going to quote identifier provided in the returning array default is true
-   */
-  boolean getQuoteReturningIdentifiers();
-
-  /**
-   * Returns backend timezone in java format.
-   * @return backend timezone in java format.
-   */
-  TimeZone getTimeZone();
-
-  /**
-   * @return the current encoding in use by this connection
-   */
-  Encoding getEncoding();
-
-  /**
-   * Returns application_name connection property.
-   * @return application_name connection property
-   */
-  String getApplicationName();
-
-  boolean isColumnSanitiserDisabled();
-
-  EscapeSyntaxCallMode getEscapeSyntaxCallMode();
-
-  PreferQueryMode getPreferQueryMode();
-
-  void setPreferQueryMode(PreferQueryMode mode);
-
-  AutoSave getAutoSave();
-
-  void setAutoSave(AutoSave autoSave);
-
-  boolean willHealOnRetry(SQLException e);
-
-  /**
-   * By default, the connection resets statement cache in case deallocate all/discard all
-   * message is observed.
-   * This API allows to disable that feature for testing purposes.
-   *
-   * @param flushCacheOnDeallocate true if statement cache should be reset when "deallocate/discard" message observed
-   */
-  void setFlushCacheOnDeallocate(boolean flushCacheOnDeallocate);
-
-  /**
-   * @return the ReplicationProtocol instance for this connection.
-   */
-  ReplicationProtocol getReplicationProtocol();
-
-  void setNetworkTimeout(int milliseconds) throws IOException;
-
-  int getNetworkTimeout() throws IOException;
-
-  // Expose parameter status to PGConnection
-  Map<String, String> getParameterStatuses();
-
-  String getParameterStatus(String parameterName);
-
-  /**
-   * Get fetch size computed by adaptive fetch size for given query.
-   *
-   * @param adaptiveFetch state of adaptive fetch, which should be used during retrieving
-   * @param cursor        Cursor used by resultSet, containing query, have to be able to cast to
-   *                      Portal class.
-   * @return fetch size computed by adaptive fetch size for given query passed inside cursor
-   */
-  int getAdaptiveFetchSize(boolean adaptiveFetch, ResultCursor cursor);
-
-  /**
-   * Get state of adaptive fetch inside QueryExecutor.
-   *
-   * @return state of adaptive fetch inside QueryExecutor
-   */
-  boolean getAdaptiveFetch();
-
-  /**
-   * Set state of adaptive fetch inside QueryExecutor.
-   *
-   * @param adaptiveFetch desired state of adaptive fetch
-   */
-  void setAdaptiveFetch(boolean adaptiveFetch);
-
-  /**
-   * Add query to adaptive fetch cache inside QueryExecutor.
-   *
-   * @param adaptiveFetch state of adaptive fetch used during adding query
-   * @param cursor        Cursor used by resultSet, containing query, have to be able to cast to
-   *                      Portal class.
-   */
-  void addQueryToAdaptiveFetchCache(boolean adaptiveFetch, ResultCursor cursor);
-
-  /**
-   * Remove query from adaptive fetch cache inside QueryExecutor
-   *
-   * @param adaptiveFetch state of adaptive fetch used during removing query
-   * @param cursor        Cursor used by resultSet, containing query, have to be able to cast to
-   *                      Portal class.
-   */
-  void removeQueryFromAdaptiveFetchCache(boolean adaptiveFetch, ResultCursor cursor);
+    /**
+     * Flag for query execution that indicates the given Query object is unlikely to be reused.
+     */
+    int QUERY_ONESHOT = 1;
+
+    /**
+     * Flag for query execution that indicates that resultset metadata isn't needed and can be safely
+     * omitted.
+     */
+    int QUERY_NO_METADATA = 2;
+
+    /**
+     * Flag for query execution that indicates that a resultset isn't expected and the query executor
+     * can safely discard any rows (although the resultset should still appear to be from a
+     * resultset-returning query).
+     */
+    int QUERY_NO_RESULTS = 4;
+
+    /**
+     * Flag for query execution that indicates a forward-fetch-capable cursor should be used if
+     * possible.
+     */
+    int QUERY_FORWARD_CURSOR = 8;
+
+    /**
+     * Flag for query execution that indicates the automatic BEGIN on the first statement when outside
+     * a transaction should not be done.
+     */
+    int QUERY_SUPPRESS_BEGIN = 16;
+
+    /**
+     * Flag for query execution when we don't really want to execute, we just want to get the
+     * parameter metadata for the statement.
+     */
+    int QUERY_DESCRIBE_ONLY = 32;
+
+    /**
+     * Flag for query execution used by generated keys where we want to receive both the ResultSet and
+     * associated update count from the command status.
+     */
+    int QUERY_BOTH_ROWS_AND_STATUS = 64;
+
+    /**
+     * Force this query to be described at each execution. This is done in pipelined batches where we
+     * might need to detect mismatched result types.
+     */
+    int QUERY_FORCE_DESCRIBE_PORTAL = 512;
+
+    /**
+     * Flag to disable batch execution when we expect results (generated keys) from a statement.
+     *
+     * @deprecated in PgJDBC 9.4 as we now auto-size batches.
+     */
+    @Deprecated
+    int QUERY_DISALLOW_BATCHING = 128;
+
+    /**
+     * Flag for query execution to avoid using binary transfer.
+     */
+    int QUERY_NO_BINARY_TRANSFER = 256;
+
+    /**
+     * Execute the query via simple 'Q' command (not parse, bind, exec, but simple execute).
+     * This sends query text on each execution, however it supports sending multiple queries
+     * separated with ';' as a single command.
+     */
+    int QUERY_EXECUTE_AS_SIMPLE = 1024;
+
+    int MAX_SAVE_POINTS = 1000;
+
+    /**
+     * Flag indicating that when beginning a transaction, it should be read only.
+     */
+    int QUERY_READ_ONLY_HINT = 2048;
+
+    /**
+     * Execute a Query, passing results to a provided ResultHandler.
+     *
+     * @param query      the query to execute; must be a query returned from calling
+     *                   {@link #wrap(List)} on this QueryExecutor object.
+     * @param parameters the parameters for the query. Must be non-<code>null</code> if the query
+     *                   takes parameters. Must be a parameter object returned by
+     *                   {@link org.postgresql.core.Query#createParameterList()}.
+     * @param handler    a ResultHandler responsible for handling results generated by this query
+     * @param maxRows    the maximum number of rows to retrieve
+     * @param fetchSize  if QUERY_FORWARD_CURSOR is set, the preferred number of rows to retrieve
+     *                   before suspending
+     * @param flags      a combination of QUERY_* flags indicating how to handle the query.
+     * @throws SQLException if query execution fails
+     */
+    void execute(Query query, ParameterList parameters, ResultHandler handler, int maxRows,
+                 int fetchSize, int flags) throws SQLException;
+
+    /**
+     * Execute a Query with adaptive fetch, passing results to a provided ResultHandler.
+     *
+     * @param query         the query to execute; must be a query returned from calling
+     *                      {@link #wrap(List)} on this QueryExecutor object.
+     * @param parameters    the parameters for the query. Must be non-<code>null</code> if the query
+     *                      takes parameters. Must be a parameter object returned by
+     *                      {@link org.postgresql.core.Query#createParameterList()}.
+     * @param handler       a ResultHandler responsible for handling results generated by this query
+     * @param maxRows       the maximum number of rows to retrieve
+     * @param fetchSize     if QUERY_FORWARD_CURSOR is set, the preferred number of rows to retrieve
+     *                      before suspending
+     * @param flags         a combination of QUERY_* flags indicating how to handle the query.
+     * @param adaptiveFetch state of adaptiveFetch to use during execution
+     * @throws SQLException if query execution fails
+     */
+    void execute(Query query, ParameterList parameters, ResultHandler handler, int maxRows,
+                 int fetchSize, int flags, boolean adaptiveFetch) throws SQLException;
+
+    /**
+     * Execute several Queries, passing results to a provided ResultHandler.
+     *
+     * @param queries        the queries to execute; each must be a query returned from calling
+     *                       {@link #wrap(List)} on this QueryExecutor object.
+     * @param parameterLists the parameter lists for the queries. The parameter lists correspond 1:1
+     *                       to the queries passed in the <code>queries</code> array. Each must be non-
+     *                       <code>null</code> if the corresponding query takes parameters, and must be a parameter
+     *                       object returned by {@link Query#createParameterList()} created by
+     *                       the corresponding query.
+     * @param handler        a ResultHandler responsible for handling results generated by this query
+     * @param maxRows        the maximum number of rows to retrieve
+     * @param fetchSize      if QUERY_FORWARD_CURSOR is set, the preferred number of rows to retrieve
+     *                       before suspending
+     * @param flags          a combination of QUERY_* flags indicating how to handle the query.
+     * @throws SQLException if query execution fails
+     */
+    void execute(Query[] queries, ParameterList[] parameterLists,
+                 BatchResultHandler handler, int maxRows,
+                 int fetchSize, int flags) throws SQLException;
+
+    /**
+     * Execute several Queries with adaptive fetch, passing results to a provided ResultHandler.
+     *
+     * @param queries        the queries to execute; each must be a query returned from calling
+     *                       {@link #wrap(List)} on this QueryExecutor object.
+     * @param parameterLists the parameter lists for the queries. The parameter lists correspond 1:1
+     *                       to the queries passed in the <code>queries</code> array. Each must be non-
+     *                       <code>null</code> if the corresponding query takes parameters, and must be a parameter
+     *                       object returned by {@link Query#createParameterList()} created by
+     *                       the corresponding query.
+     * @param handler        a ResultHandler responsible for handling results generated by this query
+     * @param maxRows        the maximum number of rows to retrieve
+     * @param fetchSize      if QUERY_FORWARD_CURSOR is set, the preferred number of rows to retrieve
+     *                       before suspending
+     * @param flags          a combination of QUERY_* flags indicating how to handle the query.
+     * @param adaptiveFetch  state of adaptiveFetch to use during execution
+     * @throws SQLException if query execution fails
+     */
+    void execute(Query[] queries, ParameterList[] parameterLists,
+                 BatchResultHandler handler, int maxRows,
+                 int fetchSize, int flags, boolean adaptiveFetch) throws SQLException;
+
+    /**
+     * Fetch additional rows from a cursor.
+     *
+     * @param cursor        the cursor to fetch from
+     * @param handler       the handler to feed results to
+     * @param fetchSize     the preferred number of rows to retrieve before suspending
+     * @param adaptiveFetch state of adaptiveFetch to use during fetching
+     * @throws SQLException if query execution fails
+     */
+    void fetch(ResultCursor cursor, ResultHandler handler, int fetchSize, boolean adaptiveFetch) throws SQLException;
+
+    /**
+     * Create an unparameterized Query object suitable for execution by this QueryExecutor. The
+     * provided query string is not parsed for parameter placeholders ('?' characters), and the
+     * {@link Query#createParameterList} of the returned object will always return an empty
+     * ParameterList.
+     *
+     * @param sql the SQL for the query to create
+     * @return a new Query object
+     * @throws SQLException if something goes wrong
+     */
+    Query createSimpleQuery(String sql) throws SQLException;
+
+    boolean isReWriteBatchedInsertsEnabled();
+
+    CachedQuery createQuery(String sql, boolean escapeProcessing, boolean isParameterized,
+                            String... columnNames)
+            throws SQLException;
+
+    Object createQueryKey(String sql, boolean escapeProcessing, boolean isParameterized,
+                          String... columnNames);
+
+    CachedQuery createQueryByKey(Object key) throws SQLException;
+
+    CachedQuery borrowQueryByKey(Object key) throws SQLException;
+
+    CachedQuery borrowQuery(String sql) throws SQLException;
+
+    CachedQuery borrowCallableQuery(String sql) throws SQLException;
+
+    CachedQuery borrowReturningQuery(String sql, String[] columnNames) throws SQLException;
+
+    void releaseQuery(CachedQuery cachedQuery);
+
+    /**
+     * Wrap given native query into a ready for execution format.
+     *
+     * @param queries list of queries in native to database syntax
+     * @return query object ready for execution by this query executor
+     */
+    Query wrap(List<NativeQuery> queries);
+
+    /**
+     * Prior to attempting to retrieve notifications, we need to pull any recently received
+     * notifications off of the network buffers. The notification retrieval in ProtocolConnection
+     * cannot do this as it is prone to deadlock, so the higher level caller must be responsible which
+     * requires exposing this method.
+     *
+     * @throws SQLException if an error occurs while fetching notifications
+     */
+    void processNotifies() throws SQLException;
+
+    /**
+     * Prior to attempting to retrieve notifications, we need to pull any recently received
+     * notifications off of the network buffers. The notification retrieval in ProtocolConnection
+     * cannot do this as it is prone to deadlock, so the higher level caller must be responsible which
+     * requires exposing this method. This variant supports blocking for the given time in millis.
+     *
+     * @param timeoutMillis number of milliseconds to block for
+     * @throws SQLException if an error occurs while fetching notifications
+     */
+    void processNotifies(int timeoutMillis) throws SQLException;
+
+    //
+    // Fastpath interface.
+    //
+
+    /**
+     * Create a new ParameterList implementation suitable for invoking a fastpath function via
+     * {@link #fastpathCall}.
+     *
+     * @param count the number of parameters the fastpath call will take
+     * @return a ParameterList suitable for passing to {@link #fastpathCall}.
+     * @deprecated This API is somewhat obsolete, as one may achieve similar performance
+     * and greater functionality by setting up a prepared statement to define
+     * the function call. Then, executing the statement with binary transmission of parameters
+     * and results substitutes for a fast-path function call.
+     */
+    @Deprecated
+    ParameterList createFastpathParameters(int count);
+
+    /**
+     * Invoke a backend function via the fastpath interface.
+     *
+     * @param fnid          the OID of the backend function to invoke
+     * @param params        a ParameterList returned from {@link #createFastpathParameters} containing the
+     *                      parameters to pass to the backend function
+     * @param suppressBegin if begin should be suppressed
+     * @return the binary-format result of the fastpath call, or <code>null</code> if a void result
+     * was returned
+     * @throws SQLException if an error occurs while executing the fastpath call
+     * @deprecated This API is somewhat obsolete, as one may achieve similar performance
+     * and greater functionality by setting up a prepared statement to define
+     * the function call. Then, executing the statement with binary transmission of parameters
+     * and results substitutes for a fast-path function call.
+     */
+    @Deprecated
+    byte[] fastpathCall(int fnid, ParameterList params, boolean suppressBegin)
+            throws SQLException;
+
+    /**
+     * Issues a COPY FROM STDIN / COPY TO STDOUT statement and returns handler for associated
+     * operation. Until the copy operation completes, no other database operation may be performed.
+     * Implemented for protocol version 3 only.
+     *
+     * @param sql           input sql
+     * @param suppressBegin if begin should be suppressed
+     * @return handler for associated operation
+     * @throws SQLException when initializing the given query fails
+     */
+    CopyOperation startCopy(String sql, boolean suppressBegin) throws SQLException;
+
+    /**
+     * @return the version of the implementation
+     */
+    int getProtocolVersion();
+
+    /**
+     * Adds a single oid that should be received using binary encoding.
+     *
+     * @param oid The oid to request with binary encoding.
+     */
+    void addBinaryReceiveOid(int oid);
+
+    /**
+     * Remove given oid from the list of oids for binary receive encoding.
+     * <p>Note: the binary receive for the oid can be re-activated later.</p>
+     *
+     * @param oid The oid to request with binary encoding.
+     */
+    void removeBinaryReceiveOid(int oid);
+
+    /**
+     * Gets the oids that should be received using binary encoding.
+     * <p>Note: this returns an unmodifiable set, and its contents might not reflect the current state.</p>
+     *
+     * @return The oids to request with binary encoding.
+     * @deprecated the method returns a copy of the set, so it is not efficient. Use {@link #useBinaryForReceive(int)}
+     */
+    @Deprecated
+    Set<? extends Integer> getBinaryReceiveOids();
+
+    /**
+     * Sets the oids that should be received using binary encoding.
+     *
+     * @param useBinaryForOids The oids to request with binary encoding.
+     */
+    void setBinaryReceiveOids(Set<Integer> useBinaryForOids);
+
+    /**
+     * Adds a single oid that should be sent using binary encoding.
+     *
+     * @param oid The oid to send with binary encoding.
+     */
+    void addBinarySendOid(int oid);
+
+    /**
+     * Remove given oid from the list of oids for binary send encoding.
+     * <p>Note: the binary send for the oid can be re-activated later.</p>
+     *
+     * @param oid The oid to send with binary encoding.
+     */
+    void removeBinarySendOid(int oid);
+
+    /**
+     * Gets the oids that should be sent using binary encoding.
+     * <p>Note: this returns an unmodifiable set, and its contents might not reflect the current state.</p>
+     *
+     * @return useBinaryForOids The oids to send with binary encoding.
+     * @deprecated the method returns a copy of the set, so it is not efficient. Use {@link #useBinaryForSend(int)}
+     */
+    @Deprecated
+    Set<? extends Integer> getBinarySendOids();
+
+    /**
+     * Sets the oids that should be sent using binary encoding.
+     *
+     * @param useBinaryForOids The oids to send with binary encoding.
+     */
+    void setBinarySendOids(Set<Integer> useBinaryForOids);
+
+    /**
+     * Returns true if server uses integer instead of double for binary date and time encodings.
+     *
+     * @return the server integer_datetime setting.
+     */
+    boolean getIntegerDateTimes();
+
+    /**
+     * @return the host and port this connection is connected to.
+     */
+    HostSpec getHostSpec();
+
+    /**
+     * @return the user this connection authenticated as.
+     */
+    String getUser();
+
+    /**
+     * @return the database this connection is connected to.
+     */
+    String getDatabase();
+
+    /**
+     * Sends a query cancellation for this connection.
+     *
+     * @throws SQLException if something goes wrong.
+     */
+    void sendQueryCancel() throws SQLException;
+
+    /**
+     * Return the process ID (PID) of the backend server process handling this connection.
+     *
+     * @return process ID (PID) of the backend server process handling this connection
+     */
+    int getBackendPID();
+
+    /**
+     * Abort at network level without sending the Terminate message to the backend.
+     */
+    void abort();
+
+    /**
+     * Close this connection cleanly.
+     */
+    void close();
+
+    /**
+     * Returns an action that would close the connection cleanly.
+     * The returned object should refer only the minimum subset of objects required
+     * for proper resource cleanup. For instance, it should better not hold a strong reference to
+     * {@link QueryExecutor}.
+     *
+     * @return action that would close the connection cleanly.
+     */
+    Closeable getCloseAction();
+
+    /**
+     * Check if this connection is closed.
+     *
+     * @return true iff the connection is closed.
+     */
+    boolean isClosed();
+
+    /**
+     * <p>Return the server version from the server_version GUC.</p>
+     *
+     * <p>Note that there's no requirement for this to be numeric or of the form x.y.z. PostgreSQL
+     * development releases usually have the format x.ydevel e.g. 9.4devel; betas usually x.ybetan
+     * e.g. 9.4beta1. The --with-extra-version configure option may add an arbitrary string to this.</p>
+     *
+     * <p>Don't use this string for logic, only use it when displaying the server version to the user.
+     * Prefer getServerVersionNum() for all logic purposes.</p>
+     *
+     * @return the server version string from the server_version GUC
+     */
+    String getServerVersion();
+
+    /**
+     * Retrieve and clear the set of asynchronous notifications pending on this connection.
+     *
+     * @return an array of notifications; if there are no notifications, an empty array is returned.
+     * @throws SQLException if an error occurs while fetching notifications
+     */
+    PGNotification[] getNotifications() throws SQLException;
+
+    /**
+     * Retrieve and clear the chain of warnings accumulated on this connection.
+     *
+     * @return the first SQLWarning in the chain; subsequent warnings can be found via
+     * SQLWarning.getNextWarning().
+     */
+    SQLWarning getWarnings();
+
+    /**
+     * <p>Get a machine-readable server version.</p>
+     *
+     * <p>This returns the value of the server_version_num GUC. If no such GUC exists, it falls back on
+     * attempting to parse the text server version for the major version. If there's no minor version
+     * (e.g. a devel or beta release) then the minor version is set to zero. If the version could not
+     * be parsed, zero is returned.</p>
+     *
+     * @return the server version in numeric XXYYZZ form, eg 090401, from server_version_num
+     */
+    int getServerVersionNum();
+
+    /**
+     * Get the current transaction state of this connection.
+     *
+     * @return a ProtocolConnection.TRANSACTION_* constant.
+     */
+    TransactionState getTransactionState();
+
+    /**
+     * Returns whether the server treats string-literals according to the SQL standard or if it uses
+     * traditional PostgreSQL escaping rules. Versions up to 8.1 always treated backslashes as escape
+     * characters in string-literals. Since 8.2, this depends on the value of the
+     * {@code standard_conforming_strings} server variable.
+     *
+     * @return true if the server treats string literals according to the SQL standard
+     */
+    boolean getStandardConformingStrings();
+
+    /**
+     * @return true if identifiers provided in the returning array will be quoted; the default is true
+     */
+    boolean getQuoteReturningIdentifiers();
+
+    /**
+     * Returns backend timezone in java format.
+     *
+     * @return backend timezone in java format.
+     */
+    TimeZone getTimeZone();
+
+    /**
+     * @return the current encoding in use by this connection
+     */
+    Encoding getEncoding();
+
+    /**
+     * Returns application_name connection property.
+     *
+     * @return application_name connection property
+     */
+    String getApplicationName();
+
+    boolean isColumnSanitiserDisabled();
+
+    EscapeSyntaxCallMode getEscapeSyntaxCallMode();
+
+    PreferQueryMode getPreferQueryMode();
+
+    void setPreferQueryMode(PreferQueryMode mode);
+
+    AutoSave getAutoSave();
+
+    void setAutoSave(AutoSave autoSave);
+
+    boolean willHealOnRetry(SQLException e);
+
+    /**
+     * By default, the connection resets statement cache in case deallocate all/discard all
+     * message is observed.
+     * This API allows to disable that feature for testing purposes.
+     *
+     * @param flushCacheOnDeallocate true if statement cache should be reset when "deallocate/discard" message observed
+     */
+    void setFlushCacheOnDeallocate(boolean flushCacheOnDeallocate);
+
+    /**
+     * @return the ReplicationProtocol instance for this connection.
+     */
+    ReplicationProtocol getReplicationProtocol();
+
+    int getNetworkTimeout() throws IOException;
+
+    void setNetworkTimeout(int milliseconds) throws IOException;
+
+    // Expose parameter status to PGConnection
+    Map<String, String> getParameterStatuses();
+
+    String getParameterStatus(String parameterName);
+
+    /**
+     * Get fetch size computed by adaptive fetch size for given query.
+     *
+     * @param adaptiveFetch state of adaptive fetch, which should be used during retrieving
+     * @param cursor        Cursor used by resultSet, containing query, have to be able to cast to
+     *                      Portal class.
+     * @return fetch size computed by adaptive fetch size for given query passed inside cursor
+     */
+    int getAdaptiveFetchSize(boolean adaptiveFetch, ResultCursor cursor);
+
+    /**
+     * Get state of adaptive fetch inside QueryExecutor.
+     *
+     * @return state of adaptive fetch inside QueryExecutor
+     */
+    boolean getAdaptiveFetch();
+
+    /**
+     * Set state of adaptive fetch inside QueryExecutor.
+     *
+     * @param adaptiveFetch desired state of adaptive fetch
+     */
+    void setAdaptiveFetch(boolean adaptiveFetch);
+
+    /**
+     * Add query to adaptive fetch cache inside QueryExecutor.
+     *
+     * @param adaptiveFetch state of adaptive fetch used during adding query
+     * @param cursor        Cursor used by resultSet, containing query, have to be able to cast to
+     *                      Portal class.
+     */
+    void addQueryToAdaptiveFetchCache(boolean adaptiveFetch, ResultCursor cursor);
+
+    /**
+     * Remove query from adaptive fetch cache inside QueryExecutor
+     *
+     * @param adaptiveFetch state of adaptive fetch used during removing query
+     * @param cursor        Cursor used by resultSet, containing query, have to be able to cast to
+     *                      Portal class.
+     */
+    void removeQueryFromAdaptiveFetchCache(boolean adaptiveFetch, ResultCursor cursor);
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/core/QueryExecutorBase.java b/pgjdbc/src/main/java/org/postgresql/core/QueryExecutorBase.java
index bafc8f1..4bc0915 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/QueryExecutorBase.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/QueryExecutorBase.java
@@ -5,18 +5,6 @@
 
 package org.postgresql.core;
 
-import org.postgresql.PGNotification;
-import org.postgresql.PGProperty;
-import org.postgresql.jdbc.AutoSave;
-import org.postgresql.jdbc.EscapeSyntaxCallMode;
-import org.postgresql.jdbc.PreferQueryMode;
-import org.postgresql.jdbc.ResourceLock;
-import org.postgresql.util.HostSpec;
-import org.postgresql.util.LruCache;
-import org.postgresql.util.PSQLException;
-import org.postgresql.util.PSQLState;
-import org.postgresql.util.ServerErrorMessage;
-
 import java.io.Closeable;
 import java.io.IOException;
 import java.sql.SQLException;
@@ -29,471 +17,477 @@ import java.util.TreeMap;
 import java.util.concurrent.locks.Condition;
 import java.util.logging.Level;
 import java.util.logging.Logger;
+import org.postgresql.PGNotification;
+import org.postgresql.PGProperty;
+import org.postgresql.jdbc.AutoSave;
+import org.postgresql.jdbc.EscapeSyntaxCallMode;
+import org.postgresql.jdbc.PreferQueryMode;
+import org.postgresql.jdbc.ResourceLock;
+import org.postgresql.util.HostSpec;
+import org.postgresql.util.LruCache;
+import org.postgresql.util.PSQLException;
+import org.postgresql.util.PSQLState;
+import org.postgresql.util.ServerErrorMessage;
 
 @SuppressWarnings("try")
 public abstract class QueryExecutorBase implements QueryExecutor {
 
-  private static final Logger LOGGER = Logger.getLogger(QueryExecutorBase.class.getName());
-  protected final PGStream pgStream;
-  private final String user;
-  private final String database;
-  private final int cancelSignalTimeout;
+    private static final Logger LOGGER = Logger.getLogger(QueryExecutorBase.class.getName());
+    protected final PGStream pgStream;
+    protected final QueryExecutorCloseAction closeAction;
+    protected final boolean logServerErrorDetail;
+    protected final ResourceLock lock = new ResourceLock();
+    protected final Condition lockCondition = lock.newCondition();
+    private final String user;
+    private final String database;
+    private final int cancelSignalTimeout;
+    private final boolean reWriteBatchedInserts;
+    private final boolean columnSanitiserDisabled;
+    private final EscapeSyntaxCallMode escapeSyntaxCallMode;
+    private final boolean quoteReturningIdentifiers;
+    private final ArrayList<PGNotification> notifications = new ArrayList<>();
+    private final LruCache<Object, CachedQuery> statementCache;
+    private final CachedQueryCreateAction cachedQueryCreateAction;
+    // For getParameterStatuses(), GUC_REPORT tracking
+    private final TreeMap<String, String> parameterStatuses
+            = new TreeMap<>(String.CASE_INSENSITIVE_ORDER);
+    private int cancelPid;
+    private int cancelKey;
+    private String serverVersion;
+    private int serverVersionNum;
+    private TransactionState transactionState = TransactionState.IDLE;
+    private PreferQueryMode preferQueryMode;
+    private AutoSave autoSave;
+    private boolean flushCacheOnDeallocate = true;
+    // default value for server versions that don't report standard_conforming_strings
+    private boolean standardConformingStrings;
+    private SQLWarning warnings;
 
-  private int cancelPid;
-  private int cancelKey;
-  protected final QueryExecutorCloseAction closeAction;
-  private String serverVersion;
-  private int serverVersionNum;
-  private TransactionState transactionState = TransactionState.IDLE;
-  private final boolean reWriteBatchedInserts;
-  private final boolean columnSanitiserDisabled;
-  private final EscapeSyntaxCallMode escapeSyntaxCallMode;
-  private final boolean quoteReturningIdentifiers;
-  private PreferQueryMode preferQueryMode;
-  private AutoSave autoSave;
-  private boolean flushCacheOnDeallocate = true;
-  protected final boolean logServerErrorDetail;
-
-  // default value for server versions that don't report standard_conforming_strings
-  private boolean standardConformingStrings;
-
-  private SQLWarning warnings;
-  private final ArrayList<PGNotification> notifications = new ArrayList<>();
-
-  private final LruCache<Object, CachedQuery> statementCache;
-  private final CachedQueryCreateAction cachedQueryCreateAction;
-
-  // For getParameterStatuses(), GUC_REPORT tracking
-  private final TreeMap<String,String> parameterStatuses
-      = new TreeMap<>(String.CASE_INSENSITIVE_ORDER);
-
-  protected final ResourceLock lock = new ResourceLock();
-  protected final Condition lockCondition = lock.newCondition();
-
-  @SuppressWarnings("this-escape")
-  protected QueryExecutorBase(PGStream pgStream, int cancelSignalTimeout, Properties info) throws SQLException {
-    this.pgStream = pgStream;
-    this.user = PGProperty.USER.getOrDefault(info);
-    this.database = PGProperty.PG_DBNAME.getOrDefault(info);
-    this.cancelSignalTimeout = cancelSignalTimeout;
-    this.reWriteBatchedInserts = PGProperty.REWRITE_BATCHED_INSERTS.getBoolean(info);
-    this.columnSanitiserDisabled = PGProperty.DISABLE_COLUMN_SANITISER.getBoolean(info);
-    String callMode = PGProperty.ESCAPE_SYNTAX_CALL_MODE.getOrDefault(info);
-    this.escapeSyntaxCallMode = EscapeSyntaxCallMode.of(callMode);
-    this.quoteReturningIdentifiers = PGProperty.QUOTE_RETURNING_IDENTIFIERS.getBoolean(info);
-    String preferMode = PGProperty.PREFER_QUERY_MODE.getOrDefault(info);
-    this.preferQueryMode = PreferQueryMode.of(preferMode);
-    this.autoSave = AutoSave.of(PGProperty.AUTOSAVE.getOrDefault(info));
-    this.logServerErrorDetail = PGProperty.LOG_SERVER_ERROR_DETAIL.getBoolean(info);
-    // assignment, argument
-    this.cachedQueryCreateAction = new CachedQueryCreateAction(this);
-    statementCache = new LruCache<>(
-        Math.max(0, PGProperty.PREPARED_STATEMENT_CACHE_QUERIES.getInt(info)),
-        Math.max(0, PGProperty.PREPARED_STATEMENT_CACHE_SIZE_MIB.getInt(info) * 1024L * 1024L),
-        false,
-        cachedQueryCreateAction,
-        new LruCache.EvictAction<CachedQuery>() {
-          @Override
-          public void evict(CachedQuery cachedQuery) throws SQLException {
-            cachedQuery.query.close();
-          }
-        });
-    this.closeAction = createCloseAction();
-  }
-
-  protected QueryExecutorCloseAction createCloseAction() {
-    return new QueryExecutorCloseAction(pgStream);
-  }
-
-  /**
-   * Sends "terminate connection" message to the backend.
-   * @throws IOException in case connection termination fails
-   * @deprecated use {@link #getCloseAction()} instead
-   */
-  @Deprecated
-  protected abstract void sendCloseMessage() throws IOException;
-
-  @Override
-  public void setNetworkTimeout(int milliseconds) throws IOException {
-    pgStream.setNetworkTimeout(milliseconds);
-  }
-
-  @Override
-  public int getNetworkTimeout() throws IOException {
-    return pgStream.getNetworkTimeout();
-  }
-
-  @Override
-  public HostSpec getHostSpec() {
-    return pgStream.getHostSpec();
-  }
-
-  @Override
-  public String getUser() {
-    return user;
-  }
-
-  @Override
-  public String getDatabase() {
-    return database;
-  }
-
-  public void setBackendKeyData(int cancelPid, int cancelKey) {
-    this.cancelPid = cancelPid;
-    this.cancelKey = cancelKey;
-  }
-
-  @Override
-  public int getBackendPID() {
-    return cancelPid;
-  }
-
-  @Override
-  public void abort() {
-    closeAction.abort();
-  }
-
-  @Override
-  public Closeable getCloseAction() {
-    return closeAction;
-  }
-
-  @Override
-  public void close() {
-    if (closeAction.isClosed()) {
-      return;
+    @SuppressWarnings("this-escape")
+    protected QueryExecutorBase(PGStream pgStream, int cancelSignalTimeout, Properties info) throws SQLException {
+        this.pgStream = pgStream;
+        this.user = PGProperty.USER.getOrDefault(info);
+        this.database = PGProperty.PG_DBNAME.getOrDefault(info);
+        this.cancelSignalTimeout = cancelSignalTimeout;
+        this.reWriteBatchedInserts = PGProperty.REWRITE_BATCHED_INSERTS.getBoolean(info);
+        this.columnSanitiserDisabled = PGProperty.DISABLE_COLUMN_SANITISER.getBoolean(info);
+        String callMode = PGProperty.ESCAPE_SYNTAX_CALL_MODE.getOrDefault(info);
+        this.escapeSyntaxCallMode = EscapeSyntaxCallMode.of(callMode);
+        this.quoteReturningIdentifiers = PGProperty.QUOTE_RETURNING_IDENTIFIERS.getBoolean(info);
+        String preferMode = PGProperty.PREFER_QUERY_MODE.getOrDefault(info);
+        this.preferQueryMode = PreferQueryMode.of(preferMode);
+        this.autoSave = AutoSave.of(PGProperty.AUTOSAVE.getOrDefault(info));
+        this.logServerErrorDetail = PGProperty.LOG_SERVER_ERROR_DETAIL.getBoolean(info);
+        // assignment, argument
+        this.cachedQueryCreateAction = new CachedQueryCreateAction(this);
+        statementCache = new LruCache<>(
+                Math.max(0, PGProperty.PREPARED_STATEMENT_CACHE_QUERIES.getInt(info)),
+                Math.max(0, PGProperty.PREPARED_STATEMENT_CACHE_SIZE_MIB.getInt(info) * 1024L * 1024L),
+                false,
+                cachedQueryCreateAction,
+                new LruCache.EvictAction<CachedQuery>() {
+                    @Override
+                    public void evict(CachedQuery cachedQuery) throws SQLException {
+                        cachedQuery.query.close();
+                    }
+                });
+        this.closeAction = createCloseAction();
     }
 
-    try {
-      getCloseAction().close();
-    } catch (IOException ioe) {
-      LOGGER.log(Level.FINEST, "Discarding IOException on close:", ioe);
+    protected QueryExecutorCloseAction createCloseAction() {
+        return new QueryExecutorCloseAction(pgStream);
     }
-  }
 
-  @Override
-  public boolean isClosed() {
-    return closeAction.isClosed();
-  }
+    /**
+     * Sends "terminate connection" message to the backend.
+     *
+     * @throws IOException in case connection termination fails
+     * @deprecated use {@link #getCloseAction()} instead
+     */
+    @Deprecated
+    protected abstract void sendCloseMessage() throws IOException;
 
-  @Override
-  public void sendQueryCancel() throws SQLException {
+    @Override
+    public int getNetworkTimeout() throws IOException {
+        return pgStream.getNetworkTimeout();
+    }
 
-    PGStream cancelStream = null;
+    @Override
+    public void setNetworkTimeout(int milliseconds) throws IOException {
+        pgStream.setNetworkTimeout(milliseconds);
+    }
 
-    // Now we need to construct and send a cancel packet
-    try {
-      if (LOGGER.isLoggable(Level.FINEST)) {
-        LOGGER.log(Level.FINEST, " FE=> CancelRequest(pid={0},ckey={1})", new Object[]{cancelPid, cancelKey});
-      }
+    @Override
+    public HostSpec getHostSpec() {
+        return pgStream.getHostSpec();
+    }
 
-      cancelStream =
-          new PGStream(pgStream.getSocketFactory(), pgStream.getHostSpec(), cancelSignalTimeout);
-      if (cancelSignalTimeout > 0) {
-        cancelStream.setNetworkTimeout(cancelSignalTimeout);
-      }
-      cancelStream.sendInteger4(16);
-      cancelStream.sendInteger2(1234);
-      cancelStream.sendInteger2(5678);
-      cancelStream.sendInteger4(cancelPid);
-      cancelStream.sendInteger4(cancelKey);
-      cancelStream.flush();
-      cancelStream.receiveEOF();
-    } catch (IOException e) {
-      // Safe to ignore.
-      LOGGER.log(Level.FINEST, "Ignoring exception on cancel request:", e);
-    } finally {
-      if (cancelStream != null) {
-        try {
-          cancelStream.close();
-        } catch (IOException e) {
-          // Ignored.
+    @Override
+    public String getUser() {
+        return user;
+    }
+
+    @Override
+    public String getDatabase() {
+        return database;
+    }
+
+    public void setBackendKeyData(int cancelPid, int cancelKey) {
+        this.cancelPid = cancelPid;
+        this.cancelKey = cancelKey;
+    }
+
+    @Override
+    public int getBackendPID() {
+        return cancelPid;
+    }
+
+    @Override
+    public void abort() {
+        closeAction.abort();
+    }
+
+    @Override
+    public Closeable getCloseAction() {
+        return closeAction;
+    }
+
+    @Override
+    public void close() {
+        if (closeAction.isClosed()) {
+            return;
         }
-      }
-    }
-  }
 
-  public void addWarning(SQLWarning newWarning) {
-    try (ResourceLock ignore = lock.obtain()) {
-      if (warnings == null) {
-        warnings = newWarning;
-      } else {
-        warnings.setNextWarning(newWarning);
-      }
-    }
-  }
-
-  public void addNotification(PGNotification notification) {
-    try (ResourceLock ignore = lock.obtain()) {
-      notifications.add(notification);
-    }
-  }
-
-  @Override
-  public PGNotification[] getNotifications() throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      PGNotification[] array = notifications.toArray(new PGNotification[0]);
-      notifications.clear();
-      return array;
-    }
-  }
-
-  @Override
-  public SQLWarning getWarnings() {
-    try (ResourceLock ignore = lock.obtain()) {
-      SQLWarning chain = warnings;
-      warnings = null;
-      return chain;
-    }
-  }
-
-  @Override
-  public String getServerVersion() {
-    String serverVersion = this.serverVersion;
-    if (serverVersion == null) {
-      throw new IllegalStateException("serverVersion must not be null");
-    }
-    return serverVersion;
-  }
-
-  @SuppressWarnings("deprecation")
-  @Override
-  public int getServerVersionNum() {
-    if (serverVersionNum != 0) {
-      return serverVersionNum;
-    }
-    return serverVersionNum = Utils.parseServerVersionStr(getServerVersion());
-  }
-
-  public void setServerVersion(String serverVersion) {
-    this.serverVersion = serverVersion;
-  }
-
-  public void setServerVersionNum(int serverVersionNum) {
-    this.serverVersionNum = serverVersionNum;
-  }
-
-  public void setTransactionState(TransactionState state) {
-    try (ResourceLock ignore = lock.obtain()) {
-      transactionState = state;
-    }
-  }
-
-  public void setStandardConformingStrings(boolean value) {
-    try (ResourceLock ignore = lock.obtain()) {
-      standardConformingStrings = value;
-    }
-  }
-
-  @Override
-  public boolean getStandardConformingStrings() {
-    try (ResourceLock ignore = lock.obtain()) {
-      return standardConformingStrings;
-    }
-  }
-
-  @Override
-  public boolean getQuoteReturningIdentifiers() {
-    return quoteReturningIdentifiers;
-  }
-
-  @Override
-  public TransactionState getTransactionState() {
-    try (ResourceLock ignore = lock.obtain()) {
-      return transactionState;
-    }
-  }
-
-  public void setEncoding(Encoding encoding) throws IOException {
-    pgStream.setEncoding(encoding);
-  }
-
-  @Override
-  public Encoding getEncoding() {
-    return pgStream.getEncoding();
-  }
-
-  @Override
-  public boolean isReWriteBatchedInsertsEnabled() {
-    return this.reWriteBatchedInserts;
-  }
-
-  @Override
-  public final CachedQuery borrowQuery(String sql) throws SQLException {
-    return statementCache.borrow(sql);
-  }
-
-  @Override
-  public final CachedQuery borrowCallableQuery(String sql) throws SQLException {
-    return statementCache.borrow(new CallableQueryKey(sql));
-  }
-
-  @Override
-  public final CachedQuery borrowReturningQuery(String sql, String [] columnNames)
-      throws SQLException {
-    return statementCache.borrow(new QueryWithReturningColumnsKey(sql, true, true,
-        columnNames
-    ));
-  }
-
-  @Override
-  public CachedQuery borrowQueryByKey(Object key) throws SQLException {
-    return statementCache.borrow(key);
-  }
-
-  @Override
-  public void releaseQuery(CachedQuery cachedQuery) {
-    statementCache.put(cachedQuery.key, cachedQuery);
-  }
-
-  @Override
-  public final Object createQueryKey(String sql, boolean escapeProcessing,
-      boolean isParameterized, String ... columnNames) {
-    Object key;
-    if (columnNames == null || columnNames.length != 0) {
-      // Null means "return whatever sensible columns are" (e.g. primary key, or serial, or something like that)
-      key = new QueryWithReturningColumnsKey(sql, isParameterized, escapeProcessing, columnNames);
-    } else if (isParameterized) {
-      // If no generated columns requested, just use the SQL as a cache key
-      key = sql;
-    } else {
-      key = new BaseQueryKey(sql, false, escapeProcessing);
-    }
-    return key;
-  }
-
-  @Override
-  public CachedQuery createQueryByKey(Object key) throws SQLException {
-    return cachedQueryCreateAction.create(key);
-  }
-
-  @Override
-  public final CachedQuery createQuery(String sql, boolean escapeProcessing,
-      boolean isParameterized, String ... columnNames)
-      throws SQLException {
-    Object key = createQueryKey(sql, escapeProcessing, isParameterized, columnNames);
-    // Note: cache is not reused here for two reasons:
-    //   1) Simplify initial implementation for simple statements
-    //   2) Non-prepared statements are likely to have literals, thus query reuse would not be often
-    return createQueryByKey(key);
-  }
-
-  @Override
-  public boolean isColumnSanitiserDisabled() {
-    return columnSanitiserDisabled;
-  }
-
-  @Override
-  public EscapeSyntaxCallMode getEscapeSyntaxCallMode() {
-    return escapeSyntaxCallMode;
-  }
-
-  @Override
-  public PreferQueryMode getPreferQueryMode() {
-    return preferQueryMode;
-  }
-
-  public void setPreferQueryMode(PreferQueryMode mode) {
-    preferQueryMode = mode;
-  }
-
-  @Override
-  public AutoSave getAutoSave() {
-    return autoSave;
-  }
-
-  @Override
-  public void setAutoSave(AutoSave autoSave) {
-    this.autoSave = autoSave;
-  }
-
-  protected boolean willHealViaReparse(SQLException e) {
-    if (e == null || e.getSQLState() == null) {
-      return false;
+        try {
+            getCloseAction().close();
+        } catch (IOException ioe) {
+            LOGGER.log(Level.FINEST, "Discarding IOException on close:", ioe);
+        }
     }
 
-    // "prepared statement \"S_2\" does not exist"
-    if (PSQLState.INVALID_SQL_STATEMENT_NAME.getState().equals(e.getSQLState())) {
-      return true;
-    }
-    if (!PSQLState.NOT_IMPLEMENTED.getState().equals(e.getSQLState())) {
-      return false;
+    @Override
+    public boolean isClosed() {
+        return closeAction.isClosed();
     }
 
-    if (!(e instanceof PSQLException)) {
-      return false;
+    @Override
+    public void sendQueryCancel() throws SQLException {
+
+        PGStream cancelStream = null;
+
+        // Now we need to construct and send a cancel packet
+        try {
+            if (LOGGER.isLoggable(Level.FINEST)) {
+                LOGGER.log(Level.FINEST, " FE=> CancelRequest(pid={0},ckey={1})", new Object[]{cancelPid, cancelKey});
+            }
+
+            cancelStream =
+                    new PGStream(pgStream.getSocketFactory(), pgStream.getHostSpec(), cancelSignalTimeout);
+            if (cancelSignalTimeout > 0) {
+                cancelStream.setNetworkTimeout(cancelSignalTimeout);
+            }
+            cancelStream.sendInteger4(16);
+            cancelStream.sendInteger2(1234);
+            cancelStream.sendInteger2(5678);
+            cancelStream.sendInteger4(cancelPid);
+            cancelStream.sendInteger4(cancelKey);
+            cancelStream.flush();
+            cancelStream.receiveEOF();
+        } catch (IOException e) {
+            // Safe to ignore.
+            LOGGER.log(Level.FINEST, "Ignoring exception on cancel request:", e);
+        } finally {
+            if (cancelStream != null) {
+                try {
+                    cancelStream.close();
+                } catch (IOException e) {
+                    // Ignored.
+                }
+            }
+        }
     }
 
-    PSQLException pe = (PSQLException) e;
-
-    ServerErrorMessage serverErrorMessage = pe.getServerErrorMessage();
-    if (serverErrorMessage == null) {
-      return false;
-    }
-    // "cached plan must not change result type"
-    String routine = serverErrorMessage.getRoutine();
-    return "RevalidateCachedQuery".equals(routine) // 9.2+
-        || "RevalidateCachedPlan".equals(routine); // <= 9.1
-  }
-
-  @Override
-  public boolean willHealOnRetry(SQLException e) {
-    if (autoSave == AutoSave.NEVER && getTransactionState() == TransactionState.FAILED) {
-      // If autorollback is not activated, then every statement will fail with
-      // 'transaction is aborted', etc, etc
-      return false;
-    }
-    return willHealViaReparse(e);
-  }
-
-  public boolean isFlushCacheOnDeallocate() {
-    return flushCacheOnDeallocate;
-  }
-
-  @Override
-  public void setFlushCacheOnDeallocate(boolean flushCacheOnDeallocate) {
-    this.flushCacheOnDeallocate = flushCacheOnDeallocate;
-  }
-
-  protected boolean hasNotifications() {
-    return !notifications.isEmpty();
-  }
-
-  @Override
-  public final Map<String, String> getParameterStatuses() {
-    return Collections.unmodifiableMap(parameterStatuses);
-  }
-
-  @Override
-  public final String getParameterStatus(String parameterName) {
-    return parameterStatuses.get(parameterName);
-  }
-
-  /**
-   * Update the parameter status map in response to a new ParameterStatus
-   * wire protocol message.
-   *
-   * <p>The server sends ParameterStatus messages when GUC_REPORT settings are
-   * initially assigned and whenever they change.</p>
-   *
-   * <p>A future version may invoke a client-defined listener class at this point,
-   * so this should be the only access path.</p>
-   *
-   * <p>Keys are case-insensitive and case-preserving.</p>
-   *
-   * <p>The server doesn't provide a way to report deletion of a reportable
-   * parameter so we don't expose one here.</p>
-   *
-   * @param parameterName case-insensitive case-preserving name of parameter to create or update
-   * @param parameterStatus new value of parameter
-   * @see org.postgresql.PGConnection#getParameterStatuses
-   * @see org.postgresql.PGConnection#getParameterStatus
-   */
-  protected void onParameterStatus(String parameterName, String parameterStatus) {
-    if (parameterName == null || "".equals(parameterName)) {
-      throw new IllegalStateException("attempt to set GUC_REPORT parameter with null or empty-string name");
+    public void addWarning(SQLWarning newWarning) {
+        try (ResourceLock ignore = lock.obtain()) {
+            if (warnings == null) {
+                warnings = newWarning;
+            } else {
+                warnings.setNextWarning(newWarning);
+            }
+        }
     }
 
-    parameterStatuses.put(parameterName, parameterStatus);
-  }
+    public void addNotification(PGNotification notification) {
+        try (ResourceLock ignore = lock.obtain()) {
+            notifications.add(notification);
+        }
+    }
+
+    @Override
+    public PGNotification[] getNotifications() throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            PGNotification[] array = notifications.toArray(new PGNotification[0]);
+            notifications.clear();
+            return array;
+        }
+    }
+
+    @Override
+    public SQLWarning getWarnings() {
+        try (ResourceLock ignore = lock.obtain()) {
+            SQLWarning chain = warnings;
+            warnings = null;
+            return chain;
+        }
+    }
+
+    @Override
+    public String getServerVersion() {
+        String serverVersion = this.serverVersion;
+        if (serverVersion == null) {
+            throw new IllegalStateException("serverVersion must not be null");
+        }
+        return serverVersion;
+    }
+
+    public void setServerVersion(String serverVersion) {
+        this.serverVersion = serverVersion;
+    }
+
+    @SuppressWarnings("deprecation")
+    @Override
+    public int getServerVersionNum() {
+        if (serverVersionNum != 0) {
+            return serverVersionNum;
+        }
+        return serverVersionNum = Utils.parseServerVersionStr(getServerVersion());
+    }
+
+    public void setServerVersionNum(int serverVersionNum) {
+        this.serverVersionNum = serverVersionNum;
+    }
+
+    @Override
+    public boolean getStandardConformingStrings() {
+        try (ResourceLock ignore = lock.obtain()) {
+            return standardConformingStrings;
+        }
+    }
+
+    public void setStandardConformingStrings(boolean value) {
+        try (ResourceLock ignore = lock.obtain()) {
+            standardConformingStrings = value;
+        }
+    }
+
+    @Override
+    public boolean getQuoteReturningIdentifiers() {
+        return quoteReturningIdentifiers;
+    }
+
+    @Override
+    public TransactionState getTransactionState() {
+        try (ResourceLock ignore = lock.obtain()) {
+            return transactionState;
+        }
+    }
+
+    public void setTransactionState(TransactionState state) {
+        try (ResourceLock ignore = lock.obtain()) {
+            transactionState = state;
+        }
+    }
+
+    @Override
+    public Encoding getEncoding() {
+        return pgStream.getEncoding();
+    }
+
+    public void setEncoding(Encoding encoding) throws IOException {
+        pgStream.setEncoding(encoding);
+    }
+
+    @Override
+    public boolean isReWriteBatchedInsertsEnabled() {
+        return this.reWriteBatchedInserts;
+    }
+
+    @Override
+    public final CachedQuery borrowQuery(String sql) throws SQLException {
+        return statementCache.borrow(sql);
+    }
+
+    @Override
+    public final CachedQuery borrowCallableQuery(String sql) throws SQLException {
+        return statementCache.borrow(new CallableQueryKey(sql));
+    }
+
+    @Override
+    public final CachedQuery borrowReturningQuery(String sql, String[] columnNames)
+            throws SQLException {
+        return statementCache.borrow(new QueryWithReturningColumnsKey(sql, true, true,
+                columnNames
+        ));
+    }
+
+    @Override
+    public CachedQuery borrowQueryByKey(Object key) throws SQLException {
+        return statementCache.borrow(key);
+    }
+
+    @Override
+    public void releaseQuery(CachedQuery cachedQuery) {
+        statementCache.put(cachedQuery.key, cachedQuery);
+    }
+
+    @Override
+    public final Object createQueryKey(String sql, boolean escapeProcessing,
+                                       boolean isParameterized, String... columnNames) {
+        Object key;
+        if (columnNames == null || columnNames.length != 0) {
+            // Null means "return whatever sensible columns are" (e.g. primary key, or serial, or something like that)
+            key = new QueryWithReturningColumnsKey(sql, isParameterized, escapeProcessing, columnNames);
+        } else if (isParameterized) {
+            // If no generated columns requested, just use the SQL as a cache key
+            key = sql;
+        } else {
+            key = new BaseQueryKey(sql, false, escapeProcessing);
+        }
+        return key;
+    }
+
+    @Override
+    public CachedQuery createQueryByKey(Object key) throws SQLException {
+        return cachedQueryCreateAction.create(key);
+    }
+
+    @Override
+    public final CachedQuery createQuery(String sql, boolean escapeProcessing,
+                                         boolean isParameterized, String... columnNames)
+            throws SQLException {
+        Object key = createQueryKey(sql, escapeProcessing, isParameterized, columnNames);
+        // Note: cache is not reused here for two reasons:
+        //   1) Simplify initial implementation for simple statements
+        //   2) Non-prepared statements are likely to have literals, thus query reuse would not be often
+        return createQueryByKey(key);
+    }
+
+    @Override
+    public boolean isColumnSanitiserDisabled() {
+        return columnSanitiserDisabled;
+    }
+
+    @Override
+    public EscapeSyntaxCallMode getEscapeSyntaxCallMode() {
+        return escapeSyntaxCallMode;
+    }
+
+    @Override
+    public PreferQueryMode getPreferQueryMode() {
+        return preferQueryMode;
+    }
+
+    public void setPreferQueryMode(PreferQueryMode mode) {
+        preferQueryMode = mode;
+    }
+
+    @Override
+    public AutoSave getAutoSave() {
+        return autoSave;
+    }
+
+    @Override
+    public void setAutoSave(AutoSave autoSave) {
+        this.autoSave = autoSave;
+    }
+
+    protected boolean willHealViaReparse(SQLException e) {
+        if (e == null || e.getSQLState() == null) {
+            return false;
+        }
+
+        // "prepared statement \"S_2\" does not exist"
+        if (PSQLState.INVALID_SQL_STATEMENT_NAME.getState().equals(e.getSQLState())) {
+            return true;
+        }
+        if (!PSQLState.NOT_IMPLEMENTED.getState().equals(e.getSQLState())) {
+            return false;
+        }
+
+        if (!(e instanceof PSQLException)) {
+            return false;
+        }
+
+        PSQLException pe = (PSQLException) e;
+
+        ServerErrorMessage serverErrorMessage = pe.getServerErrorMessage();
+        if (serverErrorMessage == null) {
+            return false;
+        }
+        // "cached plan must not change result type"
+        String routine = serverErrorMessage.getRoutine();
+        return "RevalidateCachedQuery".equals(routine) // 9.2+
+                || "RevalidateCachedPlan".equals(routine); // <= 9.1
+    }
+
+    @Override
+    public boolean willHealOnRetry(SQLException e) {
+        if (autoSave == AutoSave.NEVER && getTransactionState() == TransactionState.FAILED) {
+            // If autorollback is not activated, then every statement will fail with
+            // 'transaction is aborted', etc, etc
+            return false;
+        }
+        return willHealViaReparse(e);
+    }
+
+    public boolean isFlushCacheOnDeallocate() {
+        return flushCacheOnDeallocate;
+    }
+
+    @Override
+    public void setFlushCacheOnDeallocate(boolean flushCacheOnDeallocate) {
+        this.flushCacheOnDeallocate = flushCacheOnDeallocate;
+    }
+
+    protected boolean hasNotifications() {
+        return !notifications.isEmpty();
+    }
+
+    @Override
+    public final Map<String, String> getParameterStatuses() {
+        return Collections.unmodifiableMap(parameterStatuses);
+    }
+
+    @Override
+    public final String getParameterStatus(String parameterName) {
+        return parameterStatuses.get(parameterName);
+    }
+
+    /**
+     * Update the parameter status map in response to a new ParameterStatus
+     * wire protocol message.
+     *
+     * <p>The server sends ParameterStatus messages when GUC_REPORT settings are
+     * initially assigned and whenever they change.</p>
+     *
+     * <p>A future version may invoke a client-defined listener class at this point,
+     * so this should be the only access path.</p>
+     *
+     * <p>Keys are case-insensitive and case-preserving.</p>
+     *
+     * <p>The server doesn't provide a way to report deletion of a reportable
+     * parameter so we don't expose one here.</p>
+     *
+     * @param parameterName   case-insensitive case-preserving name of parameter to create or update
+     * @param parameterStatus new value of parameter
+     * @see org.postgresql.PGConnection#getParameterStatuses
+     * @see org.postgresql.PGConnection#getParameterStatus
+     */
+    protected void onParameterStatus(String parameterName, String parameterStatus) {
+        if (parameterName == null || "".equals(parameterName)) {
+            throw new IllegalStateException("attempt to set GUC_REPORT parameter with null or empty-string name");
+        }
+
+        parameterStatuses.put(parameterName, parameterStatus);
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/core/QueryExecutorCloseAction.java b/pgjdbc/src/main/java/org/postgresql/core/QueryExecutorCloseAction.java
index d28eac9..6cbd182 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/QueryExecutorCloseAction.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/QueryExecutorCloseAction.java
@@ -18,74 +18,74 @@ import java.util.logging.Logger;
  * to reduce heap usage in case the user abandons connection without closing it first.
  */
 public class QueryExecutorCloseAction implements Closeable {
-  private static final Logger LOGGER = Logger.getLogger(QueryExecutorBase.class.getName());
+    private static final Logger LOGGER = Logger.getLogger(QueryExecutorBase.class.getName());
 
-  @SuppressWarnings("cast")
-  private static final AtomicReferenceFieldUpdater<QueryExecutorCloseAction, PGStream> PG_STREAM_UPDATER =
-      AtomicReferenceFieldUpdater.newUpdater(
-          QueryExecutorCloseAction.class, (Class<PGStream>) PGStream.class, "pgStream");
+    @SuppressWarnings("cast")
+    private static final AtomicReferenceFieldUpdater<QueryExecutorCloseAction, PGStream> PG_STREAM_UPDATER =
+            AtomicReferenceFieldUpdater.newUpdater(
+                    QueryExecutorCloseAction.class, (Class<PGStream>) PGStream.class, "pgStream");
 
-  private volatile PGStream pgStream;
+    private volatile PGStream pgStream;
 
-  public QueryExecutorCloseAction(PGStream pgStream) {
-    this.pgStream = pgStream;
-  }
-
-  public boolean isClosed() {
-    PGStream pgStream = this.pgStream;
-    return pgStream == null || pgStream.isClosed();
-  }
-
-  public void abort() {
-    PGStream pgStream = this.pgStream;
-    if (pgStream == null || !PG_STREAM_UPDATER.compareAndSet(this, pgStream, null)) {
-      // The connection has already been closed
-      return;
+    public QueryExecutorCloseAction(PGStream pgStream) {
+        this.pgStream = pgStream;
     }
-    try {
-      LOGGER.log(Level.FINEST, " FE=> close socket");
-      pgStream.getSocket().close();
-    } catch (IOException e) {
-      // ignore
-    }
-  }
 
-  @Override
-  public void close() throws IOException {
-    LOGGER.log(Level.FINEST, " FE=> Terminate");
-    PGStream pgStream = this.pgStream;
-    if (pgStream == null || !PG_STREAM_UPDATER.compareAndSet(this, pgStream, null)) {
-      // The connection has already been closed
-      return;
+    public boolean isClosed() {
+        PGStream pgStream = this.pgStream;
+        return pgStream == null || pgStream.isClosed();
     }
-    sendCloseMessage(pgStream);
 
-    // Technically speaking, this check should not be needed,
-    // however org.postgresql.test.jdbc2.ConnectionTest.testPGStreamSettings
-    // closes pgStream reflectively, so here's an extra check to prevent failures
-    // when getNetworkTimeout is called on a closed stream
-    if (pgStream.isClosed()) {
-      return;
+    public void abort() {
+        PGStream pgStream = this.pgStream;
+        if (pgStream == null || !PG_STREAM_UPDATER.compareAndSet(this, pgStream, null)) {
+            // The connection has already been closed
+            return;
+        }
+        try {
+            LOGGER.log(Level.FINEST, " FE=> close socket");
+            pgStream.getSocket().close();
+        } catch (IOException e) {
+            // ignore
+        }
     }
-    pgStream.flush();
-    pgStream.close();
-  }
 
-  public void sendCloseMessage(PGStream pgStream) throws IOException {
-    // Technically speaking, this check should not be needed,
-    // however org.postgresql.test.jdbc2.ConnectionTest.testPGStreamSettings
-    // closes pgStream reflectively, so here's an extra check to prevent failures
-    // when getNetworkTimeout is called on a closed stream
-    if (pgStream.isClosed()) {
-      return;
+    @Override
+    public void close() throws IOException {
+        LOGGER.log(Level.FINEST, " FE=> Terminate");
+        PGStream pgStream = this.pgStream;
+        if (pgStream == null || !PG_STREAM_UPDATER.compareAndSet(this, pgStream, null)) {
+            // The connection has already been closed
+            return;
+        }
+        sendCloseMessage(pgStream);
+
+        // Technically speaking, this check should not be needed,
+        // however org.postgresql.test.jdbc2.ConnectionTest.testPGStreamSettings
+        // closes pgStream reflectively, so here's an extra check to prevent failures
+        // when getNetworkTimeout is called on a closed stream
+        if (pgStream.isClosed()) {
+            return;
+        }
+        pgStream.flush();
+        pgStream.close();
     }
-    // Prevent blocking the thread for too long
-    // The connection will be discarded anyway, so there's no much sense in waiting long
-    int timeout = pgStream.getNetworkTimeout();
-    if (timeout == 0 || timeout > 1000) {
-      pgStream.setNetworkTimeout(1000);
+
+    public void sendCloseMessage(PGStream pgStream) throws IOException {
+        // Technically speaking, this check should not be needed,
+        // however org.postgresql.test.jdbc2.ConnectionTest.testPGStreamSettings
+        // closes pgStream reflectively, so here's an extra check to prevent failures
+        // when getNetworkTimeout is called on a closed stream
+        if (pgStream.isClosed()) {
+            return;
+        }
+        // Prevent blocking the thread for too long
+        // The connection will be discarded anyway, so there's not much sense in waiting long
+        int timeout = pgStream.getNetworkTimeout();
+        if (timeout == 0 || timeout > 1000) {
+            pgStream.setNetworkTimeout(1000);
+        }
+        pgStream.sendChar('X');
+        pgStream.sendInteger4(4);
     }
-    pgStream.sendChar('X');
-    pgStream.sendInteger4(4);
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/core/QueryWithReturningColumnsKey.java b/pgjdbc/src/main/java/org/postgresql/core/QueryWithReturningColumnsKey.java
index 0b18c0e..32688f1 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/QueryWithReturningColumnsKey.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/QueryWithReturningColumnsKey.java
@@ -15,68 +15,68 @@ import java.util.Arrays;
  * should be returned. {@link Parser} is aware of that and does not quote {@code *}</p>
  */
 class QueryWithReturningColumnsKey extends BaseQueryKey {
-  public final String[] columnNames;
-  private int size; // query length cannot exceed MAX_INT
+    public final String[] columnNames;
+    private int size; // query length cannot exceed MAX_INT
 
-  QueryWithReturningColumnsKey(String sql, boolean isParameterized, boolean escapeProcessing,
-      String [] columnNames) {
-    super(sql, isParameterized, escapeProcessing);
-    if (columnNames == null) {
-      // TODO: teach parser to fetch key columns somehow when no column names were given
-      columnNames = new String[]{"*"};
-    }
-    this.columnNames = columnNames;
-  }
-
-  @Override
-  public long getSize() {
-    int size = this.size;
-    if (size != 0) {
-      return size;
-    }
-    size = (int) super.getSize();
-    if (columnNames != null) {
-      size += 16; // array itself
-      for (String columnName: columnNames) {
-        size += columnName.length() * 2; // 2 bytes per char, revise with Java 9's compact strings
-      }
-    }
-    this.size = size;
-    return size;
-  }
-
-  @Override
-  public String toString() {
-    return "QueryWithReturningColumnsKey{"
-        + "sql='" + sql + '\''
-        + ", isParameterized=" + isParameterized
-        + ", escapeProcessing=" + escapeProcessing
-        + ", columnNames=" + Arrays.toString(columnNames)
-        + '}';
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) {
-      return true;
-    }
-    if (o == null || getClass() != o.getClass()) {
-      return false;
-    }
-    if (!super.equals(o)) {
-      return false;
+    QueryWithReturningColumnsKey(String sql, boolean isParameterized, boolean escapeProcessing,
+                                 String[] columnNames) {
+        super(sql, isParameterized, escapeProcessing);
+        if (columnNames == null) {
+            // TODO: teach parser to fetch key columns somehow when no column names were given
+            columnNames = new String[]{"*"};
+        }
+        this.columnNames = columnNames;
     }
 
-    QueryWithReturningColumnsKey that = (QueryWithReturningColumnsKey) o;
+    @Override
+    public long getSize() {
+        int size = this.size;
+        if (size != 0) {
+            return size;
+        }
+        size = (int) super.getSize();
+        if (columnNames != null) {
+            size += 16; // array itself
+            for (String columnName : columnNames) {
+                size += columnName.length() * 2; // 2 bytes per char, revise with Java 9's compact strings
+            }
+        }
+        this.size = size;
+        return size;
+    }
 
-    // Probably incorrect - comparing Object[] arrays with Arrays.equals
-    return Arrays.equals(columnNames, that.columnNames);
-  }
+    @Override
+    public String toString() {
+        return "QueryWithReturningColumnsKey{"
+                + "sql='" + sql + '\''
+                + ", isParameterized=" + isParameterized
+                + ", escapeProcessing=" + escapeProcessing
+                + ", columnNames=" + Arrays.toString(columnNames)
+                + '}';
+    }
 
-  @Override
-  public int hashCode() {
-    int result = super.hashCode();
-    result = 31 * result + Arrays.hashCode(columnNames);
-    return result;
-  }
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+        if (!super.equals(o)) {
+            return false;
+        }
+
+        QueryWithReturningColumnsKey that = (QueryWithReturningColumnsKey) o;
+
+        // Probably incorrect - comparing Object[] arrays with Arrays.equals
+        return Arrays.equals(columnNames, that.columnNames);
+    }
+
+    @Override
+    public int hashCode() {
+        int result = super.hashCode();
+        result = 31 * result + Arrays.hashCode(columnNames);
+        return result;
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/core/ReplicationProtocol.java b/pgjdbc/src/main/java/org/postgresql/core/ReplicationProtocol.java
index 8c67c06..383a352 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/ReplicationProtocol.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/ReplicationProtocol.java
@@ -5,30 +5,29 @@
 
 package org.postgresql.core;
 
+import java.sql.SQLException;
 import org.postgresql.replication.PGReplicationStream;
 import org.postgresql.replication.fluent.logical.LogicalReplicationOptions;
 import org.postgresql.replication.fluent.physical.PhysicalReplicationOptions;
 
-import java.sql.SQLException;
-
 /**
  * <p>Abstracts the protocol-specific details of physic and logic replication.</p>
  *
  * <p>With each connection open with replication options associate own instance ReplicationProtocol.</p>
  */
 public interface ReplicationProtocol {
-  /**
-   * @param options not null options for logical replication stream
-   * @return not null stream instance from which available fetch wal logs that was decode by output
-   *     plugin
-   * @throws SQLException on error
-   */
-  PGReplicationStream startLogical(LogicalReplicationOptions options) throws SQLException;
+    /**
+     * @param options not null options for logical replication stream
+     * @return non-null stream instance from which WAL logs decoded by the output
+     * plugin can be fetched
+     * @throws SQLException on error
+     */
+    PGReplicationStream startLogical(LogicalReplicationOptions options) throws SQLException;
 
-  /**
-   * @param options not null options for physical replication stream
-   * @return not null stream instance from which available fetch wal logs
-   * @throws SQLException on error
-   */
-  PGReplicationStream startPhysical(PhysicalReplicationOptions options) throws SQLException;
+    /**
+     * @param options not null options for physical replication stream
+     * @return non-null stream instance from which WAL logs can be fetched
+     * @throws SQLException on error
+     */
+    PGReplicationStream startPhysical(PhysicalReplicationOptions options) throws SQLException;
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/core/ResultCursor.java b/pgjdbc/src/main/java/org/postgresql/core/ResultCursor.java
index 7bd88c6..bc7aa7c 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/ResultCursor.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/ResultCursor.java
@@ -14,9 +14,9 @@ package org.postgresql.core;
  * @author Oliver Jowett (oliver@opencloud.com)
  */
 public interface ResultCursor {
-  /**
-   * Close this cursor. This may not immediately free underlying resources but may make it happen
-   * more promptly. Closed cursors should not be passed to QueryExecutor methods.
-   */
-  void close();
+    /**
+     * Close this cursor. This may not immediately free underlying resources but may make it happen
+     * more promptly. Closed cursors should not be passed to QueryExecutor methods.
+     */
+    void close();
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/core/ResultHandler.java b/pgjdbc/src/main/java/org/postgresql/core/ResultHandler.java
index c462bf1..2664af0 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/ResultHandler.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/ResultHandler.java
@@ -25,71 +25,73 @@ import java.util.List;
  * @author Oliver Jowett (oliver@opencloud.com)
  */
 public interface ResultHandler {
-  /**
-   * Called when result rows are received from a query.
-   *
-   * @param fromQuery the underlying query that generated these results; this may not be very
-   *        specific (e.g. it may be a query that includes multiple statements).
-   * @param fields column metadata for the resultset; might be <code>null</code> if
-   *        Query.QUERY_NO_METADATA was specified.
-   * @param tuples the actual data
-   * @param cursor a cursor to use to fetch additional data; <code>null</code> if no further results
-   *        are present.
-   */
-  void handleResultRows(Query fromQuery, Field[] fields, List<Tuple> tuples,
-      ResultCursor cursor);
+    /**
+     * Called when result rows are received from a query.
+     *
+     * @param fromQuery the underlying query that generated these results; this may not be very
+     *                  specific (e.g. it may be a query that includes multiple statements).
+     * @param fields    column metadata for the resultset; might be <code>null</code> if
+     *                  Query.QUERY_NO_METADATA was specified.
+     * @param tuples    the actual data
+     * @param cursor    a cursor to use to fetch additional data; <code>null</code> if no further results
+     *                  are present.
+     */
+    void handleResultRows(Query fromQuery, Field[] fields, List<Tuple> tuples,
+                          ResultCursor cursor);
 
-  /**
-   * Called when a query that did not return a resultset completes.
-   *
-   * @param status the command status string (e.g. "SELECT") returned by the backend
-   * @param updateCount the number of rows affected by an INSERT, UPDATE, DELETE, FETCH, or MOVE
-   *        command; -1 if not available.
-   * @param insertOID for a single-row INSERT query, the OID of the newly inserted row; 0 if not
-   *        available.
-   */
-  void handleCommandStatus(String status, long updateCount, long insertOID);
+    /**
+     * Called when a query that did not return a resultset completes.
+     *
+     * @param status      the command status string (e.g. "SELECT") returned by the backend
+     * @param updateCount the number of rows affected by an INSERT, UPDATE, DELETE, FETCH, or MOVE
+     *                    command; -1 if not available.
+     * @param insertOID   for a single-row INSERT query, the OID of the newly inserted row; 0 if not
+     *                    available.
+     */
+    void handleCommandStatus(String status, long updateCount, long insertOID);
 
-  /**
-   * Called when a warning is emitted.
-   *
-   * @param warning the warning that occurred.
-   */
-  void handleWarning(SQLWarning warning);
+    /**
+     * Called when a warning is emitted.
+     *
+     * @param warning the warning that occurred.
+     */
+    void handleWarning(SQLWarning warning);
 
-  /**
-   * Called when an error occurs. Subsequent queries are abandoned; in general the only calls
-   * between a handleError call and a subsequent handleCompletion call are handleError or
-   * handleWarning.
-   *
-   * @param error the error that occurred
-   */
-  void handleError(SQLException error);
+    /**
+     * Called when an error occurs. Subsequent queries are abandoned; in general the only calls
+     * between a handleError call and a subsequent handleCompletion call are handleError or
+     * handleWarning.
+     *
+     * @param error the error that occurred
+     */
+    void handleError(SQLException error);
 
-  /**
-   * Called before a QueryExecutor method returns. This method may throw a SQLException if desired;
-   * if it does, the QueryExecutor method will propagate that exception to the original caller.
-   *
-   * @throws SQLException if the handler wishes the original method to throw an exception.
-   */
-  void handleCompletion() throws SQLException;
+    /**
+     * Called before a QueryExecutor method returns. This method may throw a SQLException if desired;
+     * if it does, the QueryExecutor method will propagate that exception to the original caller.
+     *
+     * @throws SQLException if the handler wishes the original method to throw an exception.
+     */
+    void handleCompletion() throws SQLException;
 
-  /**
-   * Callback for batch statements. In case batch statement is executed in autocommit==true mode,
-   * the executor might commit "as it this it is best", so the result handler should track which
-   * statements are executed successfully and which are not.
-   */
-  void secureProgress();
+    /**
+     * Callback for batch statements. In case batch statement is executed in autocommit==true mode,
+     * the executor might commit "as it thinks is best", so the result handler should track which
+     * statements are executed successfully and which are not.
+     */
+    void secureProgress();
 
-  /**
-   * Returns the first encountered exception. The rest are chained via {@link SQLException#setNextException(SQLException)}
-   * @return the first encountered exception
-   */
-  SQLException getException();
+    /**
+     * Returns the first encountered exception. The rest are chained via {@link SQLException#setNextException(SQLException)}
+     *
+     * @return the first encountered exception
+     */
+    SQLException getException();
 
-  /**
-   * Returns the first encountered warning. The rest are chained via {@link SQLException#setNextException(SQLException)}
-   * @return the first encountered warning
-   */
-  SQLWarning getWarning();
+    /**
+     * Returns the first encountered warning. The rest are chained via {@link SQLException#setNextException(SQLException)}
+     *
+     * @return the first encountered warning
+     */
+    SQLWarning getWarning();
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/core/ResultHandlerBase.java b/pgjdbc/src/main/java/org/postgresql/core/ResultHandlerBase.java
index 9caf01a..dd51895 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/ResultHandlerBase.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/ResultHandlerBase.java
@@ -16,66 +16,66 @@ import java.util.List;
  * so this class tracks the last exception object to speedup {@code setNextException}.
  */
 public class ResultHandlerBase implements ResultHandler {
-  // Last exception is tracked to avoid O(N) SQLException#setNextException just in case there
-  // will be lots of exceptions (e.g. all batch rows fail with constraint violation or so)
-  private SQLException firstException;
-  private SQLException lastException;
+    // Last exception is tracked to avoid O(N) SQLException#setNextException just in case there
+    // will be lots of exceptions (e.g. all batch rows fail with constraint violation or so)
+    private SQLException firstException;
+    private SQLException lastException;
 
-  private SQLWarning firstWarning;
-  private SQLWarning lastWarning;
+    private SQLWarning firstWarning;
+    private SQLWarning lastWarning;
 
-  public ResultHandlerBase() {
-  }
-
-  @Override
-  public void handleResultRows(Query fromQuery, Field[] fields, List<Tuple> tuples,
-      ResultCursor cursor) {
-  }
-
-  @Override
-  public void handleCommandStatus(String status, long updateCount, long insertOID) {
-  }
-
-  @Override
-  public void secureProgress() {
-  }
-
-  @Override
-  public void handleWarning(SQLWarning warning) {
-    if (firstWarning == null) {
-      firstWarning = lastWarning = warning;
-      return;
+    public ResultHandlerBase() {
     }
-    SQLWarning lastWarning = this.lastWarning;
-    lastWarning.setNextException(warning);
-    this.lastWarning = warning;
-  }
 
-  @Override
-  public void handleError(SQLException error) {
-    if (firstException == null) {
-      firstException = lastException = error;
-      return;
+    @Override
+    public void handleResultRows(Query fromQuery, Field[] fields, List<Tuple> tuples,
+                                 ResultCursor cursor) {
     }
-    lastException.setNextException(error);
-    this.lastException = error;
-  }
 
-  @Override
-  public void handleCompletion() throws SQLException {
-    SQLException firstException = this.firstException;
-    if (firstException != null) {
-      throw firstException;
+    @Override
+    public void handleCommandStatus(String status, long updateCount, long insertOID) {
     }
-  }
 
-  @Override
-  public SQLException getException() {
-    return firstException;
-  }
+    @Override
+    public void secureProgress() {
+    }
 
-  @Override
-  public SQLWarning getWarning() {
-    return firstWarning;
-  }
+    @Override
+    public void handleWarning(SQLWarning warning) {
+        if (firstWarning == null) {
+            firstWarning = lastWarning = warning;
+            return;
+        }
+        SQLWarning lastWarning = this.lastWarning;
+        lastWarning.setNextException(warning);
+        this.lastWarning = warning;
+    }
+
+    @Override
+    public void handleError(SQLException error) {
+        if (firstException == null) {
+            firstException = lastException = error;
+            return;
+        }
+        lastException.setNextException(error);
+        this.lastException = error;
+    }
+
+    @Override
+    public void handleCompletion() throws SQLException {
+        SQLException firstException = this.firstException;
+        if (firstException != null) {
+            throw firstException;
+        }
+    }
+
+    @Override
+    public SQLException getException() {
+        return firstException;
+    }
+
+    @Override
+    public SQLWarning getWarning() {
+        return firstWarning;
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/core/ResultHandlerDelegate.java b/pgjdbc/src/main/java/org/postgresql/core/ResultHandlerDelegate.java
index 456ce1e..4288054 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/ResultHandlerDelegate.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/ResultHandlerDelegate.java
@@ -16,68 +16,68 @@ import java.util.List;
  * for the interface methods</p>
  */
 public class ResultHandlerDelegate implements ResultHandler {
-  private final ResultHandler delegate;
+    private final ResultHandler delegate;
 
-  public ResultHandlerDelegate(ResultHandler delegate) {
-    this.delegate = delegate;
-  }
-
-  @Override
-  public void handleResultRows(Query fromQuery, Field[] fields, List<Tuple> tuples,
-      ResultCursor cursor) {
-    if (delegate != null) {
-      delegate.handleResultRows(fromQuery, fields, tuples, cursor);
+    public ResultHandlerDelegate(ResultHandler delegate) {
+        this.delegate = delegate;
     }
-  }
 
-  @Override
-  public void handleCommandStatus(String status, long updateCount, long insertOID) {
-    if (delegate != null) {
-      delegate.handleCommandStatus(status, updateCount, insertOID);
+    @Override
+    public void handleResultRows(Query fromQuery, Field[] fields, List<Tuple> tuples,
+                                 ResultCursor cursor) {
+        if (delegate != null) {
+            delegate.handleResultRows(fromQuery, fields, tuples, cursor);
+        }
     }
-  }
 
-  @Override
-  public void handleWarning(SQLWarning warning) {
-    if (delegate != null) {
-      delegate.handleWarning(warning);
+    @Override
+    public void handleCommandStatus(String status, long updateCount, long insertOID) {
+        if (delegate != null) {
+            delegate.handleCommandStatus(status, updateCount, insertOID);
+        }
     }
-  }
 
-  @Override
-  public void handleError(SQLException error) {
-    if (delegate != null) {
-      delegate.handleError(error);
+    @Override
+    public void handleWarning(SQLWarning warning) {
+        if (delegate != null) {
+            delegate.handleWarning(warning);
+        }
     }
-  }
 
-  @Override
-  public void handleCompletion() throws SQLException {
-    if (delegate != null) {
-      delegate.handleCompletion();
+    @Override
+    public void handleError(SQLException error) {
+        if (delegate != null) {
+            delegate.handleError(error);
+        }
     }
-  }
 
-  @Override
-  public void secureProgress() {
-    if (delegate != null) {
-      delegate.secureProgress();
+    @Override
+    public void handleCompletion() throws SQLException {
+        if (delegate != null) {
+            delegate.handleCompletion();
+        }
     }
-  }
 
-  @Override
-  public SQLException getException() {
-    if (delegate != null) {
-      return delegate.getException();
+    @Override
+    public void secureProgress() {
+        if (delegate != null) {
+            delegate.secureProgress();
+        }
     }
-    return null;
-  }
 
-  @Override
-  public SQLWarning getWarning() {
-    if (delegate != null) {
-      return delegate.getWarning();
+    @Override
+    public SQLException getException() {
+        if (delegate != null) {
+            return delegate.getException();
+        }
+        return null;
+    }
+
+    @Override
+    public SQLWarning getWarning() {
+        if (delegate != null) {
+            return delegate.getWarning();
+        }
+        return null;
     }
-    return null;
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/core/ServerVersion.java b/pgjdbc/src/main/java/org/postgresql/core/ServerVersion.java
index d8ec12a..5f41bd4 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/ServerVersion.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/ServerVersion.java
@@ -13,136 +13,125 @@ import java.text.ParsePosition;
  */
 public enum ServerVersion implements Version {
 
-  INVALID("0.0.0"),
-  v8_2("8.2.0"),
-  v8_3("8.3.0"),
-  v8_4("8.4.0"),
-  v9_0("9.0.0"),
-  v9_1("9.1.0"),
-  v9_2("9.2.0"),
-  v9_3("9.3.0"),
-  v9_4("9.4.0"),
-  v9_5("9.5.0"),
-  v9_6("9.6.0"),
-  v10("10"),
-  v11("11"),
-  v12("12"),
-  v13("13"),
-  v14("14"),
-  v15("15"),
-  v16("16")
-  ;
+    INVALID("0.0.0"),
+    v8_2("8.2.0"),
+    v8_3("8.3.0"),
+    v8_4("8.4.0"),
+    v9_0("9.0.0"),
+    v9_1("9.1.0"),
+    v9_2("9.2.0"),
+    v9_3("9.3.0"),
+    v9_4("9.4.0"),
+    v9_5("9.5.0"),
+    v9_6("9.6.0"),
+    v10("10"),
+    v11("11"),
+    v12("12"),
+    v13("13"),
+    v14("14"),
+    v15("15"),
+    v16("16");
 
-  private final int version;
+    private final int version;
 
-  ServerVersion(String version) {
-    this.version = parseServerVersionStr(version);
-  }
+    ServerVersion(String version) {
+        this.version = parseServerVersionStr(version);
+    }
 
-  /**
-   * Get a machine-readable version number.
-   *
-   * @return the version in numeric XXYYZZ form, e.g. 90401 for 9.4.1
-   */
-  @Override
-  public int getVersionNum() {
-    return version;
-  }
+    /**
+     * <p>Attempt to parse the server version string into an XXYYZZ form version number into a
+     * {@link Version}.</p>
+     *
+     * <p>If the specified version cannot be parsed, the {@link Version#getVersionNum()} will return 0.</p>
+     *
+     * @param version version in numeric XXYYZZ form, e.g. "090401" for 9.4.1
+     * @return a {@link Version} representing the specified version string.
+     */
+    public static Version from(String version) {
+        final int versionNum = parseServerVersionStr(version);
+        return new Version() {
+            @Override
+            public int getVersionNum() {
+                return versionNum;
+            }
 
-  /**
-   * <p>Attempt to parse the server version string into an XXYYZZ form version number into a
-   * {@link Version}.</p>
-   *
-   * <p>If the specified version cannot be parsed, the {@link Version#getVersionNum()} will return 0.</p>
-   *
-   * @param version version in numeric XXYYZZ form, e.g. "090401" for 9.4.1
-   * @return a {@link Version} representing the specified version string.
-   */
-  public static Version from(String version) {
-    final int versionNum = parseServerVersionStr(version);
-    return new Version() {
-      @Override
-      public int getVersionNum() {
-        return versionNum;
-      }
+            @Override
+            public boolean equals(Object obj) {
+                if (obj instanceof Version) {
+                    return this.getVersionNum() == ((Version) obj).getVersionNum();
+                }
+                return false;
+            }
 
-      @Override
-      public boolean equals(Object obj) {
-        if (obj instanceof Version) {
-          return this.getVersionNum() == ((Version) obj).getVersionNum();
+            @Override
+            public int hashCode() {
+                return getVersionNum();
+            }
+
+            @Override
+            public String toString() {
+                return Integer.toString(versionNum);
+            }
+        };
+    }
+
+    /**
+     * <p>Attempt to parse the server version string into an XXYYZZ form version number.</p>
+     *
+     * <p>Returns 0 if the version could not be parsed.</p>
+     *
+     * <p>Returns minor version 0 if the minor version could not be determined, e.g. devel or beta
+     * releases.</p>
+     *
+     * <p>If a single major part like 90400 is passed, it's assumed to be a pre-parsed version and
+     * returned verbatim. (Anything equal to or greater than 10000 is presumed to be this form).</p>
+     *
+     * <p>The yy or zz version parts may be larger than 99. A NumberFormatException is thrown if a
+     * version part is out of range.</p>
+     *
+     * @param serverVersion server version in a XXYYZZ form
+     * @return server version in number form
+     */
+    static int parseServerVersionStr(String serverVersion) throws NumberFormatException {
+        if (serverVersion == null) {
+            return 0;
         }
-        return false;
-      }
 
-      @Override
-      public int hashCode() {
-        return getVersionNum();
-      }
+        NumberFormat numformat = NumberFormat.getIntegerInstance();
+        numformat.setGroupingUsed(false);
+        ParsePosition parsepos = new ParsePosition(0);
 
-      @Override
-      public String toString() {
-        return Integer.toString(versionNum);
-      }
-    };
-  }
+        int[] parts = new int[3];
+        int versionParts;
+        for (versionParts = 0; versionParts < 3; versionParts++) {
+            Number part = (Number) numformat.parseObject(serverVersion, parsepos);
+            if (part == null) {
+                break;
+            }
+            parts[versionParts] = part.intValue();
+            if (parsepos.getIndex() == serverVersion.length()
+                    || serverVersion.charAt(parsepos.getIndex()) != '.') {
+                break;
+            }
+            // Skip .
+            parsepos.setIndex(parsepos.getIndex() + 1);
+        }
+        versionParts++;
 
-  /**
-   * <p>Attempt to parse the server version string into an XXYYZZ form version number.</p>
-   *
-   * <p>Returns 0 if the version could not be parsed.</p>
-   *
-   * <p>Returns minor version 0 if the minor version could not be determined, e.g. devel or beta
-   * releases.</p>
-   *
-   * <p>If a single major part like 90400 is passed, it's assumed to be a pre-parsed version and
-   * returned verbatim. (Anything equal to or greater than 10000 is presumed to be this form).</p>
-   *
-   * <p>The yy or zz version parts may be larger than 99. A NumberFormatException is thrown if a
-   * version part is out of range.</p>
-   *
-   * @param serverVersion server version in a XXYYZZ form
-   * @return server version in number form
-   */
-  static int parseServerVersionStr(String serverVersion) throws NumberFormatException {
-    if (serverVersion == null) {
-      return 0;
-    }
-
-    NumberFormat numformat = NumberFormat.getIntegerInstance();
-    numformat.setGroupingUsed(false);
-    ParsePosition parsepos = new ParsePosition(0);
-
-    int[] parts = new int[3];
-    int versionParts;
-    for (versionParts = 0; versionParts < 3; versionParts++) {
-      Number part = (Number) numformat.parseObject(serverVersion, parsepos);
-      if (part == null) {
-        break;
-      }
-      parts[versionParts] = part.intValue();
-      if (parsepos.getIndex() == serverVersion.length()
-          || serverVersion.charAt(parsepos.getIndex()) != '.') {
-        break;
-      }
-      // Skip .
-      parsepos.setIndex(parsepos.getIndex() + 1);
-    }
-    versionParts++;
-
-    if (parts[0] >= 10000) {
-      /*
-       * PostgreSQL version 1000? I don't think so. We're seeing a version like 90401; return it
-       * verbatim, but only if there's nothing else in the version. If there is, treat it as a parse
-       * error.
-       */
-      if (parsepos.getIndex() == serverVersion.length() && versionParts == 1) {
-        return parts[0];
-      } else {
-        throw new NumberFormatException(
-            "First major-version part equal to or greater than 10000 in invalid version string: "
-                + serverVersion);
-      }
-    }
+        if (parts[0] >= 10000) {
+            /*
+             * PostgreSQL version 1000? I don't think so. We're seeing a version like 90401; return it
+             * verbatim, but only if there's nothing else in the version. If there is, treat it as a parse
+             * error.
+             */
+            if (parsepos.getIndex() == serverVersion.length() && versionParts == 1) {
+                return parts[0];
+            } else {
+                throw new NumberFormatException(
+                        "First major-version part equal to or greater than 10000 in invalid version string: "
+                                + serverVersion);
+            }
+        }
 
     /* #667 - Allow for versions with greater than 3 parts.
       For versions with more than 3 parts, still return 3 parts (4th part ignored for now
@@ -150,36 +139,46 @@ public enum ServerVersion implements Version {
       Allows for future versions of the server to utilize more than 3 part version numbers
       without upgrading the jdbc driver */
 
-    if (versionParts >= 3) {
-      if (parts[1] > 99) {
-        throw new NumberFormatException(
-            "Unsupported second part of major version > 99 in invalid version string: "
-                + serverVersion);
-      }
-      if (parts[2] > 99) {
-        throw new NumberFormatException(
-            "Unsupported second part of minor version > 99 in invalid version string: "
-                + serverVersion);
-      }
-      return (parts[0] * 100 + parts[1]) * 100 + parts[2];
+        if (versionParts >= 3) {
+            if (parts[1] > 99) {
+                throw new NumberFormatException(
+                        "Unsupported second part of major version > 99 in invalid version string: "
+                                + serverVersion);
+            }
+            if (parts[2] > 99) {
+                throw new NumberFormatException(
+                        "Unsupported second part of minor version > 99 in invalid version string: "
+                                + serverVersion);
+            }
+            return (parts[0] * 100 + parts[1]) * 100 + parts[2];
+        }
+        if (versionParts == 2) {
+            if (parts[0] >= 10) {
+                return parts[0] * 100 * 100 + parts[1];
+            }
+            if (parts[1] > 99) {
+                throw new NumberFormatException(
+                        "Unsupported second part of major version > 99 in invalid version string: "
+                                + serverVersion);
+            }
+            return (parts[0] * 100 + parts[1]) * 100;
+        }
+        if (versionParts == 1) {
+            if (parts[0] >= 10) {
+                return parts[0] * 100 * 100;
+            }
+        }
+        return 0; /* unknown */
     }
-    if (versionParts == 2) {
-      if (parts[0] >= 10) {
-        return parts[0] * 100 * 100 + parts[1];
-      }
-      if (parts[1] > 99) {
-        throw new NumberFormatException(
-            "Unsupported second part of major version > 99 in invalid version string: "
-                + serverVersion);
-      }
-      return (parts[0] * 100 + parts[1]) * 100;
+
+    /**
+     * Get a machine-readable version number.
+     *
+     * @return the version in numeric XXYYZZ form, e.g. 90401 for 9.4.1
+     */
+    @Override
+    public int getVersionNum() {
+        return version;
     }
-    if (versionParts == 1) {
-      if (parts[0] >= 10) {
-        return parts[0] * 100 * 100;
-      }
-    }
-    return 0; /* unknown */
-  }
 
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/core/SetupQueryRunner.java b/pgjdbc/src/main/java/org/postgresql/core/SetupQueryRunner.java
index 739043e..4559586 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/SetupQueryRunner.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/SetupQueryRunner.java
@@ -6,13 +6,12 @@
 
 package org.postgresql.core;
 
-import org.postgresql.util.GT;
-import org.postgresql.util.PSQLException;
-import org.postgresql.util.PSQLState;
-
 import java.sql.SQLException;
 import java.sql.SQLWarning;
 import java.util.List;
+import org.postgresql.util.GT;
+import org.postgresql.util.PSQLException;
+import org.postgresql.util.PSQLState;
 
 /**
  * Poor man's Statement &amp; ResultSet, used for initial queries while we're still initializing the
@@ -20,57 +19,57 @@ import java.util.List;
  */
 public class SetupQueryRunner {
 
-  public SetupQueryRunner() {
-  }
-
-  private static class SimpleResultHandler extends ResultHandlerBase {
-    private List<Tuple> tuples;
-
-    List<Tuple> getResults() {
-      return tuples;
+    public SetupQueryRunner() {
     }
 
-    @Override
-    public void handleResultRows(Query fromQuery, Field[] fields, List<Tuple> tuples,
-        ResultCursor cursor) {
-      this.tuples = tuples;
+    public static Tuple run(QueryExecutor executor, String queryString,
+                            boolean wantResults) throws SQLException {
+        Query query = executor.createSimpleQuery(queryString);
+        SimpleResultHandler handler = new SimpleResultHandler();
+
+        int flags = QueryExecutor.QUERY_ONESHOT | QueryExecutor.QUERY_SUPPRESS_BEGIN
+                | QueryExecutor.QUERY_EXECUTE_AS_SIMPLE;
+        if (!wantResults) {
+            flags |= QueryExecutor.QUERY_NO_RESULTS | QueryExecutor.QUERY_NO_METADATA;
+        }
+
+        try {
+            executor.execute(query, null, handler, 0, 0, flags);
+        } finally {
+            query.close();
+        }
+
+        if (!wantResults) {
+            return null;
+        }
+
+        List<Tuple> tuples = handler.getResults();
+        if (tuples == null || tuples.size() != 1) {
+            throw new PSQLException(GT.tr("An unexpected result was returned by a query."),
+                    PSQLState.CONNECTION_UNABLE_TO_CONNECT);
+        }
+
+        return tuples.get(0);
     }
 
-    @Override
-    public void handleWarning(SQLWarning warning) {
-      // We ignore warnings. We assume we know what we're
-      // doing in the setup queries.
+    private static class SimpleResultHandler extends ResultHandlerBase {
+        private List<Tuple> tuples;
+
+        List<Tuple> getResults() {
+            return tuples;
+        }
+
+        @Override
+        public void handleResultRows(Query fromQuery, Field[] fields, List<Tuple> tuples,
+                                     ResultCursor cursor) {
+            this.tuples = tuples;
+        }
+
+        @Override
+        public void handleWarning(SQLWarning warning) {
+            // We ignore warnings. We assume we know what we're
+            // doing in the setup queries.
+        }
     }
-  }
-
-  public static Tuple run(QueryExecutor executor, String queryString,
-      boolean wantResults) throws SQLException {
-    Query query = executor.createSimpleQuery(queryString);
-    SimpleResultHandler handler = new SimpleResultHandler();
-
-    int flags = QueryExecutor.QUERY_ONESHOT | QueryExecutor.QUERY_SUPPRESS_BEGIN
-        | QueryExecutor.QUERY_EXECUTE_AS_SIMPLE;
-    if (!wantResults) {
-      flags |= QueryExecutor.QUERY_NO_RESULTS | QueryExecutor.QUERY_NO_METADATA;
-    }
-
-    try {
-      executor.execute(query, null, handler, 0, 0, flags);
-    } finally {
-      query.close();
-    }
-
-    if (!wantResults) {
-      return null;
-    }
-
-    List<Tuple> tuples = handler.getResults();
-    if (tuples == null || tuples.size() != 1) {
-      throw new PSQLException(GT.tr("An unexpected result was returned by a query."),
-          PSQLState.CONNECTION_UNABLE_TO_CONNECT);
-    }
-
-    return tuples.get(0);
-  }
 
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/core/SocketFactoryFactory.java b/pgjdbc/src/main/java/org/postgresql/core/SocketFactoryFactory.java
index f54fb00..2d786f0 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/SocketFactoryFactory.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/SocketFactoryFactory.java
@@ -5,6 +5,9 @@
 
 package org.postgresql.core;
 
+import java.util.Properties;
+import javax.net.SocketFactory;
+import javax.net.ssl.SSLSocketFactory;
 import org.postgresql.PGProperty;
 import org.postgresql.ssl.LibPQFactory;
 import org.postgresql.util.GT;
@@ -12,66 +15,61 @@ import org.postgresql.util.ObjectFactory;
 import org.postgresql.util.PSQLException;
 import org.postgresql.util.PSQLState;
 
-import java.util.Properties;
-
-import javax.net.SocketFactory;
-import javax.net.ssl.SSLSocketFactory;
-
 /**
  * Instantiates {@link SocketFactory} based on the {@link PGProperty#SOCKET_FACTORY}.
  */
 public class SocketFactoryFactory {
 
-  public SocketFactoryFactory() {
-  }
+    public SocketFactoryFactory() {
+    }
 
-  /**
-   * Instantiates {@link SocketFactory} based on the {@link PGProperty#SOCKET_FACTORY}.
-   *
-   * @param info connection properties
-   * @return socket factory
-   * @throws PSQLException if something goes wrong
-   */
-  public static SocketFactory getSocketFactory(Properties info) throws PSQLException {
-    // Socket factory
-    String socketFactoryClassName = PGProperty.SOCKET_FACTORY.getOrDefault(info);
-    if (socketFactoryClassName == null) {
-      return SocketFactory.getDefault();
+    /**
+     * Instantiates {@link SocketFactory} based on the {@link PGProperty#SOCKET_FACTORY}.
+     *
+     * @param info connection properties
+     * @return socket factory
+     * @throws PSQLException if something goes wrong
+     */
+    public static SocketFactory getSocketFactory(Properties info) throws PSQLException {
+        // Socket factory
+        String socketFactoryClassName = PGProperty.SOCKET_FACTORY.getOrDefault(info);
+        if (socketFactoryClassName == null) {
+            return SocketFactory.getDefault();
+        }
+        try {
+            return ObjectFactory.instantiate(SocketFactory.class, socketFactoryClassName, info, true,
+                    PGProperty.SOCKET_FACTORY_ARG.getOrDefault(info));
+        } catch (Exception e) {
+            throw new PSQLException(
+                    GT.tr("The SocketFactory class provided {0} could not be instantiated.",
+                            socketFactoryClassName),
+                    PSQLState.CONNECTION_FAILURE, e);
+        }
     }
-    try {
-      return ObjectFactory.instantiate(SocketFactory.class, socketFactoryClassName, info, true,
-          PGProperty.SOCKET_FACTORY_ARG.getOrDefault(info));
-    } catch (Exception e) {
-      throw new PSQLException(
-          GT.tr("The SocketFactory class provided {0} could not be instantiated.",
-              socketFactoryClassName),
-          PSQLState.CONNECTION_FAILURE, e);
-    }
-  }
 
-  /**
-   * Instantiates {@link SSLSocketFactory} based on the {@link PGProperty#SSL_FACTORY}.
-   *
-   * @param info connection properties
-   * @return SSL socket factory
-   * @throws PSQLException if something goes wrong
-   */
-  @SuppressWarnings("deprecation")
-  public static SSLSocketFactory getSslSocketFactory(Properties info) throws PSQLException {
-    String classname = PGProperty.SSL_FACTORY.getOrDefault(info);
-    if (classname == null
-        || "org.postgresql.ssl.jdbc4.LibPQFactory".equals(classname)
-        || "org.postgresql.ssl.LibPQFactory".equals(classname)) {
-      return new LibPQFactory(info);
+    /**
+     * Instantiates {@link SSLSocketFactory} based on the {@link PGProperty#SSL_FACTORY}.
+     *
+     * @param info connection properties
+     * @return SSL socket factory
+     * @throws PSQLException if something goes wrong
+     */
+    @SuppressWarnings("deprecation")
+    public static SSLSocketFactory getSslSocketFactory(Properties info) throws PSQLException {
+        String classname = PGProperty.SSL_FACTORY.getOrDefault(info);
+        if (classname == null
+                || "org.postgresql.ssl.jdbc4.LibPQFactory".equals(classname)
+                || "org.postgresql.ssl.LibPQFactory".equals(classname)) {
+            return new LibPQFactory(info);
+        }
+        try {
+            return ObjectFactory.instantiate(SSLSocketFactory.class, classname, info, true,
+                    PGProperty.SSL_FACTORY_ARG.getOrDefault(info));
+        } catch (Exception e) {
+            throw new PSQLException(
+                    GT.tr("The SSLSocketFactory class provided {0} could not be instantiated.", classname),
+                    PSQLState.CONNECTION_FAILURE, e);
+        }
     }
-    try {
-      return ObjectFactory.instantiate(SSLSocketFactory.class, classname, info, true,
-          PGProperty.SSL_FACTORY_ARG.getOrDefault(info));
-    } catch (Exception e) {
-      throw new PSQLException(
-          GT.tr("The SSLSocketFactory class provided {0} could not be instantiated.", classname),
-          PSQLState.CONNECTION_FAILURE, e);
-    }
-  }
 
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/core/SqlCommand.java b/pgjdbc/src/main/java/org/postgresql/core/SqlCommand.java
index 90201fa..3f6d542 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/SqlCommand.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/SqlCommand.java
@@ -14,68 +14,66 @@ import static org.postgresql.core.SqlCommandType.WITH;
  *
  * @author Jeremy Whiting jwhiting@redhat.com
  * @author Christopher Deckers (chrriis@gmail.com)
- *
  */
 public class SqlCommand {
-  public static final SqlCommand BLANK = SqlCommand.createStatementTypeInfo(SqlCommandType.BLANK);
+    public static final SqlCommand BLANK = SqlCommand.createStatementTypeInfo(SqlCommandType.BLANK);
+    private final SqlCommandType commandType;
+    private final boolean parsedSQLhasRETURNINGKeyword;
+    private final int valuesBraceOpenPosition;
+    private final int valuesBraceClosePosition;
 
-  public boolean isBatchedReWriteCompatible() {
-    return valuesBraceOpenPosition >= 0;
-  }
+    private SqlCommand(SqlCommandType type, boolean isBatchedReWriteConfigured,
+                       int valuesBraceOpenPosition, int valuesBraceClosePosition, boolean isPresent,
+                       int priorQueryCount) {
+        commandType = type;
+        parsedSQLhasRETURNINGKeyword = isPresent;
+        boolean batchedReWriteCompatible = (type == INSERT) && isBatchedReWriteConfigured
+                && valuesBraceOpenPosition >= 0 && valuesBraceClosePosition > valuesBraceOpenPosition
+                && !isPresent && priorQueryCount == 0;
+        this.valuesBraceOpenPosition = batchedReWriteCompatible ? valuesBraceOpenPosition : -1;
+        this.valuesBraceClosePosition = batchedReWriteCompatible ? valuesBraceClosePosition : -1;
+    }
 
-  public int getBatchRewriteValuesBraceOpenPosition() {
-    return valuesBraceOpenPosition;
-  }
+    public static SqlCommand createStatementTypeInfo(SqlCommandType type,
+                                                     boolean isBatchedReWritePropertyConfigured,
+                                                     int valuesBraceOpenPosition, int valuesBraceClosePosition, boolean isRETURNINGkeywordPresent,
+                                                     int priorQueryCount) {
+        return new SqlCommand(type, isBatchedReWritePropertyConfigured,
+                valuesBraceOpenPosition, valuesBraceClosePosition, isRETURNINGkeywordPresent,
+                priorQueryCount);
+    }
 
-  public int getBatchRewriteValuesBraceClosePosition() {
-    return valuesBraceClosePosition;
-  }
+    public static SqlCommand createStatementTypeInfo(SqlCommandType type) {
+        return new SqlCommand(type, false, -1, -1, false, 0);
+    }
 
-  public SqlCommandType getType() {
-    return commandType;
-  }
+    public static SqlCommand createStatementTypeInfo(SqlCommandType type,
+                                                     boolean isRETURNINGkeywordPresent) {
+        return new SqlCommand(type, false, -1, -1, isRETURNINGkeywordPresent, 0);
+    }
 
-  public boolean isReturningKeywordPresent() {
-    return parsedSQLhasRETURNINGKeyword;
-  }
+    public boolean isBatchedReWriteCompatible() {
+        return valuesBraceOpenPosition >= 0;
+    }
 
-  public boolean returnsRows() {
-    return parsedSQLhasRETURNINGKeyword || commandType == SELECT || commandType == WITH;
-  }
+    public int getBatchRewriteValuesBraceOpenPosition() {
+        return valuesBraceOpenPosition;
+    }
 
-  public static SqlCommand createStatementTypeInfo(SqlCommandType type,
-      boolean isBatchedReWritePropertyConfigured,
-      int valuesBraceOpenPosition, int valuesBraceClosePosition, boolean isRETURNINGkeywordPresent,
-      int priorQueryCount) {
-    return new SqlCommand(type, isBatchedReWritePropertyConfigured,
-        valuesBraceOpenPosition, valuesBraceClosePosition, isRETURNINGkeywordPresent,
-        priorQueryCount);
-  }
+    public int getBatchRewriteValuesBraceClosePosition() {
+        return valuesBraceClosePosition;
+    }
 
-  public static SqlCommand createStatementTypeInfo(SqlCommandType type) {
-    return new SqlCommand(type, false, -1, -1, false, 0);
-  }
+    public SqlCommandType getType() {
+        return commandType;
+    }
 
-  public static SqlCommand createStatementTypeInfo(SqlCommandType type,
-      boolean isRETURNINGkeywordPresent) {
-    return new SqlCommand(type, false, -1, -1, isRETURNINGkeywordPresent, 0);
-  }
+    public boolean isReturningKeywordPresent() {
+        return parsedSQLhasRETURNINGKeyword;
+    }
 
-  private SqlCommand(SqlCommandType type, boolean isBatchedReWriteConfigured,
-      int valuesBraceOpenPosition, int valuesBraceClosePosition, boolean isPresent,
-      int priorQueryCount) {
-    commandType = type;
-    parsedSQLhasRETURNINGKeyword = isPresent;
-    boolean batchedReWriteCompatible = (type == INSERT) && isBatchedReWriteConfigured
-        && valuesBraceOpenPosition >= 0 && valuesBraceClosePosition > valuesBraceOpenPosition
-        && !isPresent && priorQueryCount == 0;
-    this.valuesBraceOpenPosition = batchedReWriteCompatible ? valuesBraceOpenPosition : -1;
-    this.valuesBraceClosePosition = batchedReWriteCompatible ? valuesBraceClosePosition : -1;
-  }
-
-  private final SqlCommandType commandType;
-  private final boolean parsedSQLhasRETURNINGKeyword;
-  private final int valuesBraceOpenPosition;
-  private final int valuesBraceClosePosition;
+    public boolean returnsRows() {
+        return parsedSQLhasRETURNINGKeyword || commandType == SELECT || commandType == WITH;
+    }
 
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/core/SqlCommandType.java b/pgjdbc/src/main/java/org/postgresql/core/SqlCommandType.java
index 3a4fc43..d306d87 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/SqlCommandType.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/SqlCommandType.java
@@ -7,23 +7,23 @@ package org.postgresql.core;
 
 /**
  * Type information inspection support.
- * @author Jeremy Whiting jwhiting@redhat.com
  *
+ * @author Jeremy Whiting jwhiting@redhat.com
  */
 
 public enum SqlCommandType {
 
-  /**
-   * Use BLANK for empty sql queries or when parsing the sql string is not
-   * necessary.
-   */
-  BLANK,
-  INSERT,
-  UPDATE,
-  DELETE,
-  MOVE,
-  SELECT,
-  WITH,
-  CREATE,
-  ALTER
+    /**
+     * Use BLANK for empty sql queries or when parsing the sql string is not
+     * necessary.
+     */
+    BLANK,
+    INSERT,
+    UPDATE,
+    DELETE,
+    MOVE,
+    SELECT,
+    WITH,
+    CREATE,
+    ALTER
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/core/TransactionState.java b/pgjdbc/src/main/java/org/postgresql/core/TransactionState.java
index b819026..14c53a6 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/TransactionState.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/TransactionState.java
@@ -6,7 +6,7 @@
 package org.postgresql.core;
 
 public enum TransactionState {
-  IDLE,
-  OPEN,
-  FAILED
+    IDLE,
+    OPEN,
+    FAILED
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/core/Tuple.java b/pgjdbc/src/main/java/org/postgresql/core/Tuple.java
index 5f6e488..852e170 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/Tuple.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/Tuple.java
@@ -9,92 +9,100 @@ package org.postgresql.core;
  * Class representing a row in a {@link java.sql.ResultSet}.
  */
 public class Tuple {
-  private final boolean forUpdate;
-  final byte[] [] data;
+    final byte[][] data;
+    private final boolean forUpdate;
 
-  /**
-   * Construct an empty tuple. Used in updatable result sets.
-   * @param length the number of fields in the tuple.
-   */
-  public Tuple(int length) {
-    this(new byte[length][], true);
-  }
-
-  /**
-   * Construct a populated tuple. Used when returning results.
-   * @param data the tuple data
-   */
-  public Tuple(byte[] [] data) {
-    this(data, false);
-  }
-
-  private Tuple(byte[] [] data, boolean forUpdate) {
-    this.data = data;
-    this.forUpdate = forUpdate;
-  }
-
-  /**
-   * Number of fields in the tuple
-   * @return number of fields
-   */
-  public int fieldCount() {
-    return data.length;
-  }
-
-  /**
-   * Total length in bytes of the tuple data.
-   * @return the number of bytes in this tuple
-   */
-  public int length() {
-    int length = 0;
-    for (byte[] field : data) {
-      if (field != null) {
-        length += field.length;
-      }
+    /**
+     * Construct an empty tuple. Used in updatable result sets.
+     *
+     * @param length the number of fields in the tuple.
+     */
+    public Tuple(int length) {
+        this(new byte[length][], true);
     }
-    return length;
-  }
 
-  /**
-   * Get the data for the given field
-   * @param index 0-based field position in the tuple
-   * @return byte array of the data
-   */
-  public byte [] get(int index) {
-    return data[index];
-  }
-
-  /**
-   * Create a copy of the tuple for updating.
-   * @return a copy of the tuple that allows updates
-   */
-  public Tuple updateableCopy() {
-    return copy(true);
-  }
-
-  /**
-   * Create a read-only copy of the tuple
-   * @return a copy of the tuple that does not allow updates
-   */
-  public Tuple readOnlyCopy() {
-    return copy(false);
-  }
-
-  private Tuple copy(boolean forUpdate) {
-    byte[][] dataCopy = new byte[data.length][];
-    System.arraycopy(data, 0, dataCopy, 0, data.length);
-    return new Tuple(dataCopy, forUpdate);
-  }
-
-  /**
-   * Set the given field to the given data.
-   * @param index 0-based field position
-   * @param fieldData the data to set
-   */
-  public void set(int index, byte [] fieldData) {
-    if (!forUpdate) {
-      throw new IllegalArgumentException("Attempted to write to readonly tuple");
+    /**
+     * Construct a populated tuple. Used when returning results.
+     *
+     * @param data the tuple data
+     */
+    public Tuple(byte[][] data) {
+        this(data, false);
+    }
+
+    private Tuple(byte[][] data, boolean forUpdate) {
+        this.data = data;
+        this.forUpdate = forUpdate;
+    }
+
+    /**
+     * Number of fields in the tuple.
+     *
+     * @return number of fields
+     */
+    public int fieldCount() {
+        return data.length;
+    }
+
+    /**
+     * Total length in bytes of the tuple data.
+     *
+     * @return the number of bytes in this tuple
+     */
+    public int length() {
+        int length = 0;
+        for (byte[] field : data) {
+            if (field != null) {
+                length += field.length;
+            }
+        }
+        return length;
+    }
+
+    /**
+     * Get the data for the given field.
+     *
+     * @param index 0-based field position in the tuple
+     * @return byte array of the data
+     */
+    public byte[] get(int index) {
+        return data[index];
+    }
+
+    /**
+     * Create a copy of the tuple for updating.
+     *
+     * @return a copy of the tuple that allows updates
+     */
+    public Tuple updateableCopy() {
+        return copy(true);
+    }
+
+    /**
+     * Create a read-only copy of the tuple.
+     *
+     * @return a copy of the tuple that does not allow updates
+     */
+    public Tuple readOnlyCopy() {
+        return copy(false);
+    }
+
+    private Tuple copy(boolean forUpdate) {
+        byte[][] dataCopy = new byte[data.length][];
+        System.arraycopy(data, 0, dataCopy, 0, data.length);
+        return new Tuple(dataCopy, forUpdate);
+    }
+
+    /**
+     * Set the given field to the given data.
+     *
+     * @param index     0-based field position
+     * @param fieldData the data to set
+     */
+    public void set(int index, byte[] fieldData) {
+        if (!forUpdate) {
+            throw new IllegalArgumentException("Attempted to write to readonly tuple");
+        }
+        data[index] = fieldData;
     }
-    data[index] = fieldData;
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/core/TypeInfo.java b/pgjdbc/src/main/java/org/postgresql/core/TypeInfo.java
index f41b407..2d23ac2 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/TypeInfo.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/TypeInfo.java
@@ -5,141 +5,140 @@
 
 package org.postgresql.core;
 
-import org.postgresql.util.PGobject;
-
 import java.sql.SQLException;
 import java.util.Iterator;
+import org.postgresql.util.PGobject;
 
 public interface TypeInfo {
-  void addCoreType(String pgTypeName, Integer oid, Integer sqlType, String javaClass,
-      Integer arrayOid);
+    void addCoreType(String pgTypeName, Integer oid, Integer sqlType, String javaClass,
+                     Integer arrayOid);
 
-  void addDataType(String type, Class<? extends PGobject> klass) throws SQLException;
+    void addDataType(String type, Class<? extends PGobject> klass) throws SQLException;
 
-  /**
-   * Look up the SQL typecode for a given type oid.
-   *
-   * @param oid the type's OID
-   * @return the SQL type code (a constant from {@link java.sql.Types}) for the type
-   * @throws SQLException if an error occurs when retrieving sql type
-   */
-  int getSQLType(int oid) throws SQLException;
+    /**
+     * Look up the SQL typecode for a given type oid.
+     *
+     * @param oid the type's OID
+     * @return the SQL type code (a constant from {@link java.sql.Types}) for the type
+     * @throws SQLException if an error occurs when retrieving sql type
+     */
+    int getSQLType(int oid) throws SQLException;
 
-  /**
-   * Look up the SQL typecode for a given postgresql type name.
-   *
-   * @param pgTypeName the server type name to look up
-   * @return the SQL type code (a constant from {@link java.sql.Types}) for the type
-   * @throws SQLException if an error occurs when retrieving sql type
-   */
-  int getSQLType(String pgTypeName) throws SQLException;
+    /**
+     * Look up the SQL typecode for a given postgresql type name.
+     *
+     * @param pgTypeName the server type name to look up
+     * @return the SQL type code (a constant from {@link java.sql.Types}) for the type
+     * @throws SQLException if an error occurs when retrieving sql type
+     */
+    int getSQLType(String pgTypeName) throws SQLException;
 
-  int getJavaArrayType(String className) throws SQLException;
+    int getJavaArrayType(String className) throws SQLException;
 
-  /**
-   * Look up the oid for a given postgresql type name. This is the inverse of
-   * {@link #getPGType(int)}.
-   *
-   * @param pgTypeName the server type name to look up
-   * @return the type's OID, or 0 if unknown
-   * @throws SQLException if an error occurs when retrieving PG type
-   */
-  int getPGType(String pgTypeName) throws SQLException;
+    /**
+     * Look up the oid for a given postgresql type name. This is the inverse of
+     * {@link #getPGType(int)}.
+     *
+     * @param pgTypeName the server type name to look up
+     * @return the type's OID, or 0 if unknown
+     * @throws SQLException if an error occurs when retrieving PG type
+     */
+    int getPGType(String pgTypeName) throws SQLException;
 
-  /**
-   * Look up the postgresql type name for a given oid. This is the inverse of
-   * {@link #getPGType(String)}.
-   *
-   * @param oid the type's OID
-   * @return the server type name for that OID or null if unknown
-   * @throws SQLException if an error occurs when retrieving PG type
-   */
-  String getPGType(int oid) throws SQLException;
+    /**
+     * Look up the postgresql type name for a given oid. This is the inverse of
+     * {@link #getPGType(String)}.
+     *
+     * @param oid the type's OID
+     * @return the server type name for that OID or null if unknown
+     * @throws SQLException if an error occurs when retrieving PG type
+     */
+    String getPGType(int oid) throws SQLException;
 
-  /**
-   * Look up the oid of an array's base type given the array's type oid.
-   *
-   * @param oid the array type's OID
-   * @return the base type's OID, or 0 if unknown
-   * @throws SQLException if an error occurs when retrieving array element
-   */
-  int getPGArrayElement(int oid) throws SQLException;
+    /**
+     * Look up the oid of an array's base type given the array's type oid.
+     *
+     * @param oid the array type's OID
+     * @return the base type's OID, or 0 if unknown
+     * @throws SQLException if an error occurs when retrieving array element
+     */
+    int getPGArrayElement(int oid) throws SQLException;
 
-  /**
-   * Determine the oid of the given base postgresql type's array type.
-   *
-   * @param elementTypeName the base type's
-   * @return the array type's OID, or 0 if unknown
-   * @throws SQLException if an error occurs when retrieving array type
-   */
-  int getPGArrayType(String elementTypeName) throws SQLException;
+    /**
+     * Determine the oid of the given base postgresql type's array type.
+     *
+     * @param elementTypeName the base type's name
+     * @return the array type's OID, or 0 if unknown
+     * @throws SQLException if an error occurs when retrieving array type
+     */
+    int getPGArrayType(String elementTypeName) throws SQLException;
 
-  /**
-   * Determine the delimiter for the elements of the given array type oid.
-   *
-   * @param oid the array type's OID
-   * @return the base type's array type delimiter
-   * @throws SQLException if an error occurs when retrieving array delimiter
-   */
-  char getArrayDelimiter(int oid) throws SQLException;
+    /**
+     * Determine the delimiter for the elements of the given array type oid.
+     *
+     * @param oid the array type's OID
+     * @return the base type's array type delimiter
+     * @throws SQLException if an error occurs when retrieving array delimiter
+     */
+    char getArrayDelimiter(int oid) throws SQLException;
 
-  Iterator<String> getPGTypeNamesWithSQLTypes();
+    Iterator<String> getPGTypeNamesWithSQLTypes();
 
-  Iterator<Integer> getPGTypeOidsWithSQLTypes();
+    Iterator<Integer> getPGTypeOidsWithSQLTypes();
 
-  Class<? extends PGobject> getPGobject(String type);
+    Class<? extends PGobject> getPGobject(String type);
 
-  String getJavaClass(int oid) throws SQLException;
+    String getJavaClass(int oid) throws SQLException;
 
-  String getTypeForAlias(String alias);
+    String getTypeForAlias(String alias);
 
-  int getPrecision(int oid, int typmod);
+    int getPrecision(int oid, int typmod);
 
-  int getScale(int oid, int typmod);
+    int getScale(int oid, int typmod);
 
-  boolean isCaseSensitive(int oid);
+    boolean isCaseSensitive(int oid);
 
-  boolean isSigned(int oid);
+    boolean isSigned(int oid);
 
-  int getDisplaySize(int oid, int typmod);
+    int getDisplaySize(int oid, int typmod);
 
-  int getMaximumPrecision(int oid);
+    int getMaximumPrecision(int oid);
 
-  boolean requiresQuoting(int oid) throws SQLException;
+    boolean requiresQuoting(int oid) throws SQLException;
 
-  /**
-   * Returns true if particular sqlType requires quoting.
-   * This method is used internally by the driver, so it might disappear without notice.
-   *
-   * @param sqlType sql type as in java.sql.Types
-   * @return true if the type requires quoting
-   * @throws SQLException if something goes wrong
-   */
-  boolean requiresQuotingSqlType(int sqlType) throws SQLException;
+    /**
+     * Returns true if particular sqlType requires quoting.
+     * This method is used internally by the driver, so it might disappear without notice.
+     *
+     * @param sqlType sql type as in java.sql.Types
+     * @return true if the type requires quoting
+     * @throws SQLException if something goes wrong
+     */
+    boolean requiresQuotingSqlType(int sqlType) throws SQLException;
 
-  /**
-   * <p>Java Integers are signed 32-bit integers, but oids are unsigned 32-bit integers.
-   * We therefore read them as positive long values and then force them into signed integers
-   * (wrapping around into negative values when required) or we'd be unable to correctly
-   * handle the upper half of the oid space.</p>
-   *
-   * <p>This function handles the mapping of uint32-values in the long to java integers, and
-   * throws for values that are out of range.</p>
-   *
-   * @param oid the oid as a long.
-   * @return the (internal) signed integer representation of the (unsigned) oid.
-   * @throws SQLException if the long has a value outside of the range representable by uint32
-   */
-  int longOidToInt(long oid) throws SQLException;
+    /**
+     * <p>Java Integers are signed 32-bit integers, but oids are unsigned 32-bit integers.
+     * We therefore read them as positive long values and then force them into signed integers
+     * (wrapping around into negative values when required) or we'd be unable to correctly
+     * handle the upper half of the oid space.</p>
+     *
+     * <p>This function handles the mapping of uint32-values in the long to java integers, and
+     * throws for values that are out of range.</p>
+     *
+     * @param oid the oid as a long.
+     * @return the (internal) signed integer representation of the (unsigned) oid.
+     * @throws SQLException if the long has a value outside of the range representable by uint32
+     */
+    int longOidToInt(long oid) throws SQLException;
 
-  /**
-   * Java Integers are signed 32-bit integers, but oids are unsigned 32-bit integers.
-   * We must therefore first map the (internal) integer representation to a positive long
-   * value before sending it to postgresql, or we would be unable to correctly handle the
-   * upper half of the oid space because these negative values are disallowed as OID values.
-   *
-   * @param oid the (signed) integer oid to convert into a long.
-   * @return the non-negative value of this oid, stored as a java long.
-   */
-  long intOidToLong(int oid);
+    /**
+     * Java Integers are signed 32-bit integers, but oids are unsigned 32-bit integers.
+     * We must therefore first map the (internal) integer representation to a positive long
+     * value before sending it to postgresql, or we would be unable to correctly handle the
+     * upper half of the oid space because these negative values are disallowed as OID values.
+     *
+     * @param oid the (signed) integer oid to convert into a long.
+     * @return the non-negative value of this oid, stored as a java long.
+     */
+    long intOidToLong(int oid);
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/core/Utils.java b/pgjdbc/src/main/java/org/postgresql/core/Utils.java
index d96c6e3..b674949 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/Utils.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/Utils.java
@@ -6,175 +6,174 @@
 
 package org.postgresql.core;
 
+import java.io.IOException;
+import java.sql.SQLException;
 import org.postgresql.util.GT;
 import org.postgresql.util.PSQLException;
 import org.postgresql.util.PSQLState;
 
-import java.io.IOException;
-import java.sql.SQLException;
-
 /**
  * Collection of utilities used by the protocol-level code.
  */
 public class Utils {
 
-  public Utils() {
-  }
-
-  /**
-   * Turn a bytearray into a printable form, representing each byte in hex.
-   *
-   * @param data the bytearray to stringize
-   * @return a hex-encoded printable representation of {@code data}
-   */
-  public static String toHexString(byte[] data) {
-    StringBuilder sb = new StringBuilder(data.length * 2);
-    for (byte element : data) {
-      sb.append(Integer.toHexString((element >> 4) & 15));
-      sb.append(Integer.toHexString(element & 15));
+    public Utils() {
     }
-    return sb.toString();
-  }
 
-  /**
-   * Escape the given literal {@code value} and append it to the string builder {@code sbuf}. If
-   * {@code sbuf} is {@code null}, a new StringBuilder will be returned. The argument
-   * {@code standardConformingStrings} defines whether the backend expects standard-conforming
-   * string literals or allows backslash escape sequences.
-   *
-   * @param sbuf the string builder to append to; or {@code null}
-   * @param value the string value
-   * @param standardConformingStrings if standard conforming strings should be used
-   * @return the sbuf argument; or a new string builder for sbuf == null
-   * @throws SQLException if the string contains a {@code \0} character
-   */
-  public static StringBuilder escapeLiteral(StringBuilder sbuf, String value,
-      boolean standardConformingStrings) throws SQLException {
-    if (sbuf == null) {
-      sbuf = new StringBuilder((value.length() + 10) / 10 * 11); // Add 10% for escaping.
-    }
-    doAppendEscapedLiteral(sbuf, value, standardConformingStrings);
-    return sbuf;
-  }
-
-  /**
-   * Common part for {@link #escapeLiteral(StringBuilder, String, boolean)}.
-   *
-   * @param sbuf Either StringBuffer or StringBuilder as we do not expect any IOException to be
-   *        thrown
-   * @param value value to append
-   * @param standardConformingStrings if standard conforming strings should be used
-   */
-  private static void doAppendEscapedLiteral(Appendable sbuf, String value,
-      boolean standardConformingStrings) throws SQLException {
-    try {
-      if (standardConformingStrings) {
-        // With standard_conforming_strings on, escape only single-quotes.
-        for (int i = 0; i < value.length(); i++) {
-          char ch = value.charAt(i);
-          if (ch == '\0') {
-            throw new PSQLException(GT.tr("Zero bytes may not occur in string parameters."),
-                PSQLState.INVALID_PARAMETER_VALUE);
-          }
-          if (ch == '\'') {
-            sbuf.append('\'');
-          }
-          sbuf.append(ch);
+    /**
+     * Turn a bytearray into a printable form, representing each byte in hex.
+     *
+     * @param data the bytearray to stringize
+     * @return a hex-encoded printable representation of {@code data}
+     */
+    public static String toHexString(byte[] data) {
+        StringBuilder sb = new StringBuilder(data.length * 2);
+        for (byte element : data) {
+            sb.append(Integer.toHexString((element >> 4) & 15));
+            sb.append(Integer.toHexString(element & 15));
         }
-      } else {
-        // With standard_conforming_string off, escape backslashes and
-        // single-quotes, but still escape single-quotes by doubling, to
-        // avoid a security hazard if the reported value of
-        // standard_conforming_strings is incorrect, or an error if
-        // backslash_quote is off.
-        for (int i = 0; i < value.length(); i++) {
-          char ch = value.charAt(i);
-          if (ch == '\0') {
-            throw new PSQLException(GT.tr("Zero bytes may not occur in string parameters."),
-                PSQLState.INVALID_PARAMETER_VALUE);
-          }
-          if (ch == '\\' || ch == '\'') {
-            sbuf.append(ch);
-          }
-          sbuf.append(ch);
-        }
-      }
-    } catch (IOException e) {
-      throw new PSQLException(GT.tr("No IOException expected from StringBuffer or StringBuilder"),
-          PSQLState.UNEXPECTED_ERROR, e);
+        return sb.toString();
     }
-  }
 
-  /**
-   * Escape the given identifier {@code value} and append it to the string builder {@code sbuf}.
-   * If {@code sbuf} is {@code null}, a new StringBuilder will be returned. This method is
-   * different from appendEscapedLiteral in that it includes the quoting required for the identifier
-   * while {@link #escapeLiteral(StringBuilder, String, boolean)} does not.
-   *
-   * @param sbuf the string builder to append to; or {@code null}
-   * @param value the string value
-   * @return the sbuf argument; or a new string builder for sbuf == null
-   * @throws SQLException if the string contains a {@code \0} character
-   */
-  public static StringBuilder escapeIdentifier(StringBuilder sbuf, String value)
-      throws SQLException {
-    if (sbuf == null) {
-      sbuf = new StringBuilder(2 + (value.length() + 10) / 10 * 11); // Add 10% for escaping.
-    }
-    doAppendEscapedIdentifier(sbuf, value);
-    return sbuf;
-  }
-
-  /**
-   * Common part for appendEscapedIdentifier.
-   *
-   * @param sbuf Either StringBuffer or StringBuilder as we do not expect any IOException to be
-   *        thrown.
-   * @param value value to append
-   */
-  private static void doAppendEscapedIdentifier(Appendable sbuf, String value) throws SQLException {
-    try {
-      sbuf.append('"');
-
-      for (int i = 0; i < value.length(); i++) {
-        char ch = value.charAt(i);
-        if (ch == '\0') {
-          throw new PSQLException(GT.tr("Zero bytes may not occur in identifiers."),
-              PSQLState.INVALID_PARAMETER_VALUE);
+    /**
+     * Escape the given literal {@code value} and append it to the string builder {@code sbuf}. If
+     * {@code sbuf} is {@code null}, a new StringBuilder will be returned. The argument
+     * {@code standardConformingStrings} defines whether the backend expects standard-conforming
+     * string literals or allows backslash escape sequences.
+     *
+     * @param sbuf                      the string builder to append to; or {@code null}
+     * @param value                     the string value
+     * @param standardConformingStrings if standard conforming strings should be used
+     * @return the sbuf argument; or a new string builder for sbuf == null
+     * @throws SQLException if the string contains a {@code \0} character
+     */
+    public static StringBuilder escapeLiteral(StringBuilder sbuf, String value,
+                                              boolean standardConformingStrings) throws SQLException {
+        if (sbuf == null) {
+            sbuf = new StringBuilder((value.length() + 10) / 10 * 11); // Add 10% for escaping.
         }
-        if (ch == '"') {
-          sbuf.append(ch);
-        }
-        sbuf.append(ch);
-      }
-
-      sbuf.append('"');
-    } catch (IOException e) {
-      throw new PSQLException(GT.tr("No IOException expected from StringBuffer or StringBuilder"),
-          PSQLState.UNEXPECTED_ERROR, e);
+        doAppendEscapedLiteral(sbuf, value, standardConformingStrings);
+        return sbuf;
     }
-  }
 
-  /**
-   * <p>Attempt to parse the server version string into an XXYYZZ form version number.</p>
-   *
-   * <p>Returns 0 if the version could not be parsed.</p>
-   *
-   * <p>Returns minor version 0 if the minor version could not be determined, e.g. devel or beta
-   * releases.</p>
-   *
-   * <p>If a single major part like 90400 is passed, it's assumed to be a pre-parsed version and
-   * returned verbatim. (Anything equal to or greater than 10000 is presumed to be this form).</p>
-   *
-   * <p>The yy or zz version parts may be larger than 99. A NumberFormatException is thrown if a
-   * version part is out of range.</p>
-   *
-   * @param serverVersion server version in a XXYYZZ form
-   * @return server version in number form
-   * @deprecated use specific {@link Version} instance
-   */
-  @Deprecated
-  public static int parseServerVersionStr(String serverVersion) throws NumberFormatException {
-    return ServerVersion.parseServerVersionStr(serverVersion);
-  }
+    /**
+     * Common part for {@link #escapeLiteral(StringBuilder, String, boolean)}.
+     *
+     * @param sbuf                      Either StringBuffer or StringBuilder as we do not expect any IOException to be
+     *                                  thrown
+     * @param value                     value to append
+     * @param standardConformingStrings if standard conforming strings should be used
+     */
+    private static void doAppendEscapedLiteral(Appendable sbuf, String value,
+                                               boolean standardConformingStrings) throws SQLException {
+        try {
+            if (standardConformingStrings) {
+                // With standard_conforming_strings on, escape only single-quotes.
+                for (int i = 0; i < value.length(); i++) {
+                    char ch = value.charAt(i);
+                    if (ch == '\0') {
+                        throw new PSQLException(GT.tr("Zero bytes may not occur in string parameters."),
+                                PSQLState.INVALID_PARAMETER_VALUE);
+                    }
+                    if (ch == '\'') {
+                        sbuf.append('\'');
+                    }
+                    sbuf.append(ch);
+                }
+            } else {
+                // With standard_conforming_string off, escape backslashes and
+                // single-quotes, but still escape single-quotes by doubling, to
+                // avoid a security hazard if the reported value of
+                // standard_conforming_strings is incorrect, or an error if
+                // backslash_quote is off.
+                for (int i = 0; i < value.length(); i++) {
+                    char ch = value.charAt(i);
+                    if (ch == '\0') {
+                        throw new PSQLException(GT.tr("Zero bytes may not occur in string parameters."),
+                                PSQLState.INVALID_PARAMETER_VALUE);
+                    }
+                    if (ch == '\\' || ch == '\'') {
+                        sbuf.append(ch);
+                    }
+                    sbuf.append(ch);
+                }
+            }
+        } catch (IOException e) {
+            throw new PSQLException(GT.tr("No IOException expected from StringBuffer or StringBuilder"),
+                    PSQLState.UNEXPECTED_ERROR, e);
+        }
+    }
+
+    /**
+     * Escape the given identifier {@code value} and append it to the string builder {@code sbuf}.
+     * If {@code sbuf} is {@code null}, a new StringBuilder will be returned. This method is
+     * different from appendEscapedLiteral in that it includes the quoting required for the identifier
+     * while {@link #escapeLiteral(StringBuilder, String, boolean)} does not.
+     *
+     * @param sbuf  the string builder to append to; or {@code null}
+     * @param value the string value
+     * @return the sbuf argument; or a new string builder for sbuf == null
+     * @throws SQLException if the string contains a {@code \0} character
+     */
+    public static StringBuilder escapeIdentifier(StringBuilder sbuf, String value)
+            throws SQLException {
+        if (sbuf == null) {
+            sbuf = new StringBuilder(2 + (value.length() + 10) / 10 * 11); // Add 10% for escaping.
+        }
+        doAppendEscapedIdentifier(sbuf, value);
+        return sbuf;
+    }
+
+    /**
+     * Common part for appendEscapedIdentifier.
+     *
+     * @param sbuf  Either StringBuffer or StringBuilder as we do not expect any IOException to be
+     *              thrown.
+     * @param value value to append
+     */
+    private static void doAppendEscapedIdentifier(Appendable sbuf, String value) throws SQLException {
+        try {
+            sbuf.append('"');
+
+            for (int i = 0; i < value.length(); i++) {
+                char ch = value.charAt(i);
+                if (ch == '\0') {
+                    throw new PSQLException(GT.tr("Zero bytes may not occur in identifiers."),
+                            PSQLState.INVALID_PARAMETER_VALUE);
+                }
+                if (ch == '"') {
+                    sbuf.append(ch);
+                }
+                sbuf.append(ch);
+            }
+
+            sbuf.append('"');
+        } catch (IOException e) {
+            throw new PSQLException(GT.tr("No IOException expected from StringBuffer or StringBuilder"),
+                    PSQLState.UNEXPECTED_ERROR, e);
+        }
+    }
+
+    /**
+     * <p>Attempt to parse the server version string into an XXYYZZ form version number.</p>
+     *
+     * <p>Returns 0 if the version could not be parsed.</p>
+     *
+     * <p>Returns minor version 0 if the minor version could not be determined, e.g. devel or beta
+     * releases.</p>
+     *
+     * <p>If a single major part like 90400 is passed, it's assumed to be a pre-parsed version and
+     * returned verbatim. (Anything equal to or greater than 10000 is presumed to be this form).</p>
+     *
+     * <p>The yy or zz version parts may be larger than 99. A NumberFormatException is thrown if a
+     * version part is out of range.</p>
+     *
+     * @param serverVersion server version in a XXYYZZ form
+     * @return server version in number form
+     * @deprecated use specific {@link Version} instance
+     */
+    @Deprecated
+    public static int parseServerVersionStr(String serverVersion) throws NumberFormatException {
+        return ServerVersion.parseServerVersionStr(serverVersion);
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/core/Version.java b/pgjdbc/src/main/java/org/postgresql/core/Version.java
index 639226a..23a63b4 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/Version.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/Version.java
@@ -7,11 +7,11 @@ package org.postgresql.core;
 
 public interface Version {
 
-  /**
-   * Get a machine-readable version number.
-   *
-   * @return the version in numeric XXYYZZ form, e.g. 90401 for 9.4.1
-   */
-  int getVersionNum();
+    /**
+     * Get a machine-readable version number.
+     *
+     * @return the version in numeric XXYYZZ form, e.g. 90401 for 9.4.1
+     */
+    int getVersionNum();
 
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/core/VisibleBufferedInputStream.java b/pgjdbc/src/main/java/org/postgresql/core/VisibleBufferedInputStream.java
index c78623f..590f064 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/VisibleBufferedInputStream.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/VisibleBufferedInputStream.java
@@ -18,339 +18,338 @@ import java.net.SocketTimeoutException;
  */
 public class VisibleBufferedInputStream extends InputStream {
 
-  /**
-   * If a direct read to byte array is called that would require a smaller read from the wrapped
-   * stream that MINIMUM_READ then first fill the buffer and serve the bytes from there. Larger
-   * reads are directly done to the provided byte array.
-   */
-  private static final int MINIMUM_READ = 1024;
+    /**
+     * If a direct read to byte array is called that would require a smaller read from the wrapped
+     * stream than MINIMUM_READ, then first fill the buffer and serve the bytes from there. Larger
+     * reads are directly done to the provided byte array.
+     */
+    private static final int MINIMUM_READ = 1024;
 
-  /**
-   * In how large spans is the C string zero-byte scanned.
-   */
-  private static final int STRING_SCAN_SPAN = 1024;
+    /**
+     * In how large spans is the C string zero-byte scanned.
+     */
+    private static final int STRING_SCAN_SPAN = 1024;
 
-  /**
-   * The wrapped input stream.
-   */
-  private final InputStream wrapped;
+    /**
+     * The wrapped input stream.
+     */
+    private final InputStream wrapped;
 
-  /**
-   * The buffer.
-   */
-  private byte[] buffer;
+    /**
+     * The buffer.
+     */
+    private byte[] buffer;
 
-  /**
-   * Current read position in the buffer.
-   */
-  private int index;
+    /**
+     * Current read position in the buffer.
+     */
+    private int index;
 
-  /**
-   * How far is the buffer filled with valid data.
-   */
-  private int endIndex;
+    /**
+     * How far is the buffer filled with valid data.
+     */
+    private int endIndex;
 
-  /**
-   * socket timeout has been requested
-   */
-  private boolean timeoutRequested;
+    /**
+     * socket timeout has been requested
+     */
+    private boolean timeoutRequested;
 
-  /**
-   * Creates a new buffer around the given stream.
-   *
-   * @param in The stream to buffer.
-   * @param bufferSize The initial size of the buffer.
-   */
-  public VisibleBufferedInputStream(InputStream in, int bufferSize) {
-    wrapped = in;
-    buffer = new byte[bufferSize < MINIMUM_READ ? MINIMUM_READ : bufferSize];
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public int read() throws IOException {
-    if (ensureBytes(1)) {
-      return buffer[index++] & 0xFF;
-    }
-    return -1;
-  }
-
-  /**
-   * Reads a byte from the buffer without advancing the index pointer.
-   *
-   * @return byte from the buffer without advancing the index pointer
-   * @throws IOException if something wrong happens
-   */
-  public int peek() throws IOException {
-    if (ensureBytes(1)) {
-      return buffer[index] & 0xFF;
-    }
-    return -1;
-  }
-
-  /**
-   * Reads byte from the buffer without any checks. This method never reads from the underlaying
-   * stream. Before calling this method the {@link #ensureBytes} method must have been called.
-   *
-   * @return The next byte from the buffer.
-   * @throws ArrayIndexOutOfBoundsException If ensureBytes was not called to make sure the buffer
-   *         contains the byte.
-   */
-  public byte readRaw() {
-    return buffer[index++];
-  }
-
-  /**
-   * Ensures that the buffer contains at least n bytes. This method invalidates the buffer and index
-   * fields.
-   *
-   * @param n The amount of bytes to ensure exists in buffer
-   * @return true if required bytes are available and false if EOF
-   * @throws IOException If reading of the wrapped stream failed.
-   */
-  public boolean ensureBytes(int n) throws IOException {
-    return ensureBytes(n, true);
-  }
-
-  /**
-   * Ensures that the buffer contains at least n bytes. This method invalidates the buffer and index
-   * fields.
-   *
-   * @param n The amount of bytes to ensure exists in buffer
-   * @param block whether or not to block the IO
-   * @return true if required bytes are available and false if EOF or the parameter block was false and socket timeout occurred.
-   * @throws IOException If reading of the wrapped stream failed.
-   */
-  public boolean ensureBytes(int n, boolean block) throws IOException {
-    int required = n - endIndex + index;
-    while (required > 0) {
-      if (!readMore(required, block)) {
-        return false;
-      }
-      required = n - endIndex + index;
-    }
-    return true;
-  }
-
-  /**
-   * Reads more bytes into the buffer.
-   *
-   * @param wanted How much should be at least read.
-   * @return True if at least some bytes were read.
-   * @throws IOException If reading of the wrapped stream failed.
-   */
-  private boolean readMore(int wanted, boolean block) throws IOException {
-    if (endIndex == index) {
-      index = 0;
-      endIndex = 0;
-    }
-    int canFit = buffer.length - endIndex;
-    if (canFit < wanted) {
-      // would the wanted bytes fit if we compacted the buffer
-      // and still leave some slack
-      if (index + canFit > wanted + MINIMUM_READ) {
-        compact();
-      } else {
-        doubleBuffer();
-      }
-      canFit = buffer.length - endIndex;
-    }
-    int read = 0;
-    try {
-      read = wrapped.read(buffer, endIndex, canFit);
-      if (!block && read == 0) {
-        return false;
-      }
-    } catch (SocketTimeoutException e) {
-      if (!block) {
-        return false;
-      }
-      if (timeoutRequested) {
-        throw e;
-      }
-    }
-    if (read < 0) {
-      return false;
-    }
-    endIndex += read;
-    return true;
-  }
-
-  /**
-   * Doubles the size of the buffer.
-   */
-  private void doubleBuffer() {
-    byte[] buf = new byte[buffer.length * 2];
-    moveBufferTo(buf);
-    buffer = buf;
-  }
-
-  /**
-   * Compacts the unread bytes of the buffer to the beginning of the buffer.
-   */
-  private void compact() {
-    moveBufferTo(buffer);
-  }
-
-  /**
-   * Moves bytes from the buffer to the beginning of the destination buffer. Also sets the index and
-   * endIndex variables.
-   *
-   * @param dest The destination buffer.
-   */
-  private void moveBufferTo(byte[] dest) {
-    int size = endIndex - index;
-    System.arraycopy(buffer, index, dest, 0, size);
-    index = 0;
-    endIndex = size;
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public int read(byte[] to, int off, int len) throws IOException {
-    if ((off | len | (off + len) | (to.length - (off + len))) < 0) {
-      throw new IndexOutOfBoundsException();
-    } else if (len == 0) {
-      return 0;
+    /**
+     * Creates a new buffer around the given stream.
+     *
+     * @param in         The stream to buffer.
+     * @param bufferSize The initial size of the buffer.
+     */
+    public VisibleBufferedInputStream(InputStream in, int bufferSize) {
+        wrapped = in;
+        buffer = new byte[bufferSize < MINIMUM_READ ? MINIMUM_READ : bufferSize];
     }
 
-    // if the read would go to wrapped stream, but would result
-    // in a small read then try read to the buffer instead
-    int avail = endIndex - index;
-    if (len - avail < MINIMUM_READ) {
-      ensureBytes(len);
-      avail = endIndex - index;
-    }
-
-    // first copy from buffer
-    if (avail > 0) {
-      if (len <= avail) {
-        System.arraycopy(buffer, index, to, off, len);
-        index += len;
-        return len;
-      }
-      System.arraycopy(buffer, index, to, off, avail);
-      len -= avail;
-      off += avail;
-    }
-    int read = avail;
-
-    // good place to reset index because the buffer is fully drained
-    index = 0;
-    endIndex = 0;
-
-    // then directly from wrapped stream
-    do {
-      int r;
-      try {
-        r = wrapped.read(to, off, len);
-      } catch (SocketTimeoutException e) {
-        if (read == 0 && timeoutRequested) {
-          throw e;
+    /**
+     * {@inheritDoc}
+     */
+    @Override
+    public int read() throws IOException {
+        if (ensureBytes(1)) {
+            return buffer[index++] & 0xFF;
         }
+        return -1;
+    }
+
+    /**
+     * Reads a byte from the buffer without advancing the index pointer.
+     *
+     * @return byte from the buffer without advancing the index pointer
+     * @throws IOException if something wrong happens
+     */
+    public int peek() throws IOException {
+        if (ensureBytes(1)) {
+            return buffer[index] & 0xFF;
+        }
+        return -1;
+    }
+
+    /**
+     * Reads a byte from the buffer without any checks. This method never reads from the underlying
+     * stream. Before calling this method the {@link #ensureBytes} method must have been called.
+     *
+     * @return The next byte from the buffer.
+     * @throws ArrayIndexOutOfBoundsException If ensureBytes was not called to make sure the buffer
+     *                                        contains the byte.
+     */
+    public byte readRaw() {
+        return buffer[index++];
+    }
+
+    /**
+     * Ensures that the buffer contains at least n bytes. This method invalidates the buffer and index
+     * fields.
+     *
+     * @param n The amount of bytes to ensure exists in buffer
+     * @return true if required bytes are available and false if EOF
+     * @throws IOException If reading of the wrapped stream failed.
+     */
+    public boolean ensureBytes(int n) throws IOException {
+        return ensureBytes(n, true);
+    }
+
+    /**
+     * Ensures that the buffer contains at least n bytes. This method invalidates the buffer and index
+     * fields.
+     *
+     * @param n     The amount of bytes to ensure exists in buffer
+     * @param block whether or not to block the IO
+     * @return true if required bytes are available and false if EOF or the parameter block was false and socket timeout occurred.
+     * @throws IOException If reading of the wrapped stream failed.
+     */
+    public boolean ensureBytes(int n, boolean block) throws IOException {
+        int required = n - endIndex + index;
+        while (required > 0) {
+            if (!readMore(required, block)) {
+                return false;
+            }
+            required = n - endIndex + index;
+        }
+        return true;
+    }
+
+    /**
+     * Reads more bytes into the buffer.
+     *
+     * @param wanted How much should be at least read.
+     * @return True if at least some bytes were read.
+     * @throws IOException If reading of the wrapped stream failed.
+     */
+    private boolean readMore(int wanted, boolean block) throws IOException {
+        if (endIndex == index) {
+            index = 0;
+            endIndex = 0;
+        }
+        int canFit = buffer.length - endIndex;
+        if (canFit < wanted) {
+            // would the wanted bytes fit if we compacted the buffer
+            // and still leave some slack
+            if (index + canFit > wanted + MINIMUM_READ) {
+                compact();
+            } else {
+                doubleBuffer();
+            }
+            canFit = buffer.length - endIndex;
+        }
+        int read = 0;
+        try {
+            read = wrapped.read(buffer, endIndex, canFit);
+            if (!block && read == 0) {
+                return false;
+            }
+        } catch (SocketTimeoutException e) {
+            if (!block) {
+                return false;
+            }
+            if (timeoutRequested) {
+                throw e;
+            }
+        }
+        if (read < 0) {
+            return false;
+        }
+        endIndex += read;
+        return true;
+    }
+
+    /**
+     * Doubles the size of the buffer.
+     */
+    private void doubleBuffer() {
+        byte[] buf = new byte[buffer.length * 2];
+        moveBufferTo(buf);
+        buffer = buf;
+    }
+
+    /**
+     * Compacts the unread bytes of the buffer to the beginning of the buffer.
+     */
+    private void compact() {
+        moveBufferTo(buffer);
+    }
+
+    /**
+     * Moves bytes from the buffer to the beginning of the destination buffer. Also sets the index and
+     * endIndex variables.
+     *
+     * @param dest The destination buffer.
+     */
+    private void moveBufferTo(byte[] dest) {
+        int size = endIndex - index;
+        System.arraycopy(buffer, index, dest, 0, size);
+        index = 0;
+        endIndex = size;
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    @Override
+    public int read(byte[] to, int off, int len) throws IOException {
+        if ((off | len | (off + len) | (to.length - (off + len))) < 0) {
+            throw new IndexOutOfBoundsException();
+        } else if (len == 0) {
+            return 0;
+        }
+
+        // if the read would go to wrapped stream, but would result
+        // in a small read then try read to the buffer instead
+        int avail = endIndex - index;
+        if (len - avail < MINIMUM_READ) {
+            ensureBytes(len);
+            avail = endIndex - index;
+        }
+
+        // first copy from buffer
+        if (avail > 0) {
+            if (len <= avail) {
+                System.arraycopy(buffer, index, to, off, len);
+                index += len;
+                return len;
+            }
+            System.arraycopy(buffer, index, to, off, avail);
+            len -= avail;
+            off += avail;
+        }
+        int read = avail;
+
+        // good place to reset index because the buffer is fully drained
+        index = 0;
+        endIndex = 0;
+
+        // then directly from wrapped stream
+        do {
+            int r;
+            try {
+                r = wrapped.read(to, off, len);
+            } catch (SocketTimeoutException e) {
+                if (read == 0 && timeoutRequested) {
+                    throw e;
+                }
+                return read;
+            }
+            if (r <= 0) {
+                return read == 0 ? r : read;
+            }
+            read += r;
+            off += r;
+            len -= r;
+        } while (len > 0);
+
         return read;
-      }
-      if (r <= 0) {
-        return read == 0 ? r : read;
-      }
-      read += r;
-      off += r;
-      len -= r;
-    } while (len > 0);
-
-    return read;
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public long skip(long n) throws IOException {
-    int avail = endIndex - index;
-    if (n >= Integer.MAX_VALUE) {
-      throw new IllegalArgumentException("n is too large");
     }
-    if (avail >= n) {
-      index = index + (int)n;
-      return n;
-    }
-    n -= avail;
-    index = 0;
-    endIndex = 0;
-    return avail + wrapped.skip(n);
-  }
 
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public int available() throws IOException {
-    int avail = endIndex - index;
-    return avail > 0 ? avail : wrapped.available();
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public void close() throws IOException {
-    wrapped.close();
-  }
-
-  /**
-   * Returns direct handle to the used buffer. Use the {@link #ensureBytes} to prefill required
-   * bytes the buffer and {@link #getIndex} to fetch the current position of the buffer.
-   *
-   * @return The underlaying buffer.
-   */
-  public byte[] getBuffer() {
-    return buffer;
-  }
-
-  /**
-   * Returns the current read position in the buffer.
-   *
-   * @return the current read position in the buffer.
-   */
-  public int getIndex() {
-    return index;
-  }
-
-  /**
-   * Scans the length of the next null terminated string (C-style string) from the stream.
-   *
-   * @return The length of the next null terminated string.
-   * @throws IOException If reading of stream fails.
-   * @throws EOFException If the stream did not contain any null terminators.
-   */
-  public int scanCStringLength() throws IOException {
-    int pos = index;
-    while (true) {
-      while (pos < endIndex) {
-        if (buffer[pos++] == '\0') {
-          return pos - index;
+    /**
+     * {@inheritDoc}
+     */
+    @Override
+    public long skip(long n) throws IOException {
+        int avail = endIndex - index;
+        if (n >= Integer.MAX_VALUE) {
+            throw new IllegalArgumentException("n is too large");
         }
-      }
-      if (!readMore(STRING_SCAN_SPAN, true)) {
-        throw new EOFException();
-      }
-      pos = index;
+        if (avail >= n) {
+            index = index + (int) n;
+            return n;
+        }
+        n -= avail;
+        index = 0;
+        endIndex = 0;
+        return avail + wrapped.skip(n);
     }
-  }
 
-  public void setTimeoutRequested(boolean timeoutRequested) {
-    this.timeoutRequested = timeoutRequested;
-  }
+    /**
+     * {@inheritDoc}
+     */
+    @Override
+    public int available() throws IOException {
+        int avail = endIndex - index;
+        return avail > 0 ? avail : wrapped.available();
+    }
 
-  /**
-   *
-   * @return the wrapped stream
-   */
-  public InputStream getWrapped() {
-    return wrapped;
-  }
+    /**
+     * {@inheritDoc}
+     */
+    @Override
+    public void close() throws IOException {
+        wrapped.close();
+    }
+
+    /**
+     * Returns direct handle to the used buffer. Use the {@link #ensureBytes} to prefill required
+     * bytes the buffer and {@link #getIndex} to fetch the current position of the buffer.
+     *
+     * @return The underlying buffer.
+     */
+    public byte[] getBuffer() {
+        return buffer;
+    }
+
+    /**
+     * Returns the current read position in the buffer.
+     *
+     * @return the current read position in the buffer.
+     */
+    public int getIndex() {
+        return index;
+    }
+
+    /**
+     * Scans the length of the next null terminated string (C-style string) from the stream.
+     *
+     * @return The length of the next null terminated string.
+     * @throws IOException  If reading of stream fails.
+     * @throws EOFException If the stream did not contain any null terminators.
+     */
+    public int scanCStringLength() throws IOException {
+        int pos = index;
+        while (true) {
+            while (pos < endIndex) {
+                if (buffer[pos++] == '\0') {
+                    return pos - index;
+                }
+            }
+            if (!readMore(STRING_SCAN_SPAN, true)) {
+                throw new EOFException();
+            }
+            pos = index;
+        }
+    }
+
+    public void setTimeoutRequested(boolean timeoutRequested) {
+        this.timeoutRequested = timeoutRequested;
+    }
+
+    /**
+     * @return the wrapped stream
+     */
+    public InputStream getWrapped() {
+        return wrapped;
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/core/v3/AuthenticationPluginManager.java b/pgjdbc/src/main/java/org/postgresql/core/v3/AuthenticationPluginManager.java
index f6cf40a..01420b4 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/v3/AuthenticationPluginManager.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/v3/AuthenticationPluginManager.java
@@ -5,14 +5,6 @@
 
 package org.postgresql.core.v3;
 
-import org.postgresql.PGProperty;
-import org.postgresql.plugin.AuthenticationPlugin;
-import org.postgresql.plugin.AuthenticationRequestType;
-import org.postgresql.util.GT;
-import org.postgresql.util.ObjectFactory;
-import org.postgresql.util.PSQLException;
-import org.postgresql.util.PSQLState;
-
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.nio.CharBuffer;
@@ -21,105 +13,112 @@ import java.util.Arrays;
 import java.util.Properties;
 import java.util.logging.Level;
 import java.util.logging.Logger;
+import org.postgresql.PGProperty;
+import org.postgresql.plugin.AuthenticationPlugin;
+import org.postgresql.plugin.AuthenticationRequestType;
+import org.postgresql.util.GT;
+import org.postgresql.util.ObjectFactory;
+import org.postgresql.util.PSQLException;
+import org.postgresql.util.PSQLState;
 
 class AuthenticationPluginManager {
-  private static final Logger LOGGER = Logger.getLogger(AuthenticationPluginManager.class.getName());
+    private static final Logger LOGGER = Logger.getLogger(AuthenticationPluginManager.class.getName());
 
-  @FunctionalInterface
-  public interface PasswordAction<T, R> {
-    R apply(T password) throws PSQLException, IOException;
-  }
-
-  private AuthenticationPluginManager() {
-  }
-
-  /**
-   * If a password is requested by the server during connection initiation, this
-   * method will be invoked to supply the password. This method will only be
-   * invoked if the server actually requests a password, e.g. trust authentication
-   * will skip it entirely.
-   *
-   * <p>The caller provides a action method that will be invoked with the {@code char[]}
-   * password. After completion, for security reasons the {@code char[]} array will be
-   * wiped by filling it with zeroes. Callers must not rely on being able to read
-   * the password {@code char[]} after the action has completed.</p>
-   *
-   * @param type The authentication type that is being requested
-   * @param info The connection properties for the connection
-   * @param action The action to invoke with the password
-   * @throws PSQLException Throws a PSQLException if the plugin class cannot be instantiated
-   * @throws IOException Bubbles up any thrown IOException from the provided action
-   */
-  public static <T> T withPassword(AuthenticationRequestType type, Properties info,
-      PasswordAction<char [], T> action) throws PSQLException, IOException {
-    char[] password = null;
-
-    String authPluginClassName = PGProperty.AUTHENTICATION_PLUGIN_CLASS_NAME.getOrDefault(info);
-
-    if (authPluginClassName == null || "".equals(authPluginClassName)) {
-      // Default auth plugin simply pulls password directly from connection properties
-      String passwordText = PGProperty.PASSWORD.getOrDefault(info);
-      if (passwordText != null) {
-        password = passwordText.toCharArray();
-      }
-    } else {
-      AuthenticationPlugin authPlugin;
-      try {
-        authPlugin = ObjectFactory.instantiate(AuthenticationPlugin.class, authPluginClassName, info,
-            false, null);
-      } catch (Exception ex) {
-        String msg = GT.tr("Unable to load Authentication Plugin {0}", authPluginClassName);
-        LOGGER.log(Level.FINE, msg, ex);
-        throw new PSQLException(msg, PSQLState.INVALID_PARAMETER_VALUE, ex);
-      }
-
-      password = authPlugin.getPassword(type);
+    private AuthenticationPluginManager() {
     }
 
-    try {
-      return action.apply(password);
-    } finally {
-      if (password != null) {
-        Arrays.fill(password, (char) 0);
-      }
+    /**
+     * If a password is requested by the server during connection initiation, this
+     * method will be invoked to supply the password. This method will only be
+     * invoked if the server actually requests a password, e.g. trust authentication
+     * will skip it entirely.
+     *
+     * <p>The caller provides an action method that will be invoked with the {@code char[]}
+     * password. After completion, for security reasons the {@code char[]} array will be
+     * wiped by filling it with zeroes. Callers must not rely on being able to read
+     * the password {@code char[]} after the action has completed.</p>
+     *
+     * @param type   The authentication type that is being requested
+     * @param info   The connection properties for the connection
+     * @param action The action to invoke with the password
+     * @throws PSQLException Throws a PSQLException if the plugin class cannot be instantiated
+     * @throws IOException   Bubbles up any thrown IOException from the provided action
+     */
+    public static <T> T withPassword(AuthenticationRequestType type, Properties info,
+                                     PasswordAction<char[], T> action) throws PSQLException, IOException {
+        char[] password = null;
+
+        String authPluginClassName = PGProperty.AUTHENTICATION_PLUGIN_CLASS_NAME.getOrDefault(info);
+
+        if (authPluginClassName == null || "".equals(authPluginClassName)) {
+            // Default auth plugin simply pulls password directly from connection properties
+            String passwordText = PGProperty.PASSWORD.getOrDefault(info);
+            if (passwordText != null) {
+                password = passwordText.toCharArray();
+            }
+        } else {
+            AuthenticationPlugin authPlugin;
+            try {
+                authPlugin = ObjectFactory.instantiate(AuthenticationPlugin.class, authPluginClassName, info,
+                        false, null);
+            } catch (Exception ex) {
+                String msg = GT.tr("Unable to load Authentication Plugin {0}", authPluginClassName);
+                LOGGER.log(Level.FINE, msg, ex);
+                throw new PSQLException(msg, PSQLState.INVALID_PARAMETER_VALUE, ex);
+            }
+
+            password = authPlugin.getPassword(type);
+        }
+
+        try {
+            return action.apply(password);
+        } finally {
+            if (password != null) {
+                Arrays.fill(password, (char) 0);
+            }
+        }
     }
-  }
 
-  /**
-   * Helper that wraps {@link #withPassword(AuthenticationRequestType, Properties, PasswordAction)}, checks that it is not-null, and encodes
-   * it as a byte array. Used by internal code paths that require an encoded password
-   * that may be an empty string, but not null.
-   *
-   * <p>The caller provides a callback method that will be invoked with the {@code byte[]}
-   * encoded password. After completion, for security reasons the {@code byte[]} array will be
-   * wiped by filling it with zeroes. Callers must not rely on being able to read
-   * the password {@code byte[]} after the callback has completed.</p>
+    /**
+     * Helper that wraps {@link #withPassword(AuthenticationRequestType, Properties, PasswordAction)}, checks that the password is non-null, and encodes
+     * it as a byte array. Used by internal code paths that require an encoded password
+     * that may be an empty string, but not null.
+     *
+     * <p>The caller provides a callback method that will be invoked with the {@code byte[]}
+     * encoded password. After completion, for security reasons the {@code byte[]} array will be
+     * wiped by filling it with zeroes. Callers must not rely on being able to read
+     * the password {@code byte[]} after the callback has completed.</p>
+     *
+     * @param type   The authentication type that is being requested
+     * @param info   The connection properties for the connection
+     * @param action The action to invoke with the encoded password
+     * @throws PSQLException Throws a PSQLException if the plugin class cannot be instantiated or if the retrieved password is null.
+     * @throws IOException   Bubbles up any thrown IOException from the provided callback
+     */
+    public static <T> T withEncodedPassword(AuthenticationRequestType type, Properties info,
+                                            PasswordAction<byte[], T> action) throws PSQLException, IOException {
+        byte[] encodedPassword = withPassword(type, info, password -> {
+            if (password == null) {
+                throw new PSQLException(
+                        GT.tr("The server requested password-based authentication, but no password was provided by plugin {0}",
+                                PGProperty.AUTHENTICATION_PLUGIN_CLASS_NAME.getOrDefault(info)),
+                        PSQLState.CONNECTION_REJECTED);
+            }
+            ByteBuffer buf = StandardCharsets.UTF_8.encode(CharBuffer.wrap(password));
+            byte[] bytes = new byte[buf.limit()];
+            buf.get(bytes);
+            return bytes;
+        });
 
-   * @param type The authentication type that is being requested
-   * @param info The connection properties for the connection
-   * @param action The action to invoke with the encoded password
-   * @throws PSQLException Throws a PSQLException if the plugin class cannot be instantiated or if the retrieved password is null.
-   * @throws IOException Bubbles up any thrown IOException from the provided callback
-   */
-  public static <T> T withEncodedPassword(AuthenticationRequestType type, Properties info,
-      PasswordAction<byte[], T> action) throws PSQLException, IOException {
-    byte[] encodedPassword = withPassword(type, info, password -> {
-      if (password == null) {
-        throw new PSQLException(
-            GT.tr("The server requested password-based authentication, but no password was provided by plugin {0}",
-                PGProperty.AUTHENTICATION_PLUGIN_CLASS_NAME.getOrDefault(info)),
-            PSQLState.CONNECTION_REJECTED);
-      }
-      ByteBuffer buf = StandardCharsets.UTF_8.encode(CharBuffer.wrap(password));
-      byte[] bytes = new byte[buf.limit()];
-      buf.get(bytes);
-      return bytes;
-    });
-
-    try {
-      return action.apply(encodedPassword);
-    } finally {
-      Arrays.fill(encodedPassword, (byte) 0);
+        try {
+            return action.apply(encodedPassword);
+        } finally {
+            Arrays.fill(encodedPassword, (byte) 0);
+        }
+    }
+
+    @FunctionalInterface
+    public interface PasswordAction<T, R> {
+        R apply(T password) throws PSQLException, IOException;
     }
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/core/v3/BatchedQuery.java b/pgjdbc/src/main/java/org/postgresql/core/v3/BatchedQuery.java
index ed57f75..7f26f2f 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/v3/BatchedQuery.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/v3/BatchedQuery.java
@@ -16,167 +16,166 @@ import org.postgresql.core.ParameterList;
  *
  * @author Jeremy Whiting jwhiting@redhat.com
  * @author Christopher Deckers (chrriis@gmail.com)
- *
  */
 public class BatchedQuery extends SimpleQuery {
 
-  private String sql;
-  private final int valuesBraceOpenPosition;
-  private final int valuesBraceClosePosition;
-  private final int batchSize;
-  private BatchedQuery [] blocks;
+    private final int valuesBraceOpenPosition;
+    private final int valuesBraceClosePosition;
+    private final int batchSize;
+    private String sql;
+    private BatchedQuery[] blocks;
 
-  public BatchedQuery(NativeQuery query, TypeTransferModeRegistry transferModeRegistry,
-      int valuesBraceOpenPosition,
-      int valuesBraceClosePosition, boolean sanitiserDisabled) {
-    super(query, transferModeRegistry, sanitiserDisabled);
-    this.valuesBraceOpenPosition = valuesBraceOpenPosition;
-    this.valuesBraceClosePosition = valuesBraceClosePosition;
-    this.batchSize = 1;
-  }
+    public BatchedQuery(NativeQuery query, TypeTransferModeRegistry transferModeRegistry,
+                        int valuesBraceOpenPosition,
+                        int valuesBraceClosePosition, boolean sanitiserDisabled) {
+        super(query, transferModeRegistry, sanitiserDisabled);
+        this.valuesBraceOpenPosition = valuesBraceOpenPosition;
+        this.valuesBraceClosePosition = valuesBraceClosePosition;
+        this.batchSize = 1;
+    }
 
-  private BatchedQuery(BatchedQuery src, int batchSize) {
-    super(src);
-    this.valuesBraceOpenPosition = src.valuesBraceOpenPosition;
-    this.valuesBraceClosePosition = src.valuesBraceClosePosition;
-    this.batchSize = batchSize;
-  }
+    private BatchedQuery(BatchedQuery src, int batchSize) {
+        super(src);
+        this.valuesBraceOpenPosition = src.valuesBraceOpenPosition;
+        this.valuesBraceClosePosition = src.valuesBraceClosePosition;
+        this.batchSize = batchSize;
+    }
 
-  public BatchedQuery deriveForMultiBatch(int valueBlock) {
-    if (getBatchSize() != 1) {
-      throw new IllegalStateException("Only the original decorator can be derived.");
-    }
-    if (valueBlock == 1) {
-      return this;
-    }
-    int index = Integer.numberOfTrailingZeros(valueBlock) - 1;
-    if (valueBlock > 128 || valueBlock != (1 << (index + 1))) {
-      throw new IllegalArgumentException(
-          "Expected value block should be a power of 2 smaller or equal to 128. Actual block is "
-              + valueBlock);
-    }
-    if (blocks == null) {
-      blocks = new BatchedQuery[7];
-    }
-    BatchedQuery bq = blocks[index];
-    if (bq == null) {
-      bq = new BatchedQuery(this, valueBlock);
-      blocks[index] = bq;
-    }
-    return bq;
-  }
-
-  @Override
-  public int getBatchSize() {
-    return batchSize;
-  }
-
-  /**
-   * Method to return the sql based on number of batches. Skipping the initial
-   * batch.
-   */
-  @Override
-  public String getNativeSql() {
-    if (sql != null) {
-      return sql;
-    }
-    sql = buildNativeSql(null);
-    return sql;
-  }
-
-  private String buildNativeSql(ParameterList params) {
-    String sql = null;
-    // dynamically build sql with parameters for batches
-    String nativeSql = super.getNativeSql();
-    int batchSize = getBatchSize();
-    if (batchSize < 2) {
-      sql = nativeSql;
-      return sql;
-    }
-    if (nativeSql == null) {
-      sql = "";
-      return sql;
-    }
-    int valuesBlockCharCount = 0;
-    // Split the values section around every dynamic parameter.
-    int[] bindPositions = getNativeQuery().bindPositions;
-    int[] chunkStart = new int[1 + bindPositions.length];
-    int[] chunkEnd = new int[1 + bindPositions.length];
-    chunkStart[0] = valuesBraceOpenPosition;
-    if (bindPositions.length == 0) {
-      valuesBlockCharCount = valuesBraceClosePosition - valuesBraceOpenPosition + 1;
-      chunkEnd[0] = valuesBraceClosePosition + 1;
-    } else {
-      chunkEnd[0] = bindPositions[0];
-      // valuesBlockCharCount += chunks[0].length;
-      valuesBlockCharCount += chunkEnd[0] - chunkStart[0];
-      for (int i = 0; i < bindPositions.length; i++) {
-        int startIndex = bindPositions[i] + 2;
-        int endIndex =
-            i < bindPositions.length - 1 ? bindPositions[i + 1] : valuesBraceClosePosition + 1;
-        for (; startIndex < endIndex; startIndex++) {
-          if (!Character.isDigit(nativeSql.charAt(startIndex))) {
-            break;
-          }
+    public BatchedQuery deriveForMultiBatch(int valueBlock) {
+        if (getBatchSize() != 1) {
+            throw new IllegalStateException("Only the original decorator can be derived.");
         }
-        chunkStart[i + 1] = startIndex;
-        chunkEnd[i + 1] = endIndex;
-        // valuesBlockCharCount += chunks[i + 1].length;
-        valuesBlockCharCount += chunkEnd[i + 1] - chunkStart[i + 1];
-      }
+        if (valueBlock == 1) {
+            return this;
+        }
+        int index = Integer.numberOfTrailingZeros(valueBlock) - 1;
+        if (valueBlock > 128 || valueBlock != (1 << (index + 1))) {
+            throw new IllegalArgumentException(
+                    "Expected value block should be a power of 2 smaller or equal to 128. Actual block is "
+                            + valueBlock);
+        }
+        if (blocks == null) {
+            blocks = new BatchedQuery[7];
+        }
+        BatchedQuery bq = blocks[index];
+        if (bq == null) {
+            bq = new BatchedQuery(this, valueBlock);
+            blocks[index] = bq;
+        }
+        return bq;
     }
-    int length = nativeSql.length();
-    //valuesBraceOpenPosition + valuesBlockCharCount;
-    length += NativeQuery.calculateBindLength(bindPositions.length * batchSize);
-    length -= NativeQuery.calculateBindLength(bindPositions.length);
-    length += (valuesBlockCharCount + 1 /*comma*/) * (batchSize - 1 /* initial sql */);
 
-    StringBuilder s = new StringBuilder(length);
-    // Add query until end of values parameter block.
-    int pos;
-    if (bindPositions.length > 0 && params == null) {
-      // Add the first values (...) clause, it would be values($1,..., $n), and it matches with
-      // the values clause of a simple non-rewritten SQL
-      s.append(nativeSql, 0, valuesBraceClosePosition + 1);
-      pos = bindPositions.length + 1;
-    } else {
-      pos = 1;
-      batchSize++; // do not use super.toString(params) as it does not work if query ends with --
-      // We need to carefully add (...),(...), and we do not want to get (...) --, (...)
-      // s.append(super.toString(params));
-      s.append(nativeSql, 0, valuesBraceOpenPosition);
+    @Override
+    public int getBatchSize() {
+        return batchSize;
     }
-    for (int i = 2; i <= batchSize; i++) {
-      if (i > 2 || pos != 1) {
-        // For "has binds" the first valuds
-        s.append(',');
-      }
-      s.append(nativeSql, chunkStart[0], chunkEnd[0]);
-      for (int j = 1; j < chunkStart.length; j++) {
-        if (params == null) {
-          NativeQuery.appendBindName(s, pos++);
+
+    /**
+     * Method to return the sql based on number of batches. Skipping the initial
+     * batch.
+     */
+    @Override
+    public String getNativeSql() {
+        if (sql != null) {
+            return sql;
+        }
+        sql = buildNativeSql(null);
+        return sql;
+    }
+
+    private String buildNativeSql(ParameterList params) {
+        String sql = null;
+        // dynamically build sql with parameters for batches
+        String nativeSql = super.getNativeSql();
+        int batchSize = getBatchSize();
+        if (batchSize < 2) {
+            sql = nativeSql;
+            return sql;
+        }
+        if (nativeSql == null) {
+            sql = "";
+            return sql;
+        }
+        int valuesBlockCharCount = 0;
+        // Split the values section around every dynamic parameter.
+        int[] bindPositions = getNativeQuery().bindPositions;
+        int[] chunkStart = new int[1 + bindPositions.length];
+        int[] chunkEnd = new int[1 + bindPositions.length];
+        chunkStart[0] = valuesBraceOpenPosition;
+        if (bindPositions.length == 0) {
+            valuesBlockCharCount = valuesBraceClosePosition - valuesBraceOpenPosition + 1;
+            chunkEnd[0] = valuesBraceClosePosition + 1;
         } else {
-          s.append(params.toString(pos++, true));
+            chunkEnd[0] = bindPositions[0];
+            // valuesBlockCharCount += chunks[0].length;
+            valuesBlockCharCount += chunkEnd[0] - chunkStart[0];
+            for (int i = 0; i < bindPositions.length; i++) {
+                int startIndex = bindPositions[i] + 2;
+                int endIndex =
+                        i < bindPositions.length - 1 ? bindPositions[i + 1] : valuesBraceClosePosition + 1;
+                for (; startIndex < endIndex; startIndex++) {
+                    if (!Character.isDigit(nativeSql.charAt(startIndex))) {
+                        break;
+                    }
+                }
+                chunkStart[i + 1] = startIndex;
+                chunkEnd[i + 1] = endIndex;
+                // valuesBlockCharCount += chunks[i + 1].length;
+                valuesBlockCharCount += chunkEnd[i + 1] - chunkStart[i + 1];
+            }
         }
-        s.append(nativeSql, chunkStart[j], chunkEnd[j]);
-      }
-    }
-    // Add trailing content: final query is like original with multi values.
-    // This could contain "--" comments, so it is important to add them at end.
-    s.append(nativeSql, valuesBraceClosePosition + 1, nativeSql.length());
-    sql = s.toString();
-    // Predict length only when building sql with $1, $2, ... (that is no specific params given)
-    assert params != null || s.length() == length
-        : "Predicted length != actual: " + length + " !=" + s.length();
-    return sql;
-  }
+        int length = nativeSql.length();
+        //valuesBraceOpenPosition + valuesBlockCharCount;
+        length += NativeQuery.calculateBindLength(bindPositions.length * batchSize);
+        length -= NativeQuery.calculateBindLength(bindPositions.length);
+        length += (valuesBlockCharCount + 1 /*comma*/) * (batchSize - 1 /* initial sql */);
 
-  @Override
-  public String toString(ParameterList params) {
-    if (getBatchSize() < 2) {
-      return super.toString(params);
+        StringBuilder s = new StringBuilder(length);
+        // Add query until end of values parameter block.
+        int pos;
+        if (bindPositions.length > 0 && params == null) {
+            // Add the first values (...) clause, it would be values($1,..., $n), and it matches with
+            // the values clause of a simple non-rewritten SQL
+            s.append(nativeSql, 0, valuesBraceClosePosition + 1);
+            pos = bindPositions.length + 1;
+        } else {
+            pos = 1;
+            batchSize++; // do not use super.toString(params) as it does not work if query ends with --
+            // We need to carefully add (...),(...), and we do not want to get (...) --, (...)
+            // s.append(super.toString(params));
+            s.append(nativeSql, 0, valuesBraceOpenPosition);
+        }
+        for (int i = 2; i <= batchSize; i++) {
+            if (i > 2 || pos != 1) {
+                // For "has binds" the first values
+                s.append(',');
+            }
+            s.append(nativeSql, chunkStart[0], chunkEnd[0]);
+            for (int j = 1; j < chunkStart.length; j++) {
+                if (params == null) {
+                    NativeQuery.appendBindName(s, pos++);
+                } else {
+                    s.append(params.toString(pos++, true));
+                }
+                s.append(nativeSql, chunkStart[j], chunkEnd[j]);
+            }
+        }
+        // Add trailing content: final query is like original with multi values.
+        // This could contain "--" comments, so it is important to add them at end.
+        s.append(nativeSql, valuesBraceClosePosition + 1, nativeSql.length());
+        sql = s.toString();
+        // Predict length only when building sql with $1, $2, ... (that is no specific params given)
+        assert params != null || s.length() == length
+                : "Predicted length != actual: " + length + " !=" + s.length();
+        return sql;
+    }
+
+    @Override
+    public String toString(ParameterList params) {
+        if (getBatchSize() < 2) {
+            return super.toString(params);
+        }
+        return buildNativeSql(params);
     }
-    return buildNativeSql(params);
-  }
 
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/core/v3/CompositeParameterList.java b/pgjdbc/src/main/java/org/postgresql/core/v3/CompositeParameterList.java
index 8075834..2193d2b 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/v3/CompositeParameterList.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/v3/CompositeParameterList.java
@@ -6,15 +6,14 @@
 
 package org.postgresql.core.v3;
 
+import java.io.InputStream;
+import java.sql.SQLException;
 import org.postgresql.core.ParameterList;
 import org.postgresql.util.ByteStreamWriter;
 import org.postgresql.util.GT;
 import org.postgresql.util.PSQLException;
 import org.postgresql.util.PSQLState;
 
-import java.io.InputStream;
-import java.sql.SQLException;
-
 /**
  * Parameter list for V3 query strings that contain multiple statements. We delegate to one
  * SimpleParameterList per statement, and translate parameter indexes as needed.
@@ -22,194 +21,194 @@ import java.sql.SQLException;
  * @author Oliver Jowett (oliver@opencloud.com)
  */
 class CompositeParameterList implements V3ParameterList {
-  CompositeParameterList(SimpleParameterList[] subparams, int[] offsets) {
-    this.subparams = subparams;
-    this.offsets = offsets;
-    this.total = offsets[offsets.length - 1] + subparams[offsets.length - 1].getInParameterCount();
-  }
+    private final int total;
+    private final SimpleParameterList[] subparams;
+    private final int[] offsets;
 
-  private int findSubParam(int index) throws SQLException {
-    if (index < 1 || index > total) {
-      throw new PSQLException(
-          GT.tr("The column index is out of range: {0}, number of columns: {1}.", index, total),
-          PSQLState.INVALID_PARAMETER_VALUE);
+    CompositeParameterList(SimpleParameterList[] subparams, int[] offsets) {
+        this.subparams = subparams;
+        this.offsets = offsets;
+        this.total = offsets[offsets.length - 1] + subparams[offsets.length - 1].getInParameterCount();
     }
 
-    for (int i = offsets.length - 1; i >= 0; i--) {
-      if (offsets[i] < index) {
-        return i;
-      }
+    private int findSubParam(int index) throws SQLException {
+        if (index < 1 || index > total) {
+            throw new PSQLException(
+                    GT.tr("The column index is out of range: {0}, number of columns: {1}.", index, total),
+                    PSQLState.INVALID_PARAMETER_VALUE);
+        }
+
+        for (int i = offsets.length - 1; i >= 0; i--) {
+            if (offsets[i] < index) {
+                return i;
+            }
+        }
+
+        throw new IllegalArgumentException("I am confused; can't find a subparam for index " + index);
     }
 
-    throw new IllegalArgumentException("I am confused; can't find a subparam for index " + index);
-  }
+    @Override
+    public void registerOutParameter(int index, int sqlType) {
 
-  @Override
-  public void registerOutParameter(int index, int sqlType) {
-
-  }
-
-  public int getDirection(int i) {
-    return 0;
-  }
-
-  @Override
-  public int getParameterCount() {
-    return total;
-  }
-
-  @Override
-  public int getInParameterCount() {
-    return total;
-  }
-
-  @Override
-  public int getOutParameterCount() {
-    return 0;
-  }
-
-  @Override
-  public int[] getTypeOIDs() {
-    int[] oids = new int[total];
-    for (int i = 0; i < offsets.length; i++) {
-      int[] subOids = subparams[i].getTypeOIDs();
-      System.arraycopy(subOids, 0, oids, offsets[i], subOids.length);
-    }
-    return oids;
-  }
-
-  @Override
-  public void setIntParameter(int index, int value) throws SQLException {
-    int sub = findSubParam(index);
-    subparams[sub].setIntParameter(index - offsets[sub], value);
-  }
-
-  @Override
-  public void setLiteralParameter(int index, String value, int oid) throws SQLException {
-    int sub = findSubParam(index);
-    subparams[sub].setStringParameter(index - offsets[sub], value, oid);
-  }
-
-  @Override
-  public void setStringParameter(int index, String value, int oid) throws SQLException {
-    int sub = findSubParam(index);
-    subparams[sub].setStringParameter(index - offsets[sub], value, oid);
-  }
-
-  @Override
-  public void setBinaryParameter(int index, byte[] value, int oid) throws SQLException {
-    int sub = findSubParam(index);
-    subparams[sub].setBinaryParameter(index - offsets[sub], value, oid);
-  }
-
-  @Override
-  public void setBytea(int index, byte[] data, int offset, int length) throws SQLException {
-    int sub = findSubParam(index);
-    subparams[sub].setBytea(index - offsets[sub], data, offset, length);
-  }
-
-  @Override
-  public void setBytea(int index, InputStream stream, int length) throws SQLException {
-    int sub = findSubParam(index);
-    subparams[sub].setBytea(index - offsets[sub], stream, length);
-  }
-
-  @Override
-  public void setBytea(int index, InputStream stream) throws SQLException {
-    int sub = findSubParam(index);
-    subparams[sub].setBytea(index - offsets[sub], stream);
-  }
-
-  @Override
-  public void setBytea(int index, ByteStreamWriter writer) throws SQLException {
-    int sub = findSubParam(index);
-    subparams[sub].setBytea(index - offsets[sub], writer);
-  }
-
-  @Override
-  public void setText(int index, InputStream stream) throws SQLException {
-    int sub = findSubParam(index);
-    subparams[sub].setText(index - offsets[sub], stream);
-  }
-
-  @Override
-  public void setNull(int index, int oid) throws SQLException {
-    int sub = findSubParam(index);
-    subparams[sub].setNull(index - offsets[sub], oid);
-  }
-
-  @Override
-  public String toString(int index, boolean standardConformingStrings) {
-    try {
-      int sub = findSubParam(index);
-      return subparams[sub].toString(index - offsets[sub], standardConformingStrings);
-    } catch (SQLException e) {
-      throw new IllegalStateException(e.getMessage());
-    }
-  }
-
-  @Override
-  public ParameterList copy() {
-    SimpleParameterList[] copySub = new SimpleParameterList[subparams.length];
-    for (int sub = 0; sub < subparams.length; sub++) {
-      copySub[sub] = (SimpleParameterList) subparams[sub].copy();
     }
 
-    return new CompositeParameterList(copySub, offsets);
-  }
-
-  @Override
-  public void clear() {
-    for (SimpleParameterList subparam : subparams) {
-      subparam.clear();
+    public int getDirection(int i) {
+        return 0;
     }
-  }
 
-  @Override
-  public SimpleParameterList [] getSubparams() {
-    return subparams;
-  }
-
-  @Override
-  public void checkAllParametersSet() throws SQLException {
-    for (SimpleParameterList subparam : subparams) {
-      subparam.checkAllParametersSet();
+    @Override
+    public int getParameterCount() {
+        return total;
     }
-  }
 
-  @Override
-  public byte [][] getEncoding() {
-    return null; // unsupported
-  }
-
-  @Override
-  public byte [] getFlags() {
-    return null; // unsupported
-  }
-
-  @Override
-  public int [] getParamTypes() {
-    return null; // unsupported
-  }
-
-  @Override
-  public Object [] getValues() {
-    return null; // unsupported
-  }
-
-  @Override
-  public void appendAll(ParameterList list) throws SQLException {
-    // no-op, unsupported
-  }
-
-  @Override
-  public void convertFunctionOutParameters() {
-    for (SimpleParameterList subparam : subparams) {
-      subparam.convertFunctionOutParameters();
+    @Override
+    public int getInParameterCount() {
+        return total;
     }
-  }
 
-  private final int total;
-  private final SimpleParameterList[] subparams;
-  private final int[] offsets;
+    @Override
+    public int getOutParameterCount() {
+        return 0;
+    }
+
+    @Override
+    public int[] getTypeOIDs() {
+        int[] oids = new int[total];
+        for (int i = 0; i < offsets.length; i++) {
+            int[] subOids = subparams[i].getTypeOIDs();
+            System.arraycopy(subOids, 0, oids, offsets[i], subOids.length);
+        }
+        return oids;
+    }
+
+    @Override
+    public void setIntParameter(int index, int value) throws SQLException {
+        int sub = findSubParam(index);
+        subparams[sub].setIntParameter(index - offsets[sub], value);
+    }
+
+    @Override
+    public void setLiteralParameter(int index, String value, int oid) throws SQLException {
+        int sub = findSubParam(index);
+        subparams[sub].setStringParameter(index - offsets[sub], value, oid);
+    }
+
+    @Override
+    public void setStringParameter(int index, String value, int oid) throws SQLException {
+        int sub = findSubParam(index);
+        subparams[sub].setStringParameter(index - offsets[sub], value, oid);
+    }
+
+    @Override
+    public void setBinaryParameter(int index, byte[] value, int oid) throws SQLException {
+        int sub = findSubParam(index);
+        subparams[sub].setBinaryParameter(index - offsets[sub], value, oid);
+    }
+
+    @Override
+    public void setBytea(int index, byte[] data, int offset, int length) throws SQLException {
+        int sub = findSubParam(index);
+        subparams[sub].setBytea(index - offsets[sub], data, offset, length);
+    }
+
+    @Override
+    public void setBytea(int index, InputStream stream, int length) throws SQLException {
+        int sub = findSubParam(index);
+        subparams[sub].setBytea(index - offsets[sub], stream, length);
+    }
+
+    @Override
+    public void setBytea(int index, InputStream stream) throws SQLException {
+        int sub = findSubParam(index);
+        subparams[sub].setBytea(index - offsets[sub], stream);
+    }
+
+    @Override
+    public void setBytea(int index, ByteStreamWriter writer) throws SQLException {
+        int sub = findSubParam(index);
+        subparams[sub].setBytea(index - offsets[sub], writer);
+    }
+
+    @Override
+    public void setText(int index, InputStream stream) throws SQLException {
+        int sub = findSubParam(index);
+        subparams[sub].setText(index - offsets[sub], stream);
+    }
+
+    @Override
+    public void setNull(int index, int oid) throws SQLException {
+        int sub = findSubParam(index);
+        subparams[sub].setNull(index - offsets[sub], oid);
+    }
+
+    @Override
+    public String toString(int index, boolean standardConformingStrings) {
+        try {
+            int sub = findSubParam(index);
+            return subparams[sub].toString(index - offsets[sub], standardConformingStrings);
+        } catch (SQLException e) {
+            throw new IllegalStateException(e.getMessage());
+        }
+    }
+
+    @Override
+    public ParameterList copy() {
+        SimpleParameterList[] copySub = new SimpleParameterList[subparams.length];
+        for (int sub = 0; sub < subparams.length; sub++) {
+            copySub[sub] = (SimpleParameterList) subparams[sub].copy();
+        }
+
+        return new CompositeParameterList(copySub, offsets);
+    }
+
+    @Override
+    public void clear() {
+        for (SimpleParameterList subparam : subparams) {
+            subparam.clear();
+        }
+    }
+
+    @Override
+    public SimpleParameterList[] getSubparams() {
+        return subparams;
+    }
+
+    @Override
+    public void checkAllParametersSet() throws SQLException {
+        for (SimpleParameterList subparam : subparams) {
+            subparam.checkAllParametersSet();
+        }
+    }
+
+    @Override
+    public byte[][] getEncoding() {
+        return null; // unsupported
+    }
+
+    @Override
+    public byte[] getFlags() {
+        return null; // unsupported
+    }
+
+    @Override
+    public int[] getParamTypes() {
+        return null; // unsupported
+    }
+
+    @Override
+    public Object[] getValues() {
+        return null; // unsupported
+    }
+
+    @Override
+    public void appendAll(ParameterList list) throws SQLException {
+        // no-op, unsupported
+    }
+
+    @Override
+    public void convertFunctionOutParameters() {
+        for (SimpleParameterList subparam : subparams) {
+            subparam.convertFunctionOutParameters();
+        }
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/core/v3/CompositeQuery.java b/pgjdbc/src/main/java/org/postgresql/core/v3/CompositeQuery.java
index bb34876..e0792b2 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/v3/CompositeQuery.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/v3/CompositeQuery.java
@@ -6,12 +6,11 @@
 
 package org.postgresql.core.v3;
 
+import java.util.Map;
 import org.postgresql.core.ParameterList;
 import org.postgresql.core.Query;
 import org.postgresql.core.SqlCommand;
 
-import java.util.Map;
-
 /**
  * V3 Query implementation for queries that involve multiple statements. We split it up into one
  * SimpleQuery per statement, and wrap the corresponding per-statement SimpleParameterList objects
@@ -20,92 +19,92 @@ import java.util.Map;
  * @author Oliver Jowett (oliver@opencloud.com)
  */
 class CompositeQuery implements Query {
-  CompositeQuery(SimpleQuery[] subqueries, int[] offsets) {
-    this.subqueries = subqueries;
-    this.offsets = offsets;
-  }
+    private final SimpleQuery[] subqueries;
+    private final int[] offsets;
 
-  @Override
-  public ParameterList createParameterList() {
-    SimpleParameterList[] subparams = new SimpleParameterList[subqueries.length];
-    for (int i = 0; i < subqueries.length; i++) {
-      subparams[i] = (SimpleParameterList) subqueries[i].createParameterList();
+    CompositeQuery(SimpleQuery[] subqueries, int[] offsets) {
+        this.subqueries = subqueries;
+        this.offsets = offsets;
     }
-    return new CompositeParameterList(subparams, offsets);
-  }
 
-  @Override
-  public String toString(ParameterList parameters) {
-    StringBuilder sbuf = new StringBuilder(subqueries[0].toString());
-    for (int i = 1; i < subqueries.length; i++) {
-      sbuf.append(';');
-      sbuf.append(subqueries[i]);
+    @Override
+    public ParameterList createParameterList() {
+        SimpleParameterList[] subparams = new SimpleParameterList[subqueries.length];
+        for (int i = 0; i < subqueries.length; i++) {
+            subparams[i] = (SimpleParameterList) subqueries[i].createParameterList();
+        }
+        return new CompositeParameterList(subparams, offsets);
     }
-    return sbuf.toString();
-  }
 
-  @Override
-  public String getNativeSql() {
-    StringBuilder sbuf = new StringBuilder(subqueries[0].getNativeSql());
-    for (int i = 1; i < subqueries.length; i++) {
-      sbuf.append(';');
-      sbuf.append(subqueries[i].getNativeSql());
+    @Override
+    public String toString(ParameterList parameters) {
+        StringBuilder sbuf = new StringBuilder(subqueries[0].toString());
+        for (int i = 1; i < subqueries.length; i++) {
+            sbuf.append(';');
+            sbuf.append(subqueries[i]);
+        }
+        return sbuf.toString();
     }
-    return sbuf.toString();
-  }
 
-  @Override
-  public SqlCommand getSqlCommand() {
-    return null;
-  }
-
-  @Override
-  public String toString() {
-    return toString(null);
-  }
-
-  @Override
-  public void close() {
-    for (SimpleQuery subquery : subqueries) {
-      subquery.close();
+    @Override
+    public String getNativeSql() {
+        StringBuilder sbuf = new StringBuilder(subqueries[0].getNativeSql());
+        for (int i = 1; i < subqueries.length; i++) {
+            sbuf.append(';');
+            sbuf.append(subqueries[i].getNativeSql());
+        }
+        return sbuf.toString();
     }
-  }
 
-  @Override
-  public Query[] getSubqueries() {
-    return subqueries;
-  }
-
-  @Override
-  public boolean isStatementDescribed() {
-    for (SimpleQuery subquery : subqueries) {
-      if (!subquery.isStatementDescribed()) {
-        return false;
-      }
+    @Override
+    public SqlCommand getSqlCommand() {
+        return null;
     }
-    return true;
-  }
 
-  @Override
-  public boolean isEmpty() {
-    for (SimpleQuery subquery : subqueries) {
-      if (!subquery.isEmpty()) {
-        return false;
-      }
+    @Override
+    public String toString() {
+        return toString(null);
     }
-    return true;
-  }
 
-  @Override
-  public int getBatchSize() {
-    return 0; // no-op, unsupported
-  }
+    @Override
+    public void close() {
+        for (SimpleQuery subquery : subqueries) {
+            subquery.close();
+        }
+    }
 
-  @Override
-  public Map<String, Integer> getResultSetColumnNameIndexMap() {
-    return null; // unsupported
-  }
+    @Override
+    public Query[] getSubqueries() {
+        return subqueries;
+    }
 
-  private final SimpleQuery[] subqueries;
-  private final int[] offsets;
+    @Override
+    public boolean isStatementDescribed() {
+        for (SimpleQuery subquery : subqueries) {
+            if (!subquery.isStatementDescribed()) {
+                return false;
+            }
+        }
+        return true;
+    }
+
+    @Override
+    public boolean isEmpty() {
+        for (SimpleQuery subquery : subqueries) {
+            if (!subquery.isEmpty()) {
+                return false;
+            }
+        }
+        return true;
+    }
+
+    @Override
+    public int getBatchSize() {
+        return 0; // no-op, unsupported
+    }
+
+    @Override
+    public Map<String, Integer> getResultSetColumnNameIndexMap() {
+        return null; // unsupported
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/core/v3/ConnectionFactoryImpl.java b/pgjdbc/src/main/java/org/postgresql/core/v3/ConnectionFactoryImpl.java
index 1815a91..8944451 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/v3/ConnectionFactoryImpl.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/v3/ConnectionFactoryImpl.java
@@ -6,6 +6,22 @@
 
 package org.postgresql.core.v3;
 
+import java.io.IOException;
+import java.net.ConnectException;
+import java.nio.charset.StandardCharsets;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.TimeZone;
+import java.util.logging.Level;
+import java.util.logging.LogRecord;
+import java.util.logging.Logger;
+import javax.net.SocketFactory;
 import org.postgresql.PGProperty;
 import org.postgresql.core.ConnectionFactory;
 import org.postgresql.core.PGStream;
@@ -35,24 +51,6 @@ import org.postgresql.util.PSQLException;
 import org.postgresql.util.PSQLState;
 import org.postgresql.util.ServerErrorMessage;
 
-import java.io.IOException;
-import java.net.ConnectException;
-import java.nio.charset.StandardCharsets;
-import java.sql.SQLException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-import java.util.TimeZone;
-import java.util.logging.Level;
-import java.util.logging.LogRecord;
-import java.util.logging.Logger;
-
-import javax.net.SocketFactory;
-
 /**
  * ConnectionFactory implementation for version 3 (7.4+) connections.
  *
@@ -60,397 +58,373 @@ import javax.net.SocketFactory;
  */
 public class ConnectionFactoryImpl extends ConnectionFactory {
 
-  private static class StartupParam {
-    private final String key;
-    private final String value;
+    private static final Logger LOGGER = Logger.getLogger(ConnectionFactoryImpl.class.getName());
+    private static final int AUTH_REQ_OK = 0;
+    private static final int AUTH_REQ_KRB4 = 1;
+    private static final int AUTH_REQ_KRB5 = 2;
+    private static final int AUTH_REQ_PASSWORD = 3;
+    private static final int AUTH_REQ_CRYPT = 4;
+    private static final int AUTH_REQ_MD5 = 5;
+    private static final int AUTH_REQ_SCM = 6;
+    private static final int AUTH_REQ_GSS = 7;
+    private static final int AUTH_REQ_GSS_CONTINUE = 8;
+    private static final int AUTH_REQ_SSPI = 9;
+    private static final int AUTH_REQ_SASL = 10;
+    private static final int AUTH_REQ_SASL_CONTINUE = 11;
+    private static final int AUTH_REQ_SASL_FINAL = 12;
+    private static final String IN_HOT_STANDBY = "in_hot_standby";
 
-    StartupParam(String key, String value) {
-      this.key = key;
-      this.value = value;
+    public ConnectionFactoryImpl() {
+    }
+
+    private static void log(Level level, String msg, Throwable thrown, Object... params) {
+        if (!LOGGER.isLoggable(level)) {
+            return;
+        }
+        LogRecord rec = new LogRecord(level, msg);
+        // Set the loggerName of the LogRecord with the current logger
+        rec.setLoggerName(LOGGER.getName());
+        rec.setParameters(params);
+        rec.setThrown(thrown);
+        LOGGER.log(rec);
+    }
+
+    /**
+     * Convert Java time zone to postgres time zone. All others stay the same except that GMT+nn
+     * changes to GMT-nn and vise versa.
+     * If you provide GMT+/-nn postgres uses POSIX rules which has a positive sign for west of Greenwich
+     * JAVA uses ISO rules which the positive sign is east of Greenwich
+     * To make matters more interesting postgres will always report in ISO
+     *
+     * @return The current JVM time zone in postgresql format.
+     */
+    private static String createPostgresTimeZone() {
+        String tz = TimeZone.getDefault().getID();
+        if (tz.length() <= 3 || !tz.startsWith("GMT")) {
+            return tz;
+        }
+        char sign = tz.charAt(3);
+        String start;
+        switch (sign) {
+            case '+':
+                start = "GMT-";
+                break;
+            case '-':
+                start = "GMT+";
+                break;
+            default:
+                // unknown type
+                return tz;
+        }
+
+        return start + tz.substring(4);
+    }
+
+    private PGStream tryConnect(Properties info, SocketFactory socketFactory, HostSpec hostSpec,
+                                SslMode sslMode, GSSEncMode gssEncMode)
+            throws SQLException, IOException {
+        int connectTimeout = PGProperty.CONNECT_TIMEOUT.getInt(info) * 1000;
+        String user = PGProperty.USER.getOrDefault(info);
+        String database = PGProperty.PG_DBNAME.getOrDefault(info);
+        if (user == null) {
+            throw new PSQLException(GT.tr("User cannot be null"), PSQLState.INVALID_NAME);
+        }
+        if (database == null) {
+            throw new PSQLException(GT.tr("Database cannot be null"), PSQLState.INVALID_NAME);
+        }
+
+        PGStream newStream = new PGStream(socketFactory, hostSpec, connectTimeout);
+        try {
+            // Set the socket timeout if the "socketTimeout" property has been set.
+            int socketTimeout = PGProperty.SOCKET_TIMEOUT.getInt(info);
+            if (socketTimeout > 0) {
+                newStream.setNetworkTimeout(socketTimeout * 1000);
+            }
+
+            String maxResultBuffer = PGProperty.MAX_RESULT_BUFFER.getOrDefault(info);
+            newStream.setMaxResultBuffer(maxResultBuffer);
+
+            // Enable TCP keep-alive probe if required.
+            boolean requireTCPKeepAlive = PGProperty.TCP_KEEP_ALIVE.getBoolean(info);
+            newStream.getSocket().setKeepAlive(requireTCPKeepAlive);
+
+            // Enable TCP no delay if required
+            boolean requireTCPNoDelay = PGProperty.TCP_NO_DELAY.getBoolean(info);
+            newStream.getSocket().setTcpNoDelay(requireTCPNoDelay);
+
+            // Try to set SO_SNDBUF and SO_RECVBUF socket options, if requested.
+            // If receiveBufferSize and send_buffer_size are set to a value greater
+            // than 0, adjust. -1 means use the system default, 0 is ignored since not
+            // supported.
+
+            // Set SO_RECVBUF read buffer size
+            int receiveBufferSize = PGProperty.RECEIVE_BUFFER_SIZE.getInt(info);
+            if (receiveBufferSize > -1) {
+                // value of 0 not a valid buffer size value
+                if (receiveBufferSize > 0) {
+                    newStream.getSocket().setReceiveBufferSize(receiveBufferSize);
+                } else {
+                    LOGGER.log(Level.WARNING, "Ignore invalid value for receiveBufferSize: {0}",
+                            receiveBufferSize);
+                }
+            }
+
+            // Set SO_SNDBUF write buffer size
+            int sendBufferSize = PGProperty.SEND_BUFFER_SIZE.getInt(info);
+            if (sendBufferSize > -1) {
+                if (sendBufferSize > 0) {
+                    newStream.getSocket().setSendBufferSize(sendBufferSize);
+                } else {
+                    LOGGER.log(Level.WARNING, "Ignore invalid value for sendBufferSize: {0}", sendBufferSize);
+                }
+            }
+
+            if (LOGGER.isLoggable(Level.FINE)) {
+                LOGGER.log(Level.FINE, "Receive Buffer Size is {0}",
+                        newStream.getSocket().getReceiveBufferSize());
+                LOGGER.log(Level.FINE, "Send Buffer Size is {0}",
+                        newStream.getSocket().getSendBufferSize());
+            }
+
+            newStream = enableGSSEncrypted(newStream, gssEncMode, hostSpec.getHost(), info, connectTimeout);
+
+            // if we have a security context then gss negotiation succeeded. Do not attempt SSL
+            // negotiation
+            if (!newStream.isGssEncrypted()) {
+                // Construct and send an ssl startup packet if requested.
+                newStream = enableSSL(newStream, sslMode, info, connectTimeout);
+            }
+
+            // Make sure to set network timeout again, in case the stream changed due to GSS or SSL
+            if (socketTimeout > 0) {
+                newStream.setNetworkTimeout(socketTimeout * 1000);
+            }
+
+            List<StartupParam> paramList = getParametersForStartup(user, database, info);
+            sendStartupPacket(newStream, paramList);
+
+            // Do authentication (until AuthenticationOk).
+            doAuthentication(newStream, hostSpec.getHost(), user, info);
+
+            return newStream;
+        } catch (Exception e) {
+            closeStream(newStream);
+            throw e;
+        }
     }
 
     @Override
-    public String toString() {
-      return this.key + "=" + this.value;
-    }
+    public QueryExecutor openConnectionImpl(HostSpec[] hostSpecs, Properties info) throws SQLException {
+        SslMode sslMode = SslMode.of(info);
+        GSSEncMode gssEncMode = GSSEncMode.of(info);
 
-    public byte[] getEncodedKey() {
-      return this.key.getBytes(StandardCharsets.UTF_8);
-    }
-
-    public byte[] getEncodedValue() {
-      return this.value.getBytes(StandardCharsets.UTF_8);
-    }
-  }
-
-  private static final Logger LOGGER = Logger.getLogger(ConnectionFactoryImpl.class.getName());
-  private static final int AUTH_REQ_OK = 0;
-  private static final int AUTH_REQ_KRB4 = 1;
-  private static final int AUTH_REQ_KRB5 = 2;
-  private static final int AUTH_REQ_PASSWORD = 3;
-  private static final int AUTH_REQ_CRYPT = 4;
-  private static final int AUTH_REQ_MD5 = 5;
-  private static final int AUTH_REQ_SCM = 6;
-  private static final int AUTH_REQ_GSS = 7;
-  private static final int AUTH_REQ_GSS_CONTINUE = 8;
-  private static final int AUTH_REQ_SSPI = 9;
-  private static final int AUTH_REQ_SASL = 10;
-  private static final int AUTH_REQ_SASL_CONTINUE = 11;
-  private static final int AUTH_REQ_SASL_FINAL = 12;
-
-  private static final String IN_HOT_STANDBY = "in_hot_standby";
-
-  public ConnectionFactoryImpl() {
-  }
-
-  private PGStream tryConnect(Properties info, SocketFactory socketFactory, HostSpec hostSpec,
-      SslMode sslMode, GSSEncMode gssEncMode)
-      throws SQLException, IOException {
-    int connectTimeout = PGProperty.CONNECT_TIMEOUT.getInt(info) * 1000;
-    String user = PGProperty.USER.getOrDefault(info);
-    String database = PGProperty.PG_DBNAME.getOrDefault(info);
-    if (user == null) {
-      throw new PSQLException(GT.tr("User cannot be null"), PSQLState.INVALID_NAME);
-    }
-    if (database == null) {
-      throw new PSQLException(GT.tr("Database cannot be null"), PSQLState.INVALID_NAME);
-    }
-
-    PGStream newStream = new PGStream(socketFactory, hostSpec, connectTimeout);
-    try {
-      // Set the socket timeout if the "socketTimeout" property has been set.
-      int socketTimeout = PGProperty.SOCKET_TIMEOUT.getInt(info);
-      if (socketTimeout > 0) {
-        newStream.setNetworkTimeout(socketTimeout * 1000);
-      }
-
-      String maxResultBuffer = PGProperty.MAX_RESULT_BUFFER.getOrDefault(info);
-      newStream.setMaxResultBuffer(maxResultBuffer);
-
-      // Enable TCP keep-alive probe if required.
-      boolean requireTCPKeepAlive = PGProperty.TCP_KEEP_ALIVE.getBoolean(info);
-      newStream.getSocket().setKeepAlive(requireTCPKeepAlive);
-
-      // Enable TCP no delay if required
-      boolean requireTCPNoDelay = PGProperty.TCP_NO_DELAY.getBoolean(info);
-      newStream.getSocket().setTcpNoDelay(requireTCPNoDelay);
-
-      // Try to set SO_SNDBUF and SO_RECVBUF socket options, if requested.
-      // If receiveBufferSize and send_buffer_size are set to a value greater
-      // than 0, adjust. -1 means use the system default, 0 is ignored since not
-      // supported.
-
-      // Set SO_RECVBUF read buffer size
-      int receiveBufferSize = PGProperty.RECEIVE_BUFFER_SIZE.getInt(info);
-      if (receiveBufferSize > -1) {
-        // value of 0 not a valid buffer size value
-        if (receiveBufferSize > 0) {
-          newStream.getSocket().setReceiveBufferSize(receiveBufferSize);
-        } else {
-          LOGGER.log(Level.WARNING, "Ignore invalid value for receiveBufferSize: {0}",
-              receiveBufferSize);
-        }
-      }
-
-      // Set SO_SNDBUF write buffer size
-      int sendBufferSize = PGProperty.SEND_BUFFER_SIZE.getInt(info);
-      if (sendBufferSize > -1) {
-        if (sendBufferSize > 0) {
-          newStream.getSocket().setSendBufferSize(sendBufferSize);
-        } else {
-          LOGGER.log(Level.WARNING, "Ignore invalid value for sendBufferSize: {0}", sendBufferSize);
-        }
-      }
-
-      if (LOGGER.isLoggable(Level.FINE)) {
-        LOGGER.log(Level.FINE, "Receive Buffer Size is {0}",
-            newStream.getSocket().getReceiveBufferSize());
-        LOGGER.log(Level.FINE, "Send Buffer Size is {0}",
-            newStream.getSocket().getSendBufferSize());
-      }
-
-      newStream = enableGSSEncrypted(newStream, gssEncMode, hostSpec.getHost(), info, connectTimeout);
-
-      // if we have a security context then gss negotiation succeeded. Do not attempt SSL
-      // negotiation
-      if (!newStream.isGssEncrypted()) {
-        // Construct and send an ssl startup packet if requested.
-        newStream = enableSSL(newStream, sslMode, info, connectTimeout);
-      }
-
-      // Make sure to set network timeout again, in case the stream changed due to GSS or SSL
-      if (socketTimeout > 0) {
-        newStream.setNetworkTimeout(socketTimeout * 1000);
-      }
-
-      List<StartupParam> paramList = getParametersForStartup(user, database, info);
-      sendStartupPacket(newStream, paramList);
-
-      // Do authentication (until AuthenticationOk).
-      doAuthentication(newStream, hostSpec.getHost(), user, info);
-
-      return newStream;
-    } catch (Exception e) {
-      closeStream(newStream);
-      throw e;
-    }
-  }
-
-  @Override
-  public QueryExecutor openConnectionImpl(HostSpec[] hostSpecs, Properties info) throws SQLException {
-    SslMode sslMode = SslMode.of(info);
-    GSSEncMode gssEncMode = GSSEncMode.of(info);
-
-    HostRequirement targetServerType;
-    String targetServerTypeStr = PGProperty.TARGET_SERVER_TYPE.getOrDefault(info);
-    try {
-      targetServerType = HostRequirement.getTargetServerType(targetServerTypeStr);
-    } catch (IllegalArgumentException ex) {
-      throw new PSQLException(
-          GT.tr("Invalid targetServerType value: {0}", targetServerTypeStr),
-          PSQLState.CONNECTION_UNABLE_TO_CONNECT);
-    }
-
-    SocketFactory socketFactory = SocketFactoryFactory.getSocketFactory(info);
-
-    HostChooser hostChooser =
-        HostChooserFactory.createHostChooser(hostSpecs, targetServerType, info);
-    Iterator<CandidateHost> hostIter = hostChooser.iterator();
-    Map<HostSpec, HostStatus> knownStates = new HashMap<>();
-    while (hostIter.hasNext()) {
-      CandidateHost candidateHost = hostIter.next();
-      HostSpec hostSpec = candidateHost.hostSpec;
-      LOGGER.log(Level.FINE, "Trying to establish a protocol version 3 connection to {0}", hostSpec);
-
-      // Note: per-connect-attempt status map is used here instead of GlobalHostStatusTracker
-      // for the case when "no good hosts" match (e.g. all the hosts are known as "connectfail")
-      // In that case, the system tries to connect to each host in order, thus it should not look into
-      // GlobalHostStatusTracker
-      HostStatus knownStatus = knownStates.get(hostSpec);
-      if (knownStatus != null && !candidateHost.targetServerType.allowConnectingTo(knownStatus)) {
-        if (LOGGER.isLoggable(Level.FINER)) {
-          LOGGER.log(Level.FINER, "Known status of host {0} is {1}, and required status was {2}. Will try next host",
-                     new Object[]{hostSpec, knownStatus, candidateHost.targetServerType});
-        }
-        continue;
-      }
-
-      //
-      // Establish a connection.
-      //
-
-      PGStream newStream = null;
-      try {
+        HostRequirement targetServerType;
+        String targetServerTypeStr = PGProperty.TARGET_SERVER_TYPE.getOrDefault(info);
         try {
-          newStream = tryConnect(info, socketFactory, hostSpec, sslMode, gssEncMode);
-        } catch (SQLException e) {
-          if (sslMode == SslMode.PREFER
-              && PSQLState.INVALID_AUTHORIZATION_SPECIFICATION.getState().equals(e.getSQLState())) {
-            // Try non-SSL connection to cover case like "non-ssl only db"
-            // Note: PREFER allows loss of encryption, so no significant harm is made
-            Throwable ex = null;
+            targetServerType = HostRequirement.getTargetServerType(targetServerTypeStr);
+        } catch (IllegalArgumentException ex) {
+            throw new PSQLException(
+                    GT.tr("Invalid targetServerType value: {0}", targetServerTypeStr),
+                    PSQLState.CONNECTION_UNABLE_TO_CONNECT);
+        }
+
+        SocketFactory socketFactory = SocketFactoryFactory.getSocketFactory(info);
+
+        HostChooser hostChooser =
+                HostChooserFactory.createHostChooser(hostSpecs, targetServerType, info);
+        Iterator<CandidateHost> hostIter = hostChooser.iterator();
+        Map<HostSpec, HostStatus> knownStates = new HashMap<>();
+        while (hostIter.hasNext()) {
+            CandidateHost candidateHost = hostIter.next();
+            HostSpec hostSpec = candidateHost.hostSpec;
+            LOGGER.log(Level.FINE, "Trying to establish a protocol version 3 connection to {0}", hostSpec);
+
+            // Note: per-connect-attempt status map is used here instead of GlobalHostStatusTracker
+            // for the case when "no good hosts" match (e.g. all the hosts are known as "connectfail")
+            // In that case, the system tries to connect to each host in order, thus it should not look into
+            // GlobalHostStatusTracker
+            HostStatus knownStatus = knownStates.get(hostSpec);
+            if (knownStatus != null && !candidateHost.targetServerType.allowConnectingTo(knownStatus)) {
+                if (LOGGER.isLoggable(Level.FINER)) {
+                    LOGGER.log(Level.FINER, "Known status of host {0} is {1}, and required status was {2}. Will try next host",
+                            new Object[]{hostSpec, knownStatus, candidateHost.targetServerType});
+                }
+                continue;
+            }
+
+            //
+            // Establish a connection.
+            //
+
+            PGStream newStream = null;
             try {
-              newStream =
-                  tryConnect(info, socketFactory, hostSpec, SslMode.DISABLE, gssEncMode);
-              LOGGER.log(Level.FINE, "Downgraded to non-encrypted connection for host {0}",
-                  hostSpec);
-            } catch (SQLException | IOException ee) {
-              ex = ee;
+                try {
+                    newStream = tryConnect(info, socketFactory, hostSpec, sslMode, gssEncMode);
+                } catch (SQLException e) {
+                    if (sslMode == SslMode.PREFER
+                            && PSQLState.INVALID_AUTHORIZATION_SPECIFICATION.getState().equals(e.getSQLState())) {
+                        // Try non-SSL connection to cover case like "non-ssl only db"
+                        // Note: PREFER allows loss of encryption, so no significant harm is made
+                        Throwable ex = null;
+                        try {
+                            newStream =
+                                    tryConnect(info, socketFactory, hostSpec, SslMode.DISABLE, gssEncMode);
+                            LOGGER.log(Level.FINE, "Downgraded to non-encrypted connection for host {0}",
+                                    hostSpec);
+                        } catch (SQLException | IOException ee) {
+                            ex = ee;
+                        }
+
+                        if (ex != null) {
+                            log(Level.FINE, "sslMode==PREFER, however non-SSL connection failed as well", ex);
+                            // non-SSL failed as well, so re-throw original exception
+                            // Add non-SSL exception as suppressed
+                            e.addSuppressed(ex);
+                            throw e;
+                        }
+                    } else if (sslMode == SslMode.ALLOW
+                            && PSQLState.INVALID_AUTHORIZATION_SPECIFICATION.getState().equals(e.getSQLState())) {
+                        // Try using SSL
+                        Throwable ex = null;
+                        try {
+                            newStream =
+                                    tryConnect(info, socketFactory, hostSpec, SslMode.REQUIRE, gssEncMode);
+                            LOGGER.log(Level.FINE, "Upgraded to encrypted connection for host {0}",
+                                    hostSpec);
+                        } catch (SQLException ee) {
+                            ex = ee;
+                        } catch (IOException ee) {
+                            ex = ee; // Can't use multi-catch in Java 6 :(
+                        }
+                        if (ex != null) {
+                            log(Level.FINE, "sslMode==ALLOW, however SSL connection failed as well", ex);
+                            // non-SSL failed as well, so re-throw original exception
+                            // Add SSL exception as suppressed
+                            e.addSuppressed(ex);
+                            throw e;
+                        }
+
+                    } else {
+                        throw e;
+                    }
+                }
+
+                int cancelSignalTimeout = PGProperty.CANCEL_SIGNAL_TIMEOUT.getInt(info) * 1000;
+
+                // Do final startup.
+                QueryExecutor queryExecutor = new QueryExecutorImpl(newStream, cancelSignalTimeout, info);
+
+                // Check Primary or Secondary
+                HostStatus hostStatus = HostStatus.ConnectOK;
+                if (candidateHost.targetServerType != HostRequirement.any) {
+                    hostStatus = isPrimary(queryExecutor) ? HostStatus.Primary : HostStatus.Secondary;
+                }
+                GlobalHostStatusTracker.reportHostStatus(hostSpec, hostStatus);
+                knownStates.put(hostSpec, hostStatus);
+                if (!candidateHost.targetServerType.allowConnectingTo(hostStatus)) {
+                    queryExecutor.close();
+                    continue;
+                }
+
+                runInitialQueries(queryExecutor, info);
+
+                // And we're done.
+                return queryExecutor;
+            } catch (ConnectException cex) {
+                // Added by Peter Mount <peter@retep.org.uk>
+                // ConnectException is thrown when the connection cannot be made.
+                // we trap this an return a more meaningful message for the end user
+                GlobalHostStatusTracker.reportHostStatus(hostSpec, HostStatus.ConnectFail);
+                knownStates.put(hostSpec, HostStatus.ConnectFail);
+                if (hostIter.hasNext()) {
+                    log(Level.FINE, "ConnectException occurred while connecting to {0}", cex, hostSpec);
+                    // still more addresses to try
+                    continue;
+                }
+                throw new PSQLException(GT.tr(
+                        "Connection to {0} refused. Check that the hostname and port are correct and that the postmaster is accepting TCP/IP connections.",
+                        hostSpec), PSQLState.CONNECTION_UNABLE_TO_CONNECT, cex);
+            } catch (IOException ioe) {
+                closeStream(newStream);
+                GlobalHostStatusTracker.reportHostStatus(hostSpec, HostStatus.ConnectFail);
+                knownStates.put(hostSpec, HostStatus.ConnectFail);
+                if (hostIter.hasNext()) {
+                    log(Level.FINE, "IOException occurred while connecting to {0}", ioe, hostSpec);
+                    // still more addresses to try
+                    continue;
+                }
+                throw new PSQLException(GT.tr("The connection attempt failed."),
+                        PSQLState.CONNECTION_UNABLE_TO_CONNECT, ioe);
+            } catch (SQLException se) {
+                closeStream(newStream);
+                GlobalHostStatusTracker.reportHostStatus(hostSpec, HostStatus.ConnectFail);
+                knownStates.put(hostSpec, HostStatus.ConnectFail);
+                if (hostIter.hasNext()) {
+                    log(Level.FINE, "SQLException occurred while connecting to {0}", se, hostSpec);
+                    // still more addresses to try
+                    continue;
+                }
+                throw se;
             }
+        }
+        throw new PSQLException(GT
+                .tr("Could not find a server with specified targetServerType: {0}", targetServerType),
+                PSQLState.CONNECTION_UNABLE_TO_CONNECT);
+    }
 
-            if (ex != null) {
-              log(Level.FINE, "sslMode==PREFER, however non-SSL connection failed as well", ex);
-              // non-SSL failed as well, so re-throw original exception
-              // Add non-SSL exception as suppressed
-              e.addSuppressed(ex);
-              throw e;
+    private List<StartupParam> getParametersForStartup(String user, String database, Properties info) {
+        List<StartupParam> paramList = new ArrayList<>();
+        paramList.add(new StartupParam("user", user));
+        paramList.add(new StartupParam("database", database));
+        paramList.add(new StartupParam("client_encoding", "UTF8"));
+        paramList.add(new StartupParam("DateStyle", "ISO"));
+        paramList.add(new StartupParam("TimeZone", createPostgresTimeZone()));
+
+        Version assumeVersion = ServerVersion.from(PGProperty.ASSUME_MIN_SERVER_VERSION.getOrDefault(info));
+
+        if (assumeVersion.getVersionNum() >= ServerVersion.v9_0.getVersionNum()) {
+            // User is explicitly telling us this is a 9.0+ server so set properties here:
+            paramList.add(new StartupParam("extra_float_digits", "3"));
+            String appName = PGProperty.APPLICATION_NAME.getOrDefault(info);
+            if (appName != null) {
+                paramList.add(new StartupParam("application_name", appName));
             }
-          } else if (sslMode == SslMode.ALLOW
-              && PSQLState.INVALID_AUTHORIZATION_SPECIFICATION.getState().equals(e.getSQLState())) {
-            // Try using SSL
-            Throwable ex = null;
-            try {
-              newStream =
-                  tryConnect(info, socketFactory, hostSpec, SslMode.REQUIRE, gssEncMode);
-              LOGGER.log(Level.FINE, "Upgraded to encrypted connection for host {0}",
-                  hostSpec);
-            } catch (SQLException ee) {
-              ex = ee;
-            } catch (IOException ee) {
-              ex = ee; // Can't use multi-catch in Java 6 :(
-            }
-            if (ex != null) {
-              log(Level.FINE, "sslMode==ALLOW, however SSL connection failed as well", ex);
-              // non-SSL failed as well, so re-throw original exception
-              // Add SSL exception as suppressed
-              e.addSuppressed(ex);
-              throw e;
-            }
-
-          } else {
-            throw e;
-          }
+        } else {
+            // User has not explicitly told us that this is a 9.0+ server so stick to old default:
+            paramList.add(new StartupParam("extra_float_digits", "2"));
         }
 
-        int cancelSignalTimeout = PGProperty.CANCEL_SIGNAL_TIMEOUT.getInt(info) * 1000;
-
-        // Do final startup.
-        QueryExecutor queryExecutor = new QueryExecutorImpl(newStream, cancelSignalTimeout, info);
-
-        // Check Primary or Secondary
-        HostStatus hostStatus = HostStatus.ConnectOK;
-        if (candidateHost.targetServerType != HostRequirement.any) {
-          hostStatus = isPrimary(queryExecutor) ? HostStatus.Primary : HostStatus.Secondary;
-        }
-        GlobalHostStatusTracker.reportHostStatus(hostSpec, hostStatus);
-        knownStates.put(hostSpec, hostStatus);
-        if (!candidateHost.targetServerType.allowConnectingTo(hostStatus)) {
-          queryExecutor.close();
-          continue;
+        String replication = PGProperty.REPLICATION.getOrDefault(info);
+        if (replication != null && assumeVersion.getVersionNum() >= ServerVersion.v9_4.getVersionNum()) {
+            paramList.add(new StartupParam("replication", replication));
         }
 
-        runInitialQueries(queryExecutor, info);
-
-        // And we're done.
-        return queryExecutor;
-      } catch (ConnectException cex) {
-        // Added by Peter Mount <peter@retep.org.uk>
-        // ConnectException is thrown when the connection cannot be made.
-        // we trap this an return a more meaningful message for the end user
-        GlobalHostStatusTracker.reportHostStatus(hostSpec, HostStatus.ConnectFail);
-        knownStates.put(hostSpec, HostStatus.ConnectFail);
-        if (hostIter.hasNext()) {
-          log(Level.FINE, "ConnectException occurred while connecting to {0}", cex, hostSpec);
-          // still more addresses to try
-          continue;
+        String currentSchema = PGProperty.CURRENT_SCHEMA.getOrDefault(info);
+        if (currentSchema != null) {
+            paramList.add(new StartupParam("search_path", currentSchema));
         }
-        throw new PSQLException(GT.tr(
-            "Connection to {0} refused. Check that the hostname and port are correct and that the postmaster is accepting TCP/IP connections.",
-            hostSpec), PSQLState.CONNECTION_UNABLE_TO_CONNECT, cex);
-      } catch (IOException ioe) {
-        closeStream(newStream);
-        GlobalHostStatusTracker.reportHostStatus(hostSpec, HostStatus.ConnectFail);
-        knownStates.put(hostSpec, HostStatus.ConnectFail);
-        if (hostIter.hasNext()) {
-          log(Level.FINE, "IOException occurred while connecting to {0}", ioe, hostSpec);
-          // still more addresses to try
-          continue;
+
+        String options = PGProperty.OPTIONS.getOrDefault(info);
+        if (options != null) {
+            paramList.add(new StartupParam("options", options));
         }
-        throw new PSQLException(GT.tr("The connection attempt failed."),
-            PSQLState.CONNECTION_UNABLE_TO_CONNECT, ioe);
-      } catch (SQLException se) {
-        closeStream(newStream);
-        GlobalHostStatusTracker.reportHostStatus(hostSpec, HostStatus.ConnectFail);
-        knownStates.put(hostSpec, HostStatus.ConnectFail);
-        if (hostIter.hasNext()) {
-          log(Level.FINE, "SQLException occurred while connecting to {0}", se, hostSpec);
-          // still more addresses to try
-          continue;
+
+        return paramList;
+    }
+
+    @SuppressWarnings("fallthrough")
+    private PGStream enableGSSEncrypted(PGStream pgStream, GSSEncMode gssEncMode, String host, Properties info,
+                                        int connectTimeout)
+            throws IOException, PSQLException {
+
+        if (gssEncMode == GSSEncMode.DISABLE) {
+            return pgStream;
         }
-        throw se;
-      }
-    }
-    throw new PSQLException(GT
-        .tr("Could not find a server with specified targetServerType: {0}", targetServerType),
-        PSQLState.CONNECTION_UNABLE_TO_CONNECT);
-  }
 
-  private List<StartupParam> getParametersForStartup(String user, String database, Properties info) {
-    List<StartupParam> paramList = new ArrayList<>();
-    paramList.add(new StartupParam("user", user));
-    paramList.add(new StartupParam("database", database));
-    paramList.add(new StartupParam("client_encoding", "UTF8"));
-    paramList.add(new StartupParam("DateStyle", "ISO"));
-    paramList.add(new StartupParam("TimeZone", createPostgresTimeZone()));
-
-    Version assumeVersion = ServerVersion.from(PGProperty.ASSUME_MIN_SERVER_VERSION.getOrDefault(info));
-
-    if (assumeVersion.getVersionNum() >= ServerVersion.v9_0.getVersionNum()) {
-      // User is explicitly telling us this is a 9.0+ server so set properties here:
-      paramList.add(new StartupParam("extra_float_digits", "3"));
-      String appName = PGProperty.APPLICATION_NAME.getOrDefault(info);
-      if (appName != null) {
-        paramList.add(new StartupParam("application_name", appName));
-      }
-    } else {
-      // User has not explicitly told us that this is a 9.0+ server so stick to old default:
-      paramList.add(new StartupParam("extra_float_digits", "2"));
-    }
-
-    String replication = PGProperty.REPLICATION.getOrDefault(info);
-    if (replication != null && assumeVersion.getVersionNum() >= ServerVersion.v9_4.getVersionNum()) {
-      paramList.add(new StartupParam("replication", replication));
-    }
-
-    String currentSchema = PGProperty.CURRENT_SCHEMA.getOrDefault(info);
-    if (currentSchema != null) {
-      paramList.add(new StartupParam("search_path", currentSchema));
-    }
-
-    String options = PGProperty.OPTIONS.getOrDefault(info);
-    if (options != null) {
-      paramList.add(new StartupParam("options", options));
-    }
-
-    return paramList;
-  }
-
-  private static void log(Level level, String msg, Throwable thrown, Object... params) {
-    if (!LOGGER.isLoggable(level)) {
-      return;
-    }
-    LogRecord rec = new LogRecord(level, msg);
-    // Set the loggerName of the LogRecord with the current logger
-    rec.setLoggerName(LOGGER.getName());
-    rec.setParameters(params);
-    rec.setThrown(thrown);
-    LOGGER.log(rec);
-  }
-
-  /**
-   * Convert Java time zone to postgres time zone. All others stay the same except that GMT+nn
-   * changes to GMT-nn and vise versa.
-   * If you provide GMT+/-nn postgres uses POSIX rules which has a positive sign for west of Greenwich
-   * JAVA uses ISO rules which the positive sign is east of Greenwich
-   * To make matters more interesting postgres will always report in ISO
-   *
-   * @return The current JVM time zone in postgresql format.
-   */
-  private static String createPostgresTimeZone() {
-    String tz = TimeZone.getDefault().getID();
-    if (tz.length() <= 3 || !tz.startsWith("GMT")) {
-      return tz;
-    }
-    char sign = tz.charAt(3);
-    String start;
-    switch (sign) {
-      case '+':
-        start = "GMT-";
-        break;
-      case '-':
-        start = "GMT+";
-        break;
-      default:
-        // unknown type
-        return tz;
-    }
-
-    return start + tz.substring(4);
-  }
-
-  @SuppressWarnings("fallthrough")
-  private PGStream enableGSSEncrypted(PGStream pgStream, GSSEncMode gssEncMode, String host, Properties info,
-                                    int connectTimeout)
-      throws IOException, PSQLException {
-
-    if ( gssEncMode == GSSEncMode.DISABLE ) {
-      return pgStream;
-    }
-
-    if (gssEncMode == GSSEncMode.ALLOW ) {
-      // start with plain text and let the server request it
-      return pgStream;
-    }
+        if (gssEncMode == GSSEncMode.ALLOW) {
+            // start with plain text and let the server request it
+            return pgStream;
+        }
 
     /*
      at this point gssEncMode is either PREFER or REQUIRE
@@ -462,446 +436,468 @@ public class ConnectionFactoryImpl extends ConnectionFactory {
     /*
     let's see if the server will allow a GSS encrypted connection
      */
-    String user = PGProperty.USER.getOrDefault(info);
-    if (user == null) {
-      throw new PSQLException("GSSAPI encryption required but was impossible user is null", PSQLState.CONNECTION_REJECTED);
-    }
-
-    // attempt to acquire a GSS encrypted connection
-    LOGGER.log(Level.FINEST, " FE=> GSSENCRequest");
-
-    int gssTimeout = PGProperty.SSL_RESPONSE_TIMEOUT.getInt(info);
-    int currentTimeout = pgStream.getNetworkTimeout();
-
-    // if the current timeout is less than sslTimeout then
-    // use the smaller timeout. We could do something tricky
-    // here to not set it in that case but this is pretty readable
-    if (currentTimeout > 0 && currentTimeout < gssTimeout) {
-      gssTimeout = currentTimeout;
-    }
-
-    pgStream.setNetworkTimeout(gssTimeout);
-
-    // Send GSSEncryption request packet
-    pgStream.sendInteger4(8);
-    pgStream.sendInteger2(1234);
-    pgStream.sendInteger2(5680);
-    pgStream.flush();
-    // Now get the response from the backend, one of N, E, S.
-    int beresp = pgStream.receiveChar();
-    pgStream.setNetworkTimeout(currentTimeout);
-    switch (beresp) {
-      case 'E':
-        LOGGER.log(Level.FINEST, " <=BE GSSEncrypted Error");
-
-        // Server doesn't even know about the SSL handshake protocol
-        if (gssEncMode.requireEncryption()) {
-          throw new PSQLException(GT.tr("The server does not support GSS Encoding."),
-              PSQLState.CONNECTION_REJECTED);
+        String user = PGProperty.USER.getOrDefault(info);
+        if (user == null) {
+            throw new PSQLException("GSSAPI encryption required but was impossible user is null", PSQLState.CONNECTION_REJECTED);
         }
 
-        // We have to reconnect to continue.
-        pgStream.close();
-        return new PGStream(pgStream.getSocketFactory(), pgStream.getHostSpec(), connectTimeout);
+        // attempt to acquire a GSS encrypted connection
+        LOGGER.log(Level.FINEST, " FE=> GSSENCRequest");
 
-      case 'N':
-        LOGGER.log(Level.FINEST, " <=BE GSSEncrypted Refused");
+        int gssTimeout = PGProperty.SSL_RESPONSE_TIMEOUT.getInt(info);
+        int currentTimeout = pgStream.getNetworkTimeout();
 
-        // Server does not support gss encryption
-        if (gssEncMode.requireEncryption()) {
-          throw new PSQLException(GT.tr("The server does not support GSS Encryption."),
-              PSQLState.CONNECTION_REJECTED);
+        // if the current timeout is less than gssTimeout then
+        // use the smaller timeout. We could do something tricky
+        // here to not set it in that case but this is pretty readable
+        if (currentTimeout > 0 && currentTimeout < gssTimeout) {
+            gssTimeout = currentTimeout;
         }
 
-        return pgStream;
+        pgStream.setNetworkTimeout(gssTimeout);
 
-      case 'G':
-        LOGGER.log(Level.FINEST, " <=BE GSSEncryptedOk");
-        try {
-          AuthenticationPluginManager.withPassword(AuthenticationRequestType.GSS, info, password -> {
-            MakeGSS.authenticate(true, pgStream, host, user, password,
-                PGProperty.JAAS_APPLICATION_NAME.getOrDefault(info),
-                PGProperty.KERBEROS_SERVER_NAME.getOrDefault(info), false, // TODO: fix this
-                PGProperty.JAAS_LOGIN.getBoolean(info),
-                PGProperty.LOG_SERVER_ERROR_DETAIL.getBoolean(info));
-            return void.class;
-          });
-          return pgStream;
-        } catch (PSQLException ex) {
-          // allow the connection to proceed
-          if (gssEncMode == GSSEncMode.PREFER) {
-            // we have to reconnect to continue
-            return new PGStream(pgStream, connectTimeout);
-          }
-        }
-        // fallthrough
-
-      default:
-        throw new PSQLException(GT.tr("An error occurred while setting up the GSS Encoded connection."),
-            PSQLState.PROTOCOL_VIOLATION);
-    }
-  }
-
-  private PGStream enableSSL(PGStream pgStream, SslMode sslMode, Properties info,
-      int connectTimeout)
-      throws IOException, PSQLException {
-    if (sslMode == SslMode.DISABLE) {
-      return pgStream;
-    }
-    if (sslMode == SslMode.ALLOW) {
-      // Allow ==> start with plaintext, use encryption if required by server
-      return pgStream;
-    }
-
-    LOGGER.log(Level.FINEST, " FE=> SSLRequest");
-
-    int sslTimeout = PGProperty.SSL_RESPONSE_TIMEOUT.getInt(info);
-    int currentTimeout = pgStream.getNetworkTimeout();
-
-    // if the current timeout is less than sslTimeout then
-    // use the smaller timeout. We could do something tricky
-    // here to not set it in that case but this is pretty readable
-    if (currentTimeout > 0 && currentTimeout < sslTimeout) {
-      sslTimeout = currentTimeout;
-    }
-
-    pgStream.setNetworkTimeout(sslTimeout);
-    // Send SSL request packet
-    pgStream.sendInteger4(8);
-    pgStream.sendInteger2(1234);
-    pgStream.sendInteger2(5679);
-    pgStream.flush();
-
-    // Now get the response from the backend, one of N, E, S.
-    int beresp = pgStream.receiveChar();
-    pgStream.setNetworkTimeout(currentTimeout);
-
-    switch (beresp) {
-      case 'E':
-        LOGGER.log(Level.FINEST, " <=BE SSLError");
-
-        // Server doesn't even know about the SSL handshake protocol
-        if (sslMode.requireEncryption()) {
-          throw new PSQLException(GT.tr("The server does not support SSL."),
-              PSQLState.CONNECTION_REJECTED);
-        }
-
-        // We have to reconnect to continue.
-        return new PGStream(pgStream, connectTimeout);
-
-      case 'N':
-        LOGGER.log(Level.FINEST, " <=BE SSLRefused");
-
-        // Server does not support ssl
-        if (sslMode.requireEncryption()) {
-          throw new PSQLException(GT.tr("The server does not support SSL."),
-              PSQLState.CONNECTION_REJECTED);
-        }
-
-        return pgStream;
-
-      case 'S':
-        LOGGER.log(Level.FINEST, " <=BE SSLOk");
-
-        // Server supports ssl
-        MakeSSL.convert(pgStream, info);
-        return pgStream;
-
-      default:
-        throw new PSQLException(GT.tr("An error occurred while setting up the SSL connection."),
-            PSQLState.PROTOCOL_VIOLATION);
-    }
-  }
-
-  private void sendStartupPacket(PGStream pgStream, List<StartupParam> params)
-      throws IOException {
-    if (LOGGER.isLoggable(Level.FINEST)) {
-      StringBuilder details = new StringBuilder();
-      for (int i = 0; i < params.size(); i++) {
-        if (i != 0) {
-          details.append(", ");
-        }
-        details.append(params.get(i).toString());
-      }
-      LOGGER.log(Level.FINEST, " FE=> StartupPacket({0})", details);
-    }
-
-    // Precalculate message length and encode params.
-    int length = 4 + 4;
-    byte[][] encodedParams = new byte[params.size() * 2][];
-    for (int i = 0; i < params.size(); i++) {
-      encodedParams[i * 2] = params.get(i).getEncodedKey();
-      encodedParams[i * 2 + 1] = params.get(i).getEncodedValue();
-      length += encodedParams[i * 2].length + 1 + encodedParams[i * 2 + 1].length + 1;
-    }
-
-    length += 1; // Terminating \0
-
-    // Send the startup message.
-    pgStream.sendInteger4(length);
-    pgStream.sendInteger2(3); // protocol major
-    pgStream.sendInteger2(0); // protocol minor
-    for (byte[] encodedParam : encodedParams) {
-      pgStream.send(encodedParam);
-      pgStream.sendChar(0);
-    }
-
-    pgStream.sendChar(0);
-    pgStream.flush();
-  }
-
-  private void doAuthentication(PGStream pgStream, String host, String user, Properties info) throws IOException, SQLException {
-    // Now get the response from the backend, either an error message
-    // or an authentication request
-
-    /* SCRAM authentication state, if used */
-    ScramAuthenticator scramAuthenticator = null;
-
-      authloop:
-      while (true) {
+        // Send GSSEncryption request packet
+        pgStream.sendInteger4(8);
+        pgStream.sendInteger2(1234);
+        pgStream.sendInteger2(5680);
+        pgStream.flush();
+        // Now get the response from the backend, one of E, N, G.
         int beresp = pgStream.receiveChar();
+        pgStream.setNetworkTimeout(currentTimeout);
+        switch (beresp) {
+            case 'E':
+                LOGGER.log(Level.FINEST, " <=BE GSSEncrypted Error");
+
+                // Server doesn't even know about the GSS encryption handshake protocol
+                if (gssEncMode.requireEncryption()) {
+                    throw new PSQLException(GT.tr("The server does not support GSS Encoding."),
+                            PSQLState.CONNECTION_REJECTED);
+                }
+
+                // We have to reconnect to continue.
+                pgStream.close();
+                return new PGStream(pgStream.getSocketFactory(), pgStream.getHostSpec(), connectTimeout);
+
+            case 'N':
+                LOGGER.log(Level.FINEST, " <=BE GSSEncrypted Refused");
+
+                // Server does not support gss encryption
+                if (gssEncMode.requireEncryption()) {
+                    throw new PSQLException(GT.tr("The server does not support GSS Encryption."),
+                            PSQLState.CONNECTION_REJECTED);
+                }
+
+                return pgStream;
+
+            case 'G':
+                LOGGER.log(Level.FINEST, " <=BE GSSEncryptedOk");
+                try {
+                    AuthenticationPluginManager.withPassword(AuthenticationRequestType.GSS, info, password -> {
+                        MakeGSS.authenticate(true, pgStream, host, user, password,
+                                PGProperty.JAAS_APPLICATION_NAME.getOrDefault(info),
+                                PGProperty.KERBEROS_SERVER_NAME.getOrDefault(info), false, // TODO: fix this
+                                PGProperty.JAAS_LOGIN.getBoolean(info),
+                                PGProperty.LOG_SERVER_ERROR_DETAIL.getBoolean(info));
+                        return void.class;
+                    });
+                    return pgStream;
+                } catch (PSQLException ex) {
+                    // allow the connection to proceed
+                    if (gssEncMode == GSSEncMode.PREFER) {
+                        // we have to reconnect to continue
+                        return new PGStream(pgStream, connectTimeout);
+                    }
+                }
+                // fallthrough
+
+            default:
+                throw new PSQLException(GT.tr("An error occurred while setting up the GSS Encoded connection."),
+                        PSQLState.PROTOCOL_VIOLATION);
+        }
+    }
+
+    private PGStream enableSSL(PGStream pgStream, SslMode sslMode, Properties info,
+                               int connectTimeout)
+            throws IOException, PSQLException {
+        if (sslMode == SslMode.DISABLE) {
+            return pgStream;
+        }
+        if (sslMode == SslMode.ALLOW) {
+            // Allow ==> start with plaintext, use encryption if required by server
+            return pgStream;
+        }
+
+        LOGGER.log(Level.FINEST, " FE=> SSLRequest");
+
+        int sslTimeout = PGProperty.SSL_RESPONSE_TIMEOUT.getInt(info);
+        int currentTimeout = pgStream.getNetworkTimeout();
+
+        // if the current timeout is less than sslTimeout then
+        // use the smaller timeout. We could do something tricky
+        // here to not set it in that case but this is pretty readable
+        if (currentTimeout > 0 && currentTimeout < sslTimeout) {
+            sslTimeout = currentTimeout;
+        }
+
+        pgStream.setNetworkTimeout(sslTimeout);
+        // Send SSL request packet
+        pgStream.sendInteger4(8);
+        pgStream.sendInteger2(1234);
+        pgStream.sendInteger2(5679);
+        pgStream.flush();
+
+        // Now get the response from the backend, one of N, E, S.
+        int beresp = pgStream.receiveChar();
+        pgStream.setNetworkTimeout(currentTimeout);
 
         switch (beresp) {
-          case 'E':
-            // An error occurred, so pass the error message to the
-            // user.
-            //
-            // The most common one to be thrown here is:
-            // "User authentication failed"
-            //
-            int elen = pgStream.receiveInteger4();
+            case 'E':
+                LOGGER.log(Level.FINEST, " <=BE SSLError");
 
-            ServerErrorMessage errorMsg =
-                    new ServerErrorMessage(pgStream.receiveErrorString(elen - 4));
-            LOGGER.log(Level.FINEST, " <=BE ErrorMessage({0})", errorMsg);
-            throw new PSQLException(errorMsg, PGProperty.LOG_SERVER_ERROR_DETAIL.getBoolean(info));
-
-          case 'R':
-            // Authentication request.
-            // Get the message length
-            int msgLen = pgStream.receiveInteger4();
-
-            // Get the type of request
-            int areq = pgStream.receiveInteger4();
-
-            // Process the request.
-            switch (areq) {
-              case AUTH_REQ_MD5: {
-                byte[] md5Salt = pgStream.receive(4);
-                if (LOGGER.isLoggable(Level.FINEST)) {
-                  LOGGER.log(Level.FINEST, " <=BE AuthenticationReqMD5(salt={0})", Utils.toHexString(md5Salt));
+                // Server doesn't even know about the SSL handshake protocol
+                if (sslMode.requireEncryption()) {
+                    throw new PSQLException(GT.tr("The server does not support SSL."),
+                            PSQLState.CONNECTION_REJECTED);
                 }
 
-                byte[] digest = AuthenticationPluginManager.withEncodedPassword(
-                        AuthenticationRequestType.MD5_PASSWORD, info,
-                        encodedPassword -> MD5Digest.encode(user.getBytes(StandardCharsets.UTF_8),
-                                encodedPassword, md5Salt)
-                );
+                // We have to reconnect to continue.
+                return new PGStream(pgStream, connectTimeout);
 
-                if (LOGGER.isLoggable(Level.FINEST)) {
-                  LOGGER.log(Level.FINEST, " FE=> Password(md5digest={0})", new String(digest, StandardCharsets.US_ASCII));
+            case 'N':
+                LOGGER.log(Level.FINEST, " <=BE SSLRefused");
+
+                // Server does not support ssl
+                if (sslMode.requireEncryption()) {
+                    throw new PSQLException(GT.tr("The server does not support SSL."),
+                            PSQLState.CONNECTION_REJECTED);
                 }
 
-                try {
-                  pgStream.sendChar('p');
-                  pgStream.sendInteger4(4 + digest.length + 1);
-                  pgStream.send(digest);
-                } finally {
-                  Arrays.fill(digest, (byte) 0);
-                }
-                pgStream.sendChar(0);
-                pgStream.flush();
+                return pgStream;
 
-                break;
-              }
+            case 'S':
+                LOGGER.log(Level.FINEST, " <=BE SSLOk");
 
-              case AUTH_REQ_PASSWORD: {
-                LOGGER.log(Level.FINEST, "<=BE AuthenticationReqPassword");
-                LOGGER.log(Level.FINEST, " FE=> Password(password=<not shown>)");
+                // Server supports ssl
+                MakeSSL.convert(pgStream, info);
+                return pgStream;
 
-                AuthenticationPluginManager.withEncodedPassword(AuthenticationRequestType.CLEARTEXT_PASSWORD, info, encodedPassword -> {
-                  pgStream.sendChar('p');
-                  pgStream.sendInteger4(4 + encodedPassword.length + 1);
-                  pgStream.send(encodedPassword);
-                  return void.class;
-                });
-                pgStream.sendChar(0);
-                pgStream.flush();
-
-                break;
-              }
-
-              case AUTH_REQ_GSS:
-                /*
-                 * Use GSSAPI if requested on all platforms, via JSSE.
-                 *
-                 * Note that this is slightly different to libpq, which uses SSPI for GSSAPI where
-                 * supported. We prefer to use the existing Java JSSE Kerberos support rather than
-                 * going to native (via JNA) calls where possible, so that JSSE system properties
-                 * etc continue to work normally.
-                 *
-                 * Note that while SSPI is often Kerberos-based there's no guarantee it will be; it
-                 * may be NTLM or anything else. If the client responds to an SSPI request via
-                 * GSSAPI and the other end isn't using Kerberos for SSPI then authentication will
-                 * fail.
-                 */
-                final String gsslib = PGProperty.GSS_LIB.getOrDefault(info);
-                final boolean usespnego = PGProperty.USE_SPNEGO.getBoolean(info);
-
-                /*
-                 * Use gssapi. If the user has specified a Kerberos server
-                 * name we'll always use JSSE GSSAPI.
-                 */
-                if ("gssapi".equals(gsslib)) {
-                  LOGGER.log(Level.FINE, "Using JSSE GSSAPI, param gsslib=gssapi");
-                }
-
-                /* Use JGSS's GSSAPI for this request */
-                AuthenticationPluginManager.withPassword(AuthenticationRequestType.GSS, info, password -> {
-                  MakeGSS.authenticate(false, pgStream, host, user, password,
-                          PGProperty.JAAS_APPLICATION_NAME.getOrDefault(info),
-                          PGProperty.KERBEROS_SERVER_NAME.getOrDefault(info), usespnego,
-                          PGProperty.JAAS_LOGIN.getBoolean(info),
-                          PGProperty.LOG_SERVER_ERROR_DETAIL.getBoolean(info));
-                  return void.class;
-                });
-                break;
-
-              case AUTH_REQ_GSS_CONTINUE:
-                // unused
-                break;
-
-              case AUTH_REQ_SASL:
-
-              LOGGER.log(Level.FINEST, " <=BE AuthenticationSASL");
-
-              scramAuthenticator = AuthenticationPluginManager.withPassword(AuthenticationRequestType.SASL, info, password -> {
-                if (password == null) {
-                  throw new PSQLException(
-                          GT.tr(
-                                  "The server requested SCRAM-based authentication, but no password was provided."),
-                          PSQLState.CONNECTION_REJECTED);
-                }
-                if (password.length == 0) {
-                  throw new PSQLException(
-                          GT.tr(
-                                  "The server requested SCRAM-based authentication, but the password is an empty string."),
-                          PSQLState.CONNECTION_REJECTED);
-                }
-                return new ScramAuthenticator(user, String.valueOf(password), pgStream);
-              });
-              scramAuthenticator.processServerMechanismsAndInit();
-              scramAuthenticator.sendScramClientFirstMessage();
-              // This works as follows:
-              // 1. When tests is run from IDE, it is assumed SCRAM library is on the classpath
-              // 2. In regular build for Java < 8 this `if` is deactivated and the code always throws
-              if (false) {
-                throw new PSQLException(GT.tr(
-                        "SCRAM authentication is not supported by this driver. You need JDK >= 8 and pgjdbc >= 42.2.0 (not \".jre\" versions)",
-                        areq), PSQLState.CONNECTION_REJECTED);
-              }
-              break;
-
-              case AUTH_REQ_SASL_CONTINUE:
-                scramAuthenticator.processServerFirstMessage(msgLen - 4 - 4);
-                break;
-
-              case AUTH_REQ_SASL_FINAL:
-                scramAuthenticator.verifyServerSignature(msgLen - 4 - 4);
-                break;
-
-              case AUTH_REQ_OK:
-                /* Cleanup after successful authentication */
-                LOGGER.log(Level.FINEST, " <=BE AuthenticationOk");
-                break authloop; // We're done.
-
-              default:
-                LOGGER.log(Level.FINEST, " <=BE AuthenticationReq (unsupported type {0})", areq);
-                throw new PSQLException(GT.tr(
-                        "The authentication type {0} is not supported. Check that you have configured the pg_hba.conf file to include the client''s IP address or subnet, and that it is using an authentication scheme supported by the driver.",
-                        areq), PSQLState.CONNECTION_REJECTED);
-            }
-
-            break;
-
-          default:
-            throw new PSQLException(GT.tr("Protocol error.  Session setup failed."),
-                    PSQLState.PROTOCOL_VIOLATION);
+            default:
+                throw new PSQLException(GT.tr("An error occurred while setting up the SSL connection."),
+                        PSQLState.PROTOCOL_VIOLATION);
         }
-      }
-  }
-
-  @SuppressWarnings("deprecation")
-  private void runInitialQueries(QueryExecutor queryExecutor, Properties info)
-      throws SQLException {
-    String assumeMinServerVersion = PGProperty.ASSUME_MIN_SERVER_VERSION.getOrDefault(info);
-    if (Utils.parseServerVersionStr(assumeMinServerVersion) >= ServerVersion.v9_0.getVersionNum()) {
-      // We already sent the parameter values in the StartupMessage so skip this
-      return;
     }
 
-    final int dbVersion = queryExecutor.getServerVersionNum();
+    private void sendStartupPacket(PGStream pgStream, List<StartupParam> params)
+            throws IOException {
+        if (LOGGER.isLoggable(Level.FINEST)) {
+            StringBuilder details = new StringBuilder();
+            for (int i = 0; i < params.size(); i++) {
+                if (i != 0) {
+                    details.append(", ");
+                }
+                details.append(params.get(i).toString());
+            }
+            LOGGER.log(Level.FINEST, " FE=> StartupPacket({0})", details);
+        }
 
-    if (PGProperty.GROUP_STARTUP_PARAMETERS.getBoolean(info) && dbVersion >= ServerVersion.v9_0.getVersionNum()) {
-      SetupQueryRunner.run(queryExecutor, "BEGIN", false);
+        // Precalculate message length and encode params.
+        int length = 4 + 4;
+        byte[][] encodedParams = new byte[params.size() * 2][];
+        for (int i = 0; i < params.size(); i++) {
+            encodedParams[i * 2] = params.get(i).getEncodedKey();
+            encodedParams[i * 2 + 1] = params.get(i).getEncodedValue();
+            length += encodedParams[i * 2].length + 1 + encodedParams[i * 2 + 1].length + 1;
+        }
+
+        length += 1; // Terminating \0
+
+        // Send the startup message.
+        pgStream.sendInteger4(length);
+        pgStream.sendInteger2(3); // protocol major
+        pgStream.sendInteger2(0); // protocol minor
+        for (byte[] encodedParam : encodedParams) {
+            pgStream.send(encodedParam);
+            pgStream.sendChar(0);
+        }
+
+        pgStream.sendChar(0);
+        pgStream.flush();
     }
 
-    if (dbVersion >= ServerVersion.v9_0.getVersionNum()) {
-      SetupQueryRunner.run(queryExecutor, "SET extra_float_digits = 3", false);
+    private void doAuthentication(PGStream pgStream, String host, String user, Properties info) throws IOException, SQLException {
+        // Now get the response from the backend, either an error message
+        // or an authentication request
+
+        /* SCRAM authentication state, if used */
+        ScramAuthenticator scramAuthenticator = null;
+
+        authloop:
+        while (true) {
+            int beresp = pgStream.receiveChar();
+
+            switch (beresp) {
+                case 'E':
+                    // An error occurred, so pass the error message to the
+                    // user.
+                    //
+                    // The most common one to be thrown here is:
+                    // "User authentication failed"
+                    //
+                    int elen = pgStream.receiveInteger4();
+
+                    ServerErrorMessage errorMsg =
+                            new ServerErrorMessage(pgStream.receiveErrorString(elen - 4));
+                    LOGGER.log(Level.FINEST, " <=BE ErrorMessage({0})", errorMsg);
+                    throw new PSQLException(errorMsg, PGProperty.LOG_SERVER_ERROR_DETAIL.getBoolean(info));
+
+                case 'R':
+                    // Authentication request.
+                    // Get the message length
+                    int msgLen = pgStream.receiveInteger4();
+
+                    // Get the type of request
+                    int areq = pgStream.receiveInteger4();
+
+                    // Process the request.
+                    switch (areq) {
+                        case AUTH_REQ_MD5: {
+                            byte[] md5Salt = pgStream.receive(4);
+                            if (LOGGER.isLoggable(Level.FINEST)) {
+                                LOGGER.log(Level.FINEST, " <=BE AuthenticationReqMD5(salt={0})", Utils.toHexString(md5Salt));
+                            }
+
+                            byte[] digest = AuthenticationPluginManager.withEncodedPassword(
+                                    AuthenticationRequestType.MD5_PASSWORD, info,
+                                    encodedPassword -> MD5Digest.encode(user.getBytes(StandardCharsets.UTF_8),
+                                            encodedPassword, md5Salt)
+                            );
+
+                            if (LOGGER.isLoggable(Level.FINEST)) {
+                                LOGGER.log(Level.FINEST, " FE=> Password(md5digest={0})", new String(digest, StandardCharsets.US_ASCII));
+                            }
+
+                            try {
+                                pgStream.sendChar('p');
+                                pgStream.sendInteger4(4 + digest.length + 1);
+                                pgStream.send(digest);
+                            } finally {
+                                Arrays.fill(digest, (byte) 0);
+                            }
+                            pgStream.sendChar(0);
+                            pgStream.flush();
+
+                            break;
+                        }
+
+                        case AUTH_REQ_PASSWORD: {
+                            LOGGER.log(Level.FINEST, "<=BE AuthenticationReqPassword");
+                            LOGGER.log(Level.FINEST, " FE=> Password(password=<not shown>)");
+
+                            AuthenticationPluginManager.withEncodedPassword(AuthenticationRequestType.CLEARTEXT_PASSWORD, info, encodedPassword -> {
+                                pgStream.sendChar('p');
+                                pgStream.sendInteger4(4 + encodedPassword.length + 1);
+                                pgStream.send(encodedPassword);
+                                return void.class;
+                            });
+                            pgStream.sendChar(0);
+                            pgStream.flush();
+
+                            break;
+                        }
+
+                        case AUTH_REQ_GSS:
+                            /*
+                             * Use GSSAPI if requested on all platforms, via JSSE.
+                             *
+                             * Note that this is slightly different to libpq, which uses SSPI for GSSAPI where
+                             * supported. We prefer to use the existing Java JSSE Kerberos support rather than
+                             * going to native (via JNA) calls where possible, so that JSSE system properties
+                             * etc continue to work normally.
+                             *
+                             * Note that while SSPI is often Kerberos-based there's no guarantee it will be; it
+                             * may be NTLM or anything else. If the client responds to an SSPI request via
+                             * GSSAPI and the other end isn't using Kerberos for SSPI then authentication will
+                             * fail.
+                             */
+                            final String gsslib = PGProperty.GSS_LIB.getOrDefault(info);
+                            final boolean usespnego = PGProperty.USE_SPNEGO.getBoolean(info);
+
+                            /*
+                             * Use gssapi. If the user has specified a Kerberos server
+                             * name we'll always use JSSE GSSAPI.
+                             */
+                            if ("gssapi".equals(gsslib)) {
+                                LOGGER.log(Level.FINE, "Using JSSE GSSAPI, param gsslib=gssapi");
+                            }
+
+                            /* Use JGSS's GSSAPI for this request */
+                            AuthenticationPluginManager.withPassword(AuthenticationRequestType.GSS, info, password -> {
+                                MakeGSS.authenticate(false, pgStream, host, user, password,
+                                        PGProperty.JAAS_APPLICATION_NAME.getOrDefault(info),
+                                        PGProperty.KERBEROS_SERVER_NAME.getOrDefault(info), usespnego,
+                                        PGProperty.JAAS_LOGIN.getBoolean(info),
+                                        PGProperty.LOG_SERVER_ERROR_DETAIL.getBoolean(info));
+                                return void.class;
+                            });
+                            break;
+
+                        case AUTH_REQ_GSS_CONTINUE:
+                            // unused: GSS continuation is presumably consumed inside MakeGSS.authenticate — TODO confirm
+                            break;
+
+                        case AUTH_REQ_SASL:
+
+                            LOGGER.log(Level.FINEST, " <=BE AuthenticationSASL");
+
+                            scramAuthenticator = AuthenticationPluginManager.withPassword(AuthenticationRequestType.SASL, info, password -> {
+                                if (password == null) {
+                                    throw new PSQLException(
+                                            GT.tr(
+                                                    "The server requested SCRAM-based authentication, but no password was provided."),
+                                            PSQLState.CONNECTION_REJECTED);
+                                }
+                                if (password.length == 0) {
+                                    throw new PSQLException(
+                                            GT.tr(
+                                                    "The server requested SCRAM-based authentication, but the password is an empty string."),
+                                            PSQLState.CONNECTION_REJECTED);
+                                }
+                                return new ScramAuthenticator(user, String.valueOf(password), pgStream);
+                            });
+                            scramAuthenticator.processServerMechanismsAndInit();
+                            scramAuthenticator.sendScramClientFirstMessage();
+                            // NOTE(review): the `if (false)` below is dead code retained from
+                            // pgjdbc's conditional compilation: builds for Java < 8 rewrote the
+                            // condition so the driver always threw when SCRAM was unavailable.
+                            if (false) {
+                                throw new PSQLException(GT.tr(
+                                        "SCRAM authentication is not supported by this driver. You need JDK >= 8 and pgjdbc >= 42.2.0 (not \".jre\" versions)",
+                                        areq), PSQLState.CONNECTION_REJECTED);
+                            }
+                            break;
+
+                        case AUTH_REQ_SASL_CONTINUE:
+                            scramAuthenticator.processServerFirstMessage(msgLen - 4 - 4);
+                            break;
+
+                        case AUTH_REQ_SASL_FINAL:
+                            scramAuthenticator.verifyServerSignature(msgLen - 4 - 4);
+                            break;
+
+                        case AUTH_REQ_OK:
+                            /* Cleanup after successful authentication */
+                            LOGGER.log(Level.FINEST, " <=BE AuthenticationOk");
+                            break authloop; // We're done.
+
+                        default:
+                            LOGGER.log(Level.FINEST, " <=BE AuthenticationReq (unsupported type {0})", areq);
+                            throw new PSQLException(GT.tr(
+                                    "The authentication type {0} is not supported. Check that you have configured the pg_hba.conf file to include the client''s IP address or subnet, and that it is using an authentication scheme supported by the driver.",
+                                    areq), PSQLState.CONNECTION_REJECTED);
+                    }
+
+                    break;
+
+                default:
+                    throw new PSQLException(GT.tr("Protocol error.  Session setup failed."),
+                            PSQLState.PROTOCOL_VIOLATION);
+            }
+        }
     }
 
-    String appName = PGProperty.APPLICATION_NAME.getOrDefault(info);
-    if (appName != null && dbVersion >= ServerVersion.v9_0.getVersionNum()) {
-      StringBuilder sql = new StringBuilder();
-      sql.append("SET application_name = '");
-      Utils.escapeLiteral(sql, appName, queryExecutor.getStandardConformingStrings());
-      sql.append("'");
-      SetupQueryRunner.run(queryExecutor, sql.toString(), false);
+    @SuppressWarnings("deprecation")
+    private void runInitialQueries(QueryExecutor queryExecutor, Properties info)
+            throws SQLException {
+        String assumeMinServerVersion = PGProperty.ASSUME_MIN_SERVER_VERSION.getOrDefault(info);
+        if (Utils.parseServerVersionStr(assumeMinServerVersion) >= ServerVersion.v9_0.getVersionNum()) {
+            // We already sent the parameter values in the StartupMessage so skip this
+            return;
+        }
+
+        final int dbVersion = queryExecutor.getServerVersionNum();
+
+        if (PGProperty.GROUP_STARTUP_PARAMETERS.getBoolean(info) && dbVersion >= ServerVersion.v9_0.getVersionNum()) {
+            SetupQueryRunner.run(queryExecutor, "BEGIN", false);
+        }
+
+        if (dbVersion >= ServerVersion.v9_0.getVersionNum()) {
+            SetupQueryRunner.run(queryExecutor, "SET extra_float_digits = 3", false);
+        }
+
+        String appName = PGProperty.APPLICATION_NAME.getOrDefault(info);
+        if (appName != null && dbVersion >= ServerVersion.v9_0.getVersionNum()) {
+            StringBuilder sql = new StringBuilder();
+            sql.append("SET application_name = '");
+            Utils.escapeLiteral(sql, appName, queryExecutor.getStandardConformingStrings());
+            sql.append("'");
+            SetupQueryRunner.run(queryExecutor, sql.toString(), false);
+        }
+
+        if (PGProperty.GROUP_STARTUP_PARAMETERS.getBoolean(info) && dbVersion >= ServerVersion.v9_0.getVersionNum()) {
+            SetupQueryRunner.run(queryExecutor, "COMMIT", false);
+        }
     }
 
-    if (PGProperty.GROUP_STARTUP_PARAMETERS.getBoolean(info) && dbVersion >= ServerVersion.v9_0.getVersionNum()) {
-      SetupQueryRunner.run(queryExecutor, "COMMIT", false);
+    /**
+     * Since PG14 there is GUC_REPORT ParamStatus {@code in_hot_standby} which is set to "on"
+     * when the server is in archive recovery or standby mode. In driver's lingo such server is called
+     * {@link org.postgresql.hostchooser.HostRequirement#secondary}.
+     * Previously {@code transaction_read_only} was used as a workable substitute.
+     * However {@code transaction_read_only} could have been manually overridden on the primary server
+     * by a database user, leading to false positives: i.e. the server is effectively read-only but
+     * technically is "primary" (not in a recovery/standby mode).
+     *
+     * <p>This method checks whether {@code in_hot_standby} GUC was reported by the server
+     * during initial connection:</p>
+     *
+     * <ul>
+     * <li>{@code in_hot_standby} was reported and the value was "on" then the server is a replica
+     * and database is read-only by definition, false is returned.</li>
+     * <li>{@code in_hot_standby} was reported and the value was "off"
+     * then the server is indeed primary but database may be in
+     * read-only mode nevertheless. We proceed to conservatively {@code show transaction_read_only}
+     * since users may not be expecting a readonly connection for {@code targetServerType=primary}</li>
+     * <li>If {@code in_hot_standby} has not been reported we fallback to pre v14 behavior.</li>
+     * </ul>
+     *
+     * <p>Do not confuse {@code hot_standby} and {@code in_hot_standby} ParamStatuses</p>
+     *
+     * @see <a href="https://www.postgresql.org/docs/current/protocol-flow.html#PROTOCOL-ASYNC">GUC_REPORT documentation</a>
+     * @see <a href="https://www.postgresql.org/docs/current/hot-standby.html">Hot standby documentation</a>
+     * @see <a href="https://www.postgresql.org/message-id/flat/1700970.cRWpxnom9y@hammer.magicstack.net">in_hot_standby patch thread v10</a>
+     * @see <a href="https://www.postgresql.org/message-id/flat/CAF3%2BxM%2B8-ztOkaV9gHiJ3wfgENTq97QcjXQt%2BrbFQ6F7oNzt9A%40mail.gmail.com">in_hot_standby patch thread v14</a>
+     */
+    private boolean isPrimary(QueryExecutor queryExecutor) throws SQLException, IOException {
+        String inHotStandby = queryExecutor.getParameterStatus(IN_HOT_STANDBY);
+        if ("on".equalsIgnoreCase(inHotStandby)) {
+            return false;
+        }
+        Tuple results = SetupQueryRunner.run(queryExecutor, "show transaction_read_only", true);
+        Tuple nonNullResults = results;
+        String queriedTransactionReadonly = queryExecutor.getEncoding().decode(nonNullResults.get(0));
+        return "off".equalsIgnoreCase(queriedTransactionReadonly);
     }
-  }
 
-  /**
-   * Since PG14 there is GUC_REPORT ParamStatus {@code in_hot_standby} which is set to "on"
-   * when the server is in archive recovery or standby mode. In driver's lingo such server is called
-   * {@link org.postgresql.hostchooser.HostRequirement#secondary}.
-   * Previously {@code transaction_read_only} was used as a workable substitute.
-   * However {@code transaction_read_only} could have been manually overridden on the primary server
-   * by database user leading to a false positives: ie server is effectively read-only but
-   * technically is "primary" (not in a recovery/standby mode).
-   *
-   * <p>This method checks whether {@code in_hot_standby} GUC was reported by the server
-   * during initial connection:</p>
-   *
-   * <ul>
-   * <li>{@code in_hot_standby} was reported and the value was "on" then the server is a replica
-   * and database is read-only by definition, false is returned.</li>
-   * <li>{@code in_hot_standby} was reported and the value was "off"
-   * then the server is indeed primary but database may be in
-   * read-only mode nevertheless. We proceed to conservatively {@code show transaction_read_only}
-   * since users may not be expecting a readonly connection for {@code targetServerType=primary}</li>
-   * <li>If {@code in_hot_standby} has not been reported we fallback to pre v14 behavior.</li>
-   * </ul>
-   *
-   * <p>Do not confuse {@code hot_standby} and {@code in_hot_standby} ParamStatuses</p>
-   *
-   * @see <a href="https://www.postgresql.org/docs/current/protocol-flow.html#PROTOCOL-ASYNC">GUC_REPORT documentation</a>
-   * @see <a href="https://www.postgresql.org/docs/current/hot-standby.html">Hot standby documentation</a>
-   * @see <a href="https://www.postgresql.org/message-id/flat/1700970.cRWpxnom9y@hammer.magicstack.net">in_hot_standby patch thread v10</a>
-   * @see <a href="https://www.postgresql.org/message-id/flat/CAF3%2BxM%2B8-ztOkaV9gHiJ3wfgENTq97QcjXQt%2BrbFQ6F7oNzt9A%40mail.gmail.com">in_hot_standby patch thread v14</a>
-   *
-   */
-  private boolean isPrimary(QueryExecutor queryExecutor) throws SQLException, IOException {
-    String inHotStandby = queryExecutor.getParameterStatus(IN_HOT_STANDBY);
-    if ("on".equalsIgnoreCase(inHotStandby)) {
-      return false;
+    private static class StartupParam {
+        private final String key;
+        private final String value;
+
+        StartupParam(String key, String value) {
+            this.key = key;
+            this.value = value;
+        }
+
+        @Override
+        public String toString() {
+            return this.key + "=" + this.value;
+        }
+
+        public byte[] getEncodedKey() {
+            return this.key.getBytes(StandardCharsets.UTF_8);
+        }
+
+        public byte[] getEncodedValue() {
+            return this.value.getBytes(StandardCharsets.UTF_8);
+        }
     }
-    Tuple results = SetupQueryRunner.run(queryExecutor, "show transaction_read_only", true);
-    Tuple nonNullResults = results;
-    String queriedTransactionReadonly = queryExecutor.getEncoding().decode(nonNullResults.get(0));
-    return "off".equalsIgnoreCase(queriedTransactionReadonly);
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/core/v3/CopyDualImpl.java b/pgjdbc/src/main/java/org/postgresql/core/v3/CopyDualImpl.java
index 6ed13b8..a2f3c2d 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/v3/CopyDualImpl.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/v3/CopyDualImpl.java
@@ -5,60 +5,59 @@
 
 package org.postgresql.core.v3;
 
+import java.sql.SQLException;
+import java.util.ArrayDeque;
+import java.util.Queue;
 import org.postgresql.copy.CopyDual;
 import org.postgresql.util.ByteStreamWriter;
 import org.postgresql.util.PSQLException;
 
-import java.sql.SQLException;
-import java.util.ArrayDeque;
-import java.util.Queue;
-
 public class CopyDualImpl extends CopyOperationImpl implements CopyDual {
-  private final Queue<byte[]> received = new ArrayDeque<>();
+    private final Queue<byte[]> received = new ArrayDeque<>();
 
-  public CopyDualImpl() {
-  }
-
-  @Override
-  public void writeToCopy(byte[] data, int off, int siz) throws SQLException {
-    getQueryExecutor().writeToCopy(this, data, off, siz);
-  }
-
-  @Override
-  public void writeToCopy(ByteStreamWriter from) throws SQLException {
-    getQueryExecutor().writeToCopy(this, from);
-  }
-
-  @Override
-  public void flushCopy() throws SQLException {
-    getQueryExecutor().flushCopy(this);
-  }
-
-  @Override
-  public long endCopy() throws SQLException {
-    return getQueryExecutor().endCopy(this);
-  }
-
-  @Override
-  public byte [] readFromCopy() throws SQLException {
-    return readFromCopy(true);
-  }
-
-  @Override
-  public byte [] readFromCopy(boolean block) throws SQLException {
-    if (received.isEmpty()) {
-      getQueryExecutor().readFromCopy(this, block);
+    public CopyDualImpl() {
     }
 
-    return received.poll();
-  }
+    @Override
+    public void writeToCopy(byte[] data, int off, int siz) throws SQLException {
+        getQueryExecutor().writeToCopy(this, data, off, siz);
+    }
 
-  @Override
-  public void handleCommandStatus(String status) throws PSQLException {
-  }
+    @Override
+    public void writeToCopy(ByteStreamWriter from) throws SQLException {
+        getQueryExecutor().writeToCopy(this, from);
+    }
 
-  @Override
-  protected void handleCopydata(byte[] data) {
-    received.add(data);
-  }
+    @Override
+    public void flushCopy() throws SQLException {
+        getQueryExecutor().flushCopy(this);
+    }
+
+    @Override
+    public long endCopy() throws SQLException {
+        return getQueryExecutor().endCopy(this);
+    }
+
+    @Override
+    public byte[] readFromCopy() throws SQLException {
+        return readFromCopy(true);
+    }
+
+    @Override
+    public byte[] readFromCopy(boolean block) throws SQLException {
+        if (received.isEmpty()) {
+            getQueryExecutor().readFromCopy(this, block);
+        }
+
+        return received.poll();
+    }
+
+    @Override
+    public void handleCommandStatus(String status) throws PSQLException {
+    }
+
+    @Override
+    protected void handleCopydata(byte[] data) {
+        received.add(data);
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/core/v3/CopyInImpl.java b/pgjdbc/src/main/java/org/postgresql/core/v3/CopyInImpl.java
index 50b73d7..64da921 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/v3/CopyInImpl.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/v3/CopyInImpl.java
@@ -5,19 +5,18 @@
 
 package org.postgresql.core.v3;
 
+import java.sql.SQLException;
 import org.postgresql.copy.CopyIn;
 import org.postgresql.util.ByteStreamWriter;
 import org.postgresql.util.GT;
 import org.postgresql.util.PSQLException;
 import org.postgresql.util.PSQLState;
 
-import java.sql.SQLException;
-
 /**
  * <p>COPY FROM STDIN operation.</p>
  *
  * <p>Anticipated flow:
- *
+ * <p>
  * CopyManager.copyIn() -&gt;QueryExecutor.startCopy() - sends given query to server
  * -&gt;processCopyResults(): - receives CopyInResponse from Server - creates new CopyInImpl
  * -&gt;initCopy(): - receives copy metadata from server -&gt;CopyInImpl.init() -&gt;lock()
@@ -34,32 +33,32 @@ import java.sql.SQLException;
  */
 public class CopyInImpl extends CopyOperationImpl implements CopyIn {
 
-  public CopyInImpl() {
-  }
+    public CopyInImpl() {
+    }
 
-  @Override
-  public void writeToCopy(byte[] data, int off, int siz) throws SQLException {
-    getQueryExecutor().writeToCopy(this, data, off, siz);
-  }
+    @Override
+    public void writeToCopy(byte[] data, int off, int siz) throws SQLException {
+        getQueryExecutor().writeToCopy(this, data, off, siz);
+    }
 
-  @Override
-  public void writeToCopy(ByteStreamWriter from) throws SQLException {
-    getQueryExecutor().writeToCopy(this, from);
-  }
+    @Override
+    public void writeToCopy(ByteStreamWriter from) throws SQLException {
+        getQueryExecutor().writeToCopy(this, from);
+    }
 
-  @Override
-  public void flushCopy() throws SQLException {
-    getQueryExecutor().flushCopy(this);
-  }
+    @Override
+    public void flushCopy() throws SQLException {
+        getQueryExecutor().flushCopy(this);
+    }
 
-  @Override
-  public long endCopy() throws SQLException {
-    return getQueryExecutor().endCopy(this);
-  }
+    @Override
+    public long endCopy() throws SQLException {
+        return getQueryExecutor().endCopy(this);
+    }
 
-  @Override
-  protected void handleCopydata(byte[] data) throws PSQLException {
-    throw new PSQLException(GT.tr("CopyIn copy direction can't receive data"),
-        PSQLState.PROTOCOL_VIOLATION);
-  }
+    @Override
+    protected void handleCopydata(byte[] data) throws PSQLException {
+        throw new PSQLException(GT.tr("CopyIn copy direction can't receive data"),
+                PSQLState.PROTOCOL_VIOLATION);
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/core/v3/CopyOperationImpl.java b/pgjdbc/src/main/java/org/postgresql/core/v3/CopyOperationImpl.java
index 680c6d2..cd4c2ed 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/v3/CopyOperationImpl.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/v3/CopyOperationImpl.java
@@ -5,77 +5,76 @@
 
 package org.postgresql.core.v3;
 
+import java.sql.SQLException;
 import org.postgresql.copy.CopyOperation;
 import org.postgresql.util.GT;
 import org.postgresql.util.PSQLException;
 import org.postgresql.util.PSQLState;
 
-import java.sql.SQLException;
-
 public abstract class CopyOperationImpl implements CopyOperation {
-  QueryExecutorImpl queryExecutor;
-  int rowFormat;
-  int [] fieldFormats;
-  long handledRowCount = -1;
+    QueryExecutorImpl queryExecutor;
+    int rowFormat;
+    int[] fieldFormats;
+    long handledRowCount = -1;
 
-  public CopyOperationImpl() {
-  }
-
-  void init(QueryExecutorImpl q, int fmt, int[] fmts) {
-    queryExecutor = q;
-    rowFormat = fmt;
-    fieldFormats = fmts;
-  }
-
-  protected QueryExecutorImpl getQueryExecutor() {
-    return queryExecutor;
-  }
-
-  @Override
-  public void cancelCopy() throws SQLException {
-    queryExecutor.cancelCopy(this);
-  }
-
-  @Override
-  public int getFieldCount() {
-    return fieldFormats.length;
-  }
-
-  @Override
-  public int getFieldFormat(int field) {
-    return fieldFormats[field];
-  }
-
-  @Override
-  public int getFormat() {
-    return rowFormat;
-  }
-
-  @Override
-  public boolean isActive() {
-    return queryExecutor.hasLockOn(this);
-  }
-
-  public void handleCommandStatus(String status) throws PSQLException {
-    if (status.startsWith("COPY")) {
-      int i = status.lastIndexOf(' ');
-      handledRowCount = i > 3 ? Long.parseLong(status.substring(i + 1)) : -1;
-    } else {
-      throw new PSQLException(GT.tr("CommandComplete expected COPY but got: " + status),
-          PSQLState.COMMUNICATION_ERROR);
+    public CopyOperationImpl() {
     }
-  }
 
-  /**
-   * Consume received copy data.
-   *
-   * @param data data that was receive by copy protocol
-   * @throws PSQLException if some internal problem occurs
-   */
-  protected abstract void handleCopydata(byte[] data) throws PSQLException;
+    void init(QueryExecutorImpl q, int fmt, int[] fmts) {
+        queryExecutor = q;
+        rowFormat = fmt;
+        fieldFormats = fmts;
+    }
 
-  @Override
-  public long getHandledRowCount() {
-    return handledRowCount;
-  }
+    protected QueryExecutorImpl getQueryExecutor() {
+        return queryExecutor;
+    }
+
+    @Override
+    public void cancelCopy() throws SQLException {
+        queryExecutor.cancelCopy(this);
+    }
+
+    @Override
+    public int getFieldCount() {
+        return fieldFormats.length;
+    }
+
+    @Override
+    public int getFieldFormat(int field) {
+        return fieldFormats[field];
+    }
+
+    @Override
+    public int getFormat() {
+        return rowFormat;
+    }
+
+    @Override
+    public boolean isActive() {
+        return queryExecutor.hasLockOn(this);
+    }
+
+    public void handleCommandStatus(String status) throws PSQLException {
+        if (status.startsWith("COPY")) {
+            int i = status.lastIndexOf(' ');
+            handledRowCount = i > 3 ? Long.parseLong(status.substring(i + 1)) : -1;
+        } else {
+            throw new PSQLException(GT.tr("CommandComplete expected COPY but got: " + status),
+                    PSQLState.COMMUNICATION_ERROR);
+        }
+    }
+
+    /**
+     * Consume received copy data.
+     *
+     * @param data data that was received by the copy protocol
+     * @throws PSQLException if some internal problem occurs
+     */
+    protected abstract void handleCopydata(byte[] data) throws PSQLException;
+
+    @Override
+    public long getHandledRowCount() {
+        return handledRowCount;
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/core/v3/CopyOutImpl.java b/pgjdbc/src/main/java/org/postgresql/core/v3/CopyOutImpl.java
index f7898bf..d0faa4f 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/v3/CopyOutImpl.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/v3/CopyOutImpl.java
@@ -5,9 +5,8 @@
 
 package org.postgresql.core.v3;
 
-import org.postgresql.copy.CopyOut;
-
 import java.sql.SQLException;
+import org.postgresql.copy.CopyOut;
 
 /**
  * <p>Anticipated flow of a COPY TO STDOUT operation:</p>
@@ -24,25 +23,25 @@ import java.sql.SQLException;
  * &lt;-returned: byte array of data received from server or null at end.</p>
  */
 public class CopyOutImpl extends CopyOperationImpl implements CopyOut {
-  private byte [] currentDataRow;
+    private byte[] currentDataRow;
 
-  public CopyOutImpl() {
-  }
+    public CopyOutImpl() {
+    }
 
-  @Override
-  public byte [] readFromCopy() throws SQLException {
-    return readFromCopy(true);
-  }
+    @Override
+    public byte[] readFromCopy() throws SQLException {
+        return readFromCopy(true);
+    }
 
-  @Override
-  public byte [] readFromCopy(boolean block) throws SQLException {
-    currentDataRow = null;
-    getQueryExecutor().readFromCopy(this, block);
-    return currentDataRow;
-  }
+    @Override
+    public byte[] readFromCopy(boolean block) throws SQLException {
+        currentDataRow = null;
+        getQueryExecutor().readFromCopy(this, block);
+        return currentDataRow;
+    }
 
-  @Override
-  protected void handleCopydata(byte[] data) {
-    currentDataRow = data;
-  }
+    @Override
+    protected void handleCopydata(byte[] data) {
+        currentDataRow = data;
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/core/v3/DescribeRequest.java b/pgjdbc/src/main/java/org/postgresql/core/v3/DescribeRequest.java
index 14a0fef..3853dcc 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/v3/DescribeRequest.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/v3/DescribeRequest.java
@@ -7,19 +7,18 @@ package org.postgresql.core.v3;
 
 /**
  * Information for "pending describe queue".
- *
  */
 class DescribeRequest {
-  public final SimpleQuery query;
-  public final SimpleParameterList parameterList;
-  public final boolean describeOnly;
-  public final String statementName;
+    public final SimpleQuery query;
+    public final SimpleParameterList parameterList;
+    public final boolean describeOnly;
+    public final String statementName;
 
-  DescribeRequest(SimpleQuery query, SimpleParameterList parameterList,
-      boolean describeOnly, String statementName) {
-    this.query = query;
-    this.parameterList = parameterList;
-    this.describeOnly = describeOnly;
-    this.statementName = statementName;
-  }
+    DescribeRequest(SimpleQuery query, SimpleParameterList parameterList,
+                    boolean describeOnly, String statementName) {
+        this.query = query;
+        this.parameterList = parameterList;
+        this.describeOnly = describeOnly;
+        this.statementName = statementName;
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/core/v3/ExecuteRequest.java b/pgjdbc/src/main/java/org/postgresql/core/v3/ExecuteRequest.java
index e01190b..2d58827 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/v3/ExecuteRequest.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/v3/ExecuteRequest.java
@@ -7,16 +7,15 @@ package org.postgresql.core.v3;
 
 /**
  * Information for "pending execute queue".
- *
  */
 class ExecuteRequest {
-  public final SimpleQuery query;
-  public final Portal portal;
-  public final boolean asSimple;
+    public final SimpleQuery query;
+    public final Portal portal;
+    public final boolean asSimple;
 
-  ExecuteRequest(SimpleQuery query, Portal portal, boolean asSimple) {
-    this.query = query;
-    this.portal = portal;
-    this.asSimple = asSimple;
-  }
+    ExecuteRequest(SimpleQuery query, Portal portal, boolean asSimple) {
+        this.query = query;
+        this.portal = portal;
+        this.asSimple = asSimple;
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/core/v3/Portal.java b/pgjdbc/src/main/java/org/postgresql/core/v3/Portal.java
index 1300355..bc30745 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/v3/Portal.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/v3/Portal.java
@@ -6,10 +6,9 @@
 
 package org.postgresql.core.v3;
 
-import org.postgresql.core.ResultCursor;
-
 import java.lang.ref.PhantomReference;
 import java.nio.charset.StandardCharsets;
+import org.postgresql.core.ResultCursor;
 
 /**
  * V3 ResultCursor implementation in terms of backend Portals. This holds the state of a single
@@ -18,51 +17,51 @@ import java.nio.charset.StandardCharsets;
  * @author Oliver Jowett (oliver@opencloud.com)
  */
 class Portal implements ResultCursor {
-  Portal(SimpleQuery query, String portalName) {
-    this.query = query;
-    this.portalName = portalName;
-    this.encodedName = portalName.getBytes(StandardCharsets.UTF_8);
-  }
+    private final SimpleQuery query;
+    private final String portalName;
+    private final byte[] encodedName;
+    private PhantomReference<?> cleanupRef;
 
-  @Override
-  public void close() {
-    PhantomReference<?> cleanupRef = this.cleanupRef;
-    if (cleanupRef != null) {
-      cleanupRef.clear();
-      cleanupRef.enqueue();
-      this.cleanupRef = null;
+    Portal(SimpleQuery query, String portalName) {
+        this.query = query;
+        this.portalName = portalName;
+        this.encodedName = portalName.getBytes(StandardCharsets.UTF_8);
     }
-  }
 
-  String getPortalName() {
-    return portalName;
-  }
+    @Override
+    public void close() {
+        PhantomReference<?> cleanupRef = this.cleanupRef;
+        if (cleanupRef != null) {
+            cleanupRef.clear();
+            cleanupRef.enqueue();
+            this.cleanupRef = null;
+        }
+    }
 
-  byte[] getEncodedPortalName() {
-    return encodedName;
-  }
+    String getPortalName() {
+        return portalName;
+    }
 
-  SimpleQuery getQuery() {
-    return query;
-  }
+    // Holding on to a reference to the generating query has
+    // the nice side-effect that while this Portal is referenced,
+    // so is the SimpleQuery, so the underlying statement won't
+    // be closed while the portal is open (the backend closes
+    // all open portals when the statement is closed)
 
-  void setCleanupRef(PhantomReference<?> cleanupRef) {
-    this.cleanupRef = cleanupRef;
-  }
+    byte[] getEncodedPortalName() {
+        return encodedName;
+    }
 
-  @Override
-  public String toString() {
-    return portalName;
-  }
+    SimpleQuery getQuery() {
+        return query;
+    }
 
-  // Holding on to a reference to the generating query has
-  // the nice side-effect that while this Portal is referenced,
-  // so is the SimpleQuery, so the underlying statement won't
-  // be closed while the portal is open (the backend closes
-  // all open portals when the statement is closed)
+    void setCleanupRef(PhantomReference<?> cleanupRef) {
+        this.cleanupRef = cleanupRef;
+    }
 
-  private final SimpleQuery query;
-  private final String portalName;
-  private final byte[] encodedName;
-  private PhantomReference<?> cleanupRef;
+    @Override
+    public String toString() {
+        return portalName;
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/core/v3/QueryExecutorImpl.java b/pgjdbc/src/main/java/org/postgresql/core/v3/QueryExecutorImpl.java
index e1e12b6..f2b2f2c 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/v3/QueryExecutorImpl.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/v3/QueryExecutorImpl.java
@@ -6,6 +6,32 @@
 
 package org.postgresql.core.v3;
 
+import java.io.IOException;
+import java.lang.ref.PhantomReference;
+import java.lang.ref.Reference;
+import java.lang.ref.ReferenceQueue;
+import java.net.Socket;
+import java.net.SocketException;
+import java.net.SocketTimeoutException;
+import java.nio.charset.StandardCharsets;
+import java.sql.SQLException;
+import java.sql.SQLWarning;
+import java.util.ArrayDeque;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Deque;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Locale;
+import java.util.Objects;
+import java.util.Properties;
+import java.util.Set;
+import java.util.TimeZone;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.logging.Level;
+import java.util.logging.Logger;
 import org.postgresql.PGProperty;
 import org.postgresql.copy.CopyIn;
 import org.postgresql.copy.CopyOperation;
@@ -46,3057 +72,3006 @@ import org.postgresql.util.PSQLState;
 import org.postgresql.util.PSQLWarning;
 import org.postgresql.util.ServerErrorMessage;
 
-import java.io.IOException;
-import java.lang.ref.PhantomReference;
-import java.lang.ref.Reference;
-import java.lang.ref.ReferenceQueue;
-import java.net.Socket;
-import java.net.SocketException;
-import java.net.SocketTimeoutException;
-import java.nio.charset.StandardCharsets;
-import java.sql.SQLException;
-import java.sql.SQLWarning;
-import java.util.ArrayDeque;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Deque;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Locale;
-import java.util.Objects;
-import java.util.Properties;
-import java.util.Set;
-import java.util.TimeZone;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.logging.Level;
-import java.util.logging.Logger;
-
 /**
  * QueryExecutor implementation for the V3 protocol.
  */
 @SuppressWarnings("try")
 public class QueryExecutorImpl extends QueryExecutorBase {
 
-  private static final Logger LOGGER = Logger.getLogger(QueryExecutorImpl.class.getName());
+    private static final Logger LOGGER = Logger.getLogger(QueryExecutorImpl.class.getName());
 
-  private static final Field[] NO_FIELDS = new Field[0];
+    private static final Field[] NO_FIELDS = new Field[0];
+    // Deadlock avoidance:
+    //
+    // It's possible for the send and receive streams to get "deadlocked" against each other since
+    // we do not have a separate thread. The scenario is this: we have two streams:
+    //
+    // driver -> TCP buffering -> server
+    // server -> TCP buffering -> driver
+    //
+    // The server behaviour is roughly:
+    // while true:
+    // read message
+    // execute message
+    // write results
+    //
+    // If the server -> driver stream has a full buffer, the write will block.
+    // If the driver is still writing when this happens, and the driver -> server
+    // stream also fills up, we deadlock: the driver is blocked on write() waiting
+    // for the server to read some more data, and the server is blocked on write()
+    // waiting for the driver to read some more data.
+    //
+    // To avoid this, we guess at how much response data we can request from the
+    // server before the server -> driver stream's buffer is full (MAX_BUFFERED_RECV_BYTES).
+    // This is the point where the server blocks on write and stops reading data. If we
+    // reach this point, we force a Sync message and read pending data from the server
+    // until ReadyForQuery, then go back to writing more queries unless we saw an error.
+    //
+    // This is not 100% reliable -- it's only done in the batch-query case and only
+    // at a reasonably high level (per query, not per message), and it's only an estimate
+    // -- so it might break. To do it correctly in all cases would seem to require a
+    // separate send or receive thread as we can only do the Sync-and-read-results
+    // operation at particular points, and also as we don't really know how much data
+    // the server is sending.
+    //
+    // Our message size estimation is coarse, and disregards asynchronous
+    // notifications, warnings/info/debug messages, etc, so the response size may be
+    // quite different from the 250 bytes assumed here even for queries that don't
+    // return data.
+    //
+    // See github issue #194 and #195 .
+    //
+    // Assume 64k server->client buffering, which is extremely conservative. A typical
+    // system will have 200kb or more of buffers for its receive buffers, and the sending
+    // system will typically have the same on the send side, giving us 400kb or to work
+    // with. (We could check Java's receive buffer size, but prefer to assume a very
+    // conservative buffer instead, and we don't know how big the server's send
+    // buffer is.)
+    //
+    private static final int MAX_BUFFERED_RECV_BYTES = 64000;
+    private static final int NODATA_QUERY_RESPONSE_SIZE_BYTES = 250;
+    private static final Portal UNNAMED_PORTAL = new Portal(null, "unnamed");
 
-  static {
-    //canonicalize commonly seen strings to reduce memory and speed comparisons
-    Encoding.canonicalize("application_name");
-    Encoding.canonicalize("client_encoding");
-    Encoding.canonicalize("DateStyle");
-    Encoding.canonicalize("integer_datetimes");
-    Encoding.canonicalize("off");
-    Encoding.canonicalize("on");
-    Encoding.canonicalize("server_encoding");
-    Encoding.canonicalize("server_version");
-    Encoding.canonicalize("server_version_num");
-    Encoding.canonicalize("standard_conforming_strings");
-    Encoding.canonicalize("TimeZone");
-    Encoding.canonicalize("UTF8");
-    Encoding.canonicalize("UTF-8");
-    Encoding.canonicalize("in_hot_standby");
-  }
-
-  /**
-   * TimeZone of the current connection (TimeZone backend parameter).
-   */
-  private TimeZone timeZone;
-
-  /**
-   * application_name connection property.
-   */
-  private String applicationName;
-
-  /**
-   * True if server uses integers for date and time fields. False if server uses double.
-   */
-  private boolean integerDateTimes;
-
-  /**
-   * Bit set that has a bit set for each oid which should be received using binary format.
-   */
-  private final Set<Integer> useBinaryReceiveForOids = new HashSet<>();
-
-  /**
-   * Bit set that has a bit set for each oid which should be sent using binary format.
-   */
-  private final Set<Integer> useBinarySendForOids = new HashSet<>();
-
-  /**
-   * This is a fake query object so processResults can distinguish "ReadyForQuery" messages
-   * from Sync messages vs from simple execute (aka 'Q').
-   */
-  private final SimpleQuery sync;
-
-  private short deallocateEpoch;
-
-  /**
-   * This caches the latest observed {@code set search_path} query so the reset of prepared
-   * statement cache can be skipped if using repeated calls for the same {@code set search_path}
-   * value.
-   */
-  private String lastSetSearchPathQuery;
-
-  /**
-   * The exception that caused the last transaction to fail.
-   */
-  private SQLException transactionFailCause;
-
-  private final ReplicationProtocol replicationProtocol;
-
-  /**
-   * {@code CommandComplete(B)} messages are quite common, so we reuse instance to parse those
-   */
-  private final CommandCompleteParser commandCompleteParser = new CommandCompleteParser();
-
-  private final AdaptiveFetchCache adaptiveFetchCache;
-
-  @SuppressWarnings("this-escape")
-  public QueryExecutorImpl(PGStream pgStream,
-      int cancelSignalTimeout, Properties info) throws SQLException, IOException {
-    super(pgStream, cancelSignalTimeout, info);
-
-    this.sync = (SimpleQuery) createQuery("SYNC", false, true).query;
-
-    long maxResultBuffer = pgStream.getMaxResultBuffer();
-    this.adaptiveFetchCache = new AdaptiveFetchCache(maxResultBuffer, info);
-
-    this.allowEncodingChanges = PGProperty.ALLOW_ENCODING_CHANGES.getBoolean(info);
-    this.cleanupSavePoints = PGProperty.CLEANUP_SAVEPOINTS.getBoolean(info);
-    // assignment, argument
-    this.replicationProtocol = new V3ReplicationProtocol(this, pgStream);
-    readStartupMessages();
-  }
-
-  @Override
-  public int getProtocolVersion() {
-    return 3;
-  }
-
-  /**
-   * <p>Supplement to synchronization of public methods on current QueryExecutor.</p>
-   *
-   * <p>Necessary for keeping the connection intact between calls to public methods sharing a state
-   * such as COPY subprotocol. waitOnLock() must be called at beginning of each connection access
-   * point.</p>
-   *
-   * <p>Public methods sharing that state must then be synchronized among themselves. Normal method
-   * synchronization typically suffices for that.</p>
-   *
-   * <p>See notes on related methods as well as currentCopy() below.</p>
-   */
-  private Object lockedFor;
-
-  /**
-   * Obtain lock over this connection for given object, blocking to wait if necessary.
-   *
-   * @param obtainer object that gets the lock. Normally current thread.
-   * @throws PSQLException when already holding the lock or getting interrupted.
-   */
-  private void lock(Object obtainer) throws PSQLException {
-    if (lockedFor == obtainer) {
-      throw new PSQLException(GT.tr("Tried to obtain lock while already holding it"),
-          PSQLState.OBJECT_NOT_IN_STATE);
-
-    }
-    waitOnLock();
-    lockedFor = obtainer;
-  }
-
-  /**
-   * Release lock on this connection presumably held by given object.
-   *
-   * @param holder object that holds the lock. Normally current thread.
-   * @throws PSQLException when this thread does not hold the lock
-   */
-  private void unlock(Object holder) throws PSQLException {
-    if (lockedFor != holder) {
-      throw new PSQLException(GT.tr("Tried to break lock on database connection"),
-          PSQLState.OBJECT_NOT_IN_STATE);
-    }
-    lockedFor = null;
-    lockCondition.signal();
-  }
-
-  /**
-   * Wait until our lock is released. Execution of a single synchronized method can then continue
-   * without further ado. Must be called at beginning of each synchronized public method.
-   */
-  private void waitOnLock() throws PSQLException {
-    while (lockedFor != null) {
-      try {
-        lockCondition.await();
-      } catch (InterruptedException ie) {
-        Thread.currentThread().interrupt();
-        throw new PSQLException(
-            GT.tr("Interrupted while waiting to obtain lock on database connection"),
-            PSQLState.OBJECT_NOT_IN_STATE, ie);
-      }
-    }
-  }
-
-  /**
-   * @param holder object assumed to hold the lock
-   * @return whether given object actually holds the lock
-   */
-  boolean hasLockOn(Object holder) {
-    try (ResourceLock ignore = lock.obtain()) {
-      return lockedFor == holder;
-    }
-  }
-
-  /**
-   * @param holder object assumed to hold the lock
-   * @return whether given object actually holds the lock
-   */
-  private boolean hasLock(Object holder) {
-    return lockedFor == holder;
-  }
-
-  //
-  // Query parsing
-  //
-
-  @Override
-  public Query createSimpleQuery(String sql) throws SQLException {
-    List<NativeQuery> queries = Parser.parseJdbcSql(sql,
-        getStandardConformingStrings(), false, true,
-        isReWriteBatchedInsertsEnabled(), getQuoteReturningIdentifiers());
-    return wrap(queries);
-  }
-
-  @Override
-  public Query wrap(List<NativeQuery> queries) {
-    if (queries.isEmpty()) {
-      // Empty query
-      return emptyQuery;
-    }
-    if (queries.size() == 1) {
-      NativeQuery firstQuery = queries.get(0);
-      if (isReWriteBatchedInsertsEnabled()
-          && firstQuery.getCommand().isBatchedReWriteCompatible()) {
-        int valuesBraceOpenPosition =
-            firstQuery.getCommand().getBatchRewriteValuesBraceOpenPosition();
-        int valuesBraceClosePosition =
-            firstQuery.getCommand().getBatchRewriteValuesBraceClosePosition();
-        return new BatchedQuery(firstQuery, this, valuesBraceOpenPosition,
-            valuesBraceClosePosition, isColumnSanitiserDisabled());
-      } else {
-        return new SimpleQuery(firstQuery, this, isColumnSanitiserDisabled());
-      }
+    static {
+        //canonicalize commonly seen strings to reduce memory and speed comparisons
+        Encoding.canonicalize("application_name");
+        Encoding.canonicalize("client_encoding");
+        Encoding.canonicalize("DateStyle");
+        Encoding.canonicalize("integer_datetimes");
+        Encoding.canonicalize("off");
+        Encoding.canonicalize("on");
+        Encoding.canonicalize("server_encoding");
+        Encoding.canonicalize("server_version");
+        Encoding.canonicalize("server_version_num");
+        Encoding.canonicalize("standard_conforming_strings");
+        Encoding.canonicalize("TimeZone");
+        Encoding.canonicalize("UTF8");
+        Encoding.canonicalize("UTF-8");
+        Encoding.canonicalize("in_hot_standby");
     }
 
-    // Multiple statements.
-    SimpleQuery[] subqueries = new SimpleQuery[queries.size()];
-    int[] offsets = new int[subqueries.length];
-    int offset = 0;
-    for (int i = 0; i < queries.size(); i++) {
-      NativeQuery nativeQuery = queries.get(i);
-      offsets[i] = offset;
-      subqueries[i] = new SimpleQuery(nativeQuery, this, isColumnSanitiserDisabled());
-      offset += nativeQuery.bindPositions.length;
+    /**
+     * Bit set that has a bit set for each oid which should be received using binary format.
+     */
+    private final Set<Integer> useBinaryReceiveForOids = new HashSet<>();
+
+    /**
+     * Bit set that has a bit set for each oid which should be sent using binary format.
+     */
+    private final Set<Integer> useBinarySendForOids = new HashSet<>();
+
+    /**
+     * This is a fake query object so processResults can distinguish "ReadyForQuery" messages
+     * from Sync messages vs from simple execute (aka 'Q').
+     */
+    private final SimpleQuery sync;
+    private final ReplicationProtocol replicationProtocol;
+    /**
+     * {@code CommandComplete(B)} messages are quite common, so we reuse instance to parse those
+     */
+    private final CommandCompleteParser commandCompleteParser = new CommandCompleteParser();
+    private final AdaptiveFetchCache adaptiveFetchCache;
+    private final HashMap<PhantomReference<SimpleQuery>, String> parsedQueryMap =
+            new HashMap<>();
+    private final ReferenceQueue<SimpleQuery> parsedQueryCleanupQueue =
+            new ReferenceQueue<>();
+    private final HashMap<PhantomReference<Portal>, String> openPortalMap =
+            new HashMap<>();
+    private final ReferenceQueue<Portal> openPortalCleanupQueue = new ReferenceQueue<>();
+    private final Deque<SimpleQuery> pendingParseQueue = new ArrayDeque<>();
+    private final Deque<Portal> pendingBindQueue = new ArrayDeque<>();
+    private final Deque<ExecuteRequest> pendingExecuteQueue = new ArrayDeque<>();
+    private final Deque<DescribeRequest> pendingDescribeStatementQueue =
+            new ArrayDeque<>();
+    private final Deque<SimpleQuery> pendingDescribePortalQueue = new ArrayDeque<>();
+    private final boolean allowEncodingChanges;
+    private final boolean cleanupSavePoints;
+
+    //
+    // Query parsing
+    //
+    private final SimpleQuery beginTransactionQuery =
+            new SimpleQuery(
+                    new NativeQuery("BEGIN", null, false, SqlCommand.BLANK),
+                    null, false);
+    private final SimpleQuery beginReadOnlyTransactionQuery =
+            new SimpleQuery(
+                    new NativeQuery("BEGIN READ ONLY", null, false, SqlCommand.BLANK),
+                    null, false);
+
+    //
+    // Query execution
+    //
+    private final SimpleQuery emptyQuery =
+            new SimpleQuery(
+                    new NativeQuery("", null, false,
+                            SqlCommand.createStatementTypeInfo(SqlCommandType.BLANK)
+                    ), null, false);
+    private final SimpleQuery autoSaveQuery =
+            new SimpleQuery(
+                    new NativeQuery("SAVEPOINT PGJDBC_AUTOSAVE", null, false, SqlCommand.BLANK),
+                    null, false);
+    private final SimpleQuery releaseAutoSave =
+            new SimpleQuery(
+                    new NativeQuery("RELEASE SAVEPOINT PGJDBC_AUTOSAVE", null, false, SqlCommand.BLANK),
+                    null, false);
+    /*
+    In autosave mode we use this query to roll back errored transactions
+     */
+    private final SimpleQuery restoreToAutoSave =
+            new SimpleQuery(
+                    new NativeQuery("ROLLBACK TO SAVEPOINT PGJDBC_AUTOSAVE", null, false, SqlCommand.BLANK),
+                    null, false);
+    AtomicBoolean processingCopyResults = new AtomicBoolean(false);
+    /**
+     * TimeZone of the current connection (TimeZone backend parameter).
+     */
+    private TimeZone timeZone;
+    /**
+     * application_name connection property.
+     */
+    private String applicationName;
+    /**
+     * True if server uses integers for date and time fields. False if server uses double.
+     */
+    private boolean integerDateTimes;
+    private short deallocateEpoch;
+    /**
+     * This caches the latest observed {@code set search_path} query so the reset of prepared
+     * statement cache can be skipped if using repeated calls for the same {@code set search_path}
+     * value.
+     */
+    private String lastSetSearchPathQuery;
+    /**
+     * The exception that caused the last transaction to fail.
+     */
+    private SQLException transactionFailCause;
+
+    //
+    // Fastpath
+    //
+    /**
+     * <p>Supplement to synchronization of public methods on current QueryExecutor.</p>
+     *
+     * <p>Necessary for keeping the connection intact between calls to public methods sharing a state
+     * such as COPY subprotocol. waitOnLock() must be called at beginning of each connection access
+     * point.</p>
+     *
+     * <p>Public methods sharing that state must then be synchronized among themselves. Normal method
+     * synchronization typically suffices for that.</p>
+     *
+     * <p>See notes on related methods as well as currentCopy() below.</p>
+     */
+    private Object lockedFor;
+    private long nextUniqueID = 1;
+    /**
+     * <p>The estimated server response size since we last consumed the input stream from the server, in
+     * bytes.</p>
+     *
+     * <p>Starts at zero, reset by every Sync message. Mainly used for batches.</p>
+     *
+     * <p>Used to avoid deadlocks, see MAX_BUFFERED_RECV_BYTES.</p>
+     */
+    private int estimatedReceiveBufferBytes;
+
+    @SuppressWarnings("this-escape")
+    public QueryExecutorImpl(PGStream pgStream,
+                             int cancelSignalTimeout, Properties info) throws SQLException, IOException {
+        super(pgStream, cancelSignalTimeout, info);
+
+        this.sync = (SimpleQuery) createQuery("SYNC", false, true).query;
+
+        long maxResultBuffer = pgStream.getMaxResultBuffer();
+        this.adaptiveFetchCache = new AdaptiveFetchCache(maxResultBuffer, info);
+
+        this.allowEncodingChanges = PGProperty.ALLOW_ENCODING_CHANGES.getBoolean(info);
+        this.cleanupSavePoints = PGProperty.CLEANUP_SAVEPOINTS.getBoolean(info);
+        // assignment, argument
+        this.replicationProtocol = new V3ReplicationProtocol(this, pgStream);
+        readStartupMessages();
     }
 
-    return new CompositeQuery(subqueries, offsets);
-  }
-
-  //
-  // Query execution
-  //
-
-  private int updateQueryMode(int flags) {
-    switch (getPreferQueryMode()) {
-      case SIMPLE:
-        return flags | QUERY_EXECUTE_AS_SIMPLE;
-      case EXTENDED:
-        return flags & ~QUERY_EXECUTE_AS_SIMPLE;
-      default:
-        return flags;
+    @Override
+    public int getProtocolVersion() {
+        return 3;
     }
-  }
 
-  @Override
-  public void execute(Query query, ParameterList parameters,
-      ResultHandler handler,
-      int maxRows, int fetchSize, int flags) throws SQLException {
-    execute(query, parameters, handler, maxRows, fetchSize, flags, false);
-  }
+    /**
+     * Obtain lock over this connection for given object, blocking to wait if necessary.
+     *
+     * @param obtainer object that gets the lock. Normally current thread.
+     * @throws PSQLException when already holding the lock or getting interrupted.
+     */
+    private void lock(Object obtainer) throws PSQLException {
+        if (lockedFor == obtainer) {
+            throw new PSQLException(GT.tr("Tried to obtain lock while already holding it"),
+                    PSQLState.OBJECT_NOT_IN_STATE);
 
-  @Override
-  public void execute(Query query, ParameterList parameters,
-      ResultHandler handler,
-      int maxRows, int fetchSize, int flags, boolean adaptiveFetch) throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      waitOnLock();
-      if (LOGGER.isLoggable(Level.FINEST)) {
-        LOGGER.log(Level.FINEST, "  simple execute, handler={0}, maxRows={1}, fetchSize={2}, flags={3}",
-            new Object[]{handler, maxRows, fetchSize, flags});
-      }
-
-      if (parameters == null) {
-        parameters = SimpleQuery.NO_PARAMETERS;
-      }
-
-      flags = updateQueryMode(flags);
-
-      boolean describeOnly = (QUERY_DESCRIBE_ONLY & flags) != 0;
-
-      ((V3ParameterList) parameters).convertFunctionOutParameters();
-
-      // Check parameters are all set..
-      if (!describeOnly) {
-        ((V3ParameterList) parameters).checkAllParametersSet();
-      }
-
-      boolean autosave = false;
-      try {
-        try {
-          handler = sendQueryPreamble(handler, flags);
-          autosave = sendAutomaticSavepoint(query, flags);
-          sendQuery(query, (V3ParameterList) parameters, maxRows, fetchSize, flags,
-              handler, null, adaptiveFetch);
-          if ((flags & QueryExecutor.QUERY_EXECUTE_AS_SIMPLE) != 0) {
-            // Sync message is not required for 'Q' execution as 'Q' ends with ReadyForQuery message
-            // on its own
-          } else {
-            sendSync();
-          }
-          processResults(handler, flags, adaptiveFetch);
-          estimatedReceiveBufferBytes = 0;
-        } catch (PGBindException se) {
-          // There are three causes of this error, an
-          // invalid total Bind message length, a
-          // BinaryStream that cannot provide the amount
-          // of data claimed by the length argument, and
-          // a BinaryStream that throws an Exception
-          // when reading.
-          //
-          // We simply do not send the Execute message
-          // so we can just continue on as if nothing
-          // has happened. Perhaps we need to
-          // introduce an error here to force the
-          // caller to rollback if there is a
-          // transaction in progress?
-          //
-          sendSync();
-          processResults(handler, flags, adaptiveFetch);
-          estimatedReceiveBufferBytes = 0;
-          handler
-              .handleError(new PSQLException(GT.tr("Unable to bind parameter values for statement."),
-                  PSQLState.INVALID_PARAMETER_VALUE, se.getIOException()));
         }
-      } catch (IOException e) {
-        abort();
-        handler.handleError(
-            new PSQLException(GT.tr("An I/O error occurred while sending to the backend."),
-                PSQLState.CONNECTION_FAILURE, e));
-      }
-
-      try {
-        handler.handleCompletion();
-        if (cleanupSavePoints) {
-          releaseSavePoint(autosave, flags);
-        }
-      } catch (SQLException e) {
-        rollbackIfRequired(autosave, e);
-      }
+        waitOnLock();
+        lockedFor = obtainer;
     }
-  }
 
-  private boolean sendAutomaticSavepoint(Query query, int flags) throws IOException {
-    if (((flags & QueryExecutor.QUERY_SUPPRESS_BEGIN) == 0
-        || getTransactionState() == TransactionState.OPEN)
-        && query != restoreToAutoSave
-        && !"COMMIT".equalsIgnoreCase(query.getNativeSql())
-        && getAutoSave() != AutoSave.NEVER
-        // If query has no resulting fields, it cannot fail with 'cached plan must not change result type'
-        // thus no need to set a savepoint before such query
-        && (getAutoSave() == AutoSave.ALWAYS
-        // If CompositeQuery is observed, just assume it might fail and set the savepoint
-        || !(query instanceof SimpleQuery)
-        || ((SimpleQuery) query).getFields() != null)) {
+    /**
+     * Release lock on this connection presumably held by given object.
+     *
+     * @param holder object that holds the lock. Normally current thread.
+     * @throws PSQLException when this thread does not hold the lock
+     */
+    private void unlock(Object holder) throws PSQLException {
+        if (lockedFor != holder) {
+            throw new PSQLException(GT.tr("Tried to break lock on database connection"),
+                    PSQLState.OBJECT_NOT_IN_STATE);
+        }
+        lockedFor = null;
+        lockCondition.signal();
+    }
+
+    /**
+     * Wait until our lock is released. Execution of a single synchronized method can then continue
+     * without further ado. Must be called at beginning of each synchronized public method.
+     */
+    private void waitOnLock() throws PSQLException {
+        while (lockedFor != null) {
+            try {
+                lockCondition.await();
+            } catch (InterruptedException ie) {
+                Thread.currentThread().interrupt();
+                throw new PSQLException(
+                        GT.tr("Interrupted while waiting to obtain lock on database connection"),
+                        PSQLState.OBJECT_NOT_IN_STATE, ie);
+            }
+        }
+    }
+
+    //
+    // Copy subprotocol implementation
+    //
+
+    /**
+     * @param holder object assumed to hold the lock
+     * @return whether given object actually holds the lock
+     */
+    boolean hasLockOn(Object holder) {
+        try (ResourceLock ignore = lock.obtain()) {
+            return lockedFor == holder;
+        }
+    }
+
+    /**
+     * @param holder object assumed to hold the lock
+     * @return whether given object actually holds the lock
+     */
+    private boolean hasLock(Object holder) {
+        return lockedFor == holder;
+    }
+
+    @Override
+    public Query createSimpleQuery(String sql) throws SQLException {
+        List<NativeQuery> queries = Parser.parseJdbcSql(sql,
+                getStandardConformingStrings(), false, true,
+                isReWriteBatchedInsertsEnabled(), getQuoteReturningIdentifiers());
+        return wrap(queries);
+    }
+
+    @Override
+    public Query wrap(List<NativeQuery> queries) {
+        if (queries.isEmpty()) {
+            // Empty query
+            return emptyQuery;
+        }
+        if (queries.size() == 1) {
+            NativeQuery firstQuery = queries.get(0);
+            if (isReWriteBatchedInsertsEnabled()
+                    && firstQuery.getCommand().isBatchedReWriteCompatible()) {
+                int valuesBraceOpenPosition =
+                        firstQuery.getCommand().getBatchRewriteValuesBraceOpenPosition();
+                int valuesBraceClosePosition =
+                        firstQuery.getCommand().getBatchRewriteValuesBraceClosePosition();
+                return new BatchedQuery(firstQuery, this, valuesBraceOpenPosition,
+                        valuesBraceClosePosition, isColumnSanitiserDisabled());
+            } else {
+                return new SimpleQuery(firstQuery, this, isColumnSanitiserDisabled());
+            }
+        }
+
+        // Multiple statements.
+        SimpleQuery[] subqueries = new SimpleQuery[queries.size()];
+        int[] offsets = new int[subqueries.length];
+        int offset = 0;
+        for (int i = 0; i < queries.size(); i++) {
+            NativeQuery nativeQuery = queries.get(i);
+            offsets[i] = offset;
+            subqueries[i] = new SimpleQuery(nativeQuery, this, isColumnSanitiserDisabled());
+            offset += nativeQuery.bindPositions.length;
+        }
+
+        return new CompositeQuery(subqueries, offsets);
+    }
+
+    private int updateQueryMode(int flags) {
+        switch (getPreferQueryMode()) {
+            case SIMPLE:
+                return flags | QUERY_EXECUTE_AS_SIMPLE;
+            case EXTENDED:
+                return flags & ~QUERY_EXECUTE_AS_SIMPLE;
+            default:
+                return flags;
+        }
+    }
+
+    @Override
+    public void execute(Query query, ParameterList parameters,
+                        ResultHandler handler,
+                        int maxRows, int fetchSize, int flags) throws SQLException {
+        execute(query, parameters, handler, maxRows, fetchSize, flags, false);
+    }
+
/**
 * Executes a single query against the backend, serializing on the connection
 * lock. Sends an optional BEGIN preamble and automatic savepoint, then the
 * query itself, then (unless running as simple 'Q' protocol) a Sync, and
 * finally processes all results through {@code handler}.
 */
@Override
public void execute(Query query, ParameterList parameters,
                    ResultHandler handler,
                    int maxRows, int fetchSize, int flags, boolean adaptiveFetch) throws SQLException {
    try (ResourceLock ignore = lock.obtain()) {
        // NOTE(review): waitOnLock() presumably waits for another operation
        // (e.g. a copy) to release the stream — confirm against its definition.
        waitOnLock();
        if (LOGGER.isLoggable(Level.FINEST)) {
            LOGGER.log(Level.FINEST, "  simple execute, handler={0}, maxRows={1}, fetchSize={2}, flags={3}",
                    new Object[]{handler, maxRows, fetchSize, flags});
        }

        if (parameters == null) {
            parameters = SimpleQuery.NO_PARAMETERS;
        }

        // Honor the connection's preferQueryMode (may force/clear the simple-protocol flag).
        flags = updateQueryMode(flags);

        boolean describeOnly = (QUERY_DESCRIBE_ONLY & flags) != 0;

        ((V3ParameterList) parameters).convertFunctionOutParameters();

        // Check parameters are all set..
        if (!describeOnly) {
            ((V3ParameterList) parameters).checkAllParametersSet();
        }

        boolean autosave = false;
        try {
            try {
                // May send BEGIN and wrap the handler to intercept its result.
                handler = sendQueryPreamble(handler, flags);
                autosave = sendAutomaticSavepoint(query, flags);
                sendQuery(query, (V3ParameterList) parameters, maxRows, fetchSize, flags,
                        handler, null, adaptiveFetch);
                if ((flags & QueryExecutor.QUERY_EXECUTE_AS_SIMPLE) != 0) {
                    // Sync message is not required for 'Q' execution as 'Q' ends with ReadyForQuery message
                    // on its own
                } else {
                    sendSync();
                }
                processResults(handler, flags, adaptiveFetch);
                estimatedReceiveBufferBytes = 0;
            } catch (PGBindException se) {
                // There are three causes of this error, an
                // invalid total Bind message length, a
                // BinaryStream that cannot provide the amount
                // of data claimed by the length argument, and
                // a BinaryStream that throws an Exception
                // when reading.
                //
                // We simply do not send the Execute message
                // so we can just continue on as if nothing
                // has happened. Perhaps we need to
                // introduce an error here to force the
                // caller to rollback if there is a
                // transaction in progress?
                //
                sendSync();
                processResults(handler, flags, adaptiveFetch);
                estimatedReceiveBufferBytes = 0;
                handler
                        .handleError(new PSQLException(GT.tr("Unable to bind parameter values for statement."),
                                PSQLState.INVALID_PARAMETER_VALUE, se.getIOException()));
            }
        } catch (IOException e) {
            // Stream state is unknown after an I/O failure: hard-abort the connection.
            abort();
            handler.handleError(
                    new PSQLException(GT.tr("An I/O error occurred while sending to the backend."),
                            PSQLState.CONNECTION_FAILURE, e));
        }

        try {
            handler.handleCompletion();
            if (cleanupSavePoints) {
                releaseSavePoint(autosave, flags);
            }
        } catch (SQLException e) {
            // Rolls back to the automatic savepoint when applicable, then rethrows.
            rollbackIfRequired(autosave, e);
        }
    }
}
+
+    private boolean sendAutomaticSavepoint(Query query, int flags) throws IOException {
+        if (((flags & QueryExecutor.QUERY_SUPPRESS_BEGIN) == 0
+                || getTransactionState() == TransactionState.OPEN)
+                && query != restoreToAutoSave
+                && !"COMMIT".equalsIgnoreCase(query.getNativeSql())
+                && getAutoSave() != AutoSave.NEVER
+                // If query has no resulting fields, it cannot fail with 'cached plan must not change result type'
+                // thus no need to set a savepoint before such query
+                && (getAutoSave() == AutoSave.ALWAYS
+                // If CompositeQuery is observed, just assume it might fail and set the savepoint
+                || !(query instanceof SimpleQuery)
+                || ((SimpleQuery) query).getFields() != null)) {
 
       /*
       create a different SAVEPOINT the first time so that all subsequent SAVEPOINTS can be released
       easily. There have been reports of server resources running out if there are too many
       SAVEPOINTS.
        */
-      sendOneQuery(autoSaveQuery, SimpleQuery.NO_PARAMETERS, 1, 0,
-          QUERY_NO_RESULTS | QUERY_NO_METADATA
-              // PostgreSQL does not support bind, exec, simple, sync message flow,
-              // so we force autosavepoint to use simple if the main query is using simple
-              | QUERY_EXECUTE_AS_SIMPLE);
-      return true;
-    }
-    return false;
-  }
-
-  private void releaseSavePoint(boolean autosave, int flags) throws SQLException {
-    if ( autosave
-        && getAutoSave() == AutoSave.ALWAYS
-        && getTransactionState() == TransactionState.OPEN) {
-      try {
-        sendOneQuery(releaseAutoSave, SimpleQuery.NO_PARAMETERS, 1, 0,
-            QUERY_NO_RESULTS | QUERY_NO_METADATA
-                | QUERY_EXECUTE_AS_SIMPLE);
-
-      } catch (IOException ex) {
-        throw  new PSQLException(GT.tr("Error releasing savepoint"), PSQLState.IO_ERROR);
-      }
-    }
-  }
-
-  private void rollbackIfRequired(boolean autosave, SQLException e) throws SQLException {
-    if (autosave
-        && getTransactionState() == TransactionState.FAILED
-        && (getAutoSave() == AutoSave.ALWAYS || willHealOnRetry(e))) {
-      try {
-        // ROLLBACK and AUTOSAVE are executed as simple always to overcome "statement no longer exists S_xx"
-        execute(restoreToAutoSave, SimpleQuery.NO_PARAMETERS, new ResultHandlerDelegate(null),
-            1, 0, QUERY_NO_RESULTS | QUERY_NO_METADATA | QUERY_EXECUTE_AS_SIMPLE);
-      } catch (SQLException e2) {
-        // That's O(N), sorry
-        e.setNextException(e2);
-      }
-    }
-    throw e;
-  }
-
-  // Deadlock avoidance:
-  //
-  // It's possible for the send and receive streams to get "deadlocked" against each other since
-  // we do not have a separate thread. The scenario is this: we have two streams:
-  //
-  // driver -> TCP buffering -> server
-  // server -> TCP buffering -> driver
-  //
-  // The server behaviour is roughly:
-  // while true:
-  // read message
-  // execute message
-  // write results
-  //
-  // If the server -> driver stream has a full buffer, the write will block.
-  // If the driver is still writing when this happens, and the driver -> server
-  // stream also fills up, we deadlock: the driver is blocked on write() waiting
-  // for the server to read some more data, and the server is blocked on write()
-  // waiting for the driver to read some more data.
-  //
-  // To avoid this, we guess at how much response data we can request from the
-  // server before the server -> driver stream's buffer is full (MAX_BUFFERED_RECV_BYTES).
-  // This is the point where the server blocks on write and stops reading data. If we
-  // reach this point, we force a Sync message and read pending data from the server
-  // until ReadyForQuery, then go back to writing more queries unless we saw an error.
-  //
-  // This is not 100% reliable -- it's only done in the batch-query case and only
-  // at a reasonably high level (per query, not per message), and it's only an estimate
-  // -- so it might break. To do it correctly in all cases would seem to require a
-  // separate send or receive thread as we can only do the Sync-and-read-results
-  // operation at particular points, and also as we don't really know how much data
-  // the server is sending.
-  //
-  // Our message size estimation is coarse, and disregards asynchronous
-  // notifications, warnings/info/debug messages, etc, so the response size may be
-  // quite different from the 250 bytes assumed here even for queries that don't
-  // return data.
-  //
-  // See github issue #194 and #195 .
-  //
-  // Assume 64k server->client buffering, which is extremely conservative. A typical
-  // system will have 200kb or more of buffers for its receive buffers, and the sending
-  // system will typically have the same on the send side, giving us 400kb or so to work
-  // with. (We could check Java's receive buffer size, but prefer to assume a very
-  // conservative buffer instead, and we don't know how big the server's send
-  // buffer is.)
-  //
-  private static final int MAX_BUFFERED_RECV_BYTES = 64000;
-  private static final int NODATA_QUERY_RESPONSE_SIZE_BYTES = 250;
-
@Override
public void execute(Query[] queries, ParameterList[] parameterLists,
    BatchResultHandler batchHandler, int maxRows, int fetchSize, int flags) throws SQLException {
  // Batch entry point without adaptive fetch; delegates to the full overload.
  execute(queries, parameterLists, batchHandler, maxRows, fetchSize, flags, false);
}
-
-  @Override
-  public void execute(Query[] queries, ParameterList[] parameterLists,
-      BatchResultHandler batchHandler, int maxRows, int fetchSize, int flags, boolean adaptiveFetch)
-      throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      waitOnLock();
-      if (LOGGER.isLoggable(Level.FINEST)) {
-        LOGGER.log(Level.FINEST, "  batch execute {0} queries, handler={1}, maxRows={2}, fetchSize={3}, flags={4}",
-            new Object[]{queries.length, batchHandler, maxRows, fetchSize, flags});
-      }
-
-      flags = updateQueryMode(flags);
-
-      boolean describeOnly = (QUERY_DESCRIBE_ONLY & flags) != 0;
-      // Check parameters and resolve OIDs.
-      if (!describeOnly) {
-        for (ParameterList parameterList : parameterLists) {
-          if (parameterList != null) {
-            ((V3ParameterList) parameterList).checkAllParametersSet();
-          }
+            sendOneQuery(autoSaveQuery, SimpleQuery.NO_PARAMETERS, 1, 0,
+                    QUERY_NO_RESULTS | QUERY_NO_METADATA
+                            // PostgreSQL does not support bind, exec, simple, sync message flow,
+                            // so we force autosavepoint to use simple if the main query is using simple
+                            | QUERY_EXECUTE_AS_SIMPLE);
+            return true;
         }
-      }
-
-      boolean autosave = false;
-      ResultHandler handler = batchHandler;
-      try {
-        handler = sendQueryPreamble(batchHandler, flags);
-        autosave = sendAutomaticSavepoint(queries[0], flags);
-        estimatedReceiveBufferBytes = 0;
-
-        for (int i = 0; i < queries.length; i++) {
-          Query query = queries[i];
-          V3ParameterList parameters = (V3ParameterList) parameterLists[i];
-          if (parameters == null) {
-            parameters = SimpleQuery.NO_PARAMETERS;
-          }
-
-          sendQuery(query, parameters, maxRows, fetchSize, flags, handler, batchHandler, adaptiveFetch);
-
-          if (handler.getException() != null) {
-            break;
-          }
-        }
-
-        if (handler.getException() == null) {
-          if ((flags & QueryExecutor.QUERY_EXECUTE_AS_SIMPLE) != 0) {
-            // Sync message is not required for 'Q' execution as 'Q' ends with ReadyForQuery message
-            // on its own
-          } else {
-            sendSync();
-          }
-          processResults(handler, flags, adaptiveFetch);
-          estimatedReceiveBufferBytes = 0;
-        }
-      } catch (IOException e) {
-        abort();
-        handler.handleError(
-            new PSQLException(GT.tr("An I/O error occurred while sending to the backend."),
-                PSQLState.CONNECTION_FAILURE, e));
-      }
-
-      try {
-        handler.handleCompletion();
-        if (cleanupSavePoints) {
-          releaseSavePoint(autosave, flags);
-        }
-      } catch (SQLException e) {
-        rollbackIfRequired(autosave, e);
-      }
-    }
-  }
-
-  private ResultHandler sendQueryPreamble(final ResultHandler delegateHandler, int flags)
-      throws IOException {
-    // First, send CloseStatements for finalized SimpleQueries that had statement names assigned.
-    processDeadParsedQueries();
-    processDeadPortals();
-
-    // Send BEGIN on first statement in transaction.
-    if ((flags & QueryExecutor.QUERY_SUPPRESS_BEGIN) != 0
-        || getTransactionState() != TransactionState.IDLE) {
-      return delegateHandler;
+        return false;
     }
 
-    int beginFlags = QueryExecutor.QUERY_NO_METADATA;
-    if ((flags & QueryExecutor.QUERY_ONESHOT) != 0) {
-      beginFlags |= QueryExecutor.QUERY_ONESHOT;
-    }
+    private void releaseSavePoint(boolean autosave, int flags) throws SQLException {
+        if (autosave
+                && getAutoSave() == AutoSave.ALWAYS
+                && getTransactionState() == TransactionState.OPEN) {
+            try {
+                sendOneQuery(releaseAutoSave, SimpleQuery.NO_PARAMETERS, 1, 0,
+                        QUERY_NO_RESULTS | QUERY_NO_METADATA
+                                | QUERY_EXECUTE_AS_SIMPLE);
 
-    beginFlags |= QueryExecutor.QUERY_EXECUTE_AS_SIMPLE;
-
-    beginFlags = updateQueryMode(beginFlags);
-
-    final SimpleQuery beginQuery = (flags & QueryExecutor.QUERY_READ_ONLY_HINT) == 0 ? beginTransactionQuery : beginReadOnlyTransactionQuery;
-
-    sendOneQuery(beginQuery, SimpleQuery.NO_PARAMETERS, 0, 0, beginFlags);
-
-    // Insert a handler that intercepts the BEGIN.
-    return new ResultHandlerDelegate(delegateHandler) {
-      private boolean sawBegin = false;
-
-      @Override
-      public void handleResultRows(Query fromQuery, Field[] fields, List<Tuple> tuples,
-          ResultCursor cursor) {
-        if (sawBegin) {
-          super.handleResultRows(fromQuery, fields, tuples, cursor);
-        }
-      }
-
-      @Override
-      public void handleCommandStatus(String status, long updateCount, long insertOID) {
-        if (!sawBegin) {
-          sawBegin = true;
-          if (!"BEGIN".equals(status)) {
-            handleError(new PSQLException(GT.tr("Expected command status BEGIN, got {0}.", status),
-                PSQLState.PROTOCOL_VIOLATION));
-          }
-        } else {
-          super.handleCommandStatus(status, updateCount, insertOID);
-        }
-      }
-    };
-  }
-
-  //
-  // Fastpath
-  //
-
/**
 * Executes a backend fastpath function call ('F' message) and returns the raw
 * binary result, or {@code null} when the function returned SQL NULL.
 */
@Override
@SuppressWarnings("deprecation")
public byte [] fastpathCall(int fnid, ParameterList parameters,
    boolean suppressBegin)
    throws SQLException {
  try (ResourceLock ignore = lock.obtain()) {
    waitOnLock();
    if (!suppressBegin) {
      // Issues BEGIN first when no transaction is open (see doSubprotocolBegin).
      doSubprotocolBegin();
    }
    try {
      sendFastpathCall(fnid, (SimpleParameterList) parameters);
      return receiveFastpathResult();
    } catch (IOException ioe) {
      // Connection state is unknown after an I/O failure; abort it.
      abort();
      throw new PSQLException(GT.tr("An I/O error occurred while sending to the backend."),
          PSQLState.CONNECTION_FAILURE, ioe);
    }
  }
}
-
-  public void doSubprotocolBegin() throws SQLException {
-    if (getTransactionState() == TransactionState.IDLE) {
-
-      LOGGER.log(Level.FINEST, "Issuing BEGIN before fastpath or copy call.");
-
-      ResultHandler handler = new ResultHandlerBase() {
-        private boolean sawBegin = false;
-
-        @Override
-        public void handleCommandStatus(String status, long updateCount, long insertOID) {
-          if (!sawBegin) {
-            if (!"BEGIN".equals(status)) {
-              handleError(
-                  new PSQLException(GT.tr("Expected command status BEGIN, got {0}.", status),
-                      PSQLState.PROTOCOL_VIOLATION));
+            } catch (IOException ex) {
+                throw new PSQLException(GT.tr("Error releasing savepoint"), PSQLState.IO_ERROR);
             }
-            sawBegin = true;
-          } else {
-            handleError(new PSQLException(GT.tr("Unexpected command status: {0}.", status),
-                PSQLState.PROTOCOL_VIOLATION));
-          }
         }
-
-        @Override
-        public void handleWarning(SQLWarning warning) {
-          // we don't want to ignore warnings and it would be tricky
-          // to chain them back to the connection, so since we don't
-          // expect to get them in the first place, we just consider
-          // them errors.
-          handleError(warning);
-        }
-      };
-
-      try {
-        /* Send BEGIN with simple protocol preferred */
-        int beginFlags = QueryExecutor.QUERY_NO_METADATA
-                         | QueryExecutor.QUERY_ONESHOT
-                         | QueryExecutor.QUERY_EXECUTE_AS_SIMPLE;
-        beginFlags = updateQueryMode(beginFlags);
-        sendOneQuery(beginTransactionQuery, SimpleQuery.NO_PARAMETERS, 0, 0, beginFlags);
-        sendSync();
-        processResults(handler, 0);
-        estimatedReceiveBufferBytes = 0;
-      } catch (IOException ioe) {
-        throw new PSQLException(GT.tr("An I/O error occurred while sending to the backend."),
-            PSQLState.CONNECTION_FAILURE, ioe);
-      }
     }
 
-  }
-
/**
 * Creates a parameter list sized for a fastpath function call with
 * {@code count} arguments.
 */
@Override
@SuppressWarnings("deprecation")
public ParameterList createFastpathParameters(int count) {
  return new SimpleParameterList(count, this);
}
-
/**
 * Encodes and flushes a FunctionCall ('F') protocol message for function OID
 * {@code fnid} with the given parameters, requesting a binary result.
 */
private void sendFastpathCall(int fnid, SimpleParameterList params)
    throws SQLException, IOException {
  if (LOGGER.isLoggable(Level.FINEST)) {
    LOGGER.log(Level.FINEST, " FE=> FunctionCall({0}, {1} params)", new Object[]{fnid, params.getParameterCount()});
  }

  //
  // Total size = 4 (length)
  // + 4 (function OID)
  // + 2 (format code count) + N * 2 (format codes)
  // + 2 (parameter count) + encodedSize (parameters)
  // + 2 (result format)

  int paramCount = params.getParameterCount();
  int encodedSize = 0;
  for (int i = 1; i <= paramCount; i++) {
    if (params.isNull(i)) {
      // NULL is encoded as just a length word of -1 (4 bytes, no payload).
      encodedSize += 4;
    } else {
      encodedSize += 4 + params.getV3Length(i);
    }
  }

  pgStream.sendChar('F');
  pgStream.sendInteger4(4 + 4 + 2 + 2 * paramCount + 2 + encodedSize + 2);
  pgStream.sendInteger4(fnid);
  pgStream.sendInteger2(paramCount);
  for (int i = 1; i <= paramCount; i++) {
    // Per-parameter format code: 1 = binary, 0 = text.
    pgStream.sendInteger2(params.isBinary(i) ? 1 : 0);
  }
  pgStream.sendInteger2(paramCount);
  for (int i = 1; i <= paramCount; i++) {
    if (params.isNull(i)) {
      pgStream.sendInteger4(-1);
    } else {
      pgStream.sendInteger4(params.getV3Length(i)); // Parameter size
      params.writeV3Value(i, pgStream);
    }
  }
  pgStream.sendInteger2(1); // Binary result format
  pgStream.flush();
}
-
// Just for API compatibility with previous versions.
@Override
public void processNotifies() throws SQLException {
  // Negative timeout: do not block, only drain already-pending messages.
  processNotifies(-1);
}
-
/**
 * Polls the backend for asynchronous notifications (NOTIFY) while idle.
 *
 * @param timeoutMillis when &gt; 0, block for this time
 *                      when =0, block forever
 *                      when &lt; 0, don't block
 */
@Override
public void processNotifies(int timeoutMillis) throws SQLException {
  try (ResourceLock ignore = lock.obtain()) {
    waitOnLock();
    // Asynchronous notifies only arrive when we are not in a transaction
    if (getTransactionState() != TransactionState.IDLE) {
      return;
    }

    if (hasNotifications()) {
      // No need to timeout when there are already notifications. We just check for more in this case.
      timeoutMillis = -1;
    }

    boolean useTimeout = timeoutMillis > 0;
    long startTime = 0L;
    int oldTimeout = 0;
    if (useTimeout) {
      startTime = TimeUnit.NANOSECONDS.toMillis(System.nanoTime());
      try {
        // Remember the socket timeout so it can be restored in the finally block.
        oldTimeout = pgStream.getSocket().getSoTimeout();
      } catch (SocketException e) {
        throw new PSQLException(GT.tr("An error occurred while trying to get the socket "
            + "timeout."), PSQLState.CONNECTION_FAILURE, e);
      }
    }

    try {
      while (timeoutMillis >= 0 || pgStream.hasMessagePending()) {
        if (useTimeout && timeoutMillis >= 0) {
          setSocketTimeout(timeoutMillis);
        }
        int c = pgStream.receiveChar();
        if (useTimeout && timeoutMillis >= 0) {
          setSocketTimeout(0); // Don't timeout after first char
        }
        switch (c) {
          case 'A': // Asynchronous Notify
            receiveAsyncNotify();
            // Got a notification: stop blocking, only drain what is pending.
            timeoutMillis = -1;
            continue;
          case 'E':
            // Error Response (response to pretty much everything; backend then skips until Sync)
            throw receiveErrorResponse();
          case 'N': // Notice Response (warnings / info)
            SQLWarning warning = receiveNoticeResponse();
            addWarning(warning);
            if (useTimeout) {
              // Deduct the time spent reading the notice from the remaining timeout.
              long newTimeMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime());
              timeoutMillis = timeoutMillis + (int)(startTime - newTimeMillis); // Overflows after 49 days, ignore that
              startTime = newTimeMillis;
              if (timeoutMillis == 0) {
                timeoutMillis = -1; // Don't accidentally wait forever
              }
            }
            break;
          default:
            throw new PSQLException(GT.tr("Unknown Response Type {0}.", (char) c),
                PSQLState.CONNECTION_FAILURE);
        }
      }
    } catch (SocketTimeoutException ioe) {
      // No notifications this time...
    } catch (IOException ioe) {
      throw new PSQLException(GT.tr("An I/O error occurred while sending to the backend."),
          PSQLState.CONNECTION_FAILURE, ioe);
    } finally {
      if (useTimeout) {
        setSocketTimeout(oldTimeout);
      }
    }
  }
}
-
-  private void setSocketTimeout(int millis) throws PSQLException {
-    try {
-      Socket s = pgStream.getSocket();
-      if (!s.isClosed()) { // Is this check required?
-        pgStream.setNetworkTimeout(millis);
-      }
-    } catch (IOException e) {
-      throw new PSQLException(GT.tr("An error occurred while trying to reset the socket timeout."),
-        PSQLState.CONNECTION_FAILURE, e);
-    }
-  }
-
-  private byte [] receiveFastpathResult() throws IOException, SQLException {
-    boolean endQuery = false;
-    SQLException error = null;
-    byte[] returnValue = null;
-
-    while (!endQuery) {
-      int c = pgStream.receiveChar();
-      switch (c) {
-        case 'A': // Asynchronous Notify
-          receiveAsyncNotify();
-          break;
-
-        case 'E':
-          // Error Response (response to pretty much everything; backend then skips until Sync)
-          SQLException newError = receiveErrorResponse();
-          if (error == null) {
-            error = newError;
-          } else {
-            error.setNextException(newError);
-          }
-          // keep processing
-          break;
-
-        case 'N': // Notice Response (warnings / info)
-          SQLWarning warning = receiveNoticeResponse();
-          addWarning(warning);
-          break;
-
-        case 'Z': // Ready For Query (eventual response to Sync)
-          receiveRFQ();
-          endQuery = true;
-          break;
-
-        case 'V': // FunctionCallResponse
-          int msgLen = pgStream.receiveInteger4();
-          int valueLen = pgStream.receiveInteger4();
-
-          LOGGER.log(Level.FINEST, " <=BE FunctionCallResponse({0} bytes)", valueLen);
-
-          if (valueLen != -1) {
-            byte[] buf = new byte[valueLen];
-            pgStream.receive(buf, 0, valueLen);
-            returnValue = buf;
-          }
-
-          break;
-
-        case 'S': // Parameter Status
-          try {
-            receiveParameterStatus();
-          } catch (SQLException e) {
-            if (error == null) {
-              error = e;
-            } else {
-              error.setNextException(e);
+    private void rollbackIfRequired(boolean autosave, SQLException e) throws SQLException {
+        if (autosave
+                && getTransactionState() == TransactionState.FAILED
+                && (getAutoSave() == AutoSave.ALWAYS || willHealOnRetry(e))) {
+            try {
+                // ROLLBACK and AUTOSAVE are executed as simple always to overcome "statement no longer exists S_xx"
+                execute(restoreToAutoSave, SimpleQuery.NO_PARAMETERS, new ResultHandlerDelegate(null),
+                        1, 0, QUERY_NO_RESULTS | QUERY_NO_METADATA | QUERY_EXECUTE_AS_SIMPLE);
+            } catch (SQLException e2) {
+                // That's O(N), sorry
+                e.setNextException(e2);
             }
-            endQuery = true;
-          }
-          break;
-
-        default:
-          throw new PSQLException(GT.tr("Unknown Response Type {0}.", (char) c),
-              PSQLState.CONNECTION_FAILURE);
-      }
-
+        }
+        throw e;
     }
 
-    // did we get an error during this query?
-    if (error != null) {
-      throw error;
@Override
public void execute(Query[] queries, ParameterList[] parameterLists,
                    BatchResultHandler batchHandler, int maxRows, int fetchSize, int flags) throws SQLException {
    // Batch entry point without adaptive fetch; delegates to the full overload.
    execute(queries, parameterLists, batchHandler, maxRows, fetchSize, flags, false);
}
 
-    return returnValue;
-  }
-
-  //
-  // Copy subprotocol implementation
-  //
-
/**
 * Sends given query to BE to start, initialize and lock connection for a CopyOperation.
 *
 * @param sql COPY FROM STDIN / COPY TO STDOUT statement
 * @param suppressBegin when {@code false}, a BEGIN is issued first if no transaction is open
 * @return CopyIn or CopyOut operation object
 * @throws SQLException on failure
 */
@Override
public CopyOperation startCopy(String sql, boolean suppressBegin)
    throws SQLException {
  try (ResourceLock ignore = lock.obtain()) {
    waitOnLock();
    if (!suppressBegin) {
      doSubprotocolBegin();
    }
    // The COPY statement is sent as a plain simple-protocol Query ('Q') message.
    byte[] buf = sql.getBytes(StandardCharsets.UTF_8);

    try {
      LOGGER.log(Level.FINEST, " FE=> Query(CopyStart)");

      pgStream.sendChar('Q');
      pgStream.sendInteger4(buf.length + 4 + 1);
      pgStream.send(buf);
      pgStream.sendChar(0);
      pgStream.flush();

      return processCopyResults(null, true);
      // expect a CopyInResponse or CopyOutResponse to our query above
    } catch (IOException ioe) {
      throw new PSQLException(GT.tr("Database connection failed when starting copy"),
          PSQLState.CONNECTION_FAILURE, ioe);
    }
  }
}
-
-  /**
-   * Locks connection and calls initializer for a new CopyOperation Called via startCopy ->
-   * processCopyResults.
-   *
-   * @param op an uninitialized CopyOperation
-   * @throws SQLException on locking failure
-   * @throws IOException on database connection failure
-   */
-  private void initCopy(CopyOperationImpl op) throws SQLException, IOException {
-    try (ResourceLock ignore = lock.obtain()) {
-      pgStream.receiveInteger4(); // length not used
-      int rowFormat = pgStream.receiveChar();
-      int numFields = pgStream.receiveInteger2();
-      int[] fieldFormats = new int[numFields];
-
-      for (int i = 0; i < numFields; i++) {
-        fieldFormats[i] = pgStream.receiveInteger2();
-      }
-
-      lock(op);
-      op.init(this, rowFormat, fieldFormats);
-    }
-  }
-
/**
 * Finishes a copy operation and unlocks connection discarding any exchanged data.
 *
 * @param op the copy operation presumably currently holding lock on this connection
 * @throws SQLException on any additional failure
 */
public void cancelCopy(CopyOperationImpl op) throws SQLException {
  if (!hasLock(op)) {
    throw new PSQLException(GT.tr("Tried to cancel an inactive copy operation"),
        PSQLState.OBJECT_NOT_IN_STATE);
  }

  SQLException error = null;
  int errors = 0;

  try {
    if (op instanceof CopyIn) {
      // COPY IN is cancelled by sending a CopyFail ('f') message and then
      // draining responses until the server releases the copy lock.
      try (ResourceLock ignore = lock.obtain()) {
        LOGGER.log(Level.FINEST, "FE => CopyFail");
        final byte[] msg = "Copy cancel requested".getBytes(StandardCharsets.US_ASCII);
        pgStream.sendChar('f'); // CopyFail
        pgStream.sendInteger4(5 + msg.length);
        pgStream.send(msg);
        pgStream.sendChar(0);
        pgStream.flush();
        do {
          try {
            processCopyResults(op, true); // discard rest of input
          } catch (SQLException se) { // expected error response to failing copy
            errors++;
            // Chain any earlier error onto the tail of the newest one so
            // nothing is lost; the newest exception becomes the head.
            if (error != null) {
              SQLException e = se;
              SQLException next;
              while ((next = e.getNextException()) != null) {
                e = next;
              }
              e.setNextException(error);
            }
            error = se;
          }
        } while (hasLock(op));
      }
    } else if (op instanceof CopyOut) {
      // COPY OUT is cancelled via an out-of-band query cancel request.
      sendQueryCancel();
    }

  } catch (IOException ioe) {
    throw new PSQLException(GT.tr("Database connection failed when canceling copy operation"),
        PSQLState.CONNECTION_FAILURE, ioe);
  } finally {
    // Need to ensure the lock isn't held anymore, or else
    // future operations, rather than failing due to the
    // broken connection, will simply hang waiting for this
    // lock.
    try (ResourceLock ignore = lock.obtain()) {
      if (hasLock(op)) {
        unlock(op);
      }
    }
  }

  // For COPY IN, exactly one error response to the CopyFail is expected.
  if (op instanceof CopyIn) {
    if (errors < 1) {
      throw new PSQLException(GT.tr("Missing expected error response to copy cancel request"),
          PSQLState.COMMUNICATION_ERROR);
    } else if (errors > 1) {
      throw new PSQLException(
          GT.tr("Got {0} error responses to single copy cancel request", String.valueOf(errors)),
          PSQLState.COMMUNICATION_ERROR, error);
    }
  }
}
-
/**
 * Finishes writing to copy and unlocks connection.
 *
 * @param op the copy operation presumably currently holding lock on this connection
 * @return number of rows updated for server versions 8.2 or newer
 * @throws SQLException on failure
 */
public long endCopy(CopyOperationImpl op) throws SQLException {
  try (ResourceLock ignore = lock.obtain()) {
    if (!hasLock(op)) {
      throw new PSQLException(GT.tr("Tried to end inactive copy"), PSQLState.OBJECT_NOT_IN_STATE);
    }

    try {
      LOGGER.log(Level.FINEST, " FE=> CopyDone");

      pgStream.sendChar('c'); // CopyDone
      pgStream.sendInteger4(4);
      pgStream.flush();

      // Drain responses until the server releases the copy lock.
      do {
        processCopyResults(op, true);
      } while (hasLock(op));
      return op.getHandledRowCount();
    } catch (IOException ioe) {
      throw new PSQLException(GT.tr("Database connection failed when ending copy"),
          PSQLState.CONNECTION_FAILURE, ioe);
    }
  }
}
-
/**
 * Sends data during a live COPY IN operation. Only unlocks the connection if server suddenly
 * returns CommandComplete, which should not happen
 *
 * @param op the CopyIn operation presumably currently holding lock on this connection
 * @param data bytes to send
 * @param off index of first byte to send (usually 0)
 * @param siz number of bytes to send (usually data.length)
 * @throws SQLException on failure
 */
public void writeToCopy(CopyOperationImpl op, byte[] data, int off, int siz)
    throws SQLException {
  try (ResourceLock ignore = lock.obtain()) {
    if (!hasLock(op)) {
      throw new PSQLException(GT.tr("Tried to write to an inactive copy operation"),
          PSQLState.OBJECT_NOT_IN_STATE);
    }

    LOGGER.log(Level.FINEST, " FE=> CopyData({0})", siz);

    try {
      // CopyData ('d') message: the length word includes its own 4 bytes.
      pgStream.sendChar('d');
      pgStream.sendInteger4(siz + 4);
      pgStream.send(data, off, siz);
    } catch (IOException ioe) {
      throw new PSQLException(GT.tr("Database connection failed when writing to copy"),
          PSQLState.CONNECTION_FAILURE, ioe);
    }
  }
}
-
-  /**
-   * Sends data during a live COPY IN operation. Only unlocks the connection if server suddenly
-   * returns CommandComplete, which should not happen
-   *
-   * @param op   the CopyIn operation presumably currently holding lock on this connection
-   * @param from the source of bytes, e.g. a ByteBufferByteStreamWriter
-   * @throws SQLException on failure
-   */
-  public void writeToCopy(CopyOperationImpl op, ByteStreamWriter from)
-      throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      if (!hasLock(op)) {
-        throw new PSQLException(GT.tr("Tried to write to an inactive copy operation"),
-            PSQLState.OBJECT_NOT_IN_STATE);
-      }
-
-      int siz = from.getLength();
-      LOGGER.log(Level.FINEST, " FE=> CopyData({0})", siz);
-
-      try {
-        pgStream.sendChar('d');
-        pgStream.sendInteger4(siz + 4);
-        pgStream.send(from);
-      } catch (IOException ioe) {
-        throw new PSQLException(GT.tr("Database connection failed when writing to copy"),
-            PSQLState.CONNECTION_FAILURE, ioe);
-      }
-    }
-  }
-
-  public void flushCopy(CopyOperationImpl op) throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      if (!hasLock(op)) {
-        throw new PSQLException(GT.tr("Tried to write to an inactive copy operation"),
-            PSQLState.OBJECT_NOT_IN_STATE);
-      }
-
-      try {
-        pgStream.flush();
-      } catch (IOException ioe) {
-        throw new PSQLException(GT.tr("Database connection failed when writing to copy"),
-            PSQLState.CONNECTION_FAILURE, ioe);
-      }
-    }
-  }
-
-  /**
-   * Wait for a row of data to be received from server on an active copy operation
-   * Connection gets unlocked by processCopyResults() at end of operation.
-   *
-   * @param op the copy operation presumably currently holding lock on this connection
-   * @param block whether to block waiting for input
-   * @throws SQLException on any failure
-   */
-  void readFromCopy(CopyOperationImpl op, boolean block) throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      if (!hasLock(op)) {
-        throw new PSQLException(GT.tr("Tried to read from inactive copy"),
-            PSQLState.OBJECT_NOT_IN_STATE);
-      }
-
-      try {
-        processCopyResults(op, block); // expect a call to handleCopydata() to store the data
-      } catch (IOException ioe) {
-        throw new PSQLException(GT.tr("Database connection failed when reading from copy"),
-            PSQLState.CONNECTION_FAILURE, ioe);
-      }
-    }
-  }
-
-  AtomicBoolean processingCopyResults = new AtomicBoolean(false);
-
-  /**
-   * Handles copy sub protocol responses from server. Unlocks at end of sub protocol, so operations
-   * on pgStream or QueryExecutor are not allowed in a method after calling this!
-   *
-   * @param block whether to block waiting for input
-   * @return CopyIn when COPY FROM STDIN starts; CopyOut when COPY TO STDOUT starts; null when copy
-   *         ends; otherwise, the operation given as parameter.
-   * @throws SQLException in case of misuse
-   * @throws IOException from the underlying connection
-   */
-  CopyOperationImpl processCopyResults(CopyOperationImpl op, boolean block)
-      throws SQLException, IOException {
-
-    /*
-    * fixes issue #1592 where one thread closes the stream and another is reading it
-     */
-    if (pgStream.isClosed()) {
-      throw new PSQLException(GT.tr("PGStream is closed"),
-          PSQLState.CONNECTION_DOES_NOT_EXIST);
-    }
-    /*
-    *  This is a hack as we should not end up here, but sometimes do with large copy operations.
-     */
-    if (!processingCopyResults.compareAndSet(false, true)) {
-      LOGGER.log(Level.INFO, "Ignoring request to process copy results, already processing");
-      return null;
-    }
-
-    // put this all in a try, finally block and reset the processingCopyResults in the finally clause
-    try {
-      boolean endReceiving = false;
-      SQLException error = null;
-      SQLException errors = null;
-      int len;
-
-      while (!endReceiving && (block || pgStream.hasMessagePending())) {
-
-        // There is a bug in the server's implementation of the copy
-        // protocol. It returns command complete immediately upon
-        // receiving the EOF marker in the binary protocol,
-        // potentially before we've issued CopyDone. When we are not
-        // blocking, we don't think we are done, so we hold off on
-        // processing command complete and any subsequent messages
-        // until we actually are done with the copy.
-        //
-        if (!block) {
-          int c = pgStream.peekChar();
-          if (c == 'C') {
-            // CommandComplete
-            LOGGER.log(Level.FINEST, " <=BE CommandStatus, Ignored until CopyDone");
-            break;
-          }
-        }
-
-        int c = pgStream.receiveChar();
-        switch (c) {
-
-          case 'A': // Asynchronous Notify
-
-            LOGGER.log(Level.FINEST, " <=BE Asynchronous Notification while copying");
-
-            receiveAsyncNotify();
-            break;
-
-          case 'N': // Notice Response
-
-            LOGGER.log(Level.FINEST, " <=BE Notification while copying");
-
-            addWarning(receiveNoticeResponse());
-            break;
-
-          case 'C': // Command Complete
-
-            String status = receiveCommandStatus();
-
-            try {
-              if (op == null) {
-                throw new PSQLException(GT
-                    .tr("Received CommandComplete ''{0}'' without an active copy operation", status),
-                    PSQLState.OBJECT_NOT_IN_STATE);
-              }
-              op.handleCommandStatus(status);
-            } catch (SQLException se) {
-              error = se;
-            }
-
-            block = true;
-            break;
-
-          case 'E': // ErrorMessage (expected response to CopyFail)
-
-            error = receiveErrorResponse();
-            // We've received the error and we now expect to receive
-            // Ready for query, but we must block because it might still be
-            // on the wire and not here yet.
-            block = true;
-            break;
-
-          case 'G': // CopyInResponse
-
-            LOGGER.log(Level.FINEST, " <=BE CopyInResponse");
-
-            if (op != null) {
-              error = new PSQLException(GT.tr("Got CopyInResponse from server during an active {0}",
-                  op.getClass().getName()), PSQLState.OBJECT_NOT_IN_STATE);
-            }
-
-            op = new CopyInImpl();
-            initCopy(op);
-            endReceiving = true;
-            break;
-
-          case 'H': // CopyOutResponse
-
-            LOGGER.log(Level.FINEST, " <=BE CopyOutResponse");
-
-            if (op != null) {
-              error = new PSQLException(GT.tr("Got CopyOutResponse from server during an active {0}",
-                  op.getClass().getName()), PSQLState.OBJECT_NOT_IN_STATE);
-            }
-
-            op = new CopyOutImpl();
-            initCopy(op);
-            endReceiving = true;
-            break;
-
-          case 'W': // CopyBothResponse
-
-            LOGGER.log(Level.FINEST, " <=BE CopyBothResponse");
-
-            if (op != null) {
-              error = new PSQLException(GT.tr("Got CopyBothResponse from server during an active {0}",
-                  op.getClass().getName()), PSQLState.OBJECT_NOT_IN_STATE);
-            }
-
-            op = new CopyDualImpl();
-            initCopy(op);
-            endReceiving = true;
-            break;
-
-          case 'd': // CopyData
-
-            LOGGER.log(Level.FINEST, " <=BE CopyData");
-
-            len = pgStream.receiveInteger4() - 4;
-
-            assert len > 0 : "Copy Data length must be greater than 4";
-
-            byte[] buf = pgStream.receive(len);
-            if (op == null) {
-              error = new PSQLException(GT.tr("Got CopyData without an active copy operation"),
-                  PSQLState.OBJECT_NOT_IN_STATE);
-            } else if (!(op instanceof CopyOut)) {
-              error = new PSQLException(
-                  GT.tr("Unexpected copydata from server for {0}", op.getClass().getName()),
-                  PSQLState.COMMUNICATION_ERROR);
-            } else {
-              op.handleCopydata(buf);
-            }
-            endReceiving = true;
-            break;
-
-          case 'c': // CopyDone (expected after all copydata received)
-
-            LOGGER.log(Level.FINEST, " <=BE CopyDone");
-
-            len = pgStream.receiveInteger4() - 4;
-            if (len > 0) {
-              pgStream.receive(len); // not in specification; should never appear
-            }
-
-            if (!(op instanceof CopyOut)) {
-              error = new PSQLException("Got CopyDone while not copying from server",
-                  PSQLState.OBJECT_NOT_IN_STATE);
-            }
-
-            // keep receiving since we expect a CommandComplete
-            block = true;
-            break;
-          case 'S': // Parameter Status
-            try {
-              receiveParameterStatus();
-            } catch (SQLException e) {
-              error = e;
-              endReceiving = true;
-            }
-            break;
-
-          case 'Z': // ReadyForQuery: After FE:CopyDone => BE:CommandComplete
-
-            receiveRFQ();
-            if (op != null && hasLock(op)) {
-              unlock(op);
-            }
-            op = null;
-            endReceiving = true;
-            break;
-
-          // If the user sends a non-copy query, we've got to handle some additional things.
-          //
-          case 'T': // Row Description (response to Describe)
-            LOGGER.log(Level.FINEST, " <=BE RowDescription (during copy ignored)");
-
-            skipMessage();
-            break;
-
-          case 'D': // DataRow
-            LOGGER.log(Level.FINEST, " <=BE DataRow (during copy ignored)");
-
-            skipMessage();
-            break;
-
-          default:
-            throw new IOException(
-                GT.tr("Unexpected packet type during copy: {0}", Integer.toString(c)));
-        }
-
-        // Collect errors into a neat chain for completeness
-        if (error != null) {
-          if (errors != null) {
-            error.setNextException(errors);
-          }
-          errors = error;
-          error = null;
-        }
-      }
-
-      if (errors != null) {
-        throw errors;
-      }
-      return op;
-
-    } finally {
-      /*
-      reset here in the finally block to make sure it really is cleared
-       */
-      processingCopyResults.set(false);
-    }
-  }
-
-  /*
-   * To prevent client/server protocol deadlocks, we try to manage the estimated recv buffer size
-   * and force a sync +flush and process results if we think it might be getting too full.
-   *
-   * See the comments above MAX_BUFFERED_RECV_BYTES's declaration for details.
-   */
-  private void flushIfDeadlockRisk(Query query, boolean disallowBatching,
-      ResultHandler resultHandler,
-      BatchResultHandler batchHandler,
-      final int flags) throws IOException {
-    // Assume all statements need at least this much reply buffer space,
-    // plus params
-    estimatedReceiveBufferBytes += NODATA_QUERY_RESPONSE_SIZE_BYTES;
-
-    SimpleQuery sq = (SimpleQuery) query;
-    if (sq.isStatementDescribed()) {
-      /*
-       * Estimate the response size of the fields and add it to the expected response size.
-       *
-       * It's impossible for us to estimate the rowcount. We'll assume one row, as that's the common
-       * case for batches and we're leaving plenty of breathing room in this approach. It's still
-       * not deadlock-proof though; see pgjdbc github issues #194 and #195.
-       */
-      int maxResultRowSize = sq.getMaxResultRowSize();
-      if (maxResultRowSize >= 0) {
-        estimatedReceiveBufferBytes += maxResultRowSize;
-      } else {
-        LOGGER.log(Level.FINEST, "Couldn't estimate result size or result size unbounded, "
-            + "disabling batching for this query.");
-        disallowBatching = true;
-      }
-    } else {
-      /*
-       * We only describe a statement if we're expecting results from it, so it's legal to batch
-       * unprepared statements. We'll abort later if we get any uresults from them where none are
-       * expected. For now all we can do is hope the user told us the truth and assume that
-       * NODATA_QUERY_RESPONSE_SIZE_BYTES is enough to cover it.
-       */
-    }
-
-    if (disallowBatching || estimatedReceiveBufferBytes >= MAX_BUFFERED_RECV_BYTES) {
-      LOGGER.log(Level.FINEST, "Forcing Sync, receive buffer full or batching disallowed");
-      sendSync();
-      processResults(resultHandler, flags);
-      estimatedReceiveBufferBytes = 0;
-      if (batchHandler != null) {
-        batchHandler.secureProgress();
-      }
-    }
-
-  }
-
-  /*
-   * Send a query to the backend.
-   */
-  private void sendQuery(Query query, V3ParameterList parameters, int maxRows, int fetchSize,
-      int flags, ResultHandler resultHandler,
-      BatchResultHandler batchHandler, boolean adaptiveFetch) throws IOException, SQLException {
-    // Now the query itself.
-    Query[] subqueries = query.getSubqueries();
-    SimpleParameterList[] subparams = parameters.getSubparams();
-
-    // We know this is deprecated, but still respect it in case anyone's using it.
-    // PgJDBC its self no longer does.
-    @SuppressWarnings("deprecation")
-    boolean disallowBatching = (flags & QueryExecutor.QUERY_DISALLOW_BATCHING) != 0;
-
-    if (subqueries == null) {
-      flushIfDeadlockRisk(query, disallowBatching, resultHandler, batchHandler, flags);
-
-      // If we saw errors, don't send anything more.
-      if (resultHandler.getException() == null) {
-        if (fetchSize != 0) {
-          adaptiveFetchCache.addNewQuery(adaptiveFetch, query);
-        }
-        sendOneQuery((SimpleQuery) query, (SimpleParameterList) parameters, maxRows, fetchSize,
-            flags);
-      }
-    } else {
-      for (int i = 0; i < subqueries.length; i++) {
-        final Query subquery = subqueries[i];
-        flushIfDeadlockRisk(subquery, disallowBatching, resultHandler, batchHandler, flags);
-
-        // If we saw errors, don't send anything more.
-        if (resultHandler.getException() != null) {
-          break;
-        }
-
-        // In the situation where parameters is already
-        // NO_PARAMETERS it cannot know the correct
-        // number of array elements to return in the
-        // above call to getSubparams(), so it must
-        // return null which we check for here.
-        //
-        SimpleParameterList subparam = SimpleQuery.NO_PARAMETERS;
-        if (subparams != null) {
-          subparam = subparams[i];
-        }
-        if (fetchSize != 0) {
-          adaptiveFetchCache.addNewQuery(adaptiveFetch, subquery);
-        }
-        sendOneQuery((SimpleQuery) subquery, subparam, maxRows, fetchSize, flags);
-      }
-    }
-  }
-
-  //
-  // Message sending
-  //
-
-  private void sendSync() throws IOException {
-    LOGGER.log(Level.FINEST, " FE=> Sync");
-
-    pgStream.sendChar('S'); // Sync
-    pgStream.sendInteger4(4); // Length
-    pgStream.flush();
-    // Below "add queues" are likely not required at all
-    pendingExecuteQueue.add(new ExecuteRequest(sync, null, true));
-    pendingDescribePortalQueue.add(sync);
-  }
-
-  private void sendParse(SimpleQuery query, SimpleParameterList params, boolean oneShot)
-      throws IOException {
-    // Already parsed, or we have a Parse pending and the types are right?
-    int[] typeOIDs = params.getTypeOIDs();
-    if (query.isPreparedFor(typeOIDs, deallocateEpoch)) {
-      return;
-    }
-
-    // Clean up any existing statement, as we can't use it.
-    query.unprepare();
-    processDeadParsedQueries();
-
-    // Remove any cached Field values. The re-parsed query might report different
-    // fields because input parameter types may result in different type inferences
-    // for unspecified types.
-    query.setFields(null);
-
-    String statementName = null;
-    if (!oneShot) {
-      // Generate a statement name to use.
-      statementName = "S_" + (nextUniqueID++);
-
-      // And prepare the new statement.
-      // NB: Must clone the OID array, as it's a direct reference to
-      // the SimpleParameterList's internal array that might be modified
-      // under us.
-      query.setStatementName(statementName, deallocateEpoch);
-      query.setPrepareTypes(typeOIDs);
-      registerParsedQuery(query, statementName);
-    }
-
-    byte[] encodedStatementName = query.getEncodedStatementName();
-    String nativeSql = query.getNativeSql();
-
-    if (LOGGER.isLoggable(Level.FINEST)) {
-      StringBuilder sbuf = new StringBuilder(" FE=> Parse(stmt=" + statementName + ",query=\"");
-      sbuf.append(nativeSql);
-      sbuf.append("\",oids={");
-      for (int i = 1; i <= params.getParameterCount(); i++) {
-        if (i != 1) {
-          sbuf.append(",");
-        }
-        sbuf.append(params.getTypeOID(i));
-      }
-      sbuf.append("})");
-      LOGGER.log(Level.FINEST, sbuf.toString());
-    }
-
-    //
-    // Send Parse.
-    //
-
-    byte[] queryUtf8 = nativeSql.getBytes(StandardCharsets.UTF_8);
-
-    // Total size = 4 (size field)
-    // + N + 1 (statement name, zero-terminated)
-    // + N + 1 (query, zero terminated)
-    // + 2 (parameter count) + N * 4 (parameter types)
-    int encodedSize = 4
-        + (encodedStatementName == null ? 0 : encodedStatementName.length) + 1
-        + queryUtf8.length + 1
-        + 2 + 4 * params.getParameterCount();
-
-    pgStream.sendChar('P'); // Parse
-    pgStream.sendInteger4(encodedSize);
-    if (encodedStatementName != null) {
-      pgStream.send(encodedStatementName);
-    }
-    pgStream.sendChar(0); // End of statement name
-    pgStream.send(queryUtf8); // Query string
-    pgStream.sendChar(0); // End of query string.
-    pgStream.sendInteger2(params.getParameterCount()); // # of parameter types specified
-    for (int i = 1; i <= params.getParameterCount(); i++) {
-      pgStream.sendInteger4(params.getTypeOID(i));
-    }
-
-    pendingParseQueue.add(query);
-  }
-
-  private void sendBind(SimpleQuery query, SimpleParameterList params, Portal portal,
-      boolean noBinaryTransfer) throws IOException {
-    //
-    // Send Bind.
-    //
-
-    String statementName = query.getStatementName();
-    byte[] encodedStatementName = query.getEncodedStatementName();
-    byte[] encodedPortalName = portal == null ? null : portal.getEncodedPortalName();
-
-    if (LOGGER.isLoggable(Level.FINEST)) {
-      StringBuilder sbuf = new StringBuilder(" FE=> Bind(stmt=" + statementName + ",portal=" + portal);
-      for (int i = 1; i <= params.getParameterCount(); i++) {
-        sbuf.append(",$").append(i).append("=<")
-            .append(params.toString(i, true))
-            .append(">,type=").append(Oid.toString(params.getTypeOID(i)));
-      }
-      sbuf.append(")");
-      LOGGER.log(Level.FINEST, sbuf.toString());
-    }
-
-    // Total size = 4 (size field) + N + 1 (destination portal)
-    // + N + 1 (statement name)
-    // + 2 (param format code count) + N * 2 (format codes)
-    // + 2 (param value count) + N (encoded param value size)
-    // + 2 (result format code count, 0)
-    long encodedSize = 0;
-    for (int i = 1; i <= params.getParameterCount(); i++) {
-      if (params.isNull(i)) {
-        encodedSize += 4;
-      } else {
-        encodedSize += (long) 4 + params.getV3Length(i);
-      }
-    }
-
-    Field[] fields = query.getFields();
-    if (!noBinaryTransfer && query.needUpdateFieldFormats() && fields != null) {
-      for (Field field : fields) {
-        if (useBinary(field)) {
-          field.setFormat(Field.BINARY_FORMAT);
-          query.setHasBinaryFields(true);
-        }
-      }
-    }
-    // If text-only results are required (e.g. updateable resultset), and the query has binary columns,
-    // flip to text format.
-    if (noBinaryTransfer && query.hasBinaryFields() && fields != null) {
-      for (Field field : fields) {
-        if (field.getFormat() != Field.TEXT_FORMAT) {
-          field.setFormat(Field.TEXT_FORMAT);
-        }
-      }
-      query.resetNeedUpdateFieldFormats();
-      query.setHasBinaryFields(false);
-    }
-
-    // This is not the number of binary fields, but the total number
-    // of fields if any of them are binary or zero if all of them
-    // are text.
-    int numBinaryFields = !noBinaryTransfer && query.hasBinaryFields() && fields != null
-        ? fields.length : 0;
-
-    encodedSize = 4
-        + (encodedPortalName == null ? 0 : encodedPortalName.length) + 1
-        + (encodedStatementName == null ? 0 : encodedStatementName.length) + 1
-        + 2 + params.getParameterCount() * 2
-        + 2 + encodedSize
-        + 2 + numBinaryFields * 2;
-
-    // backend's MaxAllocSize is the largest message that can
-    // be received from a client. If we have a bigger value
-    // from either very large parameters or incorrect length
-    // descriptions of setXXXStream we do not send the bind
-    // message.
-    //
-    if (encodedSize > 0x3fffffff) {
-      throw new PGBindException(new IOException(GT.tr(
-          "Bind message length {0} too long.  This can be caused by very large or incorrect length specifications on InputStream parameters.",
-          encodedSize)));
-    }
-
-    pgStream.sendChar('B'); // Bind
-    pgStream.sendInteger4((int) encodedSize); // Message size
-    if (encodedPortalName != null) {
-      pgStream.send(encodedPortalName); // Destination portal name.
-    }
-    pgStream.sendChar(0); // End of portal name.
-    if (encodedStatementName != null) {
-      pgStream.send(encodedStatementName); // Source statement name.
-    }
-    pgStream.sendChar(0); // End of statement name.
-
-    pgStream.sendInteger2(params.getParameterCount()); // # of parameter format codes
-    for (int i = 1; i <= params.getParameterCount(); i++) {
-      pgStream.sendInteger2(params.isBinary(i) ? 1 : 0); // Parameter format code
-    }
-
-    pgStream.sendInteger2(params.getParameterCount()); // # of parameter values
-
-    // If an error occurs when reading a stream we have to
-    // continue pumping out data to match the length we
-    // said we would. Once we've done that we throw
-    // this exception. Multiple exceptions can occur and
-    // it really doesn't matter which one is reported back
-    // to the caller.
-    //
-    PGBindException bindException = null;
-
-    for (int i = 1; i <= params.getParameterCount(); i++) {
-      if (params.isNull(i)) {
-        pgStream.sendInteger4(-1); // Magic size of -1 means NULL
-      } else {
-        pgStream.sendInteger4(params.getV3Length(i)); // Parameter size
-        try {
-          params.writeV3Value(i, pgStream); // Parameter value
-        } catch (PGBindException be) {
-          bindException = be;
-        }
-      }
-    }
-
-    pgStream.sendInteger2(numBinaryFields); // # of result format codes
-    for (int i = 0; fields != null && i < numBinaryFields; i++) {
-      pgStream.sendInteger2(fields[i].getFormat());
-    }
-
-    pendingBindQueue.add(portal == null ? UNNAMED_PORTAL : portal);
-
-    if (bindException != null) {
-      throw bindException;
-    }
-  }
-
-  /**
-   * Returns true if the specified field should be retrieved using binary encoding.
-   *
-   * @param field The field whose Oid type to analyse.
-   * @return True if {@link Field#BINARY_FORMAT} should be used, false if
-   *         {@link Field#BINARY_FORMAT}.
-   */
-  private boolean useBinary(Field field) {
-    int oid = field.getOID();
-    return useBinaryForReceive(oid);
-  }
-
-  private void sendDescribePortal(SimpleQuery query, Portal portal) throws IOException {
-    //
-    // Send Describe.
-    //
-
-    LOGGER.log(Level.FINEST, " FE=> Describe(portal={0})", portal);
-
-    byte[] encodedPortalName = portal == null ? null : portal.getEncodedPortalName();
-
-    // Total size = 4 (size field) + 1 (describe type, 'P') + N + 1 (portal name)
-    int encodedSize = 4 + 1 + (encodedPortalName == null ? 0 : encodedPortalName.length) + 1;
-
-    pgStream.sendChar('D'); // Describe
-    pgStream.sendInteger4(encodedSize); // message size
-    pgStream.sendChar('P'); // Describe (Portal)
-    if (encodedPortalName != null) {
-      pgStream.send(encodedPortalName); // portal name to close
-    }
-    pgStream.sendChar(0); // end of portal name
-
-    pendingDescribePortalQueue.add(query);
-    query.setPortalDescribed(true);
-  }
-
-  private void sendDescribeStatement(SimpleQuery query, SimpleParameterList params,
-      boolean describeOnly) throws IOException {
-    // Send Statement Describe
-
-    LOGGER.log(Level.FINEST, " FE=> Describe(statement={0})", query.getStatementName());
-
-    byte[] encodedStatementName = query.getEncodedStatementName();
-
-    // Total size = 4 (size field) + 1 (describe type, 'S') + N + 1 (portal name)
-    int encodedSize = 4 + 1 + (encodedStatementName == null ? 0 : encodedStatementName.length) + 1;
-
-    pgStream.sendChar('D'); // Describe
-    pgStream.sendInteger4(encodedSize); // Message size
-    pgStream.sendChar('S'); // Describe (Statement);
-    if (encodedStatementName != null) {
-      pgStream.send(encodedStatementName); // Statement name
-    }
-    pgStream.sendChar(0); // end message
-
-    // Note: statement name can change over time for the same query object
-    // Thus we take a snapshot of the query name
-    pendingDescribeStatementQueue.add(
-        new DescribeRequest(query, params, describeOnly, query.getStatementName()));
-    pendingDescribePortalQueue.add(query);
-    query.setStatementDescribed(true);
-    query.setPortalDescribed(true);
-  }
-
-  private void sendExecute(SimpleQuery query, Portal portal, int limit)
-      throws IOException {
-    //
-    // Send Execute.
-    //
-    if (LOGGER.isLoggable(Level.FINEST)) {
-      LOGGER.log(Level.FINEST, " FE=> Execute(portal={0},limit={1})", new Object[]{portal, limit});
-    }
-
-    byte[] encodedPortalName = portal == null ? null : portal.getEncodedPortalName();
-    int encodedSize = encodedPortalName == null ? 0 : encodedPortalName.length;
-
-    // Total size = 4 (size field) + 1 + N (source portal) + 4 (max rows)
-    pgStream.sendChar('E'); // Execute
-    pgStream.sendInteger4(4 + 1 + encodedSize + 4); // message size
-    if (encodedPortalName != null) {
-      pgStream.send(encodedPortalName); // portal name
-    }
-    pgStream.sendChar(0); // portal name terminator
-    pgStream.sendInteger4(limit); // row limit
-
-    pendingExecuteQueue.add(new ExecuteRequest(query, portal, false));
-  }
-
-  private void sendClosePortal(String portalName) throws IOException {
-    //
-    // Send Close.
-    //
-
-    LOGGER.log(Level.FINEST, " FE=> ClosePortal({0})", portalName);
-
-    byte[] encodedPortalName = portalName == null ? null : portalName.getBytes(StandardCharsets.UTF_8);
-    int encodedSize = encodedPortalName == null ? 0 : encodedPortalName.length;
-
-    // Total size = 4 (size field) + 1 (close type, 'P') + 1 + N (portal name)
-    pgStream.sendChar('C'); // Close
-    pgStream.sendInteger4(4 + 1 + 1 + encodedSize); // message size
-    pgStream.sendChar('P'); // Close (Portal)
-    if (encodedPortalName != null) {
-      pgStream.send(encodedPortalName);
-    }
-    pgStream.sendChar(0); // unnamed portal
-  }
-
-  private void sendCloseStatement(String statementName) throws IOException {
-    //
-    // Send Close.
-    //
-
-    LOGGER.log(Level.FINEST, " FE=> CloseStatement({0})", statementName);
-
-    byte[] encodedStatementName = statementName.getBytes(StandardCharsets.UTF_8);
-
-    // Total size = 4 (size field) + 1 (close type, 'S') + N + 1 (statement name)
-    pgStream.sendChar('C'); // Close
-    pgStream.sendInteger4(4 + 1 + encodedStatementName.length + 1); // message size
-    pgStream.sendChar('S'); // Close (Statement)
-    pgStream.send(encodedStatementName); // statement to close
-    pgStream.sendChar(0); // statement name terminator
-  }
-
-  // sendOneQuery sends a single statement via the extended query protocol.
-  // Per the FE/BE docs this is essentially the same as how a simple query runs
-  // (except that it generates some extra acknowledgement messages, and we
-  // can send several queries before doing the Sync)
-  //
-  // Parse S_n from "query string with parameter placeholders"; skipped if already done previously
-  // or if oneshot
-  // Bind C_n from S_n plus parameters (or from unnamed statement for oneshot queries)
-  // Describe C_n; skipped if caller doesn't want metadata
-  // Execute C_n with maxRows limit; maxRows = 1 if caller doesn't want results
-  // (above repeats once per call to sendOneQuery)
-  // Sync (sent by caller)
-  //
-  private void sendOneQuery(SimpleQuery query, SimpleParameterList params, int maxRows,
-      int fetchSize, int flags) throws IOException {
-    boolean asSimple = (flags & QueryExecutor.QUERY_EXECUTE_AS_SIMPLE) != 0;
-    if (asSimple) {
-      assert (flags & QueryExecutor.QUERY_DESCRIBE_ONLY) == 0
-          : "Simple mode does not support describe requests. sql = " + query.getNativeSql()
-          + ", flags = " + flags;
-      sendSimpleQuery(query, params);
-      return;
-    }
-
-    assert !query.getNativeQuery().multiStatement
-        : "Queries that might contain ; must be executed with QueryExecutor.QUERY_EXECUTE_AS_SIMPLE mode. "
-        + "Given query is " + query.getNativeSql();
-
-    // Per https://www.postgresql.org/docs/current/static/protocol-flow.html#PROTOCOL-FLOW-EXT-QUERY
-    // A Bind message can use the unnamed prepared statement to create a named portal.
-    // If the Bind is successful, an Execute message can reference that named portal until either
-    //      the end of the current transaction
-    //   or the named portal is explicitly destroyed
-
-    boolean noResults = (flags & QueryExecutor.QUERY_NO_RESULTS) != 0;
-    boolean noMeta = (flags & QueryExecutor.QUERY_NO_METADATA) != 0;
-    boolean describeOnly = (flags & QueryExecutor.QUERY_DESCRIBE_ONLY) != 0;
-    // extended queries always use a portal
-    // the usePortal flag controls whether or not we use a *named* portal
-    boolean usePortal = (flags & QueryExecutor.QUERY_FORWARD_CURSOR) != 0 && !noResults && !noMeta
-        && fetchSize > 0 && !describeOnly;
-    boolean oneShot = (flags & QueryExecutor.QUERY_ONESHOT) != 0;
-    boolean noBinaryTransfer = (flags & QUERY_NO_BINARY_TRANSFER) != 0;
-    boolean forceDescribePortal = (flags & QUERY_FORCE_DESCRIBE_PORTAL) != 0;
-
-    // Work out how many rows to fetch in this pass.
-
-    int rows;
-    if (noResults) {
-      rows = 1; // We're discarding any results anyway, so limit data transfer to a minimum
-    } else if (!usePortal) {
-      rows = maxRows; // Not using a portal -- fetchSize is irrelevant
-    } else if (maxRows != 0 && fetchSize > maxRows) {
-      // fetchSize > maxRows, use maxRows (nb: fetchSize cannot be 0 if usePortal == true)
-      rows = maxRows;
-    } else {
-      rows = fetchSize; // maxRows > fetchSize
-    }
-
-    sendParse(query, params, oneShot);
-
-    // Must do this after sendParse to pick up any changes to the
-    // query's state.
-    //
-    boolean queryHasUnknown = query.hasUnresolvedTypes();
-    boolean paramsHasUnknown = params.hasUnresolvedTypes();
-
-    boolean describeStatement = describeOnly
-        || (!oneShot && paramsHasUnknown && queryHasUnknown && !query.isStatementDescribed());
-
-    if (!describeStatement && paramsHasUnknown && !queryHasUnknown) {
-      int[] queryOIDs = query.getPrepareTypes();
-      int[] paramOIDs = params.getTypeOIDs();
-      for (int i = 0; i < paramOIDs.length; i++) {
-        // Only supply type information when there isn't any
-        // already, don't arbitrarily overwrite user supplied
-        // type information.
-        if (paramOIDs[i] == Oid.UNSPECIFIED) {
-          params.setResolvedType(i + 1, queryOIDs[i]);
-        }
-      }
-    }
-
-    if (describeStatement) {
-      sendDescribeStatement(query, params, describeOnly);
-      if (describeOnly) {
-        return;
-      }
-    }
-
-    // Construct a new portal if needed.
-    Portal portal = null;
-    if (usePortal) {
-      String portalName = "C_" + (nextUniqueID++);
-      portal = new Portal(query, portalName);
-    }
-
-    sendBind(query, params, portal, noBinaryTransfer);
-
-    // A statement describe will also output a RowDescription,
-    // so don't reissue it here if we've already done so.
-    //
-    if (!noMeta && !describeStatement) {
-      /*
-       * don't send describe if we already have cached the row description from previous executions
-       *
-       * XXX Clearing the fields / unpreparing the query (in sendParse) is incorrect, see bug #267.
-       * We might clear the cached fields in a later execution of this query if the bind parameter
-       * types change, but we're assuming here that they'll still be valid when we come to process
-       * the results of this query, so we don't send a new describe here. We re-describe after the
-       * fields are cleared, but the result of that gets processed after processing the results from
-       * earlier executions that we didn't describe because we didn't think we had to.
-       *
-       * To work around this, force a Describe at each execution in batches where this can be a
-       * problem. It won't cause more round trips so the performance impact is low, and it'll ensure
-       * that the field information available when we decoded the results. This is undeniably a
-       * hack, but there aren't many good alternatives.
-       */
-      if (!query.isPortalDescribed() || forceDescribePortal) {
-        sendDescribePortal(query, portal);
-      }
-    }
-
-    sendExecute(query, portal, rows);
-  }
-
-  private void sendSimpleQuery(SimpleQuery query, SimpleParameterList params) throws IOException {
-    String nativeSql = query.toString(params);
-
-    LOGGER.log(Level.FINEST, " FE=> SimpleQuery(query=\"{0}\")", nativeSql);
-    Encoding encoding = pgStream.getEncoding();
-
-    byte[] encoded = encoding.encode(nativeSql);
-    pgStream.sendChar('Q');
-    pgStream.sendInteger4(encoded.length + 4 + 1);
-    pgStream.send(encoded);
-    pgStream.sendChar(0);
-    pgStream.flush();
-    pendingExecuteQueue.add(new ExecuteRequest(query, null, true));
-    pendingDescribePortalQueue.add(query);
-  }
-
-  //
-  // Garbage collection of parsed statements.
-  //
-  // When a statement is successfully parsed, registerParsedQuery is called.
-  // This creates a PhantomReference referring to the "owner" of the statement
-  // (the originating Query object) and inserts that reference as a key in
-  // parsedQueryMap. The values of parsedQueryMap are the corresponding allocated
-  // statement names. The originating Query object also holds a reference to the
-  // PhantomReference.
-  //
-  // When the owning Query object is closed, it enqueues and clears the associated
-  // PhantomReference.
-  //
-  // If the owning Query object becomes unreachable (see java.lang.ref javadoc) before
-  // being closed, the corresponding PhantomReference is enqueued on
-  // parsedQueryCleanupQueue. In the Sun JVM, phantom references are only enqueued
-  // when a GC occurs, so this is not necessarily prompt but should eventually happen.
-  //
-  // Periodically (currently, just before query execution), the parsedQueryCleanupQueue
-  // is polled. For each enqueued PhantomReference we find, we remove the corresponding
-  // entry from parsedQueryMap, obtaining the name of the underlying statement in the
-  // process. Then we send a message to the backend to deallocate that statement.
-  //
-
-  private final HashMap<PhantomReference<SimpleQuery>, String> parsedQueryMap =
-      new HashMap<>();
-  private final ReferenceQueue<SimpleQuery> parsedQueryCleanupQueue =
-      new ReferenceQueue<>();
-
-  private void registerParsedQuery(SimpleQuery query, String statementName) {
-    if (statementName == null) {
-      return;
-    }
-
-    PhantomReference<SimpleQuery> cleanupRef =
-        new PhantomReference<>(query, parsedQueryCleanupQueue);
-    parsedQueryMap.put(cleanupRef, statementName);
-    query.setCleanupRef(cleanupRef);
-  }
-
-  private void processDeadParsedQueries() throws IOException {
-    Reference<? extends SimpleQuery> deadQuery;
-    while ((deadQuery = parsedQueryCleanupQueue.poll()) != null) {
-      String statementName = parsedQueryMap.remove(deadQuery);
-      sendCloseStatement(statementName);
-      deadQuery.clear();
-    }
-  }
-
-  //
-  // Essentially the same strategy is used for the cleanup of portals.
-  // Note that each Portal holds a reference to the corresponding Query
-  // that generated it, so the Query won't be collected (and the statement
-  // closed) until all the Portals are, too. This is required by the mechanics
-  // of the backend protocol: when a statement is closed, all dependent portals
-  // are also closed.
-  //
-
-  private final HashMap<PhantomReference<Portal>, String> openPortalMap =
-      new HashMap<>();
-  private final ReferenceQueue<Portal> openPortalCleanupQueue = new ReferenceQueue<>();
-
-  private static final Portal UNNAMED_PORTAL = new Portal(null, "unnamed");
-
-  private void registerOpenPortal(Portal portal) {
-    if (portal == UNNAMED_PORTAL) {
-      return; // Using the unnamed portal.
-    }
-
-    String portalName = portal.getPortalName();
-    PhantomReference<Portal> cleanupRef =
-        new PhantomReference<>(portal, openPortalCleanupQueue);
-    openPortalMap.put(cleanupRef, portalName);
-    portal.setCleanupRef(cleanupRef);
-  }
-
-  private void processDeadPortals() throws IOException {
-    Reference<? extends Portal> deadPortal;
-    while ((deadPortal = openPortalCleanupQueue.poll()) != null) {
-      String portalName = openPortalMap.remove(deadPortal);
-      sendClosePortal(portalName);
-      deadPortal.clear();
-    }
-  }
-
-  protected void processResults(ResultHandler handler, int flags) throws IOException {
-    processResults(handler, flags, false);
-  }
-
-  protected void processResults(ResultHandler handler, int flags, boolean adaptiveFetch)
-      throws IOException {
-    boolean noResults = (flags & QueryExecutor.QUERY_NO_RESULTS) != 0;
-    boolean bothRowsAndStatus = (flags & QueryExecutor.QUERY_BOTH_ROWS_AND_STATUS) != 0;
-
-    List<Tuple> tuples = null;
-
-    int c;
-    boolean endQuery = false;
-
-    // At the end of a command execution we have the CommandComplete
-    // message to tell us we're done, but with a describeOnly command
-    // we have no real flag to let us know we're done. We've got to
-    // look for the next RowDescription or NoData message and return
-    // from there.
-    boolean doneAfterRowDescNoData = false;
-
-    while (!endQuery) {
-      c = pgStream.receiveChar();
-      switch (c) {
-        case 'A': // Asynchronous Notify
-          receiveAsyncNotify();
-          break;
-
-        case '1': // Parse Complete (response to Parse)
-          pgStream.receiveInteger4(); // len, discarded
-
-          SimpleQuery parsedQuery = pendingParseQueue.removeFirst();
-          String parsedStatementName = parsedQuery.getStatementName();
-
-          LOGGER.log(Level.FINEST, " <=BE ParseComplete [{0}]", parsedStatementName);
-
-          break;
-
-        case 't': { // ParameterDescription
-          pgStream.receiveInteger4(); // len, discarded
-
-          LOGGER.log(Level.FINEST, " <=BE ParameterDescription");
-
-          DescribeRequest describeData = pendingDescribeStatementQueue.getFirst();
-          SimpleQuery query = describeData.query;
-          SimpleParameterList params = describeData.parameterList;
-          boolean describeOnly = describeData.describeOnly;
-          // This might differ from query.getStatementName if the query was re-prepared
-          String origStatementName = describeData.statementName;
-
-          int numParams = pgStream.receiveInteger2();
-
-          for (int i = 1; i <= numParams; i++) {
-            int typeOid = pgStream.receiveInteger4();
-            params.setResolvedType(i, typeOid);
-          }
-
-          // Since we can issue multiple Parse and DescribeStatement
-          // messages in a single network trip, we need to make
-          // sure the describe results we requested are still
-          // applicable to the latest parsed query.
-          //
-          if ((origStatementName == null && query.getStatementName() == null)
-              || (origStatementName != null
-                  && origStatementName.equals(query.getStatementName()))) {
-            query.setPrepareTypes(params.getTypeOIDs());
-          }
-
-          if (describeOnly) {
-            doneAfterRowDescNoData = true;
-          } else {
-            pendingDescribeStatementQueue.removeFirst();
-          }
-          break;
-        }
-
-        case '2': // Bind Complete (response to Bind)
-          pgStream.receiveInteger4(); // len, discarded
-
-          Portal boundPortal = pendingBindQueue.removeFirst();
-          LOGGER.log(Level.FINEST, " <=BE BindComplete [{0}]", boundPortal);
-
-          registerOpenPortal(boundPortal);
-          break;
-
-        case '3': // Close Complete (response to Close)
-          pgStream.receiveInteger4(); // len, discarded
-          LOGGER.log(Level.FINEST, " <=BE CloseComplete");
-          break;
-
-        case 'n': // No Data (response to Describe)
-          pgStream.receiveInteger4(); // len, discarded
-          LOGGER.log(Level.FINEST, " <=BE NoData");
-
-          pendingDescribePortalQueue.removeFirst();
-
-          if (doneAfterRowDescNoData) {
-            DescribeRequest describeData = pendingDescribeStatementQueue.removeFirst();
-            SimpleQuery currentQuery = describeData.query;
-
-            Field[] fields = currentQuery.getFields();
-
-            if (fields != null) { // There was a resultset.
-              tuples = new ArrayList<>();
-              handler.handleResultRows(currentQuery, fields, tuples, null);
-              tuples = null;
-            }
-          }
-          break;
-
-        case 's': { // Portal Suspended (end of Execute)
-          // nb: this appears *instead* of CommandStatus.
-          // Must be a SELECT if we suspended, so don't worry about it.
-
-          pgStream.receiveInteger4(); // len, discarded
-          LOGGER.log(Level.FINEST, " <=BE PortalSuspended");
-
-          ExecuteRequest executeData = pendingExecuteQueue.removeFirst();
-          SimpleQuery currentQuery = executeData.query;
-          Portal currentPortal = executeData.portal;
-
-          if (currentPortal != null) {
-            // Existence of portal defines if query was using fetching.
-            adaptiveFetchCache
-              .updateQueryFetchSize(adaptiveFetch, currentQuery, pgStream.getMaxRowSizeBytes());
-          }
-          pgStream.clearMaxRowSizeBytes();
-
-          Field[] fields = currentQuery.getFields();
-          if (fields != null && tuples == null) {
-            // When no results expected, pretend an empty resultset was returned
-            // Not sure if new ArrayList can be always replaced with emptyList
-            tuples = noResults ? Collections.emptyList() : new ArrayList<Tuple>();
-          }
-
-          if (fields != null && tuples != null) {
-            handler.handleResultRows(currentQuery, fields, tuples, currentPortal);
-          }
-          tuples = null;
-          break;
-        }
-
-        case 'C': { // Command Status (end of Execute)
-          // Handle status.
-          String status = receiveCommandStatus();
-          if (isFlushCacheOnDeallocate()
-              && (status.startsWith("DEALLOCATE ALL") || status.startsWith("DISCARD ALL"))) {
-            deallocateEpoch++;
-          }
-
-          doneAfterRowDescNoData = false;
-
-          ExecuteRequest executeData = pendingExecuteQueue.peekFirst();
-          SimpleQuery currentQuery = executeData.query;
-          Portal currentPortal = executeData.portal;
-
-          if (currentPortal != null) {
-            // Existence of portal defines if query was using fetching.
-
-            // Command executed, adaptive fetch size can be removed for this query, max row size can be cleared
-            adaptiveFetchCache.removeQuery(adaptiveFetch, currentQuery);
-            // Update to change fetch size for other fetch portals of this query
-            adaptiveFetchCache
-                .updateQueryFetchSize(adaptiveFetch, currentQuery, pgStream.getMaxRowSizeBytes());
-          }
-          pgStream.clearMaxRowSizeBytes();
-
-          if (status.startsWith("SET")) {
-            String nativeSql = currentQuery.getNativeQuery().nativeSql;
-            // Scan only the first 1024 characters to
-            // avoid big overhead for long queries.
-            if (nativeSql.lastIndexOf("search_path", 1024) != -1
-                && !nativeSql.equals(lastSetSearchPathQuery)) {
-              // Search path was changed, invalidate prepared statement cache
-              lastSetSearchPathQuery = nativeSql;
-              deallocateEpoch++;
-            }
-          }
-
-          if (!executeData.asSimple) {
-            pendingExecuteQueue.removeFirst();
-          } else {
-            // For simple 'Q' queries, executeQueue is cleared via ReadyForQuery message
-          }
-
-          // we want to make sure we do not add any results from these queries to the result set
-          if (currentQuery == autoSaveQuery
-              || currentQuery == releaseAutoSave) {
-            // ignore "SAVEPOINT" or RELEASE SAVEPOINT status from autosave query
-            break;
-          }
-
-          Field[] fields = currentQuery.getFields();
-          if (fields != null && tuples == null) {
-            // When no results expected, pretend an empty resultset was returned
-            // Not sure if new ArrayList can be always replaced with emptyList
-            tuples = noResults ? Collections.emptyList() : new ArrayList<Tuple>();
-          }
-
-          // If we received tuples we must know the structure of the
-          // resultset, otherwise we won't be able to fetch columns
-          // from it, etc, later.
-          if (fields == null && tuples != null) {
-            throw new IllegalStateException(
-                "Received resultset tuples, but no field structure for them");
-          }
-
-          if (fields != null && tuples != null) {
-            // There was a resultset.
-            handler.handleResultRows(currentQuery, fields, tuples, null);
-            tuples = null;
-
-            if (bothRowsAndStatus) {
-              interpretCommandStatus(status, handler);
-            }
-          } else {
-            interpretCommandStatus(status, handler);
-          }
-
-          if (executeData.asSimple) {
-            // Simple queries might return several resultsets, thus we clear
-            // fields, so queries like "select 1;update; select2" will properly
-            // identify that "update" did not return any results
-            currentQuery.setFields(null);
-          }
-
-          if (currentPortal != null) {
-            currentPortal.close();
-          }
-          break;
-        }
-
-        case 'D': // Data Transfer (ongoing Execute response)
-          Tuple tuple = null;
-          try {
-            tuple = pgStream.receiveTupleV3();
-          } catch (OutOfMemoryError oome) {
-            if (!noResults) {
-              handler.handleError(
-                  new PSQLException(GT.tr("Ran out of memory retrieving query results."),
-                      PSQLState.OUT_OF_MEMORY, oome));
-            }
-          } catch (SQLException e) {
-            handler.handleError(e);
-          }
-          if (!noResults) {
-            if (tuples == null) {
-              tuples = new ArrayList<>();
-            }
-            if (tuple != null) {
-              tuples.add(tuple);
-            }
-          }
-
-          if (LOGGER.isLoggable(Level.FINEST)) {
-            int length;
-            if (tuple == null) {
-              length = -1;
-            } else {
-              length = tuple.length();
-            }
-            LOGGER.log(Level.FINEST, " <=BE DataRow(len={0})", length);
-          }
-
-          break;
-
-        case 'E':
-          // Error Response (response to pretty much everything; backend then skips until Sync)
-          SQLException error = receiveErrorResponse();
-          handler.handleError(error);
-          if (willHealViaReparse(error)) {
-            // prepared statement ... is not valid kind of error
-            // Technically speaking, the error is unexpected, thus we invalidate other
-            // server-prepared statements just in case.
-            deallocateEpoch++;
+            waitOnLock();
             if (LOGGER.isLoggable(Level.FINEST)) {
-              LOGGER.log(Level.FINEST, " FE: received {0}, will invalidate statements. deallocateEpoch is now {1}",
-                  new Object[]{error.getSQLState(), deallocateEpoch});
+                LOGGER.log(Level.FINEST, "  batch execute {0} queries, handler={1}, maxRows={2}, fetchSize={3}, flags={4}",
+                        new Object[]{queries.length, batchHandler, maxRows, fetchSize, flags});
             }
-          }
-          // keep processing
-          break;
 
-        case 'I': { // Empty Query (end of Execute)
-          pgStream.receiveInteger4();
+            flags = updateQueryMode(flags);
 
-          LOGGER.log(Level.FINEST, " <=BE EmptyQuery");
+            boolean describeOnly = (QUERY_DESCRIBE_ONLY & flags) != 0;
+            // Check parameters and resolve OIDs.
+            if (!describeOnly) {
+                for (ParameterList parameterList : parameterLists) {
+                    if (parameterList != null) {
+                        ((V3ParameterList) parameterList).checkAllParametersSet();
+                    }
+                }
+            }
 
-          ExecuteRequest executeData = pendingExecuteQueue.removeFirst();
-          Portal currentPortal = executeData.portal;
-          handler.handleCommandStatus("EMPTY", 0, 0);
-          if (currentPortal != null) {
-            currentPortal.close();
-          }
-          break;
+            boolean autosave = false;
+            ResultHandler handler = batchHandler;
+            try {
+                handler = sendQueryPreamble(batchHandler, flags);
+                autosave = sendAutomaticSavepoint(queries[0], flags);
+                estimatedReceiveBufferBytes = 0;
+
+                for (int i = 0; i < queries.length; i++) {
+                    Query query = queries[i];
+                    V3ParameterList parameters = (V3ParameterList) parameterLists[i];
+                    if (parameters == null) {
+                        parameters = SimpleQuery.NO_PARAMETERS;
+                    }
+
+                    sendQuery(query, parameters, maxRows, fetchSize, flags, handler, batchHandler, adaptiveFetch);
+
+                    if (handler.getException() != null) {
+                        break;
+                    }
+                }
+
+                if (handler.getException() == null) {
+                    if ((flags & QueryExecutor.QUERY_EXECUTE_AS_SIMPLE) != 0) {
+                        // Sync message is not required for 'Q' execution as 'Q' ends with ReadyForQuery message
+                        // on its own
+                    } else {
+                        sendSync();
+                    }
+                    processResults(handler, flags, adaptiveFetch);
+                    estimatedReceiveBufferBytes = 0;
+                }
+            } catch (IOException e) {
+                abort();
+                handler.handleError(
+                        new PSQLException(GT.tr("An I/O error occurred while sending to the backend."),
+                                PSQLState.CONNECTION_FAILURE, e));
+            }
+
+            try {
+                handler.handleCompletion();
+                if (cleanupSavePoints) {
+                    releaseSavePoint(autosave, flags);
+                }
+            } catch (SQLException e) {
+                rollbackIfRequired(autosave, e);
+            }
         }
-
-        case 'N': // Notice Response
-          SQLWarning warning = receiveNoticeResponse();
-          handler.handleWarning(warning);
-          break;
-
-        case 'S': // Parameter Status
-          try {
-            receiveParameterStatus();
-          } catch (SQLException e) {
-            handler.handleError(e);
-            endQuery = true;
-          }
-          break;
-
-        case 'T': // Row Description (response to Describe)
-          Field[] fields = receiveFields();
-          tuples = new ArrayList<>();
-
-          SimpleQuery query = pendingDescribePortalQueue.peekFirst();
-          if (!pendingExecuteQueue.isEmpty()
-              && !pendingExecuteQueue.peekFirst().asSimple) {
-            pendingDescribePortalQueue.removeFirst();
-          }
-          query.setFields(fields);
-
-          if (doneAfterRowDescNoData) {
-            DescribeRequest describeData = pendingDescribeStatementQueue.removeFirst();
-            SimpleQuery currentQuery = describeData.query;
-            currentQuery.setFields(fields);
-
-            handler.handleResultRows(currentQuery, fields, tuples, null);
-            tuples = null;
-          }
-          break;
-
-        case 'Z': // Ready For Query (eventual response to Sync)
-          receiveRFQ();
-          if (!pendingExecuteQueue.isEmpty()
-              && pendingExecuteQueue.peekFirst().asSimple) {
-            tuples = null;
-            pgStream.clearResultBufferCount();
-
-            ExecuteRequest executeRequest = pendingExecuteQueue.removeFirst();
-            // Simple queries might return several resultsets, thus we clear
-            // fields, so queries like "select 1;update; select2" will properly
-            // identify that "update" did not return any results
-            executeRequest.query.setFields(null);
-
-            pendingDescribePortalQueue.removeFirst();
-            if (!pendingExecuteQueue.isEmpty()) {
-              if (getTransactionState() == TransactionState.IDLE) {
-                handler.secureProgress();
-              }
-              // process subsequent results (e.g. for cases like batched execution of simple 'Q' queries)
-              break;
-            }
-          }
-          endQuery = true;
-
-          // Reset the statement name of Parses that failed.
-          while (!pendingParseQueue.isEmpty()) {
-            SimpleQuery failedQuery = pendingParseQueue.removeFirst();
-            failedQuery.unprepare();
-          }
-
-          pendingParseQueue.clear(); // No more ParseComplete messages expected.
-          // Pending "describe" requests might be there in case of error
-          // If that is the case, reset "described" status, so the statement is properly
-          // described on next execution
-          while (!pendingDescribeStatementQueue.isEmpty()) {
-            DescribeRequest request = pendingDescribeStatementQueue.removeFirst();
-            LOGGER.log(Level.FINEST, " FE marking setStatementDescribed(false) for query {0}", request.query);
-            request.query.setStatementDescribed(false);
-          }
-          while (!pendingDescribePortalQueue.isEmpty()) {
-            SimpleQuery describePortalQuery = pendingDescribePortalQueue.removeFirst();
-            LOGGER.log(Level.FINEST, " FE marking setPortalDescribed(false) for query {0}", describePortalQuery);
-            describePortalQuery.setPortalDescribed(false);
-          }
-          pendingBindQueue.clear(); // No more BindComplete messages expected.
-          pendingExecuteQueue.clear(); // No more query executions expected.
-          break;
-
-        case 'G': // CopyInResponse
-          LOGGER.log(Level.FINEST, " <=BE CopyInResponse");
-          LOGGER.log(Level.FINEST, " FE=> CopyFail");
-
-          // COPY sub-protocol is not implemented yet
-          // We'll send a CopyFail message for COPY FROM STDIN so that
-          // server does not wait for the data.
-
-          byte[] buf = "COPY commands are only supported using the CopyManager API.".getBytes(StandardCharsets.US_ASCII);
-          pgStream.sendChar('f');
-          pgStream.sendInteger4(buf.length + 4 + 1);
-          pgStream.send(buf);
-          pgStream.sendChar(0);
-          pgStream.flush();
-          sendSync(); // send sync message
-          skipMessage(); // skip the response message
-          break;
-
-        case 'H': // CopyOutResponse
-          LOGGER.log(Level.FINEST, " <=BE CopyOutResponse");
-
-          skipMessage();
-          // In case of CopyOutResponse, we cannot abort data transfer,
-          // so just throw an error and ignore CopyData messages
-          handler.handleError(
-              new PSQLException(GT.tr("COPY commands are only supported using the CopyManager API."),
-                  PSQLState.NOT_IMPLEMENTED));
-          break;
-
-        case 'c': // CopyDone
-          skipMessage();
-          LOGGER.log(Level.FINEST, " <=BE CopyDone");
-          break;
-
-        case 'd': // CopyData
-          skipMessage();
-          LOGGER.log(Level.FINEST, " <=BE CopyData");
-          break;
-
-        default:
-          throw new IOException("Unexpected packet type: " + c);
-      }
-
     }
-  }
 
-  /**
-   * Ignore the response message by reading the message length and skipping over those bytes in the
-   * communication stream.
-   */
-  private void skipMessage() throws IOException {
-    int len = pgStream.receiveInteger4();
+    //
+    // Message sending
+    //
 
-    assert len >= 4 : "Length from skip message must be at least 4 ";
-
-    // skip len-4 (length includes the 4 bytes for message length itself
-    pgStream.skip(len - 4);
-  }
-
-  @Override
-  public void fetch(ResultCursor cursor, ResultHandler handler, int fetchSize,
-      boolean adaptiveFetch) throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      waitOnLock();
-      final Portal portal = (Portal) cursor;
-
-      // Insert a ResultHandler that turns bare command statuses into empty datasets
-      // (if the fetch returns no rows, we see just a CommandStatus..)
-      final ResultHandler delegateHandler = handler;
-      final SimpleQuery query = portal.getQuery();
-      handler = new ResultHandlerDelegate(delegateHandler) {
-        @Override
-        public void handleCommandStatus(String status, long updateCount, long insertOID) {
-          handleResultRows(query, NO_FIELDS, new ArrayList<>(), null);
-        }
-      };
-
-      // Now actually run it.
-
-      try {
+    private ResultHandler sendQueryPreamble(final ResultHandler delegateHandler, int flags)
+            throws IOException {
+        // First, send CloseStatements for finalized SimpleQueries that had statement names assigned.
         processDeadParsedQueries();
         processDeadPortals();
 
-        sendExecute(query, portal, fetchSize);
-        sendSync();
-
-        processResults(handler, 0, adaptiveFetch);
-        estimatedReceiveBufferBytes = 0;
-      } catch (IOException e) {
-        abort();
-        handler.handleError(
-            new PSQLException(GT.tr("An I/O error occurred while sending to the backend."),
-                PSQLState.CONNECTION_FAILURE, e));
-      }
-
-      handler.handleCompletion();
-    }
-  }
-
-  @Override
-  public int getAdaptiveFetchSize(boolean adaptiveFetch, ResultCursor cursor) {
-    if (cursor instanceof Portal) {
-      Query query = ((Portal) cursor).getQuery();
-      if (Objects.nonNull(query)) {
-        return adaptiveFetchCache
-            .getFetchSizeForQuery(adaptiveFetch, query);
-      }
-    }
-    return -1;
-  }
-
-  @Override
-  public void setAdaptiveFetch(boolean adaptiveFetch) {
-    this.adaptiveFetchCache.setAdaptiveFetch(adaptiveFetch);
-  }
-
-  @Override
-  public boolean getAdaptiveFetch() {
-    return this.adaptiveFetchCache.getAdaptiveFetch();
-  }
-
-  @Override
-  public void addQueryToAdaptiveFetchCache(boolean adaptiveFetch, ResultCursor cursor) {
-    if (cursor instanceof Portal) {
-      Query query = ((Portal) cursor).getQuery();
-      if (Objects.nonNull(query)) {
-        adaptiveFetchCache.addNewQuery(adaptiveFetch, query);
-      }
-    }
-  }
-
-  @Override
-  public void removeQueryFromAdaptiveFetchCache(boolean adaptiveFetch, ResultCursor cursor) {
-    if (cursor instanceof Portal) {
-      Query query = ((Portal) cursor).getQuery();
-      if (Objects.nonNull(query)) {
-        adaptiveFetchCache.removeQuery(adaptiveFetch, query);
-      }
-    }
-  }
-
-  /*
-   * Receive the field descriptions from the back end.
-   */
-  private Field[] receiveFields() throws IOException {
-    pgStream.receiveInteger4(); // MESSAGE SIZE
-    int size = pgStream.receiveInteger2();
-    Field[] fields = new Field[size];
-
-    if (LOGGER.isLoggable(Level.FINEST)) {
-      LOGGER.log(Level.FINEST, " <=BE RowDescription({0})", size);
-    }
-
-    for (int i = 0; i < fields.length; i++) {
-      String columnLabel = pgStream.receiveCanonicalString();
-      int tableOid = pgStream.receiveInteger4();
-      short positionInTable = (short) pgStream.receiveInteger2();
-      int typeOid = pgStream.receiveInteger4();
-      int typeLength = pgStream.receiveInteger2();
-      int typeModifier = pgStream.receiveInteger4();
-      int formatType = pgStream.receiveInteger2();
-      fields[i] = new Field(columnLabel,
-          typeOid, typeLength, typeModifier, tableOid, positionInTable);
-      fields[i].setFormat(formatType);
-
-      LOGGER.log(Level.FINEST, "        {0}", fields[i]);
-    }
-
-    return fields;
-  }
-
-  private void receiveAsyncNotify() throws IOException {
-    int len = pgStream.receiveInteger4(); // MESSAGE SIZE
-    assert len > 4 : "Length for AsyncNotify must be at least 4";
-
-    int pid = pgStream.receiveInteger4();
-    String msg = pgStream.receiveCanonicalString();
-    String param = pgStream.receiveString();
-    addNotification(new Notification(msg, pid, param));
-
-    if (LOGGER.isLoggable(Level.FINEST)) {
-      LOGGER.log(Level.FINEST, " <=BE AsyncNotify({0},{1},{2})", new Object[]{pid, msg, param});
-    }
-  }
-
-  private SQLException receiveErrorResponse() throws IOException {
-    // it's possible to get more than one error message for a query
-    // see libpq comments wrt backend closing a connection
-    // so, append messages to a string buffer and keep processing
-    // check at the bottom to see if we need to throw an exception
-
-    int elen = pgStream.receiveInteger4();
-    assert elen > 4 : "Error response length must be greater than 4";
-
-    EncodingPredictor.DecodeResult totalMessage = pgStream.receiveErrorString(elen - 4);
-    ServerErrorMessage errorMsg = new ServerErrorMessage(totalMessage);
-
-    if (LOGGER.isLoggable(Level.FINEST)) {
-      LOGGER.log(Level.FINEST, " <=BE ErrorMessage({0})", errorMsg.toString());
-    }
-
-    PSQLException error = new PSQLException(errorMsg, this.logServerErrorDetail);
-    if (transactionFailCause == null) {
-      transactionFailCause = error;
-    } else {
-      error.initCause(transactionFailCause);
-    }
-    return error;
-  }
-
-  private SQLWarning receiveNoticeResponse() throws IOException {
-    int nlen = pgStream.receiveInteger4();
-    assert nlen > 4 : "Notice Response length must be greater than 4";
-
-    ServerErrorMessage warnMsg = new ServerErrorMessage(pgStream.receiveString(nlen - 4));
-
-    if (LOGGER.isLoggable(Level.FINEST)) {
-      LOGGER.log(Level.FINEST, " <=BE NoticeResponse({0})", warnMsg.toString());
-    }
-
-    return new PSQLWarning(warnMsg);
-  }
-
-  private String receiveCommandStatus() throws IOException {
-    // TODO: better handle the msg len
-    int len = pgStream.receiveInteger4();
-    // read len -5 bytes (-4 for len and -1 for trailing \0)
-    String status = pgStream.receiveString(len - 5);
-    // now read and discard the trailing \0
-    pgStream.receiveChar(); // Receive(1) would allocate new byte[1], so avoid it
-
-    LOGGER.log(Level.FINEST, " <=BE CommandStatus({0})", status);
-
-    return status;
-  }
-
-  private void interpretCommandStatus(String status, ResultHandler handler) {
-    try {
-      commandCompleteParser.parse(status);
-    } catch (SQLException e) {
-      handler.handleError(e);
-      return;
-    }
-    long oid = commandCompleteParser.getOid();
-    long count = commandCompleteParser.getRows();
-
-    handler.handleCommandStatus(status, count, oid);
-  }
-
-  private void receiveRFQ() throws IOException {
-    if (pgStream.receiveInteger4() != 5) {
-      throw new IOException("unexpected length of ReadyForQuery message");
-    }
-
-    char tStatus = (char) pgStream.receiveChar();
-    if (LOGGER.isLoggable(Level.FINEST)) {
-      LOGGER.log(Level.FINEST, " <=BE ReadyForQuery({0})", tStatus);
-    }
-
-    // Update connection state.
-    switch (tStatus) {
-      case 'I':
-        transactionFailCause = null;
-        setTransactionState(TransactionState.IDLE);
-        break;
-      case 'T':
-        transactionFailCause = null;
-        setTransactionState(TransactionState.OPEN);
-        break;
-      case 'E':
-        setTransactionState(TransactionState.FAILED);
-        break;
-      default:
-        throw new IOException(
-            "unexpected transaction state in ReadyForQuery message: " + (int) tStatus);
-    }
-  }
-
-  @Override
-  @SuppressWarnings("deprecation")
-  protected void sendCloseMessage() throws IOException {
-    closeAction.sendCloseMessage(pgStream);
-  }
-
-  public void readStartupMessages() throws IOException, SQLException {
-    for (int i = 0; i < 1000; i++) {
-      int beresp = pgStream.receiveChar();
-      switch (beresp) {
-        case 'Z':
-          receiveRFQ();
-          // Ready For Query; we're done.
-          return;
-
-        case 'K':
-          // BackendKeyData
-          int msgLen = pgStream.receiveInteger4();
-          if (msgLen != 12) {
-            throw new PSQLException(GT.tr("Protocol error.  Session setup failed."),
-                PSQLState.PROTOCOL_VIOLATION);
-          }
-
-          int pid = pgStream.receiveInteger4();
-          int ckey = pgStream.receiveInteger4();
-
-          if (LOGGER.isLoggable(Level.FINEST)) {
-            LOGGER.log(Level.FINEST, " <=BE BackendKeyData(pid={0},ckey={1})", new Object[]{pid, ckey});
-          }
-
-          setBackendKeyData(pid, ckey);
-          break;
-
-        case 'E':
-          // Error
-          throw receiveErrorResponse();
-
-        case 'N':
-          // Warning
-          addWarning(receiveNoticeResponse());
-          break;
-
-        case 'S':
-          // ParameterStatus
-          receiveParameterStatus();
-
-          break;
-
-        default:
-          if (LOGGER.isLoggable(Level.FINEST)) {
-            LOGGER.log(Level.FINEST, "  invalid message type={0}", (char) beresp);
-          }
-          throw new PSQLException(GT.tr("Protocol error.  Session setup failed."),
-              PSQLState.PROTOCOL_VIOLATION);
-      }
-    }
-    throw new PSQLException(GT.tr("Protocol error.  Session setup failed."),
-        PSQLState.PROTOCOL_VIOLATION);
-  }
-
-  public void receiveParameterStatus() throws IOException, SQLException {
-    // ParameterStatus
-    pgStream.receiveInteger4(); // MESSAGE SIZE
-    final String name = pgStream.receiveCanonicalStringIfPresent();
-    final String value = pgStream.receiveCanonicalStringIfPresent();
-
-    if (LOGGER.isLoggable(Level.FINEST)) {
-      LOGGER.log(Level.FINEST, " <=BE ParameterStatus({0} = {1})", new Object[]{name, value});
-    }
-
-    // if the name is empty, there is nothing to do
-    if (name.isEmpty()) {
-      return;
-    }
-
-    // Update client-visible parameter status map for getParameterStatuses()
-    onParameterStatus(name, value);
-
-    if ("client_encoding".equals(name)) {
-      if (allowEncodingChanges) {
-        if (!"UTF8".equalsIgnoreCase(value) && !"UTF-8".equalsIgnoreCase(value)) {
-          LOGGER.log(Level.FINE,
-              "pgjdbc expects client_encoding to be UTF8 for proper operation. Actual encoding is {0}",
-              value);
+        // Send BEGIN on first statement in transaction.
+        if ((flags & QueryExecutor.QUERY_SUPPRESS_BEGIN) != 0
+                || getTransactionState() != TransactionState.IDLE) {
+            return delegateHandler;
         }
-        pgStream.setEncoding(Encoding.getDatabaseEncoding(value));
-      } else if (!"UTF8".equalsIgnoreCase(value) && !"UTF-8".equalsIgnoreCase(value)) {
-        close(); // we're screwed now; we can't trust any subsequent string.
-        throw new PSQLException(GT.tr(
-            "The server''s client_encoding parameter was changed to {0}. The JDBC driver requires client_encoding to be UTF8 for correct operation.",
-            value), PSQLState.CONNECTION_FAILURE);
 
-      }
+        int beginFlags = QueryExecutor.QUERY_NO_METADATA;
+        if ((flags & QueryExecutor.QUERY_ONESHOT) != 0) {
+            beginFlags |= QueryExecutor.QUERY_ONESHOT;
+        }
+
+        beginFlags |= QueryExecutor.QUERY_EXECUTE_AS_SIMPLE;
+
+        beginFlags = updateQueryMode(beginFlags);
+
+        final SimpleQuery beginQuery = (flags & QueryExecutor.QUERY_READ_ONLY_HINT) == 0 ? beginTransactionQuery : beginReadOnlyTransactionQuery;
+
+        sendOneQuery(beginQuery, SimpleQuery.NO_PARAMETERS, 0, 0, beginFlags);
+
+        // Insert a handler that intercepts the BEGIN.
+        return new ResultHandlerDelegate(delegateHandler) {
+            private boolean sawBegin = false;
+
+            @Override
+            public void handleResultRows(Query fromQuery, Field[] fields, List<Tuple> tuples,
+                                         ResultCursor cursor) {
+                if (sawBegin) {
+                    super.handleResultRows(fromQuery, fields, tuples, cursor);
+                }
+            }
+
+            @Override
+            public void handleCommandStatus(String status, long updateCount, long insertOID) {
+                if (!sawBegin) {
+                    sawBegin = true;
+                    if (!"BEGIN".equals(status)) {
+                        handleError(new PSQLException(GT.tr("Expected command status BEGIN, got {0}.", status),
+                                PSQLState.PROTOCOL_VIOLATION));
+                    }
+                } else {
+                    super.handleCommandStatus(status, updateCount, insertOID);
+                }
+            }
+        };
     }
 
-    if ("DateStyle".equals(name) && !value.startsWith("ISO")
-        && !value.toUpperCase(Locale.ROOT).startsWith("ISO")) {
-      close(); // we're screwed now; we can't trust any subsequent date.
-      throw new PSQLException(GT.tr(
-          "The server''s DateStyle parameter was changed to {0}. The JDBC driver requires DateStyle to begin with ISO for correct operation.",
-          value), PSQLState.CONNECTION_FAILURE);
+    @Override
+    @SuppressWarnings("deprecation")
+    public byte[] fastpathCall(int fnid, ParameterList parameters,
+                               boolean suppressBegin)
+            throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            waitOnLock();
+            if (!suppressBegin) {
+                doSubprotocolBegin();
+            }
+            try {
+                sendFastpathCall(fnid, (SimpleParameterList) parameters);
+                return receiveFastpathResult();
+            } catch (IOException ioe) {
+                abort();
+                throw new PSQLException(GT.tr("An I/O error occurred while sending to the backend."),
+                        PSQLState.CONNECTION_FAILURE, ioe);
+            }
+        }
     }
 
-    if ("standard_conforming_strings".equals(name)) {
-      if ("on".equals(value)) {
-        setStandardConformingStrings(true);
-      } else if ("off".equals(value)) {
-        setStandardConformingStrings(false);
-      } else {
-        close();
-        // we're screwed now; we don't know how to escape string literals
-        throw new PSQLException(GT.tr(
-            "The server''s standard_conforming_strings parameter was reported as {0}. The JDBC driver expected on or off.",
-            value), PSQLState.CONNECTION_FAILURE);
-      }
-      return;
+    public void doSubprotocolBegin() throws SQLException {
+        if (getTransactionState() == TransactionState.IDLE) {
+
+            LOGGER.log(Level.FINEST, "Issuing BEGIN before fastpath or copy call.");
+
+            ResultHandler handler = new ResultHandlerBase() {
+                private boolean sawBegin = false;
+
+                @Override
+                public void handleCommandStatus(String status, long updateCount, long insertOID) {
+                    if (!sawBegin) {
+                        if (!"BEGIN".equals(status)) {
+                            handleError(
+                                    new PSQLException(GT.tr("Expected command status BEGIN, got {0}.", status),
+                                            PSQLState.PROTOCOL_VIOLATION));
+                        }
+                        sawBegin = true;
+                    } else {
+                        handleError(new PSQLException(GT.tr("Unexpected command status: {0}.", status),
+                                PSQLState.PROTOCOL_VIOLATION));
+                    }
+                }
+
+                @Override
+                public void handleWarning(SQLWarning warning) {
+                    // we don't want to ignore warnings and it would be tricky
+                    // to chain them back to the connection, so since we don't
+                    // expect to get them in the first place, we just consider
+                    // them errors.
+                    handleError(warning);
+                }
+            };
+
+            try {
+                /* Send BEGIN with simple protocol preferred */
+                int beginFlags = QueryExecutor.QUERY_NO_METADATA
+                        | QueryExecutor.QUERY_ONESHOT
+                        | QueryExecutor.QUERY_EXECUTE_AS_SIMPLE;
+                beginFlags = updateQueryMode(beginFlags);
+                sendOneQuery(beginTransactionQuery, SimpleQuery.NO_PARAMETERS, 0, 0, beginFlags);
+                sendSync();
+                processResults(handler, 0);
+                estimatedReceiveBufferBytes = 0;
+            } catch (IOException ioe) {
+                throw new PSQLException(GT.tr("An I/O error occurred while sending to the backend."),
+                        PSQLState.CONNECTION_FAILURE, ioe);
+            }
+        }
+
     }
 
-    if ("TimeZone".equals(name)) {
-      setTimeZone(TimestampUtils.parseBackendTimeZone(value));
-    } else if ("application_name".equals(name)) {
-      setApplicationName(value);
-    } else if ("server_version_num".equals(name)) {
-      setServerVersionNum(Integer.parseInt(value));
-    } else if ("server_version".equals(name)) {
-      setServerVersion(value);
-    }  else if ("integer_datetimes".equals(name)) {
-      if ("on".equals(value)) {
-        setIntegerDateTimes(true);
-      } else if ("off".equals(value)) {
-        setIntegerDateTimes(false);
-      } else {
+    @Override
+    @SuppressWarnings("deprecation")
+    public ParameterList createFastpathParameters(int count) {
+        return new SimpleParameterList(count, this);
+    }
+
+    private void sendFastpathCall(int fnid, SimpleParameterList params)
+            throws SQLException, IOException {
+        if (LOGGER.isLoggable(Level.FINEST)) {
+            LOGGER.log(Level.FINEST, " FE=> FunctionCall({0}, {1} params)", new Object[]{fnid, params.getParameterCount()});
+        }
+
+        //
+        // Total size = 4 (length)
+        // + 4 (function OID)
+        // + 2 (format code count) + N * 2 (format codes)
+        // + 2 (parameter count) + encodedSize (parameters)
+        // + 2 (result format)
+
+        int paramCount = params.getParameterCount();
+        int encodedSize = 0;
+        for (int i = 1; i <= paramCount; i++) {
+            if (params.isNull(i)) {
+                encodedSize += 4;
+            } else {
+                encodedSize += 4 + params.getV3Length(i);
+            }
+        }
+
+        pgStream.sendChar('F');
+        pgStream.sendInteger4(4 + 4 + 2 + 2 * paramCount + 2 + encodedSize + 2);
+        pgStream.sendInteger4(fnid);
+        pgStream.sendInteger2(paramCount);
+        for (int i = 1; i <= paramCount; i++) {
+            pgStream.sendInteger2(params.isBinary(i) ? 1 : 0);
+        }
+        pgStream.sendInteger2(paramCount);
+        for (int i = 1; i <= paramCount; i++) {
+            if (params.isNull(i)) {
+                pgStream.sendInteger4(-1);
+            } else {
+                pgStream.sendInteger4(params.getV3Length(i)); // Parameter size
+                params.writeV3Value(i, pgStream);
+            }
+        }
+        pgStream.sendInteger2(1); // Binary result format
+        pgStream.flush();
+    }
+
+    // Just for API compatibility with previous versions.
+    @Override
+    public void processNotifies() throws SQLException {
+        processNotifies(-1);
+    }
+
+    /**
+     * @param timeoutMillis when &gt; 0, block for this time
+     *                      when = 0, block forever
+     *                      when &lt; 0, don't block
+     */
+    @Override
+    public void processNotifies(int timeoutMillis) throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            waitOnLock();
+            // Asynchronous notifies only arrive when we are not in a transaction
+            if (getTransactionState() != TransactionState.IDLE) {
+                return;
+            }
+
+            if (hasNotifications()) {
+                // No need to timeout when there are already notifications. We just check for more in this case.
+                timeoutMillis = -1;
+            }
+
+            boolean useTimeout = timeoutMillis > 0;
+            long startTime = 0L;
+            int oldTimeout = 0;
+            if (useTimeout) {
+                startTime = TimeUnit.NANOSECONDS.toMillis(System.nanoTime());
+                try {
+                    oldTimeout = pgStream.getSocket().getSoTimeout();
+                } catch (SocketException e) {
+                    throw new PSQLException(GT.tr("An error occurred while trying to get the socket "
+                            + "timeout."), PSQLState.CONNECTION_FAILURE, e);
+                }
+            }
+
+            try {
+                while (timeoutMillis >= 0 || pgStream.hasMessagePending()) {
+                    if (useTimeout && timeoutMillis >= 0) {
+                        setSocketTimeout(timeoutMillis);
+                    }
+                    int c = pgStream.receiveChar();
+                    if (useTimeout && timeoutMillis >= 0) {
+                        setSocketTimeout(0); // Don't timeout after first char
+                    }
+                    switch (c) {
+                        case 'A': // Asynchronous Notify
+                            receiveAsyncNotify();
+                            timeoutMillis = -1;
+                            continue;
+                        case 'E':
+                            // Error Response (response to pretty much everything; backend then skips until Sync)
+                            throw receiveErrorResponse();
+                        case 'N': // Notice Response (warnings / info)
+                            SQLWarning warning = receiveNoticeResponse();
+                            addWarning(warning);
+                            if (useTimeout) {
+                                long newTimeMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime());
+                                timeoutMillis = timeoutMillis + (int) (startTime - newTimeMillis); // Overflows after 49 days, ignore that
+                                startTime = newTimeMillis;
+                                if (timeoutMillis == 0) {
+                                    timeoutMillis = -1; // Don't accidentally wait forever
+                                }
+                            }
+                            break;
+                        default:
+                            throw new PSQLException(GT.tr("Unknown Response Type {0}.", (char) c),
+                                    PSQLState.CONNECTION_FAILURE);
+                    }
+                }
+            } catch (SocketTimeoutException ioe) {
+                // No notifications this time...
+            } catch (IOException ioe) {
+                throw new PSQLException(GT.tr("An I/O error occurred while sending to the backend."),
+                        PSQLState.CONNECTION_FAILURE, ioe);
+            } finally {
+                if (useTimeout) {
+                    setSocketTimeout(oldTimeout);
+                }
+            }
+        }
+    }
+
+    private void setSocketTimeout(int millis) throws PSQLException {
+        try {
+            Socket s = pgStream.getSocket();
+            if (!s.isClosed()) { // Is this check required?
+                pgStream.setNetworkTimeout(millis);
+            }
+        } catch (IOException e) {
+            throw new PSQLException(GT.tr("An error occurred while trying to reset the socket timeout."),
+                    PSQLState.CONNECTION_FAILURE, e);
+        }
+    }
+
+    private byte[] receiveFastpathResult() throws IOException, SQLException {
+        boolean endQuery = false;
+        SQLException error = null;
+        byte[] returnValue = null;
+
+        while (!endQuery) {
+            int c = pgStream.receiveChar();
+            switch (c) {
+                case 'A': // Asynchronous Notify
+                    receiveAsyncNotify();
+                    break;
+
+                case 'E':
+                    // Error Response (response to pretty much everything; backend then skips until Sync)
+                    SQLException newError = receiveErrorResponse();
+                    if (error == null) {
+                        error = newError;
+                    } else {
+                        error.setNextException(newError);
+                    }
+                    // keep processing
+                    break;
+
+                case 'N': // Notice Response (warnings / info)
+                    SQLWarning warning = receiveNoticeResponse();
+                    addWarning(warning);
+                    break;
+
+                case 'Z': // Ready For Query (eventual response to Sync)
+                    receiveRFQ();
+                    endQuery = true;
+                    break;
+
+                case 'V': // FunctionCallResponse
+                    int msgLen = pgStream.receiveInteger4();
+                    int valueLen = pgStream.receiveInteger4();
+
+                    LOGGER.log(Level.FINEST, " <=BE FunctionCallResponse({0} bytes)", valueLen);
+
+                    if (valueLen != -1) {
+                        byte[] buf = new byte[valueLen];
+                        pgStream.receive(buf, 0, valueLen);
+                        returnValue = buf;
+                    }
+
+                    break;
+
+                case 'S': // Parameter Status
+                    try {
+                        receiveParameterStatus();
+                    } catch (SQLException e) {
+                        if (error == null) {
+                            error = e;
+                        } else {
+                            error.setNextException(e);
+                        }
+                        endQuery = true;
+                    }
+                    break;
+
+                default:
+                    throw new PSQLException(GT.tr("Unknown Response Type {0}.", (char) c),
+                            PSQLState.CONNECTION_FAILURE);
+            }
+
+        }
+
+        // did we get an error during this query?
+        if (error != null) {
+            throw error;
+        }
+
+        return returnValue;
+    }
+
+    /**
+     * Sends given query to BE to start, initialize and lock connection for a CopyOperation.
+     *
+     * @param sql COPY FROM STDIN / COPY TO STDOUT statement
+     * @return CopyIn or CopyOut operation object
+     * @throws SQLException on failure
+     */
+    @Override
+    public CopyOperation startCopy(String sql, boolean suppressBegin)
+            throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            waitOnLock();
+            if (!suppressBegin) {
+                doSubprotocolBegin();
+            }
+            byte[] buf = sql.getBytes(StandardCharsets.UTF_8);
+
+            try {
+                LOGGER.log(Level.FINEST, " FE=> Query(CopyStart)");
+
+                pgStream.sendChar('Q');
+                pgStream.sendInteger4(buf.length + 4 + 1);
+                pgStream.send(buf);
+                pgStream.sendChar(0);
+                pgStream.flush();
+
+                return processCopyResults(null, true);
+                // expect a CopyInResponse or CopyOutResponse to our query above
+            } catch (IOException ioe) {
+                throw new PSQLException(GT.tr("Database connection failed when starting copy"),
+                        PSQLState.CONNECTION_FAILURE, ioe);
+            }
+        }
+    }
+
+    /**
+     * Locks connection and calls initializer for a new CopyOperation. Called via startCopy ->
+     * processCopyResults.
+     *
+     * @param op an uninitialized CopyOperation
+     * @throws SQLException on locking failure
+     * @throws IOException  on database connection failure
+     */
+    private void initCopy(CopyOperationImpl op) throws SQLException, IOException {
+        try (ResourceLock ignore = lock.obtain()) {
+            pgStream.receiveInteger4(); // length not used
+            int rowFormat = pgStream.receiveChar();
+            int numFields = pgStream.receiveInteger2();
+            int[] fieldFormats = new int[numFields];
+
+            for (int i = 0; i < numFields; i++) {
+                fieldFormats[i] = pgStream.receiveInteger2();
+            }
+
+            lock(op);
+            op.init(this, rowFormat, fieldFormats);
+        }
+    }
+
+    //
+    // Garbage collection of parsed statements.
+    //
+    // When a statement is successfully parsed, registerParsedQuery is called.
+    // This creates a PhantomReference referring to the "owner" of the statement
+    // (the originating Query object) and inserts that reference as a key in
+    // parsedQueryMap. The values of parsedQueryMap are the corresponding allocated
+    // statement names. The originating Query object also holds a reference to the
+    // PhantomReference.
+    //
+    // When the owning Query object is closed, it enqueues and clears the associated
+    // PhantomReference.
+    //
+    // If the owning Query object becomes unreachable (see java.lang.ref javadoc) before
+    // being closed, the corresponding PhantomReference is enqueued on
+    // parsedQueryCleanupQueue. In the Sun JVM, phantom references are only enqueued
+    // when a GC occurs, so this is not necessarily prompt but should eventually happen.
+    //
+    // Periodically (currently, just before query execution), the parsedQueryCleanupQueue
+    // is polled. For each enqueued PhantomReference we find, we remove the corresponding
+    // entry from parsedQueryMap, obtaining the name of the underlying statement in the
+    // process. Then we send a message to the backend to deallocate that statement.
+    //
+
+    /**
+     * Finishes a copy operation and unlocks connection discarding any exchanged data.
+     *
+     * @param op the copy operation presumably currently holding lock on this connection
+     * @throws SQLException on any additional failure
+     */
+    public void cancelCopy(CopyOperationImpl op) throws SQLException {
+        if (!hasLock(op)) {
+            throw new PSQLException(GT.tr("Tried to cancel an inactive copy operation"),
+                    PSQLState.OBJECT_NOT_IN_STATE);
+        }
+
+        SQLException error = null;
+        int errors = 0;
+
+        try {
+            if (op instanceof CopyIn) {
+                try (ResourceLock ignore = lock.obtain()) {
+                    LOGGER.log(Level.FINEST, "FE => CopyFail");
+                    final byte[] msg = "Copy cancel requested".getBytes(StandardCharsets.US_ASCII);
+                    pgStream.sendChar('f'); // CopyFail
+                    pgStream.sendInteger4(5 + msg.length);
+                    pgStream.send(msg);
+                    pgStream.sendChar(0);
+                    pgStream.flush();
+                    do {
+                        try {
+                            processCopyResults(op, true); // discard rest of input
+                        } catch (SQLException se) { // expected error response to failing copy
+                            errors++;
+                            if (error != null) {
+                                SQLException e = se;
+                                SQLException next;
+                                while ((next = e.getNextException()) != null) {
+                                    e = next;
+                                }
+                                e.setNextException(error);
+                            }
+                            error = se;
+                        }
+                    } while (hasLock(op));
+                }
+            } else if (op instanceof CopyOut) {
+                sendQueryCancel();
+            }
+
+        } catch (IOException ioe) {
+            throw new PSQLException(GT.tr("Database connection failed when canceling copy operation"),
+                    PSQLState.CONNECTION_FAILURE, ioe);
+        } finally {
+            // Need to ensure the lock isn't held anymore, or else
+            // future operations, rather than failing due to the
+            // broken connection, will simply hang waiting for this
+            // lock.
+            try (ResourceLock ignore = lock.obtain()) {
+                if (hasLock(op)) {
+                    unlock(op);
+                }
+            }
+        }
+
+        if (op instanceof CopyIn) {
+            if (errors < 1) {
+                throw new PSQLException(GT.tr("Missing expected error response to copy cancel request"),
+                        PSQLState.COMMUNICATION_ERROR);
+            } else if (errors > 1) {
+                throw new PSQLException(
+                        GT.tr("Got {0} error responses to single copy cancel request", String.valueOf(errors)),
+                        PSQLState.COMMUNICATION_ERROR, error);
+            }
+        }
+    }
+
+    /**
+     * Finishes writing to copy and unlocks connection.
+     *
+     * @param op the copy operation presumably currently holding lock on this connection
+     * @return number of rows updated for server versions 8.2 or newer
+     * @throws SQLException on failure
+     */
+    public long endCopy(CopyOperationImpl op) throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            if (!hasLock(op)) {
+                throw new PSQLException(GT.tr("Tried to end inactive copy"), PSQLState.OBJECT_NOT_IN_STATE);
+            }
+
+            try {
+                LOGGER.log(Level.FINEST, " FE=> CopyDone");
+
+                pgStream.sendChar('c'); // CopyDone
+                pgStream.sendInteger4(4);
+                pgStream.flush();
+
+                do {
+                    processCopyResults(op, true);
+                } while (hasLock(op));
+                return op.getHandledRowCount();
+            } catch (IOException ioe) {
+                throw new PSQLException(GT.tr("Database connection failed when ending copy"),
+                        PSQLState.CONNECTION_FAILURE, ioe);
+            }
+        }
+    }
+
+    /**
+     * Sends data during a live COPY IN operation. Only unlocks the connection if server suddenly
+     * returns CommandComplete, which should not happen.
+     *
+     * @param op   the CopyIn operation presumably currently holding lock on this connection
+     * @param data bytes to send
+     * @param off  index of first byte to send (usually 0)
+     * @param siz  number of bytes to send (usually data.length)
+     * @throws SQLException on failure
+     */
+    public void writeToCopy(CopyOperationImpl op, byte[] data, int off, int siz)
+            throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            if (!hasLock(op)) {
+                throw new PSQLException(GT.tr("Tried to write to an inactive copy operation"),
+                        PSQLState.OBJECT_NOT_IN_STATE);
+            }
+
+            LOGGER.log(Level.FINEST, " FE=> CopyData({0})", siz);
+
+            try {
+                pgStream.sendChar('d');
+                pgStream.sendInteger4(siz + 4);
+                pgStream.send(data, off, siz);
+            } catch (IOException ioe) {
+                throw new PSQLException(GT.tr("Database connection failed when writing to copy"),
+                        PSQLState.CONNECTION_FAILURE, ioe);
+            }
+        }
+    }
+
+    /**
+     * Sends data during a live COPY IN operation. Only unlocks the connection if server suddenly
+     * returns CommandComplete, which should not happen.
+     *
+     * @param op   the CopyIn operation presumably currently holding lock on this connection
+     * @param from the source of bytes, e.g. a ByteBufferByteStreamWriter
+     * @throws SQLException on failure
+     */
+    public void writeToCopy(CopyOperationImpl op, ByteStreamWriter from)
+            throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            if (!hasLock(op)) {
+                throw new PSQLException(GT.tr("Tried to write to an inactive copy operation"),
+                        PSQLState.OBJECT_NOT_IN_STATE);
+            }
+
+            int siz = from.getLength();
+            LOGGER.log(Level.FINEST, " FE=> CopyData({0})", siz);
+
+            try {
+                pgStream.sendChar('d');
+                pgStream.sendInteger4(siz + 4);
+                pgStream.send(from);
+            } catch (IOException ioe) {
+                throw new PSQLException(GT.tr("Database connection failed when writing to copy"),
+                        PSQLState.CONNECTION_FAILURE, ioe);
+            }
+        }
+    }
+
+    //
+    // Essentially the same strategy is used for the cleanup of portals.
+    // Note that each Portal holds a reference to the corresponding Query
+    // that generated it, so the Query won't be collected (and the statement
+    // closed) until all the Portals are, too. This is required by the mechanics
+    // of the backend protocol: when a statement is closed, all dependent portals
+    // are also closed.
+    //
+
+    /**
+     * Flushes any buffered copy data out to the backend for an active copy operation.
+     *
+     * @param op the copy operation expected to hold the lock on this connection
+     * @throws SQLException if the operation is inactive or the flush fails
+     */
+    public void flushCopy(CopyOperationImpl op) throws SQLException {
+        try (ResourceLock held = lock.obtain()) {
+            if (!hasLock(op)) {
+                throw new PSQLException(GT.tr("Tried to write to an inactive copy operation"),
+                        PSQLState.OBJECT_NOT_IN_STATE);
+            }
+            try {
+                pgStream.flush();
+            } catch (IOException e) {
+                throw new PSQLException(GT.tr("Database connection failed when writing to copy"),
+                        PSQLState.CONNECTION_FAILURE, e);
+            }
+        }
+    }
+
+    /**
+     * Waits for a row of data to be received from the server on an active copy operation.
+     * The connection is unlocked by processCopyResults() at the end of the operation.
+     *
+     * @param op    the copy operation expected to hold the lock on this connection
+     * @param block whether to block waiting for input
+     * @throws SQLException on any failure
+     */
+    void readFromCopy(CopyOperationImpl op, boolean block) throws SQLException {
+        try (ResourceLock held = lock.obtain()) {
+            if (!hasLock(op)) {
+                throw new PSQLException(GT.tr("Tried to read from inactive copy"),
+                        PSQLState.OBJECT_NOT_IN_STATE);
+            }
+            try {
+                // A handleCopydata() callback is expected to store the incoming row.
+                processCopyResults(op, block);
+            } catch (IOException e) {
+                throw new PSQLException(GT.tr("Database connection failed when reading from copy"),
+                        PSQLState.CONNECTION_FAILURE, e);
+            }
+        }
+    }
+
+    /**
+     * Handles copy sub protocol responses from server. Unlocks at end of sub protocol, so operations
+     * on pgStream or QueryExecutor are not allowed in a method after calling this!
+     *
+     * @param op    the copy operation currently in progress, or null if none has started yet
+     * @param block whether to block waiting for input
+     * @return CopyIn when COPY FROM STDIN starts; CopyOut when COPY TO STDOUT starts; null when copy
+     * ends; otherwise, the operation given as parameter.
+     * @throws SQLException in case of misuse
+     * @throws IOException  from the underlying connection
+     */
+    CopyOperationImpl processCopyResults(CopyOperationImpl op, boolean block)
+            throws SQLException, IOException {
+
+        /*
+         * fixes issue #1592 where one thread closes the stream and another is reading it
+         */
+        if (pgStream.isClosed()) {
+            throw new PSQLException(GT.tr("PGStream is closed"),
+                    PSQLState.CONNECTION_DOES_NOT_EXIST);
+        }
+        /*
+         *  This is a hack as we should not end up here, but sometimes do with large copy operations.
+         *  The flag guards against re-entrant processing of copy results.
+         */
+        if (!processingCopyResults.compareAndSet(false, true)) {
+            LOGGER.log(Level.INFO, "Ignoring request to process copy results, already processing");
+            return null;
+        }
+
+        // put this all in a try, finally block and reset the processingCopyResults in the finally clause
+        try {
+            boolean endReceiving = false;
+            SQLException error = null;  // error raised by the message currently being handled
+            SQLException errors = null; // chain of all errors collected so far
+            int len;
+
+            while (!endReceiving && (block || pgStream.hasMessagePending())) {
+
+                // There is a bug in the server's implementation of the copy
+                // protocol. It returns command complete immediately upon
+                // receiving the EOF marker in the binary protocol,
+                // potentially before we've issued CopyDone. When we are not
+                // blocking, we don't think we are done, so we hold off on
+                // processing command complete and any subsequent messages
+                // until we actually are done with the copy.
+                //
+                if (!block) {
+                    int c = pgStream.peekChar();
+                    if (c == 'C') {
+                        // CommandComplete
+                        LOGGER.log(Level.FINEST, " <=BE CommandStatus, Ignored until CopyDone");
+                        break;
+                    }
+                }
+
+                int c = pgStream.receiveChar();
+                switch (c) {
+
+                    case 'A': // Asynchronous Notify
+
+                        LOGGER.log(Level.FINEST, " <=BE Asynchronous Notification while copying");
+
+                        receiveAsyncNotify();
+                        break;
+
+                    case 'N': // Notice Response
+
+                        LOGGER.log(Level.FINEST, " <=BE Notification while copying");
+
+                        addWarning(receiveNoticeResponse());
+                        break;
+
+                    case 'C': // Command Complete
+
+                        String status = receiveCommandStatus();
+
+                        try {
+                            if (op == null) {
+                                throw new PSQLException(GT
+                                        .tr("Received CommandComplete ''{0}'' without an active copy operation", status),
+                                        PSQLState.OBJECT_NOT_IN_STATE);
+                            }
+                            op.handleCommandStatus(status);
+                        } catch (SQLException se) {
+                            error = se;
+                        }
+
+                        // Keep reading: a ReadyForQuery is expected to follow.
+                        block = true;
+                        break;
+
+                    case 'E': // ErrorMessage (expected response to CopyFail)
+
+                        error = receiveErrorResponse();
+                        // We've received the error and we now expect to receive
+                        // Ready for query, but we must block because it might still be
+                        // on the wire and not here yet.
+                        block = true;
+                        break;
+
+                    case 'G': // CopyInResponse
+
+                        LOGGER.log(Level.FINEST, " <=BE CopyInResponse");
+
+                        if (op != null) {
+                            error = new PSQLException(GT.tr("Got CopyInResponse from server during an active {0}",
+                                    op.getClass().getName()), PSQLState.OBJECT_NOT_IN_STATE);
+                        }
+
+                        op = new CopyInImpl();
+                        initCopy(op);
+                        endReceiving = true;
+                        break;
+
+                    case 'H': // CopyOutResponse
+
+                        LOGGER.log(Level.FINEST, " <=BE CopyOutResponse");
+
+                        if (op != null) {
+                            error = new PSQLException(GT.tr("Got CopyOutResponse from server during an active {0}",
+                                    op.getClass().getName()), PSQLState.OBJECT_NOT_IN_STATE);
+                        }
+
+                        op = new CopyOutImpl();
+                        initCopy(op);
+                        endReceiving = true;
+                        break;
+
+                    case 'W': // CopyBothResponse
+
+                        LOGGER.log(Level.FINEST, " <=BE CopyBothResponse");
+
+                        if (op != null) {
+                            error = new PSQLException(GT.tr("Got CopyBothResponse from server during an active {0}",
+                                    op.getClass().getName()), PSQLState.OBJECT_NOT_IN_STATE);
+                        }
+
+                        op = new CopyDualImpl();
+                        initCopy(op);
+                        endReceiving = true;
+                        break;
+
+                    case 'd': // CopyData
+
+                        LOGGER.log(Level.FINEST, " <=BE CopyData");
+
+                        // The wire length counts its own 4 bytes, so the payload length is len.
+                        len = pgStream.receiveInteger4() - 4;
+
+                        assert len > 0 : "Copy Data length must be greater than 4";
+
+                        byte[] buf = pgStream.receive(len);
+                        if (op == null) {
+                            error = new PSQLException(GT.tr("Got CopyData without an active copy operation"),
+                                    PSQLState.OBJECT_NOT_IN_STATE);
+                        } else if (!(op instanceof CopyOut)) {
+                            error = new PSQLException(
+                                    GT.tr("Unexpected copydata from server for {0}", op.getClass().getName()),
+                                    PSQLState.COMMUNICATION_ERROR);
+                        } else {
+                            op.handleCopydata(buf);
+                        }
+                        endReceiving = true;
+                        break;
+
+                    case 'c': // CopyDone (expected after all copydata received)
+
+                        LOGGER.log(Level.FINEST, " <=BE CopyDone");
+
+                        len = pgStream.receiveInteger4() - 4;
+                        if (len > 0) {
+                            pgStream.receive(len); // not in specification; should never appear
+                        }
+
+                        if (!(op instanceof CopyOut)) {
+                            error = new PSQLException("Got CopyDone while not copying from server",
+                                    PSQLState.OBJECT_NOT_IN_STATE);
+                        }
+
+                        // keep receiving since we expect a CommandComplete
+                        block = true;
+                        break;
+                    case 'S': // Parameter Status
+                        try {
+                            receiveParameterStatus();
+                        } catch (SQLException e) {
+                            error = e;
+                            endReceiving = true;
+                        }
+                        break;
+
+                    case 'Z': // ReadyForQuery: After FE:CopyDone => BE:CommandComplete
+
+                        receiveRFQ();
+                        // The copy sub-protocol is over; release the connection lock.
+                        if (op != null && hasLock(op)) {
+                            unlock(op);
+                        }
+                        op = null;
+                        endReceiving = true;
+                        break;
+
+                    // If the user sends a non-copy query, we've got to handle some additional things.
+                    //
+                    case 'T': // Row Description (response to Describe)
+                        LOGGER.log(Level.FINEST, " <=BE RowDescription (during copy ignored)");
+
+                        skipMessage();
+                        break;
+
+                    case 'D': // DataRow
+                        LOGGER.log(Level.FINEST, " <=BE DataRow (during copy ignored)");
+
+                        skipMessage();
+                        break;
+
+                    default:
+                        throw new IOException(
+                                GT.tr("Unexpected packet type during copy: {0}", Integer.toString(c)));
+                }
+
+                // Collect errors into a neat chain for completeness
+                if (error != null) {
+                    if (errors != null) {
+                        error.setNextException(errors);
+                    }
+                    errors = error;
+                    error = null;
+                }
+            }
+
+            if (errors != null) {
+                throw errors;
+            }
+            return op;
+
+        } finally {
+            // Reset here in the finally block to make sure the flag really is cleared.
+            processingCopyResults.set(false);
+        }
+    }
+
+    /*
+     * To prevent client/server protocol deadlocks, we try to manage the estimated recv buffer size
+     * and force a Sync + flush and process results if we think it might be getting too full.
+     *
+     * See the comments above MAX_BUFFERED_RECV_BYTES's declaration for details.
+     */
+    private void flushIfDeadlockRisk(Query query, boolean disallowBatching,
+                                     ResultHandler resultHandler,
+                                     BatchResultHandler batchHandler,
+                                     final int flags) throws IOException {
+        // Assume all statements need at least this much reply buffer space,
+        // plus params
+        estimatedReceiveBufferBytes += NODATA_QUERY_RESPONSE_SIZE_BYTES;
+
+        SimpleQuery sq = (SimpleQuery) query;
+        if (sq.isStatementDescribed()) {
+            /*
+             * Estimate the response size of the fields and add it to the expected response size.
+             *
+             * It's impossible for us to estimate the rowcount. We'll assume one row, as that's the common
+             * case for batches and we're leaving plenty of breathing room in this approach. It's still
+             * not deadlock-proof though; see pgjdbc github issues #194 and #195.
+             */
+            int maxResultRowSize = sq.getMaxResultRowSize();
+            if (maxResultRowSize >= 0) {
+                estimatedReceiveBufferBytes += maxResultRowSize;
+            } else {
+                LOGGER.log(Level.FINEST, "Couldn't estimate result size or result size unbounded, "
+                        + "disabling batching for this query.");
+                disallowBatching = true;
+            }
+        } else {
+            /*
+             * We only describe a statement if we're expecting results from it, so it's legal to batch
+             * unprepared statements. We'll abort later if we get any results from them where none are
+             * expected. For now all we can do is hope the user told us the truth and assume that
+             * NODATA_QUERY_RESPONSE_SIZE_BYTES is enough to cover it.
+             */
+        }
+
+        if (disallowBatching || estimatedReceiveBufferBytes >= MAX_BUFFERED_RECV_BYTES) {
+            LOGGER.log(Level.FINEST, "Forcing Sync, receive buffer full or batching disallowed");
+            sendSync();
+            processResults(resultHandler, flags);
+            // The reply buffer has been drained; start estimating from zero again.
+            estimatedReceiveBufferBytes = 0;
+            if (batchHandler != null) {
+                batchHandler.secureProgress();
+            }
+        }
+
+    }
+
+    /*
+     * Send a query (and each of its subqueries, if it has any) to the backend.
+     */
+    private void sendQuery(Query query, V3ParameterList parameters, int maxRows, int fetchSize,
+                           int flags, ResultHandler resultHandler,
+                           BatchResultHandler batchHandler, boolean adaptiveFetch) throws IOException, SQLException {
+        Query[] subqueries = query.getSubqueries();
+        SimpleParameterList[] subparams = parameters.getSubparams();
+
+        // The batching flag is deprecated, but honor it for callers that still set it.
+        // PgJDBC itself no longer does.
+        @SuppressWarnings("deprecation")
+        boolean disallowBatching = (flags & QueryExecutor.QUERY_DISALLOW_BATCHING) != 0;
+
+        if (subqueries == null) {
+            flushIfDeadlockRisk(query, disallowBatching, resultHandler, batchHandler, flags);
+
+            // Stop sending as soon as an error has been observed.
+            if (resultHandler.getException() == null) {
+                if (fetchSize != 0) {
+                    adaptiveFetchCache.addNewQuery(adaptiveFetch, query);
+                }
+                sendOneQuery((SimpleQuery) query, (SimpleParameterList) parameters, maxRows, fetchSize,
+                        flags);
+            }
+            return;
+        }
+
+        for (int idx = 0; idx < subqueries.length; idx++) {
+            final Query subquery = subqueries[idx];
+            flushIfDeadlockRisk(subquery, disallowBatching, resultHandler, batchHandler, flags);
+
+            // Stop sending as soon as an error has been observed.
+            if (resultHandler.getException() != null) {
+                break;
+            }
+
+            // When parameters is already NO_PARAMETERS it cannot know the correct
+            // number of array elements to return from getSubparams(), so it
+            // returns null; substitute the shared empty parameter list here.
+            SimpleParameterList subparam = SimpleQuery.NO_PARAMETERS;
+            if (subparams != null) {
+                subparam = subparams[idx];
+            }
+            if (fetchSize != 0) {
+                adaptiveFetchCache.addNewQuery(adaptiveFetch, subquery);
+            }
+            sendOneQuery((SimpleQuery) subquery, subparam, maxRows, fetchSize, flags);
+        }
+    }
+
+    private void sendSync() throws IOException {
+        LOGGER.log(Level.FINEST, " FE=> Sync");
+
+        pgStream.sendChar('S'); // Sync message tag
+        pgStream.sendInteger4(4); // message length: just the length field itself
+        pgStream.flush();
+        // The queue additions below are likely not required at all.
+        pendingExecuteQueue.add(new ExecuteRequest(sync, null, true));
+        pendingDescribePortalQueue.add(sync);
+    }
+
+    /**
+     * Sends a Parse message for the given query unless an equivalent parse is already in
+     * effect for the current parameter types. For non-oneshot queries a unique server-side
+     * statement name ("S_&lt;n&gt;") is generated and the query is registered as parsed.
+     *
+     * @param query   the query to parse
+     * @param params  parameter list whose type OIDs are sent with the Parse message
+     * @param oneShot if true, use the unnamed statement instead of generating a name
+     * @throws IOException if writing to the stream fails
+     */
+    private void sendParse(SimpleQuery query, SimpleParameterList params, boolean oneShot)
+            throws IOException {
+        // Already parsed, or we have a Parse pending and the types are right?
+        int[] typeOIDs = params.getTypeOIDs();
+        if (query.isPreparedFor(typeOIDs, deallocateEpoch)) {
+            return;
+        }
+
+        // Clean up any existing statement, as we can't use it.
+        query.unprepare();
+        processDeadParsedQueries();
+
+        // Remove any cached Field values. The re-parsed query might report different
+        // fields because input parameter types may result in different type inferences
+        // for unspecified types.
+        query.setFields(null);
+
+        String statementName = null;
+        if (!oneShot) {
+            // Generate a statement name to use.
+            statementName = "S_" + (nextUniqueID++);
+
+            // And prepare the new statement.
+            // NB: Must clone the OID array, as it's a direct reference to
+            // the SimpleParameterList's internal array that might be modified
+            // under us.
+            query.setStatementName(statementName, deallocateEpoch);
+            query.setPrepareTypes(typeOIDs);
+            registerParsedQuery(query, statementName);
+        }
+
+        byte[] encodedStatementName = query.getEncodedStatementName();
+        String nativeSql = query.getNativeSql();
+
+        if (LOGGER.isLoggable(Level.FINEST)) {
+            StringBuilder sbuf = new StringBuilder(" FE=> Parse(stmt=" + statementName + ",query=\"");
+            sbuf.append(nativeSql);
+            sbuf.append("\",oids={");
+            for (int i = 1; i <= params.getParameterCount(); i++) {
+                if (i != 1) {
+                    sbuf.append(",");
+                }
+                sbuf.append(params.getTypeOID(i));
+            }
+            sbuf.append("})");
+            LOGGER.log(Level.FINEST, sbuf.toString());
+        }
+
+        //
+        // Send Parse.
+        //
+
+        byte[] queryUtf8 = nativeSql.getBytes(StandardCharsets.UTF_8);
+
+        // Total size = 4 (size field)
+        // + N + 1 (statement name, zero-terminated)
+        // + N + 1 (query, zero terminated)
+        // + 2 (parameter count) + N * 4 (parameter types)
+        int encodedSize = 4
+                + (encodedStatementName == null ? 0 : encodedStatementName.length) + 1
+                + queryUtf8.length + 1
+                + 2 + 4 * params.getParameterCount();
+
+        pgStream.sendChar('P'); // Parse
+        pgStream.sendInteger4(encodedSize);
+        if (encodedStatementName != null) {
+            pgStream.send(encodedStatementName);
+        }
+        pgStream.sendChar(0); // End of statement name
+        pgStream.send(queryUtf8); // Query string
+        pgStream.sendChar(0); // End of query string.
+        pgStream.sendInteger2(params.getParameterCount()); // # of parameter types specified
+        for (int i = 1; i <= params.getParameterCount(); i++) {
+            pgStream.sendInteger4(params.getTypeOID(i));
+        }
+
+        pendingParseQueue.add(query);
+    }
+
+    /**
+     * Sends a Bind message creating a portal from the query's prepared statement and the
+     * given parameter values. Field formats may be flipped between binary and text first,
+     * and the fully-encoded message size is checked against the backend's maximum before
+     * anything is written.
+     *
+     * @param query            the parsed query to bind
+     * @param params           parameter values and formats to bind with
+     * @param portal           destination portal, or null for the unnamed portal
+     * @param noBinaryTransfer true if results must be forced to text format
+     * @throws IOException if writing to the stream fails (including PGBindException when a
+     *     parameter stream misbehaves)
+     */
+    private void sendBind(SimpleQuery query, SimpleParameterList params, Portal portal,
+                          boolean noBinaryTransfer) throws IOException {
+        //
+        // Send Bind.
+        //
+
+        String statementName = query.getStatementName();
+        byte[] encodedStatementName = query.getEncodedStatementName();
+        byte[] encodedPortalName = portal == null ? null : portal.getEncodedPortalName();
+
+        if (LOGGER.isLoggable(Level.FINEST)) {
+            StringBuilder sbuf = new StringBuilder(" FE=> Bind(stmt=" + statementName + ",portal=" + portal);
+            for (int i = 1; i <= params.getParameterCount(); i++) {
+                sbuf.append(",$").append(i).append("=<")
+                        .append(params.toString(i, true))
+                        .append(">,type=").append(Oid.toString(params.getTypeOID(i)));
+            }
+            sbuf.append(")");
+            LOGGER.log(Level.FINEST, sbuf.toString());
+        }
+
+        // Total size = 4 (size field) + N + 1 (destination portal)
+        // + N + 1 (statement name)
+        // + 2 (param format code count) + N * 2 (format codes)
+        // + 2 (param value count) + N (encoded param value size)
+        // + 2 (result format code count, 0)
+        // First pass: sum the encoded sizes of the parameter values alone.
+        long encodedSize = 0;
+        for (int i = 1; i <= params.getParameterCount(); i++) {
+            if (params.isNull(i)) {
+                encodedSize += 4;
+            } else {
+                encodedSize += (long) 4 + params.getV3Length(i);
+            }
+        }
+
+        Field[] fields = query.getFields();
+        if (!noBinaryTransfer && query.needUpdateFieldFormats() && fields != null) {
+            for (Field field : fields) {
+                if (useBinary(field)) {
+                    field.setFormat(Field.BINARY_FORMAT);
+                    query.setHasBinaryFields(true);
+                }
+            }
+        }
+        // If text-only results are required (e.g. updateable resultset), and the query has binary columns,
+        // flip to text format.
+        if (noBinaryTransfer && query.hasBinaryFields() && fields != null) {
+            for (Field field : fields) {
+                if (field.getFormat() != Field.TEXT_FORMAT) {
+                    field.setFormat(Field.TEXT_FORMAT);
+                }
+            }
+            query.resetNeedUpdateFieldFormats();
+            query.setHasBinaryFields(false);
+        }
+
+        // This is not the number of binary fields, but the total number
+        // of fields if any of them are binary or zero if all of them
+        // are text.
+        int numBinaryFields = !noBinaryTransfer && query.hasBinaryFields() && fields != null
+                ? fields.length : 0;
+
+        // Second pass: add the fixed message overhead around the parameter values.
+        encodedSize = 4
+                + (encodedPortalName == null ? 0 : encodedPortalName.length) + 1
+                + (encodedStatementName == null ? 0 : encodedStatementName.length) + 1
+                + 2 + params.getParameterCount() * 2
+                + 2 + encodedSize
+                + 2 + numBinaryFields * 2;
+
+        // backend's MaxAllocSize is the largest message that can
+        // be received from a client. If we have a bigger value
+        // from either very large parameters or incorrect length
+        // descriptions of setXXXStream we do not send the bind
+        // message.
+        //
+        if (encodedSize > 0x3fffffff) {
+            throw new PGBindException(new IOException(GT.tr(
+                    "Bind message length {0} too long.  This can be caused by very large or incorrect length specifications on InputStream parameters.",
+                    encodedSize)));
+        }
+
+        pgStream.sendChar('B'); // Bind
+        pgStream.sendInteger4((int) encodedSize); // Message size
+        if (encodedPortalName != null) {
+            pgStream.send(encodedPortalName); // Destination portal name.
+        }
+        pgStream.sendChar(0); // End of portal name.
+        if (encodedStatementName != null) {
+            pgStream.send(encodedStatementName); // Source statement name.
+        }
+        pgStream.sendChar(0); // End of statement name.
+
+        pgStream.sendInteger2(params.getParameterCount()); // # of parameter format codes
+        for (int i = 1; i <= params.getParameterCount(); i++) {
+            pgStream.sendInteger2(params.isBinary(i) ? 1 : 0); // Parameter format code
+        }
+
+        pgStream.sendInteger2(params.getParameterCount()); // # of parameter values
+
+        // If an error occurs when reading a stream we have to
+        // continue pumping out data to match the length we
+        // said we would. Once we've done that we throw
+        // this exception. Multiple exceptions can occur and
+        // it really doesn't matter which one is reported back
+        // to the caller.
+        //
+        PGBindException bindException = null;
+
+        for (int i = 1; i <= params.getParameterCount(); i++) {
+            if (params.isNull(i)) {
+                pgStream.sendInteger4(-1); // Magic size of -1 means NULL
+            } else {
+                pgStream.sendInteger4(params.getV3Length(i)); // Parameter size
+                try {
+                    params.writeV3Value(i, pgStream); // Parameter value
+                } catch (PGBindException be) {
+                    bindException = be;
+                }
+            }
+        }
+
+        pgStream.sendInteger2(numBinaryFields); // # of result format codes
+        for (int i = 0; fields != null && i < numBinaryFields; i++) {
+            pgStream.sendInteger2(fields[i].getFormat());
+        }
+
+        pendingBindQueue.add(portal == null ? UNNAMED_PORTAL : portal);
+
+        if (bindException != null) {
+            throw bindException;
+        }
+    }
+
+    /**
+     * Returns true if the specified field should be retrieved using binary encoding.
+     *
+     * @param field The field whose Oid type to analyse.
+     * @return True if {@link Field#BINARY_FORMAT} should be used, false if
+     * {@link Field#TEXT_FORMAT} should be used.
+     */
+    private boolean useBinary(Field field) {
+        int oid = field.getOID();
+        return useBinaryForReceive(oid);
+    }
+
+    /**
+     * Issues a Describe (Portal) message for the given portal and marks the query's
+     * portal as described.
+     */
+    private void sendDescribePortal(SimpleQuery query, Portal portal) throws IOException {
+        LOGGER.log(Level.FINEST, " FE=> Describe(portal={0})", portal);
+
+        byte[] portalNameBytes = portal == null ? null : portal.getEncodedPortalName();
+
+        // message size = 4 (length field) + 1 (describe type, 'P') + name bytes + 1 (terminator)
+        int messageSize = 4 + 1 + (portalNameBytes == null ? 0 : portalNameBytes.length) + 1;
+
+        pgStream.sendChar('D'); // Describe
+        pgStream.sendInteger4(messageSize);
+        pgStream.sendChar('P'); // describe a portal
+        if (portalNameBytes != null) {
+            pgStream.send(portalNameBytes);
+        }
+        pgStream.sendChar(0); // portal name terminator
+
+        pendingDescribePortalQueue.add(query);
+        query.setPortalDescribed(true);
+    }
+
+    /**
+     * Issues a Describe (Statement) message for the given query and records the pending
+     * describe request, remembering the statement name in effect when it was sent.
+     */
+    private void sendDescribeStatement(SimpleQuery query, SimpleParameterList params,
+                                       boolean describeOnly) throws IOException {
+        LOGGER.log(Level.FINEST, " FE=> Describe(statement={0})", query.getStatementName());
+
+        byte[] statementNameBytes = query.getEncodedStatementName();
+
+        // message size = 4 (length field) + 1 (describe type, 'S') + name bytes + 1 (terminator)
+        int messageSize = 4 + 1 + (statementNameBytes == null ? 0 : statementNameBytes.length) + 1;
+
+        pgStream.sendChar('D'); // Describe
+        pgStream.sendInteger4(messageSize);
+        pgStream.sendChar('S'); // describe a prepared statement
+        if (statementNameBytes != null) {
+            pgStream.send(statementNameBytes);
+        }
+        pgStream.sendChar(0); // statement name terminator
+
+        // The statement name can change over time for the same query object,
+        // so take a snapshot of the name that was current when this was sent.
+        pendingDescribeStatementQueue.add(
+                new DescribeRequest(query, params, describeOnly, query.getStatementName()));
+        pendingDescribePortalQueue.add(query);
+        query.setStatementDescribed(true);
+        query.setPortalDescribed(true);
+    }
+
+    /**
+     * Issues an Execute message against the given portal with the given row limit and
+     * records the pending execute request.
+     */
+    private void sendExecute(SimpleQuery query, Portal portal, int limit)
+            throws IOException {
+        if (LOGGER.isLoggable(Level.FINEST)) {
+            LOGGER.log(Level.FINEST, " FE=> Execute(portal={0},limit={1})", new Object[]{portal, limit});
+        }
+
+        byte[] portalNameBytes = portal == null ? null : portal.getEncodedPortalName();
+        int nameLength = portalNameBytes == null ? 0 : portalNameBytes.length;
+
+        pgStream.sendChar('E'); // Execute
+        // message size = 4 (length field) + name bytes + 1 (terminator) + 4 (row limit)
+        pgStream.sendInteger4(4 + 1 + nameLength + 4);
+        if (portalNameBytes != null) {
+            pgStream.send(portalNameBytes); // portal name
+        }
+        pgStream.sendChar(0); // portal name terminator
+        pgStream.sendInteger4(limit); // row limit
+
+        pendingExecuteQueue.add(new ExecuteRequest(query, portal, false));
+    }
+
+    /**
+     * Issues a Close (Portal) message for the named portal; a null name closes the
+     * unnamed portal.
+     */
+    private void sendClosePortal(String portalName) throws IOException {
+        LOGGER.log(Level.FINEST, " FE=> ClosePortal({0})", portalName);
+
+        byte[] portalNameBytes = portalName == null ? null : portalName.getBytes(StandardCharsets.UTF_8);
+        int nameLength = portalNameBytes == null ? 0 : portalNameBytes.length;
+
+        pgStream.sendChar('C'); // Close
+        // message size = 4 (length field) + 1 (close type, 'P') + 1 (terminator) + name bytes
+        pgStream.sendInteger4(4 + 1 + 1 + nameLength);
+        pgStream.sendChar('P'); // close a portal
+        if (portalNameBytes != null) {
+            pgStream.send(portalNameBytes);
+        }
+        pgStream.sendChar(0); // name terminator (empty name = unnamed portal)
+    }
+
+    /**
+     * Issues a Close (Statement) message for the named server-side prepared statement.
+     */
+    private void sendCloseStatement(String statementName) throws IOException {
+        LOGGER.log(Level.FINEST, " FE=> CloseStatement({0})", statementName);
+
+        byte[] statementNameBytes = statementName.getBytes(StandardCharsets.UTF_8);
+
+        pgStream.sendChar('C'); // Close
+        // message size = 4 (length field) + 1 (close type, 'S') + name bytes + 1 (terminator)
+        pgStream.sendInteger4(4 + 1 + statementNameBytes.length + 1);
+        pgStream.sendChar('S'); // close a prepared statement
+        pgStream.send(statementNameBytes); // statement to close
+        pgStream.sendChar(0); // statement name terminator
+    }
+
+    // sendOneQuery sends a single statement via the extended query protocol.
+    // Per the FE/BE docs this is essentially the same as how a simple query runs
+    // (except that it generates some extra acknowledgement messages, and we
+    // can send several queries before doing the Sync)
+    //
+    // Parse S_n from "query string with parameter placeholders"; skipped if already done previously
+    // or if oneshot
+    // Bind C_n from S_n plus parameters (or from unnamed statement for oneshot queries)
+    // Describe C_n; skipped if caller doesn't want metadata
+    // Execute C_n with maxRows limit; maxRows = 1 if caller doesn't want results
+    // (above repeats once per call to sendOneQuery)
+    // Sync (sent by caller)
+    //
+    private void sendOneQuery(SimpleQuery query, SimpleParameterList params, int maxRows,
+                              int fetchSize, int flags) throws IOException {
+        // Simple-mode execution bypasses Parse/Bind/Describe/Execute entirely.
+        boolean asSimple = (flags & QueryExecutor.QUERY_EXECUTE_AS_SIMPLE) != 0;
+        if (asSimple) {
+            assert (flags & QueryExecutor.QUERY_DESCRIBE_ONLY) == 0
+                    : "Simple mode does not support describe requests. sql = " + query.getNativeSql()
+                    + ", flags = " + flags;
+            sendSimpleQuery(query, params);
+            return;
+        }
+
+        assert !query.getNativeQuery().multiStatement
+                : "Queries that might contain ; must be executed with QueryExecutor.QUERY_EXECUTE_AS_SIMPLE mode. "
+                + "Given query is " + query.getNativeSql();
+
+        // Per https://www.postgresql.org/docs/current/static/protocol-flow.html#PROTOCOL-FLOW-EXT-QUERY
+        // A Bind message can use the unnamed prepared statement to create a named portal.
+        // If the Bind is successful, an Execute message can reference that named portal until either
+        //      the end of the current transaction
+        //   or the named portal is explicitly destroyed
+
+        boolean noResults = (flags & QueryExecutor.QUERY_NO_RESULTS) != 0;
+        boolean noMeta = (flags & QueryExecutor.QUERY_NO_METADATA) != 0;
+        boolean describeOnly = (flags & QueryExecutor.QUERY_DESCRIBE_ONLY) != 0;
+        // extended queries always use a portal
+        // the usePortal flag controls whether or not we use a *named* portal
+        boolean usePortal = (flags & QueryExecutor.QUERY_FORWARD_CURSOR) != 0 && !noResults && !noMeta
+                && fetchSize > 0 && !describeOnly;
+        boolean oneShot = (flags & QueryExecutor.QUERY_ONESHOT) != 0;
+        boolean noBinaryTransfer = (flags & QUERY_NO_BINARY_TRANSFER) != 0;
+        boolean forceDescribePortal = (flags & QUERY_FORCE_DESCRIBE_PORTAL) != 0;
+
+        // Work out how many rows to fetch in this pass.
+
+        int rows;
+        if (noResults) {
+            rows = 1; // We're discarding any results anyway, so limit data transfer to a minimum
+        } else if (!usePortal) {
+            rows = maxRows; // Not using a portal -- fetchSize is irrelevant
+        } else if (maxRows != 0 && fetchSize > maxRows) {
+            // fetchSize > maxRows, use maxRows (nb: fetchSize cannot be 0 if usePortal == true)
+            rows = maxRows;
+        } else {
+            rows = fetchSize; // maxRows > fetchSize
+        }
+
+        sendParse(query, params, oneShot);
+
+        // Must do this after sendParse to pick up any changes to the
+        // query's state.
+        //
+        boolean queryHasUnknown = query.hasUnresolvedTypes();
+        boolean paramsHasUnknown = params.hasUnresolvedTypes();
+
+        // Describe the statement when explicitly asked to, or when both the query and the
+        // supplied parameters still contain unresolved type OIDs and no cached description exists.
+        boolean describeStatement = describeOnly
+                || (!oneShot && paramsHasUnknown && queryHasUnknown && !query.isStatementDescribed());
+
+        if (!describeStatement && paramsHasUnknown && !queryHasUnknown) {
+            int[] queryOIDs = query.getPrepareTypes();
+            int[] paramOIDs = params.getTypeOIDs();
+            for (int i = 0; i < paramOIDs.length; i++) {
+                // Only supply type information when there isn't any
+                // already, don't arbitrarily overwrite user supplied
+                // type information.
+                if (paramOIDs[i] == Oid.UNSPECIFIED) {
+                    params.setResolvedType(i + 1, queryOIDs[i]);
+                }
+            }
+        }
+
+        if (describeStatement) {
+            sendDescribeStatement(query, params, describeOnly);
+            if (describeOnly) {
+                // Describe-only: no Bind/Execute follows; caller sends Sync.
+                return;
+            }
+        }
+
+        // Construct a new portal if needed.
+        Portal portal = null;
+        if (usePortal) {
+            String portalName = "C_" + (nextUniqueID++);
+            portal = new Portal(query, portalName);
+        }
+
+        sendBind(query, params, portal, noBinaryTransfer);
+
+        // A statement describe will also output a RowDescription,
+        // so don't reissue it here if we've already done so.
+        //
+        if (!noMeta && !describeStatement) {
+            /*
+             * don't send describe if we already have cached the row description from previous executions
+             *
+             * XXX Clearing the fields / unpreparing the query (in sendParse) is incorrect, see bug #267.
+             * We might clear the cached fields in a later execution of this query if the bind parameter
+             * types change, but we're assuming here that they'll still be valid when we come to process
+             * the results of this query, so we don't send a new describe here. We re-describe after the
+             * fields are cleared, but the result of that gets processed after processing the results from
+             * earlier executions that we didn't describe because we didn't think we had to.
+             *
+             * To work around this, force a Describe at each execution in batches where this can be a
+             * problem. It won't cause more round trips so the performance impact is low, and it'll ensure
+             * that the field information available when we decoded the results. This is undeniably a
+             * hack, but there aren't many good alternatives.
+             */
+            if (!query.isPortalDescribed() || forceDescribePortal) {
+                sendDescribePortal(query, portal);
+            }
+        }
+
+        sendExecute(query, portal, rows);
+    }
+
+    /**
+     * Sends a simple-protocol Query ('Q') message containing the SQL text with parameter
+     * values already substituted, then flushes immediately. Also queues an ExecuteRequest
+     * flagged asSimple=true plus a describe-portal entry so processResults() can pair the
+     * backend's RowDescription / ReadyForQuery replies with this query.
+     */
+    private void sendSimpleQuery(SimpleQuery query, SimpleParameterList params) throws IOException {
+        // toString(params) interpolates the parameter values into the SQL text.
+        String nativeSql = query.toString(params);
+
+        LOGGER.log(Level.FINEST, " FE=> SimpleQuery(query=\"{0}\")", nativeSql);
+        Encoding encoding = pgStream.getEncoding();
+
+        byte[] encoded = encoding.encode(nativeSql);
+        pgStream.sendChar('Q');
+        pgStream.sendInteger4(encoded.length + 4 + 1); // length word + SQL bytes + NUL terminator
+        pgStream.send(encoded);
+        pgStream.sendChar(0);
+        pgStream.flush();
+        pendingExecuteQueue.add(new ExecuteRequest(query, null, true));
+        pendingDescribePortalQueue.add(query);
+    }
+
+    private void registerParsedQuery(SimpleQuery query, String statementName) {
+        if (statementName == null) {
+            return;
+        }
+
+        PhantomReference<SimpleQuery> cleanupRef =
+                new PhantomReference<>(query, parsedQueryCleanupQueue);
+        parsedQueryMap.put(cleanupRef, statementName);
+        query.setCleanupRef(cleanupRef);
+    }
+
+    /**
+     * Drains the phantom-reference queue of garbage-collected SimpleQuery objects and
+     * issues a Close (Statement) for each associated server-side prepared statement so
+     * the backend can release its resources.
+     */
+    private void processDeadParsedQueries() throws IOException {
+        for (Reference<? extends SimpleQuery> dead = parsedQueryCleanupQueue.poll();
+                dead != null;
+                dead = parsedQueryCleanupQueue.poll()) {
+            sendCloseStatement(parsedQueryMap.remove(dead));
+            dead.clear();
+        }
+    }
+
+    private void registerOpenPortal(Portal portal) {
+        if (portal == UNNAMED_PORTAL) {
+            return; // Using the unnamed portal.
+        }
+
+        String portalName = portal.getPortalName();
+        PhantomReference<Portal> cleanupRef =
+                new PhantomReference<>(portal, openPortalCleanupQueue);
+        openPortalMap.put(cleanupRef, portalName);
+        portal.setCleanupRef(cleanupRef);
+    }
+
+    /**
+     * Drains the phantom-reference queue of garbage-collected Portal objects and issues
+     * a Close (Portal) for each so the backend can free the portal.
+     */
+    private void processDeadPortals() throws IOException {
+        for (Reference<? extends Portal> dead = openPortalCleanupQueue.poll();
+                dead != null;
+                dead = openPortalCleanupQueue.poll()) {
+            sendClosePortal(openPortalMap.remove(dead));
+            dead.clear();
+        }
+    }
+
+    /**
+     * Convenience overload of {@link #processResults(ResultHandler, int, boolean)}
+     * with adaptive fetch disabled.
+     */
+    protected void processResults(ResultHandler handler, int flags) throws IOException {
+        processResults(handler, flags, false);
+    }
+
+    /**
+     * Reads backend messages until ReadyForQuery ('Z'), dispatching rows, command
+     * statuses, warnings and errors to the supplied handler. Pairs each reply with the
+     * corresponding entry in the pending Parse/Bind/Describe/Execute queues that were
+     * populated on the send side.
+     *
+     * @param handler       receives result rows, statuses, warnings and errors
+     * @param flags         QueryExecutor.QUERY_* flags used when the queries were sent
+     * @param adaptiveFetch whether to feed observed row sizes into adaptiveFetchCache
+     */
+    protected void processResults(ResultHandler handler, int flags, boolean adaptiveFetch)
+            throws IOException {
+        boolean noResults = (flags & QueryExecutor.QUERY_NO_RESULTS) != 0;
+        boolean bothRowsAndStatus = (flags & QueryExecutor.QUERY_BOTH_ROWS_AND_STATUS) != 0;
+
+        // Accumulates DataRow tuples for the result set currently being received.
+        List<Tuple> tuples = null;
+
+        int c;
+        boolean endQuery = false;
+
+        // At the end of a command execution we have the CommandComplete
+        // message to tell us we're done, but with a describeOnly command
+        // we have no real flag to let us know we're done. We've got to
+        // look for the next RowDescription or NoData message and return
+        // from there.
+        boolean doneAfterRowDescNoData = false;
+
+        while (!endQuery) {
+            c = pgStream.receiveChar();
+            switch (c) {
+                case 'A': // Asynchronous Notify
+                    receiveAsyncNotify();
+                    break;
+
+                case '1': // Parse Complete (response to Parse)
+                    pgStream.receiveInteger4(); // len, discarded
+
+                    SimpleQuery parsedQuery = pendingParseQueue.removeFirst();
+                    String parsedStatementName = parsedQuery.getStatementName();
+
+                    LOGGER.log(Level.FINEST, " <=BE ParseComplete [{0}]", parsedStatementName);
+
+                    break;
+
+                case 't': { // ParameterDescription
+                    pgStream.receiveInteger4(); // len, discarded
+
+                    LOGGER.log(Level.FINEST, " <=BE ParameterDescription");
+
+                    DescribeRequest describeData = pendingDescribeStatementQueue.getFirst();
+                    SimpleQuery query = describeData.query;
+                    SimpleParameterList params = describeData.parameterList;
+                    boolean describeOnly = describeData.describeOnly;
+                    // This might differ from query.getStatementName if the query was re-prepared
+                    String origStatementName = describeData.statementName;
+
+                    int numParams = pgStream.receiveInteger2();
+
+                    for (int i = 1; i <= numParams; i++) {
+                        int typeOid = pgStream.receiveInteger4();
+                        params.setResolvedType(i, typeOid);
+                    }
+
+                    // Since we can issue multiple Parse and DescribeStatement
+                    // messages in a single network trip, we need to make
+                    // sure the describe results we requested are still
+                    // applicable to the latest parsed query.
+                    //
+                    if ((origStatementName == null && query.getStatementName() == null)
+                            || (origStatementName != null
+                            && origStatementName.equals(query.getStatementName()))) {
+                        query.setPrepareTypes(params.getTypeOIDs());
+                    }
+
+                    if (describeOnly) {
+                        // Leave the request queued: the following RowDescription/NoData
+                        // ('T'/'n') completes the describe and dequeues it there.
+                        doneAfterRowDescNoData = true;
+                    } else {
+                        pendingDescribeStatementQueue.removeFirst();
+                    }
+                    break;
+                }
+
+                case '2': // Bind Complete (response to Bind)
+                    pgStream.receiveInteger4(); // len, discarded
+
+                    Portal boundPortal = pendingBindQueue.removeFirst();
+                    LOGGER.log(Level.FINEST, " <=BE BindComplete [{0}]", boundPortal);
+
+                    registerOpenPortal(boundPortal);
+                    break;
+
+                case '3': // Close Complete (response to Close)
+                    pgStream.receiveInteger4(); // len, discarded
+                    LOGGER.log(Level.FINEST, " <=BE CloseComplete");
+                    break;
+
+                case 'n': // No Data (response to Describe)
+                    pgStream.receiveInteger4(); // len, discarded
+                    LOGGER.log(Level.FINEST, " <=BE NoData");
+
+                    pendingDescribePortalQueue.removeFirst();
+
+                    if (doneAfterRowDescNoData) {
+                        DescribeRequest describeData = pendingDescribeStatementQueue.removeFirst();
+                        SimpleQuery currentQuery = describeData.query;
+
+                        Field[] fields = currentQuery.getFields();
+
+                        if (fields != null) { // There was a resultset.
+                            tuples = new ArrayList<>();
+                            handler.handleResultRows(currentQuery, fields, tuples, null);
+                            tuples = null;
+                        }
+                    }
+                    break;
+
+                case 's': { // Portal Suspended (end of Execute)
+                    // nb: this appears *instead* of CommandStatus.
+                    // Must be a SELECT if we suspended, so don't worry about it.
+
+                    pgStream.receiveInteger4(); // len, discarded
+                    LOGGER.log(Level.FINEST, " <=BE PortalSuspended");
+
+                    ExecuteRequest executeData = pendingExecuteQueue.removeFirst();
+                    SimpleQuery currentQuery = executeData.query;
+                    Portal currentPortal = executeData.portal;
+
+                    if (currentPortal != null) {
+                        // Existence of portal defines if query was using fetching.
+                        adaptiveFetchCache
+                                .updateQueryFetchSize(adaptiveFetch, currentQuery, pgStream.getMaxRowSizeBytes());
+                    }
+                    pgStream.clearMaxRowSizeBytes();
+
+                    Field[] fields = currentQuery.getFields();
+                    if (fields != null && tuples == null) {
+                        // When no results expected, pretend an empty resultset was returned
+                        // Not sure if new ArrayList can be always replaced with emptyList
+                        tuples = noResults ? Collections.emptyList() : new ArrayList<Tuple>();
+                    }
+
+                    if (fields != null && tuples != null) {
+                        handler.handleResultRows(currentQuery, fields, tuples, currentPortal);
+                    }
+                    tuples = null;
+                    break;
+                }
+
+                case 'C': { // Command Status (end of Execute)
+                    // Handle status.
+                    String status = receiveCommandStatus();
+                    if (isFlushCacheOnDeallocate()
+                            && (status.startsWith("DEALLOCATE ALL") || status.startsWith("DISCARD ALL"))) {
+                        deallocateEpoch++;
+                    }
+
+                    doneAfterRowDescNoData = false;
+
+                    // NOTE(review): peekFirst may return null if no Execute is pending;
+                    // the following dereference assumes the send side always queued one.
+                    ExecuteRequest executeData = pendingExecuteQueue.peekFirst();
+                    SimpleQuery currentQuery = executeData.query;
+                    Portal currentPortal = executeData.portal;
+
+                    if (currentPortal != null) {
+                        // Existence of portal defines if query was using fetching.
+
+                        // Command executed, adaptive fetch size can be removed for this query, max row size can be cleared
+                        adaptiveFetchCache.removeQuery(adaptiveFetch, currentQuery);
+                        // Update to change fetch size for other fetch portals of this query
+                        adaptiveFetchCache
+                                .updateQueryFetchSize(adaptiveFetch, currentQuery, pgStream.getMaxRowSizeBytes());
+                    }
+                    pgStream.clearMaxRowSizeBytes();
+
+                    if (status.startsWith("SET")) {
+                        String nativeSql = currentQuery.getNativeQuery().nativeSql;
+                        // Scan only the first 1024 characters to
+                        // avoid big overhead for long queries.
+                        if (nativeSql.lastIndexOf("search_path", 1024) != -1
+                                && !nativeSql.equals(lastSetSearchPathQuery)) {
+                            // Search path was changed, invalidate prepared statement cache
+                            lastSetSearchPathQuery = nativeSql;
+                            deallocateEpoch++;
+                        }
+                    }
+
+                    if (!executeData.asSimple) {
+                        pendingExecuteQueue.removeFirst();
+                    } else {
+                        // For simple 'Q' queries, executeQueue is cleared via ReadyForQuery message
+                    }
+
+                    // we want to make sure we do not add any results from these queries to the result set
+                    if (currentQuery == autoSaveQuery
+                            || currentQuery == releaseAutoSave) {
+                        // ignore "SAVEPOINT" or RELEASE SAVEPOINT status from autosave query
+                        break;
+                    }
+
+                    Field[] fields = currentQuery.getFields();
+                    if (fields != null && tuples == null) {
+                        // When no results expected, pretend an empty resultset was returned
+                        // Not sure if new ArrayList can be always replaced with emptyList
+                        tuples = noResults ? Collections.emptyList() : new ArrayList<Tuple>();
+                    }
+
+                    // If we received tuples we must know the structure of the
+                    // resultset, otherwise we won't be able to fetch columns
+                    // from it, etc, later.
+                    if (fields == null && tuples != null) {
+                        throw new IllegalStateException(
+                                "Received resultset tuples, but no field structure for them");
+                    }
+
+                    if (fields != null && tuples != null) {
+                        // There was a resultset.
+                        handler.handleResultRows(currentQuery, fields, tuples, null);
+                        tuples = null;
+
+                        if (bothRowsAndStatus) {
+                            interpretCommandStatus(status, handler);
+                        }
+                    } else {
+                        interpretCommandStatus(status, handler);
+                    }
+
+                    if (executeData.asSimple) {
+                        // Simple queries might return several resultsets, thus we clear
+                        // fields, so queries like "select 1;update; select2" will properly
+                        // identify that "update" did not return any results
+                        currentQuery.setFields(null);
+                    }
+
+                    if (currentPortal != null) {
+                        currentPortal.close();
+                    }
+                    break;
+                }
+
+                case 'D': // Data Transfer (ongoing Execute response)
+                    Tuple tuple = null;
+                    try {
+                        tuple = pgStream.receiveTupleV3();
+                    } catch (OutOfMemoryError oome) {
+                        // Report once via the handler; tuple stays null so the row is dropped.
+                        if (!noResults) {
+                            handler.handleError(
+                                    new PSQLException(GT.tr("Ran out of memory retrieving query results."),
+                                            PSQLState.OUT_OF_MEMORY, oome));
+                        }
+                    } catch (SQLException e) {
+                        handler.handleError(e);
+                    }
+                    if (!noResults) {
+                        if (tuples == null) {
+                            tuples = new ArrayList<>();
+                        }
+                        if (tuple != null) {
+                            tuples.add(tuple);
+                        }
+                    }
+
+                    if (LOGGER.isLoggable(Level.FINEST)) {
+                        int length;
+                        if (tuple == null) {
+                            length = -1;
+                        } else {
+                            length = tuple.length();
+                        }
+                        LOGGER.log(Level.FINEST, " <=BE DataRow(len={0})", length);
+                    }
+
+                    break;
+
+                case 'E':
+                    // Error Response (response to pretty much everything; backend then skips until Sync)
+                    SQLException error = receiveErrorResponse();
+                    handler.handleError(error);
+                    if (willHealViaReparse(error)) {
+                        // prepared statement ... is not valid kind of error
+                        // Technically speaking, the error is unexpected, thus we invalidate other
+                        // server-prepared statements just in case.
+                        deallocateEpoch++;
+                        if (LOGGER.isLoggable(Level.FINEST)) {
+                            LOGGER.log(Level.FINEST, " FE: received {0}, will invalidate statements. deallocateEpoch is now {1}",
+                                    new Object[]{error.getSQLState(), deallocateEpoch});
+                        }
+                    }
+                    // keep processing
+                    break;
+
+                case 'I': { // Empty Query (end of Execute)
+                    pgStream.receiveInteger4();
+
+                    LOGGER.log(Level.FINEST, " <=BE EmptyQuery");
+
+                    ExecuteRequest executeData = pendingExecuteQueue.removeFirst();
+                    Portal currentPortal = executeData.portal;
+                    handler.handleCommandStatus("EMPTY", 0, 0);
+                    if (currentPortal != null) {
+                        currentPortal.close();
+                    }
+                    break;
+                }
+
+                case 'N': // Notice Response
+                    SQLWarning warning = receiveNoticeResponse();
+                    handler.handleWarning(warning);
+                    break;
+
+                case 'S': // Parameter Status
+                    try {
+                        receiveParameterStatus();
+                    } catch (SQLException e) {
+                        handler.handleError(e);
+                        endQuery = true;
+                    }
+                    break;
+
+                case 'T': // Row Description (response to Describe)
+                    Field[] fields = receiveFields();
+                    tuples = new ArrayList<>();
+
+                    SimpleQuery query = pendingDescribePortalQueue.peekFirst();
+                    if (!pendingExecuteQueue.isEmpty()
+                            && !pendingExecuteQueue.peekFirst().asSimple) {
+                        pendingDescribePortalQueue.removeFirst();
+                    }
+                    query.setFields(fields);
+
+                    if (doneAfterRowDescNoData) {
+                        DescribeRequest describeData = pendingDescribeStatementQueue.removeFirst();
+                        SimpleQuery currentQuery = describeData.query;
+                        currentQuery.setFields(fields);
+
+                        handler.handleResultRows(currentQuery, fields, tuples, null);
+                        tuples = null;
+                    }
+                    break;
+
+                case 'Z': // Ready For Query (eventual response to Sync)
+                    receiveRFQ();
+                    if (!pendingExecuteQueue.isEmpty()
+                            && pendingExecuteQueue.peekFirst().asSimple) {
+                        tuples = null;
+                        pgStream.clearResultBufferCount();
+
+                        ExecuteRequest executeRequest = pendingExecuteQueue.removeFirst();
+                        // Simple queries might return several resultsets, thus we clear
+                        // fields, so queries like "select 1;update; select2" will properly
+                        // identify that "update" did not return any results
+                        executeRequest.query.setFields(null);
+
+                        pendingDescribePortalQueue.removeFirst();
+                        if (!pendingExecuteQueue.isEmpty()) {
+                            if (getTransactionState() == TransactionState.IDLE) {
+                                handler.secureProgress();
+                            }
+                            // process subsequent results (e.g. for cases like batched execution of simple 'Q' queries)
+                            break;
+                        }
+                    }
+                    endQuery = true;
+
+                    // Reset the statement name of Parses that failed.
+                    while (!pendingParseQueue.isEmpty()) {
+                        SimpleQuery failedQuery = pendingParseQueue.removeFirst();
+                        failedQuery.unprepare();
+                    }
+
+                    pendingParseQueue.clear(); // No more ParseComplete messages expected.
+                    // Pending "describe" requests might be there in case of error
+                    // If that is the case, reset "described" status, so the statement is properly
+                    // described on next execution
+                    while (!pendingDescribeStatementQueue.isEmpty()) {
+                        DescribeRequest request = pendingDescribeStatementQueue.removeFirst();
+                        LOGGER.log(Level.FINEST, " FE marking setStatementDescribed(false) for query {0}", request.query);
+                        request.query.setStatementDescribed(false);
+                    }
+                    while (!pendingDescribePortalQueue.isEmpty()) {
+                        SimpleQuery describePortalQuery = pendingDescribePortalQueue.removeFirst();
+                        LOGGER.log(Level.FINEST, " FE marking setPortalDescribed(false) for query {0}", describePortalQuery);
+                        describePortalQuery.setPortalDescribed(false);
+                    }
+                    pendingBindQueue.clear(); // No more BindComplete messages expected.
+                    pendingExecuteQueue.clear(); // No more query executions expected.
+                    break;
+
+                case 'G': // CopyInResponse
+                    LOGGER.log(Level.FINEST, " <=BE CopyInResponse");
+                    LOGGER.log(Level.FINEST, " FE=> CopyFail");
+
+                    // COPY sub-protocol is not implemented yet
+                    // We'll send a CopyFail message for COPY FROM STDIN so that
+                    // server does not wait for the data.
+
+                    byte[] buf = "COPY commands are only supported using the CopyManager API.".getBytes(StandardCharsets.US_ASCII);
+                    pgStream.sendChar('f');
+                    pgStream.sendInteger4(buf.length + 4 + 1);
+                    pgStream.send(buf);
+                    pgStream.sendChar(0);
+                    pgStream.flush();
+                    sendSync(); // send sync message
+                    skipMessage(); // skip the response message
+                    break;
+
+                case 'H': // CopyOutResponse
+                    LOGGER.log(Level.FINEST, " <=BE CopyOutResponse");
+
+                    skipMessage();
+                    // In case of CopyOutResponse, we cannot abort data transfer,
+                    // so just throw an error and ignore CopyData messages
+                    handler.handleError(
+                            new PSQLException(GT.tr("COPY commands are only supported using the CopyManager API."),
+                                    PSQLState.NOT_IMPLEMENTED));
+                    break;
+
+                case 'c': // CopyDone
+                    skipMessage();
+                    LOGGER.log(Level.FINEST, " <=BE CopyDone");
+                    break;
+
+                case 'd': // CopyData
+                    skipMessage();
+                    LOGGER.log(Level.FINEST, " <=BE CopyData");
+                    break;
+
+                default:
+                    throw new IOException("Unexpected packet type: " + c);
+            }
+
+        }
+    }
+
+    /**
+     * Ignore the response message by reading the message length and skipping over those bytes in the
+     * communication stream.
+     */
+    private void skipMessage() throws IOException {
+        int len = pgStream.receiveInteger4();
+
+        assert len >= 4 : "Length from skip message must be at least 4 ";
+
+        // skip len-4 (length includes the 4 bytes for message length itself
+        pgStream.skip(len - 4);
+    }
+
+    /**
+     * Fetches the next {@code fetchSize} rows from an already-open named portal:
+     * closes out any GC'd statements/portals, sends Execute + Sync, then processes
+     * the backend's replies through the supplied handler.
+     *
+     * @param cursor        must be the Portal created by an earlier execution
+     * @param handler       receives the fetched rows (a bare CommandStatus is converted
+     *                      into an empty resultset so callers always see rows)
+     * @param fetchSize     row limit for this Execute round trip
+     * @param adaptiveFetch whether adaptive fetch sizing is active for this fetch
+     */
+    @Override
+    public void fetch(ResultCursor cursor, ResultHandler handler, int fetchSize,
+                      boolean adaptiveFetch) throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            waitOnLock();
+            final Portal portal = (Portal) cursor;
+
+            // Insert a ResultHandler that turns bare command statuses into empty datasets
+            // (if the fetch returns no rows, we see just a CommandStatus..)
+            final ResultHandler delegateHandler = handler;
+            final SimpleQuery query = portal.getQuery();
+            handler = new ResultHandlerDelegate(delegateHandler) {
+                @Override
+                public void handleCommandStatus(String status, long updateCount, long insertOID) {
+                    handleResultRows(query, NO_FIELDS, new ArrayList<>(), null);
+                }
+            };
+
+            // Now actually run it.
+
+            try {
+                processDeadParsedQueries();
+                processDeadPortals();
+
+                sendExecute(query, portal, fetchSize);
+                sendSync();
+
+                processResults(handler, 0, adaptiveFetch);
+                estimatedReceiveBufferBytes = 0;
+            } catch (IOException e) {
+                // The connection state is unknown after an I/O failure mid-protocol;
+                // abort it and surface the failure through the handler.
+                abort();
+                handler.handleError(
+                        new PSQLException(GT.tr("An I/O error occurred while sending to the backend."),
+                                PSQLState.CONNECTION_FAILURE, e));
+            }
+
+            handler.handleCompletion();
+        }
+    }
+
+    /**
+     * Returns the adaptive fetch size computed for the query behind the given cursor,
+     * or -1 when the cursor is not a Portal or carries no query.
+     */
+    @Override
+    public int getAdaptiveFetchSize(boolean adaptiveFetch, ResultCursor cursor) {
+        if (!(cursor instanceof Portal)) {
+            return -1;
+        }
+        final Query query = ((Portal) cursor).getQuery();
+        if (query == null) {
+            return -1;
+        }
+        return adaptiveFetchCache.getFetchSizeForQuery(adaptiveFetch, query);
+    }
+
+    /** Reports whether adaptive fetch is currently enabled for this connection. */
+    @Override
+    public boolean getAdaptiveFetch() {
+        return adaptiveFetchCache.getAdaptiveFetch();
+    }
+
+    /** Enables or disables adaptive fetch for this connection. */
+    @Override
+    public void setAdaptiveFetch(boolean adaptiveFetch) {
+        adaptiveFetchCache.setAdaptiveFetch(adaptiveFetch);
+    }
+
+    /**
+     * Registers the query behind the given cursor with the adaptive fetch cache.
+     * Cursors that are not Portals, or Portals without a query, are ignored.
+     */
+    @Override
+    public void addQueryToAdaptiveFetchCache(boolean adaptiveFetch, ResultCursor cursor) {
+        if (!(cursor instanceof Portal)) {
+            return;
+        }
+        final Query query = ((Portal) cursor).getQuery();
+        if (query != null) {
+            adaptiveFetchCache.addNewQuery(adaptiveFetch, query);
+        }
+    }
+
+    @Override
+    public void removeQueryFromAdaptiveFetchCache(boolean adaptiveFetch, ResultCursor cursor) {
+        if (cursor instanceof Portal) {
+            Query query = ((Portal) cursor).getQuery();
+            if (Objects.nonNull(query)) {
+                adaptiveFetchCache.removeQuery(adaptiveFetch, query);
+            }
+        }
+    }
+
+    /*
+     * Receive the field descriptions from the back end.
+     */
+    private Field[] receiveFields() throws IOException {
+        pgStream.receiveInteger4(); // MESSAGE SIZE
+        int size = pgStream.receiveInteger2();
+        Field[] fields = new Field[size];
+
+        if (LOGGER.isLoggable(Level.FINEST)) {
+            LOGGER.log(Level.FINEST, " <=BE RowDescription({0})", size);
+        }
+
+        for (int i = 0; i < fields.length; i++) {
+            String columnLabel = pgStream.receiveCanonicalString();
+            int tableOid = pgStream.receiveInteger4();
+            short positionInTable = (short) pgStream.receiveInteger2();
+            int typeOid = pgStream.receiveInteger4();
+            int typeLength = pgStream.receiveInteger2();
+            int typeModifier = pgStream.receiveInteger4();
+            int formatType = pgStream.receiveInteger2();
+            fields[i] = new Field(columnLabel,
+                    typeOid, typeLength, typeModifier, tableOid, positionInTable);
+            fields[i].setFormat(formatType);
+
+            LOGGER.log(Level.FINEST, "        {0}", fields[i]);
+        }
+
+        return fields;
+    }
+
+    private void receiveAsyncNotify() throws IOException {
+        int len = pgStream.receiveInteger4(); // MESSAGE SIZE
+        assert len > 4 : "Length for AsyncNotify must be at least 4";
+
+        int pid = pgStream.receiveInteger4();
+        String msg = pgStream.receiveCanonicalString();
+        String param = pgStream.receiveString();
+        addNotification(new Notification(msg, pid, param));
+
+        if (LOGGER.isLoggable(Level.FINEST)) {
+            LOGGER.log(Level.FINEST, " <=BE AsyncNotify({0},{1},{2})", new Object[]{pid, msg, param});
+        }
+    }
+
+    private SQLException receiveErrorResponse() throws IOException {
+        // it's possible to get more than one error message for a query
+        // see libpq comments wrt backend closing a connection
+        // so, append messages to a string buffer and keep processing
+        // check at the bottom to see if we need to throw an exception
+
+        int elen = pgStream.receiveInteger4();
+        assert elen > 4 : "Error response length must be greater than 4";
+
+        EncodingPredictor.DecodeResult totalMessage = pgStream.receiveErrorString(elen - 4);
+        ServerErrorMessage errorMsg = new ServerErrorMessage(totalMessage);
+
+        if (LOGGER.isLoggable(Level.FINEST)) {
+            LOGGER.log(Level.FINEST, " <=BE ErrorMessage({0})", errorMsg.toString());
+        }
+
+        PSQLException error = new PSQLException(errorMsg, this.logServerErrorDetail);
+        if (transactionFailCause == null) {
+            transactionFailCause = error;
+        } else {
+            error.initCause(transactionFailCause);
+        }
+        return error;
+    }
+
+    private SQLWarning receiveNoticeResponse() throws IOException {
+        int nlen = pgStream.receiveInteger4();
+        assert nlen > 4 : "Notice Response length must be greater than 4";
+
+        ServerErrorMessage warnMsg = new ServerErrorMessage(pgStream.receiveString(nlen - 4));
+
+        if (LOGGER.isLoggable(Level.FINEST)) {
+            LOGGER.log(Level.FINEST, " <=BE NoticeResponse({0})", warnMsg.toString());
+        }
+
+        return new PSQLWarning(warnMsg);
+    }
+
+    private String receiveCommandStatus() throws IOException {
+        // TODO: better handle the msg len
+        int len = pgStream.receiveInteger4();
+        // read len -5 bytes (-4 for len and -1 for trailing \0)
+        String status = pgStream.receiveString(len - 5);
+        // now read and discard the trailing \0
+        pgStream.receiveChar(); // Receive(1) would allocate new byte[1], so avoid it
+
+        LOGGER.log(Level.FINEST, " <=BE CommandStatus({0})", status);
+
+        return status;
+    }
+
+    private void interpretCommandStatus(String status, ResultHandler handler) {
+        try {
+            commandCompleteParser.parse(status);
+        } catch (SQLException e) {
+            handler.handleError(e);
+            return;
+        }
+        long oid = commandCompleteParser.getOid();
+        long count = commandCompleteParser.getRows();
+
+        handler.handleCommandStatus(status, count, oid);
+    }
+
+    private void receiveRFQ() throws IOException {
+        if (pgStream.receiveInteger4() != 5) {
+            throw new IOException("unexpected length of ReadyForQuery message");
+        }
+
+        char tStatus = (char) pgStream.receiveChar();
+        if (LOGGER.isLoggable(Level.FINEST)) {
+            LOGGER.log(Level.FINEST, " <=BE ReadyForQuery({0})", tStatus);
+        }
+
+        // Update connection state.
+        switch (tStatus) {
+            case 'I':
+                transactionFailCause = null;
+                setTransactionState(TransactionState.IDLE);
+                break;
+            case 'T':
+                transactionFailCause = null;
+                setTransactionState(TransactionState.OPEN);
+                break;
+            case 'E':
+                setTransactionState(TransactionState.FAILED);
+                break;
+            default:
+                throw new IOException(
+                        "unexpected transaction state in ReadyForQuery message: " + (int) tStatus);
+        }
+    }
+
+    @Override
+    @SuppressWarnings("deprecation")
+    protected void sendCloseMessage() throws IOException {
+        closeAction.sendCloseMessage(pgStream);
+    }
+
+    public void readStartupMessages() throws IOException, SQLException {
+        for (int i = 0; i < 1000; i++) {
+            int beresp = pgStream.receiveChar();
+            switch (beresp) {
+                case 'Z':
+                    receiveRFQ();
+                    // Ready For Query; we're done.
+                    return;
+
+                case 'K':
+                    // BackendKeyData
+                    int msgLen = pgStream.receiveInteger4();
+                    if (msgLen != 12) {
+                        throw new PSQLException(GT.tr("Protocol error.  Session setup failed."),
+                                PSQLState.PROTOCOL_VIOLATION);
+                    }
+
+                    int pid = pgStream.receiveInteger4();
+                    int ckey = pgStream.receiveInteger4();
+
+                    if (LOGGER.isLoggable(Level.FINEST)) {
+                        LOGGER.log(Level.FINEST, " <=BE BackendKeyData(pid={0},ckey={1})", new Object[]{pid, ckey});
+                    }
+
+                    setBackendKeyData(pid, ckey);
+                    break;
+
+                case 'E':
+                    // Error
+                    throw receiveErrorResponse();
+
+                case 'N':
+                    // Warning
+                    addWarning(receiveNoticeResponse());
+                    break;
+
+                case 'S':
+                    // ParameterStatus
+                    receiveParameterStatus();
+
+                    break;
+
+                default:
+                    if (LOGGER.isLoggable(Level.FINEST)) {
+                        LOGGER.log(Level.FINEST, "  invalid message type={0}", (char) beresp);
+                    }
+                    throw new PSQLException(GT.tr("Protocol error.  Session setup failed."),
+                            PSQLState.PROTOCOL_VIOLATION);
+            }
+        }
         throw new PSQLException(GT.tr("Protocol error.  Session setup failed."),
-            PSQLState.PROTOCOL_VIOLATION);
-      }
+                PSQLState.PROTOCOL_VIOLATION);
     }
-  }
 
-  public void setTimeZone(TimeZone timeZone) {
-    this.timeZone = timeZone;
-  }
+    public void receiveParameterStatus() throws IOException, SQLException {
+        // ParameterStatus
+        pgStream.receiveInteger4(); // MESSAGE SIZE
+        final String name = pgStream.receiveCanonicalStringIfPresent();
+        final String value = pgStream.receiveCanonicalStringIfPresent();
 
-  @Override
-  public TimeZone getTimeZone() {
-    return timeZone;
-  }
+        if (LOGGER.isLoggable(Level.FINEST)) {
+            LOGGER.log(Level.FINEST, " <=BE ParameterStatus({0} = {1})", new Object[]{name, value});
+        }
 
-  public void setApplicationName(String applicationName) {
-    this.applicationName = applicationName;
-  }
+        // if the name is empty, there is nothing to do
+        if (name.isEmpty()) {
+            return;
+        }
 
-  @Override
-  public String getApplicationName() {
-    if (applicationName == null) {
-      return "";
+        // Update client-visible parameter status map for getParameterStatuses()
+        onParameterStatus(name, value);
+
+        if ("client_encoding".equals(name)) {
+            if (allowEncodingChanges) {
+                if (!"UTF8".equalsIgnoreCase(value) && !"UTF-8".equalsIgnoreCase(value)) {
+                    LOGGER.log(Level.FINE,
+                            "pgjdbc expects client_encoding to be UTF8 for proper operation. Actual encoding is {0}",
+                            value);
+                }
+                pgStream.setEncoding(Encoding.getDatabaseEncoding(value));
+            } else if (!"UTF8".equalsIgnoreCase(value) && !"UTF-8".equalsIgnoreCase(value)) {
+                close(); // we're screwed now; we can't trust any subsequent string.
+                throw new PSQLException(GT.tr(
+                        "The server''s client_encoding parameter was changed to {0}. The JDBC driver requires client_encoding to be UTF8 for correct operation.",
+                        value), PSQLState.CONNECTION_FAILURE);
+
+            }
+        }
+
+        if ("DateStyle".equals(name) && !value.startsWith("ISO")
+                && !value.toUpperCase(Locale.ROOT).startsWith("ISO")) {
+            close(); // we're screwed now; we can't trust any subsequent date.
+            throw new PSQLException(GT.tr(
+                    "The server''s DateStyle parameter was changed to {0}. The JDBC driver requires DateStyle to begin with ISO for correct operation.",
+                    value), PSQLState.CONNECTION_FAILURE);
+        }
+
+        if ("standard_conforming_strings".equals(name)) {
+            if ("on".equals(value)) {
+                setStandardConformingStrings(true);
+            } else if ("off".equals(value)) {
+                setStandardConformingStrings(false);
+            } else {
+                close();
+                // we're screwed now; we don't know how to escape string literals
+                throw new PSQLException(GT.tr(
+                        "The server''s standard_conforming_strings parameter was reported as {0}. The JDBC driver expected on or off.",
+                        value), PSQLState.CONNECTION_FAILURE);
+            }
+            return;
+        }
+
+        if ("TimeZone".equals(name)) {
+            setTimeZone(TimestampUtils.parseBackendTimeZone(value));
+        } else if ("application_name".equals(name)) {
+            setApplicationName(value);
+        } else if ("server_version_num".equals(name)) {
+            setServerVersionNum(Integer.parseInt(value));
+        } else if ("server_version".equals(name)) {
+            setServerVersion(value);
+        } else if ("integer_datetimes".equals(name)) {
+            if ("on".equals(value)) {
+                setIntegerDateTimes(true);
+            } else if ("off".equals(value)) {
+                setIntegerDateTimes(false);
+            } else {
+                throw new PSQLException(GT.tr("Protocol error.  Session setup failed."),
+                        PSQLState.PROTOCOL_VIOLATION);
+            }
+        }
     }
-    return applicationName;
-  }
 
-  @Override
-  public ReplicationProtocol getReplicationProtocol() {
-    return replicationProtocol;
-  }
-
-  @Override
-  public void addBinaryReceiveOid(int oid) {
-    synchronized (useBinaryReceiveForOids) {
-      useBinaryReceiveForOids.add(oid);
+    @Override
+    public TimeZone getTimeZone() {
+        return timeZone;
     }
-  }
 
-  @Override
-  public void removeBinaryReceiveOid(int oid) {
-    synchronized (useBinaryReceiveForOids) {
-      useBinaryReceiveForOids.remove(oid);
+    public void setTimeZone(TimeZone timeZone) {
+        this.timeZone = timeZone;
     }
-  }
 
-  @Override
-  @SuppressWarnings("deprecation")
-  public Set<? extends Integer> getBinaryReceiveOids() {
-    // copy the values to prevent ConcurrentModificationException when reader accesses the elements
-    synchronized (useBinaryReceiveForOids) {
-      return new HashSet<>(useBinaryReceiveForOids);
+    @Override
+    public String getApplicationName() {
+        if (applicationName == null) {
+            return "";
+        }
+        return applicationName;
     }
-  }
 
-  @Override
-  public boolean useBinaryForReceive(int oid) {
-    synchronized (useBinaryReceiveForOids) {
-      return useBinaryReceiveForOids.contains(oid);
+    public void setApplicationName(String applicationName) {
+        this.applicationName = applicationName;
     }
-  }
 
-  @Override
-  public void setBinaryReceiveOids(Set<Integer> oids) {
-    synchronized (useBinaryReceiveForOids) {
-      useBinaryReceiveForOids.clear();
-      useBinaryReceiveForOids.addAll(oids);
+    @Override
+    public ReplicationProtocol getReplicationProtocol() {
+        return replicationProtocol;
     }
-  }
 
-  @Override
-  public void addBinarySendOid(int oid) {
-    synchronized (useBinarySendForOids) {
-      useBinarySendForOids.add(oid);
+    @Override
+    public void addBinaryReceiveOid(int oid) {
+        synchronized (useBinaryReceiveForOids) {
+            useBinaryReceiveForOids.add(oid);
+        }
     }
-  }
 
-  @Override
-  public void removeBinarySendOid(int oid) {
-    synchronized (useBinarySendForOids) {
-      useBinarySendForOids.remove(oid);
+    @Override
+    public void removeBinaryReceiveOid(int oid) {
+        synchronized (useBinaryReceiveForOids) {
+            useBinaryReceiveForOids.remove(oid);
+        }
     }
-  }
 
-  @Override
-  @SuppressWarnings("deprecation")
-  public Set<? extends Integer> getBinarySendOids() {
-    // copy the values to prevent ConcurrentModificationException when reader accesses the elements
-    synchronized (useBinarySendForOids) {
-      return new HashSet<>(useBinarySendForOids);
+    @Override
+    @SuppressWarnings("deprecation")
+    public Set<? extends Integer> getBinaryReceiveOids() {
+        // copy the values to prevent ConcurrentModificationException when reader accesses the elements
+        synchronized (useBinaryReceiveForOids) {
+            return new HashSet<>(useBinaryReceiveForOids);
+        }
     }
-  }
 
-  @Override
-  public boolean useBinaryForSend(int oid) {
-    synchronized (useBinarySendForOids) {
-      return useBinarySendForOids.contains(oid);
+    @Override
+    public void setBinaryReceiveOids(Set<Integer> oids) {
+        synchronized (useBinaryReceiveForOids) {
+            useBinaryReceiveForOids.clear();
+            useBinaryReceiveForOids.addAll(oids);
+        }
     }
-  }
 
-  @Override
-  public void setBinarySendOids(Set<Integer> oids) {
-    synchronized (useBinarySendForOids) {
-      useBinarySendForOids.clear();
-      useBinarySendForOids.addAll(oids);
+    @Override
+    public boolean useBinaryForReceive(int oid) {
+        synchronized (useBinaryReceiveForOids) {
+            return useBinaryReceiveForOids.contains(oid);
+        }
     }
-  }
 
-  private void setIntegerDateTimes(boolean state) {
-    integerDateTimes = state;
-  }
+    @Override
+    public void addBinarySendOid(int oid) {
+        synchronized (useBinarySendForOids) {
+            useBinarySendForOids.add(oid);
+        }
+    }
 
-  @Override
-  public boolean getIntegerDateTimes() {
-    return integerDateTimes;
-  }
+    @Override
+    public void removeBinarySendOid(int oid) {
+        synchronized (useBinarySendForOids) {
+            useBinarySendForOids.remove(oid);
+        }
+    }
 
-  private final Deque<SimpleQuery> pendingParseQueue = new ArrayDeque<>();
-  private final Deque<Portal> pendingBindQueue = new ArrayDeque<>();
-  private final Deque<ExecuteRequest> pendingExecuteQueue = new ArrayDeque<>();
-  private final Deque<DescribeRequest> pendingDescribeStatementQueue =
-      new ArrayDeque<>();
-  private final Deque<SimpleQuery> pendingDescribePortalQueue = new ArrayDeque<>();
+    @Override
+    @SuppressWarnings("deprecation")
+    public Set<? extends Integer> getBinarySendOids() {
+        // copy the values to prevent ConcurrentModificationException when reader accesses the elements
+        synchronized (useBinarySendForOids) {
+            return new HashSet<>(useBinarySendForOids);
+        }
+    }
 
-  private long nextUniqueID = 1;
-  private final boolean allowEncodingChanges;
-  private final boolean cleanupSavePoints;
+    @Override
+    public void setBinarySendOids(Set<Integer> oids) {
+        synchronized (useBinarySendForOids) {
+            useBinarySendForOids.clear();
+            useBinarySendForOids.addAll(oids);
+        }
+    }
 
-  /**
-   * <p>The estimated server response size since we last consumed the input stream from the server, in
-   * bytes.</p>
-   *
-   * <p>Starts at zero, reset by every Sync message. Mainly used for batches.</p>
-   *
-   * <p>Used to avoid deadlocks, see MAX_BUFFERED_RECV_BYTES.</p>
-   */
-  private int estimatedReceiveBufferBytes;
+    @Override
+    public boolean useBinaryForSend(int oid) {
+        synchronized (useBinarySendForOids) {
+            return useBinarySendForOids.contains(oid);
+        }
+    }
 
-  private final SimpleQuery beginTransactionQuery =
-      new SimpleQuery(
-          new NativeQuery("BEGIN", null, false, SqlCommand.BLANK),
-          null, false);
+    @Override
+    public boolean getIntegerDateTimes() {
+        return integerDateTimes;
+    }
 
-  private final SimpleQuery beginReadOnlyTransactionQuery =
-      new SimpleQuery(
-          new NativeQuery("BEGIN READ ONLY", null, false, SqlCommand.BLANK),
-          null, false);
-
-  private final SimpleQuery emptyQuery =
-      new SimpleQuery(
-          new NativeQuery("", null, false,
-              SqlCommand.createStatementTypeInfo(SqlCommandType.BLANK)
-          ), null, false);
-
-  private final SimpleQuery autoSaveQuery =
-      new SimpleQuery(
-          new NativeQuery("SAVEPOINT PGJDBC_AUTOSAVE", null, false, SqlCommand.BLANK),
-          null, false);
-
-  private final SimpleQuery releaseAutoSave =
-      new SimpleQuery(
-          new NativeQuery("RELEASE SAVEPOINT PGJDBC_AUTOSAVE", null, false, SqlCommand.BLANK),
-          null, false);
-
-  /*
-  In autosave mode we use this query to roll back errored transactions
-   */
-  private final SimpleQuery restoreToAutoSave =
-      new SimpleQuery(
-          new NativeQuery("ROLLBACK TO SAVEPOINT PGJDBC_AUTOSAVE", null, false, SqlCommand.BLANK),
-          null, false);
+    private void setIntegerDateTimes(boolean state) {
+        integerDateTimes = state;
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/core/v3/SimpleParameterList.java b/pgjdbc/src/main/java/org/postgresql/core/v3/SimpleParameterList.java
index 1e6571f..1a40a2c 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/v3/SimpleParameterList.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/v3/SimpleParameterList.java
@@ -6,6 +6,11 @@
 
 package org.postgresql.core.v3;
 
+import java.io.IOException;
+import java.io.InputStream;
+import java.nio.charset.StandardCharsets;
+import java.sql.SQLException;
+import java.util.Arrays;
 import org.postgresql.core.Oid;
 import org.postgresql.core.PGStream;
 import org.postgresql.core.ParameterList;
@@ -20,604 +25,596 @@ import org.postgresql.util.PSQLException;
 import org.postgresql.util.PSQLState;
 import org.postgresql.util.StreamWrapper;
 
-import java.io.IOException;
-import java.io.InputStream;
-import java.nio.charset.StandardCharsets;
-import java.sql.SQLException;
-import java.util.Arrays;
-
 /**
  * Parameter list for a single-statement V3 query.
  *
  * @author Oliver Jowett (oliver@opencloud.com)
  */
-class SimpleParameterList implements V3ParameterList {
+public class SimpleParameterList implements V3ParameterList {
 
-  private static final byte IN = 1;
-  private static final byte OUT = 2;
-  private static final byte INOUT = IN | OUT;
+    private static final byte IN = 1;
+    private static final byte OUT = 2;
+    private static final byte INOUT = IN | OUT;
 
-  private static final byte TEXT = 0;
-  private static final byte BINARY = 4;
+    private static final byte TEXT = 0;
+    private static final byte BINARY = 4;
+    /**
+     * Marker object representing NULL; this distinguishes "parameter never set" from "parameter set
+     * to null".
+     */
+    private static final Object NULL_OBJECT = new Object();
+    private final Object[] paramValues;
+    private final int[] paramTypes;
+    private final byte[] flags;
+    private final byte[][] encoded;
+    private final TypeTransferModeRegistry transferModeRegistry;
+    private int pos;
 
-  SimpleParameterList(int paramCount, TypeTransferModeRegistry transferModeRegistry) {
-    this.paramValues = new Object[paramCount];
-    this.paramTypes = new int[paramCount];
-    this.encoded = new byte[paramCount][];
-    this.flags = new byte[paramCount];
-    this.transferModeRegistry = transferModeRegistry;
-  }
-
-  @Override
-  public void registerOutParameter(int index, int sqlType) throws SQLException {
-    if (index < 1 || index > paramValues.length) {
-      throw new PSQLException(
-          GT.tr("The column index is out of range: {0}, number of columns: {1}.",
-              index, paramValues.length),
-          PSQLState.INVALID_PARAMETER_VALUE);
+    public SimpleParameterList(int paramCount, TypeTransferModeRegistry transferModeRegistry) {
+        this.paramValues = new Object[paramCount];
+        this.paramTypes = new int[paramCount];
+        this.encoded = new byte[paramCount][];
+        this.flags = new byte[paramCount];
+        this.transferModeRegistry = transferModeRegistry;
     }
 
-    flags[index - 1] |= OUT;
-  }
-
-  private void bind(int index, Object value, int oid, byte binary) throws SQLException {
-    if (index < 1 || index > paramValues.length) {
-      throw new PSQLException(
-          GT.tr("The column index is out of range: {0}, number of columns: {1}.",
-              index, paramValues.length),
-          PSQLState.INVALID_PARAMETER_VALUE);
+    /**
+     * <p>Escapes a given text value as a literal, wraps it in single quotes, casts it to the
+     * given data type, and finally wraps the whole thing in parentheses.</p>
+     *
+     * <p>For example, "123" and "int4" becomes "('123'::int4)"</p>
+     *
+     * <p>The additional parentheses are added to ensure that the surrounding text of where the
+     * parameter value is entered does not modify the interpretation of the value.</p>
+     *
+     * <p>For example if our input SQL is: <code>SELECT ?b</code></p>
+     *
+     * <p>Using a parameter value of '{}' and type of json we'd get:</p>
+     *
+     * <pre>
+     * test=# SELECT ('{}'::json)b;
+     *  b
+     * ----
+     *  {}
+     * </pre>
+     *
+     * <p>But without the parentheses the result changes:</p>
+     *
+     * <pre>
+     * test=# SELECT '{}'::jsonb;
+     * jsonb
+     * -------
+     * {}
+     * </pre>
+     **/
+    private static String quoteAndCast(String text, String type, boolean standardConformingStrings) {
+        StringBuilder sb = new StringBuilder((text.length() + 10) / 10 * 11); // Add 10% for escaping.
+        sb.append("('");
+        try {
+            Utils.escapeLiteral(sb, text, standardConformingStrings);
+        } catch (SQLException e) {
+            // This should only happen if we have an embedded null
+            // and there's not much we can do if we do hit one.
+            //
+            // To force a server side failure, we deliberately include
+            // a zero byte character in the literal to force the server
+            // to reject the command.
+            sb.append('\u0000');
+        }
+        sb.append("'");
+        if (type != null) {
+            sb.append("::");
+            sb.append(type);
+        }
+        sb.append(")");
+        return sb.toString();
     }
 
-    --index;
+    private static void streamBytea(PGStream pgStream, StreamWrapper wrapper) throws IOException {
+        byte[] rawData = wrapper.getBytes();
+        if (rawData != null) {
+            pgStream.send(rawData, wrapper.getOffset(), wrapper.getLength());
+            return;
+        }
 
-    encoded[index] = null;
-    paramValues[index] = value;
-    flags[index] = (byte) (direction(index) | IN | binary);
-
-    // If we are setting something to an UNSPECIFIED NULL, don't overwrite
-    // our existing type for it. We don't need the correct type info to
-    // send this value, and we don't want to overwrite and require a
-    // reparse.
-    if (oid == Oid.UNSPECIFIED && paramTypes[index] != Oid.UNSPECIFIED && value == NULL_OBJECT) {
-      return;
+        pgStream.sendStream(wrapper.getStream(), wrapper.getLength());
     }
 
-    paramTypes[index] = oid;
-    pos = index + 1;
-  }
-
-  @Override
-  public int getParameterCount() {
-    return paramValues.length;
-  }
-
-  @Override
-  public int getOutParameterCount() {
-    int count = 0;
-    for (int i = 0; i < paramTypes.length; i++) {
-      if ((direction(i) & OUT) == OUT) {
-        count++;
-      }
-    }
-    // Every function has at least one output.
-    if (count == 0) {
-      count = 1;
-    }
-    return count;
-
-  }
-
-  @Override
-  public int getInParameterCount() {
-    int count = 0;
-    for (int i = 0; i < paramTypes.length; i++) {
-      if (direction(i) != OUT) {
-        count++;
-      }
-    }
-    return count;
-  }
-
-  @Override
-  public void setIntParameter(int index, int value) throws SQLException {
-    byte[] data = new byte[4];
-    ByteConverter.int4(data, 0, value);
-    bind(index, data, Oid.INT4, BINARY);
-  }
-
-  @Override
-  public void setLiteralParameter(int index, String value, int oid) throws SQLException {
-    bind(index, value, oid, TEXT);
-  }
-
-  @Override
-  public void setStringParameter(int index, String value, int oid) throws SQLException {
-    bind(index, value, oid, TEXT);
-  }
-
-  @Override
-  public void setBinaryParameter(int index, byte[] value, int oid) throws SQLException {
-    bind(index, value, oid, BINARY);
-  }
-
-  @Override
-  public void setBytea(int index, byte[] data, int offset, int length) throws SQLException {
-    bind(index, new StreamWrapper(data, offset, length), Oid.BYTEA, BINARY);
-  }
-
-  @Override
-  public void setBytea(int index, InputStream stream, int length) throws SQLException {
-    bind(index, new StreamWrapper(stream, length), Oid.BYTEA, BINARY);
-  }
-
-  @Override
-  public void setBytea(int index, InputStream stream) throws SQLException {
-    bind(index, new StreamWrapper(stream), Oid.BYTEA, BINARY);
-  }
-
-  @Override
-  public void setBytea(int index, ByteStreamWriter writer) throws SQLException {
-    bind(index, writer, Oid.BYTEA, BINARY);
-  }
-
-  @Override
-  public void setText(int index, InputStream stream) throws SQLException {
-    bind(index, new StreamWrapper(stream), Oid.TEXT, TEXT);
-  }
-
-  @Override
-  public void setNull(int index, int oid) throws SQLException {
-
-    byte binaryTransfer = TEXT;
-
-    if (transferModeRegistry != null && transferModeRegistry.useBinaryForReceive(oid)) {
-      binaryTransfer = BINARY;
-    }
-    bind(index, NULL_OBJECT, oid, binaryTransfer);
-  }
-
-  /**
-   * <p>Escapes a given text value as a literal, wraps it in single quotes, casts it to the
-   * to the given data type, and finally wraps the whole thing in parentheses.</p>
-   *
-   * <p>For example, "123" and "int4" becomes "('123'::int)"</p>
-   *
-   * <p>The additional parentheses is added to ensure that the surrounding text of where the
-   * parameter value is entered does modify the interpretation of the value.</p>
-   *
-   * <p>For example if our input SQL is: <code>SELECT ?b</code></p>
-   *
-   * <p>Using a parameter value of '{}' and type of json we'd get:</p>
-   *
-   * <pre>
-   * test=# SELECT ('{}'::json)b;
-   *  b
-   * ----
-   *  {}
-   * </pre>
-   *
-   * <p>But without the parentheses the result changes:</p>
-   *
-   * <pre>
-   * test=# SELECT '{}'::jsonb;
-   * jsonb
-   * -------
-   * {}
-   * </pre>
-   **/
-  private static String quoteAndCast(String text, String type, boolean standardConformingStrings) {
-    StringBuilder sb = new StringBuilder((text.length() + 10) / 10 * 11); // Add 10% for escaping.
-    sb.append("('");
-    try {
-      Utils.escapeLiteral(sb, text, standardConformingStrings);
-    } catch (SQLException e) {
-      // This should only happen if we have an embedded null
-      // and there's not much we can do if we do hit one.
-      //
-      // To force a server side failure, we deliberately include
-      // a zero byte character in the literal to force the server
-      // to reject the command.
-      sb.append('\u0000');
-    }
-    sb.append("'");
-    if (type != null) {
-      sb.append("::");
-      sb.append(type);
-    }
-    sb.append(")");
-    return sb.toString();
-  }
-
-  @Override
-  public String toString(int index, boolean standardConformingStrings) {
-    --index;
-    Object paramValue = paramValues[index];
-    if (paramValue == null) {
-      return "?";
-    } else if (paramValue == NULL_OBJECT) {
-      return "(NULL)";
-    }
-    String textValue;
-    String type;
-    if ((flags[index] & BINARY) == BINARY) {
-      // handle some of the numeric types
-      switch (paramTypes[index]) {
-        case Oid.INT2:
-          short s = ByteConverter.int2((byte[]) paramValue, 0);
-          textValue = Short.toString(s);
-          type = "int2";
-          break;
-
-        case Oid.INT4:
-          int i = ByteConverter.int4((byte[]) paramValue, 0);
-          textValue = Integer.toString(i);
-          type = "int4";
-          break;
-
-        case Oid.INT8:
-          long l = ByteConverter.int8((byte[]) paramValue, 0);
-          textValue = Long.toString(l);
-          type = "int8";
-          break;
-
-        case Oid.FLOAT4:
-          float f = ByteConverter.float4((byte[]) paramValue, 0);
-          if (Float.isNaN(f)) {
-            return "('NaN'::real)";
-          }
-          textValue = Float.toString(f);
-          type = "real";
-          break;
-
-        case Oid.FLOAT8:
-          double d = ByteConverter.float8((byte[]) paramValue, 0);
-          if (Double.isNaN(d)) {
-            return "('NaN'::double precision)";
-          }
-          textValue = Double.toString(d);
-          type = "double precision";
-          break;
-
-        case Oid.NUMERIC:
-          Number n = ByteConverter.numeric((byte[]) paramValue);
-          if (n instanceof Double) {
-            assert ((Double) n).isNaN();
-            return "('NaN'::numeric)";
-          }
-          textValue = n.toString();
-          type = "numeric";
-          break;
-
-        case Oid.UUID:
-          textValue =
-              new UUIDArrayAssistant().buildElement((byte[]) paramValue, 0, 16).toString();
-          type = "uuid";
-          break;
-
-        case Oid.POINT:
-          PGpoint pgPoint = new PGpoint();
-          pgPoint.setByteValue((byte[]) paramValue, 0);
-          textValue = pgPoint.toString();
-          type = "point";
-          break;
-
-        case Oid.BOX:
-          PGbox pgBox = new PGbox();
-          pgBox.setByteValue((byte[]) paramValue, 0);
-          textValue = pgBox.toString();
-          type = "box";
-          break;
-
-        default:
-          return "?";
-      }
-    } else {
-      textValue = paramValue.toString();
-      switch (paramTypes[index]) {
-        case Oid.INT2:
-          type = "int2";
-          break;
-        case Oid.INT4:
-          type = "int4";
-          break;
-        case Oid.INT8:
-          type = "int8";
-          break;
-        case Oid.FLOAT4:
-          type = "real";
-          break;
-        case Oid.FLOAT8:
-          type = "double precision";
-          break;
-        case Oid.TIMESTAMP:
-          type = "timestamp";
-          break;
-        case Oid.TIMESTAMPTZ:
-          type = "timestamp with time zone";
-          break;
-        case Oid.TIME:
-          type = "time";
-          break;
-        case Oid.TIMETZ:
-          type = "time with time zone";
-          break;
-        case Oid.DATE:
-          type = "date";
-          break;
-        case Oid.INTERVAL:
-          type = "interval";
-          break;
-        case Oid.NUMERIC:
-          type = "numeric";
-          break;
-        case Oid.UUID:
-          type = "uuid";
-          break;
-        case Oid.BOOL:
-          type = "boolean";
-          break;
-        case Oid.BOX:
-          type = "box";
-          break;
-        case Oid.POINT:
-          type = "point";
-          break;
-        default:
-          type = null;
-      }
-    }
-    return quoteAndCast(textValue, type, standardConformingStrings);
-  }
-
-  @Override
-  public void checkAllParametersSet() throws SQLException {
-    for (int i = 0; i < paramTypes.length; i++) {
-      if (direction(i) != OUT && paramValues[i] == null) {
-        throw new PSQLException(GT.tr("No value specified for parameter {0}.", i + 1),
-            PSQLState.INVALID_PARAMETER_VALUE);
-      }
-    }
-  }
-
-  @Override
-  public void convertFunctionOutParameters() {
-    for (int i = 0; i < paramTypes.length; i++) {
-      if (direction(i) == OUT) {
-        paramTypes[i] = Oid.VOID;
-        paramValues[i] = NULL_OBJECT;
-      }
-    }
-  }
-
-  //
-  // bytea helper
-  //
-
-  private static void streamBytea(PGStream pgStream, StreamWrapper wrapper) throws IOException {
-    byte[] rawData = wrapper.getBytes();
-    if (rawData != null) {
-      pgStream.send(rawData, wrapper.getOffset(), wrapper.getLength());
-      return;
+    private static void streamBytea(PGStream pgStream, ByteStreamWriter writer) throws IOException {
+        pgStream.send(writer);
     }
 
-    pgStream.sendStream(wrapper.getStream(), wrapper.getLength());
-  }
+    @Override
+    public void registerOutParameter(int index, int sqlType) throws SQLException {
+        if (index < 1 || index > paramValues.length) {
+            throw new PSQLException(
+                    GT.tr("The column index is out of range: {0}, number of columns: {1}.",
+                            index, paramValues.length),
+                    PSQLState.INVALID_PARAMETER_VALUE);
+        }
 
-  //
-  // byte stream writer support
-  //
-
-  private static void streamBytea(PGStream pgStream, ByteStreamWriter writer) throws IOException {
-    pgStream.send(writer);
-  }
-
-  @Override
-  public int[] getTypeOIDs() {
-    return paramTypes;
-  }
-
-  //
-  // Package-private V3 accessors
-  //
-
-  int getTypeOID(int index) {
-    return paramTypes[index - 1];
-  }
-
-  boolean hasUnresolvedTypes() {
-    for (int paramType : paramTypes) {
-      if (paramType == Oid.UNSPECIFIED) {
-        return true;
-      }
-    }
-    return false;
-  }
-
-  void setResolvedType(int index, int oid) {
-    // only allow overwriting an unknown value or VOID value
-    if (paramTypes[index - 1] == Oid.UNSPECIFIED || paramTypes[index - 1] == Oid.VOID) {
-      paramTypes[index - 1] = oid;
-    } else if (paramTypes[index - 1] != oid) {
-      throw new IllegalArgumentException("Can't change resolved type for param: " + index + " from "
-          + paramTypes[index - 1] + " to " + oid);
-    }
-  }
-
-  boolean isNull(int index) {
-    return paramValues[index - 1] == NULL_OBJECT;
-  }
-
-  boolean isBinary(int index) {
-    return (flags[index - 1] & BINARY) != 0;
-  }
-
-  private byte direction(int index) {
-    return (byte) (flags[index] & INOUT);
-  }
-
-  int getV3Length(int index) {
-    --index;
-
-    // Null?
-    Object value = paramValues[index];
-    if (value == null || value == NULL_OBJECT) {
-      throw new IllegalArgumentException("can't getV3Length() on a null parameter");
+        flags[index - 1] |= OUT;
     }
 
-    // Directly encoded?
-    if (value instanceof byte[]) {
-      return ((byte[]) value).length;
+    private void bind(int index, Object value, int oid, byte binary) throws SQLException {
+        if (index < 1 || index > paramValues.length) {
+            throw new PSQLException(
+                    GT.tr("The column index is out of range: {0}, number of columns: {1}.",
+                            index, paramValues.length),
+                    PSQLState.INVALID_PARAMETER_VALUE);
+        }
+
+        --index;
+
+        encoded[index] = null;
+        paramValues[index] = value;
+        flags[index] = (byte) (direction(index) | IN | binary);
+
+        // If we are setting something to an UNSPECIFIED NULL, don't overwrite
+        // our existing type for it. We don't need the correct type info to
+        // send this value, and we don't want to overwrite and require a
+        // reparse.
+        if (oid == Oid.UNSPECIFIED && paramTypes[index] != Oid.UNSPECIFIED && value == NULL_OBJECT) {
+            return;
+        }
+
+        paramTypes[index] = oid;
+        pos = index + 1;
     }
 
-    // Binary-format bytea?
-    if (value instanceof StreamWrapper) {
-      return ((StreamWrapper) value).getLength();
+    @Override
+    public int getParameterCount() {
+        return paramValues.length;
     }
 
-    // Binary-format bytea?
-    if (value instanceof ByteStreamWriter) {
-      return ((ByteStreamWriter) value).getLength();
+    @Override
+    public int getOutParameterCount() {
+        int count = 0;
+        for (int i = 0; i < paramTypes.length; i++) {
+            if ((direction(i) & OUT) == OUT) {
+                count++;
+            }
+        }
+        // Every function has at least one output.
+        if (count == 0) {
+            count = 1;
+        }
+        return count;
+
     }
 
-    // Already encoded?
-    byte[] encoded = this.encoded[index];
-    if (encoded == null) {
-      // Encode value and compute actual length using UTF-8.
-      this.encoded[index] = encoded = value.toString().getBytes(StandardCharsets.UTF_8);
+    @Override
+    public int getInParameterCount() {
+        int count = 0;
+        for (int i = 0; i < paramTypes.length; i++) {
+            if (direction(i) != OUT) {
+                count++;
+            }
+        }
+        return count;
     }
 
-    return encoded.length;
-  }
-
-  void writeV3Value(int index, PGStream pgStream) throws IOException {
-    --index;
-
-    // Null?
-    Object paramValue = paramValues[index];
-    if (paramValue == null || paramValue == NULL_OBJECT) {
-      throw new IllegalArgumentException("can't writeV3Value() on a null parameter");
+    @Override
+    public void setIntParameter(int index, int value) throws SQLException {
+        byte[] data = new byte[4];
+        ByteConverter.int4(data, 0, value);
+        bind(index, data, Oid.INT4, BINARY);
     }
 
-    // Directly encoded?
-    if (paramValue instanceof byte[]) {
-      pgStream.send((byte[]) paramValue);
-      return;
+    @Override
+    public void setLiteralParameter(int index, String value, int oid) throws SQLException {
+        bind(index, value, oid, TEXT);
     }
 
-    // Binary-format bytea?
-    if (paramValue instanceof StreamWrapper) {
-      try (StreamWrapper streamWrapper = (StreamWrapper) paramValue) {
-        streamBytea(pgStream, streamWrapper);
-      }
-      return;
+    @Override
+    public void setStringParameter(int index, String value, int oid) throws SQLException {
+        bind(index, value, oid, TEXT);
     }
 
-    // Streamed bytea?
-    if (paramValue instanceof ByteStreamWriter) {
-      streamBytea(pgStream, (ByteStreamWriter) paramValue);
-      return;
+    @Override
+    public void setBinaryParameter(int index, byte[] value, int oid) throws SQLException {
+        bind(index, value, oid, BINARY);
     }
 
-    // Encoded string.
-    if (encoded[index] == null) {
-      encoded[index] = ((String) paramValue).getBytes(StandardCharsets.UTF_8);
+    //
+    // bytea helper
+    //
+
+    @Override
+    public void setBytea(int index, byte[] data, int offset, int length) throws SQLException {
+        bind(index, new StreamWrapper(data, offset, length), Oid.BYTEA, BINARY);
     }
-    pgStream.send(encoded[index]);
-  }
 
-  @Override
-  public ParameterList copy() {
-    SimpleParameterList newCopy = new SimpleParameterList(paramValues.length, transferModeRegistry);
-    System.arraycopy(paramValues, 0, newCopy.paramValues, 0, paramValues.length);
-    System.arraycopy(paramTypes, 0, newCopy.paramTypes, 0, paramTypes.length);
-    System.arraycopy(flags, 0, newCopy.flags, 0, flags.length);
-    newCopy.pos = pos;
-    return newCopy;
-  }
+    //
+    // bytea helper (stream variants)
+    //
 
-  @Override
-  public void clear() {
-    Arrays.fill(paramValues, null);
-    Arrays.fill(paramTypes, 0);
-    Arrays.fill(encoded, null);
-    Arrays.fill(flags, (byte) 0);
-    pos = 0;
-  }
+    @Override
+    public void setBytea(int index, InputStream stream, int length) throws SQLException {
+        bind(index, new StreamWrapper(stream, length), Oid.BYTEA, BINARY);
+    }
 
-  @Override
-  public SimpleParameterList [] getSubparams() {
-    return null;
-  }
+    @Override
+    public void setBytea(int index, InputStream stream) throws SQLException {
+        bind(index, new StreamWrapper(stream), Oid.BYTEA, BINARY);
+    }
 
-  @Override
-  public Object[] getValues() {
-    return paramValues;
-  }
+    //
+    // byte stream writer support
+    //
 
-  @Override
-  public int[] getParamTypes() {
-    return paramTypes;
-  }
+    @Override
+    public void setBytea(int index, ByteStreamWriter writer) throws SQLException {
+        bind(index, writer, Oid.BYTEA, BINARY);
+    }
 
-  @Override
-  public byte[] getFlags() {
-    return flags;
-  }
+    @Override
+    public void setText(int index, InputStream stream) throws SQLException {
+        bind(index, new StreamWrapper(stream), Oid.TEXT, TEXT);
+    }
 
-  @Override
-  public byte[] [] getEncoding() {
-    return encoded;
-  }
+    @Override
+    public void setNull(int index, int oid) throws SQLException {
 
-  @Override
-  public void appendAll(ParameterList list) throws SQLException {
-    if (list instanceof SimpleParameterList ) {
+        byte binaryTransfer = TEXT;
+
+        if (transferModeRegistry != null && transferModeRegistry.useBinaryForReceive(oid)) {
+            binaryTransfer = BINARY;
+        }
+        bind(index, NULL_OBJECT, oid, binaryTransfer);
+    }
+
+    @Override
+    public String toString(int index, boolean standardConformingStrings) {
+        --index;
+        Object paramValue = paramValues[index];
+        if (paramValue == null) {
+            return "?";
+        } else if (paramValue == NULL_OBJECT) {
+            return "(NULL)";
+        }
+        String textValue;
+        String type;
+        if ((flags[index] & BINARY) == BINARY) {
+            // handle some of the numeric types
+            switch (paramTypes[index]) {
+                case Oid.INT2:
+                    short s = ByteConverter.int2((byte[]) paramValue, 0);
+                    textValue = Short.toString(s);
+                    type = "int2";
+                    break;
+
+                case Oid.INT4:
+                    int i = ByteConverter.int4((byte[]) paramValue, 0);
+                    textValue = Integer.toString(i);
+                    type = "int4";
+                    break;
+
+                case Oid.INT8:
+                    long l = ByteConverter.int8((byte[]) paramValue, 0);
+                    textValue = Long.toString(l);
+                    type = "int8";
+                    break;
+
+                case Oid.FLOAT4:
+                    float f = ByteConverter.float4((byte[]) paramValue, 0);
+                    if (Float.isNaN(f)) {
+                        return "('NaN'::real)";
+                    }
+                    textValue = Float.toString(f);
+                    type = "real";
+                    break;
+
+                case Oid.FLOAT8:
+                    double d = ByteConverter.float8((byte[]) paramValue, 0);
+                    if (Double.isNaN(d)) {
+                        return "('NaN'::double precision)";
+                    }
+                    textValue = Double.toString(d);
+                    type = "double precision";
+                    break;
+
+                case Oid.NUMERIC:
+                    Number n = ByteConverter.numeric((byte[]) paramValue);
+                    if (n instanceof Double) {
+                        assert ((Double) n).isNaN();
+                        return "('NaN'::numeric)";
+                    }
+                    textValue = n.toString();
+                    type = "numeric";
+                    break;
+
+                case Oid.UUID:
+                    textValue =
+                            new UUIDArrayAssistant().buildElement((byte[]) paramValue, 0, 16).toString();
+                    type = "uuid";
+                    break;
+
+                case Oid.POINT:
+                    PGpoint pgPoint = new PGpoint();
+                    pgPoint.setByteValue((byte[]) paramValue, 0);
+                    textValue = pgPoint.toString();
+                    type = "point";
+                    break;
+
+                case Oid.BOX:
+                    PGbox pgBox = new PGbox();
+                    pgBox.setByteValue((byte[]) paramValue, 0);
+                    textValue = pgBox.toString();
+                    type = "box";
+                    break;
+
+                default:
+                    return "?";
+            }
+        } else {
+            textValue = paramValue.toString();
+            switch (paramTypes[index]) {
+                case Oid.INT2:
+                    type = "int2";
+                    break;
+                case Oid.INT4:
+                    type = "int4";
+                    break;
+                case Oid.INT8:
+                    type = "int8";
+                    break;
+                case Oid.FLOAT4:
+                    type = "real";
+                    break;
+                case Oid.FLOAT8:
+                    type = "double precision";
+                    break;
+                case Oid.TIMESTAMP:
+                    type = "timestamp";
+                    break;
+                case Oid.TIMESTAMPTZ:
+                    type = "timestamp with time zone";
+                    break;
+                case Oid.TIME:
+                    type = "time";
+                    break;
+                case Oid.TIMETZ:
+                    type = "time with time zone";
+                    break;
+                case Oid.DATE:
+                    type = "date";
+                    break;
+                case Oid.INTERVAL:
+                    type = "interval";
+                    break;
+                case Oid.NUMERIC:
+                    type = "numeric";
+                    break;
+                case Oid.UUID:
+                    type = "uuid";
+                    break;
+                case Oid.BOOL:
+                    type = "boolean";
+                    break;
+                case Oid.BOX:
+                    type = "box";
+                    break;
+                case Oid.POINT:
+                    type = "point";
+                    break;
+                default:
+                    type = null;
+            }
+        }
+        return quoteAndCast(textValue, type, standardConformingStrings);
+    }
+
+    @Override
+    public void checkAllParametersSet() throws SQLException {
+        for (int i = 0; i < paramTypes.length; i++) {
+            if (direction(i) != OUT && paramValues[i] == null) {
+                throw new PSQLException(GT.tr("No value specified for parameter {0}.", i + 1),
+                        PSQLState.INVALID_PARAMETER_VALUE);
+            }
+        }
+    }
+
+    @Override
+    public void convertFunctionOutParameters() {
+        for (int i = 0; i < paramTypes.length; i++) {
+            if (direction(i) == OUT) {
+                paramTypes[i] = Oid.VOID;
+                paramValues[i] = NULL_OBJECT;
+            }
+        }
+    }
+
+    @Override
+    public int[] getTypeOIDs() {
+        return paramTypes;
+    }
+
+    int getTypeOID(int index) {
+        return paramTypes[index - 1];
+    }
+
+    boolean hasUnresolvedTypes() {
+        for (int paramType : paramTypes) {
+            if (paramType == Oid.UNSPECIFIED) {
+                return true;
+            }
+        }
+        return false;
+    }
+
+    void setResolvedType(int index, int oid) {
+        // only allow overwriting an unknown value or VOID value
+        if (paramTypes[index - 1] == Oid.UNSPECIFIED || paramTypes[index - 1] == Oid.VOID) {
+            paramTypes[index - 1] = oid;
+        } else if (paramTypes[index - 1] != oid) {
+            throw new IllegalArgumentException("Can't change resolved type for param: " + index + " from "
+                    + paramTypes[index - 1] + " to " + oid);
+        }
+    }
+
+    boolean isNull(int index) {
+        return paramValues[index - 1] == NULL_OBJECT;
+    }
+
+    boolean isBinary(int index) {
+        return (flags[index - 1] & BINARY) != 0;
+    }
+
+    private byte direction(int index) {
+        return (byte) (flags[index] & INOUT);
+    }
+
+    int getV3Length(int index) {
+        --index;
+
+        // Null?
+        Object value = paramValues[index];
+        if (value == null || value == NULL_OBJECT) {
+            throw new IllegalArgumentException("can't getV3Length() on a null parameter");
+        }
+
+        // Directly encoded?
+        if (value instanceof byte[]) {
+            return ((byte[]) value).length;
+        }
+
+        // Binary-format bytea?
+        if (value instanceof StreamWrapper) {
+            return ((StreamWrapper) value).getLength();
+        }
+
+        // Streamed bytea?
+        if (value instanceof ByteStreamWriter) {
+            return ((ByteStreamWriter) value).getLength();
+        }
+
+        // Already encoded?
+        byte[] encoded = this.encoded[index];
+        if (encoded == null) {
+            // Encode value and compute actual length using UTF-8.
+            this.encoded[index] = encoded = value.toString().getBytes(StandardCharsets.UTF_8);
+        }
+
+        return encoded.length;
+    }
+
+    void writeV3Value(int index, PGStream pgStream) throws IOException {
+        --index;
+
+        // Null?
+        Object paramValue = paramValues[index];
+        if (paramValue == null || paramValue == NULL_OBJECT) {
+            throw new IllegalArgumentException("can't writeV3Value() on a null parameter");
+        }
+
+        // Directly encoded?
+        if (paramValue instanceof byte[]) {
+            pgStream.send((byte[]) paramValue);
+            return;
+        }
+
+        // Binary-format bytea?
+        if (paramValue instanceof StreamWrapper) {
+            try (StreamWrapper streamWrapper = (StreamWrapper) paramValue) {
+                streamBytea(pgStream, streamWrapper);
+            }
+            return;
+        }
+
+        // Streamed bytea?
+        if (paramValue instanceof ByteStreamWriter) {
+            streamBytea(pgStream, (ByteStreamWriter) paramValue);
+            return;
+        }
+
+        // Encoded string.
+        if (encoded[index] == null) {
+            encoded[index] = ((String) paramValue).getBytes(StandardCharsets.UTF_8);
+        }
+        pgStream.send(encoded[index]);
+    }
+
+    @Override
+    public ParameterList copy() {
+        SimpleParameterList newCopy = new SimpleParameterList(paramValues.length, transferModeRegistry);
+        System.arraycopy(paramValues, 0, newCopy.paramValues, 0, paramValues.length);
+        System.arraycopy(paramTypes, 0, newCopy.paramTypes, 0, paramTypes.length);
+        System.arraycopy(flags, 0, newCopy.flags, 0, flags.length);
+        newCopy.pos = pos;
+        return newCopy;
+    }
+
+    @Override
+    public void clear() {
+        Arrays.fill(paramValues, null);
+        Arrays.fill(paramTypes, 0);
+        Arrays.fill(encoded, null);
+        Arrays.fill(flags, (byte) 0);
+        pos = 0;
+    }
+
+    @Override
+    public SimpleParameterList[] getSubparams() {
+        return null;
+    }
+
+    @Override
+    public Object[] getValues() {
+        return paramValues;
+    }
+
+    @Override
+    public int[] getParamTypes() {
+        return paramTypes;
+    }
+
+    @Override
+    public byte[] getFlags() {
+        return flags;
+    }
+
+    @Override
+    public byte[][] getEncoding() {
+        return encoded;
+    }
+
+    @Override
+    public void appendAll(ParameterList list) throws SQLException {
+        if (list instanceof SimpleParameterList) {
       /* only v3.SimpleParameterList is compatible with this type
       we need to create copies of our parameters, otherwise the values can be changed */
-      SimpleParameterList spl = (SimpleParameterList) list;
-      int inParamCount = spl.getInParameterCount();
-      if ((pos + inParamCount) > paramValues.length) {
-        throw new PSQLException(
-          GT.tr("Added parameters index out of range: {0}, number of columns: {1}.",
-              (pos + inParamCount), paramValues.length),
-              PSQLState.INVALID_PARAMETER_VALUE);
-      }
-      System.arraycopy(spl.getValues(), 0, this.paramValues, pos, inParamCount);
-      System.arraycopy(spl.getParamTypes(), 0, this.paramTypes, pos, inParamCount);
-      System.arraycopy(spl.getFlags(), 0, this.flags, pos, inParamCount);
-      System.arraycopy(spl.getEncoding(), 0, this.encoded, pos, inParamCount);
-      pos += inParamCount;
+            SimpleParameterList spl = (SimpleParameterList) list;
+            int inParamCount = spl.getInParameterCount();
+            if ((pos + inParamCount) > paramValues.length) {
+                throw new PSQLException(
+                        GT.tr("Added parameters index out of range: {0}, number of columns: {1}.",
+                                (pos + inParamCount), paramValues.length),
+                        PSQLState.INVALID_PARAMETER_VALUE);
+            }
+            System.arraycopy(spl.getValues(), 0, this.paramValues, pos, inParamCount);
+            System.arraycopy(spl.getParamTypes(), 0, this.paramTypes, pos, inParamCount);
+            System.arraycopy(spl.getFlags(), 0, this.flags, pos, inParamCount);
+            System.arraycopy(spl.getEncoding(), 0, this.encoded, pos, inParamCount);
+            pos += inParamCount;
+        }
     }
-  }
 
-  /**
-   * Useful implementation of toString.
-   * @return String representation of the list values
-   */
-  @Override
-  public String toString() {
-    StringBuilder ts = new StringBuilder("<[");
-    if (paramValues.length > 0) {
-      ts.append(toString(1, true));
-      for (int c = 2; c <= paramValues.length; c++) {
-        ts.append(" ,").append(toString(c, true));
-      }
+    /**
+     * Useful implementation of toString.
+     *
+     * @return String representation of the list values
+     */
+    @Override
+    public String toString() {
+        StringBuilder ts = new StringBuilder("<[");
+        if (paramValues.length > 0) {
+            ts.append(toString(1, true));
+            for (int c = 2; c <= paramValues.length; c++) {
+                ts.append(" ,").append(toString(c, true));
+            }
+        }
+        ts.append("]>");
+        return ts.toString();
     }
-    ts.append("]>");
-    return ts.toString();
-  }
-
-  private final Object[] paramValues;
-  private final int[] paramTypes;
-  private final byte[] flags;
-  private final byte[] [] encoded;
-  private final TypeTransferModeRegistry transferModeRegistry;
-
-  /**
-   * Marker object representing NULL; this distinguishes "parameter never set" from "parameter set
-   * to null".
-   */
-  private static final Object NULL_OBJECT = new Object();
-
-  private int pos;
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/core/v3/SimpleQuery.java b/pgjdbc/src/main/java/org/postgresql/core/v3/SimpleQuery.java
index d405f4b..b134fd8 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/v3/SimpleQuery.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/v3/SimpleQuery.java
@@ -6,6 +6,12 @@
 
 package org.postgresql.core.v3;
 
+import java.lang.ref.PhantomReference;
+import java.nio.charset.StandardCharsets;
+import java.util.BitSet;
+import java.util.Map;
+import java.util.logging.Level;
+import java.util.logging.Logger;
 import org.postgresql.core.Field;
 import org.postgresql.core.NativeQuery;
 import org.postgresql.core.Oid;
@@ -14,13 +20,6 @@ import org.postgresql.core.Query;
 import org.postgresql.core.SqlCommand;
 import org.postgresql.jdbc.PgResultSet;
 
-import java.lang.ref.PhantomReference;
-import java.nio.charset.StandardCharsets;
-import java.util.BitSet;
-import java.util.Map;
-import java.util.logging.Level;
-import java.util.logging.Logger;
-
 /**
  * V3 Query implementation for a single-statement query. This also holds the state of any associated
  * server-side named statement. We use a PhantomReference managed by the QueryExecutor to handle
@@ -29,353 +28,348 @@ import java.util.logging.Logger;
  * @author Oliver Jowett (oliver@opencloud.com)
  */
 class SimpleQuery implements Query {
-  private static final Logger LOGGER = Logger.getLogger(SimpleQuery.class.getName());
+    static final SimpleParameterList NO_PARAMETERS = new SimpleParameterList(0, null);
+    private static final Logger LOGGER = Logger.getLogger(SimpleQuery.class.getName());
+    private final NativeQuery nativeQuery;
+    private final TypeTransferModeRegistry transferModeRegistry;
+    private final boolean sanitiserDisabled;
+    private Map<String, Integer> resultSetColumnNameIndexMap;
+    private String statementName;
+    private byte[] encodedStatementName;
+    /**
+     * The stored fields from previous execution or describe of a prepared statement. Always null for
+     * non-prepared statements.
+     */
+    private Field[] fields;
 
-  SimpleQuery(SimpleQuery src) {
-    this(src.nativeQuery, src.transferModeRegistry, src.sanitiserDisabled);
-  }
+    //
+    // Implementation guts
+    //
+    private boolean needUpdateFieldFormats;
+    private boolean hasBinaryFields;
+    private boolean portalDescribed;
+    private boolean statementDescribed;
+    private PhantomReference<?> cleanupRef;
+    private int[] preparedTypes;
+    private BitSet unspecifiedParams;
+    private short deallocateEpoch;
+    private Integer cachedMaxResultRowSize;
 
-  SimpleQuery(NativeQuery query, TypeTransferModeRegistry transferModeRegistry,
-      boolean sanitiserDisabled) {
-    this.nativeQuery = query;
-    this.transferModeRegistry = transferModeRegistry;
-    this.sanitiserDisabled = sanitiserDisabled;
-  }
-
-  @Override
-  public ParameterList createParameterList() {
-    if (nativeQuery.bindPositions.length == 0) {
-      return NO_PARAMETERS;
+    SimpleQuery(SimpleQuery src) {
+        this(src.nativeQuery, src.transferModeRegistry, src.sanitiserDisabled);
     }
 
-    return new SimpleParameterList(getBindCount(), transferModeRegistry);
-  }
-
-  @Override
-  public String toString(ParameterList parameters) {
-    return nativeQuery.toString(parameters);
-  }
-
-  @Override
-  public String toString() {
-    return toString(null);
-  }
-
-  @Override
-  public void close() {
-    unprepare();
-  }
-
-  @Override
-  public SimpleQuery [] getSubqueries() {
-    return null;
-  }
-
-  /**
-   * <p>Return maximum size in bytes that each result row from this query may return. Mainly used for
-   * batches that return results.</p>
-   *
-   * <p>Results are cached until/unless the query is re-described.</p>
-   *
-   * @return Max size of result data in bytes according to returned fields, 0 if no results, -1 if
-   *         result is unbounded.
-   * @throws IllegalStateException if the query is not described
-   */
-  public int getMaxResultRowSize() {
-    if (cachedMaxResultRowSize != null) {
-      return cachedMaxResultRowSize;
+    SimpleQuery(NativeQuery query, TypeTransferModeRegistry transferModeRegistry,
+                boolean sanitiserDisabled) {
+        this.nativeQuery = query;
+        this.transferModeRegistry = transferModeRegistry;
+        this.sanitiserDisabled = sanitiserDisabled;
     }
-    if (!this.statementDescribed) {
-      throw new IllegalStateException(
-          "Cannot estimate result row size on a statement that is not described");
-    }
-    int maxResultRowSize = 0;
-    if (fields != null) {
-      for (Field f : fields) {
-        final int fieldLength = f.getLength();
-        if (fieldLength < 1 || fieldLength >= 65535) {
-          /*
-           * Field length unknown or large; we can't make any safe estimates about the result size,
-           * so we have to fall back to sending queries individually.
-           */
-          maxResultRowSize = -1;
-          break;
+
+    @Override
+    public ParameterList createParameterList() {
+        if (nativeQuery.bindPositions.length == 0) {
+            return NO_PARAMETERS;
         }
-        maxResultRowSize += fieldLength;
-      }
+
+        return new SimpleParameterList(getBindCount(), transferModeRegistry);
     }
-    cachedMaxResultRowSize = maxResultRowSize;
-    return maxResultRowSize;
-  }
 
-  //
-  // Implementation guts
-  //
+    @Override
+    public String toString(ParameterList parameters) {
+        return nativeQuery.toString(parameters);
+    }
 
-  @Override
-  public String getNativeSql() {
-    return nativeQuery.nativeSql;
-  }
+    @Override
+    public String toString() {
+        return toString(null);
+    }
 
-  void setStatementName(String statementName, short deallocateEpoch) {
-    assert statementName != null : "statement name should not be null";
-    this.statementName = statementName;
-    this.encodedStatementName = statementName.getBytes(StandardCharsets.UTF_8);
-    this.deallocateEpoch = deallocateEpoch;
-  }
+    @Override
+    public void close() {
+        unprepare();
+    }
 
-  void setPrepareTypes(int[] paramTypes) {
-    // Remember which parameters were unspecified since the parameters will be overridden later by
-    // ParameterDescription message
-    for (int i = 0; i < paramTypes.length; i++) {
-      int paramType = paramTypes[i];
-      if (paramType == Oid.UNSPECIFIED) {
-        if (this.unspecifiedParams == null) {
-          this.unspecifiedParams = new BitSet();
+    @Override
+    public SimpleQuery[] getSubqueries() {
+        return null;
+    }
+
+    /**
+     * <p>Return maximum size in bytes that each result row from this query may return. Mainly used for
+     * batches that return results.</p>
+     *
+     * <p>Results are cached until/unless the query is re-described.</p>
+     *
+     * @return Max size of result data in bytes according to returned fields, 0 if no results, -1 if
+     * result is unbounded.
+     * @throws IllegalStateException if the query is not described
+     */
+    public int getMaxResultRowSize() {
+        if (cachedMaxResultRowSize != null) {
+            return cachedMaxResultRowSize;
         }
-        this.unspecifiedParams.set(i);
-      }
+        if (!this.statementDescribed) {
+            throw new IllegalStateException(
+                    "Cannot estimate result row size on a statement that is not described");
+        }
+        int maxResultRowSize = 0;
+        if (fields != null) {
+            for (Field f : fields) {
+                final int fieldLength = f.getLength();
+                if (fieldLength < 1 || fieldLength >= 65535) {
+                    /*
+                     * Field length unknown or large; we can't make any safe estimates about the result size,
+                     * so we have to fall back to sending queries individually.
+                     */
+                    maxResultRowSize = -1;
+                    break;
+                }
+                maxResultRowSize += fieldLength;
+            }
+        }
+        cachedMaxResultRowSize = maxResultRowSize;
+        return maxResultRowSize;
     }
 
-    // paramTypes is changed by "describe statement" response, so we clone the array
-    // However, we can reuse array if there is one
-    if (this.preparedTypes == null) {
-      this.preparedTypes = paramTypes.clone();
-      return;
-    }
-    System.arraycopy(paramTypes, 0, this.preparedTypes, 0, paramTypes.length);
-  }
-
-  int [] getPrepareTypes() {
-    return preparedTypes;
-  }
-
-  String getStatementName() {
-    return statementName;
-  }
-
-  boolean isPreparedFor(int[] paramTypes, short deallocateEpoch) {
-    if (statementName == null || preparedTypes == null) {
-      return false; // Not prepared.
-    }
-    if (this.deallocateEpoch != deallocateEpoch) {
-      return false;
+    @Override
+    public String getNativeSql() {
+        return nativeQuery.nativeSql;
     }
 
-    assert paramTypes.length == preparedTypes.length
-        : String.format("paramTypes:%1$d preparedTypes:%2$d", paramTypes.length,
-        preparedTypes.length);
-    // Check for compatible types.
-    BitSet unspecified = this.unspecifiedParams;
-    for (int i = 0; i < paramTypes.length; i++) {
-      int paramType = paramTypes[i];
-      // Either paramType should match prepared type
-      // Or paramType==UNSPECIFIED and the prepare type was UNSPECIFIED
+    void setStatementName(String statementName, short deallocateEpoch) {
+        assert statementName != null : "statement name should not be null";
+        this.statementName = statementName;
+        this.encodedStatementName = statementName.getBytes(StandardCharsets.UTF_8);
+        this.deallocateEpoch = deallocateEpoch;
+    }
 
-      // Note: preparedTypes can be updated by "statement describe"
-      // 1) parse(name="S_01", sql="select ?::timestamp", types={UNSPECIFIED})
-      // 2) statement describe: bind 1 type is TIMESTAMP
-      // 3) SimpleQuery.preparedTypes is updated to TIMESTAMP
-      // ...
-      // 4.1) bind(name="S_01", ..., types={TIMESTAMP}) -> OK (since preparedTypes is equal to TIMESTAMP)
-      // 4.2) bind(name="S_01", ..., types={UNSPECIFIED}) -> OK (since the query was initially parsed with UNSPECIFIED)
-      // 4.3) bind(name="S_01", ..., types={DATE}) -> KO, unprepare and parse required
+    int[] getPrepareTypes() {
+        return preparedTypes;
+    }
 
-      int preparedType = preparedTypes[i];
-      if (paramType != preparedType
-          && (paramType != Oid.UNSPECIFIED
-          || unspecified == null
-          || !unspecified.get(i))) {
-        if (LOGGER.isLoggable(Level.FINER)) {
-          LOGGER.log(Level.FINER,
-              "Statement {0} does not match new parameter types. Will have to un-prepare it and parse once again."
-                  + " To avoid performance issues, use the same data type for the same bind position. Bind index (1-based) is {1},"
-                  + " preparedType was {2} (after describe {3}), current bind type is {4}",
-              new Object[]{statementName, i + 1,
-                  Oid.toString(unspecified != null && unspecified.get(i) ? 0 : preparedType),
-                  Oid.toString(preparedType), Oid.toString(paramType)});
+    void setPrepareTypes(int[] paramTypes) {
+        // Remember which parameters were unspecified since the parameters will be overridden later by
+        // ParameterDescription message
+        for (int i = 0; i < paramTypes.length; i++) {
+            int paramType = paramTypes[i];
+            if (paramType == Oid.UNSPECIFIED) {
+                if (this.unspecifiedParams == null) {
+                    this.unspecifiedParams = new BitSet();
+                }
+                this.unspecifiedParams.set(i);
+            }
+        }
+
+        // paramTypes is changed by "describe statement" response, so we clone the array
+        // However, we can reuse array if there is one
+        if (this.preparedTypes == null) {
+            this.preparedTypes = paramTypes.clone();
+            return;
+        }
+        System.arraycopy(paramTypes, 0, this.preparedTypes, 0, paramTypes.length);
+    }
+
+    String getStatementName() {
+        return statementName;
+    }
+
+    boolean isPreparedFor(int[] paramTypes, short deallocateEpoch) {
+        if (statementName == null || preparedTypes == null) {
+            return false; // Not prepared.
+        }
+        if (this.deallocateEpoch != deallocateEpoch) {
+            return false;
+        }
+
+        assert paramTypes.length == preparedTypes.length
+                : String.format("paramTypes:%1$d preparedTypes:%2$d", paramTypes.length,
+                preparedTypes.length);
+        // Check for compatible types.
+        BitSet unspecified = this.unspecifiedParams;
+        for (int i = 0; i < paramTypes.length; i++) {
+            int paramType = paramTypes[i];
+            // Either paramType should match prepared type
+            // Or paramType==UNSPECIFIED and the prepare type was UNSPECIFIED
+
+            // Note: preparedTypes can be updated by "statement describe"
+            // 1) parse(name="S_01", sql="select ?::timestamp", types={UNSPECIFIED})
+            // 2) statement describe: bind 1 type is TIMESTAMP
+            // 3) SimpleQuery.preparedTypes is updated to TIMESTAMP
+            // ...
+            // 4.1) bind(name="S_01", ..., types={TIMESTAMP}) -> OK (since preparedTypes is equal to TIMESTAMP)
+            // 4.2) bind(name="S_01", ..., types={UNSPECIFIED}) -> OK (since the query was initially parsed with UNSPECIFIED)
+            // 4.3) bind(name="S_01", ..., types={DATE}) -> KO, unprepare and parse required
+
+            int preparedType = preparedTypes[i];
+            if (paramType != preparedType
+                    && (paramType != Oid.UNSPECIFIED
+                    || unspecified == null
+                    || !unspecified.get(i))) {
+                if (LOGGER.isLoggable(Level.FINER)) {
+                    LOGGER.log(Level.FINER,
+                            "Statement {0} does not match new parameter types. Will have to un-prepare it and parse once again."
+                                    + " To avoid performance issues, use the same data type for the same bind position. Bind index (1-based) is {1},"
+                                    + " preparedType was {2} (after describe {3}), current bind type is {4}",
+                            new Object[]{statementName, i + 1,
+                                    Oid.toString(unspecified != null && unspecified.get(i) ? 0 : preparedType),
+                                    Oid.toString(preparedType), Oid.toString(paramType)});
+                }
+                return false;
+            }
+        }
+
+        return true;
+    }
+
+    boolean hasUnresolvedTypes() {
+        if (preparedTypes == null) {
+            return true;
+        }
+
+        return this.unspecifiedParams != null && !this.unspecifiedParams.isEmpty();
+    }
+
+    byte[] getEncodedStatementName() {
+        return encodedStatementName;
+    }
+
+    /**
+     * Returns the fields that this query will return. If the result set fields are not known returns
+     * null.
+     *
+     * @return the fields that this query will return.
+     */
+    Field[] getFields() {
+        return fields;
+    }
+
+    /**
+     * Sets the fields that this query will return.
+     *
+     * @param fields The fields that this query will return.
+     */
+    void setFields(Field[] fields) {
+        this.fields = fields;
+        this.resultSetColumnNameIndexMap = null;
+        this.cachedMaxResultRowSize = null;
+        this.needUpdateFieldFormats = fields != null;
+        this.hasBinaryFields = false; // just in case
+    }
+
+    /**
+     * Returns true if current query needs field formats be adjusted as per connection configuration.
+     * Subsequent invocations would return {@code false}. The idea is to perform adjustments only
+     * once, not for each
+     * {@link QueryExecutorImpl#sendBind(SimpleQuery, SimpleParameterList, Portal, boolean)}
+     *
+     * @return true if current query needs field formats be adjusted as per connection configuration
+     */
+    boolean needUpdateFieldFormats() {
+        if (needUpdateFieldFormats) {
+            needUpdateFieldFormats = false;
+            return true;
         }
         return false;
-      }
     }
 
-    return true;
-  }
-
-  boolean hasUnresolvedTypes() {
-    if (preparedTypes == null) {
-      return true;
+    public void resetNeedUpdateFieldFormats() {
+        needUpdateFieldFormats = fields != null;
     }
 
-    return this.unspecifiedParams != null && !this.unspecifiedParams.isEmpty();
-  }
-
-  byte [] getEncodedStatementName() {
-    return encodedStatementName;
-  }
-
-  /**
-   * Sets the fields that this query will return.
-   *
-   * @param fields The fields that this query will return.
-   */
-  void setFields(Field [] fields) {
-    this.fields = fields;
-    this.resultSetColumnNameIndexMap = null;
-    this.cachedMaxResultRowSize = null;
-    this.needUpdateFieldFormats = fields != null;
-    this.hasBinaryFields = false; // just in case
-  }
-
-  /**
-   * Returns the fields that this query will return. If the result set fields are not known returns
-   * null.
-   *
-   * @return the fields that this query will return.
-   */
-  Field [] getFields() {
-    return fields;
-  }
-
-  /**
-   * Returns true if current query needs field formats be adjusted as per connection configuration.
-   * Subsequent invocations would return {@code false}. The idea is to perform adjustments only
-   * once, not for each
-   * {@link QueryExecutorImpl#sendBind(SimpleQuery, SimpleParameterList, Portal, boolean)}
-   *
-   * @return true if current query needs field formats be adjusted as per connection configuration
-   */
-  boolean needUpdateFieldFormats() {
-    if (needUpdateFieldFormats) {
-      needUpdateFieldFormats = false;
-      return true;
-    }
-    return false;
-  }
-
-  public void resetNeedUpdateFieldFormats() {
-    needUpdateFieldFormats = fields != null;
-  }
-
-  public boolean hasBinaryFields() {
-    return hasBinaryFields;
-  }
-
-  public void setHasBinaryFields(boolean hasBinaryFields) {
-    this.hasBinaryFields = hasBinaryFields;
-  }
-
-  // Have we sent a Describe Portal message for this query yet?
-  boolean isPortalDescribed() {
-    return portalDescribed;
-  }
-
-  void setPortalDescribed(boolean portalDescribed) {
-    this.portalDescribed = portalDescribed;
-    this.cachedMaxResultRowSize = null;
-  }
-
-  // Have we sent a Describe Statement message for this query yet?
-  // Note that we might not have need to, so this may always be false.
-  @Override
-  public boolean isStatementDescribed() {
-    return statementDescribed;
-  }
-
-  void setStatementDescribed(boolean statementDescribed) {
-    this.statementDescribed = statementDescribed;
-    this.cachedMaxResultRowSize = null;
-  }
-
-  @Override
-  public boolean isEmpty() {
-    return getNativeSql().isEmpty();
-  }
-
-  void setCleanupRef(PhantomReference<?> cleanupRef) {
-    PhantomReference<?> oldCleanupRef = this.cleanupRef;
-    if (oldCleanupRef != null) {
-      oldCleanupRef.clear();
-      oldCleanupRef.enqueue();
-    }
-    this.cleanupRef = cleanupRef;
-  }
-
-  void unprepare() {
-    PhantomReference<?> cleanupRef = this.cleanupRef;
-    if (cleanupRef != null) {
-      cleanupRef.clear();
-      cleanupRef.enqueue();
-      this.cleanupRef = null;
-    }
-    if (this.unspecifiedParams != null) {
-      this.unspecifiedParams.clear();
+    public boolean hasBinaryFields() {
+        return hasBinaryFields;
     }
 
-    statementName = null;
-    encodedStatementName = null;
-    fields = null;
-    this.resultSetColumnNameIndexMap = null;
-    portalDescribed = false;
-    statementDescribed = false;
-    cachedMaxResultRowSize = null;
-  }
-
-  @Override
-  public int getBatchSize() {
-    return 1;
-  }
-
-  NativeQuery getNativeQuery() {
-    return nativeQuery;
-  }
-
-  public final int getBindCount() {
-    return nativeQuery.bindPositions.length * getBatchSize();
-  }
-
-  private Map<String, Integer> resultSetColumnNameIndexMap;
-
-  @Override
-  public Map<String, Integer> getResultSetColumnNameIndexMap() {
-    Map<String, Integer> columnPositions = this.resultSetColumnNameIndexMap;
-    if (columnPositions == null && fields != null) {
-      columnPositions =
-          PgResultSet.createColumnNameIndexMap(fields, sanitiserDisabled);
-      if (statementName != null) {
-        // Cache column positions for server-prepared statements only
-        this.resultSetColumnNameIndexMap = columnPositions;
-      }
+    public void setHasBinaryFields(boolean hasBinaryFields) {
+        this.hasBinaryFields = hasBinaryFields;
     }
-    return columnPositions;
-  }
 
-  @Override
-  public SqlCommand getSqlCommand() {
-    return nativeQuery.getCommand();
-  }
+    // Have we sent a Describe Portal message for this query yet?
+    boolean isPortalDescribed() {
+        return portalDescribed;
+    }
 
-  private final NativeQuery nativeQuery;
+    void setPortalDescribed(boolean portalDescribed) {
+        this.portalDescribed = portalDescribed;
+        this.cachedMaxResultRowSize = null;
+    }
 
-  private final TypeTransferModeRegistry transferModeRegistry;
-  private String statementName;
-  private byte [] encodedStatementName;
-  /**
-   * The stored fields from previous execution or describe of a prepared statement. Always null for
-   * non-prepared statements.
-   */
-  private Field [] fields;
-  private boolean needUpdateFieldFormats;
-  private boolean hasBinaryFields;
-  private boolean portalDescribed;
-  private boolean statementDescribed;
-  private final boolean sanitiserDisabled;
-  private PhantomReference<?> cleanupRef;
-  private int [] preparedTypes;
-  private BitSet unspecifiedParams;
-  private short deallocateEpoch;
+    // Have we sent a Describe Statement message for this query yet?
+    // Note that we might not have need to, so this may always be false.
+    @Override
+    public boolean isStatementDescribed() {
+        return statementDescribed;
+    }
 
-  private Integer cachedMaxResultRowSize;
+    void setStatementDescribed(boolean statementDescribed) {
+        this.statementDescribed = statementDescribed;
+        this.cachedMaxResultRowSize = null;
+    }
 
-  static final SimpleParameterList NO_PARAMETERS = new SimpleParameterList(0, null);
+    @Override
+    public boolean isEmpty() {
+        return getNativeSql().isEmpty();
+    }
+
+    void setCleanupRef(PhantomReference<?> cleanupRef) {
+        PhantomReference<?> oldCleanupRef = this.cleanupRef;
+        if (oldCleanupRef != null) {
+            oldCleanupRef.clear();
+            oldCleanupRef.enqueue();
+        }
+        this.cleanupRef = cleanupRef;
+    }
+
+    void unprepare() {
+        PhantomReference<?> cleanupRef = this.cleanupRef;
+        if (cleanupRef != null) {
+            cleanupRef.clear();
+            cleanupRef.enqueue();
+            this.cleanupRef = null;
+        }
+        if (this.unspecifiedParams != null) {
+            this.unspecifiedParams.clear();
+        }
+
+        statementName = null;
+        encodedStatementName = null;
+        fields = null;
+        this.resultSetColumnNameIndexMap = null;
+        portalDescribed = false;
+        statementDescribed = false;
+        cachedMaxResultRowSize = null;
+    }
+
+    @Override
+    public int getBatchSize() {
+        return 1;
+    }
+
+    NativeQuery getNativeQuery() {
+        return nativeQuery;
+    }
+
+    public final int getBindCount() {
+        return nativeQuery.bindPositions.length * getBatchSize();
+    }
+
+    @Override
+    public Map<String, Integer> getResultSetColumnNameIndexMap() {
+        Map<String, Integer> columnPositions = this.resultSetColumnNameIndexMap;
+        if (columnPositions == null && fields != null) {
+            columnPositions =
+                    PgResultSet.createColumnNameIndexMap(fields, sanitiserDisabled);
+            if (statementName != null) {
+                // Cache column positions for server-prepared statements only
+                this.resultSetColumnNameIndexMap = columnPositions;
+            }
+        }
+        return columnPositions;
+    }
+
+    @Override
+    public SqlCommand getSqlCommand() {
+        return nativeQuery.getCommand();
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/core/v3/TypeTransferModeRegistry.java b/pgjdbc/src/main/java/org/postgresql/core/v3/TypeTransferModeRegistry.java
index c50570c..02bdb0d 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/v3/TypeTransferModeRegistry.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/v3/TypeTransferModeRegistry.java
@@ -6,17 +6,19 @@
 package org.postgresql.core.v3;
 
 public interface TypeTransferModeRegistry {
-  /**
-   * Returns if given oid should be sent in binary format.
-   * @param oid type oid
-   * @return true if given oid should be sent in binary format
-   */
-  boolean useBinaryForSend(int oid);
+    /**
+     * Returns if given oid should be sent in binary format.
+     *
+     * @param oid type oid
+     * @return true if given oid should be sent in binary format
+     */
+    boolean useBinaryForSend(int oid);
 
-  /**
-   * Returns if given oid should be received in binary format.
-   * @param oid type oid
-   * @return true if given oid should be received in binary format
-   */
-  boolean useBinaryForReceive(int oid);
+    /**
+     * Returns if given oid should be received in binary format.
+     *
+     * @param oid type oid
+     * @return true if given oid should be received in binary format
+     */
+    boolean useBinaryForReceive(int oid);
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/core/v3/V3ParameterList.java b/pgjdbc/src/main/java/org/postgresql/core/v3/V3ParameterList.java
index c49e0e0..92a6191 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/v3/V3ParameterList.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/v3/V3ParameterList.java
@@ -6,9 +6,8 @@
 
 package org.postgresql.core.v3;
 
-import org.postgresql.core.ParameterList;
-
 import java.sql.SQLException;
+import org.postgresql.core.ParameterList;
 
 /**
  * Common interface for all V3 parameter list implementations.
@@ -16,45 +15,48 @@ import java.sql.SQLException;
  * @author Oliver Jowett (oliver@opencloud.com)
  */
 interface V3ParameterList extends ParameterList {
-  /**
-   * Ensure that all parameters in this list have been assigned values. Return silently if all is
-   * well, otherwise throw an appropriate exception.
-   *
-   * @throws SQLException if not all parameters are set.
-   */
-  void checkAllParametersSet() throws SQLException;
+    /**
+     * Ensure that all parameters in this list have been assigned values. Return silently if all is
+     * well, otherwise throw an appropriate exception.
+     *
+     * @throws SQLException if not all parameters are set.
+     */
+    void checkAllParametersSet() throws SQLException;
 
-  /**
-   * Convert any function output parameters to the correct type (void) and set an ignorable value
-   * for it.
-   */
-  void convertFunctionOutParameters();
+    /**
+     * Convert any function output parameters to the correct type (void) and set an ignorable value
+     * for it.
+     */
+    void convertFunctionOutParameters();
 
-  /**
-   * Return a list of the SimpleParameterList objects that make up this parameter list. If this
-   * object is already a SimpleParameterList, returns null (avoids an extra array construction in
-   * the common case).
-   *
-   * @return an array of single-statement parameter lists, or <code>null</code> if this object is
-   *         already a single-statement parameter list.
-   */
-  SimpleParameterList [] getSubparams();
+    /**
+     * Return a list of the SimpleParameterList objects that make up this parameter list. If this
+     * object is already a SimpleParameterList, returns null (avoids an extra array construction in
+     * the common case).
+     *
+     * @return an array of single-statement parameter lists, or <code>null</code> if this object is
+     * already a single-statement parameter list.
+     */
+    SimpleParameterList[] getSubparams();
 
-  /**
-   * Return the parameter type information.
-   * @return an array of {@link org.postgresql.core.Oid} type information
-   */
-  int [] getParamTypes();
+    /**
+     * Return the parameter type information.
+     *
+     * @return an array of {@link org.postgresql.core.Oid} type information
+     */
+    int[] getParamTypes();
 
-  /**
-   * Return the flags for each parameter.
-   * @return an array of bytes used to store flags.
-   */
-  byte [] getFlags();
+    /**
+     * Return the flags for each parameter.
+     *
+     * @return an array of bytes used to store flags.
+     */
+    byte[] getFlags();
 
-  /**
-   * Return the encoding for each parameter.
-   * @return nested byte array of bytes with encoding information.
-   */
-  byte [] [] getEncoding();
+    /**
+     * Return the encoding for each parameter.
+     *
+     * @return nested byte array of bytes with encoding information.
+     */
+    byte[][] getEncoding();
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/core/v3/adaptivefetch/AdaptiveFetchCache.java b/pgjdbc/src/main/java/org/postgresql/core/v3/adaptivefetch/AdaptiveFetchCache.java
index 83e1c92..e2d68fa 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/v3/adaptivefetch/AdaptiveFetchCache.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/v3/adaptivefetch/AdaptiveFetchCache.java
@@ -5,13 +5,12 @@
 
 package org.postgresql.core.v3.adaptivefetch;
 
-import org.postgresql.PGProperty;
-import org.postgresql.core.Query;
-
 import java.sql.SQLException;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.Properties;
+import org.postgresql.PGProperty;
+import org.postgresql.core.Query;
 
 /**
  * The main purpose of this class is to handle adaptive fetching process. Adaptive fetching is used
@@ -25,173 +24,173 @@ import java.util.Properties;
  */
 public class AdaptiveFetchCache {
 
-  private final Map<String, AdaptiveFetchCacheEntry> adaptiveFetchInfoMap;
-  private boolean adaptiveFetch;
-  private final int minimumAdaptiveFetchSize;
-  private int maximumAdaptiveFetchSize = -1;
-  private long maximumResultBufferSize = -1;
+    private final Map<String, AdaptiveFetchCacheEntry> adaptiveFetchInfoMap;
+    private final int minimumAdaptiveFetchSize;
+    private boolean adaptiveFetch;
+    private int maximumAdaptiveFetchSize = -1;
+    private long maximumResultBufferSize = -1;
 
-  public AdaptiveFetchCache(long maximumResultBufferSize, Properties info)
-      throws SQLException {
-    this.adaptiveFetchInfoMap = new HashMap<>();
+    public AdaptiveFetchCache(long maximumResultBufferSize, Properties info)
+            throws SQLException {
+        this.adaptiveFetchInfoMap = new HashMap<>();
 
-    this.adaptiveFetch = PGProperty.ADAPTIVE_FETCH.getBoolean(info);
-    this.minimumAdaptiveFetchSize = PGProperty.ADAPTIVE_FETCH_MINIMUM.getInt(info);
-    this.maximumAdaptiveFetchSize = PGProperty.ADAPTIVE_FETCH_MAXIMUM.getInt(info);
+        this.adaptiveFetch = PGProperty.ADAPTIVE_FETCH.getBoolean(info);
+        this.minimumAdaptiveFetchSize = PGProperty.ADAPTIVE_FETCH_MINIMUM.getInt(info);
+        this.maximumAdaptiveFetchSize = PGProperty.ADAPTIVE_FETCH_MAXIMUM.getInt(info);
 
-    this.maximumResultBufferSize = maximumResultBufferSize;
-  }
-
-  /**
-   * Add query to being cached and computing adaptive fetch size.
-   *
-   * @param adaptiveFetch state of adaptive fetch, which should be used during adding query
-   * @param query         query to be cached
-   */
-  public void addNewQuery(boolean adaptiveFetch, Query query) {
-    if (adaptiveFetch && maximumResultBufferSize != -1) {
-      String sql = query.getNativeSql().trim();
-      AdaptiveFetchCacheEntry adaptiveFetchCacheEntry = adaptiveFetchInfoMap.get(sql);
-      if (adaptiveFetchCacheEntry == null) {
-        adaptiveFetchCacheEntry = new AdaptiveFetchCacheEntry();
-      }
-      adaptiveFetchCacheEntry.incrementCounter();
-
-      adaptiveFetchInfoMap.put(sql, adaptiveFetchCacheEntry);
+        this.maximumResultBufferSize = maximumResultBufferSize;
     }
-  }
 
-  /**
-   * Update adaptive fetch size for given query.
-   *
-   * @param adaptiveFetch       state of adaptive fetch, which should be used during updating fetch
-   *                            size for query
-   * @param query               query to be updated
-   * @param maximumRowSizeBytes max row size used during updating information about adaptive fetch
-   *                            size for given query
-   */
-  public void updateQueryFetchSize(boolean adaptiveFetch, Query query, int maximumRowSizeBytes) {
-    if (adaptiveFetch && maximumResultBufferSize != -1) {
-      String sql = query.getNativeSql().trim();
-      AdaptiveFetchCacheEntry adaptiveFetchCacheEntry = adaptiveFetchInfoMap.get(sql);
-      if (adaptiveFetchCacheEntry != null) {
-        int adaptiveMaximumRowSize = adaptiveFetchCacheEntry.getMaximumRowSizeBytes();
-        if (adaptiveMaximumRowSize < maximumRowSizeBytes && maximumRowSizeBytes > 0) {
-          int newFetchSize = (int) (maximumResultBufferSize / maximumRowSizeBytes);
-          newFetchSize = adjustFetchSize(newFetchSize);
+    /**
+     * Add query to being cached and computing adaptive fetch size.
+     *
+     * @param adaptiveFetch state of adaptive fetch, which should be used during adding query
+     * @param query         query to be cached
+     */
+    public void addNewQuery(boolean adaptiveFetch, Query query) {
+        if (adaptiveFetch && maximumResultBufferSize != -1) {
+            String sql = query.getNativeSql().trim();
+            AdaptiveFetchCacheEntry adaptiveFetchCacheEntry = adaptiveFetchInfoMap.get(sql);
+            if (adaptiveFetchCacheEntry == null) {
+                adaptiveFetchCacheEntry = new AdaptiveFetchCacheEntry();
+            }
+            adaptiveFetchCacheEntry.incrementCounter();
 
-          adaptiveFetchCacheEntry.setMaximumRowSizeBytes(maximumRowSizeBytes);
-          adaptiveFetchCacheEntry.setSize(newFetchSize);
-
-          adaptiveFetchInfoMap.put(sql, adaptiveFetchCacheEntry);
+            adaptiveFetchInfoMap.put(sql, adaptiveFetchCacheEntry);
         }
-      }
     }
-  }
 
-  /**
-   * Get adaptive fetch size for given query.
-   *
-   * @param adaptiveFetch state of adaptive fetch, which should be used during getting fetch size
-   *                      for query
-   * @param query         query to which we want get adaptive fetch size
-   * @return adaptive fetch size for query or -1 if size doesn't exist/adaptive fetch state is false
-   */
-  public int getFetchSizeForQuery(boolean adaptiveFetch, Query query) {
-    if (adaptiveFetch && maximumResultBufferSize != -1) {
-      String sql = query.getNativeSql().trim();
-      AdaptiveFetchCacheEntry adaptiveFetchCacheEntry = adaptiveFetchInfoMap.get(sql);
-      if (adaptiveFetchCacheEntry != null) {
-        return adaptiveFetchCacheEntry.getSize();
-      }
+    /**
+     * Update adaptive fetch size for given query.
+     *
+     * @param adaptiveFetch       state of adaptive fetch, which should be used during updating fetch
+     *                            size for query
+     * @param query               query to be updated
+     * @param maximumRowSizeBytes max row size used during updating information about adaptive fetch
+     *                            size for given query
+     */
+    public void updateQueryFetchSize(boolean adaptiveFetch, Query query, int maximumRowSizeBytes) {
+        if (adaptiveFetch && maximumResultBufferSize != -1) {
+            String sql = query.getNativeSql().trim();
+            AdaptiveFetchCacheEntry adaptiveFetchCacheEntry = adaptiveFetchInfoMap.get(sql);
+            if (adaptiveFetchCacheEntry != null) {
+                int adaptiveMaximumRowSize = adaptiveFetchCacheEntry.getMaximumRowSizeBytes();
+                if (adaptiveMaximumRowSize < maximumRowSizeBytes && maximumRowSizeBytes > 0) {
+                    int newFetchSize = (int) (maximumResultBufferSize / maximumRowSizeBytes);
+                    newFetchSize = adjustFetchSize(newFetchSize);
+
+                    adaptiveFetchCacheEntry.setMaximumRowSizeBytes(maximumRowSizeBytes);
+                    adaptiveFetchCacheEntry.setSize(newFetchSize);
+
+                    adaptiveFetchInfoMap.put(sql, adaptiveFetchCacheEntry);
+                }
+            }
+        }
     }
-    return -1;
-  }
 
-  /**
-   * Remove query information from caching.
-   *
-   * @param adaptiveFetch state of adaptive fetch, which should be used during removing fetch size
-   *                      for query
-   * @param query         query to be removed from caching
-   */
-  public void removeQuery(boolean adaptiveFetch, Query query) {
-    if (adaptiveFetch && maximumResultBufferSize != -1) {
-      String sql = query.getNativeSql().trim();
-      AdaptiveFetchCacheEntry adaptiveFetchCacheEntry = adaptiveFetchInfoMap.get(sql);
-      if (adaptiveFetchCacheEntry != null) {
-        adaptiveFetchCacheEntry.decrementCounter();
+    /**
+     * Get adaptive fetch size for given query.
+     *
+     * @param adaptiveFetch state of adaptive fetch, which should be used during getting fetch size
+     *                      for query
+     * @param query         query to which we want get adaptive fetch size
+     * @return adaptive fetch size for query or -1 if size doesn't exist/adaptive fetch state is false
+     */
+    public int getFetchSizeForQuery(boolean adaptiveFetch, Query query) {
+        if (adaptiveFetch && maximumResultBufferSize != -1) {
+            String sql = query.getNativeSql().trim();
+            AdaptiveFetchCacheEntry adaptiveFetchCacheEntry = adaptiveFetchInfoMap.get(sql);
+            if (adaptiveFetchCacheEntry != null) {
+                return adaptiveFetchCacheEntry.getSize();
+            }
+        }
+        return -1;
+    }
 
-        if (adaptiveFetchCacheEntry.getCounter() < 1) {
-          adaptiveFetchInfoMap.remove(sql);
+    /**
+     * Remove query information from caching.
+     *
+     * @param adaptiveFetch state of adaptive fetch, which should be used during removing fetch size
+     *                      for query
+     * @param query         query to be removed from caching
+     */
+    public void removeQuery(boolean adaptiveFetch, Query query) {
+        if (adaptiveFetch && maximumResultBufferSize != -1) {
+            String sql = query.getNativeSql().trim();
+            AdaptiveFetchCacheEntry adaptiveFetchCacheEntry = adaptiveFetchInfoMap.get(sql);
+            if (adaptiveFetchCacheEntry != null) {
+                adaptiveFetchCacheEntry.decrementCounter();
+
+                if (adaptiveFetchCacheEntry.getCounter() < 1) {
+                    adaptiveFetchInfoMap.remove(sql);
+                } else {
+                    adaptiveFetchInfoMap.put(sql, adaptiveFetchCacheEntry);
+                }
+            }
+        }
+    }
+
+    /**
+     * Set maximum and minimum constraints on given value.
+     *
+     * @param actualSize value which should be the computed fetch size
+     * @return value which meet the constraints
+     */
+    private int adjustFetchSize(int actualSize) {
+        int size = adjustMaximumFetchSize(actualSize);
+        size = adjustMinimumFetchSize(size);
+        return size;
+    }
+
+    /**
+     * Set minimum constraint on given value.
+     *
+     * @param actualSize value which should be the computed fetch size
+     * @return value which meet the minimum constraint
+     */
+    private int adjustMinimumFetchSize(int actualSize) {
+        if (minimumAdaptiveFetchSize == 0) {
+            return actualSize;
+        }
+        if (minimumAdaptiveFetchSize > actualSize) {
+            return minimumAdaptiveFetchSize;
         } else {
-          adaptiveFetchInfoMap.put(sql, adaptiveFetchCacheEntry);
+            return actualSize;
         }
-      }
     }
-  }
 
-  /**
-   * Set maximum and minimum constraints on given value.
-   *
-   * @param actualSize value which should be the computed fetch size
-   * @return value which meet the constraints
-   */
-  private int adjustFetchSize(int actualSize) {
-    int size = adjustMaximumFetchSize(actualSize);
-    size = adjustMinimumFetchSize(size);
-    return size;
-  }
-
-  /**
-   * Set minimum constraint on given value.
-   *
-   * @param actualSize value which should be the computed fetch size
-   * @return value which meet the minimum constraint
-   */
-  private int adjustMinimumFetchSize(int actualSize) {
-    if (minimumAdaptiveFetchSize == 0) {
-      return actualSize;
+    /**
+     * Set maximum constraint on given value.
+     *
+     * @param actualSize value which should be the computed fetch size
+     * @return value which meet the maximum constraint
+     */
+    private int adjustMaximumFetchSize(int actualSize) {
+        if (maximumAdaptiveFetchSize == -1) {
+            return actualSize;
+        }
+        if (maximumAdaptiveFetchSize < actualSize) {
+            return maximumAdaptiveFetchSize;
+        } else {
+            return actualSize;
+        }
     }
-    if (minimumAdaptiveFetchSize > actualSize) {
-      return minimumAdaptiveFetchSize;
-    } else {
-      return actualSize;
-    }
-  }
 
-  /**
-   * Set maximum constraint on given value.
-   *
-   * @param actualSize value which should be the computed fetch size
-   * @return value which meet the maximum constraint
-   */
-  private int adjustMaximumFetchSize(int actualSize) {
-    if (maximumAdaptiveFetchSize == -1) {
-      return actualSize;
+    /**
+     * Get state of adaptive fetch.
+     *
+     * @return state of adaptive fetch
+     */
+    public boolean getAdaptiveFetch() {
+        return adaptiveFetch;
     }
-    if (maximumAdaptiveFetchSize < actualSize) {
-      return maximumAdaptiveFetchSize;
-    } else {
-      return actualSize;
+
+    /**
+     * Set state of adaptive fetch.
+     *
+     * @param adaptiveFetch desired state of adaptive fetch
+     */
+    public void setAdaptiveFetch(boolean adaptiveFetch) {
+        this.adaptiveFetch = adaptiveFetch;
     }
-  }
-
-  /**
-   * Get state of adaptive fetch.
-   *
-   * @return state of adaptive fetch
-   */
-  public boolean getAdaptiveFetch() {
-    return adaptiveFetch;
-  }
-
-  /**
-   * Set state of adaptive fetch.
-   *
-   * @param adaptiveFetch desired state of adaptive fetch
-   */
-  public void setAdaptiveFetch(boolean adaptiveFetch) {
-    this.adaptiveFetch = adaptiveFetch;
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/core/v3/adaptivefetch/AdaptiveFetchCacheEntry.java b/pgjdbc/src/main/java/org/postgresql/core/v3/adaptivefetch/AdaptiveFetchCacheEntry.java
index 97a12ff..5b4511c 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/v3/adaptivefetch/AdaptiveFetchCacheEntry.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/v3/adaptivefetch/AdaptiveFetchCacheEntry.java
@@ -7,39 +7,39 @@ package org.postgresql.core.v3.adaptivefetch;
 
 public class AdaptiveFetchCacheEntry {
 
-  private int size = -1; // Holds information about adaptive fetch size for query
-  private int counter; // Number of queries in execution using that query info
-  private int maximumRowSizeBytes = -1; // Maximum row size in bytes saved for query so far
+    private int size = -1; // Holds information about adaptive fetch size for query
+    private int counter; // Number of queries in execution using that query info
+    private int maximumRowSizeBytes = -1; // Maximum row size in bytes saved for query so far
 
-  public int getSize() {
-    return size;
-  }
+    public int getSize() {
+        return size;
+    }
 
-  public void setSize(int size) {
-    this.size = size;
-  }
+    public void setSize(int size) {
+        this.size = size;
+    }
 
-  public int getCounter() {
-    return counter;
-  }
+    public int getCounter() {
+        return counter;
+    }
 
-  public void setCounter(int counter) {
-    this.counter = counter;
-  }
+    public void setCounter(int counter) {
+        this.counter = counter;
+    }
 
-  public int getMaximumRowSizeBytes() {
-    return maximumRowSizeBytes;
-  }
+    public int getMaximumRowSizeBytes() {
+        return maximumRowSizeBytes;
+    }
 
-  public void setMaximumRowSizeBytes(int maximumRowSizeBytes) {
-    this.maximumRowSizeBytes = maximumRowSizeBytes;
-  }
+    public void setMaximumRowSizeBytes(int maximumRowSizeBytes) {
+        this.maximumRowSizeBytes = maximumRowSizeBytes;
+    }
 
-  public void incrementCounter() {
-    counter++;
-  }
+    public void incrementCounter() {
+        counter++;
+    }
 
-  public void decrementCounter() {
-    counter--;
-  }
+    public void decrementCounter() {
+        counter--;
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/core/v3/replication/V3PGReplicationStream.java b/pgjdbc/src/main/java/org/postgresql/core/v3/replication/V3PGReplicationStream.java
index f1fb9fa..9d31f57 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/v3/replication/V3PGReplicationStream.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/v3/replication/V3PGReplicationStream.java
@@ -5,14 +5,6 @@
 
 package org.postgresql.core.v3.replication;
 
-import org.postgresql.copy.CopyDual;
-import org.postgresql.replication.LogSequenceNumber;
-import org.postgresql.replication.PGReplicationStream;
-import org.postgresql.replication.ReplicationType;
-import org.postgresql.util.GT;
-import org.postgresql.util.PSQLException;
-import org.postgresql.util.PSQLState;
-
 import java.net.SocketTimeoutException;
 import java.nio.ByteBuffer;
 import java.sql.SQLException;
@@ -20,279 +12,286 @@ import java.util.Date;
 import java.util.concurrent.TimeUnit;
 import java.util.logging.Level;
 import java.util.logging.Logger;
+import org.postgresql.copy.CopyDual;
+import org.postgresql.replication.LogSequenceNumber;
+import org.postgresql.replication.PGReplicationStream;
+import org.postgresql.replication.ReplicationType;
+import org.postgresql.util.GT;
+import org.postgresql.util.PSQLException;
+import org.postgresql.util.PSQLState;
 
 public class V3PGReplicationStream implements PGReplicationStream {
 
-  private static final Logger LOGGER = Logger.getLogger(V3PGReplicationStream.class.getName());
-  public static final long POSTGRES_EPOCH_2000_01_01 = 946684800000L;
-  private static final long NANOS_PER_MILLISECOND = 1000000L;
+    public static final long POSTGRES_EPOCH_2000_01_01 = 946684800000L;
+    private static final Logger LOGGER = Logger.getLogger(V3PGReplicationStream.class.getName());
+    private static final long NANOS_PER_MILLISECOND = 1000000L;
 
-  private final CopyDual copyDual;
-  private final long updateInterval;
-  private final ReplicationType replicationType;
-  private long lastStatusUpdate;
-  private boolean closeFlag;
+    private final CopyDual copyDual;
+    private final long updateInterval;
+    private final ReplicationType replicationType;
+    private long lastStatusUpdate;
+    private boolean closeFlag;
 
-  private LogSequenceNumber lastServerLSN = LogSequenceNumber.INVALID_LSN;
-  /**
-   * Last receive LSN + payload size.
-   */
-  private volatile LogSequenceNumber lastReceiveLSN = LogSequenceNumber.INVALID_LSN;
-  private volatile LogSequenceNumber lastAppliedLSN = LogSequenceNumber.INVALID_LSN;
-  private volatile LogSequenceNumber lastFlushedLSN = LogSequenceNumber.INVALID_LSN;
-  private volatile LogSequenceNumber startOfLastMessageLSN = LogSequenceNumber.INVALID_LSN;
-  private volatile LogSequenceNumber explicitlyFlushedLSN = LogSequenceNumber.INVALID_LSN;
+    private LogSequenceNumber lastServerLSN = LogSequenceNumber.INVALID_LSN;
+    /**
+     * Last receive LSN + payload size.
+     */
+    private volatile LogSequenceNumber lastReceiveLSN = LogSequenceNumber.INVALID_LSN;
+    private volatile LogSequenceNumber lastAppliedLSN = LogSequenceNumber.INVALID_LSN;
+    private volatile LogSequenceNumber lastFlushedLSN = LogSequenceNumber.INVALID_LSN;
+    private volatile LogSequenceNumber startOfLastMessageLSN = LogSequenceNumber.INVALID_LSN;
+    private volatile LogSequenceNumber explicitlyFlushedLSN = LogSequenceNumber.INVALID_LSN;
 
-  /**
-   * @param copyDual         bidirectional copy protocol
-   * @param startLSN         the position in the WAL that we want to initiate replication from
-   *                         usually the currentLSN returned by calling pg_current_wal_lsn()for v10
-   *                         above or pg_current_xlog_location() depending on the version of the
-   *                         server
-   * @param updateIntervalMs the number of millisecond between status packets sent back to the
-   *                         server.  A value of zero disables the periodic status updates
-   *                         completely, although an update will still be sent when requested by the
-   *                         server, to avoid timeout disconnect.
-   * @param replicationType  LOGICAL or PHYSICAL
-   */
-  public V3PGReplicationStream(CopyDual copyDual, LogSequenceNumber startLSN, long updateIntervalMs,
-      ReplicationType replicationType
-  ) {
-    this.copyDual = copyDual;
-    this.updateInterval = updateIntervalMs * NANOS_PER_MILLISECOND;
-    this.lastStatusUpdate = System.nanoTime() - (updateIntervalMs * NANOS_PER_MILLISECOND);
-    this.lastReceiveLSN = startLSN;
-    this.replicationType = replicationType;
-  }
-
-  @Override
-  public ByteBuffer read() throws SQLException {
-    checkClose();
-
-    ByteBuffer payload = null;
-    while (payload == null && copyDual.isActive()) {
-      payload = readInternal(true);
+    /**
+     * @param copyDual         bidirectional copy protocol
+     * @param startLSN         the position in the WAL that we want to initiate replication from
+     *                         usually the currentLSN returned by calling pg_current_wal_lsn() for v10
+     *                         above or pg_current_xlog_location() depending on the version of the
+     *                         server
+     * @param updateIntervalMs the number of milliseconds between status packets sent back to the
+     *                         server.  A value of zero disables the periodic status updates
+     *                         completely, although an update will still be sent when requested by the
+     *                         server, to avoid timeout disconnect.
+     * @param replicationType  LOGICAL or PHYSICAL
+     */
+    public V3PGReplicationStream(CopyDual copyDual, LogSequenceNumber startLSN, long updateIntervalMs,
+                                 ReplicationType replicationType
+    ) {
+        this.copyDual = copyDual;
+        this.updateInterval = updateIntervalMs * NANOS_PER_MILLISECOND;
+        this.lastStatusUpdate = System.nanoTime() - (updateIntervalMs * NANOS_PER_MILLISECOND);
+        this.lastReceiveLSN = startLSN;
+        this.replicationType = replicationType;
     }
 
-    return payload;
-  }
+    @Override
+    public ByteBuffer read() throws SQLException {
+        checkClose();
 
-  @Override
-  public ByteBuffer readPending() throws SQLException {
-    checkClose();
-    return readInternal(false);
-  }
+        ByteBuffer payload = null;
+        while (payload == null && copyDual.isActive()) {
+            payload = readInternal(true);
+        }
 
-  @Override
-  public LogSequenceNumber getLastReceiveLSN() {
-    return lastReceiveLSN;
-  }
+        return payload;
+    }
 
-  @Override
-  public LogSequenceNumber getLastFlushedLSN() {
-    return lastFlushedLSN;
-  }
+    @Override
+    public ByteBuffer readPending() throws SQLException {
+        checkClose();
+        return readInternal(false);
+    }
 
-  @Override
-  public LogSequenceNumber getLastAppliedLSN() {
-    return lastAppliedLSN;
-  }
+    @Override
+    public LogSequenceNumber getLastReceiveLSN() {
+        return lastReceiveLSN;
+    }
 
-  @Override
-  public void setFlushedLSN(LogSequenceNumber flushed) {
-    this.lastFlushedLSN = flushed;
-  }
+    @Override
+    public LogSequenceNumber getLastFlushedLSN() {
+        return lastFlushedLSN;
+    }
 
-  @Override
-  public void setAppliedLSN(LogSequenceNumber applied) {
-    this.lastAppliedLSN = applied;
-  }
+    @Override
+    public LogSequenceNumber getLastAppliedLSN() {
+        return lastAppliedLSN;
+    }
 
-  @Override
-  public void forceUpdateStatus() throws SQLException {
-    checkClose();
-    updateStatusInternal(lastReceiveLSN, lastFlushedLSN, lastAppliedLSN, true);
-  }
+    @Override
+    public void setFlushedLSN(LogSequenceNumber flushed) {
+        this.lastFlushedLSN = flushed;
+    }
 
-  @Override
-  public boolean isClosed() {
-    return closeFlag || !copyDual.isActive();
-  }
+    @Override
+    public void setAppliedLSN(LogSequenceNumber applied) {
+        this.lastAppliedLSN = applied;
+    }
 
-  private ByteBuffer readInternal(boolean block) throws SQLException {
-    boolean updateStatusRequired = false;
-    while (copyDual.isActive()) {
+    @Override
+    public void forceUpdateStatus() throws SQLException {
+        checkClose();
+        updateStatusInternal(lastReceiveLSN, lastFlushedLSN, lastAppliedLSN, true);
+    }
 
-      ByteBuffer buffer = receiveNextData(block);
+    @Override
+    public boolean isClosed() {
+        return closeFlag || !copyDual.isActive();
+    }
 
-      if (updateStatusRequired || isTimeUpdate()) {
-        timeUpdateStatus();
-      }
+    private ByteBuffer readInternal(boolean block) throws SQLException {
+        boolean updateStatusRequired = false;
+        while (copyDual.isActive()) {
+
+            ByteBuffer buffer = receiveNextData(block);
+
+            if (updateStatusRequired || isTimeUpdate()) {
+                timeUpdateStatus();
+            }
+
+            if (buffer == null) {
+                return null;
+            }
+
+            int code = buffer.get();
+
+            switch (code) {
+
+                case 'k': //KeepAlive message
+                    updateStatusRequired = processKeepAliveMessage(buffer);
+                    updateStatusRequired |= updateInterval == 0;
+                    break;
+
+                case 'w': //XLogData
+                    return processXLogData(buffer);
+
+                default:
+                    throw new PSQLException(
+                            GT.tr("Unexpected packet type during replication: {0}", Integer.toString(code)),
+                            PSQLState.PROTOCOL_VIOLATION
+                    );
+            }
+        }
 
-      if (buffer == null) {
         return null;
-      }
-
-      int code = buffer.get();
-
-      switch (code) {
-
-        case 'k': //KeepAlive message
-          updateStatusRequired = processKeepAliveMessage(buffer);
-          updateStatusRequired |= updateInterval == 0;
-          break;
-
-        case 'w': //XLogData
-          return processXLogData(buffer);
-
-        default:
-          throw new PSQLException(
-              GT.tr("Unexpected packet type during replication: {0}", Integer.toString(code)),
-              PSQLState.PROTOCOL_VIOLATION
-          );
-      }
     }
 
-    return null;
-  }
+    private ByteBuffer receiveNextData(boolean block) throws SQLException {
+        try {
+            byte[] message = copyDual.readFromCopy(block);
+            if (message != null) {
+                return ByteBuffer.wrap(message);
+            } else {
+                return null;
+            }
+        } catch (PSQLException e) { // TODO: maybe replace with a thread sleep?
+            if (e.getCause() instanceof SocketTimeoutException) {
+                //signal for keep alive
+                return null;
+            }
 
-  private ByteBuffer receiveNextData(boolean block) throws SQLException {
-    try {
-      byte[] message = copyDual.readFromCopy(block);
-      if (message != null) {
-        return ByteBuffer.wrap(message);
-      } else {
-        return null;
-      }
-    } catch (PSQLException e) { //todo maybe replace on thread sleep?
-      if (e.getCause() instanceof SocketTimeoutException) {
-        //signal for keep alive
-        return null;
-      }
-
-      throw e;
-    }
-  }
-
-  private boolean isTimeUpdate() {
-    /* a value of 0 disables automatic updates */
-    if ( updateInterval == 0 ) {
-      return false;
-    }
-    long diff = System.nanoTime() - lastStatusUpdate;
-    return diff >= updateInterval;
-  }
-
-  private void timeUpdateStatus() throws SQLException {
-    updateStatusInternal(lastReceiveLSN, lastFlushedLSN, lastAppliedLSN, false);
-  }
-
-  private void updateStatusInternal(
-      LogSequenceNumber received, LogSequenceNumber flushed, LogSequenceNumber applied,
-      boolean replyRequired)
-      throws SQLException {
-    byte[] reply = prepareUpdateStatus(received, flushed, applied, replyRequired);
-    copyDual.writeToCopy(reply, 0, reply.length);
-    copyDual.flushCopy();
-
-    explicitlyFlushedLSN = flushed;
-    lastStatusUpdate = System.nanoTime();
-  }
-
-  private byte[] prepareUpdateStatus(LogSequenceNumber received, LogSequenceNumber flushed,
-      LogSequenceNumber applied, boolean replyRequired) {
-    ByteBuffer byteBuffer = ByteBuffer.allocate(1 + 8 + 8 + 8 + 8 + 1);
-
-    long now = System.nanoTime() / NANOS_PER_MILLISECOND;
-    long systemClock = TimeUnit.MICROSECONDS.convert((now - POSTGRES_EPOCH_2000_01_01),
-        TimeUnit.MICROSECONDS);
-
-    if (LOGGER.isLoggable(Level.FINEST)) {
-      LOGGER.log(Level.FINEST, " FE=> StandbyStatusUpdate(received: {0}, flushed: {1}, applied: {2}, clock: {3})",
-          new Object[]{received.asString(), flushed.asString(), applied.asString(), new Date(now)});
+            throw e;
+        }
     }
 
-    byteBuffer.put((byte) 'r');
-    byteBuffer.putLong(received.asLong());
-    byteBuffer.putLong(flushed.asLong());
-    byteBuffer.putLong(applied.asLong());
-    byteBuffer.putLong(systemClock);
-    if (replyRequired) {
-      byteBuffer.put((byte) 1);
-    } else {
-      byteBuffer.put(received == LogSequenceNumber.INVALID_LSN ? (byte) 1 : (byte) 0);
+    private boolean isTimeUpdate() {
+        /* a value of 0 disables automatic updates */
+        if (updateInterval == 0) {
+            return false;
+        }
+        long diff = System.nanoTime() - lastStatusUpdate;
+        return diff >= updateInterval;
     }
 
-    lastStatusUpdate = now;
-    return byteBuffer.array();
-  }
-
-  private boolean processKeepAliveMessage(ByteBuffer buffer) {
-    lastServerLSN = LogSequenceNumber.valueOf(buffer.getLong());
-    if (lastServerLSN.asLong() > lastReceiveLSN.asLong()) {
-      lastReceiveLSN = lastServerLSN;
-    }
-    // if the client has confirmed flush of last XLogData msg and KeepAlive shows ServerLSN is still
-    // advancing, we can safely advance FlushLSN to ServerLSN
-    if (explicitlyFlushedLSN.asLong() >= startOfLastMessageLSN.asLong()
-        && lastServerLSN.asLong() > explicitlyFlushedLSN.asLong()
-        && lastServerLSN.asLong() > lastFlushedLSN.asLong()) {
-      lastFlushedLSN = lastServerLSN;
+    private void timeUpdateStatus() throws SQLException {
+        updateStatusInternal(lastReceiveLSN, lastFlushedLSN, lastAppliedLSN, false);
     }
 
-    long lastServerClock = buffer.getLong();
+    private void updateStatusInternal(
+            LogSequenceNumber received, LogSequenceNumber flushed, LogSequenceNumber applied,
+            boolean replyRequired)
+            throws SQLException {
+        byte[] reply = prepareUpdateStatus(received, flushed, applied, replyRequired);
+        copyDual.writeToCopy(reply, 0, reply.length);
+        copyDual.flushCopy();
 
-    boolean replyRequired = buffer.get() != 0;
-
-    if (LOGGER.isLoggable(Level.FINEST)) {
-      Date clockTime = new Date(
-          TimeUnit.MILLISECONDS.convert(lastServerClock, TimeUnit.MICROSECONDS)
-          + POSTGRES_EPOCH_2000_01_01);
-      LOGGER.log(Level.FINEST, "  <=BE Keepalive(lastServerWal: {0}, clock: {1} needReply: {2})",
-          new Object[]{lastServerLSN.asString(), clockTime, replyRequired});
+        explicitlyFlushedLSN = flushed;
+        lastStatusUpdate = System.nanoTime();
     }
 
-    return replyRequired;
-  }
+    private byte[] prepareUpdateStatus(LogSequenceNumber received, LogSequenceNumber flushed,
+                                       LogSequenceNumber applied, boolean replyRequired) {
+        ByteBuffer byteBuffer = ByteBuffer.allocate(1 + 8 + 8 + 8 + 8 + 1);
 
-  private ByteBuffer processXLogData(ByteBuffer buffer) {
-    long startLsn = buffer.getLong();
-    startOfLastMessageLSN = LogSequenceNumber.valueOf(startLsn);
-    lastServerLSN = LogSequenceNumber.valueOf(buffer.getLong());
-    long systemClock = buffer.getLong();
+        long now = System.nanoTime() / NANOS_PER_MILLISECOND;
+        long systemClock = TimeUnit.MICROSECONDS.convert((now - POSTGRES_EPOCH_2000_01_01),
+                TimeUnit.MICROSECONDS);
 
-    if (replicationType == ReplicationType.LOGICAL) {
-      lastReceiveLSN = LogSequenceNumber.valueOf(startLsn);
-    } else if (replicationType == ReplicationType.PHYSICAL) {
-      int payloadSize = buffer.limit() - buffer.position();
-      lastReceiveLSN = LogSequenceNumber.valueOf(startLsn + payloadSize);
+        if (LOGGER.isLoggable(Level.FINEST)) {
+            LOGGER.log(Level.FINEST, " FE=> StandbyStatusUpdate(received: {0}, flushed: {1}, applied: {2}, clock: {3})",
+                    new Object[]{received.asString(), flushed.asString(), applied.asString(), new Date(now)});
+        }
+
+        byteBuffer.put((byte) 'r');
+        byteBuffer.putLong(received.asLong());
+        byteBuffer.putLong(flushed.asLong());
+        byteBuffer.putLong(applied.asLong());
+        byteBuffer.putLong(systemClock);
+        if (replyRequired) {
+            byteBuffer.put((byte) 1);
+        } else {
+            byteBuffer.put(received == LogSequenceNumber.INVALID_LSN ? (byte) 1 : (byte) 0);
+        }
+
+        lastStatusUpdate = now;
+        return byteBuffer.array();
     }
 
-    if (LOGGER.isLoggable(Level.FINEST)) {
-      LOGGER.log(Level.FINEST, "  <=BE XLogData(currWal: {0}, lastServerWal: {1}, clock: {2})",
-          new Object[]{lastReceiveLSN.asString(), lastServerLSN.asString(), systemClock});
+    private boolean processKeepAliveMessage(ByteBuffer buffer) {
+        lastServerLSN = LogSequenceNumber.valueOf(buffer.getLong());
+        if (lastServerLSN.asLong() > lastReceiveLSN.asLong()) {
+            lastReceiveLSN = lastServerLSN;
+        }
+        // if the client has confirmed flush of last XLogData msg and KeepAlive shows ServerLSN is still
+        // advancing, we can safely advance FlushLSN to ServerLSN
+        if (explicitlyFlushedLSN.asLong() >= startOfLastMessageLSN.asLong()
+                && lastServerLSN.asLong() > explicitlyFlushedLSN.asLong()
+                && lastServerLSN.asLong() > lastFlushedLSN.asLong()) {
+            lastFlushedLSN = lastServerLSN;
+        }
+
+        long lastServerClock = buffer.getLong();
+
+        boolean replyRequired = buffer.get() != 0;
+
+        if (LOGGER.isLoggable(Level.FINEST)) {
+            Date clockTime = new Date(
+                    TimeUnit.MILLISECONDS.convert(lastServerClock, TimeUnit.MICROSECONDS)
+                            + POSTGRES_EPOCH_2000_01_01);
+            LOGGER.log(Level.FINEST, "  <=BE Keepalive(lastServerWal: {0}, clock: {1} needReply: {2})",
+                    new Object[]{lastServerLSN.asString(), clockTime, replyRequired});
+        }
+
+        return replyRequired;
     }
 
-    return buffer.slice();
-  }
+    private ByteBuffer processXLogData(ByteBuffer buffer) {
+        long startLsn = buffer.getLong();
+        startOfLastMessageLSN = LogSequenceNumber.valueOf(startLsn);
+        lastServerLSN = LogSequenceNumber.valueOf(buffer.getLong());
+        long systemClock = buffer.getLong();
 
-  private void checkClose() throws PSQLException {
-    if (isClosed()) {
-      throw new PSQLException(GT.tr("This replication stream has been closed."),
-          PSQLState.CONNECTION_DOES_NOT_EXIST);
-    }
-  }
+        if (replicationType == ReplicationType.LOGICAL) {
+            lastReceiveLSN = LogSequenceNumber.valueOf(startLsn);
+        } else if (replicationType == ReplicationType.PHYSICAL) {
+            int payloadSize = buffer.limit() - buffer.position();
+            lastReceiveLSN = LogSequenceNumber.valueOf(startLsn + payloadSize);
+        }
 
-  @Override
-  public void close() throws SQLException {
-    if (isClosed()) {
-      return;
+        if (LOGGER.isLoggable(Level.FINEST)) {
+            LOGGER.log(Level.FINEST, "  <=BE XLogData(currWal: {0}, lastServerWal: {1}, clock: {2})",
+                    new Object[]{lastReceiveLSN.asString(), lastServerLSN.asString(), systemClock});
+        }
+
+        return buffer.slice();
     }
 
-    LOGGER.log(Level.FINEST, " FE=> StopReplication");
+    private void checkClose() throws PSQLException {
+        if (isClosed()) {
+            throw new PSQLException(GT.tr("This replication stream has been closed."),
+                    PSQLState.CONNECTION_DOES_NOT_EXIST);
+        }
+    }
 
-    copyDual.endCopy();
+    @Override
+    public void close() throws SQLException {
+        if (isClosed()) {
+            return;
+        }
 
-    closeFlag = true;
-  }
+        LOGGER.log(Level.FINEST, " FE=> StopReplication");
+
+        copyDual.endCopy();
+
+        closeFlag = true;
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/core/v3/replication/V3ReplicationProtocol.java b/pgjdbc/src/main/java/org/postgresql/core/v3/replication/V3ReplicationProtocol.java
index c522447..176dd0d 100644
--- a/pgjdbc/src/main/java/org/postgresql/core/v3/replication/V3ReplicationProtocol.java
+++ b/pgjdbc/src/main/java/org/postgresql/core/v3/replication/V3ReplicationProtocol.java
@@ -5,6 +5,11 @@
 
 package org.postgresql.core.v3.replication;
 
+import java.io.IOException;
+import java.sql.SQLException;
+import java.util.Properties;
+import java.util.logging.Level;
+import java.util.logging.Logger;
 import org.postgresql.copy.CopyDual;
 import org.postgresql.core.PGStream;
 import org.postgresql.core.QueryExecutor;
@@ -18,124 +23,118 @@ import org.postgresql.util.GT;
 import org.postgresql.util.PSQLException;
 import org.postgresql.util.PSQLState;
 
-import java.io.IOException;
-import java.sql.SQLException;
-import java.util.Properties;
-import java.util.logging.Level;
-import java.util.logging.Logger;
-
 public class V3ReplicationProtocol implements ReplicationProtocol {
 
-  private static final Logger LOGGER = Logger.getLogger(V3ReplicationProtocol.class.getName());
-  private final QueryExecutor queryExecutor;
-  private final PGStream pgStream;
+    private static final Logger LOGGER = Logger.getLogger(V3ReplicationProtocol.class.getName());
+    private final QueryExecutor queryExecutor;
+    private final PGStream pgStream;
 
-  public V3ReplicationProtocol(QueryExecutor queryExecutor, PGStream pgStream) {
-    this.queryExecutor = queryExecutor;
-    this.pgStream = pgStream;
-  }
-
-  @Override
-  public PGReplicationStream startLogical(LogicalReplicationOptions options)
-      throws SQLException {
-
-    String query = createStartLogicalQuery(options);
-    return initializeReplication(query, options, ReplicationType.LOGICAL);
-  }
-
-  @Override
-  public PGReplicationStream startPhysical(PhysicalReplicationOptions options)
-      throws SQLException {
-
-    String query = createStartPhysicalQuery(options);
-    return initializeReplication(query, options, ReplicationType.PHYSICAL);
-  }
-
-  private PGReplicationStream initializeReplication(String query, CommonOptions options,
-      ReplicationType replicationType)
-      throws SQLException {
-    LOGGER.log(Level.FINEST, " FE=> StartReplication(query: {0})", query);
-
-    configureSocketTimeout(options);
-    CopyDual copyDual = (CopyDual) queryExecutor.startCopy(query, true);
-
-    return new V3PGReplicationStream(
-        copyDual,
-        options.getStartLSNPosition(),
-        options.getStatusInterval(),
-        replicationType
-    );
-  }
-
-  /**
-   * START_REPLICATION [SLOT slot_name] [PHYSICAL] XXX/XXX.
-   */
-  private String createStartPhysicalQuery(PhysicalReplicationOptions options) {
-    StringBuilder builder = new StringBuilder();
-    builder.append("START_REPLICATION");
-
-    if (options.getSlotName() != null) {
-      builder.append(" SLOT ").append(options.getSlotName());
+    public V3ReplicationProtocol(QueryExecutor queryExecutor, PGStream pgStream) {
+        this.queryExecutor = queryExecutor;
+        this.pgStream = pgStream;
     }
 
-    builder.append(" PHYSICAL ").append(options.getStartLSNPosition().asString());
+    @Override
+    public PGReplicationStream startLogical(LogicalReplicationOptions options)
+            throws SQLException {
 
-    return builder.toString();
-  }
-
-  /**
-   * START_REPLICATION SLOT slot_name LOGICAL XXX/XXX [ ( option_name [option_value] [, ... ] ) ]
-   */
-  private String createStartLogicalQuery(LogicalReplicationOptions options) {
-    StringBuilder builder = new StringBuilder();
-    builder.append("START_REPLICATION SLOT ")
-        .append(options.getSlotName())
-        .append(" LOGICAL ")
-        .append(options.getStartLSNPosition().asString());
-
-    Properties slotOptions = options.getSlotOptions();
-    if (slotOptions.isEmpty()) {
-      return builder.toString();
+        String query = createStartLogicalQuery(options);
+        return initializeReplication(query, options, ReplicationType.LOGICAL);
     }
 
-    //todo replace on java 8
-    builder.append(" (");
-    boolean isFirst = true;
-    for (String name : slotOptions.stringPropertyNames()) {
-      if (isFirst) {
-        isFirst = false;
-      } else {
-        builder.append(", ");
-      }
-      builder.append('\"').append(name).append('\"').append(" ")
-          .append('\'').append(slotOptions.getProperty(name)).append('\'');
-    }
-    builder.append(")");
+    @Override
+    public PGReplicationStream startPhysical(PhysicalReplicationOptions options)
+            throws SQLException {
 
-    return builder.toString();
-  }
-
-  private void configureSocketTimeout(CommonOptions options) throws PSQLException {
-    if (options.getStatusInterval() == 0) {
-      return;
+        String query = createStartPhysicalQuery(options);
+        return initializeReplication(query, options, ReplicationType.PHYSICAL);
     }
 
-    try {
-      int previousTimeOut = pgStream.getSocket().getSoTimeout();
+    private PGReplicationStream initializeReplication(String query, CommonOptions options,
+                                                      ReplicationType replicationType)
+            throws SQLException {
+        LOGGER.log(Level.FINEST, " FE=> StartReplication(query: {0})", query);
 
-      int minimalTimeOut;
-      if (previousTimeOut > 0) {
-        minimalTimeOut = Math.min(previousTimeOut, options.getStatusInterval());
-      } else {
-        minimalTimeOut = options.getStatusInterval();
-      }
+        configureSocketTimeout(options);
+        CopyDual copyDual = (CopyDual) queryExecutor.startCopy(query, true);
 
-      pgStream.getSocket().setSoTimeout(minimalTimeOut);
-      // Use blocking 1ms reads for `available()` checks
-      pgStream.setMinStreamAvailableCheckDelay(0);
-    } catch (IOException ioe) {
-      throw new PSQLException(GT.tr("The connection attempt failed."),
-          PSQLState.CONNECTION_UNABLE_TO_CONNECT, ioe);
+        return new V3PGReplicationStream(
+                copyDual,
+                options.getStartLSNPosition(),
+                options.getStatusInterval(),
+                replicationType
+        );
+    }
+
+    /**
+     * START_REPLICATION [SLOT slot_name] [PHYSICAL] XXX/XXX.
+     */
+    private String createStartPhysicalQuery(PhysicalReplicationOptions options) {
+        StringBuilder builder = new StringBuilder();
+        builder.append("START_REPLICATION");
+
+        if (options.getSlotName() != null) {
+            builder.append(" SLOT ").append(options.getSlotName());
+        }
+
+        builder.append(" PHYSICAL ").append(options.getStartLSNPosition().asString());
+
+        return builder.toString();
+    }
+
+    /**
+     * START_REPLICATION SLOT slot_name LOGICAL XXX/XXX [ ( option_name [option_value] [, ... ] ) ]
+     */
+    private String createStartLogicalQuery(LogicalReplicationOptions options) {
+        StringBuilder builder = new StringBuilder();
+        builder.append("START_REPLICATION SLOT ")
+                .append(options.getSlotName())
+                .append(" LOGICAL ")
+                .append(options.getStartLSNPosition().asString());
+
+        Properties slotOptions = options.getSlotOptions();
+        if (slotOptions.isEmpty()) {
+            return builder.toString();
+        }
+
+        // TODO: simplify this manual join once Java 8 features (streams / String.join) are available
+        builder.append(" (");
+        boolean isFirst = true;
+        for (String name : slotOptions.stringPropertyNames()) {
+            if (isFirst) {
+                isFirst = false;
+            } else {
+                builder.append(", ");
+            }
+            builder.append('\"').append(name).append('\"').append(" ")
+                    .append('\'').append(slotOptions.getProperty(name)).append('\'');
+        }
+        builder.append(")");
+
+        return builder.toString();
+    }
+
+    private void configureSocketTimeout(CommonOptions options) throws PSQLException {
+        if (options.getStatusInterval() == 0) {
+            return;
+        }
+
+        try {
+            int previousTimeOut = pgStream.getSocket().getSoTimeout();
+
+            int minimalTimeOut;
+            if (previousTimeOut > 0) {
+                minimalTimeOut = Math.min(previousTimeOut, options.getStatusInterval());
+            } else {
+                minimalTimeOut = options.getStatusInterval();
+            }
+
+            pgStream.getSocket().setSoTimeout(minimalTimeOut);
+            // Use blocking 1ms reads for `available()` checks
+            pgStream.setMinStreamAvailableCheckDelay(0);
+        } catch (IOException ioe) {
+            throw new PSQLException(GT.tr("The connection attempt failed."),
+                    PSQLState.CONNECTION_UNABLE_TO_CONNECT, ioe);
+        }
     }
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/ds/PGConnectionPoolDataSource.java b/pgjdbc/src/main/java/org/postgresql/ds/PGConnectionPoolDataSource.java
index 44edf9a..9ccef55 100644
--- a/pgjdbc/src/main/java/org/postgresql/ds/PGConnectionPoolDataSource.java
+++ b/pgjdbc/src/main/java/org/postgresql/ds/PGConnectionPoolDataSource.java
@@ -5,17 +5,15 @@
 
 package org.postgresql.ds;
 
-import org.postgresql.ds.common.BaseDataSource;
-import org.postgresql.util.DriverInfo;
-
 import java.io.IOException;
 import java.io.ObjectInputStream;
 import java.io.ObjectOutputStream;
 import java.io.Serializable;
 import java.sql.SQLException;
-
 import javax.sql.ConnectionPoolDataSource;
 import javax.sql.PooledConnection;
+import org.postgresql.ds.common.BaseDataSource;
+import org.postgresql.util.DriverInfo;
 
 /**
  * PostgreSQL implementation of ConnectionPoolDataSource. The app server or middleware vendor should
@@ -39,68 +37,68 @@ import javax.sql.PooledConnection;
  */
 @SuppressWarnings("serial")
 public class PGConnectionPoolDataSource extends BaseDataSource
-    implements ConnectionPoolDataSource, Serializable {
-  private boolean defaultAutoCommit = true;
+        implements ConnectionPoolDataSource, Serializable {
+    private boolean defaultAutoCommit = true;
 
-  /**
-   * Gets a description of this DataSource.
-   */
-  @Override
-  public String getDescription() {
-    return "ConnectionPoolDataSource from " + DriverInfo.DRIVER_FULL_NAME;
-  }
+    /**
+     * Gets a description of this DataSource.
+     */
+    @Override
+    public String getDescription() {
+        return "ConnectionPoolDataSource from " + DriverInfo.DRIVER_FULL_NAME;
+    }
 
-  /**
-   * Gets a connection which may be pooled by the app server or middleware implementation of
-   * DataSource.
-   *
-   * @throws java.sql.SQLException Occurs when the physical database connection cannot be
-   *         established.
-   */
-  @Override
-  public PooledConnection getPooledConnection() throws SQLException {
-    return new PGPooledConnection(getConnection(), defaultAutoCommit);
-  }
+    /**
+     * Gets a connection which may be pooled by the app server or middleware implementation of
+     * DataSource.
+     *
+     * @throws java.sql.SQLException Occurs when the physical database connection cannot be
+     *                               established.
+     */
+    @Override
+    public PooledConnection getPooledConnection() throws SQLException {
+        return new PGPooledConnection(getConnection(), defaultAutoCommit);
+    }
 
-  /**
-   * Gets a connection which may be pooled by the app server or middleware implementation of
-   * DataSource.
-   *
-   * @throws java.sql.SQLException Occurs when the physical database connection cannot be
-   *         established.
-   */
-  @Override
-  public PooledConnection getPooledConnection(String user, String password) throws SQLException {
-    return new PGPooledConnection(getConnection(user, password), defaultAutoCommit);
-  }
+    /**
+     * Gets a connection which may be pooled by the app server or middleware implementation of
+     * DataSource.
+     *
+     * @throws java.sql.SQLException Occurs when the physical database connection cannot be
+     *                               established.
+     */
+    @Override
+    public PooledConnection getPooledConnection(String user, String password) throws SQLException {
+        return new PGPooledConnection(getConnection(user, password), defaultAutoCommit);
+    }
 
-  /**
-   * Gets whether connections supplied by this pool will have autoCommit turned on by default. The
-   * default value is {@code true}, so that autoCommit will be turned on by default.
-   *
-   * @return true if connections supplied by this pool will have autoCommit
-   */
-  public boolean isDefaultAutoCommit() {
-    return defaultAutoCommit;
-  }
+    /**
+     * Gets whether connections supplied by this pool will have autoCommit turned on by default. The
+     * default value is {@code true}, so that autoCommit will be turned on by default.
+     *
+     * @return true if connections supplied by this pool will have autoCommit
+     */
+    public boolean isDefaultAutoCommit() {
+        return defaultAutoCommit;
+    }
 
-  /**
-   * Sets whether connections supplied by this pool will have autoCommit turned on by default. The
-   * default value is {@code true}, so that autoCommit will be turned on by default.
-   *
-   * @param defaultAutoCommit whether connections supplied by this pool will have autoCommit
-   */
-  public void setDefaultAutoCommit(boolean defaultAutoCommit) {
-    this.defaultAutoCommit = defaultAutoCommit;
-  }
+    /**
+     * Sets whether connections supplied by this pool will have autoCommit turned on by default. The
+     * default value is {@code true}, so that autoCommit will be turned on by default.
+     *
+     * @param defaultAutoCommit whether connections supplied by this pool will have autoCommit
+     */
+    public void setDefaultAutoCommit(boolean defaultAutoCommit) {
+        this.defaultAutoCommit = defaultAutoCommit;
+    }
 
-  private void writeObject(ObjectOutputStream out) throws IOException {
-    writeBaseObject(out);
-    out.writeBoolean(defaultAutoCommit);
-  }
+    private void writeObject(ObjectOutputStream out) throws IOException {
+        writeBaseObject(out);
+        out.writeBoolean(defaultAutoCommit);
+    }
 
-  private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
-    readBaseObject(in);
-    defaultAutoCommit = in.readBoolean();
-  }
+    private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
+        readBaseObject(in);
+        defaultAutoCommit = in.readBoolean();
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/ds/PGPooledConnection.java b/pgjdbc/src/main/java/org/postgresql/ds/PGPooledConnection.java
index 147e7bc..c4bc809 100644
--- a/pgjdbc/src/main/java/org/postgresql/ds/PGPooledConnection.java
+++ b/pgjdbc/src/main/java/org/postgresql/ds/PGPooledConnection.java
@@ -5,12 +5,6 @@
 
 package org.postgresql.ds;
 
-import org.postgresql.PGConnection;
-import org.postgresql.PGStatement;
-import org.postgresql.util.GT;
-import org.postgresql.util.PSQLException;
-import org.postgresql.util.PSQLState;
-
 import java.lang.reflect.InvocationHandler;
 import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Method;
@@ -22,11 +16,15 @@ import java.sql.SQLException;
 import java.sql.Statement;
 import java.util.LinkedList;
 import java.util.List;
-
 import javax.sql.ConnectionEvent;
 import javax.sql.ConnectionEventListener;
 import javax.sql.PooledConnection;
 import javax.sql.StatementEventListener;
+import org.postgresql.PGConnection;
+import org.postgresql.PGStatement;
+import org.postgresql.util.GT;
+import org.postgresql.util.PSQLException;
+import org.postgresql.util.PSQLState;
 
 /**
  * PostgreSQL implementation of the PooledConnection interface. This shouldn't be used directly, as
@@ -38,426 +36,425 @@ import javax.sql.StatementEventListener;
  */
 @SuppressWarnings("rawtypes")
 public class PGPooledConnection implements PooledConnection {
-  private final List<ConnectionEventListener> listeners = new LinkedList<>();
-  private Connection con;
-  private ConnectionHandler last;
-  private final boolean autoCommit;
-  private final boolean isXA;
+    // Classes we consider fatal.
+    private static final String[] fatalClasses = {
+            "08", // connection error
+            "53", // insufficient resources
 
-  /**
-   * Creates a new PooledConnection representing the specified physical connection.
-   *
-   * @param con connection
-   * @param autoCommit whether to autocommit
-   * @param isXA whether connection is a XA connection
-   */
-  public PGPooledConnection(Connection con, boolean autoCommit, boolean isXA) {
-    this.con = con;
-    this.autoCommit = autoCommit;
-    this.isXA = isXA;
-  }
+            // nb: not just "57" as that includes query cancel which is nonfatal
+            "57P01", // admin shutdown
+            "57P02", // crash shutdown
+            "57P03", // cannot connect now
 
-  public PGPooledConnection(Connection con, boolean autoCommit) {
-    this(con, autoCommit, false);
-  }
-
-  /**
-   * Adds a listener for close or fatal error events on the connection handed out to a client.
-   */
-  @Override
-  public void addConnectionEventListener(ConnectionEventListener connectionEventListener) {
-    listeners.add(connectionEventListener);
-  }
-
-  /**
-   * Removes a listener for close or fatal error events on the connection handed out to a client.
-   */
-  @Override
-  public void removeConnectionEventListener(ConnectionEventListener connectionEventListener) {
-    listeners.remove(connectionEventListener);
-  }
-
-  /**
-   * Closes the physical database connection represented by this PooledConnection. If any client has
-   * a connection based on this PooledConnection, it is forcibly closed as well.
-   */
-  @Override
-  public void close() throws SQLException {
-    Connection con = this.con;
-    ConnectionHandler last = this.last;
-    if (last != null) {
-      last.close();
-      if (con != null && !con.isClosed()) {
-        if (!con.getAutoCommit()) {
-          try {
-            con.rollback();
-          } catch (SQLException ignored) {
-          }
-        }
-      }
-    }
-    if (con == null) {
-      return;
-    }
-    try {
-      con.close();
-    } finally {
-      this.con = null;
-    }
-  }
-
-  /**
-   * Gets a handle for a client to use. This is a wrapper around the physical connection, so the
-   * client can call close and it will just return the connection to the pool without really closing
-   * the physical connection.
-   *
-   * <p>
-   * According to the JDBC 2.0 Optional Package spec (6.2.3), only one client may have an active
-   * handle to the connection at a time, so if there is a previous handle active when this is
-   * called, the previous one is forcibly closed and its work rolled back.
-   * </p>
-   */
-  @Override
-  public Connection getConnection() throws SQLException {
-    Connection con = this.con;
-    if (con == null) {
-      // Before throwing the exception, let's notify the registered listeners about the error
-      PSQLException sqlException =
-          new PSQLException(GT.tr("This PooledConnection has already been closed."),
-              PSQLState.CONNECTION_DOES_NOT_EXIST);
-      fireConnectionFatalError(sqlException);
-      throw sqlException;
-    }
-    // If any error occurs while opening a new connection, the listeners
-    // have to be notified. This gives a chance to connection pools to
-    // eliminate bad pooled connections.
-    try {
-      // Only one connection can be open at a time from this PooledConnection. See JDBC 2.0 Optional
-      // Package spec section 6.2.3
-      ConnectionHandler last = this.last;
-      if (last != null) {
-        last.close();
-        if (con != null) {
-          if (!con.getAutoCommit()) {
-            try {
-              con.rollback();
-            } catch (SQLException ignored) {
-            }
-          }
-          con.clearWarnings();
-        }
-      }
-      /*
-       * In XA-mode, autocommit is handled in PGXAConnection, because it depends on whether an
-       * XA-transaction is open or not
-       */
-      if (!isXA && con != null) {
-        con.setAutoCommit(autoCommit);
-      }
-    } catch (SQLException sqlException) {
-      fireConnectionFatalError(sqlException);
-      throw (SQLException) sqlException.fillInStackTrace();
-    }
-    ConnectionHandler handler = new ConnectionHandler(con);
-    last = handler;
-
-    Connection proxyCon = (Connection) Proxy.newProxyInstance(getClass().getClassLoader(),
-        new Class[]{Connection.class, PGConnection.class}, handler);
-    handler.setProxy(proxyCon);
-    return proxyCon;
-  }
-
-  /**
-   * Used to fire a connection closed event to all listeners.
-   */
-  void fireConnectionClosed() {
-    ConnectionEvent evt = null;
-    // Copy the listener list so the listener can remove itself during this method call
-    ConnectionEventListener[] local =
-        listeners.toArray(new ConnectionEventListener[0]);
-    for (ConnectionEventListener listener : local) {
-      if (evt == null) {
-        evt = createConnectionEvent(null);
-      }
-      listener.connectionClosed(evt);
-    }
-  }
-
-  /**
-   * Used to fire a connection error event to all listeners.
-   */
-  void fireConnectionFatalError(SQLException e) {
-    ConnectionEvent evt = null;
-    // Copy the listener list so the listener can remove itself during this method call
-    ConnectionEventListener[] local =
-        listeners.toArray(new ConnectionEventListener[0]);
-    for (ConnectionEventListener listener : local) {
-      if (evt == null) {
-        evt = createConnectionEvent(e);
-      }
-      listener.connectionErrorOccurred(evt);
-    }
-  }
-
-  protected ConnectionEvent createConnectionEvent(SQLException e) {
-    return e == null ? new ConnectionEvent(this) : new ConnectionEvent(this, e);
-  }
-
-  // Classes we consider fatal.
-  private static final String[] fatalClasses = {
-      "08", // connection error
-      "53", // insufficient resources
-
-      // nb: not just "57" as that includes query cancel which is nonfatal
-      "57P01", // admin shutdown
-      "57P02", // crash shutdown
-      "57P03", // cannot connect now
-
-      "58", // system error (backend)
-      "60", // system error (driver)
-      "99", // unexpected error
-      "F0", // configuration file error (backend)
-      "XX", // internal error (backend)
-  };
-
-  private static boolean isFatalState(String state) {
-    if (state == null) {
-      // no info, assume fatal
-      return true;
-    }
-    if (state.length() < 2) {
-      // no class info, assume fatal
-      return true;
-    }
-
-    for (String fatalClass : fatalClasses) {
-      if (state.startsWith(fatalClass)) {
-        return true; // fatal
-      }
-    }
-
-    return false;
-  }
-
-  /**
-   * Fires a connection error event, but only if we think the exception is fatal.
-   *
-   * @param e the SQLException to consider
-   */
-  private void fireConnectionError(SQLException e) {
-    if (!isFatalState(e.getSQLState())) {
-      return;
-    }
-
-    fireConnectionFatalError(e);
-  }
-
-  /**
-   * Instead of declaring a class implementing Connection, which would have to be updated for every
-   * JDK rev, use a dynamic proxy to handle all calls through the Connection interface. This is the
-   * part that requires JDK 1.3 or higher, though JDK 1.2 could be supported with a 3rd-party proxy
-   * package.
-   */
-  private class ConnectionHandler implements InvocationHandler {
+            "58", // system error (backend)
+            "60", // system error (driver)
+            "99", // unexpected error
+            "F0", // configuration file error (backend)
+            "XX", // internal error (backend)
+    };
+    private final List<ConnectionEventListener> listeners = new LinkedList<>();
+    private final boolean autoCommit;
+    private final boolean isXA;
     private Connection con;
-    private Connection proxy; // the Connection the client is currently using, which is a proxy
-    private boolean automatic;
+    private ConnectionHandler last;
 
-    ConnectionHandler(Connection con) {
-      this.con = con;
+    /**
+     * Creates a new PooledConnection representing the specified physical connection.
+     *
+     * @param con        connection
+     * @param autoCommit whether to autocommit
+     * @param isXA       whether the connection is an XA connection
+     */
+    public PGPooledConnection(Connection con, boolean autoCommit, boolean isXA) {
+        this.con = con;
+        this.autoCommit = autoCommit;
+        this.isXA = isXA;
     }
 
+    public PGPooledConnection(Connection con, boolean autoCommit) {
+        this(con, autoCommit, false);
+    }
+
+    private static boolean isFatalState(String state) {
+        if (state == null) {
+            // no info, assume fatal
+            return true;
+        }
+        if (state.length() < 2) {
+            // no class info, assume fatal
+            return true;
+        }
+
+        for (String fatalClass : fatalClasses) {
+            if (state.startsWith(fatalClass)) {
+                return true; // fatal
+            }
+        }
+
+        return false;
+    }
+
+    /**
+     * Adds a listener for close or fatal error events on the connection handed out to a client.
+     */
     @Override
-    @SuppressWarnings("throwing.nullable")
-    public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
-      final String methodName = method.getName();
-      // From Object
-      if (method.getDeclaringClass() == Object.class) {
-        if ("toString".equals(methodName)) {
-          return "Pooled connection wrapping physical connection " + con;
+    public void addConnectionEventListener(ConnectionEventListener connectionEventListener) {
+        listeners.add(connectionEventListener);
+    }
+
+    /**
+     * Removes a listener for close or fatal error events on the connection handed out to a client.
+     */
+    @Override
+    public void removeConnectionEventListener(ConnectionEventListener connectionEventListener) {
+        listeners.remove(connectionEventListener);
+    }
+
+    /**
+     * Closes the physical database connection represented by this PooledConnection. If any client has
+     * a connection based on this PooledConnection, it is forcibly closed as well.
+     */
+    @Override
+    public void close() throws SQLException {
+        Connection con = this.con;
+        ConnectionHandler last = this.last;
+        if (last != null) {
+            last.close();
+            if (con != null && !con.isClosed()) {
+                if (!con.getAutoCommit()) {
+                    try {
+                        con.rollback();
+                    } catch (SQLException ignored) {
+                    }
+                }
+            }
         }
-        if ("equals".equals(methodName)) {
-          return proxy == args[0];
-        }
-        if ("hashCode".equals(methodName)) {
-          return System.identityHashCode(proxy);
+        if (con == null) {
+            return;
         }
         try {
-          return method.invoke(con, args);
-        } catch (InvocationTargetException e) {
-          // throwing.nullable
-          throw e.getTargetException();
+            con.close();
+        } finally {
+            this.con = null;
         }
-      }
+    }
 
-      // All the rest is from the Connection or PGConnection interface
-      Connection con = this.con;
-      if ("isClosed".equals(methodName)) {
-        return con == null || con.isClosed();
-      }
-      if ("close".equals(methodName)) {
-        // we are already closed and a double close
-        // is not an error.
+    /**
+     * Gets a handle for a client to use. This is a wrapper around the physical connection, so the
+     * client can call close and it will just return the connection to the pool without really closing
+     * the physical connection.
+     *
+     * <p>
+     * According to the JDBC 2.0 Optional Package spec (6.2.3), only one client may have an active
+     * handle to the connection at a time, so if there is a previous handle active when this is
+     * called, the previous one is forcibly closed and its work rolled back.
+     * </p>
+     */
+    @Override
+    public Connection getConnection() throws SQLException {
+        Connection con = this.con;
         if (con == null) {
-          return null;
+            // Before throwing the exception, let's notify the registered listeners about the error
+            PSQLException sqlException =
+                    new PSQLException(GT.tr("This PooledConnection has already been closed."),
+                            PSQLState.CONNECTION_DOES_NOT_EXIST);
+            fireConnectionFatalError(sqlException);
+            throw sqlException;
         }
-
-        SQLException ex = null;
-        if (!con.isClosed()) {
-          if (!isXA && !con.getAutoCommit()) {
-            try {
-              con.rollback();
-            } catch (SQLException e) {
-              ex = e;
+        // If any error occurs while opening a new connection, the listeners
+        // have to be notified. This gives a chance to connection pools to
+        // eliminate bad pooled connections.
+        try {
+            // Only one connection can be open at a time from this PooledConnection. See JDBC 2.0 Optional
+            // Package spec section 6.2.3
+            ConnectionHandler last = this.last;
+            if (last != null) {
+                last.close();
+                if (con != null) {
+                    if (!con.getAutoCommit()) {
+                        try {
+                            con.rollback();
+                        } catch (SQLException ignored) {
+                        }
+                    }
+                    con.clearWarnings();
+                }
             }
-          }
-          con.clearWarnings();
+            /*
+             * In XA-mode, autocommit is handled in PGXAConnection, because it depends on whether an
+             * XA-transaction is open or not
+             */
+            if (!isXA && con != null) {
+                con.setAutoCommit(autoCommit);
+            }
+        } catch (SQLException sqlException) {
+            fireConnectionFatalError(sqlException);
+            throw (SQLException) sqlException.fillInStackTrace();
         }
-        this.con = null;
-        this.proxy = null;
-        last = null;
-        fireConnectionClosed();
-        if (ex != null) {
-          throw ex;
+        ConnectionHandler handler = new ConnectionHandler(con);
+        last = handler;
+
+        Connection proxyCon = (Connection) Proxy.newProxyInstance(getClass().getClassLoader(),
+                new Class[]{Connection.class, PGConnection.class}, handler);
+        handler.setProxy(proxyCon);
+        return proxyCon;
+    }
+
+    /**
+     * Used to fire a connection closed event to all listeners.
+     */
+    void fireConnectionClosed() {
+        ConnectionEvent evt = null;
+        // Copy the listener list so the listener can remove itself during this method call
+        ConnectionEventListener[] local =
+                listeners.toArray(new ConnectionEventListener[0]);
+        for (ConnectionEventListener listener : local) {
+            if (evt == null) {
+                evt = createConnectionEvent(null);
+            }
+            listener.connectionClosed(evt);
         }
-        return null;
-      }
-      if (con == null || con.isClosed()) {
-        throw new PSQLException(automatic
-            ? GT.tr(
-                "Connection has been closed automatically because a new connection was opened for the same PooledConnection or the PooledConnection has been closed.")
-            : GT.tr("Connection has been closed."), PSQLState.CONNECTION_DOES_NOT_EXIST);
-      }
+    }
 
-      // From here on in, we invoke via reflection, catch exceptions,
-      // and check if they're fatal before rethrowing.
-      try {
-        if ("createStatement".equals(methodName)) {
-          Statement st = (Statement) method.invoke(con, args);
-          return Proxy.newProxyInstance(getClass().getClassLoader(),
-              new Class[]{Statement.class, PGStatement.class},
-              new StatementHandler(this, st));
-        } else if ("prepareCall".equals(methodName)) {
-          Statement st = (Statement) method.invoke(con, args);
-          return Proxy.newProxyInstance(getClass().getClassLoader(),
-              new Class[]{CallableStatement.class, PGStatement.class},
-              new StatementHandler(this, st));
-        } else if ("prepareStatement".equals(methodName)) {
-          Statement st = (Statement) method.invoke(con, args);
-          return Proxy.newProxyInstance(getClass().getClassLoader(),
-              new Class[]{PreparedStatement.class, PGStatement.class},
-              new StatementHandler(this, st));
-        } else {
-          return method.invoke(con, args);
+    /**
+     * Used to fire a connection error event to all listeners.
+     */
+    void fireConnectionFatalError(SQLException e) {
+        ConnectionEvent evt = null;
+        // Copy the listener list so the listener can remove itself during this method call
+        ConnectionEventListener[] local =
+                listeners.toArray(new ConnectionEventListener[0]);
+        for (ConnectionEventListener listener : local) {
+            if (evt == null) {
+                evt = createConnectionEvent(e);
+            }
+            listener.connectionErrorOccurred(evt);
         }
-      } catch (final InvocationTargetException ite) {
-        final Throwable te = ite.getTargetException();
-        if (te instanceof SQLException) {
-          fireConnectionError((SQLException) te); // Tell listeners about exception if it's fatal
+    }
+
+    protected ConnectionEvent createConnectionEvent(SQLException e) {
+        return e == null ? new ConnectionEvent(this) : new ConnectionEvent(this, e);
+    }
+
+    /**
+     * Fires a connection error event, but only if we think the exception is fatal.
+     *
+     * @param e the SQLException to consider
+     */
+    private void fireConnectionError(SQLException e) {
+        if (!isFatalState(e.getSQLState())) {
+            return;
         }
-        throw te;
-      }
-    }
 
-    Connection getProxy() {
-      return proxy;
-    }
-
-    void setProxy(Connection proxy) {
-      this.proxy = proxy;
-    }
-
-    public void close() {
-      if (con != null) {
-        automatic = true;
-      }
-      con = null;
-      proxy = null;
-      // No close event fired here: see JDBC 2.0 Optional Package spec section 6.3
-    }
-
-    public boolean isClosed() {
-      return con == null;
-    }
-  }
-
-  /**
-   * <p>Instead of declaring classes implementing Statement, PreparedStatement, and CallableStatement,
-   * which would have to be updated for every JDK rev, use a dynamic proxy to handle all calls
-   * through the Statement interfaces. This is the part that requires JDK 1.3 or higher, though JDK
-   * 1.2 could be supported with a 3rd-party proxy package.</p>
-   *
-   * <p>The StatementHandler is required in order to return the proper Connection proxy for the
-   * getConnection method.</p>
-   */
-  private class StatementHandler implements InvocationHandler {
-    private ConnectionHandler con;
-    private Statement st;
-
-    StatementHandler(ConnectionHandler con, Statement st) {
-      this.con = con;
-      this.st = st;
+        fireConnectionFatalError(e);
     }
 
     @Override
-    @SuppressWarnings("throwing.nullable")
-    public Object invoke(Object proxy, Method method, Object[] args)
-        throws Throwable {
-      final String methodName = method.getName();
-      // From Object
-      if (method.getDeclaringClass() == Object.class) {
-        if ("toString".equals(methodName)) {
-          return "Pooled statement wrapping physical statement " + st;
-        }
-        if ("hashCode".equals(methodName)) {
-          return System.identityHashCode(proxy);
-        }
-        if ("equals".equals(methodName)) {
-          return proxy == args[0];
-        }
-        return method.invoke(st, args);
-      }
-
-      Statement st = this.st;
-      // All the rest is from the Statement interface
-      if ("isClosed".equals(methodName)) {
-        return st == null || st.isClosed();
-      }
-      if ("close".equals(methodName)) {
-        if (st == null || st.isClosed()) {
-          return null;
-        }
-        con = null;
-        this.st = null;
-        st.close();
-        return null;
-      }
-      if (st == null || st.isClosed()) {
-        throw new PSQLException(GT.tr("Statement has been closed."), PSQLState.OBJECT_NOT_IN_STATE);
-      }
-      if ("getConnection".equals(methodName)) {
-        return con.getProxy(); // the proxied connection, not a physical connection
-      }
-
-      // Delegate the call to the proxied Statement.
-      try {
-        return method.invoke(st, args);
-      } catch (final InvocationTargetException ite) {
-        final Throwable te = ite.getTargetException();
-        if (te instanceof SQLException) {
-          fireConnectionError((SQLException) te); // Tell listeners about exception if it's fatal
-        }
-        throw te;
-      }
+    public void removeStatementEventListener(StatementEventListener listener) {
     }
-  }
 
-  @Override
-  public void removeStatementEventListener(StatementEventListener listener) {
-  }
+    @Override
+    public void addStatementEventListener(StatementEventListener listener) {
+    }
 
-  @Override
-  public void addStatementEventListener(StatementEventListener listener) {
-  }
+    /**
+     * Instead of declaring a class implementing Connection, which would have to be updated for every
+     * JDK rev, use a dynamic proxy to handle all calls through the Connection interface. This is the
+     * part that requires JDK 1.3 or higher, though JDK 1.2 could be supported with a 3rd-party proxy
+     * package.
+     */
+    private class ConnectionHandler implements InvocationHandler {
+        private Connection con;
+        private Connection proxy; // the Connection the client is currently using, which is a proxy
+        private boolean automatic;
+
+        ConnectionHandler(Connection con) {
+            this.con = con;
+        }
+
+        @Override
+        @SuppressWarnings("throwing.nullable")
+        public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
+            final String methodName = method.getName();
+            // From Object
+            if (method.getDeclaringClass() == Object.class) {
+                if ("toString".equals(methodName)) {
+                    return "Pooled connection wrapping physical connection " + con;
+                }
+                if ("equals".equals(methodName)) {
+                    return proxy == args[0];
+                }
+                if ("hashCode".equals(methodName)) {
+                    return System.identityHashCode(proxy);
+                }
+                try {
+                    return method.invoke(con, args);
+                } catch (InvocationTargetException e) {
+                    // throwing.nullable
+                    throw e.getTargetException();
+                }
+            }
+
+            // All the rest is from the Connection or PGConnection interface
+            Connection con = this.con;
+            if ("isClosed".equals(methodName)) {
+                return con == null || con.isClosed();
+            }
+            if ("close".equals(methodName)) {
+                // we are already closed and a double close
+                // is not an error.
+                if (con == null) {
+                    return null;
+                }
+
+                SQLException ex = null;
+                if (!con.isClosed()) {
+                    if (!isXA && !con.getAutoCommit()) {
+                        try {
+                            con.rollback();
+                        } catch (SQLException e) {
+                            ex = e;
+                        }
+                    }
+                    con.clearWarnings();
+                }
+                this.con = null;
+                this.proxy = null;
+                last = null;
+                fireConnectionClosed();
+                if (ex != null) {
+                    throw ex;
+                }
+                return null;
+            }
+            if (con == null || con.isClosed()) {
+                throw new PSQLException(automatic
+                        ? GT.tr(
+                        "Connection has been closed automatically because a new connection was opened for the same PooledConnection or the PooledConnection has been closed.")
+                        : GT.tr("Connection has been closed."), PSQLState.CONNECTION_DOES_NOT_EXIST);
+            }
+
+            // From here on in, we invoke via reflection, catch exceptions,
+            // and check if they're fatal before rethrowing.
+            try {
+                if ("createStatement".equals(methodName)) {
+                    Statement st = (Statement) method.invoke(con, args);
+                    return Proxy.newProxyInstance(getClass().getClassLoader(),
+                            new Class[]{Statement.class, PGStatement.class},
+                            new StatementHandler(this, st));
+                } else if ("prepareCall".equals(methodName)) {
+                    Statement st = (Statement) method.invoke(con, args);
+                    return Proxy.newProxyInstance(getClass().getClassLoader(),
+                            new Class[]{CallableStatement.class, PGStatement.class},
+                            new StatementHandler(this, st));
+                } else if ("prepareStatement".equals(methodName)) {
+                    Statement st = (Statement) method.invoke(con, args);
+                    return Proxy.newProxyInstance(getClass().getClassLoader(),
+                            new Class[]{PreparedStatement.class, PGStatement.class},
+                            new StatementHandler(this, st));
+                } else {
+                    return method.invoke(con, args);
+                }
+            } catch (final InvocationTargetException ite) {
+                final Throwable te = ite.getTargetException();
+                if (te instanceof SQLException) {
+                    fireConnectionError((SQLException) te); // Tell listeners about exception if it's fatal
+                }
+                throw te;
+            }
+        }
+
+        Connection getProxy() {
+            return proxy;
+        }
+
+        void setProxy(Connection proxy) {
+            this.proxy = proxy;
+        }
+
+        public void close() {
+            if (con != null) {
+                automatic = true;
+            }
+            con = null;
+            proxy = null;
+            // No close event fired here: see JDBC 2.0 Optional Package spec section 6.3
+        }
+
+        public boolean isClosed() {
+            return con == null;
+        }
+    }
+
+    /**
+     * <p>Instead of declaring classes implementing Statement, PreparedStatement, and CallableStatement,
+     * which would have to be updated for every JDK rev, use a dynamic proxy to handle all calls
+     * through the Statement interfaces. This is the part that requires JDK 1.3 or higher, though JDK
+     * 1.2 could be supported with a 3rd-party proxy package.</p>
+     *
+     * <p>The StatementHandler is required in order to return the proper Connection proxy for the
+     * getConnection method.</p>
+     */
+    private class StatementHandler implements InvocationHandler {
+        private ConnectionHandler con;
+        private Statement st;
+
+        StatementHandler(ConnectionHandler con, Statement st) {
+            this.con = con;
+            this.st = st;
+        }
+
+        @Override
+        @SuppressWarnings("throwing.nullable")
+        public Object invoke(Object proxy, Method method, Object[] args)
+                throws Throwable {
+            final String methodName = method.getName();
+            // From Object
+            if (method.getDeclaringClass() == Object.class) {
+                if ("toString".equals(methodName)) {
+                    return "Pooled statement wrapping physical statement " + st;
+                }
+                if ("hashCode".equals(methodName)) {
+                    return System.identityHashCode(proxy);
+                }
+                if ("equals".equals(methodName)) {
+                    return proxy == args[0];
+                }
+                return method.invoke(st, args);
+            }
+
+            Statement st = this.st;
+            // All the rest is from the Statement interface
+            if ("isClosed".equals(methodName)) {
+                return st == null || st.isClosed();
+            }
+            if ("close".equals(methodName)) {
+                if (st == null || st.isClosed()) {
+                    return null;
+                }
+                con = null;
+                this.st = null;
+                st.close();
+                return null;
+            }
+            if (st == null || st.isClosed()) {
+                throw new PSQLException(GT.tr("Statement has been closed."), PSQLState.OBJECT_NOT_IN_STATE);
+            }
+            if ("getConnection".equals(methodName)) {
+                return con.getProxy(); // the proxied connection, not a physical connection
+            }
+
+            // Delegate the call to the proxied Statement.
+            try {
+                return method.invoke(st, args);
+            } catch (final InvocationTargetException ite) {
+                final Throwable te = ite.getTargetException();
+                if (te instanceof SQLException) {
+                    fireConnectionError((SQLException) te); // Tell listeners about exception if it's fatal
+                }
+                throw te;
+            }
+        }
+    }
 
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/ds/PGPoolingDataSource.java b/pgjdbc/src/main/java/org/postgresql/ds/PGPoolingDataSource.java
index 4743bb9..14e0032 100644
--- a/pgjdbc/src/main/java/org/postgresql/ds/PGPoolingDataSource.java
+++ b/pgjdbc/src/main/java/org/postgresql/ds/PGPoolingDataSource.java
@@ -5,13 +5,6 @@
 
 package org.postgresql.ds;
 
-import org.postgresql.ds.common.BaseDataSource;
-import org.postgresql.jdbc.ResourceLock;
-import org.postgresql.util.DriverInfo;
-import org.postgresql.util.GT;
-import org.postgresql.util.PSQLException;
-import org.postgresql.util.PSQLState;
-
 import java.sql.Connection;
 import java.sql.SQLException;
 import java.util.Stack;
@@ -19,7 +12,6 @@ import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.locks.Condition;
-
 import javax.naming.NamingException;
 import javax.naming.Reference;
 import javax.naming.StringRefAddr;
@@ -27,6 +19,12 @@ import javax.sql.ConnectionEvent;
 import javax.sql.ConnectionEventListener;
 import javax.sql.DataSource;
 import javax.sql.PooledConnection;
+import org.postgresql.ds.common.BaseDataSource;
+import org.postgresql.jdbc.ResourceLock;
+import org.postgresql.util.DriverInfo;
+import org.postgresql.util.GT;
+import org.postgresql.util.PSQLException;
+import org.postgresql.util.PSQLState;
 
 /**
  * DataSource which uses connection pooling. <span style="color: red;">Don't use this if your
@@ -57,426 +55,423 @@ import javax.sql.PooledConnection;
  * </p>
  *
  * @author Aaron Mulder (ammulder@chariotsolutions.com)
- *
  * @deprecated Since 42.0.0, instead of this class you should use a fully featured connection pool
- *     like HikariCP, vibur-dbcp, commons-dbcp, c3p0, etc.
+ * like HikariCP, vibur-dbcp, commons-dbcp, c3p0, etc.
  */
 @SuppressWarnings("try")
 @Deprecated
 public class PGPoolingDataSource extends BaseDataSource implements DataSource {
-  protected static ConcurrentMap<String, PGPoolingDataSource> dataSources =
-      new ConcurrentHashMap<>();
-
-  public static PGPoolingDataSource getDataSource(String name) {
-    return dataSources.get(name);
-  }
-
-  // Additional Data Source properties
-  protected String dataSourceName; // Must be protected for subclasses to sync updates to it
-  private int initialConnections;
-  private int maxConnections;
-  // State variables
-  private boolean initialized;
-  private final Stack<PooledConnection> available = new Stack<>();
-  private final Stack<PooledConnection> used = new Stack<>();
-  private boolean isClosed;
-  private final ResourceLock lock = new ResourceLock();
-  private final Condition lockCondition = lock.newCondition();
-  private PGConnectionPoolDataSource source;
-
-  /**
-   * Gets a description of this DataSource.
-   */
-  @Override
-  public String getDescription() {
-    return "Pooling DataSource '" + dataSourceName + " from " + DriverInfo.DRIVER_FULL_NAME;
-  }
-
-  /**
-   * Ensures the DataSource properties are not changed after the DataSource has been used.
-   *
-   * @throws IllegalStateException The Server Name cannot be changed after the DataSource has been
-   *         used.
-   */
-  @Override
-  public void setServerName(String serverName) {
-    if (initialized) {
-      throw new IllegalStateException(
-          "Cannot set Data Source properties after DataSource has been used");
-    }
-    super.setServerName(serverName);
-  }
-
-  /**
-   * Ensures the DataSource properties are not changed after the DataSource has been used.
-   *
-   * @throws IllegalStateException The Database Name cannot be changed after the DataSource has been
-   *         used.
-   */
-  @Override
-  public void setDatabaseName(String databaseName) {
-    if (initialized) {
-      throw new IllegalStateException(
-          "Cannot set Data Source properties after DataSource has been used");
-    }
-    super.setDatabaseName(databaseName);
-  }
-
-  /**
-   * Ensures the DataSource properties are not changed after the DataSource has been used.
-   *
-   * @throws IllegalStateException The User cannot be changed after the DataSource has been used.
-   */
-  @Override
-  public void setUser(String user) {
-    if (initialized) {
-      throw new IllegalStateException(
-          "Cannot set Data Source properties after DataSource has been used");
-    }
-    super.setUser(user);
-  }
-
-  /**
-   * Ensures the DataSource properties are not changed after the DataSource has been used.
-   *
-   * @throws IllegalStateException The Password cannot be changed after the DataSource has been
-   *         used.
-   */
-  @Override
-  public void setPassword(String password) {
-    if (initialized) {
-      throw new IllegalStateException(
-          "Cannot set Data Source properties after DataSource has been used");
-    }
-    super.setPassword(password);
-  }
-
-  /**
-   * Ensures the DataSource properties are not changed after the DataSource has been used.
-   *
-   * @throws IllegalStateException The Port Number cannot be changed after the DataSource has been
-   *         used.
-   */
-  @Override
-  public void setPortNumber(int portNumber) {
-    if (initialized) {
-      throw new IllegalStateException(
-          "Cannot set Data Source properties after DataSource has been used");
-    }
-    super.setPortNumber(portNumber);
-  }
-
-  /**
-   * Gets the number of connections that will be created when this DataSource is initialized. If you
-   * do not call initialize explicitly, it will be initialized the first time a connection is drawn
-   * from it.
-   *
-   * @return number of connections that will be created when this DataSource is initialized
-   */
-  public int getInitialConnections() {
-    return initialConnections;
-  }
-
-  /**
-   * Sets the number of connections that will be created when this DataSource is initialized. If you
-   * do not call initialize explicitly, it will be initialized the first time a connection is drawn
-   * from it.
-   *
-   * @param initialConnections number of initial connections
-   * @throws IllegalStateException The Initial Connections cannot be changed after the DataSource
-   *         has been used.
-   */
-  public void setInitialConnections(int initialConnections) {
-    if (initialized) {
-      throw new IllegalStateException(
-          "Cannot set Data Source properties after DataSource has been used");
-    }
-    this.initialConnections = initialConnections;
-  }
-
-  /**
-   * Gets the maximum number of connections that the pool will allow. If a request comes in and this
-   * many connections are in use, the request will block until a connection is available. Note that
-   * connections for a user other than the default user will not be pooled and don't count against
-   * this limit.
-   *
-   * @return The maximum number of pooled connection allowed, or 0 for no maximum.
-   */
-  public int getMaxConnections() {
-    return maxConnections;
-  }
-
-  /**
-   * Sets the maximum number of connections that the pool will allow. If a request comes in and this
-   * many connections are in use, the request will block until a connection is available. Note that
-   * connections for a user other than the default user will not be pooled and don't count against
-   * this limit.
-   *
-   * @param maxConnections The maximum number of pooled connection to allow, or 0 for no maximum.
-   * @throws IllegalStateException The Maximum Connections cannot be changed after the DataSource
-   *         has been used.
-   */
-  public void setMaxConnections(int maxConnections) {
-    if (initialized) {
-      throw new IllegalStateException(
-          "Cannot set Data Source properties after DataSource has been used");
-    }
-    this.maxConnections = maxConnections;
-  }
-
-  /**
-   * Gets the name of this DataSource. This uniquely identifies the DataSource. You cannot use more
-   * than one DataSource in the same VM with the same name.
-   *
-   * @return name of this DataSource
-   */
-  public String getDataSourceName() {
-    return dataSourceName;
-  }
-
-  /**
-   * Sets the name of this DataSource. This is required, and uniquely identifies the DataSource. You
-   * cannot create or use more than one DataSource in the same VM with the same name.
-   *
-   * @param dataSourceName datasource name
-   * @throws IllegalStateException The Data Source Name cannot be changed after the DataSource has
-   *         been used.
-   * @throws IllegalArgumentException Another PoolingDataSource with the same dataSourceName already
-   *         exists.
-   */
-  public void setDataSourceName(String dataSourceName) {
-    if (initialized) {
-      throw new IllegalStateException(
-          "Cannot set Data Source properties after DataSource has been used");
-    }
-    if (this.dataSourceName != null && dataSourceName != null
-        && dataSourceName.equals(this.dataSourceName)) {
-      return;
-    }
-    PGPoolingDataSource previous = dataSources.putIfAbsent(dataSourceName, this);
-    if (previous != null) {
-      throw new IllegalArgumentException(
-          "DataSource with name '" + dataSourceName + "' already exists!");
-    }
-    if (this.dataSourceName != null) {
-      dataSources.remove(this.dataSourceName);
-    }
-    this.dataSourceName = dataSourceName;
-  }
-
-  /**
-   * Initializes this DataSource. If the initialConnections is greater than zero, that number of
-   * connections will be created. After this method is called, the DataSource properties cannot be
-   * changed. If you do not call this explicitly, it will be called the first time you get a
-   * connection from the DataSource.
-   *
-   * @throws SQLException Occurs when the initialConnections is greater than zero, but the
-   *         DataSource is not able to create enough physical connections.
-   */
-  public void initialize() throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      PGConnectionPoolDataSource source = createConnectionPool();
-      this.source = source;
-      try {
-        source.initializeFrom(this);
-      } catch (Exception e) {
-        throw new PSQLException(GT.tr("Failed to setup DataSource."), PSQLState.UNEXPECTED_ERROR,
-            e);
-      }
-
-      while (available.size() < initialConnections) {
-        available.push(source.getPooledConnection());
-      }
-
-      initialized = true;
-    }
-  }
-
-  protected boolean isInitialized() {
-    return initialized;
-  }
-
-  /**
-   * Creates the appropriate ConnectionPool to use for this DataSource.
-   *
-   * @return appropriate ConnectionPool to use for this DataSource
-   */
-  protected PGConnectionPoolDataSource createConnectionPool() {
-    return new PGConnectionPoolDataSource();
-  }
-
-  /**
-   * Gets a <b>non-pooled</b> connection, unless the user and password are the same as the default
-   * values for this connection pool.
-   *
-   * @return A pooled connection.
-   * @throws SQLException Occurs when no pooled connection is available, and a new physical
-   *         connection cannot be created.
-   */
-  @Override
-  public Connection getConnection(String user, String password)
-      throws SQLException {
-    // If this is for the default user/password, use a pooled connection
-    if (user == null || (user.equals(getUser()) && ((password == null && getPassword() == null)
-        || (password != null && password.equals(getPassword()))))) {
-      return getConnection();
-    }
-    // Otherwise, use a non-pooled connection
-    if (!initialized) {
-      initialize();
-    }
-    return super.getConnection(user, password);
-  }
-
-  /**
-   * Gets a connection from the connection pool.
-   *
-   * @return A pooled connection.
-   * @throws SQLException Occurs when no pooled connection is available, and a new physical
-   *         connection cannot be created.
-   */
-  @Override
-  public Connection getConnection() throws SQLException {
-    if (!initialized) {
-      initialize();
-    }
-    return getPooledConnection();
-  }
-
-  /**
-   * Closes this DataSource, and all the pooled connections, whether in use or not.
-   */
-  public void close() {
-    try (ResourceLock ignore = lock.obtain()) {
-      isClosed = true;
-      while (!available.isEmpty()) {
-        PooledConnection pci = available.pop();
-        try {
-          pci.close();
-        } catch (SQLException ignored) {
+    protected static ConcurrentMap<String, PGPoolingDataSource> dataSources =
+            new ConcurrentHashMap<>();
+    private final Stack<PooledConnection> available = new Stack<>();
+    private final Stack<PooledConnection> used = new Stack<>();
+    private final ResourceLock lock = new ResourceLock();
+    private final Condition lockCondition = lock.newCondition();
+    // Additional Data Source properties
+    protected String dataSourceName; // Must be protected for subclasses to sync updates to it
+    private int initialConnections;
+    private int maxConnections;
+    // State variables
+    private boolean initialized;
+    private boolean isClosed;
+    /**
+     * Notified when a pooled connection is closed, or a fatal error occurs on a pooled connection.
+     * This is the only way connections are marked as unused.
+     */
+    private final ConnectionEventListener connectionEventListener = new ConnectionEventListener() {
+        @Override
+        public void connectionClosed(ConnectionEvent event) {
+            ((PooledConnection) event.getSource()).removeConnectionEventListener(this);
+            try (ResourceLock ignore = lock.obtain()) {
+                if (isClosed) {
+                    return; // DataSource has been closed
+                }
+                boolean removed = used.remove(event.getSource());
+                if (removed) {
+                    available.push((PooledConnection) event.getSource());
+                    // There's now a new connection available
+                    lockCondition.signal();
+                } else {
+                    // a connection error occurred
+                }
+            }
         }
-      }
-      while (!used.isEmpty()) {
-        PooledConnection pci = used.pop();
-        pci.removeConnectionEventListener(connectionEventListener);
-        try {
-          pci.close();
-        } catch (SQLException ignored) {
-        }
-      }
-    }
-    removeStoredDataSource();
-  }
 
-  protected void removeStoredDataSource() {
-    dataSources.remove(dataSourceName);
-  }
+        /**
+         * This is only called for fatal errors, where the physical connection is useless afterward and
+         * should be removed from the pool.
+         */
+        @Override
+        public void connectionErrorOccurred(ConnectionEvent event) {
+            ((PooledConnection) event.getSource()).removeConnectionEventListener(this);
+            try (ResourceLock ignore = lock.obtain()) {
+                if (isClosed) {
+                    return; // DataSource has been closed
+                }
+                used.remove(event.getSource());
+                // We're now at least 1 connection under the max
+                lockCondition.signal();
+            }
+        }
+    };
+    private PGConnectionPoolDataSource source;
 
-  protected void addDataSource(String dataSourceName) {
-    dataSources.put(dataSourceName, this);
-  }
-
-  /**
-   * Gets a connection from the pool. Will get an available one if present, or create a new one if
-   * under the max limit. Will block if all used and a new one would exceed the max.
-   */
-  private Connection getPooledConnection() throws SQLException {
-    PooledConnection pc = null;
-    try (ResourceLock ignore = lock.obtain()) {
-      if (isClosed) {
-        throw new PSQLException(GT.tr("DataSource has been closed."),
-            PSQLState.CONNECTION_DOES_NOT_EXIST);
-      }
-      while (true) {
-        if (!available.isEmpty()) {
-          pc = available.pop();
-          used.push(pc);
-          break;
-        }
-        if (maxConnections == 0 || used.size() < maxConnections) {
-          pc = source.getPooledConnection();
-          used.push(pc);
-          break;
-        } else {
-          try {
-            // Wake up every second at a minimum
-            lockCondition.await(1000L, TimeUnit.MILLISECONDS);
-          } catch (InterruptedException ignored) {
-          }
-        }
-      }
-    }
-    pc.addConnectionEventListener(connectionEventListener);
-    return pc.getConnection();
-  }
-
-  /**
-   * Notified when a pooled connection is closed, or a fatal error occurs on a pooled connection.
-   * This is the only way connections are marked as unused.
-   */
-  private final ConnectionEventListener connectionEventListener = new ConnectionEventListener() {
-    @Override
-    public void connectionClosed(ConnectionEvent event) {
-      ((PooledConnection) event.getSource()).removeConnectionEventListener(this);
-      try (ResourceLock ignore = lock.obtain()) {
-        if (isClosed) {
-          return; // DataSource has been closed
-        }
-        boolean removed = used.remove(event.getSource());
-        if (removed) {
-          available.push((PooledConnection) event.getSource());
-          // There's now a new connection available
-          lockCondition.signal();
-        } else {
-          // a connection error occurred
-        }
-      }
+    public static PGPoolingDataSource getDataSource(String name) {
+        return dataSources.get(name);
     }
 
     /**
-     * This is only called for fatal errors, where the physical connection is useless afterward and
-     * should be removed from the pool.
+     * Gets a description of this DataSource.
      */
     @Override
-    public void connectionErrorOccurred(ConnectionEvent event) {
-      ((PooledConnection) event.getSource()).removeConnectionEventListener(this);
-      try (ResourceLock ignore = lock.obtain()) {
-        if (isClosed) {
-          return; // DataSource has been closed
+    public String getDescription() {
+        return "Pooling DataSource '" + dataSourceName + " from " + DriverInfo.DRIVER_FULL_NAME;
+    }
+
+    /**
+     * Ensures the DataSource properties are not changed after the DataSource has been used.
+     *
+     * @throws IllegalStateException The Server Name cannot be changed after the DataSource has been
+     *                               used.
+     */
+    @Override
+    public void setServerName(String serverName) {
+        if (initialized) {
+            throw new IllegalStateException(
+                    "Cannot set Data Source properties after DataSource has been used");
         }
-        used.remove(event.getSource());
-        // We're now at least 1 connection under the max
-        lockCondition.signal();
-      }
+        super.setServerName(serverName);
     }
-  };
 
-  /**
-   * Adds custom properties for this DataSource to the properties defined in the superclass.
-   */
-  @Override
-  public Reference getReference() throws NamingException {
-    Reference ref = super.getReference();
-    ref.add(new StringRefAddr("dataSourceName", dataSourceName));
-    if (initialConnections > 0) {
-      ref.add(new StringRefAddr("initialConnections", Integer.toString(initialConnections)));
+    /**
+     * Ensures the DataSource properties are not changed after the DataSource has been used.
+     *
+     * @throws IllegalStateException The Database Name cannot be changed after the DataSource has been
+     *                               used.
+     */
+    @Override
+    public void setDatabaseName(String databaseName) {
+        if (initialized) {
+            throw new IllegalStateException(
+                    "Cannot set Data Source properties after DataSource has been used");
+        }
+        super.setDatabaseName(databaseName);
     }
-    if (maxConnections > 0) {
-      ref.add(new StringRefAddr("maxConnections", Integer.toString(maxConnections)));
-    }
-    return ref;
-  }
 
-  @Override
-  public boolean isWrapperFor(Class<?> iface) throws SQLException {
-    return iface.isAssignableFrom(getClass());
-  }
-
-  @Override
-  public <T> T unwrap(Class<T> iface) throws SQLException {
-    if (iface.isAssignableFrom(getClass())) {
-      return iface.cast(this);
+    /**
+     * Ensures the DataSource properties are not changed after the DataSource has been used.
+     *
+     * @throws IllegalStateException The User cannot be changed after the DataSource has been used.
+     */
+    @Override
+    public void setUser(String user) {
+        if (initialized) {
+            throw new IllegalStateException(
+                    "Cannot set Data Source properties after DataSource has been used");
+        }
+        super.setUser(user);
+    }
+
+    /**
+     * Ensures the DataSource properties are not changed after the DataSource has been used.
+     *
+     * @throws IllegalStateException The Password cannot be changed after the DataSource has been
+     *                               used.
+     */
+    @Override
+    public void setPassword(String password) {
+        if (initialized) {
+            throw new IllegalStateException(
+                    "Cannot set Data Source properties after DataSource has been used");
+        }
+        super.setPassword(password);
+    }
+
+    /**
+     * Ensures the DataSource properties are not changed after the DataSource has been used.
+     *
+     * @throws IllegalStateException The Port Number cannot be changed after the DataSource has been
+     *                               used.
+     */
+    @Override
+    public void setPortNumber(int portNumber) {
+        if (initialized) {
+            throw new IllegalStateException(
+                    "Cannot set Data Source properties after DataSource has been used");
+        }
+        super.setPortNumber(portNumber);
+    }
+
+    /**
+     * Gets the number of connections that will be created when this DataSource is initialized. If you
+     * do not call initialize explicitly, it will be initialized the first time a connection is drawn
+     * from it.
+     *
+     * @return number of connections that will be created when this DataSource is initialized
+     */
+    public int getInitialConnections() {
+        return initialConnections;
+    }
+
+    /**
+     * Sets the number of connections that will be created when this DataSource is initialized. If you
+     * do not call initialize explicitly, it will be initialized the first time a connection is drawn
+     * from it.
+     *
+     * @param initialConnections number of initial connections
+     * @throws IllegalStateException The Initial Connections cannot be changed after the DataSource
+     *                               has been used.
+     */
+    public void setInitialConnections(int initialConnections) {
+        if (initialized) {
+            throw new IllegalStateException(
+                    "Cannot set Data Source properties after DataSource has been used");
+        }
+        this.initialConnections = initialConnections;
+    }
+
+    /**
+     * Gets the maximum number of connections that the pool will allow. If a request comes in and this
+     * many connections are in use, the request will block until a connection is available. Note that
+     * connections for a user other than the default user will not be pooled and don't count against
+     * this limit.
+     *
+     * @return The maximum number of pooled connection allowed, or 0 for no maximum.
+     */
+    public int getMaxConnections() {
+        return maxConnections;
+    }
+
+    /**
+     * Sets the maximum number of connections that the pool will allow. If a request comes in and this
+     * many connections are in use, the request will block until a connection is available. Note that
+     * connections for a user other than the default user will not be pooled and don't count against
+     * this limit.
+     *
+     * @param maxConnections The maximum number of pooled connection to allow, or 0 for no maximum.
+     * @throws IllegalStateException The Maximum Connections cannot be changed after the DataSource
+     *                               has been used.
+     */
+    public void setMaxConnections(int maxConnections) {
+        if (initialized) {
+            throw new IllegalStateException(
+                    "Cannot set Data Source properties after DataSource has been used");
+        }
+        this.maxConnections = maxConnections;
+    }
+
+    /**
+     * Gets the name of this DataSource. This uniquely identifies the DataSource. You cannot use more
+     * than one DataSource in the same VM with the same name.
+     *
+     * @return name of this DataSource
+     */
+    public String getDataSourceName() {
+        return dataSourceName;
+    }
+
+    /**
+     * Sets the name of this DataSource. This is required, and uniquely identifies the DataSource. You
+     * cannot create or use more than one DataSource in the same VM with the same name.
+     *
+     * @param dataSourceName datasource name
+     * @throws IllegalStateException    The Data Source Name cannot be changed after the DataSource has
+     *                                  been used.
+     * @throws IllegalArgumentException Another PoolingDataSource with the same dataSourceName already
+     *                                  exists.
+     */
+    public void setDataSourceName(String dataSourceName) {
+        if (initialized) {
+            throw new IllegalStateException(
+                    "Cannot set Data Source properties after DataSource has been used");
+        }
+        if (this.dataSourceName != null && dataSourceName != null
+                && dataSourceName.equals(this.dataSourceName)) {
+            return;
+        }
+        PGPoolingDataSource previous = dataSources.putIfAbsent(dataSourceName, this);
+        if (previous != null) {
+            throw new IllegalArgumentException(
+                    "DataSource with name '" + dataSourceName + "' already exists!");
+        }
+        if (this.dataSourceName != null) {
+            dataSources.remove(this.dataSourceName);
+        }
+        this.dataSourceName = dataSourceName;
+    }
+
+    /**
+     * Initializes this DataSource. If the initialConnections is greater than zero, that number of
+     * connections will be created. After this method is called, the DataSource properties cannot be
+     * changed. If you do not call this explicitly, it will be called the first time you get a
+     * connection from the DataSource.
+     *
+     * @throws SQLException Occurs when the initialConnections is greater than zero, but the
+     *                      DataSource is not able to create enough physical connections.
+     */
+    public void initialize() throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            PGConnectionPoolDataSource source = createConnectionPool();
+            this.source = source;
+            try {
+                source.initializeFrom(this);
+            } catch (Exception e) {
+                throw new PSQLException(GT.tr("Failed to setup DataSource."), PSQLState.UNEXPECTED_ERROR,
+                        e);
+            }
+
+            while (available.size() < initialConnections) {
+                available.push(source.getPooledConnection());
+            }
+
+            initialized = true;
+        }
+    }
+
+    protected boolean isInitialized() {
+        return initialized;
+    }
+
+    /**
+     * Creates the appropriate ConnectionPool to use for this DataSource.
+     *
+     * @return appropriate ConnectionPool to use for this DataSource
+     */
+    protected PGConnectionPoolDataSource createConnectionPool() {
+        return new PGConnectionPoolDataSource();
+    }
+
+    /**
+     * Gets a <b>non-pooled</b> connection, unless the user and password are the same as the default
+     * values for this connection pool.
+     *
+     * @return A pooled connection.
+     * @throws SQLException Occurs when no pooled connection is available, and a new physical
+     *                      connection cannot be created.
+     */
+    @Override
+    public Connection getConnection(String user, String password)
+            throws SQLException {
+        // If this is for the default user/password, use a pooled connection
+        if (user == null || (user.equals(getUser()) && ((password == null && getPassword() == null)
+                || (password != null && password.equals(getPassword()))))) {
+            return getConnection();
+        }
+        // Otherwise, use a non-pooled connection
+        if (!initialized) {
+            initialize();
+        }
+        return super.getConnection(user, password);
+    }
+
+    /**
+     * Gets a connection from the connection pool.
+     *
+     * @return A pooled connection.
+     * @throws SQLException Occurs when no pooled connection is available, and a new physical
+     *                      connection cannot be created.
+     */
+    @Override
+    public Connection getConnection() throws SQLException {
+        if (!initialized) {
+            initialize();
+        }
+        return getPooledConnection();
+    }
+
+    /**
+     * Closes this DataSource, and all the pooled connections, whether in use or not.
+     */
+    public void close() {
+        try (ResourceLock ignore = lock.obtain()) {
+            isClosed = true;
+            while (!available.isEmpty()) {
+                PooledConnection pci = available.pop();
+                try {
+                    pci.close();
+                } catch (SQLException ignored) {
+                }
+            }
+            while (!used.isEmpty()) {
+                PooledConnection pci = used.pop();
+                pci.removeConnectionEventListener(connectionEventListener);
+                try {
+                    pci.close();
+                } catch (SQLException ignored) {
+                }
+            }
+        }
+        removeStoredDataSource();
+    }
+
+    protected void removeStoredDataSource() {
+        dataSources.remove(dataSourceName);
+    }
+
+    protected void addDataSource(String dataSourceName) {
+        dataSources.put(dataSourceName, this);
+    }
+
+    /**
+     * Gets a connection from the pool. Will get an available one if present, or create a new one if
+     * under the max limit. Will block if all used and a new one would exceed the max.
+     */
+    private Connection getPooledConnection() throws SQLException {
+        PooledConnection pc = null;
+        try (ResourceLock ignore = lock.obtain()) {
+            if (isClosed) {
+                throw new PSQLException(GT.tr("DataSource has been closed."),
+                        PSQLState.CONNECTION_DOES_NOT_EXIST);
+            }
+            while (true) {
+                if (!available.isEmpty()) {
+                    pc = available.pop();
+                    used.push(pc);
+                    break;
+                }
+                if (maxConnections == 0 || used.size() < maxConnections) {
+                    pc = source.getPooledConnection();
+                    used.push(pc);
+                    break;
+                } else {
+                    try {
+                        // Wake up every second at a minimum
+                        lockCondition.await(1000L, TimeUnit.MILLISECONDS);
+                    } catch (InterruptedException ignored) {
+                    }
+                }
+            }
+        }
+        pc.addConnectionEventListener(connectionEventListener);
+        return pc.getConnection();
+    }
+
+    /**
+     * Adds custom properties for this DataSource to the properties defined in the superclass.
+     */
+    @Override
+    public Reference getReference() throws NamingException {
+        Reference ref = super.getReference();
+        ref.add(new StringRefAddr("dataSourceName", dataSourceName));
+        if (initialConnections > 0) {
+            ref.add(new StringRefAddr("initialConnections", Integer.toString(initialConnections)));
+        }
+        if (maxConnections > 0) {
+            ref.add(new StringRefAddr("maxConnections", Integer.toString(maxConnections)));
+        }
+        return ref;
+    }
+
+    @Override
+    public boolean isWrapperFor(Class<?> iface) throws SQLException {
+        return iface.isAssignableFrom(getClass());
+    }
+
+    @Override
+    public <T> T unwrap(Class<T> iface) throws SQLException {
+        if (iface.isAssignableFrom(getClass())) {
+            return iface.cast(this);
+        }
+        throw new SQLException("Cannot unwrap to " + iface.getName());
     }
-    throw new SQLException("Cannot unwrap to " + iface.getName());
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/ds/PGSimpleDataSource.java b/pgjdbc/src/main/java/org/postgresql/ds/PGSimpleDataSource.java
index f3865dc..3911108 100644
--- a/pgjdbc/src/main/java/org/postgresql/ds/PGSimpleDataSource.java
+++ b/pgjdbc/src/main/java/org/postgresql/ds/PGSimpleDataSource.java
@@ -5,16 +5,14 @@
 
 package org.postgresql.ds;
 
-import org.postgresql.ds.common.BaseDataSource;
-import org.postgresql.util.DriverInfo;
-
 import java.io.IOException;
 import java.io.ObjectInputStream;
 import java.io.ObjectOutputStream;
 import java.io.Serializable;
 import java.sql.SQLException;
-
 import javax.sql.DataSource;
+import org.postgresql.ds.common.BaseDataSource;
+import org.postgresql.util.DriverInfo;
 
 /**
  * Simple DataSource which does not perform connection pooling. In order to use the DataSource, you
@@ -25,32 +23,32 @@ import javax.sql.DataSource;
  */
 @SuppressWarnings("serial")
 public class PGSimpleDataSource extends BaseDataSource implements DataSource, Serializable {
-  /**
-   * Gets a description of this DataSource.
-   */
-  @Override
-  public String getDescription() {
-    return "Non-Pooling DataSource from " + DriverInfo.DRIVER_FULL_NAME;
-  }
-
-  private void writeObject(ObjectOutputStream out) throws IOException {
-    writeBaseObject(out);
-  }
-
-  private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
-    readBaseObject(in);
-  }
-
-  @Override
-  public boolean isWrapperFor(Class<?> iface) throws SQLException {
-    return iface.isAssignableFrom(getClass());
-  }
-
-  @Override
-  public <T> T unwrap(Class<T> iface) throws SQLException {
-    if (iface.isAssignableFrom(getClass())) {
-      return iface.cast(this);
+    /**
+     * Gets a description of this DataSource.
+     */
+    @Override
+    public String getDescription() {
+        return "Non-Pooling DataSource from " + DriverInfo.DRIVER_FULL_NAME;
+    }
+
+    private void writeObject(ObjectOutputStream out) throws IOException {
+        writeBaseObject(out);
+    }
+
+    private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
+        readBaseObject(in);
+    }
+
+    @Override
+    public boolean isWrapperFor(Class<?> iface) throws SQLException {
+        return iface.isAssignableFrom(getClass());
+    }
+
+    @Override
+    public <T> T unwrap(Class<T> iface) throws SQLException {
+        if (iface.isAssignableFrom(getClass())) {
+            return iface.cast(this);
+        }
+        throw new SQLException("Cannot unwrap to " + iface.getName());
     }
-    throw new SQLException("Cannot unwrap to " + iface.getName());
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/ds/common/BaseDataSource.java b/pgjdbc/src/main/java/org/postgresql/ds/common/BaseDataSource.java
index 612c26e..cf39f88 100644
--- a/pgjdbc/src/main/java/org/postgresql/ds/common/BaseDataSource.java
+++ b/pgjdbc/src/main/java/org/postgresql/ds/common/BaseDataSource.java
@@ -5,16 +5,6 @@
 
 package org.postgresql.ds.common;
 
-import org.postgresql.Driver;
-import org.postgresql.PGProperty;
-import org.postgresql.jdbc.AutoSave;
-import org.postgresql.jdbc.PreferQueryMode;
-import org.postgresql.util.ExpressionProperties;
-import org.postgresql.util.GT;
-import org.postgresql.util.PSQLException;
-import org.postgresql.util.PSQLState;
-import org.postgresql.util.URLCoder;
-
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
 import java.io.IOException;
@@ -28,13 +18,21 @@ import java.util.Arrays;
 import java.util.Properties;
 import java.util.logging.Level;
 import java.util.logging.Logger;
-
 import javax.naming.NamingException;
 import javax.naming.RefAddr;
 import javax.naming.Reference;
 import javax.naming.Referenceable;
 import javax.naming.StringRefAddr;
 import javax.sql.CommonDataSource;
+import org.postgresql.Driver;
+import org.postgresql.PGProperty;
+import org.postgresql.jdbc.AutoSave;
+import org.postgresql.jdbc.PreferQueryMode;
+import org.postgresql.util.ExpressionProperties;
+import org.postgresql.util.GT;
+import org.postgresql.util.PSQLException;
+import org.postgresql.util.PSQLState;
+import org.postgresql.util.URLCoder;
 
 /**
  * Base class for data sources and related classes.
@@ -43,1806 +41,1800 @@ import javax.sql.CommonDataSource;
  */
 
 public abstract class BaseDataSource implements CommonDataSource, Referenceable {
-  private static final Logger LOGGER = Logger.getLogger(BaseDataSource.class.getName());
-
-  // Standard properties, defined in the JDBC 2.0 Optional Package spec
-  private String[] serverNames = new String[]{"localhost"};
-  private String databaseName = "";
-  private String user;
-  private String password;
-  private int[] portNumbers = new int[]{0};
-
-  // Map for all other properties
-  private Properties properties = new Properties();
-
-  /*
-   * Ensure the driver is loaded as JDBC Driver might be invisible to Java's ServiceLoader.
-   * Usually, {@code Class.forName(...)} is not required as {@link DriverManager} detects JDBC drivers
-   * via {@code META-INF/services/java.sql.Driver} entries. However there might be cases when the driver
-   * is located at the application level classloader, thus it might be required to perform manual
-   * registration of the driver.
-   */
-  static {
-    try {
-      Class.forName("org.postgresql.Driver");
-    } catch (ClassNotFoundException e) {
-      throw new IllegalStateException(
-        "BaseDataSource is unable to load org.postgresql.Driver. Please check if you have proper PostgreSQL JDBC Driver jar on the classpath",
-        e);
-    }
-  }
-
-  /**
-   * Gets a connection to the PostgreSQL database. The database is identified by the DataSource
-   * properties serverName, databaseName, and portNumber. The user to connect as is identified by
-   * the DataSource properties user and password.
-   *
-   * @return A valid database connection.
-   * @throws SQLException Occurs when the database connection cannot be established.
-   */
-  public Connection getConnection() throws SQLException {
-    return getConnection(user, password);
-  }
-
-  /**
-   * Gets a connection to the PostgreSQL database. The database is identified by the DataSource
-   * properties serverName, databaseName, and portNumber. The user to connect as is identified by
-   * the arguments user and password, which override the DataSource properties by the same name.
-   *
-   * @param user     user
-   * @param password password
-   * @return A valid database connection.
-   * @throws SQLException Occurs when the database connection cannot be established.
-   */
-  public Connection getConnection(String user, String password)
-      throws SQLException {
-    try {
-      Connection con = DriverManager.getConnection(getUrl(), user, password);
-      if (LOGGER.isLoggable(Level.FINE)) {
-        LOGGER.log(Level.FINE, "Created a {0} for {1} at {2}",
-            new Object[]{getDescription(), user, getUrl()});
-      }
-      return con;
-    } catch (SQLException e) {
-      LOGGER.log(Level.FINE, "Failed to create a {0} for {1} at {2}: {3}",
-          new Object[]{getDescription(), user, getUrl(), e});
-      throw e;
-    }
-  }
-
-  /**
-   * This implementation don't use a LogWriter.
-   */
-  @Override
-  public PrintWriter getLogWriter() {
-    return null;
-  }
-
-  /**
-   * This implementation don't use a LogWriter.
-   *
-   * @param printWriter Not used
-   */
-  @Override
-  public void setLogWriter(PrintWriter printWriter) {
-    // NOOP
-  }
-
-  /**
-   * Gets the name of the host the PostgreSQL database is running on.
-   *
-   * @return name of the host the PostgreSQL database is running on
-   * @deprecated use {@link #getServerNames()}
-   */
-  @Deprecated
-  public String getServerName() {
-    return serverNames[0];
-  }
-
-  /**
-   * Gets the name of the host(s) the PostgreSQL database is running on.
-   *
-   * @return name of the host(s) the PostgreSQL database is running on
-   */
-  public String[] getServerNames() {
-    return serverNames;
-  }
-
-  /**
-   * Sets the name of the host the PostgreSQL database is running on. If this is changed, it will
-   * only affect future calls to getConnection. The default value is {@code localhost}.
-   *
-   * @param serverName name of the host the PostgreSQL database is running on
-   * @deprecated use {@link #setServerNames(String[])}
-   */
-  @Deprecated
-  public void setServerName(String serverName) {
-    this.setServerNames(new String[]{serverName});
-  }
-
-  /**
-   * Sets the name of the host(s) the PostgreSQL database is running on. If this is changed, it will
-   * only affect future calls to getConnection. The default value is {@code localhost}.
-   *
-   * @param serverNames name of the host(s) the PostgreSQL database is running on
-   */
-  @SuppressWarnings("nullness")
-  public void setServerNames(String [] serverNames) {
-    if (serverNames == null || serverNames.length == 0) {
-      this.serverNames = new String[]{"localhost"};
-    } else {
-      serverNames = serverNames.clone();
-      for (int i = 0; i < serverNames.length; i++) {
-        String serverName = serverNames[i];
-        if (serverName == null || "".equals(serverName)) {
-          serverNames[i] = "localhost";
-        }
-      }
-      this.serverNames = serverNames;
-    }
-  }
-
-  /**
-   * Gets the name of the PostgreSQL database, running on the server identified by the serverName
-   * property.
-   *
-   * @return name of the PostgreSQL database
-   */
-  public String getDatabaseName() {
-    return databaseName;
-  }
-
-  /**
-   * Sets the name of the PostgreSQL database, running on the server identified by the serverName
-   * property. If this is changed, it will only affect future calls to getConnection.
-   *
-   * @param databaseName name of the PostgreSQL database
-   */
-  public void setDatabaseName(String databaseName) {
-    this.databaseName = databaseName;
-  }
-
-  /**
-   * Gets a description of this DataSource-ish thing. Must be customized by subclasses.
-   *
-   * @return description of this DataSource-ish thing
-   */
-  public abstract String getDescription();
-
-  /**
-   * Gets the user to connect as by default. If this is not specified, you must use the
-   * getConnection method which takes a user and password as parameters.
-   *
-   * @return user to connect as by default
-   */
-  public String getUser() {
-    return user;
-  }
-
-  /**
-   * Sets the user to connect as by default. If this is not specified, you must use the
-   * getConnection method which takes a user and password as parameters. If this is changed, it will
-   * only affect future calls to getConnection.
-   *
-   * @param user user to connect as by default
-   */
-  public void setUser(String user) {
-    this.user = user;
-  }
-
-  /**
-   * Gets the password to connect with by default. If this is not specified but a password is needed
-   * to log in, you must use the getConnection method which takes a user and password as parameters.
-   *
-   * @return password to connect with by default
-   */
-  public String getPassword() {
-    return password;
-  }
-
-  /**
-   * Sets the password to connect with by default. If this is not specified but a password is needed
-   * to log in, you must use the getConnection method which takes a user and password as parameters.
-   * If this is changed, it will only affect future calls to getConnection.
-   *
-   * @param password password to connect with by default
-   */
-  public void setPassword(String password) {
-    this.password = password;
-  }
-
-  /**
-   * Gets the port which the PostgreSQL server is listening on for TCP/IP connections.
-   *
-   * @return The port, or 0 if the default port will be used.
-   * @deprecated use {@link #getPortNumbers()}
-   */
-  @Deprecated
-  public int getPortNumber() {
-    if (portNumbers == null || portNumbers.length == 0) {
-      return 0;
-    }
-    return portNumbers[0];
-  }
-
-  /**
-   * Gets the port(s) which the PostgreSQL server is listening on for TCP/IP connections.
-   *
-   * @return The port(s), or 0 if the default port will be used.
-   */
-  public int[] getPortNumbers() {
-    return portNumbers;
-  }
-
-  /**
-   * Sets the port which the PostgreSQL server is listening on for TCP/IP connections. Be sure the
-   * -i flag is passed to postmaster when PostgreSQL is started. If this is not set, or set to 0,
-   * the default port will be used.
-   *
-   * @param portNumber port which the PostgreSQL server is listening on for TCP/IP
-   * @deprecated use {@link #setPortNumbers(int[])}
-   */
-  @Deprecated
-  public void setPortNumber(int portNumber) {
-    setPortNumbers(new int[]{portNumber});
-  }
-
-  /**
-   * Sets the port(s) which the PostgreSQL server is listening on for TCP/IP connections. Be sure the
-   * -i flag is passed to postmaster when PostgreSQL is started. If this is not set, or set to 0,
-   * the default port will be used.
-   *
-   * @param portNumbers port(s) which the PostgreSQL server is listening on for TCP/IP
-   */
-  public void setPortNumbers(int [] portNumbers) {
-    if (portNumbers == null || portNumbers.length == 0) {
-      portNumbers = new int[]{0};
-    }
-    this.portNumbers = Arrays.copyOf(portNumbers, portNumbers.length);
-  }
-
-  /**
-   * @return command line options for this connection
-   */
-  public String getOptions() {
-    return PGProperty.OPTIONS.getOrDefault(properties);
-  }
-
-  /**
-   * Set command line options for this connection
-   *
-   * @param options string to set options to
-   */
-  public void setOptions(String options) {
-    PGProperty.OPTIONS.set(properties, options);
-  }
-
-  /**
-   * @return login timeout
-   * @see PGProperty#LOGIN_TIMEOUT
-   */
-  @Override
-  public int getLoginTimeout() {
-    return PGProperty.LOGIN_TIMEOUT.getIntNoCheck(properties);
-  }
-
-  /**
-   * @param loginTimeout login timeout
-   * @see PGProperty#LOGIN_TIMEOUT
-   */
-  @Override
-  public void setLoginTimeout(int loginTimeout) {
-    PGProperty.LOGIN_TIMEOUT.set(properties, loginTimeout);
-  }
-
-  /**
-   * @return connect timeout
-   * @see PGProperty#CONNECT_TIMEOUT
-   */
-  public int getConnectTimeout() {
-    return PGProperty.CONNECT_TIMEOUT.getIntNoCheck(properties);
-  }
-
-  /**
-   * @param connectTimeout connect timeout
-   * @see PGProperty#CONNECT_TIMEOUT
-   */
-  public void setConnectTimeout(int connectTimeout) {
-    PGProperty.CONNECT_TIMEOUT.set(properties, connectTimeout);
-  }
-
-  /**
-   *
-   * @return GSS ResponseTimeout
-   * @see PGProperty#GSS_RESPONSE_TIMEOUT
-   */
-  public int getGssResponseTimeout() {
-    return PGProperty.GSS_RESPONSE_TIMEOUT.getIntNoCheck(properties);
-  }
-
-  /**
-   *
-   * @param gssResponseTimeout gss response timeout
-   * @see PGProperty#GSS_RESPONSE_TIMEOUT
-   */
-  public void setGssResponseTimeout(int gssResponseTimeout) {
-    PGProperty.GSS_RESPONSE_TIMEOUT.set(properties, gssResponseTimeout);
-  }
-
-  /**
-   *
-   * @return SSL ResponseTimeout
-   * @see PGProperty#SSL_RESPONSE_TIMEOUT
-   */
-  public int getSslResponseTimeout() {
-    return PGProperty.SSL_RESPONSE_TIMEOUT.getIntNoCheck(properties);
-  }
-
-  /**
-   *
-   * @param sslResponseTimeout ssl response timeout
-   * @see PGProperty#SSL_RESPONSE_TIMEOUT
-   */
-  public void setSslResponseTimeout(int sslResponseTimeout) {
-    PGProperty.SSL_RESPONSE_TIMEOUT.set(properties, sslResponseTimeout);
-  }
-
-  /**
-   * @return protocol version
-   * @see PGProperty#PROTOCOL_VERSION
-   */
-  public int getProtocolVersion() {
-    if (!PGProperty.PROTOCOL_VERSION.isPresent(properties)) {
-      return 0;
-    } else {
-      return PGProperty.PROTOCOL_VERSION.getIntNoCheck(properties);
-    }
-  }
-
-  /**
-   * @param protocolVersion protocol version
-   * @see PGProperty#PROTOCOL_VERSION
-   */
-  public void setProtocolVersion(int protocolVersion) {
-    if (protocolVersion == 0) {
-      PGProperty.PROTOCOL_VERSION.set(properties, null);
-    } else {
-      PGProperty.PROTOCOL_VERSION.set(properties, protocolVersion);
-    }
-  }
-
-  /**
-   * @return quoteReturningIdentifiers
-   * @see PGProperty#QUOTE_RETURNING_IDENTIFIERS
-   */
-  public boolean getQuoteReturningIdentifiers() {
-    return PGProperty.QUOTE_RETURNING_IDENTIFIERS.getBoolean(properties);
-  }
-
-  /**
-   * @param quoteIdentifiers indicate whether to quote identifiers
-   * @see PGProperty#QUOTE_RETURNING_IDENTIFIERS
-   */
-  public void setQuoteReturningIdentifiers(boolean quoteIdentifiers) {
-    PGProperty.QUOTE_RETURNING_IDENTIFIERS.set(properties, quoteIdentifiers);
-  }
-
-  /**
-   * @return receive buffer size
-   * @see PGProperty#RECEIVE_BUFFER_SIZE
-   */
-  public int getReceiveBufferSize() {
-    return PGProperty.RECEIVE_BUFFER_SIZE.getIntNoCheck(properties);
-  }
-
-  /**
-   * @param nbytes receive buffer size
-   * @see PGProperty#RECEIVE_BUFFER_SIZE
-   */
-  public void setReceiveBufferSize(int nbytes) {
-    PGProperty.RECEIVE_BUFFER_SIZE.set(properties, nbytes);
-  }
-
-  /**
-   * @return send buffer size
-   * @see PGProperty#SEND_BUFFER_SIZE
-   */
-  public int getSendBufferSize() {
-    return PGProperty.SEND_BUFFER_SIZE.getIntNoCheck(properties);
-  }
-
-  /**
-   * @param nbytes send buffer size
-   * @see PGProperty#SEND_BUFFER_SIZE
-   */
-  public void setSendBufferSize(int nbytes) {
-    PGProperty.SEND_BUFFER_SIZE.set(properties, nbytes);
-  }
-
-  /**
-   * @param count prepare threshold
-   * @see PGProperty#PREPARE_THRESHOLD
-   */
-  public void setPrepareThreshold(int count) {
-    PGProperty.PREPARE_THRESHOLD.set(properties, count);
-  }
-
-  /**
-   * @return prepare threshold
-   * @see PGProperty#PREPARE_THRESHOLD
-   */
-  public int getPrepareThreshold() {
-    return PGProperty.PREPARE_THRESHOLD.getIntNoCheck(properties);
-  }
-
-  /**
-   * @return prepared statement cache size (number of statements per connection)
-   * @see PGProperty#PREPARED_STATEMENT_CACHE_QUERIES
-   */
-  public int getPreparedStatementCacheQueries() {
-    return PGProperty.PREPARED_STATEMENT_CACHE_QUERIES.getIntNoCheck(properties);
-  }
-
-  /**
-   * @param cacheSize prepared statement cache size (number of statements per connection)
-   * @see PGProperty#PREPARED_STATEMENT_CACHE_QUERIES
-   */
-  public void setPreparedStatementCacheQueries(int cacheSize) {
-    PGProperty.PREPARED_STATEMENT_CACHE_QUERIES.set(properties, cacheSize);
-  }
-
-  /**
-   * @return prepared statement cache size (number of megabytes per connection)
-   * @see PGProperty#PREPARED_STATEMENT_CACHE_SIZE_MIB
-   */
-  public int getPreparedStatementCacheSizeMiB() {
-    return PGProperty.PREPARED_STATEMENT_CACHE_SIZE_MIB.getIntNoCheck(properties);
-  }
-
-  /**
-   * @param cacheSize statement cache size (number of megabytes per connection)
-   * @see PGProperty#PREPARED_STATEMENT_CACHE_SIZE_MIB
-   */
-  public void setPreparedStatementCacheSizeMiB(int cacheSize) {
-    PGProperty.PREPARED_STATEMENT_CACHE_SIZE_MIB.set(properties, cacheSize);
-  }
-
-  /**
-   * @return database metadata cache fields size (number of fields cached per connection)
-   * @see PGProperty#DATABASE_METADATA_CACHE_FIELDS
-   */
-  public int getDatabaseMetadataCacheFields() {
-    return PGProperty.DATABASE_METADATA_CACHE_FIELDS.getIntNoCheck(properties);
-  }
-
-  /**
-   * @param cacheSize database metadata cache fields size (number of fields cached per connection)
-   * @see PGProperty#DATABASE_METADATA_CACHE_FIELDS
-   */
-  public void setDatabaseMetadataCacheFields(int cacheSize) {
-    PGProperty.DATABASE_METADATA_CACHE_FIELDS.set(properties, cacheSize);
-  }
-
-  /**
-   * @return database metadata cache fields size (number of megabytes per connection)
-   * @see PGProperty#DATABASE_METADATA_CACHE_FIELDS_MIB
-   */
-  public int getDatabaseMetadataCacheFieldsMiB() {
-    return PGProperty.DATABASE_METADATA_CACHE_FIELDS_MIB.getIntNoCheck(properties);
-  }
-
-  /**
-   * @param cacheSize database metadata cache fields size (number of megabytes per connection)
-   * @see PGProperty#DATABASE_METADATA_CACHE_FIELDS_MIB
-   */
-  public void setDatabaseMetadataCacheFieldsMiB(int cacheSize) {
-    PGProperty.DATABASE_METADATA_CACHE_FIELDS_MIB.set(properties, cacheSize);
-  }
-
-  /**
-   * @param fetchSize default fetch size
-   * @see PGProperty#DEFAULT_ROW_FETCH_SIZE
-   */
-  public void setDefaultRowFetchSize(int fetchSize) {
-    PGProperty.DEFAULT_ROW_FETCH_SIZE.set(properties, fetchSize);
-  }
-
-  /**
-   * @return default fetch size
-   * @see PGProperty#DEFAULT_ROW_FETCH_SIZE
-   */
-  public int getDefaultRowFetchSize() {
-    return PGProperty.DEFAULT_ROW_FETCH_SIZE.getIntNoCheck(properties);
-  }
-
-  /**
-   * @param unknownLength unknown length
-   * @see PGProperty#UNKNOWN_LENGTH
-   */
-  public void setUnknownLength(int unknownLength) {
-    PGProperty.UNKNOWN_LENGTH.set(properties, unknownLength);
-  }
-
-  /**
-   * @return unknown length
-   * @see PGProperty#UNKNOWN_LENGTH
-   */
-  public int getUnknownLength() {
-    return PGProperty.UNKNOWN_LENGTH.getIntNoCheck(properties);
-  }
-
-  /**
-   * @param seconds socket timeout
-   * @see PGProperty#SOCKET_TIMEOUT
-   */
-  public void setSocketTimeout(int seconds) {
-    PGProperty.SOCKET_TIMEOUT.set(properties, seconds);
-  }
-
-  /**
-   * @return socket timeout
-   * @see PGProperty#SOCKET_TIMEOUT
-   */
-  public int getSocketTimeout() {
-    return PGProperty.SOCKET_TIMEOUT.getIntNoCheck(properties);
-  }
-
-  /**
-   * @param seconds timeout that is used for sending cancel command
-   * @see PGProperty#CANCEL_SIGNAL_TIMEOUT
-   */
-  public void setCancelSignalTimeout(int seconds) {
-    PGProperty.CANCEL_SIGNAL_TIMEOUT.set(properties, seconds);
-  }
-
-  /**
-   * @return timeout that is used for sending cancel command in seconds
-   * @see PGProperty#CANCEL_SIGNAL_TIMEOUT
-   */
-  public int getCancelSignalTimeout() {
-    return PGProperty.CANCEL_SIGNAL_TIMEOUT.getIntNoCheck(properties);
-  }
-
-  /**
-   * @param enabled if SSL is enabled
-   * @see PGProperty#SSL
-   */
-  public void setSsl(boolean enabled) {
-    if (enabled) {
-      PGProperty.SSL.set(properties, true);
-    } else {
-      PGProperty.SSL.set(properties, false);
-    }
-  }
-
-  /**
-   * @return true if SSL is enabled
-   * @see PGProperty#SSL
-   */
-  public boolean getSsl() {
-    // "true" if "ssl" is set but empty
-    return PGProperty.SSL.getBoolean(properties) || "".equals(PGProperty.SSL.getOrDefault(properties));
-  }
-
-  /**
-   * @param classname SSL factory class name
-   * @see PGProperty#SSL_FACTORY
-   */
-  public void setSslfactory(String classname) {
-    PGProperty.SSL_FACTORY.set(properties, classname);
-  }
-
-  /**
-   * @return SSL factory class name
-   * @see PGProperty#SSL_FACTORY
-   */
-  public String getSslfactory() {
-    return PGProperty.SSL_FACTORY.getOrDefault(properties);
-  }
-
-  /**
-   * @return SSL mode
-   * @see PGProperty#SSL_MODE
-   */
-  public String getSslMode() {
-    return PGProperty.SSL_MODE.getOrDefault(properties);
-  }
-
-  /**
-   * @param mode SSL mode
-   * @see PGProperty#SSL_MODE
-   */
-  public void setSslMode(String mode) {
-    PGProperty.SSL_MODE.set(properties, mode);
-  }
-
-  /**
-   * @return SSL mode
-   * @see PGProperty#SSL_FACTORY_ARG
-   */
-  @SuppressWarnings("deprecation")
-  public String getSslFactoryArg() {
-    return PGProperty.SSL_FACTORY_ARG.getOrDefault(properties);
-  }
-
-  /**
-   * @param arg argument forwarded to SSL factory
-   * @see PGProperty#SSL_FACTORY_ARG
-   */
-  @SuppressWarnings("deprecation")
-  public void setSslFactoryArg(String arg) {
-    PGProperty.SSL_FACTORY_ARG.set(properties, arg);
-  }
-
-  /**
-   * @return argument forwarded to SSL factory
-   * @see PGProperty#SSL_HOSTNAME_VERIFIER
-   */
-  public String getSslHostnameVerifier() {
-    return PGProperty.SSL_HOSTNAME_VERIFIER.getOrDefault(properties);
-  }
-
-  /**
-   * @param className SSL hostname verifier
-   * @see PGProperty#SSL_HOSTNAME_VERIFIER
-   */
-  public void setSslHostnameVerifier(String className) {
-    PGProperty.SSL_HOSTNAME_VERIFIER.set(properties, className);
-  }
-
-  /**
-   * @return className SSL hostname verifier
-   * @see PGProperty#SSL_CERT
-   */
-  public String getSslCert() {
-    return PGProperty.SSL_CERT.getOrDefault(properties);
-  }
-
-  /**
-   * @param file SSL certificate
-   * @see PGProperty#SSL_CERT
-   */
-  public void setSslCert(String file) {
-    PGProperty.SSL_CERT.set(properties, file);
-  }
-
-  /**
-   * @return SSL certificate
-   * @see PGProperty#SSL_KEY
-   */
-  public String getSslKey() {
-    return PGProperty.SSL_KEY.getOrDefault(properties);
-  }
-
-  /**
-   * @param file SSL key
-   * @see PGProperty#SSL_KEY
-   */
-  public void setSslKey(String file) {
-    PGProperty.SSL_KEY.set(properties, file);
-  }
-
-  /**
-   * @return SSL root certificate
-   * @see PGProperty#SSL_ROOT_CERT
-   */
-  public String getSslRootCert() {
-    return PGProperty.SSL_ROOT_CERT.getOrDefault(properties);
-  }
-
-  /**
-   * @param file SSL root certificate
-   * @see PGProperty#SSL_ROOT_CERT
-   */
-  public void setSslRootCert(String file) {
-    PGProperty.SSL_ROOT_CERT.set(properties, file);
-  }
-
-  /**
-   * @return SSL password
-   * @see PGProperty#SSL_PASSWORD
-   */
-  public String getSslPassword() {
-    return PGProperty.SSL_PASSWORD.getOrDefault(properties);
-  }
-
-  /**
-   * @param password SSL password
-   * @see PGProperty#SSL_PASSWORD
-   */
-  public void setSslPassword(String password) {
-    PGProperty.SSL_PASSWORD.set(properties, password);
-  }
-
-  /**
-   * @return SSL password callback
-   * @see PGProperty#SSL_PASSWORD_CALLBACK
-   */
-  public String getSslPasswordCallback() {
-    return PGProperty.SSL_PASSWORD_CALLBACK.getOrDefault(properties);
-  }
-
-  /**
-   * @param className SSL password callback class name
-   * @see PGProperty#SSL_PASSWORD_CALLBACK
-   */
-  public void setSslPasswordCallback(String className) {
-    PGProperty.SSL_PASSWORD_CALLBACK.set(properties, className);
-  }
-
-  /**
-   * @param applicationName application name
-   * @see PGProperty#APPLICATION_NAME
-   */
-  public void setApplicationName(String applicationName) {
-    PGProperty.APPLICATION_NAME.set(properties, applicationName);
-  }
-
-  /**
-   * @return application name
-   * @see PGProperty#APPLICATION_NAME
-   */
-  public String getApplicationName() {
-    return PGProperty.APPLICATION_NAME.getOrDefault(properties);
-  }
-
-  /**
-   * @param targetServerType target server type
-   * @see PGProperty#TARGET_SERVER_TYPE
-   */
-  public void setTargetServerType(String targetServerType) {
-    PGProperty.TARGET_SERVER_TYPE.set(properties, targetServerType);
-  }
-
-  /**
-   * @return target server type
-   * @see PGProperty#TARGET_SERVER_TYPE
-   */
-  public String getTargetServerType() {
-    return PGProperty.TARGET_SERVER_TYPE.getOrDefault(properties);
-  }
-
-  /**
-   * @param loadBalanceHosts load balance hosts
-   * @see PGProperty#LOAD_BALANCE_HOSTS
-   */
-  public void setLoadBalanceHosts(boolean loadBalanceHosts) {
-    PGProperty.LOAD_BALANCE_HOSTS.set(properties, loadBalanceHosts);
-  }
-
-  /**
-   * @return load balance hosts
-   * @see PGProperty#LOAD_BALANCE_HOSTS
-   */
-  public boolean getLoadBalanceHosts() {
-    return PGProperty.LOAD_BALANCE_HOSTS.isPresent(properties);
-  }
-
-  /**
-   * @param hostRecheckSeconds host recheck seconds
-   * @see PGProperty#HOST_RECHECK_SECONDS
-   */
-  public void setHostRecheckSeconds(int hostRecheckSeconds) {
-    PGProperty.HOST_RECHECK_SECONDS.set(properties, hostRecheckSeconds);
-  }
-
-  /**
-   * @return host recheck seconds
-   * @see PGProperty#HOST_RECHECK_SECONDS
-   */
-  public int getHostRecheckSeconds() {
-    return PGProperty.HOST_RECHECK_SECONDS.getIntNoCheck(properties);
-  }
-
-  /**
-   * @param enabled if TCP keep alive should be enabled
-   * @see PGProperty#TCP_KEEP_ALIVE
-   */
-  public void setTcpKeepAlive(boolean enabled) {
-    PGProperty.TCP_KEEP_ALIVE.set(properties, enabled);
-  }
-
-  /**
-   * @return true if TCP keep alive is enabled
-   * @see PGProperty#TCP_KEEP_ALIVE
-   */
-  public boolean getTcpKeepAlive() {
-    return PGProperty.TCP_KEEP_ALIVE.getBoolean(properties);
-  }
-
-  /**
-   * @param enabled if TCP no delay should be enabled
-   * @see PGProperty#TCP_NO_DELAY
-   */
-  public void setTcpNoDelay(boolean enabled) {
-    PGProperty.TCP_NO_DELAY.set(properties, enabled);
-  }
-
-  /**
-   * @return true if TCP no delay is enabled
-   * @see PGProperty#TCP_NO_DELAY
-   */
-  public boolean getTcpNoDelay() {
-    return PGProperty.TCP_NO_DELAY.getBoolean(properties);
-  }
-
-  /**
-   * @param enabled if binary transfer should be enabled
-   * @see PGProperty#BINARY_TRANSFER
-   */
-  public void setBinaryTransfer(boolean enabled) {
-    PGProperty.BINARY_TRANSFER.set(properties, enabled);
-  }
-
-  /**
-   * @return true if binary transfer is enabled
-   * @see PGProperty#BINARY_TRANSFER
-   */
-  public boolean getBinaryTransfer() {
-    return PGProperty.BINARY_TRANSFER.getBoolean(properties);
-  }
-
-  /**
-   * @param oidList list of OIDs that are allowed to use binary transfer
-   * @see PGProperty#BINARY_TRANSFER_ENABLE
-   */
-  public void setBinaryTransferEnable(String oidList) {
-    PGProperty.BINARY_TRANSFER_ENABLE.set(properties, oidList);
-  }
-
-  /**
-   * @return list of OIDs that are allowed to use binary transfer
-   * @see PGProperty#BINARY_TRANSFER_ENABLE
-   */
-  public String getBinaryTransferEnable() {
-    return PGProperty.BINARY_TRANSFER_ENABLE.getOrDefault(properties);
-  }
-
-  /**
-   * @param oidList list of OIDs that are not allowed to use binary transfer
-   * @see PGProperty#BINARY_TRANSFER_DISABLE
-   */
-  public void setBinaryTransferDisable(String oidList) {
-    PGProperty.BINARY_TRANSFER_DISABLE.set(properties, oidList);
-  }
-
-  /**
-   * @return list of OIDs that are not allowed to use binary transfer
-   * @see PGProperty#BINARY_TRANSFER_DISABLE
-   */
-  public String getBinaryTransferDisable() {
-    return PGProperty.BINARY_TRANSFER_DISABLE.getOrDefault(properties);
-  }
-
-  /**
-   * @return string type
-   * @see PGProperty#STRING_TYPE
-   */
-  public String getStringType() {
-    return PGProperty.STRING_TYPE.getOrDefault(properties);
-  }
-
-  /**
-   * @param stringType string type
-   * @see PGProperty#STRING_TYPE
-   */
-  public void setStringType(String stringType) {
-    PGProperty.STRING_TYPE.set(properties, stringType);
-  }
-
-  /**
-   * @return true if column sanitizer is disabled
-   * @see PGProperty#DISABLE_COLUMN_SANITISER
-   */
-  public boolean isColumnSanitiserDisabled() {
-    return PGProperty.DISABLE_COLUMN_SANITISER.getBoolean(properties);
-  }
-
-  /**
-   * @return true if column sanitizer is disabled
-   * @see PGProperty#DISABLE_COLUMN_SANITISER
-   */
-  public boolean getDisableColumnSanitiser() {
-    return PGProperty.DISABLE_COLUMN_SANITISER.getBoolean(properties);
-  }
-
-  /**
-   * @param disableColumnSanitiser if column sanitizer should be disabled
-   * @see PGProperty#DISABLE_COLUMN_SANITISER
-   */
-  public void setDisableColumnSanitiser(boolean disableColumnSanitiser) {
-    PGProperty.DISABLE_COLUMN_SANITISER.set(properties, disableColumnSanitiser);
-  }
-
-  /**
-   * @return current schema
-   * @see PGProperty#CURRENT_SCHEMA
-   */
-  public String getCurrentSchema() {
-    return PGProperty.CURRENT_SCHEMA.getOrDefault(properties);
-  }
-
-  /**
-   * @param currentSchema current schema
-   * @see PGProperty#CURRENT_SCHEMA
-   */
-  public void setCurrentSchema(String currentSchema) {
-    PGProperty.CURRENT_SCHEMA.set(properties, currentSchema);
-  }
-
-  /**
-   * @return true if connection is readonly
-   * @see PGProperty#READ_ONLY
-   */
-  public boolean getReadOnly() {
-    return PGProperty.READ_ONLY.getBoolean(properties);
-  }
-
-  /**
-   * @param readOnly if connection should be readonly
-   * @see PGProperty#READ_ONLY
-   */
-  public void setReadOnly(boolean readOnly) {
-    PGProperty.READ_ONLY.set(properties, readOnly);
-  }
-
-  /**
-   * @return The behavior when set read only
-   * @see PGProperty#READ_ONLY_MODE
-   */
-  public String getReadOnlyMode() {
-    return PGProperty.READ_ONLY_MODE.getOrDefault(properties);
-  }
-
-  /**
-   * @param mode the behavior when set read only
-   * @see PGProperty#READ_ONLY_MODE
-   */
-  public void setReadOnlyMode(String mode) {
-    PGProperty.READ_ONLY_MODE.set(properties, mode);
-  }
-
-  /**
-   * @return true if driver should log unclosed connections
-   * @see PGProperty#LOG_UNCLOSED_CONNECTIONS
-   */
-  public boolean getLogUnclosedConnections() {
-    return PGProperty.LOG_UNCLOSED_CONNECTIONS.getBoolean(properties);
-  }
-
-  /**
-   * @param enabled true if driver should log unclosed connections
-   * @see PGProperty#LOG_UNCLOSED_CONNECTIONS
-   */
-  public void setLogUnclosedConnections(boolean enabled) {
-    PGProperty.LOG_UNCLOSED_CONNECTIONS.set(properties, enabled);
-  }
-
-  /**
-   * @return true if driver should log include detail in server error messages
-   * @see PGProperty#LOG_SERVER_ERROR_DETAIL
-   */
-  public boolean getLogServerErrorDetail() {
-    return PGProperty.LOG_SERVER_ERROR_DETAIL.getBoolean(properties);
-  }
-
-  /**
-   * @param enabled true if driver should include detail in server error messages
-   * @see PGProperty#LOG_SERVER_ERROR_DETAIL
-   */
-  public void setLogServerErrorDetail(boolean enabled) {
-    PGProperty.LOG_SERVER_ERROR_DETAIL.set(properties, enabled);
-  }
-
-  /**
-   * @return assumed minimal server version
-   * @see PGProperty#ASSUME_MIN_SERVER_VERSION
-   */
-  public String getAssumeMinServerVersion() {
-    return PGProperty.ASSUME_MIN_SERVER_VERSION.getOrDefault(properties);
-  }
-
-  /**
-   * @param minVersion assumed minimal server version
-   * @see PGProperty#ASSUME_MIN_SERVER_VERSION
-   */
-  public void setAssumeMinServerVersion(String minVersion) {
-    PGProperty.ASSUME_MIN_SERVER_VERSION.set(properties, minVersion);
-  }
-
-  /**
-   * This is important in pool-by-transaction scenarios in order to make sure that all the statements
-   * reaches the same connection that is being initialized. If set then we will group the startup
-   * parameters in a transaction
-   * @return whether to group startup parameters or not
-   * @see PGProperty#GROUP_STARTUP_PARAMETERS
-   */
-  public boolean getGroupStartupParameters() {
-    return PGProperty.GROUP_STARTUP_PARAMETERS.getBoolean(properties);
-  }
-
-  /**
-   *
-   * @param groupStartupParameters whether to group startup Parameters in a transaction or not
-   * @see PGProperty#GROUP_STARTUP_PARAMETERS
-   */
-  public void setGroupStartupParameters(boolean groupStartupParameters) {
-    PGProperty.GROUP_STARTUP_PARAMETERS.set(properties, groupStartupParameters);
-  }
-
-  /**
-   * @return JAAS application name
-   * @see PGProperty#JAAS_APPLICATION_NAME
-   */
-  public String getJaasApplicationName() {
-    return PGProperty.JAAS_APPLICATION_NAME.getOrDefault(properties);
-  }
-
-  /**
-   * @param name JAAS application name
-   * @see PGProperty#JAAS_APPLICATION_NAME
-   */
-  public void setJaasApplicationName(String name) {
-    PGProperty.JAAS_APPLICATION_NAME.set(properties, name);
-  }
-
-  /**
-   * @return true if perform JAAS login before GSS authentication
-   * @see PGProperty#JAAS_LOGIN
-   */
-  public boolean getJaasLogin() {
-    return PGProperty.JAAS_LOGIN.getBoolean(properties);
-  }
-
-  /**
-   * @param doLogin true if perform JAAS login before GSS authentication
-   * @see PGProperty#JAAS_LOGIN
-   */
-  public void setJaasLogin(boolean doLogin) {
-    PGProperty.JAAS_LOGIN.set(properties, doLogin);
-  }
-
-  /**
-   * @return Kerberos server name
-   * @see PGProperty#KERBEROS_SERVER_NAME
-   */
-  public String getKerberosServerName() {
-    return PGProperty.KERBEROS_SERVER_NAME.getOrDefault(properties);
-  }
-
-  /**
-   * @param serverName Kerberos server name
-   * @see PGProperty#KERBEROS_SERVER_NAME
-   */
-  public void setKerberosServerName(String serverName) {
-    PGProperty.KERBEROS_SERVER_NAME.set(properties, serverName);
-  }
-
-  /**
-   * @return true if use SPNEGO
-   * @see PGProperty#USE_SPNEGO
-   */
-  public boolean getUseSpNego() {
-    return PGProperty.USE_SPNEGO.getBoolean(properties);
-  }
-
-  /**
-   * @param use true if use SPNEGO
-   * @see PGProperty#USE_SPNEGO
-   */
-  public void setUseSpNego(boolean use) {
-    PGProperty.USE_SPNEGO.set(properties, use);
-  }
-
-  /**
-   * @return GSS mode: auto, sspi, or gssapi
-   * @see PGProperty#GSS_LIB
-   */
-  public String getGssLib() {
-    return PGProperty.GSS_LIB.getOrDefault(properties);
-  }
-
-  /**
-   * @param lib GSS mode: auto, sspi, or gssapi
-   * @see PGProperty#GSS_LIB
-   */
-  public void setGssLib(String lib) {
-    PGProperty.GSS_LIB.set(properties, lib);
-  }
-
-  /**
-   *
-   * @return GSS encryption mode: disable, prefer or require
-   */
-  public String getGssEncMode() {
-    return PGProperty.GSS_ENC_MODE.getOrDefault(properties);
-  }
-
-  /**
-   *
-   * @param mode encryption mode: disable, prefer or require
-   */
-  public void setGssEncMode(String mode) {
-    PGProperty.GSS_ENC_MODE.set(properties, mode);
-  }
-
-  /**
-   * @return SSPI service class
-   * @see PGProperty#SSPI_SERVICE_CLASS
-   */
-  public String getSspiServiceClass() {
-    return PGProperty.SSPI_SERVICE_CLASS.getOrDefault(properties);
-  }
-
-  /**
-   * @param serviceClass SSPI service class
-   * @see PGProperty#SSPI_SERVICE_CLASS
-   */
-  public void setSspiServiceClass(String serviceClass) {
-    PGProperty.SSPI_SERVICE_CLASS.set(properties, serviceClass);
-  }
-
-  /**
-   * @return if connection allows encoding changes
-   * @see PGProperty#ALLOW_ENCODING_CHANGES
-   */
-  public boolean getAllowEncodingChanges() {
-    return PGProperty.ALLOW_ENCODING_CHANGES.getBoolean(properties);
-  }
-
-  /**
-   * @param allow if connection allows encoding changes
-   * @see PGProperty#ALLOW_ENCODING_CHANGES
-   */
-  public void setAllowEncodingChanges(boolean allow) {
-    PGProperty.ALLOW_ENCODING_CHANGES.set(properties, allow);
-  }
-
-  /**
-   * @return socket factory class name
-   * @see PGProperty#SOCKET_FACTORY
-   */
-  public String getSocketFactory() {
-    return PGProperty.SOCKET_FACTORY.getOrDefault(properties);
-  }
-
-  /**
-   * @param socketFactoryClassName socket factory class name
-   * @see PGProperty#SOCKET_FACTORY
-   */
-  public void setSocketFactory(String socketFactoryClassName) {
-    PGProperty.SOCKET_FACTORY.set(properties, socketFactoryClassName);
-  }
-
-  /**
-   * @return socket factory argument
-   * @see PGProperty#SOCKET_FACTORY_ARG
-   */
-  @SuppressWarnings("deprecation")
-  public String getSocketFactoryArg() {
-    return PGProperty.SOCKET_FACTORY_ARG.getOrDefault(properties);
-  }
-
-  /**
-   * @param socketFactoryArg socket factory argument
-   * @see PGProperty#SOCKET_FACTORY_ARG
-   */
-  @SuppressWarnings("deprecation")
-  public void setSocketFactoryArg(String socketFactoryArg) {
-    PGProperty.SOCKET_FACTORY_ARG.set(properties, socketFactoryArg);
-  }
-
-  /**
-   * @param replication set to 'database' for logical replication or 'true' for physical replication
-   * @see PGProperty#REPLICATION
-   */
-  public void setReplication(String replication) {
-    PGProperty.REPLICATION.set(properties, replication);
-  }
-
-  /**
-   * @return 'select', "callIfNoReturn', or 'call'
-   * @see PGProperty#ESCAPE_SYNTAX_CALL_MODE
-   */
-  public String getEscapeSyntaxCallMode() {
-    return PGProperty.ESCAPE_SYNTAX_CALL_MODE.getOrDefault(properties);
-  }
-
-  /**
-   * @param callMode the call mode to use for JDBC escape call syntax
-   * @see PGProperty#ESCAPE_SYNTAX_CALL_MODE
-   */
-  public void setEscapeSyntaxCallMode(String callMode) {
-    PGProperty.ESCAPE_SYNTAX_CALL_MODE.set(properties, callMode);
-  }
-
-  /**
-   * @return null, 'database', or 'true
-   * @see PGProperty#REPLICATION
-   */
-  public String getReplication() {
-    return PGProperty.REPLICATION.getOrDefault(properties);
-  }
-
-  /**
-   * @return the localSocketAddress
-   * @see PGProperty#LOCAL_SOCKET_ADDRESS
-   */
-  public String getLocalSocketAddress() {
-    return PGProperty.LOCAL_SOCKET_ADDRESS.getOrDefault(properties);
-  }
-
-  /**
-   * @param localSocketAddress local address to bind client side to
-   * @see PGProperty#LOCAL_SOCKET_ADDRESS
-   */
-  public void setLocalSocketAddress(String localSocketAddress) {
-    PGProperty.LOCAL_SOCKET_ADDRESS.set(properties, localSocketAddress);
-  }
-
-  /**
-   * This property is no longer used by the driver and will be ignored.
-   * @return loggerLevel in properties
-   * @deprecated Configure via java.util.logging
-   */
-  @Deprecated
-  public String getLoggerLevel() {
-    return PGProperty.LOGGER_LEVEL.getOrDefault(properties);
-  }
-
-  /**
-   * This property is no longer used by the driver and will be ignored.
-   * @param loggerLevel loggerLevel to set, will be ignored
-   * @deprecated Configure via java.util.logging
-   */
-  @Deprecated
-  public void setLoggerLevel(String loggerLevel) {
-    PGProperty.LOGGER_LEVEL.set(properties, loggerLevel);
-  }
-
-  /**
-   * This property is no longer used by the driver and will be ignored.
-   * @return loggerFile in properties
-   * @deprecated Configure via java.util.logging
-   */
-  @Deprecated
-  public String getLoggerFile() {
-    ExpressionProperties exprProps = new ExpressionProperties(properties, System.getProperties());
-    return PGProperty.LOGGER_FILE.getOrDefault(exprProps);
-  }
-
-  /**
-   * This property is no longer used by the driver and will be ignored.
-   * @param loggerFile will be ignored
-   * @deprecated Configure via java.util.logging
-   */
-  @Deprecated
-  public void setLoggerFile(String loggerFile) {
-    PGProperty.LOGGER_FILE.set(properties, loggerFile);
-  }
-
-  /**
-   * Generates a {@link DriverManager} URL from the other properties supplied.
-   *
-   * @return {@link DriverManager} URL from the other properties supplied
-   */
-  public String getUrl() {
-    StringBuilder url = new StringBuilder(100);
-    url.append("jdbc:postgresql://");
-    for (int i = 0; i < serverNames.length; i++) {
-      if (i > 0) {
-        url.append(",");
-      }
-      url.append(serverNames[i]);
-      if (portNumbers != null) {
-        if (serverNames.length != portNumbers.length) {
-          throw new IllegalArgumentException(
-              String.format("Invalid argument: number of port %s entries must equal number of serverNames %s",
-                  Arrays.toString(portNumbers), Arrays.toString(serverNames)));
-        }
-        if (portNumbers.length >= i && portNumbers[i] != 0) {
-          url.append(":").append(portNumbers[i]);
-        }
-
-      }
-    }
-    url.append("/");
-    if (databaseName != null) {
-      url.append(URLCoder.encode(databaseName));
-    }
-
-    StringBuilder query = new StringBuilder(100);
-    for (PGProperty property : PGProperty.values()) {
-      if (property.isPresent(properties)) {
-        if (query.length() != 0) {
-          query.append("&");
-        }
-        query.append(property.getName());
-        query.append("=");
-        String value = property.getOrDefault(properties);
-        query.append(URLCoder.encode(value));
-      }
-    }
-
-    if (query.length() > 0) {
-      url.append("?");
-      url.append(query);
-    }
-
-    return url.toString();
-  }
-
-  /**
-   * Generates a {@link DriverManager} URL from the other properties supplied.
-   *
-   * @return {@link DriverManager} URL from the other properties supplied
-   */
-  public String getURL() {
-    return getUrl();
-  }
-
-  /**
-   * Sets properties from a {@link DriverManager} URL.
-   *
-   * @param url properties to set
-   */
-  public void setUrl(String url) {
-
-    Properties p = Driver.parseURL(url, null);
-
-    if (p == null) {
-      throw new IllegalArgumentException("URL invalid " + url);
-    }
-    for (PGProperty property : PGProperty.values()) {
-      if (!this.properties.containsKey(property.getName())) {
-        setProperty(property, property.getOrDefault(p));
-      }
-    }
-  }
-
-  /**
-   * Sets properties from a {@link DriverManager} URL.
-   * Added to follow convention used in other DBMS.
-   *
-   * @param url properties to set
-   */
-  public void setURL(String url) {
-    setUrl(url);
-  }
-
-  /**
-   *
-   * @return the class name to use for the Authentication Plugin.
-   *         This can be null in which case the default password authentication plugin will be used
-   */
-  public String getAuthenticationPluginClassName() {
-    return PGProperty.AUTHENTICATION_PLUGIN_CLASS_NAME.getOrDefault(properties);
-  }
-
-  /**
-   *
-   * @param className name of a class which implements {@link org.postgresql.plugin.AuthenticationPlugin}
-   *                  This class will be used to get the encoded bytes to be sent to the server as the
-   *                  password to authenticate the user.
-   *
-   */
-  public void setAuthenticationPluginClassName(String className) {
-    PGProperty.AUTHENTICATION_PLUGIN_CLASS_NAME.set(properties, className);
-  }
-
-  public String getProperty(String name) throws SQLException {
-    PGProperty pgProperty = PGProperty.forName(name);
-    if (pgProperty != null) {
-      return getProperty(pgProperty);
-    } else {
-      throw new PSQLException(GT.tr("Unsupported property name: {0}", name),
-        PSQLState.INVALID_PARAMETER_VALUE);
-    }
-  }
-
-  public void setProperty(String name, String value) throws SQLException {
-    PGProperty pgProperty = PGProperty.forName(name);
-    if (pgProperty != null) {
-      setProperty(pgProperty, value);
-    } else {
-      throw new PSQLException(GT.tr("Unsupported property name: {0}", name),
-        PSQLState.INVALID_PARAMETER_VALUE);
-    }
-  }
-
-  public String getProperty(PGProperty property) {
-    return property.getOrDefault(properties);
-  }
-
-  public void setProperty(PGProperty property, String value) {
-    if (value == null) {
-      // TODO: this is not consistent with PGProperty.PROPERTY.set(prop, null)
-      // PGProperty removes an entry for put(null) call, however here we just ignore null
-      return;
-    }
-    switch (property) {
-      case PG_HOST:
-        setServerNames(value.split(","));
-        break;
-      case PG_PORT:
-        String[] ps = value.split(",");
-        int[] ports = new int[ps.length];
-        for (int i = 0; i < ps.length; i++) {
-          try {
-            ports[i] = Integer.parseInt(ps[i]);
-          } catch (NumberFormatException e) {
-            ports[i] = 0;
-          }
-        }
-        setPortNumbers(ports);
-        break;
-      case PG_DBNAME:
-        setDatabaseName(value);
-        break;
-      case USER:
-        setUser(value);
-        break;
-      case PASSWORD:
-        setPassword(value);
-        break;
-      default:
-        properties.setProperty(property.getName(), value);
-    }
-  }
-
-  /**
-   * Generates a reference using the appropriate object factory.
-   *
-   * @return reference using the appropriate object factory
-   */
-  protected Reference createReference() {
-    return new Reference(getClass().getName(), PGObjectFactory.class.getName(), null);
-  }
-
-  @Override
-  public Reference getReference() throws NamingException {
-    Reference ref = createReference();
-    StringBuilder serverString = new StringBuilder();
-    for (int i = 0; i < serverNames.length; i++) {
-      if (i > 0) {
-        serverString.append(",");
-      }
-      String serverName = serverNames[i];
-      serverString.append(serverName);
-    }
-    ref.add(new StringRefAddr("serverName", serverString.toString()));
-
-    StringBuilder portString = new StringBuilder();
-    for (int i = 0; i < portNumbers.length; i++) {
-      if (i > 0) {
-        portString.append(",");
-      }
-      int p = portNumbers[i];
-      portString.append(Integer.toString(p));
-    }
-    ref.add(new StringRefAddr("portNumber", portString.toString()));
-    ref.add(new StringRefAddr("databaseName", databaseName));
-    if (user != null) {
-      ref.add(new StringRefAddr("user", user));
-    }
-    if (password != null) {
-      ref.add(new StringRefAddr("password", password));
-    }
-
-    for (PGProperty property : PGProperty.values()) {
-      if (property.isPresent(properties)) {
-        String value = property.getOrDefault(properties);
-        ref.add(new StringRefAddr(property.getName(), value));
-      }
-    }
-
-    return ref;
-  }
-
-  public void setFromReference(Reference ref) {
-    databaseName = getReferenceProperty(ref, "databaseName");
-    String portNumberString = getReferenceProperty(ref, "portNumber");
-    if (portNumberString != null) {
-      String[] ps = portNumberString.split(",");
-      int[] ports = new int[ps.length];
-      for (int i = 0; i < ps.length; i++) {
+    private static final Logger LOGGER = Logger.getLogger(BaseDataSource.class.getName());
+
+    /*
+     * Ensure the driver is loaded as JDBC Driver might be invisible to Java's ServiceLoader.
+     * Usually, {@code Class.forName(...)} is not required as {@link DriverManager} detects JDBC drivers
+     * via {@code META-INF/services/java.sql.Driver} entries. However, there might be cases when the driver
+     * is located at the application level classloader, thus it might be required to perform manual
+     * registration of the driver.
+     */
+    static {
         try {
-          ports[i] = Integer.parseInt(ps[i]);
-        } catch (NumberFormatException e) {
-          ports[i] = 0;
+            Class.forName("org.postgresql.Driver");
+        } catch (ClassNotFoundException e) {
+            throw new IllegalStateException(
+                    "BaseDataSource is unable to load org.postgresql.Driver. Please check if you have proper PostgreSQL JDBC Driver jar on the classpath",
+                    e);
         }
-      }
-      setPortNumbers(ports);
-    } else {
-      setPortNumbers(null);
     }
-    String serverName = getReferenceProperty(ref, "serverName");
-    setServerNames(serverName.split(","));
 
-    for (PGProperty property : PGProperty.values()) {
-      setProperty(property, getReferenceProperty(ref, property.getName()));
+    // Standard properties, defined in the JDBC 2.0 Optional Package spec
+    private String[] serverNames = new String[]{"localhost"};
+    private String databaseName = "";
+    private String user;
+    private String password;
+    private int[] portNumbers = new int[]{0};
+    // Map for all other properties
+    private Properties properties = new Properties();
+
+    private static String getReferenceProperty(Reference ref, String propertyName) {
+        RefAddr addr = ref.get(propertyName);
+        if (addr == null) {
+            return null;
+        }
+        return (String) addr.getContent();
     }
-  }
 
-  private static String getReferenceProperty(Reference ref, String propertyName) {
-    RefAddr addr = ref.get(propertyName);
-    if (addr == null) {
-      return null;
+    /**
+     * Gets a connection to the PostgreSQL database. The database is identified by the DataSource
+     * properties serverName, databaseName, and portNumber. The user to connect as is identified by
+     * the DataSource properties user and password.
+     *
+     * @return A valid database connection.
+     * @throws SQLException Occurs when the database connection cannot be established.
+     */
+    public Connection getConnection() throws SQLException {
+        return getConnection(user, password);
     }
-    return (String) addr.getContent();
-  }
 
-  protected void writeBaseObject(ObjectOutputStream out) throws IOException {
-    out.writeObject(serverNames);
-    out.writeObject(databaseName);
-    out.writeObject(user);
-    out.writeObject(password);
-    out.writeObject(portNumbers);
+    /**
+     * Gets a connection to the PostgreSQL database. The database is identified by the DataSource
+     * properties serverName, databaseName, and portNumber. The user to connect as is identified by
+     * the arguments user and password, which override the DataSource properties by the same name.
+     *
+     * @param user     user
+     * @param password password
+     * @return A valid database connection.
+     * @throws SQLException Occurs when the database connection cannot be established.
+     */
+    public Connection getConnection(String user, String password)
+            throws SQLException {
+        try {
+            Connection con = DriverManager.getConnection(getUrl(), user, password);
+            if (LOGGER.isLoggable(Level.FINE)) {
+                LOGGER.log(Level.FINE, "Created a {0} for {1} at {2}",
+                        new Object[]{getDescription(), user, getUrl()});
+            }
+            return con;
+        } catch (SQLException e) {
+            LOGGER.log(Level.FINE, "Failed to create a {0} for {1} at {2}: {3}",
+                    new Object[]{getDescription(), user, getUrl(), e});
+            throw e;
+        }
+    }
 
-    out.writeObject(properties);
-  }
+    /**
+     * This implementation does not use a LogWriter.
+     */
+    @Override
+    public PrintWriter getLogWriter() {
+        return null;
+    }
 
-  protected void readBaseObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
-    serverNames = (String[]) in.readObject();
-    databaseName = (String) in.readObject();
-    user = (String) in.readObject();
-    password = (String) in.readObject();
-    portNumbers = (int[]) in.readObject();
+    /**
+     * This implementation does not use a LogWriter.
+     *
+     * @param printWriter Not used
+     */
+    @Override
+    public void setLogWriter(PrintWriter printWriter) {
+        // NOOP
+    }
 
-    properties = (Properties) in.readObject();
-  }
+    /**
+     * Gets the name of the host the PostgreSQL database is running on.
+     * Only the first entry of the configured server names is returned.
+     *
+     * @return name of the host the PostgreSQL database is running on
+     * @deprecated use {@link #getServerNames()}
+     */
+    @Deprecated
+    public String getServerName() {
+        return serverNames[0];
+    }
 
-  public void initializeFrom(BaseDataSource source) throws IOException, ClassNotFoundException {
-    ByteArrayOutputStream baos = new ByteArrayOutputStream();
-    ObjectOutputStream oos = new ObjectOutputStream(baos);
-    source.writeBaseObject(oos);
-    oos.close();
-    ByteArrayInputStream bais = new ByteArrayInputStream(baos.toByteArray());
-    ObjectInputStream ois = new ObjectInputStream(bais);
-    readBaseObject(ois);
-  }
+    /**
+     * Sets the name of the host the PostgreSQL database is running on. If this is changed, it will
+     * only affect future calls to getConnection. The default value is {@code localhost}.
+     * Delegates to {@link #setServerNames(String[])} with a single-element array, so the
+     * same null/empty normalization applies.
+     *
+     * @param serverName name of the host the PostgreSQL database is running on
+     * @deprecated use {@link #setServerNames(String[])}
+     */
+    @Deprecated
+    public void setServerName(String serverName) {
+        this.setServerNames(new String[]{serverName});
+    }
 
-  /**
-   * @return preferred query execution mode
-   * @see PGProperty#PREFER_QUERY_MODE
-   */
-  public PreferQueryMode getPreferQueryMode() {
-    return PreferQueryMode.of(PGProperty.PREFER_QUERY_MODE.getOrDefault(properties));
-  }
+    /**
+     * Gets the name of the host(s) the PostgreSQL database is running on.
+     *
+     * <p>Note: the internal array is returned directly (no defensive copy);
+     * callers should not modify it.</p>
+     *
+     * @return name of the host(s) the PostgreSQL database is running on
+     */
+    public String[] getServerNames() {
+        return serverNames;
+    }
 
-  /**
-   * @param preferQueryMode extended, simple, extendedForPrepared, or extendedCacheEverything
-   * @see PGProperty#PREFER_QUERY_MODE
-   */
-  public void setPreferQueryMode(PreferQueryMode preferQueryMode) {
-    PGProperty.PREFER_QUERY_MODE.set(properties, preferQueryMode.value());
-  }
+    /**
+     * Sets the name of the host(s) the PostgreSQL database is running on. If this is changed, it will
+     * only affect future calls to getConnection. The default value is {@code localhost}.
+     *
+     * @param serverNames name of the host(s) the PostgreSQL database is running on
+     */
+    @SuppressWarnings("nullness")
+    public void setServerNames(String[] serverNames) {
+        if (serverNames == null || serverNames.length == 0) {
+            this.serverNames = new String[]{"localhost"};
+        } else {
+            serverNames = serverNames.clone();
+            for (int i = 0; i < serverNames.length; i++) {
+                String serverName = serverNames[i];
+                if (serverName == null || "".equals(serverName)) {
+                    serverNames[i] = "localhost";
+                }
+            }
+            this.serverNames = serverNames;
+        }
+    }
 
-  /**
-   * @return connection configuration regarding automatic per-query savepoints
-   * @see PGProperty#AUTOSAVE
-   */
-  public AutoSave getAutosave() {
-    return AutoSave.of(PGProperty.AUTOSAVE.getOrDefault(properties));
-  }
+    /**
+     * Gets the name of the PostgreSQL database, running on the server identified by the serverName
+     * property.
+     *
+     * @return name of the PostgreSQL database
+     */
+    public String getDatabaseName() {
+        return databaseName;
+    }
 
-  /**
-   * @param autoSave connection configuration regarding automatic per-query savepoints
-   * @see PGProperty#AUTOSAVE
-   */
-  public void setAutosave(AutoSave autoSave) {
-    PGProperty.AUTOSAVE.set(properties, autoSave.value());
-  }
+    /**
+     * Sets the name of the PostgreSQL database, running on the server identified by the serverName
+     * property. If this is changed, it will only affect future calls to getConnection.
+     *
+     * @param databaseName name of the PostgreSQL database
+     */
+    public void setDatabaseName(String databaseName) {
+        this.databaseName = databaseName;
+    }
 
-  /**
-   * see PGProperty#CLEANUP_SAVEPOINTS
-   *
-   * @return boolean indicating property set
-   */
-  public boolean getCleanupSavepoints() {
-    return PGProperty.CLEANUP_SAVEPOINTS.getBoolean(properties);
-  }
+    /**
+     * Gets a description of this DataSource-ish thing. Must be customized by subclasses.
+     * The description is used in connection log messages.
+     *
+     * @return description of this DataSource-ish thing
+     */
+    public abstract String getDescription();
 
-  /**
-   * see PGProperty#CLEANUP_SAVEPOINTS
-   *
-   * @param cleanupSavepoints will cleanup savepoints after a successful transaction
-   */
-  public void setCleanupSavepoints(boolean cleanupSavepoints) {
-    PGProperty.CLEANUP_SAVEPOINTS.set(properties, cleanupSavepoints);
-  }
+    /**
+     * Gets the user to connect as by default. If this is not specified, you must use the
+     * getConnection method which takes a user and password as parameters.
+     *
+     * @return user to connect as by default
+     */
+    public String getUser() {
+        return user;
+    }
 
-  /**
-   * @return boolean indicating property is enabled or not.
-   * @see PGProperty#REWRITE_BATCHED_INSERTS
-   */
-  public boolean getReWriteBatchedInserts() {
-    return PGProperty.REWRITE_BATCHED_INSERTS.getBoolean(properties);
-  }
+    /**
+     * Sets the user to connect as by default. If this is not specified, you must use the
+     * getConnection method which takes a user and password as parameters. If this is changed, it will
+     * only affect future calls to getConnection.
+     *
+     * @param user user to connect as by default
+     */
+    public void setUser(String user) {
+        this.user = user;
+    }
 
-  /**
-   * @param reWrite boolean value to set the property in the properties collection
-   * @see PGProperty#REWRITE_BATCHED_INSERTS
-   */
-  public void setReWriteBatchedInserts(boolean reWrite) {
-    PGProperty.REWRITE_BATCHED_INSERTS.set(properties, reWrite);
-  }
+    /**
+     * Gets the password to connect with by default. If this is not specified but a password is needed
+     * to log in, you must use the getConnection method which takes a user and password as parameters.
+     *
+     * @return password to connect with by default
+     */
+    public String getPassword() {
+        return password;
+    }
 
-  /**
-   * @return boolean indicating property is enabled or not.
-   * @see PGProperty#HIDE_UNPRIVILEGED_OBJECTS
-   */
-  public boolean getHideUnprivilegedObjects() {
-    return PGProperty.HIDE_UNPRIVILEGED_OBJECTS.getBoolean(properties);
-  }
+    /**
+     * Sets the password to connect with by default. If this is not specified but a password is needed
+     * to log in, you must use the getConnection method which takes a user and password as parameters.
+     * If this is changed, it will only affect future calls to getConnection.
+     *
+     * @param password password to connect with by default
+     */
+    public void setPassword(String password) {
+        this.password = password;
+    }
 
-  /**
-   * @param hideUnprivileged boolean value to set the property in the properties collection
-   * @see PGProperty#HIDE_UNPRIVILEGED_OBJECTS
-   */
-  public void setHideUnprivilegedObjects(boolean hideUnprivileged) {
-    PGProperty.HIDE_UNPRIVILEGED_OBJECTS.set(properties, hideUnprivileged);
-  }
+    /**
+     * Gets the port which the PostgreSQL server is listening on for TCP/IP connections.
+     * Only the first configured port is reported; 0 is returned when no ports are set.
+     *
+     * @return The port, or 0 if the default port will be used.
+     * @deprecated use {@link #getPortNumbers()}
+     */
+    @Deprecated
+    public int getPortNumber() {
+        if (portNumbers == null || portNumbers.length == 0) {
+            return 0;
+        }
+        return portNumbers[0];
+    }
 
-  public String getMaxResultBuffer() {
-    return PGProperty.MAX_RESULT_BUFFER.getOrDefault(properties);
-  }
+    /**
+     * Sets the port which the PostgreSQL server is listening on for TCP/IP connections. Be sure the
+     * -i flag is passed to postmaster when PostgreSQL is started. If this is not set, or set to 0,
+     * the default port will be used.
+     *
+     * @param portNumber port which the PostgreSQL server is listening on for TCP/IP
+     * @deprecated use {@link #setPortNumbers(int[])}
+     */
+    @Deprecated
+    public void setPortNumber(int portNumber) {
+        setPortNumbers(new int[]{portNumber});
+    }
 
-  public void setMaxResultBuffer(String maxResultBuffer) {
-    PGProperty.MAX_RESULT_BUFFER.set(properties, maxResultBuffer);
-  }
+    /**
+     * Gets the port(s) which the PostgreSQL server is listening on for TCP/IP connections.
+     *
+     * @return The port(s), or 0 if the default port will be used.
+     */
+    public int[] getPortNumbers() {
+        return portNumbers;
+    }
 
-  public boolean getAdaptiveFetch() {
-    return PGProperty.ADAPTIVE_FETCH.getBoolean(properties);
-  }
+    /**
+     * Sets the port(s) which the PostgreSQL server is listening on for TCP/IP connections. Be sure the
+     * -i flag is passed to postmaster when PostgreSQL is started. If this is not set, or set to 0,
+     * the default port will be used.
+     *
+     * @param portNumbers port(s) which the PostgreSQL server is listening on for TCP/IP
+     */
+    public void setPortNumbers(int[] portNumbers) {
+        if (portNumbers == null || portNumbers.length == 0) {
+            portNumbers = new int[]{0};
+        }
+        this.portNumbers = Arrays.copyOf(portNumbers, portNumbers.length);
+    }
 
-  public void setAdaptiveFetch(boolean adaptiveFetch) {
-    PGProperty.ADAPTIVE_FETCH.set(properties, adaptiveFetch);
-  }
+    /**
+     * @return command line options for this connection
+     */
+    public String getOptions() {
+        return PGProperty.OPTIONS.getOrDefault(properties);
+    }
 
-  public int getAdaptiveFetchMaximum() {
-    return PGProperty.ADAPTIVE_FETCH_MAXIMUM.getIntNoCheck(properties);
-  }
+    /**
+     * Set command line options for this connection
+     *
+     * @param options string to set options to
+     */
+    public void setOptions(String options) {
+        PGProperty.OPTIONS.set(properties, options);
+    }
 
-  public void setAdaptiveFetchMaximum(int adaptiveFetchMaximum) {
-    PGProperty.ADAPTIVE_FETCH_MAXIMUM.set(properties, adaptiveFetchMaximum);
-  }
+    /**
+     * @return login timeout
+     * @see PGProperty#LOGIN_TIMEOUT
+     */
+    @Override
+    public int getLoginTimeout() {
+        return PGProperty.LOGIN_TIMEOUT.getIntNoCheck(properties);
+    }
 
-  public int getAdaptiveFetchMinimum() {
-    return PGProperty.ADAPTIVE_FETCH_MINIMUM.getIntNoCheck(properties);
-  }
+    /**
+     * @param loginTimeout login timeout
+     * @see PGProperty#LOGIN_TIMEOUT
+     */
+    @Override
+    public void setLoginTimeout(int loginTimeout) {
+        PGProperty.LOGIN_TIMEOUT.set(properties, loginTimeout);
+    }
 
-  public void setAdaptiveFetchMinimum(int adaptiveFetchMinimum) {
-    PGProperty.ADAPTIVE_FETCH_MINIMUM.set(properties, adaptiveFetchMinimum);
-  }
+    /**
+     * @return connect timeout
+     * @see PGProperty#CONNECT_TIMEOUT
+     */
+    public int getConnectTimeout() {
+        return PGProperty.CONNECT_TIMEOUT.getIntNoCheck(properties);
+    }
 
-  @Override
-  public Logger getParentLogger() {
-    return Logger.getLogger("org.postgresql");
-  }
+    /**
+     * @param connectTimeout connect timeout
+     * @see PGProperty#CONNECT_TIMEOUT
+     */
+    public void setConnectTimeout(int connectTimeout) {
+        PGProperty.CONNECT_TIMEOUT.set(properties, connectTimeout);
+    }
 
-  public String getXmlFactoryFactory() {
-    return PGProperty.XML_FACTORY_FACTORY.getOrDefault(properties);
-  }
+    /**
+     * @return GSS ResponseTimeout
+     * @see PGProperty#GSS_RESPONSE_TIMEOUT
+     */
+    public int getGssResponseTimeout() {
+        return PGProperty.GSS_RESPONSE_TIMEOUT.getIntNoCheck(properties);
+    }
 
-  public void setXmlFactoryFactory(String xmlFactoryFactory) {
-    PGProperty.XML_FACTORY_FACTORY.set(properties, xmlFactoryFactory);
-  }
+    /**
+     * @param gssResponseTimeout gss response timeout
+     * @see PGProperty#GSS_RESPONSE_TIMEOUT
+     */
+    public void setGssResponseTimeout(int gssResponseTimeout) {
+        PGProperty.GSS_RESPONSE_TIMEOUT.set(properties, gssResponseTimeout);
+    }
 
-  /*
-   * Alias methods below, these are to help with ease-of-use with other database tools / frameworks
-   * which expect normal java bean getters / setters to exist for the property names.
-   */
+    /**
+     * @return SSL ResponseTimeout
+     * @see PGProperty#SSL_RESPONSE_TIMEOUT
+     */
+    public int getSslResponseTimeout() {
+        return PGProperty.SSL_RESPONSE_TIMEOUT.getIntNoCheck(properties);
+    }
 
-  public boolean isSsl() {
-    return getSsl();
-  }
+    /**
+     * @param sslResponseTimeout ssl response timeout
+     * @see PGProperty#SSL_RESPONSE_TIMEOUT
+     */
+    public void setSslResponseTimeout(int sslResponseTimeout) {
+        PGProperty.SSL_RESPONSE_TIMEOUT.set(properties, sslResponseTimeout);
+    }
 
-  public String getSslfactoryarg() {
-    return getSslFactoryArg();
-  }
+    /**
+     * @return protocol version
+     * @see PGProperty#PROTOCOL_VERSION
+     */
+    public int getProtocolVersion() {
+        if (!PGProperty.PROTOCOL_VERSION.isPresent(properties)) {
+            return 0;
+        } else {
+            return PGProperty.PROTOCOL_VERSION.getIntNoCheck(properties);
+        }
+    }
 
-  public void setSslfactoryarg(final String arg) {
-    setSslFactoryArg(arg);
-  }
+    /**
+     * @param protocolVersion protocol version
+     * @see PGProperty#PROTOCOL_VERSION
+     */
+    public void setProtocolVersion(int protocolVersion) {
+        if (protocolVersion == 0) {
+            PGProperty.PROTOCOL_VERSION.set(properties, null);
+        } else {
+            PGProperty.PROTOCOL_VERSION.set(properties, protocolVersion);
+        }
+    }
 
-  public String getSslcert() {
-    return getSslCert();
-  }
+    /**
+     * @return quoteReturningIdentifiers
+     * @see PGProperty#QUOTE_RETURNING_IDENTIFIERS
+     */
+    public boolean getQuoteReturningIdentifiers() {
+        return PGProperty.QUOTE_RETURNING_IDENTIFIERS.getBoolean(properties);
+    }
 
-  public void setSslcert(final String file) {
-    setSslCert(file);
-  }
+    /**
+     * @param quoteIdentifiers indicate whether to quote identifiers
+     * @see PGProperty#QUOTE_RETURNING_IDENTIFIERS
+     */
+    public void setQuoteReturningIdentifiers(boolean quoteIdentifiers) {
+        PGProperty.QUOTE_RETURNING_IDENTIFIERS.set(properties, quoteIdentifiers);
+    }
 
-  public String getSslmode() {
-    return getSslMode();
-  }
+    /**
+     * @return receive buffer size
+     * @see PGProperty#RECEIVE_BUFFER_SIZE
+     */
+    public int getReceiveBufferSize() {
+        return PGProperty.RECEIVE_BUFFER_SIZE.getIntNoCheck(properties);
+    }
 
-  public void setSslmode(final String mode) {
-    setSslMode(mode);
-  }
+    /**
+     * @param nbytes receive buffer size
+     * @see PGProperty#RECEIVE_BUFFER_SIZE
+     */
+    public void setReceiveBufferSize(int nbytes) {
+        PGProperty.RECEIVE_BUFFER_SIZE.set(properties, nbytes);
+    }
 
-  public String getSslhostnameverifier() {
-    return getSslHostnameVerifier();
-  }
+    /**
+     * @return send buffer size
+     * @see PGProperty#SEND_BUFFER_SIZE
+     */
+    public int getSendBufferSize() {
+        return PGProperty.SEND_BUFFER_SIZE.getIntNoCheck(properties);
+    }
 
-  public void setSslhostnameverifier(final String className) {
-    setSslHostnameVerifier(className);
-  }
+    /**
+     * @param nbytes send buffer size
+     * @see PGProperty#SEND_BUFFER_SIZE
+     */
+    public void setSendBufferSize(int nbytes) {
+        PGProperty.SEND_BUFFER_SIZE.set(properties, nbytes);
+    }
 
-  public String getSslkey() {
-    return getSslKey();
-  }
+    /**
+     * @return prepare threshold
+     * @see PGProperty#PREPARE_THRESHOLD
+     */
+    public int getPrepareThreshold() {
+        return PGProperty.PREPARE_THRESHOLD.getIntNoCheck(properties);
+    }
 
-  public void setSslkey(final String file) {
-    setSslKey(file);
-  }
+    /**
+     * @param count prepare threshold
+     * @see PGProperty#PREPARE_THRESHOLD
+     */
+    public void setPrepareThreshold(int count) {
+        PGProperty.PREPARE_THRESHOLD.set(properties, count);
+    }
 
-  public String getSslrootcert() {
-    return getSslRootCert();
-  }
+    /**
+     * @return prepared statement cache size (number of statements per connection)
+     * @see PGProperty#PREPARED_STATEMENT_CACHE_QUERIES
+     */
+    public int getPreparedStatementCacheQueries() {
+        return PGProperty.PREPARED_STATEMENT_CACHE_QUERIES.getIntNoCheck(properties);
+    }
 
-  public void setSslrootcert(final String file) {
-    setSslRootCert(file);
-  }
+    /**
+     * @param cacheSize prepared statement cache size (number of statements per connection)
+     * @see PGProperty#PREPARED_STATEMENT_CACHE_QUERIES
+     */
+    public void setPreparedStatementCacheQueries(int cacheSize) {
+        PGProperty.PREPARED_STATEMENT_CACHE_QUERIES.set(properties, cacheSize);
+    }
 
-  public String getSslpasswordcallback() {
-    return getSslPasswordCallback();
-  }
+    /**
+     * @return prepared statement cache size (number of megabytes per connection)
+     * @see PGProperty#PREPARED_STATEMENT_CACHE_SIZE_MIB
+     */
+    public int getPreparedStatementCacheSizeMiB() {
+        return PGProperty.PREPARED_STATEMENT_CACHE_SIZE_MIB.getIntNoCheck(properties);
+    }
 
-  public void setSslpasswordcallback(final String className) {
-    setSslPasswordCallback(className);
-  }
+    /**
+     * @param cacheSize statement cache size (number of megabytes per connection)
+     * @see PGProperty#PREPARED_STATEMENT_CACHE_SIZE_MIB
+     */
+    public void setPreparedStatementCacheSizeMiB(int cacheSize) {
+        PGProperty.PREPARED_STATEMENT_CACHE_SIZE_MIB.set(properties, cacheSize);
+    }
 
-  public String getSslpassword() {
-    return getSslPassword();
-  }
+    /**
+     * @return database metadata cache fields size (number of fields cached per connection)
+     * @see PGProperty#DATABASE_METADATA_CACHE_FIELDS
+     */
+    public int getDatabaseMetadataCacheFields() {
+        return PGProperty.DATABASE_METADATA_CACHE_FIELDS.getIntNoCheck(properties);
+    }
 
-  public void setSslpassword(final String sslpassword) {
-    setSslPassword(sslpassword);
-  }
+    /**
+     * @param cacheSize database metadata cache fields size (number of fields cached per connection)
+     * @see PGProperty#DATABASE_METADATA_CACHE_FIELDS
+     */
+    public void setDatabaseMetadataCacheFields(int cacheSize) {
+        PGProperty.DATABASE_METADATA_CACHE_FIELDS.set(properties, cacheSize);
+    }
 
-  public int getRecvBufferSize() {
-    return getReceiveBufferSize();
-  }
+    /**
+     * @return database metadata cache fields size (number of megabytes per connection)
+     * @see PGProperty#DATABASE_METADATA_CACHE_FIELDS_MIB
+     */
+    public int getDatabaseMetadataCacheFieldsMiB() {
+        return PGProperty.DATABASE_METADATA_CACHE_FIELDS_MIB.getIntNoCheck(properties);
+    }
 
-  public void setRecvBufferSize(final int nbytes) {
-    setReceiveBufferSize(nbytes);
-  }
+    /**
+     * @param cacheSize database metadata cache fields size (number of megabytes per connection)
+     * @see PGProperty#DATABASE_METADATA_CACHE_FIELDS_MIB
+     */
+    public void setDatabaseMetadataCacheFieldsMiB(int cacheSize) {
+        PGProperty.DATABASE_METADATA_CACHE_FIELDS_MIB.set(properties, cacheSize);
+    }
 
-  public boolean isAllowEncodingChanges() {
-    return getAllowEncodingChanges();
-  }
+    /**
+     * @return default fetch size
+     * @see PGProperty#DEFAULT_ROW_FETCH_SIZE
+     */
+    public int getDefaultRowFetchSize() {
+        return PGProperty.DEFAULT_ROW_FETCH_SIZE.getIntNoCheck(properties);
+    }
 
-  public boolean isLogUnclosedConnections() {
-    return getLogUnclosedConnections();
-  }
+    /**
+     * @param fetchSize default fetch size
+     * @see PGProperty#DEFAULT_ROW_FETCH_SIZE
+     */
+    public void setDefaultRowFetchSize(int fetchSize) {
+        PGProperty.DEFAULT_ROW_FETCH_SIZE.set(properties, fetchSize);
+    }
 
-  public boolean isTcpKeepAlive() {
-    return getTcpKeepAlive();
-  }
+    /**
+     * @return unknown length
+     * @see PGProperty#UNKNOWN_LENGTH
+     */
+    public int getUnknownLength() {
+        return PGProperty.UNKNOWN_LENGTH.getIntNoCheck(properties);
+    }
 
-  public boolean isReadOnly() {
-    return getReadOnly();
-  }
+    /**
+     * @param unknownLength unknown length
+     * @see PGProperty#UNKNOWN_LENGTH
+     */
+    public void setUnknownLength(int unknownLength) {
+        PGProperty.UNKNOWN_LENGTH.set(properties, unknownLength);
+    }
 
-  public boolean isDisableColumnSanitiser() {
-    return getDisableColumnSanitiser();
-  }
+    /**
+     * @return socket timeout
+     * @see PGProperty#SOCKET_TIMEOUT
+     */
+    public int getSocketTimeout() {
+        return PGProperty.SOCKET_TIMEOUT.getIntNoCheck(properties);
+    }
 
-  public boolean isLoadBalanceHosts() {
-    return getLoadBalanceHosts();
-  }
+    /**
+     * @param seconds socket timeout
+     * @see PGProperty#SOCKET_TIMEOUT
+     */
+    public void setSocketTimeout(int seconds) {
+        PGProperty.SOCKET_TIMEOUT.set(properties, seconds);
+    }
 
-  public boolean isCleanupSavePoints() {
-    return getCleanupSavepoints();
-  }
+    /**
+     * @return timeout that is used for sending cancel command in seconds
+     * @see PGProperty#CANCEL_SIGNAL_TIMEOUT
+     */
+    public int getCancelSignalTimeout() {
+        return PGProperty.CANCEL_SIGNAL_TIMEOUT.getIntNoCheck(properties);
+    }
 
-  public void setCleanupSavePoints(final boolean cleanupSavepoints) {
-    setCleanupSavepoints(cleanupSavepoints);
-  }
+    /**
+     * @param seconds timeout that is used for sending cancel command
+     * @see PGProperty#CANCEL_SIGNAL_TIMEOUT
+     */
+    public void setCancelSignalTimeout(int seconds) {
+        PGProperty.CANCEL_SIGNAL_TIMEOUT.set(properties, seconds);
+    }
 
-  public boolean isReWriteBatchedInserts() {
-    return getReWriteBatchedInserts();
-  }
+    /**
+     * @return true if SSL is enabled
+     * @see PGProperty#SSL
+     */
+    public boolean getSsl() {
+        // "true" if "ssl" is set but empty: a bare "ssl" URL parameter enables SSL
+        // even though the empty string does not parse as a boolean.
+        return PGProperty.SSL.getBoolean(properties) || "".equals(PGProperty.SSL.getOrDefault(properties));
+    }
+
+    /**
+     * @return SSL factory class name
+     * @see PGProperty#SSL_FACTORY
+     */
+    public String getSslfactory() {
+        return PGProperty.SSL_FACTORY.getOrDefault(properties);
+    }
+
+    /**
+     * @param classname SSL factory class name
+     * @see PGProperty#SSL_FACTORY
+     */
+    public void setSslfactory(String classname) {
+        PGProperty.SSL_FACTORY.set(properties, classname);
+    }
+
+    /**
+     * @return SSL mode
+     * @see PGProperty#SSL_MODE
+     */
+    public String getSslMode() {
+        return PGProperty.SSL_MODE.getOrDefault(properties);
+    }
+
+    /**
+     * @param mode SSL mode
+     * @see PGProperty#SSL_MODE
+     */
+    public void setSslMode(String mode) {
+        PGProperty.SSL_MODE.set(properties, mode);
+    }
+
+    /**
+     * @return argument forwarded to the SSL factory
+     * @see PGProperty#SSL_FACTORY_ARG
+     */
+    @SuppressWarnings("deprecation")
+    public String getSslFactoryArg() {
+        return PGProperty.SSL_FACTORY_ARG.getOrDefault(properties);
+    }
+
+    /**
+     * @param arg argument forwarded to SSL factory
+     * @see PGProperty#SSL_FACTORY_ARG
+     */
+    @SuppressWarnings("deprecation")
+    public void setSslFactoryArg(String arg) {
+        PGProperty.SSL_FACTORY_ARG.set(properties, arg);
+    }
+
+    /**
+     * @return SSL hostname verifier class name
+     * @see PGProperty#SSL_HOSTNAME_VERIFIER
+     */
+    public String getSslHostnameVerifier() {
+        return PGProperty.SSL_HOSTNAME_VERIFIER.getOrDefault(properties);
+    }
+
+    /**
+     * @param className SSL hostname verifier
+     * @see PGProperty#SSL_HOSTNAME_VERIFIER
+     */
+    public void setSslHostnameVerifier(String className) {
+        PGProperty.SSL_HOSTNAME_VERIFIER.set(properties, className);
+    }
+
+    /**
+     * @return SSL certificate file
+     * @see PGProperty#SSL_CERT
+     */
+    public String getSslCert() {
+        return PGProperty.SSL_CERT.getOrDefault(properties);
+    }
+
+    /**
+     * @param file SSL certificate
+     * @see PGProperty#SSL_CERT
+     */
+    public void setSslCert(String file) {
+        PGProperty.SSL_CERT.set(properties, file);
+    }
+
+    /**
+     * @return SSL key file
+     * @see PGProperty#SSL_KEY
+     */
+    public String getSslKey() {
+        return PGProperty.SSL_KEY.getOrDefault(properties);
+    }
+
+    /**
+     * @param file SSL key
+     * @see PGProperty#SSL_KEY
+     */
+    public void setSslKey(String file) {
+        PGProperty.SSL_KEY.set(properties, file);
+    }
+
+    /**
+     * @return SSL root certificate
+     * @see PGProperty#SSL_ROOT_CERT
+     */
+    public String getSslRootCert() {
+        return PGProperty.SSL_ROOT_CERT.getOrDefault(properties);
+    }
+
+    /**
+     * @param file SSL root certificate
+     * @see PGProperty#SSL_ROOT_CERT
+     */
+    public void setSslRootCert(String file) {
+        PGProperty.SSL_ROOT_CERT.set(properties, file);
+    }
+
+    /**
+     * @return SSL password
+     * @see PGProperty#SSL_PASSWORD
+     */
+    public String getSslPassword() {
+        return PGProperty.SSL_PASSWORD.getOrDefault(properties);
+    }
+
+    /**
+     * @param password SSL password
+     * @see PGProperty#SSL_PASSWORD
+     */
+    public void setSslPassword(String password) {
+        PGProperty.SSL_PASSWORD.set(properties, password);
+    }
+
+    /**
+     * @return SSL password callback
+     * @see PGProperty#SSL_PASSWORD_CALLBACK
+     */
+    public String getSslPasswordCallback() {
+        return PGProperty.SSL_PASSWORD_CALLBACK.getOrDefault(properties);
+    }
+
+    /**
+     * @param className SSL password callback class name
+     * @see PGProperty#SSL_PASSWORD_CALLBACK
+     */
+    public void setSslPasswordCallback(String className) {
+        PGProperty.SSL_PASSWORD_CALLBACK.set(properties, className);
+    }
+
+    /**
+     * @return application name
+     * @see PGProperty#APPLICATION_NAME
+     */
+    public String getApplicationName() {
+        return PGProperty.APPLICATION_NAME.getOrDefault(properties);
+    }
+
+    /**
+     * @param applicationName application name
+     * @see PGProperty#APPLICATION_NAME
+     */
+    public void setApplicationName(String applicationName) {
+        PGProperty.APPLICATION_NAME.set(properties, applicationName);
+    }
+
+    /**
+     * @return target server type
+     * @see PGProperty#TARGET_SERVER_TYPE
+     */
+    public String getTargetServerType() {
+        return PGProperty.TARGET_SERVER_TYPE.getOrDefault(properties);
+    }
+
+    /**
+     * @param targetServerType target server type
+     * @see PGProperty#TARGET_SERVER_TYPE
+     */
+    public void setTargetServerType(String targetServerType) {
+        PGProperty.TARGET_SERVER_TYPE.set(properties, targetServerType);
+    }
+
+    /**
+     * @return whether connection attempts should load-balance across the configured hosts
+     * @see PGProperty#LOAD_BALANCE_HOSTS
+     */
+    public boolean getLoadBalanceHosts() {
+        // Fix: previously this returned isPresent(properties), which reported "true"
+        // whenever the property was set at all -- even to "false" -- and "false" when
+        // unset. Parse the actual boolean value, consistent with the other boolean
+        // getters in this class (e.g. getTcpKeepAlive, getReadOnly).
+        return PGProperty.LOAD_BALANCE_HOSTS.getBoolean(properties);
+    }
+
+    /**
+     * @return host recheck seconds
+     * @see PGProperty#HOST_RECHECK_SECONDS
+     */
+    public int getHostRecheckSeconds() {
+        return PGProperty.HOST_RECHECK_SECONDS.getIntNoCheck(properties);
+    }
+
+    /**
+     * @param hostRecheckSeconds host recheck seconds
+     * @see PGProperty#HOST_RECHECK_SECONDS
+     */
+    public void setHostRecheckSeconds(int hostRecheckSeconds) {
+        PGProperty.HOST_RECHECK_SECONDS.set(properties, hostRecheckSeconds);
+    }
+
+    /**
+     * @return true if TCP keep alive is enabled
+     * @see PGProperty#TCP_KEEP_ALIVE
+     */
+    public boolean getTcpKeepAlive() {
+        return PGProperty.TCP_KEEP_ALIVE.getBoolean(properties);
+    }
+
+    /**
+     * @return true if TCP no delay is enabled
+     * @see PGProperty#TCP_NO_DELAY
+     */
+    public boolean getTcpNoDelay() {
+        return PGProperty.TCP_NO_DELAY.getBoolean(properties);
+    }
+
+    /**
+     * @param enabled if TCP no delay should be enabled
+     * @see PGProperty#TCP_NO_DELAY
+     */
+    public void setTcpNoDelay(boolean enabled) {
+        PGProperty.TCP_NO_DELAY.set(properties, enabled);
+    }
+
+    /**
+     * @return true if binary transfer is enabled
+     * @see PGProperty#BINARY_TRANSFER
+     */
+    public boolean getBinaryTransfer() {
+        return PGProperty.BINARY_TRANSFER.getBoolean(properties);
+    }
+
+    /**
+     * @param enabled if binary transfer should be enabled
+     * @see PGProperty#BINARY_TRANSFER
+     */
+    public void setBinaryTransfer(boolean enabled) {
+        PGProperty.BINARY_TRANSFER.set(properties, enabled);
+    }
+
+    /**
+     * @return list of OIDs that are allowed to use binary transfer
+     * @see PGProperty#BINARY_TRANSFER_ENABLE
+     */
+    public String getBinaryTransferEnable() {
+        return PGProperty.BINARY_TRANSFER_ENABLE.getOrDefault(properties);
+    }
+
+    /**
+     * @param oidList list of OIDs that are allowed to use binary transfer
+     * @see PGProperty#BINARY_TRANSFER_ENABLE
+     */
+    public void setBinaryTransferEnable(String oidList) {
+        PGProperty.BINARY_TRANSFER_ENABLE.set(properties, oidList);
+    }
+
+    /**
+     * @return list of OIDs that are not allowed to use binary transfer
+     * @see PGProperty#BINARY_TRANSFER_DISABLE
+     */
+    public String getBinaryTransferDisable() {
+        return PGProperty.BINARY_TRANSFER_DISABLE.getOrDefault(properties);
+    }
+
+    /**
+     * @param oidList list of OIDs that are not allowed to use binary transfer
+     * @see PGProperty#BINARY_TRANSFER_DISABLE
+     */
+    public void setBinaryTransferDisable(String oidList) {
+        PGProperty.BINARY_TRANSFER_DISABLE.set(properties, oidList);
+    }
+
+    /**
+     * @return string type
+     * @see PGProperty#STRING_TYPE
+     */
+    public String getStringType() {
+        return PGProperty.STRING_TYPE.getOrDefault(properties);
+    }
+
+    /**
+     * @param stringType string type
+     * @see PGProperty#STRING_TYPE
+     */
+    public void setStringType(String stringType) {
+        PGProperty.STRING_TYPE.set(properties, stringType);
+    }
+
+    /**
+     * @return true if column sanitizer is disabled
+     * @see PGProperty#DISABLE_COLUMN_SANITISER
+     */
+    public boolean isColumnSanitiserDisabled() {
+        return PGProperty.DISABLE_COLUMN_SANITISER.getBoolean(properties);
+    }
+
+    /**
+     * @return true if column sanitizer is disabled (alias for {@link #isColumnSanitiserDisabled()})
+     * @see PGProperty#DISABLE_COLUMN_SANITISER
+     */
+    public boolean getDisableColumnSanitiser() {
+        return PGProperty.DISABLE_COLUMN_SANITISER.getBoolean(properties);
+    }
+
+    /**
+     * @return current schema
+     * @see PGProperty#CURRENT_SCHEMA
+     */
+    public String getCurrentSchema() {
+        return PGProperty.CURRENT_SCHEMA.getOrDefault(properties);
+    }
+
+    /**
+     * @param currentSchema current schema
+     * @see PGProperty#CURRENT_SCHEMA
+     */
+    public void setCurrentSchema(String currentSchema) {
+        PGProperty.CURRENT_SCHEMA.set(properties, currentSchema);
+    }
+
+    /**
+     * @return true if connection is readonly
+     * @see PGProperty#READ_ONLY
+     * @see #setReadOnly(boolean)
+     */
+    public boolean getReadOnly() {
+        return PGProperty.READ_ONLY.getBoolean(properties);
+    }
+
+    /**
+     * @return the behavior when set read only
+     * @see PGProperty#READ_ONLY_MODE
+     */
+    public String getReadOnlyMode() {
+        return PGProperty.READ_ONLY_MODE.getOrDefault(properties);
+    }
+
+    /**
+     * @param mode the behavior when set read only
+     * @see PGProperty#READ_ONLY_MODE
+     */
+    public void setReadOnlyMode(String mode) {
+        PGProperty.READ_ONLY_MODE.set(properties, mode);
+    }
+
+    /**
+     * @return true if driver should log unclosed connections
+     * @see PGProperty#LOG_UNCLOSED_CONNECTIONS
+     */
+    public boolean getLogUnclosedConnections() {
+        return PGProperty.LOG_UNCLOSED_CONNECTIONS.getBoolean(properties);
+    }
+
+    /**
+     * @return true if driver should include detail in server error messages
+     * @see PGProperty#LOG_SERVER_ERROR_DETAIL
+     */
+    public boolean getLogServerErrorDetail() {
+        return PGProperty.LOG_SERVER_ERROR_DETAIL.getBoolean(properties);
+    }
+
+    /**
+     * @param enabled true if driver should include detail in server error messages
+     * @see PGProperty#LOG_SERVER_ERROR_DETAIL
+     */
+    public void setLogServerErrorDetail(boolean enabled) {
+        PGProperty.LOG_SERVER_ERROR_DETAIL.set(properties, enabled);
+    }
+
+    /**
+     * @return assumed minimal server version
+     * @see PGProperty#ASSUME_MIN_SERVER_VERSION
+     */
+    public String getAssumeMinServerVersion() {
+        return PGProperty.ASSUME_MIN_SERVER_VERSION.getOrDefault(properties);
+    }
+
+    /**
+     * @param minVersion assumed minimal server version
+     * @see PGProperty#ASSUME_MIN_SERVER_VERSION
+     */
+    public void setAssumeMinServerVersion(String minVersion) {
+        PGProperty.ASSUME_MIN_SERVER_VERSION.set(properties, minVersion);
+    }
+
+    /**
+     * This is important in pool-by-transaction scenarios in order to make sure that all the statements
+     * reaches the same connection that is being initialized. If set then we will group the startup
+     * parameters in a transaction
+     *
+     * @return whether to group startup parameters or not
+     * @see PGProperty#GROUP_STARTUP_PARAMETERS
+     */
+    public boolean getGroupStartupParameters() {
+        return PGProperty.GROUP_STARTUP_PARAMETERS.getBoolean(properties);
+    }
+
+    /**
+     * @param groupStartupParameters whether to group startup Parameters in a transaction or not
+     * @see PGProperty#GROUP_STARTUP_PARAMETERS
+     */
+    public void setGroupStartupParameters(boolean groupStartupParameters) {
+        PGProperty.GROUP_STARTUP_PARAMETERS.set(properties, groupStartupParameters);
+    }
+
+    /**
+     * @return JAAS application name
+     * @see PGProperty#JAAS_APPLICATION_NAME
+     */
+    public String getJaasApplicationName() {
+        return PGProperty.JAAS_APPLICATION_NAME.getOrDefault(properties);
+    }
+
+    /**
+     * @param name JAAS application name
+     * @see PGProperty#JAAS_APPLICATION_NAME
+     */
+    public void setJaasApplicationName(String name) {
+        PGProperty.JAAS_APPLICATION_NAME.set(properties, name);
+    }
+
+    /**
+     * @return true if perform JAAS login before GSS authentication
+     * @see PGProperty#JAAS_LOGIN
+     */
+    public boolean getJaasLogin() {
+        return PGProperty.JAAS_LOGIN.getBoolean(properties);
+    }
+
+    /**
+     * @param doLogin true if perform JAAS login before GSS authentication
+     * @see PGProperty#JAAS_LOGIN
+     */
+    public void setJaasLogin(boolean doLogin) {
+        PGProperty.JAAS_LOGIN.set(properties, doLogin);
+    }
+
+    /**
+     * @return Kerberos server name
+     * @see PGProperty#KERBEROS_SERVER_NAME
+     */
+    public String getKerberosServerName() {
+        return PGProperty.KERBEROS_SERVER_NAME.getOrDefault(properties);
+    }
+
+    /**
+     * @param serverName Kerberos server name
+     * @see PGProperty#KERBEROS_SERVER_NAME
+     */
+    public void setKerberosServerName(String serverName) {
+        PGProperty.KERBEROS_SERVER_NAME.set(properties, serverName);
+    }
+
+    /**
+     * @return true if use SPNEGO
+     * @see PGProperty#USE_SPNEGO
+     */
+    public boolean getUseSpNego() {
+        return PGProperty.USE_SPNEGO.getBoolean(properties);
+    }
+
+    /**
+     * @param use true if use SPNEGO
+     * @see PGProperty#USE_SPNEGO
+     */
+    public void setUseSpNego(boolean use) {
+        PGProperty.USE_SPNEGO.set(properties, use);
+    }
+
+    /**
+     * @return GSS mode: auto, sspi, or gssapi
+     * @see PGProperty#GSS_LIB
+     */
+    public String getGssLib() {
+        return PGProperty.GSS_LIB.getOrDefault(properties);
+    }
+
+    /**
+     * @param lib GSS mode: auto, sspi, or gssapi
+     * @see PGProperty#GSS_LIB
+     */
+    public void setGssLib(String lib) {
+        PGProperty.GSS_LIB.set(properties, lib);
+    }
+
+    /**
+     * @return GSS encryption mode: disable, prefer or require
+     * @see PGProperty#GSS_ENC_MODE
+     */
+    public String getGssEncMode() {
+        return PGProperty.GSS_ENC_MODE.getOrDefault(properties);
+    }
+
+    /**
+     * @param mode encryption mode: disable, prefer or require
+     * @see PGProperty#GSS_ENC_MODE
+     */
+    public void setGssEncMode(String mode) {
+        PGProperty.GSS_ENC_MODE.set(properties, mode);
+    }
+
+    /**
+     * @return SSPI service class
+     * @see PGProperty#SSPI_SERVICE_CLASS
+     */
+    public String getSspiServiceClass() {
+        return PGProperty.SSPI_SERVICE_CLASS.getOrDefault(properties);
+    }
+
+    /**
+     * @param serviceClass SSPI service class
+     * @see PGProperty#SSPI_SERVICE_CLASS
+     */
+    public void setSspiServiceClass(String serviceClass) {
+        PGProperty.SSPI_SERVICE_CLASS.set(properties, serviceClass);
+    }
+
+    /**
+     * @return if connection allows encoding changes
+     * @see PGProperty#ALLOW_ENCODING_CHANGES
+     */
+    public boolean getAllowEncodingChanges() {
+        return PGProperty.ALLOW_ENCODING_CHANGES.getBoolean(properties);
+    }
+
+    /**
+     * @return socket factory class name
+     * @see PGProperty#SOCKET_FACTORY
+     */
+    public String getSocketFactory() {
+        return PGProperty.SOCKET_FACTORY.getOrDefault(properties);
+    }
+
+    /**
+     * @param socketFactoryClassName socket factory class name
+     * @see PGProperty#SOCKET_FACTORY
+     */
+    public void setSocketFactory(String socketFactoryClassName) {
+        PGProperty.SOCKET_FACTORY.set(properties, socketFactoryClassName);
+    }
+
+    /**
+     * NOTE(review): {@code PGProperty.SOCKET_FACTORY_ARG} is deprecated (hence the
+     * {@code @SuppressWarnings} below) — confirm the intended replacement before new use.
+     *
+     * @return socket factory argument
+     * @see PGProperty#SOCKET_FACTORY_ARG
+     */
+    @SuppressWarnings("deprecation")
+    public String getSocketFactoryArg() {
+        return PGProperty.SOCKET_FACTORY_ARG.getOrDefault(properties);
+    }
+
+    /**
+     * NOTE(review): {@code PGProperty.SOCKET_FACTORY_ARG} is deprecated (hence the
+     * {@code @SuppressWarnings} below) — confirm the intended replacement before new use.
+     *
+     * @param socketFactoryArg socket factory argument
+     * @see PGProperty#SOCKET_FACTORY_ARG
+     */
+    @SuppressWarnings("deprecation")
+    public void setSocketFactoryArg(String socketFactoryArg) {
+        PGProperty.SOCKET_FACTORY_ARG.set(properties, socketFactoryArg);
+    }
+
+    /**
+     * @return 'select', 'callIfNoReturn', or 'call'
+     * @see PGProperty#ESCAPE_SYNTAX_CALL_MODE
+     */
+    public String getEscapeSyntaxCallMode() {
+        return PGProperty.ESCAPE_SYNTAX_CALL_MODE.getOrDefault(properties);
+    }
+
+    /**
+     * @param callMode the call mode to use for JDBC escape call syntax
+     * @see PGProperty#ESCAPE_SYNTAX_CALL_MODE
+     */
+    public void setEscapeSyntaxCallMode(String callMode) {
+        PGProperty.ESCAPE_SYNTAX_CALL_MODE.set(properties, callMode);
+    }
+
+    /**
+     * @return null, 'database', or 'true'
+     * @see PGProperty#REPLICATION
+     */
+    public String getReplication() {
+        return PGProperty.REPLICATION.getOrDefault(properties);
+    }
+
+    /**
+     * @param replication set to 'database' for logical replication or 'true' for physical replication
+     * @see PGProperty#REPLICATION
+     */
+    public void setReplication(String replication) {
+        PGProperty.REPLICATION.set(properties, replication);
+    }
+
+    /**
+     * @return the localSocketAddress
+     * @see PGProperty#LOCAL_SOCKET_ADDRESS
+     */
+    public String getLocalSocketAddress() {
+        return PGProperty.LOCAL_SOCKET_ADDRESS.getOrDefault(properties);
+    }
+
+    /**
+     * @param localSocketAddress local address to bind client side to
+     * @see PGProperty#LOCAL_SOCKET_ADDRESS
+     */
+    public void setLocalSocketAddress(String localSocketAddress) {
+        PGProperty.LOCAL_SOCKET_ADDRESS.set(properties, localSocketAddress);
+    }
+
+    /**
+     * This property is no longer used by the driver and will be ignored.
+     *
+     * @return loggerLevel in properties
+     * @deprecated Configure via java.util.logging
+     */
+    @Deprecated
+    public String getLoggerLevel() {
+        return PGProperty.LOGGER_LEVEL.getOrDefault(properties);
+    }
+
+    /**
+     * This property is no longer used by the driver and will be ignored.
+     *
+     * @param loggerLevel loggerLevel to set, will be ignored
+     * @deprecated Configure via java.util.logging
+     */
+    @Deprecated
+    public void setLoggerLevel(String loggerLevel) {
+        PGProperty.LOGGER_LEVEL.set(properties, loggerLevel);
+    }
+
+    /**
+     * This property is no longer used by the driver and will be ignored.
+     *
+     * <p>Unlike the other getters, the value is resolved through an
+     * {@code ExpressionProperties} view backed by {@code System.getProperties()},
+     * so system properties can participate in the lookup.</p>
+     *
+     * @return loggerFile in properties
+     * @deprecated Configure via java.util.logging
+     */
+    @Deprecated
+    public String getLoggerFile() {
+        ExpressionProperties exprProps = new ExpressionProperties(properties, System.getProperties());
+        return PGProperty.LOGGER_FILE.getOrDefault(exprProps);
+    }
+
+    /**
+     * This property is no longer used by the driver and will be ignored.
+     *
+     * @param loggerFile will be ignored
+     * @deprecated Configure via java.util.logging
+     */
+    @Deprecated
+    public void setLoggerFile(String loggerFile) {
+        PGProperty.LOGGER_FILE.set(properties, loggerFile);
+    }
+
+    /**
+     * Generates a {@link DriverManager} URL from the other properties supplied.
+     *
+     * <p>The URL has the shape {@code jdbc:postgresql://host[:port][,host[:port]...]/db?prop=value&...};
+     * a port of {@code 0} is treated as "use the default" and omitted from the URL.</p>
+     *
+     * @return {@link DriverManager} URL from the other properties supplied
+     * @throws IllegalArgumentException if {@code portNumbers} is non-null but its length
+     *         does not match the number of {@code serverNames}
+     */
+    public String getUrl() {
+        StringBuilder url = new StringBuilder(100);
+        url.append("jdbc:postgresql://");
+        for (int i = 0; i < serverNames.length; i++) {
+            if (i > 0) {
+                url.append(",");
+            }
+            url.append(serverNames[i]);
+            if (portNumbers != null) {
+                if (serverNames.length != portNumbers.length) {
+                    throw new IllegalArgumentException(
+                            String.format("Invalid argument: number of port %s entries must equal number of serverNames %s",
+                                    Arrays.toString(portNumbers), Arrays.toString(serverNames)));
+                }
+                // Fix: was "portNumbers.length >= i", an off-by-one that would permit an
+                // out-of-bounds read of portNumbers[i] should the equality check above ever
+                // be relaxed; "> i" is the correct bounds guard.
+                if (portNumbers.length > i && portNumbers[i] != 0) {
+                    url.append(":").append(portNumbers[i]);
+                }
+
+            }
+        }
+        url.append("/");
+        if (databaseName != null) {
+            url.append(URLCoder.encode(databaseName));
+        }
+
+        // Append every explicitly-set driver property as a URL query parameter.
+        StringBuilder query = new StringBuilder(100);
+        for (PGProperty property : PGProperty.values()) {
+            if (property.isPresent(properties)) {
+                if (query.length() != 0) {
+                    query.append("&");
+                }
+                query.append(property.getName());
+                query.append("=");
+                String value = property.getOrDefault(properties);
+                query.append(URLCoder.encode(value));
+            }
+        }
+
+        if (query.length() > 0) {
+            url.append("?");
+            url.append(query);
+        }
+
+        return url.toString();
+    }
+
+    /**
+     * Sets properties from a {@link DriverManager} URL.
+     *
+     * <p>Only properties that are not already present in this data source are taken from
+     * the URL (see the {@code containsKey} check below): values set explicitly on this
+     * object win over values parsed from the URL.</p>
+     *
+     * @param url properties to set
+     * @throws IllegalArgumentException if the URL cannot be parsed
+     */
+    public void setUrl(String url) {
+
+        Properties p = Driver.parseURL(url, null);
+
+        if (p == null) {
+            throw new IllegalArgumentException("URL invalid " + url);
+        }
+        for (PGProperty property : PGProperty.values()) {
+            // Do not overwrite properties that were already set explicitly.
+            if (!this.properties.containsKey(property.getName())) {
+                setProperty(property, property.getOrDefault(p));
+            }
+        }
+    }
+
+    /**
+     * Generates a {@link DriverManager} URL from the other properties supplied.
+     * Alias for {@link #getUrl()}, following the capitalization convention used by other DBMS.
+     *
+     * @return {@link DriverManager} URL from the other properties supplied
+     */
+    public String getURL() {
+        return getUrl();
+    }
+
+    /**
+     * Sets properties from a {@link DriverManager} URL.
+     * Added to follow convention used in other DBMS. Alias for {@link #setUrl(String)}.
+     *
+     * @param url properties to set
+     */
+    public void setURL(String url) {
+        setUrl(url);
+    }
+
+    /**
+     * @return the class name to use for the Authentication Plugin.
+     * This can be null in which case the default password authentication plugin will be used
+     * @see PGProperty#AUTHENTICATION_PLUGIN_CLASS_NAME
+     */
+    public String getAuthenticationPluginClassName() {
+        return PGProperty.AUTHENTICATION_PLUGIN_CLASS_NAME.getOrDefault(properties);
+    }
+
+    /**
+     * @param className name of a class which implements {@link org.postgresql.plugin.AuthenticationPlugin}
+     *                  This class will be used to get the encoded bytes to be sent to the server as the
+     *                  password to authenticate the user.
+     * @see PGProperty#AUTHENTICATION_PLUGIN_CLASS_NAME
+     */
+    public void setAuthenticationPluginClassName(String className) {
+        PGProperty.AUTHENTICATION_PLUGIN_CLASS_NAME.set(properties, className);
+    }
+
+    /**
+     * Looks up a driver property value by its textual name.
+     *
+     * @param name property name as understood by {@link PGProperty#forName(String)}
+     * @return the current (or default) value of the property
+     * @throws SQLException if {@code name} does not denote a known property
+     */
+    public String getProperty(String name) throws SQLException {
+        PGProperty pgProperty = PGProperty.forName(name);
+        if (pgProperty == null) {
+            throw new PSQLException(GT.tr("Unsupported property name: {0}", name),
+                    PSQLState.INVALID_PARAMETER_VALUE);
+        }
+        return getProperty(pgProperty);
+    }
+
+    /**
+     * Sets a driver property by its textual name.
+     *
+     * @param name  property name as understood by {@link PGProperty#forName(String)}
+     * @param value value to store
+     * @throws SQLException if {@code name} does not denote a known property
+     */
+    public void setProperty(String name, String value) throws SQLException {
+        PGProperty pgProperty = PGProperty.forName(name);
+        if (pgProperty == null) {
+            throw new PSQLException(GT.tr("Unsupported property name: {0}", name),
+                    PSQLState.INVALID_PARAMETER_VALUE);
+        }
+        setProperty(pgProperty, value);
+    }
+
+    /**
+     * @param property the property to read
+     * @return the current value from this data source's properties, or the property's default
+     */
+    public String getProperty(PGProperty property) {
+        return property.getOrDefault(properties);
+    }
+
+    /**
+     * Stores a property value, routing the "connection identity" properties
+     * (host, port, database, user, password) to their dedicated fields and
+     * everything else into the generic properties bag.
+     *
+     * <p>A {@code null} value is silently ignored — callers such as
+     * {@code setFromReference} rely on this to skip absent entries.</p>
+     *
+     * @param property the property to set
+     * @param value    value to store; {@code null} is ignored
+     */
+    public void setProperty(PGProperty property, String value) {
+        if (value == null) {
+            // TODO: this is not consistent with PGProperty.PROPERTY.set(prop, null)
+            // PGProperty removes an entry for put(null) call, however here we just ignore null
+            return;
+        }
+        switch (property) {
+            case PG_HOST:
+                // Multiple hosts may be given as a comma-separated list.
+                setServerNames(value.split(","));
+                break;
+            case PG_PORT:
+                // Parse each comma-separated port; unparseable entries become 0
+                // ("use default port") rather than failing.
+                String[] ps = value.split(",");
+                int[] ports = new int[ps.length];
+                for (int i = 0; i < ps.length; i++) {
+                    try {
+                        ports[i] = Integer.parseInt(ps[i]);
+                    } catch (NumberFormatException e) {
+                        ports[i] = 0;
+                    }
+                }
+                setPortNumbers(ports);
+                break;
+            case PG_DBNAME:
+                setDatabaseName(value);
+                break;
+            case USER:
+                setUser(value);
+                break;
+            case PASSWORD:
+                setPassword(value);
+                break;
+            default:
+                properties.setProperty(property.getName(), value);
+        }
+    }
+
+    /**
+     * Generates a reference using the appropriate object factory.
+     *
+     * <p>Subclasses may override to supply a different factory; the default uses
+     * {@code PGObjectFactory} with this instance's concrete class name.</p>
+     *
+     * @return reference using the appropriate object factory
+     */
+    protected Reference createReference() {
+        return new Reference(getClass().getName(), PGObjectFactory.class.getName(), null);
+    }
+
+    /**
+     * Builds the JNDI {@link Reference} for this data source: server names and port
+     * numbers are stored as single comma-separated entries, followed by database
+     * name, optional credentials, and every explicitly-set driver property.
+     */
+    @Override
+    public Reference getReference() throws NamingException {
+        Reference ref = createReference();
+
+        // Hosts joined into one comma-separated RefAddr.
+        ref.add(new StringRefAddr("serverName", String.join(",", serverNames)));
+
+        // Ports joined the same way; a leading separator is suppressed by
+        // starting with an empty delimiter.
+        StringBuilder portString = new StringBuilder();
+        String separator = "";
+        for (int port : portNumbers) {
+            portString.append(separator).append(port);
+            separator = ",";
+        }
+        ref.add(new StringRefAddr("portNumber", portString.toString()));
+
+        ref.add(new StringRefAddr("databaseName", databaseName));
+        if (user != null) {
+            ref.add(new StringRefAddr("user", user));
+        }
+        if (password != null) {
+            ref.add(new StringRefAddr("password", password));
+        }
+
+        // Persist each explicitly-set driver property under its own name.
+        for (PGProperty property : PGProperty.values()) {
+            if (property.isPresent(properties)) {
+                ref.add(new StringRefAddr(property.getName(), property.getOrDefault(properties)));
+            }
+        }
+
+        return ref;
+    }
+
+    /**
+     * Restores this data source's state from a JNDI {@link Reference} previously
+     * produced by {@link #getReference()}.
+     *
+     * <p>NOTE(review): if the reference lacks a {@code serverName} entry,
+     * {@code serverName.split(",")} below throws a NullPointerException — confirm
+     * whether a null guard (or castNonNull-style assertion) is intended here.</p>
+     *
+     * @param ref the reference to read from
+     */
+    public void setFromReference(Reference ref) {
+        databaseName = getReferenceProperty(ref, "databaseName");
+        String portNumberString = getReferenceProperty(ref, "portNumber");
+        if (portNumberString != null) {
+            // Same lenient parsing as setProperty(PG_PORT, ...): bad entries become 0.
+            String[] ps = portNumberString.split(",");
+            int[] ports = new int[ps.length];
+            for (int i = 0; i < ps.length; i++) {
+                try {
+                    ports[i] = Integer.parseInt(ps[i]);
+                } catch (NumberFormatException e) {
+                    ports[i] = 0;
+                }
+            }
+            setPortNumbers(ports);
+        } else {
+            setPortNumbers(null);
+        }
+        String serverName = getReferenceProperty(ref, "serverName");
+        setServerNames(serverName.split(","));
+
+        // Absent entries yield null, which setProperty(PGProperty, String) ignores.
+        for (PGProperty property : PGProperty.values()) {
+            setProperty(property, getReferenceProperty(ref, property.getName()));
+        }
+    }
+
+    /**
+     * Serializes this data source's base state. The field order written here is a
+     * contract: {@link #readBaseObject(ObjectInputStream)} must read the same
+     * fields in the same order.
+     *
+     * @param out stream to write to
+     * @throws IOException if writing fails
+     */
+    protected void writeBaseObject(ObjectOutputStream out) throws IOException {
+        out.writeObject(serverNames);
+        out.writeObject(databaseName);
+        out.writeObject(user);
+        out.writeObject(password);
+        out.writeObject(portNumbers);
+
+        out.writeObject(properties);
+    }
+
+    /**
+     * Deserializes this data source's base state. Reads fields in exactly the
+     * order {@link #writeBaseObject(ObjectOutputStream)} wrote them.
+     *
+     * @param in stream to read from
+     * @throws IOException            if reading fails
+     * @throws ClassNotFoundException if a serialized class cannot be resolved
+     */
+    protected void readBaseObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
+        serverNames = (String[]) in.readObject();
+        databaseName = (String) in.readObject();
+        user = (String) in.readObject();
+        password = (String) in.readObject();
+        portNumbers = (int[]) in.readObject();
+
+        properties = (Properties) in.readObject();
+    }
+
+    /**
+     * Copies the base state of another data source into this one by round-tripping
+     * it through Java serialization (write {@code source}, then read into {@code this}).
+     *
+     * @param source the data source to copy from
+     * @throws IOException            if serialization fails
+     * @throws ClassNotFoundException if a serialized class cannot be resolved
+     */
+    public void initializeFrom(BaseDataSource source) throws IOException, ClassNotFoundException {
+        ByteArrayOutputStream baos = new ByteArrayOutputStream();
+        // try-with-resources: the original closed oos only on the happy path and
+        // never closed ois; closing the object streams also flushes oos.
+        try (ObjectOutputStream oos = new ObjectOutputStream(baos)) {
+            source.writeBaseObject(oos);
+        }
+        try (ObjectInputStream ois = new ObjectInputStream(new ByteArrayInputStream(baos.toByteArray()))) {
+            readBaseObject(ois);
+        }
+    }
+
+    /**
+     * @return preferred query execution mode
+     * @see PGProperty#PREFER_QUERY_MODE
+     */
+    public PreferQueryMode getPreferQueryMode() {
+        return PreferQueryMode.of(PGProperty.PREFER_QUERY_MODE.getOrDefault(properties));
+    }
+
+    /**
+     * @param preferQueryMode extended, simple, extendedForPrepared, or extendedCacheEverything
+     * @see PGProperty#PREFER_QUERY_MODE
+     */
+    public void setPreferQueryMode(PreferQueryMode preferQueryMode) {
+        PGProperty.PREFER_QUERY_MODE.set(properties, preferQueryMode.value());
+    }
+
+    /**
+     * @return connection configuration regarding automatic per-query savepoints
+     * @see PGProperty#AUTOSAVE
+     */
+    public AutoSave getAutosave() {
+        return AutoSave.of(PGProperty.AUTOSAVE.getOrDefault(properties));
+    }
+
+    /**
+     * @param autoSave connection configuration regarding automatic per-query savepoints
+     * @see PGProperty#AUTOSAVE
+     */
+    public void setAutosave(AutoSave autoSave) {
+        PGProperty.AUTOSAVE.set(properties, autoSave.value());
+    }
+
+    /**
+     * @return boolean indicating property set
+     * @see PGProperty#CLEANUP_SAVEPOINTS
+     */
+    public boolean getCleanupSavepoints() {
+        return PGProperty.CLEANUP_SAVEPOINTS.getBoolean(properties);
+    }
+
+    /**
+     * @param cleanupSavepoints will cleanup savepoints after a successful transaction
+     * @see PGProperty#CLEANUP_SAVEPOINTS
+     */
+    public void setCleanupSavepoints(boolean cleanupSavepoints) {
+        PGProperty.CLEANUP_SAVEPOINTS.set(properties, cleanupSavepoints);
+    }
+
+    /**
+     * @return boolean indicating property is enabled or not.
+     * @see PGProperty#REWRITE_BATCHED_INSERTS
+     */
+    public boolean getReWriteBatchedInserts() {
+        return PGProperty.REWRITE_BATCHED_INSERTS.getBoolean(properties);
+    }
+
+    /**
+     * @return boolean indicating property is enabled or not.
+     * @see PGProperty#HIDE_UNPRIVILEGED_OBJECTS
+     */
+    public boolean getHideUnprivilegedObjects() {
+        return PGProperty.HIDE_UNPRIVILEGED_OBJECTS.getBoolean(properties);
+    }
+
+    /**
+     * @param hideUnprivileged boolean value to set the property in the properties collection
+     * @see PGProperty#HIDE_UNPRIVILEGED_OBJECTS
+     */
+    public void setHideUnprivilegedObjects(boolean hideUnprivileged) {
+        PGProperty.HIDE_UNPRIVILEGED_OBJECTS.set(properties, hideUnprivileged);
+    }
+
+    /**
+     * @return maximum result buffer size, as a raw property string
+     * @see PGProperty#MAX_RESULT_BUFFER
+     */
+    public String getMaxResultBuffer() {
+        return PGProperty.MAX_RESULT_BUFFER.getOrDefault(properties);
+    }
+
+    /**
+     * @param maxResultBuffer maximum result buffer size to set
+     * @see PGProperty#MAX_RESULT_BUFFER
+     */
+    public void setMaxResultBuffer(String maxResultBuffer) {
+        PGProperty.MAX_RESULT_BUFFER.set(properties, maxResultBuffer);
+    }
+
+    /**
+     * @return true if adaptive fetch is enabled
+     * @see PGProperty#ADAPTIVE_FETCH
+     */
+    public boolean getAdaptiveFetch() {
+        return PGProperty.ADAPTIVE_FETCH.getBoolean(properties);
+    }
+
+    /**
+     * @param adaptiveFetch whether adaptive fetch should be enabled
+     * @see PGProperty#ADAPTIVE_FETCH
+     */
+    public void setAdaptiveFetch(boolean adaptiveFetch) {
+        PGProperty.ADAPTIVE_FETCH.set(properties, adaptiveFetch);
+    }
+
+    /**
+     * @return upper bound for adaptive fetch size
+     * @see PGProperty#ADAPTIVE_FETCH_MAXIMUM
+     */
+    public int getAdaptiveFetchMaximum() {
+        return PGProperty.ADAPTIVE_FETCH_MAXIMUM.getIntNoCheck(properties);
+    }
+
+    /**
+     * @param adaptiveFetchMaximum upper bound for adaptive fetch size
+     * @see PGProperty#ADAPTIVE_FETCH_MAXIMUM
+     */
+    public void setAdaptiveFetchMaximum(int adaptiveFetchMaximum) {
+        PGProperty.ADAPTIVE_FETCH_MAXIMUM.set(properties, adaptiveFetchMaximum);
+    }
+
+    /**
+     * @return lower bound for adaptive fetch size
+     * @see PGProperty#ADAPTIVE_FETCH_MINIMUM
+     */
+    public int getAdaptiveFetchMinimum() {
+        return PGProperty.ADAPTIVE_FETCH_MINIMUM.getIntNoCheck(properties);
+    }
+
+    /**
+     * @param adaptiveFetchMinimum lower bound for adaptive fetch size
+     * @see PGProperty#ADAPTIVE_FETCH_MINIMUM
+     */
+    public void setAdaptiveFetchMinimum(int adaptiveFetchMinimum) {
+        PGProperty.ADAPTIVE_FETCH_MINIMUM.set(properties, adaptiveFetchMinimum);
+    }
+
+    /**
+     * @return the shared parent {@link Logger} named {@code org.postgresql}
+     */
+    @Override
+    public Logger getParentLogger() {
+        return Logger.getLogger("org.postgresql");
+    }
+
+    /**
+     * @return XML factory factory class name
+     * @see PGProperty#XML_FACTORY_FACTORY
+     */
+    public String getXmlFactoryFactory() {
+        return PGProperty.XML_FACTORY_FACTORY.getOrDefault(properties);
+    }
+
+    /**
+     * @param xmlFactoryFactory XML factory factory class name
+     * @see PGProperty#XML_FACTORY_FACTORY
+     */
+    public void setXmlFactoryFactory(String xmlFactoryFactory) {
+        PGProperty.XML_FACTORY_FACTORY.set(properties, xmlFactoryFactory);
+    }
+
+    /**
+     * @return true if SSL is enabled (alias for {@code getSsl()})
+     * @see PGProperty#SSL
+     */
+    public boolean isSsl() {
+        return getSsl();
+    }
+
+    /**
+     * @param enabled if SSL is enabled
+     * @see PGProperty#SSL
+     */
+    public void setSsl(boolean enabled) {
+        // Both branches of the former if/else performed the identical call with
+        // the branch condition as the argument; collapsed to a single statement.
+        PGProperty.SSL.set(properties, enabled);
+    }
+
+    /** Alias for {@link #getSslFactoryArg()}. */
+    public String getSslfactoryarg() {
+        return getSslFactoryArg();
+    }
+
+    /** Alias for {@code setSslFactoryArg(String)}. */
+    public void setSslfactoryarg(final String arg) {
+        setSslFactoryArg(arg);
+    }
+
+    /** Alias for {@code getSslCert()}. */
+    public String getSslcert() {
+        return getSslCert();
+    }
+
+    /** Alias for {@code setSslCert(String)}. */
+    public void setSslcert(final String file) {
+        setSslCert(file);
+    }
+
+    /** Alias for {@code getSslMode()}. */
+    public String getSslmode() {
+        return getSslMode();
+    }
+
+    /** Alias for {@code setSslMode(String)}. */
+    public void setSslmode(final String mode) {
+        setSslMode(mode);
+    }
+
+    /*
+     * Alias methods (this one and the surrounding lowercase ssl*/recvBuffer accessors,
+     * including several defined just above this banner) exist to help with ease-of-use
+     * with other database tools / frameworks which expect normal java bean getters /
+     * setters to exist for the property names.
+     */
+
+    /** Alias for {@code getSslHostnameVerifier()}. */
+    public String getSslhostnameverifier() {
+        return getSslHostnameVerifier();
+    }
+
+    /** Alias for {@code setSslHostnameVerifier(String)}. */
+    public void setSslhostnameverifier(final String className) {
+        setSslHostnameVerifier(className);
+    }
+
+    /** Alias for {@code getSslKey()}. */
+    public String getSslkey() {
+        return getSslKey();
+    }
+
+    /** Alias for {@code setSslKey(String)}. */
+    public void setSslkey(final String file) {
+        setSslKey(file);
+    }
+
+    /** Alias for {@code getSslRootCert()}. */
+    public String getSslrootcert() {
+        return getSslRootCert();
+    }
+
+    /** Alias for {@code setSslRootCert(String)}. */
+    public void setSslrootcert(final String file) {
+        setSslRootCert(file);
+    }
+
+    /** Alias for {@code getSslPasswordCallback()}. */
+    public String getSslpasswordcallback() {
+        return getSslPasswordCallback();
+    }
+
+    /** Alias for {@code setSslPasswordCallback(String)}. */
+    public void setSslpasswordcallback(final String className) {
+        setSslPasswordCallback(className);
+    }
+
+    /** Alias for {@code getSslPassword()}. */
+    public String getSslpassword() {
+        return getSslPassword();
+    }
+
+    /** Alias for {@code setSslPassword(String)}. */
+    public void setSslpassword(final String sslpassword) {
+        setSslPassword(sslpassword);
+    }
+
+    /** Alias for {@code getReceiveBufferSize()}. */
+    public int getRecvBufferSize() {
+        return getReceiveBufferSize();
+    }
+
+    /** Alias for {@code setReceiveBufferSize(int)}. */
+    public void setRecvBufferSize(final int nbytes) {
+        setReceiveBufferSize(nbytes);
+    }
+
+    /** Alias for {@link #getAllowEncodingChanges()}. */
+    public boolean isAllowEncodingChanges() {
+        return getAllowEncodingChanges();
+    }
+
+    /**
+     * @param allow if connection allows encoding changes
+     * @see PGProperty#ALLOW_ENCODING_CHANGES
+     */
+    public void setAllowEncodingChanges(boolean allow) {
+        PGProperty.ALLOW_ENCODING_CHANGES.set(properties, allow);
+    }
+
+    /** Alias for {@link #getLogUnclosedConnections()}. */
+    public boolean isLogUnclosedConnections() {
+        return getLogUnclosedConnections();
+    }
+
+    /**
+     * @param enabled true if driver should log unclosed connections
+     * @see PGProperty#LOG_UNCLOSED_CONNECTIONS
+     */
+    public void setLogUnclosedConnections(boolean enabled) {
+        PGProperty.LOG_UNCLOSED_CONNECTIONS.set(properties, enabled);
+    }
+
+    /** Alias for {@code getTcpKeepAlive()}. */
+    public boolean isTcpKeepAlive() {
+        return getTcpKeepAlive();
+    }
+
+    /**
+     * @param enabled if TCP keep alive should be enabled
+     * @see PGProperty#TCP_KEEP_ALIVE
+     */
+    public void setTcpKeepAlive(boolean enabled) {
+        PGProperty.TCP_KEEP_ALIVE.set(properties, enabled);
+    }
+
+    /** Alias for {@link #getReadOnly()}. */
+    public boolean isReadOnly() {
+        return getReadOnly();
+    }
+
+    /**
+     * @param readOnly if connection should be readonly
+     * @see PGProperty#READ_ONLY
+     */
+    public void setReadOnly(boolean readOnly) {
+        PGProperty.READ_ONLY.set(properties, readOnly);
+    }
+
+    /** Alias for {@link #getDisableColumnSanitiser()}. */
+    public boolean isDisableColumnSanitiser() {
+        return getDisableColumnSanitiser();
+    }
+
+    /**
+     * @param disableColumnSanitiser if column sanitizer should be disabled
+     * @see PGProperty#DISABLE_COLUMN_SANITISER
+     */
+    public void setDisableColumnSanitiser(boolean disableColumnSanitiser) {
+        PGProperty.DISABLE_COLUMN_SANITISER.set(properties, disableColumnSanitiser);
+    }
+
+    /** Alias for {@code getLoadBalanceHosts()}. */
+    public boolean isLoadBalanceHosts() {
+        return getLoadBalanceHosts();
+    }
+
+    /**
+     * @param loadBalanceHosts load balance hosts
+     * @see PGProperty#LOAD_BALANCE_HOSTS
+     */
+    public void setLoadBalanceHosts(boolean loadBalanceHosts) {
+        PGProperty.LOAD_BALANCE_HOSTS.set(properties, loadBalanceHosts);
+    }
+
+    /** Alias for {@link #getCleanupSavepoints()} (note the differing capitalization). */
+    public boolean isCleanupSavePoints() {
+        return getCleanupSavepoints();
+    }
+
+    /** Alias for {@link #setCleanupSavepoints(boolean)} (note the differing capitalization). */
+    public void setCleanupSavePoints(final boolean cleanupSavepoints) {
+        setCleanupSavepoints(cleanupSavepoints);
+    }
+
+    /** Alias for {@link #getReWriteBatchedInserts()}. */
+    public boolean isReWriteBatchedInserts() {
+        return getReWriteBatchedInserts();
+    }
+
+    /**
+     * @param reWrite boolean value to set the property in the properties collection
+     * @see PGProperty#REWRITE_BATCHED_INSERTS
+     */
+    public void setReWriteBatchedInserts(boolean reWrite) {
+        PGProperty.REWRITE_BATCHED_INSERTS.set(properties, reWrite);
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/ds/common/PGObjectFactory.java b/pgjdbc/src/main/java/org/postgresql/ds/common/PGObjectFactory.java
index d02613a..8002a55 100644
--- a/pgjdbc/src/main/java/org/postgresql/ds/common/PGObjectFactory.java
+++ b/pgjdbc/src/main/java/org/postgresql/ds/common/PGObjectFactory.java
@@ -5,17 +5,15 @@
 
 package org.postgresql.ds.common;
 
-import org.postgresql.ds.PGConnectionPoolDataSource;
-import org.postgresql.ds.PGPoolingDataSource;
-import org.postgresql.ds.PGSimpleDataSource;
-
 import java.util.Hashtable;
-
 import javax.naming.Context;
 import javax.naming.Name;
 import javax.naming.RefAddr;
 import javax.naming.Reference;
 import javax.naming.spi.ObjectFactory;
+import org.postgresql.ds.PGConnectionPoolDataSource;
+import org.postgresql.ds.PGPoolingDataSource;
+import org.postgresql.ds.PGSimpleDataSource;
 
 /**
  * Returns a DataSource-ish thing based on a JNDI reference. In the case of a SimpleDataSource or
@@ -27,77 +25,77 @@ import javax.naming.spi.ObjectFactory;
  * @author Aaron Mulder (ammulder@chariotsolutions.com)
  */
 public class PGObjectFactory implements ObjectFactory {
-  /**
-   * Dereferences a PostgreSQL DataSource. Other types of references are ignored.
-   */
-  @Override
-  public Object getObjectInstance(Object obj, Name name, Context nameCtx,
-      Hashtable<?, ?> environment) throws Exception {
-    Reference ref = (Reference) obj;
-    String className = ref.getClassName();
-    // Old names are here for those who still use them
-    if ("org.postgresql.ds.PGSimpleDataSource".equals(className)
-        || "org.postgresql.jdbc2.optional.SimpleDataSource".equals(className)
-        || "org.postgresql.jdbc3.Jdbc3SimpleDataSource".equals(className)) {
-      return loadSimpleDataSource(ref);
-    } else if ("org.postgresql.ds.PGConnectionPoolDataSource".equals(className)
-        || "org.postgresql.jdbc2.optional.ConnectionPool".equals(className)
-        || "org.postgresql.jdbc3.Jdbc3ConnectionPool".equals(className)) {
-      return loadConnectionPool(ref);
-    } else if ("org.postgresql.ds.PGPoolingDataSource".equals(className)
-        || "org.postgresql.jdbc2.optional.PoolingDataSource".equals(className)
-        || "org.postgresql.jdbc3.Jdbc3PoolingDataSource".equals(className)) {
-      return loadPoolingDataSource(ref);
-    } else {
-      return null;
+    /**
+     * Dereferences a PostgreSQL DataSource. Other types of references are ignored.
+     */
+    @Override
+    public Object getObjectInstance(Object obj, Name name, Context nameCtx,
+                                    Hashtable<?, ?> environment) throws Exception {
+        Reference ref = (Reference) obj;
+        String className = ref.getClassName();
+        // Old names are here for those who still use them
+        if ("org.postgresql.ds.PGSimpleDataSource".equals(className)
+                || "org.postgresql.jdbc2.optional.SimpleDataSource".equals(className)
+                || "org.postgresql.jdbc3.Jdbc3SimpleDataSource".equals(className)) {
+            return loadSimpleDataSource(ref);
+        } else if ("org.postgresql.ds.PGConnectionPoolDataSource".equals(className)
+                || "org.postgresql.jdbc2.optional.ConnectionPool".equals(className)
+                || "org.postgresql.jdbc3.Jdbc3ConnectionPool".equals(className)) {
+            return loadConnectionPool(ref);
+        } else if ("org.postgresql.ds.PGPoolingDataSource".equals(className)
+                || "org.postgresql.jdbc2.optional.PoolingDataSource".equals(className)
+                || "org.postgresql.jdbc3.Jdbc3PoolingDataSource".equals(className)) {
+            return loadPoolingDataSource(ref);
+        } else {
+            return null;
+        }
     }
-  }
 
-  @SuppressWarnings("deprecation")
-  private Object loadPoolingDataSource(Reference ref) {
-    // If DataSource exists, return it
-    String name = getProperty(ref, "dataSourceName");
-    PGPoolingDataSource pds = PGPoolingDataSource.getDataSource(name);
-    if (pds != null) {
-      return pds;
+    @SuppressWarnings("deprecation")
+    private Object loadPoolingDataSource(Reference ref) {
+        // If DataSource exists, return it
+        String name = getProperty(ref, "dataSourceName");
+        PGPoolingDataSource pds = PGPoolingDataSource.getDataSource(name);
+        if (pds != null) {
+            return pds;
+        }
+        // Otherwise, create a new one
+        pds = new PGPoolingDataSource();
+        pds.setDataSourceName(name);
+        loadBaseDataSource(pds, ref);
+        String min = getProperty(ref, "initialConnections");
+        if (min != null) {
+            pds.setInitialConnections(Integer.parseInt(min));
+        }
+        String max = getProperty(ref, "maxConnections");
+        if (max != null) {
+            pds.setMaxConnections(Integer.parseInt(max));
+        }
+        return pds;
     }
-    // Otherwise, create a new one
-    pds = new PGPoolingDataSource();
-    pds.setDataSourceName(name);
-    loadBaseDataSource(pds, ref);
-    String min = getProperty(ref, "initialConnections");
-    if (min != null) {
-      pds.setInitialConnections(Integer.parseInt(min));
+
+    private Object loadSimpleDataSource(Reference ref) {
+        PGSimpleDataSource ds = new PGSimpleDataSource();
+        return loadBaseDataSource(ds, ref);
     }
-    String max = getProperty(ref, "maxConnections");
-    if (max != null) {
-      pds.setMaxConnections(Integer.parseInt(max));
+
+    private Object loadConnectionPool(Reference ref) {
+        PGConnectionPoolDataSource cp = new PGConnectionPoolDataSource();
+        return loadBaseDataSource(cp, ref);
     }
-    return pds;
-  }
 
-  private Object loadSimpleDataSource(Reference ref) {
-    PGSimpleDataSource ds = new PGSimpleDataSource();
-    return loadBaseDataSource(ds, ref);
-  }
+    protected Object loadBaseDataSource(BaseDataSource ds, Reference ref) {
+        ds.setFromReference(ref);
 
-  private Object loadConnectionPool(Reference ref) {
-    PGConnectionPoolDataSource cp = new PGConnectionPoolDataSource();
-    return loadBaseDataSource(cp, ref);
-  }
-
-  protected Object loadBaseDataSource(BaseDataSource ds, Reference ref) {
-    ds.setFromReference(ref);
-
-    return ds;
-  }
-
-  protected String getProperty(Reference ref, String s) {
-    RefAddr addr = ref.get(s);
-    if (addr == null) {
-      return null;
+        return ds;
+    }
+
+    protected String getProperty(Reference ref, String s) {
+        RefAddr addr = ref.get(s);
+        if (addr == null) {
+            return null;
+        }
+        return (String) addr.getContent();
     }
-    return (String) addr.getContent();
-  }
 
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/fastpath/Fastpath.java b/pgjdbc/src/main/java/org/postgresql/fastpath/Fastpath.java
index 92a8028..e474c2f 100644
--- a/pgjdbc/src/main/java/org/postgresql/fastpath/Fastpath.java
+++ b/pgjdbc/src/main/java/org/postgresql/fastpath/Fastpath.java
@@ -27,295 +27,295 @@ import java.util.logging.Level;
  * <p>It is based around the file src/interfaces/libpq/fe-exec.c</p>
  *
  * @deprecated This API is somewhat obsolete, as one may achieve similar performance
- *         and greater functionality by setting up a prepared statement to define
- *         the function call. Then, executing the statement with binary transmission of parameters
- *         and results substitutes for a fast-path function call.
+ * and greater functionality by setting up a prepared statement to define
+ * the function call. Then, executing the statement with binary transmission of parameters
+ * and results substitutes for a fast-path function call.
  */
 @Deprecated
 public class Fastpath {
-  // Java passes oids around as longs, but in the backend
-  // it's an unsigned int, so we use this to make the conversion
-  // of long -> signed int which the backend interprets as unsigned.
-  private static final long NUM_OIDS = 4294967296L; // 2^32
+    // Java passes oids around as longs, but in the backend
+    // it's an unsigned int, so we use this to make the conversion
+    // of long -> signed int which the backend interprets as unsigned.
+    private static final long NUM_OIDS = 4294967296L; // 2^32
 
-  // This maps the functions names to their id's (possible unique just
-  // to a connection).
-  private final Map<String, Integer> func = new HashMap<>();
-  private final QueryExecutor executor;
-  private final BaseConnection connection;
+    // This maps the function names to their id's (possibly unique just
+    // to a connection).
+    private final Map<String, Integer> func = new HashMap<>();
+    private final QueryExecutor executor;
+    private final BaseConnection connection;
 
-  /**
-   * Initialises the fastpath system.
-   *
-   * @param conn BaseConnection to attach to
-   */
-  public Fastpath(BaseConnection conn) {
-    this.connection = conn;
-    this.executor = conn.getQueryExecutor();
-  }
-
-  /**
-   * Send a function call to the PostgreSQL backend.
-   *
-   * @param fnId Function id
-   * @param resultType True if the result is a numeric (Integer or Long)
-   * @param args FastpathArguments to pass to fastpath
-   * @return null if no data, Integer if an integer result, Long if a long result, or byte[]
-   *         otherwise
-   * @throws SQLException if a database-access error occurs.
-   * @deprecated please use {@link #fastpath(int, FastpathArg[])}
-   */
-  @Deprecated
-  public Object fastpath(int fnId, boolean resultType, FastpathArg[] args)
-      throws SQLException {
-    // Run it.
-    byte[] returnValue = fastpath(fnId, args);
-
-    // Interpret results.
-    if (!resultType || returnValue == null) {
-      return returnValue;
+    /**
+     * Initialises the fastpath system.
+     *
+     * @param conn BaseConnection to attach to
+     */
+    public Fastpath(BaseConnection conn) {
+        this.connection = conn;
+        this.executor = conn.getQueryExecutor();
     }
 
-    if (returnValue.length == 4) {
-      return ByteConverter.int4(returnValue, 0);
-    } else if (returnValue.length == 8) {
-      return ByteConverter.int8(returnValue, 0);
-    } else {
-      throw new PSQLException(
-          GT.tr("Fastpath call {0} - No result was returned and we expected a numeric.", fnId),
-          PSQLState.NO_DATA);
-    }
-  }
-
-  /**
-   * Send a function call to the PostgreSQL backend.
-   *
-   * @param fnId Function id
-   * @param args FastpathArguments to pass to fastpath
-   * @return null if no data, byte[] otherwise
-   * @throws SQLException if a database-access error occurs.
-   */
-  public byte [] fastpath(int fnId, FastpathArg[] args) throws SQLException {
-    // Turn fastpath array into a parameter list.
-    ParameterList params = executor.createFastpathParameters(args.length);
-    for (int i = 0; i < args.length; i++) {
-      args[i].populateParameter(params, i + 1);
+    /**
+     * Creates a FastpathArg with an oid parameter. This is here instead of a constructor of
+     * FastpathArg because the constructor can't tell the difference between a long that's really
+     * an int8 and a long that's an oid.
+     *
+     * @param oid input oid
+     * @return FastpathArg with an oid parameter
+     */
+    public static FastpathArg createOIDArg(long oid) {
+        if (oid > Integer.MAX_VALUE) {
+            oid -= NUM_OIDS;
+        }
+        return new FastpathArg((int) oid);
     }
 
-    // Run it.
-    return executor.fastpathCall(fnId, params, connection.getAutoCommit());
-  }
+    /**
+     * Send a function call to the PostgreSQL backend.
+     *
+     * @param fnId       Function id
+     * @param resultType True if the result is a numeric (Integer or Long)
+     * @param args       FastpathArguments to pass to fastpath
+     * @return null if no data, Integer if an integer result, Long if a long result, or byte[]
+     * otherwise
+     * @throws SQLException if a database-access error occurs.
+     * @deprecated please use {@link #fastpath(int, FastpathArg[])}
+     */
+    @Deprecated
+    public Object fastpath(int fnId, boolean resultType, FastpathArg[] args)
+            throws SQLException {
+        // Run it.
+        byte[] returnValue = fastpath(fnId, args);
 
-  /**
-   * @param name Function name
-   * @param resulttype True if the result is a numeric (Integer or Long)
-   * @param args FastpathArguments to pass to fastpath
-   * @return null if no data, Integer if an integer result, Long if a long result, or byte[]
-   *         otherwise
-   * @throws SQLException if something goes wrong
-   * @see #fastpath(int, FastpathArg[])
-   * @see #fastpath(String, FastpathArg[])
-   * @deprecated Use {@link #getData(String, FastpathArg[])} if you expect a binary result, or one
-   *             of {@link #getInteger(String, FastpathArg[])} or
-   *             {@link #getLong(String, FastpathArg[])} if you expect a numeric one
-   */
-  @Deprecated
-  public Object fastpath(String name, boolean resulttype, FastpathArg[] args)
-      throws SQLException {
-    connection.getLogger().log(Level.FINEST, "Fastpath: calling {0}", name);
-    return fastpath(getID(name), resulttype, args);
-  }
+        // Interpret results.
+        if (!resultType || returnValue == null) {
+            return returnValue;
+        }
 
-  /**
-   * <p>Send a function call to the PostgreSQL backend by name.</p>
-   *
-   * <p>Note: the mapping for the procedure name to function id needs to exist, usually to an earlier
-   * call to addfunction().</p>
-   *
-   * <p>This is the preferred method to call, as function id's can/may change between versions of the
-   * backend.</p>
-   *
-   * <p>For an example of how this works, refer to org.postgresql.largeobject.LargeObject</p>
-   *
-   * @param name Function name
-   * @param args FastpathArguments to pass to fastpath
-   * @return null if no data, byte[] otherwise
-   * @throws SQLException if name is unknown or if a database-access error occurs.
-   * @see org.postgresql.largeobject.LargeObject
-   */
-  public byte [] fastpath(String name, FastpathArg[] args) throws SQLException {
-    connection.getLogger().log(Level.FINEST, "Fastpath: calling {0}", name);
-    return fastpath(getID(name), args);
-  }
-
-  /**
-   * This convenience method assumes that the return value is an integer.
-   *
-   * @param name Function name
-   * @param args Function arguments
-   * @return integer result
-   * @throws SQLException if a database-access error occurs or no result
-   */
-  public int getInteger(String name, FastpathArg[] args) throws SQLException {
-    byte[] returnValue = fastpath(name, args);
-    if (returnValue == null) {
-      throw new PSQLException(
-          GT.tr("Fastpath call {0} - No result was returned and we expected an integer.", name),
-          PSQLState.NO_DATA);
+        if (returnValue.length == 4) {
+            return ByteConverter.int4(returnValue, 0);
+        } else if (returnValue.length == 8) {
+            return ByteConverter.int8(returnValue, 0);
+        } else {
+            throw new PSQLException(
+                    GT.tr("Fastpath call {0} - No result was returned and we expected a numeric.", fnId),
+                    PSQLState.NO_DATA);
+        }
     }
 
-    if (returnValue.length == 4) {
-      return ByteConverter.int4(returnValue, 0);
-    } else {
-      throw new PSQLException(GT.tr(
-          "Fastpath call {0} - No result was returned or wrong size while expecting an integer.",
-          name), PSQLState.NO_DATA);
-    }
-  }
+    /**
+     * Send a function call to the PostgreSQL backend.
+     *
+     * @param fnId Function id
+     * @param args FastpathArguments to pass to fastpath
+     * @return null if no data, byte[] otherwise
+     * @throws SQLException if a database-access error occurs.
+     */
+    public byte[] fastpath(int fnId, FastpathArg[] args) throws SQLException {
+        // Turn fastpath array into a parameter list.
+        ParameterList params = executor.createFastpathParameters(args.length);
+        for (int i = 0; i < args.length; i++) {
+            args[i].populateParameter(params, i + 1);
+        }
 
-  /**
-   * This convenience method assumes that the return value is a long (bigint).
-   *
-   * @param name Function name
-   * @param args Function arguments
-   * @return long result
-   * @throws SQLException if a database-access error occurs or no result
-   */
-  public long getLong(String name, FastpathArg[] args) throws SQLException {
-    byte[] returnValue = fastpath(name, args);
-    if (returnValue == null) {
-      throw new PSQLException(
-          GT.tr("Fastpath call {0} - No result was returned and we expected a long.", name),
-          PSQLState.NO_DATA);
-    }
-    if (returnValue.length == 8) {
-      return ByteConverter.int8(returnValue, 0);
-
-    } else {
-      throw new PSQLException(
-          GT.tr("Fastpath call {0} - No result was returned or wrong size while expecting a long.",
-              name),
-          PSQLState.NO_DATA);
-    }
-  }
-
-  /**
-   * This convenience method assumes that the return value is an oid.
-   *
-   * @param name Function name
-   * @param args Function arguments
-   * @return oid of the given call
-   * @throws SQLException if a database-access error occurs or no result
-   */
-  public long getOID(String name, FastpathArg[] args) throws SQLException {
-    long oid = getInteger(name, args);
-    if (oid < 0) {
-      oid += NUM_OIDS;
-    }
-    return oid;
-  }
-
-  /**
-   * This convenience method assumes that the return value is not an Integer.
-   *
-   * @param name Function name
-   * @param args Function arguments
-   * @return byte[] array containing result
-   * @throws SQLException if a database-access error occurs or no result
-   */
-  public byte [] getData(String name, FastpathArg[] args) throws SQLException {
-    return fastpath(name, args);
-  }
-
-  /**
-   * <p>This adds a function to our lookup table.</p>
-   *
-   * <p>User code should use the addFunctions method, which is based upon a query, rather than hard
-   * coding the oid. The oid for a function is not guaranteed to remain static, even on different
-   * servers of the same version.</p>
-   *
-   * @param name Function name
-   * @param fnid Function id
-   */
-  public void addFunction(String name, int fnid) {
-    func.put(name, fnid);
-  }
-
-  /**
-   * <p>This takes a ResultSet containing two columns. Column 1 contains the function name, Column 2
-   * the oid.</p>
-   *
-   * <p>It reads the entire ResultSet, loading the values into the function table.</p>
-   *
-   * <p><b>REMEMBER</b> to close() the resultset after calling this!!</p>
-   *
-   * <p><b><em>Implementation note about function name lookups:</em></b></p>
-   *
-   * <p>PostgreSQL stores the function id's and their corresponding names in the pg_proc table. To
-   * speed things up locally, instead of querying each function from that table when required, a
-   * HashMap is used. Also, only the function's required are entered into this table, keeping
-   * connection times as fast as possible.</p>
-   *
-   * <p>The org.postgresql.largeobject.LargeObject class performs a query upon it's startup, and passes
-   * the returned ResultSet to the addFunctions() method here.</p>
-   *
-   * <p>Once this has been done, the LargeObject api refers to the functions by name.</p>
-   *
-   * <p>Don't think that manually converting them to the oid's will work. Ok, they will for now, but
-   * they can change during development (there was some discussion about this for V7.0), so this is
-   * implemented to prevent any unwarranted headaches in the future.</p>
-   *
-   * @param rs ResultSet
-   * @throws SQLException if a database-access error occurs.
-   * @see org.postgresql.largeobject.LargeObjectManager
-   */
-  public void addFunctions(ResultSet rs) throws SQLException {
-    while (rs.next()) {
-      func.put(rs.getString(1), rs.getInt(2));
-    }
-  }
-
-  /**
-   * <p>This returns the function id associated by its name.</p>
-   *
-   * <p>If addFunction() or addFunctions() have not been called for this name, then an SQLException is
-   * thrown.</p>
-   *
-   * @param name Function name to lookup
-   * @return Function ID for fastpath call
-   * @throws SQLException is function is unknown.
-   */
-  public int getID(String name) throws SQLException {
-    Integer id = func.get(name);
-
-    // may be we could add a lookup to the database here, and store the result
-    // in our lookup table, throwing the exception if that fails.
-    // We must, however, ensure that if we do, any existing ResultSet is
-    // unaffected, otherwise we could break user code.
-    //
-    // so, until we know we can do this (needs testing, on the TODO list)
-    // for now, we throw the exception and do no lookups.
-    if (id == null) {
-      throw new PSQLException(GT.tr("The fastpath function {0} is unknown.", name),
-          PSQLState.UNEXPECTED_ERROR);
+        // Run it.
+        return executor.fastpathCall(fnId, params, connection.getAutoCommit());
     }
 
-    return id;
-  }
-
-  /**
-   * Creates a FastpathArg with an oid parameter. This is here instead of a constructor of
-   * FastpathArg because the constructor can't tell the difference between an long that's really
-   * int8 and a long thats an oid.
-   *
-   * @param oid input oid
-   * @return FastpathArg with an oid parameter
-   */
-  public static FastpathArg createOIDArg(long oid) {
-    if (oid > Integer.MAX_VALUE) {
-      oid -= NUM_OIDS;
+    /**
+     * @param name       Function name
+     * @param resulttype True if the result is a numeric (Integer or Long)
+     * @param args       FastpathArguments to pass to fastpath
+     * @return null if no data, Integer if an integer result, Long if a long result, or byte[]
+     * otherwise
+     * @throws SQLException if something goes wrong
+     * @see #fastpath(int, FastpathArg[])
+     * @see #fastpath(String, FastpathArg[])
+     * @deprecated Use {@link #getData(String, FastpathArg[])} if you expect a binary result, or one
+     * of {@link #getInteger(String, FastpathArg[])} or
+     * {@link #getLong(String, FastpathArg[])} if you expect a numeric one
+     */
+    @Deprecated
+    public Object fastpath(String name, boolean resulttype, FastpathArg[] args)
+            throws SQLException {
+        connection.getLogger().log(Level.FINEST, "Fastpath: calling {0}", name);
+        return fastpath(getID(name), resulttype, args);
+    }
+
+    /**
+     * <p>Send a function call to the PostgreSQL backend by name.</p>
+     *
+     * <p>Note: the mapping for the procedure name to function id needs to exist, usually due to an earlier
+     * call to addFunction().</p>
+     *
+     * <p>This is the preferred method to call, as function id's can/may change between versions of the
+     * backend.</p>
+     *
+     * <p>For an example of how this works, refer to org.postgresql.largeobject.LargeObject</p>
+     *
+     * @param name Function name
+     * @param args FastpathArguments to pass to fastpath
+     * @return null if no data, byte[] otherwise
+     * @throws SQLException if name is unknown or if a database-access error occurs.
+     * @see org.postgresql.largeobject.LargeObject
+     */
+    public byte[] fastpath(String name, FastpathArg[] args) throws SQLException {
+        connection.getLogger().log(Level.FINEST, "Fastpath: calling {0}", name);
+        return fastpath(getID(name), args);
+    }
+
+    /**
+     * This convenience method assumes that the return value is an integer.
+     *
+     * @param name Function name
+     * @param args Function arguments
+     * @return integer result
+     * @throws SQLException if a database-access error occurs or no result
+     */
+    public int getInteger(String name, FastpathArg[] args) throws SQLException {
+        byte[] returnValue = fastpath(name, args);
+        if (returnValue == null) {
+            throw new PSQLException(
+                    GT.tr("Fastpath call {0} - No result was returned and we expected an integer.", name),
+                    PSQLState.NO_DATA);
+        }
+
+        if (returnValue.length == 4) {
+            return ByteConverter.int4(returnValue, 0);
+        } else {
+            throw new PSQLException(GT.tr(
+                    "Fastpath call {0} - No result was returned or wrong size while expecting an integer.",
+                    name), PSQLState.NO_DATA);
+        }
+    }
+
+    /**
+     * This convenience method assumes that the return value is a long (bigint).
+     *
+     * @param name Function name
+     * @param args Function arguments
+     * @return long result
+     * @throws SQLException if a database-access error occurs or no result
+     */
+    public long getLong(String name, FastpathArg[] args) throws SQLException {
+        byte[] returnValue = fastpath(name, args);
+        if (returnValue == null) {
+            throw new PSQLException(
+                    GT.tr("Fastpath call {0} - No result was returned and we expected a long.", name),
+                    PSQLState.NO_DATA);
+        }
+        if (returnValue.length == 8) {
+            return ByteConverter.int8(returnValue, 0);
+
+        } else {
+            throw new PSQLException(
+                    GT.tr("Fastpath call {0} - No result was returned or wrong size while expecting a long.",
+                            name),
+                    PSQLState.NO_DATA);
+        }
+    }
+
+    /**
+     * This convenience method assumes that the return value is an oid.
+     *
+     * @param name Function name
+     * @param args Function arguments
+     * @return oid of the given call
+     * @throws SQLException if a database-access error occurs or no result
+     */
+    public long getOID(String name, FastpathArg[] args) throws SQLException {
+        long oid = getInteger(name, args);
+        if (oid < 0) {
+            oid += NUM_OIDS;
+        }
+        return oid;
+    }
+
+    /**
+     * This convenience method assumes that the return value is not an Integer.
+     *
+     * @param name Function name
+     * @param args Function arguments
+     * @return byte[] array containing result
+     * @throws SQLException if a database-access error occurs or no result
+     */
+    public byte[] getData(String name, FastpathArg[] args) throws SQLException {
+        return fastpath(name, args);
+    }
+
+    /**
+     * <p>This adds a function to our lookup table.</p>
+     *
+     * <p>User code should use the addFunctions method, which is based upon a query, rather than hard
+     * coding the oid. The oid for a function is not guaranteed to remain static, even on different
+     * servers of the same version.</p>
+     *
+     * @param name Function name
+     * @param fnid Function id
+     */
+    public void addFunction(String name, int fnid) {
+        func.put(name, fnid);
+    }
+
+    /**
+     * <p>This takes a ResultSet containing two columns. Column 1 contains the function name, Column 2
+     * the oid.</p>
+     *
+     * <p>It reads the entire ResultSet, loading the values into the function table.</p>
+     *
+     * <p><b>REMEMBER</b> to close() the resultset after calling this!!</p>
+     *
+     * <p><b><em>Implementation note about function name lookups:</em></b></p>
+     *
+     * <p>PostgreSQL stores the function id's and their corresponding names in the pg_proc table. To
+     * speed things up locally, instead of querying each function from that table when required, a
+     * HashMap is used. Also, only the functions required are entered into this table, keeping
+     * connection times as fast as possible.</p>
+     *
+     * <p>The org.postgresql.largeobject.LargeObject class performs a query upon its startup, and passes
+     * the returned ResultSet to the addFunctions() method here.</p>
+     *
+     * <p>Once this has been done, the LargeObject api refers to the functions by name.</p>
+     *
+     * <p>Don't think that manually converting them to the oid's will work. Ok, they will for now, but
+     * they can change during development (there was some discussion about this for V7.0), so this is
+     * implemented to prevent any unwarranted headaches in the future.</p>
+     *
+     * @param rs ResultSet
+     * @throws SQLException if a database-access error occurs.
+     * @see org.postgresql.largeobject.LargeObjectManager
+     */
+    public void addFunctions(ResultSet rs) throws SQLException {
+        while (rs.next()) {
+            func.put(rs.getString(1), rs.getInt(2));
+        }
+    }
+
+    /**
+     * <p>This returns the function id associated by its name.</p>
+     *
+     * <p>If addFunction() or addFunctions() have not been called for this name, then an SQLException is
+     * thrown.</p>
+     *
+     * @param name Function name to lookup
+     * @return Function ID for fastpath call
+     * @throws SQLException if the function is unknown.
+     */
+    public int getID(String name) throws SQLException {
+        Integer id = func.get(name);
+
+        // maybe we could add a lookup to the database here, and store the result
+        // in our lookup table, throwing the exception if that fails.
+        // We must, however, ensure that if we do, any existing ResultSet is
+        // unaffected, otherwise we could break user code.
+        //
+        // so, until we know we can do this (needs testing, on the TODO list)
+        // for now, we throw the exception and do no lookups.
+        if (id == null) {
+            throw new PSQLException(GT.tr("The fastpath function {0} is unknown.", name),
+                    PSQLState.UNEXPECTED_ERROR);
+        }
+
+        return id;
     }
-    return new FastpathArg((int) oid);
-  }
 
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/fastpath/FastpathArg.java b/pgjdbc/src/main/java/org/postgresql/fastpath/FastpathArg.java
index a739a29..1fab968 100644
--- a/pgjdbc/src/main/java/org/postgresql/fastpath/FastpathArg.java
+++ b/pgjdbc/src/main/java/org/postgresql/fastpath/FastpathArg.java
@@ -20,107 +20,107 @@ import java.sql.SQLException;
  * being called.
  *
  * @deprecated This API is somewhat obsolete, as one may achieve similar performance
- *         and greater functionality by setting up a prepared statement to define
- *         the function call. Then, executing the statement with binary transmission of parameters
- *         and results substitutes for a fast-path function call.
+ * and greater functionality by setting up a prepared statement to define
+ * the function call. Then, executing the statement with binary transmission of parameters
+ * and results substitutes for a fast-path function call.
  */
 @Deprecated
 public class FastpathArg {
-  /**
-   * Encoded byte value of argument.
-   */
-  private final byte [] bytes;
-  private final int bytesStart;
-  private final int bytesLength;
+    /**
+     * Encoded byte value of argument.
+     */
+    private final byte[] bytes;
+    private final int bytesStart;
+    private final int bytesLength;
 
-  static class ByteStreamWriterFastpathArg extends FastpathArg {
-    private final ByteStreamWriter writer;
-
-    ByteStreamWriterFastpathArg(ByteStreamWriter writer) {
-      super(null, 0, 0);
-      this.writer = writer;
+    /**
+     * Constructs an argument that consists of an integer value.
+     *
+     * @param value int value to set
+     */
+    public FastpathArg(int value) {
+        bytes = new byte[4];
+        bytes[3] = (byte) (value);
+        bytes[2] = (byte) (value >> 8);
+        bytes[1] = (byte) (value >> 16);
+        bytes[0] = (byte) (value >> 24);
+        bytesStart = 0;
+        bytesLength = 4;
+    }
+
+    /**
+     * Constructs an argument that consists of a long value.
+     *
+     * @param value long value to set
+     */
+    public FastpathArg(long value) {
+        bytes = new byte[8];
+        bytes[7] = (byte) (value);
+        bytes[6] = (byte) (value >> 8);
+        bytes[5] = (byte) (value >> 16);
+        bytes[4] = (byte) (value >> 24);
+        bytes[3] = (byte) (value >> 32);
+        bytes[2] = (byte) (value >> 40);
+        bytes[1] = (byte) (value >> 48);
+        bytes[0] = (byte) (value >> 56);
+        bytesStart = 0;
+        bytesLength = 8;
+    }
+
+    /**
+     * Constructs an argument that consists of an array of bytes.
+     *
+     * @param bytes array to store
+     */
+    public FastpathArg(byte[] bytes) {
+        this(bytes, 0, bytes.length);
+    }
+
+    /**
+     * Constructs an argument that consists of part of a byte array.
+     *
+     * @param buf source array
+     * @param off offset within array
+     * @param len length of data to include
+     */
+    public FastpathArg(byte[] buf, int off, int len) {
+        this.bytes = buf;
+        this.bytesStart = off;
+        this.bytesLength = len;
+    }
+
+    /**
+     * Constructs an argument that consists of a String.
+     *
+     * @param s String to store
+     */
+    public FastpathArg(String s) {
+        this(s.getBytes());
+    }
+
+    public static FastpathArg of(ByteStreamWriter writer) {
+        return new ByteStreamWriterFastpathArg(writer);
     }
 
-    @Override
     void populateParameter(ParameterList params, int index) throws SQLException {
-      params.setBytea(index, writer);
+        if (bytes == null) {
+            params.setNull(index, 0);
+        } else {
+            params.setBytea(index, bytes, bytesStart, bytesLength);
+        }
     }
-  }
 
-  /**
-   * Constructs an argument that consists of an integer value.
-   *
-   * @param value int value to set
-   */
-  public FastpathArg(int value) {
-    bytes = new byte[4];
-    bytes[3] = (byte) (value);
-    bytes[2] = (byte) (value >> 8);
-    bytes[1] = (byte) (value >> 16);
-    bytes[0] = (byte) (value >> 24);
-    bytesStart = 0;
-    bytesLength = 4;
-  }
+    static class ByteStreamWriterFastpathArg extends FastpathArg {
+        private final ByteStreamWriter writer;
 
-  /**
-   * Constructs an argument that consists of an integer value.
-   *
-   * @param value int value to set
-   */
-  public FastpathArg(long value) {
-    bytes = new byte[8];
-    bytes[7] = (byte) (value);
-    bytes[6] = (byte) (value >> 8);
-    bytes[5] = (byte) (value >> 16);
-    bytes[4] = (byte) (value >> 24);
-    bytes[3] = (byte) (value >> 32);
-    bytes[2] = (byte) (value >> 40);
-    bytes[1] = (byte) (value >> 48);
-    bytes[0] = (byte) (value >> 56);
-    bytesStart = 0;
-    bytesLength = 8;
-  }
+        ByteStreamWriterFastpathArg(ByteStreamWriter writer) {
+            super(null, 0, 0);
+            this.writer = writer;
+        }
 
-  /**
-   * Constructs an argument that consists of an array of bytes.
-   *
-   * @param bytes array to store
-   */
-  public FastpathArg(byte[] bytes) {
-    this(bytes, 0, bytes.length);
-  }
-
-  /**
-   * Constructs an argument that consists of part of a byte array.
-   *
-   * @param buf source array
-   * @param off offset within array
-   * @param len length of data to include
-   */
-  public FastpathArg(byte [] buf, int off, int len) {
-    this.bytes = buf;
-    this.bytesStart = off;
-    this.bytesLength = len;
-  }
-
-  /**
-   * Constructs an argument that consists of a String.
-   *
-   * @param s String to store
-   */
-  public FastpathArg(String s) {
-    this(s.getBytes());
-  }
-
-  public static FastpathArg of(ByteStreamWriter writer) {
-    return new ByteStreamWriterFastpathArg(writer);
-  }
-
-  void populateParameter(ParameterList params, int index) throws SQLException {
-    if (bytes == null) {
-      params.setNull(index, 0);
-    } else {
-      params.setBytea(index, bytes, bytesStart, bytesLength);
+        @Override
+        void populateParameter(ParameterList params, int index) throws SQLException {
+            params.setBytea(index, writer);
+        }
     }
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/geometric/PGbox.java b/pgjdbc/src/main/java/org/postgresql/geometric/PGbox.java
index 7127a41..54b393c 100644
--- a/pgjdbc/src/main/java/org/postgresql/geometric/PGbox.java
+++ b/pgjdbc/src/main/java/org/postgresql/geometric/PGbox.java
@@ -20,181 +20,181 @@ import java.sql.SQLException;
  */
 @SuppressWarnings("serial")
 public class PGbox extends PGobject implements PGBinaryObject, Serializable, Cloneable {
-  /**
-   * These are the two points.
-   */
-  public PGpoint [] point;
+    /**
+     * These are the two points.
+     */
+    public PGpoint[] point;
 
-  /**
-   * @param x1 first x coordinate
-   * @param y1 first y coordinate
-   * @param x2 second x coordinate
-   * @param y2 second y coordinate
-   */
-  public PGbox(double x1, double y1, double x2, double y2) {
-    this(new PGpoint(x1, y1), new PGpoint(x2, y2));
-  }
-
-  /**
-   * @param p1 first point
-   * @param p2 second point
-   */
-  public PGbox(PGpoint p1, PGpoint p2) {
-    this();
-    this.point = new PGpoint[]{p1, p2};
-  }
-
-  /**
-   * @param s Box definition in PostgreSQL syntax
-   * @throws SQLException if definition is invalid
-   */
-  public PGbox(String s) throws SQLException {
-    this();
-    setValue(s);
-  }
-
-  /**
-   * Required constructor.
-   */
-  public PGbox() {
-    type = "box";
-  }
-
-  /**
-   * This method sets the value of this object. It should be overridden, but still called by
-   * subclasses.
-   *
-   * @param value a string representation of the value of the object
-   * @throws SQLException thrown if value is invalid for this type
-   */
-  @Override
-  public void setValue(String value) throws SQLException {
-    if (value == null) {
-      this.point = null;
-      return;
-    }
-    PGtokenizer t = new PGtokenizer(value, ',');
-    if (t.getSize() != 2) {
-      throw new PSQLException(
-          GT.tr("Conversion to type {0} failed: {1}.", type, value),
-          PSQLState.DATA_TYPE_MISMATCH);
+    /**
+     * @param x1 first x coordinate
+     * @param y1 first y coordinate
+     * @param x2 second x coordinate
+     * @param y2 second y coordinate
+     */
+    public PGbox(double x1, double y1, double x2, double y2) {
+        this(new PGpoint(x1, y1), new PGpoint(x2, y2));
     }
 
-    PGpoint[] point = this.point;
-    if (point == null) {
-      this.point = point = new PGpoint[2];
-    }
-    point[0] = new PGpoint(t.getToken(0));
-    point[1] = new PGpoint(t.getToken(1));
-  }
-
-  /**
-   * @param b Definition of this point in PostgreSQL's binary syntax
-   */
-  @Override
-  public void setByteValue(byte[] b, int offset) {
-    PGpoint[] point = this.point;
-    if (point == null) {
-      this.point = point = new PGpoint[2];
-    }
-    point[0] = new PGpoint();
-    point[0].setByteValue(b, offset);
-    point[1] = new PGpoint();
-    point[1].setByteValue(b, offset + point[0].lengthInBytes());
-    this.point = point;
-  }
-
-  /**
-   * @param obj Object to compare with
-   * @return true if the two boxes are identical
-   */
-  @Override
-  public boolean equals(Object obj) {
-    if (obj instanceof PGbox) {
-      PGbox p = (PGbox) obj;
-
-      // Same points.
-      PGpoint[] point = this.point;
-      PGpoint[] pPoint = p.point;
-      if (point == null) {
-        return pPoint == null;
-      } else if (pPoint == null) {
-        return false;
-      }
-
-      if (pPoint[0].equals(point[0]) && pPoint[1].equals(point[1])) {
-        return true;
-      }
-
-      // Points swapped.
-      if (pPoint[0].equals(point[1]) && pPoint[1].equals(point[0])) {
-        return true;
-      }
-
-      // Using the opposite two points of the box:
-      // (x1,y1),(x2,y2) -> (x1,y2),(x2,y1)
-      if (pPoint[0].x == point[0].x && pPoint[0].y == point[1].y
-          && pPoint[1].x == point[1].x && pPoint[1].y == point[0].y) {
-        return true;
-      }
-
-      // Using the opposite two points of the box, and the points are swapped
-      // (x1,y1),(x2,y2) -> (x2,y1),(x1,y2)
-      if (pPoint[0].x == point[1].x && pPoint[0].y == point[0].y
-          && pPoint[1].x == point[0].x && pPoint[1].y == point[1].y) {
-        return true;
-      }
+    /**
+     * @param p1 first point
+     * @param p2 second point
+     */
+    public PGbox(PGpoint p1, PGpoint p2) {
+        this();
+        this.point = new PGpoint[]{p1, p2};
     }
 
-    return false;
-  }
+    /**
+     * @param s Box definition in PostgreSQL syntax
+     * @throws SQLException if definition is invalid
+     */
+    public PGbox(String s) throws SQLException {
+        this();
+        setValue(s);
+    }
 
-  @Override
-  public int hashCode() {
-    // This relies on the behaviour of point's hashcode being an exclusive-OR of
-    // its X and Y components; we end up with an exclusive-OR of the two X and
-    // two Y components, which is equal whenever equals() would return true
-    // since xor is commutative.
-    PGpoint[] point = this.point;
-    return point == null ? 0 : point[0].hashCode() ^ point[1].hashCode();
-  }
+    /**
+     * Required constructor.
+     */
+    public PGbox() {
+        type = "box";
+    }
 
-  @Override
-  public Object clone() throws CloneNotSupportedException {
-    PGbox newPGbox = (PGbox) super.clone();
-    if (newPGbox.point != null) {
-      newPGbox.point = newPGbox.point.clone();
-      for (int i = 0; i < newPGbox.point.length; i++) {
-        if (newPGbox.point[i] != null) {
-          newPGbox.point[i] = (PGpoint) newPGbox.point[i].clone();
+    /**
+     * @param b Definition of this point in PostgreSQL's binary syntax
+     */
+    @Override
+    public void setByteValue(byte[] b, int offset) {
+        PGpoint[] point = this.point;
+        if (point == null) {
+            this.point = point = new PGpoint[2];
         }
-      }
+        point[0] = new PGpoint();
+        point[0].setByteValue(b, offset);
+        point[1] = new PGpoint();
+        point[1].setByteValue(b, offset + point[0].lengthInBytes());
+        this.point = point;
     }
-    return newPGbox;
-  }
 
-  /**
-   * @return the PGbox in the syntax expected by org.postgresql
-   */
-  @Override
-  public String getValue() {
-    PGpoint[] point = this.point;
-    return point == null ? null : point[0].toString() + "," + point[1].toString();
-  }
+    /**
+     * @param obj Object to compare with
+     * @return true if the two boxes are identical
+     */
+    @Override
+    public boolean equals(Object obj) {
+        if (obj instanceof PGbox) {
+            PGbox p = (PGbox) obj;
 
-  @Override
-  public int lengthInBytes() {
-    PGpoint[] point = this.point;
-    if (point == null) {
-      return 0;
+            // Same points.
+            PGpoint[] point = this.point;
+            PGpoint[] pPoint = p.point;
+            if (point == null) {
+                return pPoint == null;
+            } else if (pPoint == null) {
+                return false;
+            }
+
+            if (pPoint[0].equals(point[0]) && pPoint[1].equals(point[1])) {
+                return true;
+            }
+
+            // Points swapped.
+            if (pPoint[0].equals(point[1]) && pPoint[1].equals(point[0])) {
+                return true;
+            }
+
+            // Using the opposite two points of the box:
+            // (x1,y1),(x2,y2) -> (x1,y2),(x2,y1)
+            if (pPoint[0].x == point[0].x && pPoint[0].y == point[1].y
+                    && pPoint[1].x == point[1].x && pPoint[1].y == point[0].y) {
+                return true;
+            }
+
+            // Using the opposite two points of the box, and the points are swapped
+            // (x1,y1),(x2,y2) -> (x2,y1),(x1,y2)
+            if (pPoint[0].x == point[1].x && pPoint[0].y == point[0].y
+                    && pPoint[1].x == point[0].x && pPoint[1].y == point[1].y) {
+                return true;
+            }
+        }
+
+        return false;
     }
-    return point[0].lengthInBytes() + point[1].lengthInBytes();
-  }
 
-  @Override
-  public void toBytes(byte[] bytes, int offset) {
-    PGpoint[] point = this.point;
-    point[0].toBytes(bytes, offset);
-    point[1].toBytes(bytes, offset + point[0].lengthInBytes());
-  }
+    @Override
+    public int hashCode() {
+        // This relies on the behaviour of point's hashcode being an exclusive-OR of
+        // its X and Y components; we end up with an exclusive-OR of the two X and
+        // two Y components, which is equal whenever equals() would return true
+        // since xor is commutative.
+        PGpoint[] point = this.point;
+        return point == null ? 0 : point[0].hashCode() ^ point[1].hashCode();
+    }
+
+    @Override
+    public Object clone() throws CloneNotSupportedException {
+        PGbox newPGbox = (PGbox) super.clone();
+        if (newPGbox.point != null) {
+            newPGbox.point = newPGbox.point.clone();
+            for (int i = 0; i < newPGbox.point.length; i++) {
+                if (newPGbox.point[i] != null) {
+                    newPGbox.point[i] = (PGpoint) newPGbox.point[i].clone();
+                }
+            }
+        }
+        return newPGbox;
+    }
+
+    /**
+     * @return the PGbox in the syntax expected by org.postgresql
+     */
+    @Override
+    public String getValue() {
+        PGpoint[] point = this.point;
+        return point == null ? null : point[0].toString() + "," + point[1].toString();
+    }
+
+    /**
+     * This method sets the value of this object. It should be overridden, but still called by
+     * subclasses.
+     *
+     * @param value a string representation of the value of the object
+     * @throws SQLException thrown if value is invalid for this type
+     */
+    @Override
+    public void setValue(String value) throws SQLException {
+        if (value == null) {
+            this.point = null;
+            return;
+        }
+        PGtokenizer t = new PGtokenizer(value, ',');
+        if (t.getSize() != 2) {
+            throw new PSQLException(
+                    GT.tr("Conversion to type {0} failed: {1}.", type, value),
+                    PSQLState.DATA_TYPE_MISMATCH);
+        }
+
+        PGpoint[] point = this.point;
+        if (point == null) {
+            this.point = point = new PGpoint[2];
+        }
+        point[0] = new PGpoint(t.getToken(0));
+        point[1] = new PGpoint(t.getToken(1));
+    }
+
+    @Override
+    public int lengthInBytes() {
+        PGpoint[] point = this.point;
+        if (point == null) {
+            return 0;
+        }
+        return point[0].lengthInBytes() + point[1].lengthInBytes();
+    }
+
+    @Override
+    public void toBytes(byte[] bytes, int offset) {
+        PGpoint[] point = this.point;
+        point[0].toBytes(bytes, offset);
+        point[1].toBytes(bytes, offset + point[0].lengthInBytes());
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/geometric/PGcircle.java b/pgjdbc/src/main/java/org/postgresql/geometric/PGcircle.java
index 995023a..ddbced8 100644
--- a/pgjdbc/src/main/java/org/postgresql/geometric/PGcircle.java
+++ b/pgjdbc/src/main/java/org/postgresql/geometric/PGcircle.java
@@ -19,122 +19,122 @@ import java.sql.SQLException;
  */
 @SuppressWarnings("serial")
 public class PGcircle extends PGobject implements Serializable, Cloneable {
-  /**
-   * This is the center point.
-   */
-  public PGpoint center;
+    /**
+     * This is the center point.
+     */
+    public PGpoint center;
 
-  /**
-   * This is the radius.
-   */
-  public double radius;
+    /**
+     * This is the radius.
+     */
+    public double radius;
 
-  /**
-   * @param x coordinate of center
-   * @param y coordinate of center
-   * @param r radius of circle
-   */
-  public PGcircle(double x, double y, double r) {
-    this(new PGpoint(x, y), r);
-  }
-
-  /**
-   * @param c PGpoint describing the circle's center
-   * @param r radius of circle
-   */
-  public PGcircle(PGpoint c, double r) {
-    this();
-    this.center = c;
-    this.radius = r;
-  }
-
-  /**
-   * @param s definition of the circle in PostgreSQL's syntax.
-   * @throws SQLException on conversion failure
-   */
-  public PGcircle(String s) throws SQLException {
-    this();
-    setValue(s);
-  }
-
-  /**
-   * This constructor is used by the driver.
-   */
-  public PGcircle() {
-    type = "circle";
-  }
-
-  /**
-   * @param s definition of the circle in PostgreSQL's syntax.
-   * @throws SQLException on conversion failure
-   */
-  @Override
-  public void setValue(String s) throws SQLException {
-    if (s == null) {
-      center = null;
-      return;
-    }
-    PGtokenizer t = new PGtokenizer(PGtokenizer.removeAngle(s), ',');
-    if (t.getSize() != 2) {
-      throw new PSQLException(GT.tr("Conversion to type {0} failed: {1}.", type, s),
-          PSQLState.DATA_TYPE_MISMATCH);
+    /**
+     * @param x coordinate of center
+     * @param y coordinate of center
+     * @param r radius of circle
+     */
+    public PGcircle(double x, double y, double r) {
+        this(new PGpoint(x, y), r);
     }
 
-    try {
-      center = new PGpoint(t.getToken(0));
-      radius = Double.parseDouble(t.getToken(1));
-    } catch (NumberFormatException e) {
-      throw new PSQLException(GT.tr("Conversion to type {0} failed: {1}.", type, s),
-          PSQLState.DATA_TYPE_MISMATCH, e);
+    /**
+     * @param c PGpoint describing the circle's center
+     * @param r radius of circle
+     */
+    public PGcircle(PGpoint c, double r) {
+        this();
+        this.center = c;
+        this.radius = r;
     }
-  }
 
-  /**
-   * @param obj Object to compare with
-   * @return true if the two circles are identical
-   */
-  @Override
-  public boolean equals(Object obj) {
-    if (obj instanceof PGcircle) {
-      PGcircle p = (PGcircle) obj;
-      PGpoint center = this.center;
-      PGpoint pCenter = p.center;
-      if (center == null) {
-        return pCenter == null;
-      } else if (pCenter == null) {
+    /**
+     * @param s definition of the circle in PostgreSQL's syntax.
+     * @throws SQLException on conversion failure
+     */
+    public PGcircle(String s) throws SQLException {
+        this();
+        setValue(s);
+    }
+
+    /**
+     * This constructor is used by the driver.
+     */
+    public PGcircle() {
+        type = "circle";
+    }
+
+    /**
+     * @param obj Object to compare with
+     * @return true if the two circles are identical
+     */
+    @Override
+    public boolean equals(Object obj) {
+        if (obj instanceof PGcircle) {
+            PGcircle p = (PGcircle) obj;
+            PGpoint center = this.center;
+            PGpoint pCenter = p.center;
+            if (center == null) {
+                return pCenter == null;
+            } else if (pCenter == null) {
+                return false;
+            }
+
+            return p.radius == radius && equals(pCenter, center);
+        }
         return false;
-      }
-
-      return p.radius == radius && equals(pCenter, center);
     }
-    return false;
-  }
 
-  @Override
-  public int hashCode() {
-    if (center == null) {
-      return 0;
+    @Override
+    public int hashCode() {
+        if (center == null) {
+            return 0;
+        }
+        long bits = Double.doubleToLongBits(radius);
+        int v = (int) (bits ^ (bits >>> 32));
+        v = v * 31 + center.hashCode();
+        return v;
     }
-    long bits = Double.doubleToLongBits(radius);
-    int v = (int) (bits ^ (bits >>> 32));
-    v = v * 31 + center.hashCode();
-    return v;
-  }
 
-  @Override
-  public Object clone() throws CloneNotSupportedException {
-    PGcircle newPGcircle = (PGcircle) super.clone();
-    if (newPGcircle.center != null) {
-      newPGcircle.center = (PGpoint) newPGcircle.center.clone();
+    @Override
+    public Object clone() throws CloneNotSupportedException {
+        PGcircle newPGcircle = (PGcircle) super.clone();
+        if (newPGcircle.center != null) {
+            newPGcircle.center = (PGpoint) newPGcircle.center.clone();
+        }
+        return newPGcircle;
     }
-    return newPGcircle;
-  }
 
-  /**
-   * @return the PGcircle in the syntax expected by org.postgresql
-   */
-  @Override
-  public String getValue() {
-    return center == null ? null : "<" + center + "," + radius + ">";
-  }
+    /**
+     * @return the PGcircle in the syntax expected by org.postgresql
+     */
+    @Override
+    public String getValue() {
+        return center == null ? null : "<" + center + "," + radius + ">";
+    }
+
+    /**
+     * @param s definition of the circle in PostgreSQL's syntax.
+     * @throws SQLException on conversion failure
+     */
+    @Override
+    public void setValue(String s) throws SQLException {
+        if (s == null) {
+            center = null;
+            return;
+        }
+        PGtokenizer t = new PGtokenizer(PGtokenizer.removeAngle(s), ',');
+        if (t.getSize() != 2) {
+            throw new PSQLException(GT.tr("Conversion to type {0} failed: {1}.", type, s),
+                    PSQLState.DATA_TYPE_MISMATCH);
+        }
+
+        try {
+            center = new PGpoint(t.getToken(0));
+            radius = Double.parseDouble(t.getToken(1));
+        } catch (NumberFormatException e) {
+            throw new PSQLException(GT.tr("Conversion to type {0} failed: {1}.", type, s),
+                    PSQLState.DATA_TYPE_MISMATCH, e);
+        }
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/geometric/PGline.java b/pgjdbc/src/main/java/org/postgresql/geometric/PGline.java
index 9ee8ffb..57ffb0b 100644
--- a/pgjdbc/src/main/java/org/postgresql/geometric/PGline.java
+++ b/pgjdbc/src/main/java/org/postgresql/geometric/PGline.java
@@ -20,195 +20,195 @@ import java.sql.SQLException;
 @SuppressWarnings("serial")
 public class PGline extends PGobject implements Serializable, Cloneable {
 
-  /**
-   * Coefficient of x.
-   */
-  public double a;
+    /**
+     * Coefficient of x.
+     */
+    public double a;
 
-  /**
-   * Coefficient of y.
-   */
-  public double b;
+    /**
+     * Coefficient of y.
+     */
+    public double b;
 
-  /**
-   * Constant.
-   */
-  public double c;
+    /**
+     * Constant.
+     */
+    public double c;
 
-  private boolean isNull;
+    private boolean isNull;
 
-  /**
-   * @param a coefficient of x
-   * @param b coefficient of y
-   * @param c constant
-   */
-  public PGline(double a, double b, double c) {
-    this();
-    this.a = a;
-    this.b = b;
-    this.c = c;
-  }
-
-  /**
-   * @param x1 coordinate for first point on the line
-   * @param y1 coordinate for first point on the line
-   * @param x2 coordinate for second point on the line
-   * @param y2 coordinate for second point on the line
-   */
-  public PGline(double x1, double y1, double x2, double y2) {
-    this();
-    setValue(x1, y1, x2, y2);
-  }
-
-  /**
-   * @param p1 first point on the line
-   * @param p2 second point on the line
-   */
-  public PGline(PGpoint p1, PGpoint p2) {
-    this();
-    setValue(p1, p2);
-  }
-
-  /**
-   * @param lseg Line segment which calls on this line.
-   */
-  public PGline(PGlseg lseg) {
-    this();
-    if (lseg == null) {
-      isNull = true;
-      return;
-    }
-    PGpoint[] point = lseg.point;
-    if (point == null) {
-      isNull = true;
-      return;
-    }
-    setValue(point[0], point[1]);
-  }
-
-  private void setValue(PGpoint p1, PGpoint p2) {
-    if (p1 == null || p2 == null) {
-      isNull = true;
-    } else {
-      setValue(p1.x, p1.y, p2.x, p2.y);
-    }
-  }
-
-  private void setValue(double x1, double y1, double x2, double y2) {
-    if (x1 == x2) {
-      a = -1;
-      b = 0;
-    } else {
-      a = (y2 - y1) / (x2 - x1);
-      b = -1;
-    }
-    c = y1 - a * x1;
-  }
-
-  /**
-   * @param s definition of the line in PostgreSQL's syntax.
-   * @throws SQLException on conversion failure
-   */
-  public PGline(String s) throws SQLException {
-    this();
-    setValue(s);
-  }
-
-  /**
-   * required by the driver.
-   */
-  public PGline() {
-    type = "line";
-  }
-
-  /**
-   * @param s Definition of the line in PostgreSQL's syntax
-   * @throws SQLException on conversion failure
-   */
-  @Override
-  public void setValue(String s) throws SQLException {
-    isNull = s == null;
-    if (s == null) {
-      return;
-    }
-    if (s.trim().startsWith("{")) {
-      PGtokenizer t = new PGtokenizer(PGtokenizer.removeCurlyBrace(s), ',');
-      if (t.getSize() != 3) {
-        throw new PSQLException(GT.tr("Conversion to type {0} failed: {1}.", type, s),
-            PSQLState.DATA_TYPE_MISMATCH);
-      }
-      a = Double.parseDouble(t.getToken(0));
-      b = Double.parseDouble(t.getToken(1));
-      c = Double.parseDouble(t.getToken(2));
-    } else if (s.trim().startsWith("[")) {
-      PGtokenizer t = new PGtokenizer(PGtokenizer.removeBox(s), ',');
-      if (t.getSize() != 2) {
-        throw new PSQLException(GT.tr("Conversion to type {0} failed: {1}.", type, s),
-            PSQLState.DATA_TYPE_MISMATCH);
-      }
-      PGpoint point1 = new PGpoint(t.getToken(0));
-      PGpoint point2 = new PGpoint(t.getToken(1));
-      a = point2.x - point1.x;
-      b = point2.y - point1.y;
-      c = point1.y;
-    }
-  }
-
-  /**
-   * @param obj Object to compare with
-   * @return true if the two lines are identical
-   */
-  @Override
-  public boolean equals(Object obj) {
-    if (this == obj) {
-      return true;
-    }
-    if (obj == null || getClass() != obj.getClass()) {
-      return false;
-    }
-    if (!super.equals(obj)) {
-      return false;
+    /**
+     * @param a coefficient of x
+     * @param b coefficient of y
+     * @param c constant
+     */
+    public PGline(double a, double b, double c) {
+        this();
+        this.a = a;
+        this.b = b;
+        this.c = c;
     }
 
-    PGline pGline = (PGline) obj;
-    if (isNull) {
-      return pGline.isNull;
-    } else if (pGline.isNull) {
-      return false;
+    /**
+     * @param x1 coordinate for first point on the line
+     * @param y1 coordinate for first point on the line
+     * @param x2 coordinate for second point on the line
+     * @param y2 coordinate for second point on the line
+     */
+    public PGline(double x1, double y1, double x2, double y2) {
+        this();
+        setValue(x1, y1, x2, y2);
     }
 
-    return Double.compare(pGline.a, a) == 0
-        && Double.compare(pGline.b, b) == 0
-        && Double.compare(pGline.c, c) == 0;
-  }
-
-  @Override
-  public int hashCode() {
-    if (isNull) {
-      return 0;
+    /**
+     * @param p1 first point on the line
+     * @param p2 second point on the line
+     */
+    public PGline(PGpoint p1, PGpoint p2) {
+        this();
+        setValue(p1, p2);
     }
-    int result = super.hashCode();
-    long temp;
-    temp = Double.doubleToLongBits(a);
-    result = 31 * result + (int) (temp ^ (temp >>> 32));
-    temp = Double.doubleToLongBits(b);
-    result = 31 * result + (int) (temp ^ (temp >>> 32));
-    temp = Double.doubleToLongBits(c);
-    result = 31 * result + (int) (temp ^ (temp >>> 32));
-    return result;
-  }
 
-  /**
-   * @return the PGline in the syntax expected by org.postgresql
-   */
-  @Override
-  public String getValue() {
-    return isNull ? null : "{" + a + "," + b + "," + c + "}";
-  }
+    /**
+     * @param lseg Line segment which calls on this line.
+     */
+    public PGline(PGlseg lseg) {
+        this();
+        if (lseg == null) {
+            isNull = true;
+            return;
+        }
+        PGpoint[] point = lseg.point;
+        if (point == null) {
+            isNull = true;
+            return;
+        }
+        setValue(point[0], point[1]);
+    }
 
-  @Override
-  public Object clone() throws CloneNotSupportedException {
-    // squid:S2157 "Cloneables" should implement "clone
-    return super.clone();
-  }
+    /**
+     * @param s definition of the line in PostgreSQL's syntax.
+     * @throws SQLException on conversion failure
+     */
+    public PGline(String s) throws SQLException {
+        this();
+        setValue(s);
+    }
+
+    /**
+     * required by the driver.
+     */
+    public PGline() {
+        type = "line";
+    }
+
+    private void setValue(PGpoint p1, PGpoint p2) {
+        if (p1 == null || p2 == null) {
+            isNull = true;
+        } else {
+            setValue(p1.x, p1.y, p2.x, p2.y);
+        }
+    }
+
+    private void setValue(double x1, double y1, double x2, double y2) {
+        if (x1 == x2) {
+            a = -1;
+            b = 0;
+        } else {
+            a = (y2 - y1) / (x2 - x1);
+            b = -1;
+        }
+        c = y1 - a * x1;
+    }
+
+    /**
+     * @param obj Object to compare with
+     * @return true if the two lines are identical
+     */
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj) {
+            return true;
+        }
+        if (obj == null || getClass() != obj.getClass()) {
+            return false;
+        }
+        if (!super.equals(obj)) {
+            return false;
+        }
+
+        PGline pGline = (PGline) obj;
+        if (isNull) {
+            return pGline.isNull;
+        } else if (pGline.isNull) {
+            return false;
+        }
+
+        return Double.compare(pGline.a, a) == 0
+                && Double.compare(pGline.b, b) == 0
+                && Double.compare(pGline.c, c) == 0;
+    }
+
+    @Override
+    public int hashCode() {
+        if (isNull) {
+            return 0;
+        }
+        int result = super.hashCode();
+        long temp;
+        temp = Double.doubleToLongBits(a);
+        result = 31 * result + (int) (temp ^ (temp >>> 32));
+        temp = Double.doubleToLongBits(b);
+        result = 31 * result + (int) (temp ^ (temp >>> 32));
+        temp = Double.doubleToLongBits(c);
+        result = 31 * result + (int) (temp ^ (temp >>> 32));
+        return result;
+    }
+
+    /**
+     * @return the PGline in the syntax expected by org.postgresql
+     */
+    @Override
+    public String getValue() {
+        return isNull ? null : "{" + a + "," + b + "," + c + "}";
+    }
+
+    /**
+     * @param s Definition of the line in PostgreSQL's syntax
+     * @throws SQLException on conversion failure
+     */
+    @Override
+    public void setValue(String s) throws SQLException {
+        isNull = s == null;
+        if (s == null) {
+            return;
+        }
+        if (s.trim().startsWith("{")) {
+            PGtokenizer t = new PGtokenizer(PGtokenizer.removeCurlyBrace(s), ',');
+            if (t.getSize() != 3) {
+                throw new PSQLException(GT.tr("Conversion to type {0} failed: {1}.", type, s),
+                        PSQLState.DATA_TYPE_MISMATCH);
+            }
+            a = Double.parseDouble(t.getToken(0));
+            b = Double.parseDouble(t.getToken(1));
+            c = Double.parseDouble(t.getToken(2));
+        } else if (s.trim().startsWith("[")) {
+            PGtokenizer t = new PGtokenizer(PGtokenizer.removeBox(s), ',');
+            if (t.getSize() != 2) {
+                throw new PSQLException(GT.tr("Conversion to type {0} failed: {1}.", type, s),
+                        PSQLState.DATA_TYPE_MISMATCH);
+            }
+            PGpoint point1 = new PGpoint(t.getToken(0));
+            PGpoint point2 = new PGpoint(t.getToken(1));
+            a = point2.x - point1.x;
+            b = point2.y - point1.y;
+            c = point1.y;
+        }
+    }
+
+    @Override
+    public Object clone() throws CloneNotSupportedException {
+        // squid:S2157 "Cloneables" should implement "clone"
+        return super.clone();
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/geometric/PGlseg.java b/pgjdbc/src/main/java/org/postgresql/geometric/PGlseg.java
index da1c158..8c6c0f0 100644
--- a/pgjdbc/src/main/java/org/postgresql/geometric/PGlseg.java
+++ b/pgjdbc/src/main/java/org/postgresql/geometric/PGlseg.java
@@ -19,123 +19,123 @@ import java.sql.SQLException;
  */
 @SuppressWarnings("serial")
 public class PGlseg extends PGobject implements Serializable, Cloneable {
-  /**
-   * These are the two points.
-   */
-  public PGpoint [] point;
+    /**
+     * These are the two points.
+     */
+    public PGpoint[] point;
 
-  /**
-   * @param x1 coordinate for first point
-   * @param y1 coordinate for first point
-   * @param x2 coordinate for second point
-   * @param y2 coordinate for second point
-   */
-  public PGlseg(double x1, double y1, double x2, double y2) {
-    this(new PGpoint(x1, y1), new PGpoint(x2, y2));
-  }
-
-  /**
-   * @param p1 first point
-   * @param p2 second point
-   */
-  public PGlseg(PGpoint p1, PGpoint p2) {
-    this();
-    point = new PGpoint[]{p1, p2};
-  }
-
-  /**
-   * @param s definition of the line segment in PostgreSQL's syntax.
-   * @throws SQLException on conversion failure
-   */
-  public PGlseg(String s) throws SQLException {
-    this();
-    setValue(s);
-  }
-
-  /**
-   * required by the driver.
-   */
-  public PGlseg() {
-    type = "lseg";
-  }
-
-  /**
-   * @param s Definition of the line segment in PostgreSQL's syntax
-   * @throws SQLException on conversion failure
-   */
-  @Override
-  public void setValue(String s) throws SQLException {
-    if (s == null) {
-      point = null;
-      return;
-    }
-    PGtokenizer t = new PGtokenizer(PGtokenizer.removeBox(s), ',');
-    if (t.getSize() != 2) {
-      throw new PSQLException(GT.tr("Conversion to type {0} failed: {1}.", type, s),
-          PSQLState.DATA_TYPE_MISMATCH);
+    /**
+     * @param x1 coordinate for first point
+     * @param y1 coordinate for first point
+     * @param x2 coordinate for second point
+     * @param y2 coordinate for second point
+     */
+    public PGlseg(double x1, double y1, double x2, double y2) {
+        this(new PGpoint(x1, y1), new PGpoint(x2, y2));
     }
 
-    PGpoint[] point = this.point;
-    if (point == null) {
-      this.point = point = new PGpoint[2];
+    /**
+     * @param p1 first point
+     * @param p2 second point
+     */
+    public PGlseg(PGpoint p1, PGpoint p2) {
+        this();
+        point = new PGpoint[]{p1, p2};
     }
-    point[0] = new PGpoint(t.getToken(0));
-    point[1] = new PGpoint(t.getToken(1));
-  }
 
-  /**
-   * @param obj Object to compare with
-   * @return true if the two line segments are identical
-   */
-  @Override
-  public boolean equals(Object obj) {
-    if (obj instanceof PGlseg) {
-      PGlseg p = (PGlseg) obj;
-      PGpoint[] point = this.point;
-      PGpoint[] pPoint = p.point;
-      if (point == null) {
-        return pPoint == null;
-      } else if (pPoint == null) {
-        return false;
-      }
-      return (pPoint[0].equals(point[0]) && pPoint[1].equals(point[1]))
-          || (pPoint[0].equals(point[1]) && pPoint[1].equals(point[0]));
+    /**
+     * @param s definition of the line segment in PostgreSQL's syntax.
+     * @throws SQLException on conversion failure
+     */
+    public PGlseg(String s) throws SQLException {
+        this();
+        setValue(s);
     }
-    return false;
-  }
 
-  @Override
-  public int hashCode() {
-    PGpoint[] point = this.point;
-    if (point == null) {
-      return 0;
+    /**
+     * required by the driver.
+     */
+    public PGlseg() {
+        type = "lseg";
     }
-    return point[0].hashCode() ^ point[1].hashCode();
-  }
 
-  @Override
-  public Object clone() throws CloneNotSupportedException {
-    PGlseg newPGlseg = (PGlseg) super.clone();
-    if (newPGlseg.point != null) {
-      newPGlseg.point = newPGlseg.point.clone();
-      for (int i = 0; i < newPGlseg.point.length; i++) {
-        if (newPGlseg.point[i] != null) {
-          newPGlseg.point[i] = (PGpoint) newPGlseg.point[i].clone();
+    /**
+     * @param obj Object to compare with
+     * @return true if the two line segments are identical
+     */
+    @Override
+    public boolean equals(Object obj) {
+        if (obj instanceof PGlseg) {
+            PGlseg p = (PGlseg) obj;
+            PGpoint[] point = this.point;
+            PGpoint[] pPoint = p.point;
+            if (point == null) {
+                return pPoint == null;
+            } else if (pPoint == null) {
+                return false;
+            }
+            return (pPoint[0].equals(point[0]) && pPoint[1].equals(point[1]))
+                    || (pPoint[0].equals(point[1]) && pPoint[1].equals(point[0]));
         }
-      }
+        return false;
     }
-    return newPGlseg;
-  }
 
-  /**
-   * @return the PGlseg in the syntax expected by org.postgresql
-   */
-  @Override
-  public String getValue() {
-    PGpoint[] point = this.point;
-    if (point == null) {
-      return null;
+    @Override
+    public int hashCode() {
+        PGpoint[] point = this.point;
+        if (point == null) {
+            return 0;
+        }
+        return point[0].hashCode() ^ point[1].hashCode();
+    }
+
+    @Override
+    public Object clone() throws CloneNotSupportedException {
+        PGlseg newPGlseg = (PGlseg) super.clone();
+        if (newPGlseg.point != null) {
+            newPGlseg.point = newPGlseg.point.clone();
+            for (int i = 0; i < newPGlseg.point.length; i++) {
+                if (newPGlseg.point[i] != null) {
+                    newPGlseg.point[i] = (PGpoint) newPGlseg.point[i].clone();
+                }
+            }
+        }
+        return newPGlseg;
+    }
+
+    /**
+     * @return the PGlseg in the syntax expected by org.postgresql
+     */
+    @Override
+    public String getValue() {
+        PGpoint[] point = this.point;
+        if (point == null) {
+            return null;
+        }
+        return "[" + point[0] + "," + point[1] + "]";
+    }
+
+    /**
+     * @param s Definition of the line segment in PostgreSQL's syntax
+     * @throws SQLException on conversion failure
+     */
+    @Override
+    public void setValue(String s) throws SQLException {
+        if (s == null) {
+            point = null;
+            return;
+        }
+        PGtokenizer t = new PGtokenizer(PGtokenizer.removeBox(s), ',');
+        if (t.getSize() != 2) {
+            throw new PSQLException(GT.tr("Conversion to type {0} failed: {1}.", type, s),
+                    PSQLState.DATA_TYPE_MISMATCH);
+        }
+
+        PGpoint[] point = this.point;
+        if (point == null) {
+            this.point = point = new PGpoint[2];
+        }
+        point[0] = new PGpoint(t.getToken(0));
+        point[1] = new PGpoint(t.getToken(1));
     }
-    return "[" + point[0] + "," + point[1] + "]";
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/geometric/PGpath.java b/pgjdbc/src/main/java/org/postgresql/geometric/PGpath.java
index 807ee86..c8a54d1 100644
--- a/pgjdbc/src/main/java/org/postgresql/geometric/PGpath.java
+++ b/pgjdbc/src/main/java/org/postgresql/geometric/PGpath.java
@@ -19,172 +19,173 @@ import java.sql.SQLException;
  */
 @SuppressWarnings("serial")
 public class PGpath extends PGobject implements Serializable, Cloneable {
-  /**
-   * True if the path is open, false if closed.
-   */
-  public boolean open;
+    /**
+     * True if the path is open, false if closed.
+     */
+    public boolean open;
 
-  /**
-   * The points defining this path.
-   */
-  public PGpoint [] points;
+    /**
+     * The points defining this path.
+     */
+    public PGpoint[] points;
 
-  /**
-   * @param points the PGpoints that define the path
-   * @param open True if the path is open, false if closed
-   */
-  public PGpath(PGpoint [] points, boolean open) {
-    this();
-    this.points = points;
-    this.open = open;
-  }
-
-  /**
-   * Required by the driver.
-   */
-  public PGpath() {
-    type = "path";
-  }
-
-  /**
-   * @param s definition of the path in PostgreSQL's syntax.
-   * @throws SQLException on conversion failure
-   */
-  public PGpath(String s) throws SQLException {
-    this();
-    setValue(s);
-  }
-
-  /**
-   * @param s Definition of the path in PostgreSQL's syntax
-   * @throws SQLException on conversion failure
-   */
-  @Override
-  public void setValue(String s) throws SQLException {
-    if (s == null) {
-      points = null;
-      return;
-    }
-    // First test to see if were open
-    if (s.startsWith("[") && s.endsWith("]")) {
-      open = true;
-      s = PGtokenizer.removeBox(s);
-    } else if (s.startsWith("(") && s.endsWith(")")) {
-      open = false;
-      s = PGtokenizer.removePara(s);
-    } else {
-      throw new PSQLException(GT.tr("Cannot tell if path is open or closed: {0}.", s),
-          PSQLState.DATA_TYPE_MISMATCH);
+    /**
+     * @param points the PGpoints that define the path
+     * @param open   True if the path is open, false if closed
+     */
+    public PGpath(PGpoint[] points, boolean open) {
+        this();
+        this.points = points;
+        this.open = open;
     }
 
-    PGtokenizer t = new PGtokenizer(s, ',');
-    int npoints = t.getSize();
-    PGpoint[] points = new PGpoint[npoints];
-    this.points = points;
-    for (int p = 0; p < npoints; p++) {
-      points[p] = new PGpoint(t.getToken(p));
+    /**
+     * Required by the driver.
+     */
+    public PGpath() {
+        type = "path";
     }
-  }
 
-  /**
-   * @param obj Object to compare with
-   * @return true if the two paths are identical
-   */
-  @Override
-  public boolean equals(Object obj) {
-    if (obj instanceof PGpath) {
-      PGpath p = (PGpath) obj;
+    /**
+     * @param s definition of the path in PostgreSQL's syntax.
+     * @throws SQLException on conversion failure
+     */
+    public PGpath(String s) throws SQLException {
+        this();
+        setValue(s);
+    }
 
-      PGpoint[] points = this.points;
-      PGpoint[] pPoints = p.points;
-      if (points == null) {
-        return pPoints == null;
-      } else if (pPoints == null) {
-        return false;
-      }
+    /**
+     * @param obj Object to compare with
+     * @return true if the two paths are identical
+     */
+    @Override
+    public boolean equals(Object obj) {
+        if (obj instanceof PGpath) {
+            PGpath p = (PGpath) obj;
 
-      if (p.open != open) {
-        return false;
-      }
+            PGpoint[] points = this.points;
+            PGpoint[] pPoints = p.points;
+            if (points == null) {
+                return pPoints == null;
+            } else if (pPoints == null) {
+                return false;
+            }
 
-      if (pPoints.length != points.length) {
-        return false;
-      }
+            if (p.open != open) {
+                return false;
+            }
 
-      for (int i = 0; i < points.length; i++) {
-        if (!points[i].equals(pPoints[i])) {
-          return false;
+            if (pPoints.length != points.length) {
+                return false;
+            }
+
+            for (int i = 0; i < points.length; i++) {
+                if (!points[i].equals(pPoints[i])) {
+                    return false;
+                }
+            }
+
+            return true;
         }
-      }
-
-      return true;
+        return false;
     }
-    return false;
-  }
 
-  @Override
-  public int hashCode() {
-    PGpoint[] points = this.points;
-    if (points == null) {
-      return 0;
+    @Override
+    public int hashCode() {
+        PGpoint[] points = this.points;
+        if (points == null) {
+            return 0;
+        }
+        // XXX not very good..
+        int hash = open ? 1231 : 1237;
+        for (int i = 0; i < points.length && i < 5; i++) {
+            hash = hash * 31 + points[i].hashCode();
+        }
+        return hash;
     }
-    // XXX not very good..
-    int hash = open ? 1231 : 1237;
-    for (int i = 0; i < points.length && i < 5; i++) {
-      hash = hash * 31 + points[i].hashCode();
+
+    @Override
+    public Object clone() throws CloneNotSupportedException {
+        PGpath newPGpath = (PGpath) super.clone();
+        if (newPGpath.points != null) {
+            PGpoint[] newPoints = newPGpath.points.clone();
+            newPGpath.points = newPoints;
+            for (int i = 0; i < newPGpath.points.length; i++) {
+                newPoints[i] = (PGpoint) newPGpath.points[i].clone();
+            }
+        }
+        return newPGpath;
     }
-    return hash;
-  }
 
-  @Override
-  public Object clone() throws CloneNotSupportedException {
-    PGpath newPGpath = (PGpath) super.clone();
-    if (newPGpath.points != null) {
-      PGpoint[] newPoints = newPGpath.points.clone();
-      newPGpath.points = newPoints;
-      for (int i = 0; i < newPGpath.points.length; i++) {
-        newPoints[i] = (PGpoint) newPGpath.points[i].clone();
-      }
+    /**
+     * This returns the path in the syntax expected by org.postgresql.
+     *
+     * @return the value of this object
+     */
+    @Override
+    public String getValue() {
+        PGpoint[] points = this.points;
+        if (points == null) {
+            return null;
+        }
+        StringBuilder b = new StringBuilder(open ? "[" : "(");
+
+        for (int p = 0; p < points.length; p++) {
+            if (p > 0) {
+                b.append(",");
+            }
+            b.append(points[p].toString());
+        }
+        b.append(open ? "]" : ")");
+
+        return b.toString();
     }
-    return newPGpath;
-  }
 
-  /**
-   * This returns the path in the syntax expected by org.postgresql.
-   * @return the value of this object
-   */
-  @Override
-  public String getValue() {
-    PGpoint[] points = this.points;
-    if (points == null) {
-      return null;
+    /**
+     * @param s Definition of the path in PostgreSQL's syntax
+     * @throws SQLException on conversion failure
+     */
+    @Override
+    public void setValue(String s) throws SQLException {
+        if (s == null) {
+            points = null;
+            return;
+        }
+        // First test to see if we're open
+        if (s.startsWith("[") && s.endsWith("]")) {
+            open = true;
+            s = PGtokenizer.removeBox(s);
+        } else if (s.startsWith("(") && s.endsWith(")")) {
+            open = false;
+            s = PGtokenizer.removePara(s);
+        } else {
+            throw new PSQLException(GT.tr("Cannot tell if path is open or closed: {0}.", s),
+                    PSQLState.DATA_TYPE_MISMATCH);
+        }
+
+        PGtokenizer t = new PGtokenizer(s, ',');
+        int npoints = t.getSize();
+        PGpoint[] points = new PGpoint[npoints];
+        this.points = points;
+        for (int p = 0; p < npoints; p++) {
+            points[p] = new PGpoint(t.getToken(p));
+        }
     }
-    StringBuilder b = new StringBuilder(open ? "[" : "(");
 
-    for (int p = 0; p < points.length; p++) {
-      if (p > 0) {
-        b.append(",");
-      }
-      b.append(points[p].toString());
+    public boolean isOpen() {
+        return open && points != null;
     }
-    b.append(open ? "]" : ")");
 
-    return b.toString();
-  }
+    public boolean isClosed() {
+        return !open && points != null;
+    }
 
-  public boolean isOpen() {
-    return open && points != null;
-  }
+    public void closePath() {
+        open = false;
+    }
 
-  public boolean isClosed() {
-    return !open && points != null;
-  }
-
-  public void closePath() {
-    open = false;
-  }
-
-  public void openPath() {
-    open = true;
-  }
+    public void openPath() {
+        open = true;
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/geometric/PGpoint.java b/pgjdbc/src/main/java/org/postgresql/geometric/PGpoint.java
index 7744a30..e7138cf 100644
--- a/pgjdbc/src/main/java/org/postgresql/geometric/PGpoint.java
+++ b/pgjdbc/src/main/java/org/postgresql/geometric/PGpoint.java
@@ -23,190 +23,190 @@ import java.sql.SQLException;
  */
 @SuppressWarnings("serial")
 public class PGpoint extends PGobject implements PGBinaryObject, Serializable, Cloneable {
-  /**
-   * The X coordinate of the point.
-   */
-  public double x;
+    /**
+     * The X coordinate of the point.
+     */
+    public double x;
 
-  /**
-   * The Y coordinate of the point.
-   */
-  public double y;
+    /**
+     * The Y coordinate of the point.
+     */
+    public double y;
 
-  /**
-   * True if the point represents {@code null::point}.
-   */
-  public boolean isNull;
+    /**
+     * True if the point represents {@code null::point}.
+     */
+    public boolean isNull;
 
-  /**
-   * @param x coordinate
-   * @param y coordinate
-   */
-  public PGpoint(double x, double y) {
-    this();
-    this.x = x;
-    this.y = y;
-  }
-
-  /**
-   * This is called mainly from the other geometric types, when a point is embedded within their
-   * definition.
-   *
-   * @param value Definition of this point in PostgreSQL's syntax
-   * @throws SQLException if something goes wrong
-   */
-  public PGpoint(String value) throws SQLException {
-    this();
-    setValue(value);
-  }
-
-  /**
-   * Required by the driver.
-   */
-  public PGpoint() {
-    type = "point";
-  }
-
-  /**
-   * @param s Definition of this point in PostgreSQL's syntax
-   * @throws SQLException on conversion failure
-   */
-  @Override
-  public void setValue(String s) throws SQLException {
-    isNull = s == null;
-    if (s == null) {
-      return;
+    /**
+     * @param x coordinate
+     * @param y coordinate
+     */
+    public PGpoint(double x, double y) {
+        this();
+        this.x = x;
+        this.y = y;
     }
-    PGtokenizer t = new PGtokenizer(PGtokenizer.removePara(s), ',');
-    try {
-      x = Double.parseDouble(t.getToken(0));
-      y = Double.parseDouble(t.getToken(1));
-    } catch (NumberFormatException e) {
-      throw new PSQLException(GT.tr("Conversion to type {0} failed: {1}.", type, s),
-          PSQLState.DATA_TYPE_MISMATCH, e);
+
+    /**
+     * This is called mainly from the other geometric types, when a point is embedded within their
+     * definition.
+     *
+     * @param value Definition of this point in PostgreSQL's syntax
+     * @throws SQLException if something goes wrong
+     */
+    public PGpoint(String value) throws SQLException {
+        this();
+        setValue(value);
     }
-  }
 
-  /**
-   * @param b Definition of this point in PostgreSQL's binary syntax
-   */
-  @Override
-  public void setByteValue(byte[] b, int offset) {
-    this.isNull = false;
-    x = ByteConverter.float8(b, offset);
-    y = ByteConverter.float8(b, offset + 8);
-  }
+    /**
+     * Required by the driver.
+     */
+    public PGpoint() {
+        type = "point";
+    }
 
-  /**
-   * @param obj Object to compare with
-   * @return true if the two points are identical
-   */
-  @Override
-  public boolean equals(Object obj) {
-    if (obj instanceof PGpoint) {
-      PGpoint p = (PGpoint) obj;
-      if (isNull) {
-        return p.isNull;
-      } else if (p.isNull) {
+    /**
+     * @param b Definition of this point in PostgreSQL's binary syntax
+     */
+    @Override
+    public void setByteValue(byte[] b, int offset) {
+        this.isNull = false;
+        x = ByteConverter.float8(b, offset);
+        y = ByteConverter.float8(b, offset + 8);
+    }
+
+    /**
+     * @param obj Object to compare with
+     * @return true if the two points are identical
+     */
+    @Override
+    public boolean equals(Object obj) {
+        if (obj instanceof PGpoint) {
+            PGpoint p = (PGpoint) obj;
+            if (isNull) {
+                return p.isNull;
+            } else if (p.isNull) {
+                return false;
+            }
+            return x == p.x && y == p.y;
+        }
         return false;
-      }
-      return x == p.x && y == p.y;
     }
-    return false;
-  }
 
-  @Override
-  public int hashCode() {
-    if (isNull) {
-      return 0;
+    @Override
+    public int hashCode() {
+        if (isNull) {
+            return 0;
+        }
+        long v1 = Double.doubleToLongBits(x);
+        long v2 = Double.doubleToLongBits(y);
+        return (int) (v1 ^ v2 ^ (v1 >>> 32) ^ (v2 >>> 32));
     }
-    long v1 = Double.doubleToLongBits(x);
-    long v2 = Double.doubleToLongBits(y);
-    return (int) (v1 ^ v2 ^ (v1 >>> 32) ^ (v2 >>> 32));
-  }
 
-  /**
-   * @return the PGpoint in the syntax expected by org.postgresql
-   */
-  @Override
-  public String getValue() {
-    return isNull ? null : "(" + x + "," + y + ")";
-  }
-
-  @Override
-  public int lengthInBytes() {
-    return isNull ? 0 : 16;
-  }
-
-  /**
-   * Populate the byte array with PGpoint in the binary syntax expected by org.postgresql.
-   */
-  @Override
-  public void toBytes(byte[] b, int offset) {
-    if (isNull) {
-      return;
+    /**
+     * @return the PGpoint in the syntax expected by org.postgresql
+     */
+    @Override
+    public String getValue() {
+        return isNull ? null : "(" + x + "," + y + ")";
     }
-    ByteConverter.float8(b, offset, x);
-    ByteConverter.float8(b, offset + 8, y);
-  }
 
-  /**
-   * Translate the point by the supplied amount.
-   *
-   * @param x integer amount to add on the x axis
-   * @param y integer amount to add on the y axis
-   */
-  public void translate(int x, int y) {
-    translate((double) x, (double) y);
-  }
+    /**
+     * @param s Definition of this point in PostgreSQL's syntax
+     * @throws SQLException on conversion failure
+     */
+    @Override
+    public void setValue(String s) throws SQLException {
+        isNull = s == null;
+        if (s == null) {
+            return;
+        }
+        PGtokenizer t = new PGtokenizer(PGtokenizer.removePara(s), ',');
+        try {
+            x = Double.parseDouble(t.getToken(0));
+            y = Double.parseDouble(t.getToken(1));
+        } catch (NumberFormatException e) {
+            throw new PSQLException(GT.tr("Conversion to type {0} failed: {1}.", type, s),
+                    PSQLState.DATA_TYPE_MISMATCH, e);
+        }
+    }
 
-  /**
-   * Translate the point by the supplied amount.
-   *
-   * @param x double amount to add on the x axis
-   * @param y double amount to add on the y axis
-   */
-  public void translate(double x, double y) {
-    this.isNull = false;
-    this.x += x;
-    this.y += y;
-  }
+    @Override
+    public int lengthInBytes() {
+        return isNull ? 0 : 16;
+    }
 
-  /**
-   * Moves the point to the supplied coordinates.
-   *
-   * @param x integer coordinate
-   * @param y integer coordinate
-   */
-  public void move(int x, int y) {
-    setLocation(x, y);
-  }
+    /**
+     * Populate the byte array with PGpoint in the binary syntax expected by org.postgresql.
+     */
+    @Override
+    public void toBytes(byte[] b, int offset) {
+        if (isNull) {
+            return;
+        }
+        ByteConverter.float8(b, offset, x);
+        ByteConverter.float8(b, offset + 8, y);
+    }
 
-  /**
-   * Moves the point to the supplied coordinates.
-   *
-   * @param x double coordinate
-   * @param y double coordinate
-   */
-  public void move(double x, double y) {
-    this.isNull = false;
-    this.x = x;
-    this.y = y;
-  }
+    /**
+     * Translate the point by the supplied amount.
+     *
+     * @param x integer amount to add on the x axis
+     * @param y integer amount to add on the y axis
+     */
+    public void translate(int x, int y) {
+        translate((double) x, (double) y);
+    }
 
-  /**
-   * Moves the point to the supplied coordinates. refer to java.awt.Point for description of this.
-   *
-   * @param x integer coordinate
-   * @param y integer coordinate
-   */
-  public void setLocation(int x, int y) {
-    move((double) x, (double) y);
-  }
+    /**
+     * Translate the point by the supplied amount.
+     *
+     * @param x double amount to add on the x axis
+     * @param y double amount to add on the y axis
+     */
+    public void translate(double x, double y) {
+        this.isNull = false;
+        this.x += x;
+        this.y += y;
+    }
 
-  @Override
-  public Object clone() throws CloneNotSupportedException {
-    // squid:S2157 "Cloneables" should implement "clone
-    return super.clone();
-  }
+    /**
+     * Moves the point to the supplied coordinates.
+     *
+     * @param x integer coordinate
+     * @param y integer coordinate
+     */
+    public void move(int x, int y) {
+        setLocation(x, y);
+    }
+
+    /**
+     * Moves the point to the supplied coordinates.
+     *
+     * @param x double coordinate
+     * @param y double coordinate
+     */
+    public void move(double x, double y) {
+        this.isNull = false;
+        this.x = x;
+        this.y = y;
+    }
+
+    /**
+     * Moves the point to the supplied coordinates. refer to java.awt.Point for description of this.
+     *
+     * @param x integer coordinate
+     * @param y integer coordinate
+     */
+    public void setLocation(int x, int y) {
+        move((double) x, (double) y);
+    }
+
+    @Override
+    public Object clone() throws CloneNotSupportedException {
+        // squid:S2157 "Cloneables" should implement "clone"
+        return super.clone();
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/geometric/PGpolygon.java b/pgjdbc/src/main/java/org/postgresql/geometric/PGpolygon.java
index 3a0d7b9..f283531 100644
--- a/pgjdbc/src/main/java/org/postgresql/geometric/PGpolygon.java
+++ b/pgjdbc/src/main/java/org/postgresql/geometric/PGpolygon.java
@@ -16,136 +16,136 @@ import java.sql.SQLException;
  */
 @SuppressWarnings("serial")
 public class PGpolygon extends PGobject implements Serializable, Cloneable {
-  /**
-   * The points defining the polygon.
-   */
-  public PGpoint [] points;
+    /**
+     * The points defining the polygon.
+     */
+    public PGpoint[] points;
 
-  /**
-   * Creates a polygon using an array of PGpoints.
-   *
-   * @param points the points defining the polygon
-   */
-  public PGpolygon(PGpoint[] points) {
-    this();
-    this.points = points;
-  }
-
-  /**
-   * @param s definition of the polygon in PostgreSQL's syntax.
-   * @throws SQLException on conversion failure
-   */
-  public PGpolygon(String s) throws SQLException {
-    this();
-    setValue(s);
-  }
-
-  /**
-   * Required by the driver.
-   */
-  public PGpolygon() {
-    type = "polygon";
-  }
-
-  /**
-   * @param s Definition of the polygon in PostgreSQL's syntax
-   * @throws SQLException on conversion failure
-   */
-  @Override
-  public void setValue(String s) throws SQLException {
-    if (s == null) {
-      points = null;
-      return;
+    /**
+     * Creates a polygon using an array of PGpoints.
+     *
+     * @param points the points defining the polygon
+     */
+    public PGpolygon(PGpoint[] points) {
+        this();
+        this.points = points;
     }
-    PGtokenizer t = new PGtokenizer(PGtokenizer.removePara(s), ',');
-    int npoints = t.getSize();
-    PGpoint[] points = this.points;
-    if (points == null || points.length != npoints) {
-      this.points = points = new PGpoint[npoints];
+
+    /**
+     * @param s definition of the polygon in PostgreSQL's syntax.
+     * @throws SQLException on conversion failure
+     */
+    public PGpolygon(String s) throws SQLException {
+        this();
+        setValue(s);
     }
-    for (int p = 0; p < npoints; p++) {
-      points[p] = new PGpoint(t.getToken(p));
+
+    /**
+     * Required by the driver.
+     */
+    public PGpolygon() {
+        type = "polygon";
     }
-  }
 
-  /**
-   * @param obj Object to compare with
-   * @return true if the two polygons are identical
-   */
-  @Override
-  public boolean equals(Object obj) {
-    if (obj instanceof PGpolygon) {
-      PGpolygon p = (PGpolygon) obj;
+    /**
+     * @param obj Object to compare with
+     * @return true if the two polygons are identical
+     */
+    @Override
+    public boolean equals(Object obj) {
+        if (obj instanceof PGpolygon) {
+            PGpolygon p = (PGpolygon) obj;
 
-      PGpoint[] points = this.points;
-      PGpoint[] pPoints = p.points;
-      if (points == null) {
-        return pPoints == null;
-      } else if (pPoints == null) {
-        return false;
-      }
+            PGpoint[] points = this.points;
+            PGpoint[] pPoints = p.points;
+            if (points == null) {
+                return pPoints == null;
+            } else if (pPoints == null) {
+                return false;
+            }
 
-      if (pPoints.length != points.length) {
-        return false;
-      }
+            if (pPoints.length != points.length) {
+                return false;
+            }
 
-      for (int i = 0; i < points.length; i++) {
-        if (!points[i].equals(pPoints[i])) {
-          return false;
+            for (int i = 0; i < points.length; i++) {
+                if (!points[i].equals(pPoints[i])) {
+                    return false;
+                }
+            }
+
+            return true;
         }
-      }
-
-      return true;
+        return false;
     }
-    return false;
-  }
 
-  @Override
-  public int hashCode() {
-    int hash = 0;
-    PGpoint[] points = this.points;
-    if (points == null) {
-      return hash;
-    }
-    for (int i = 0; i < points.length && i < 5; i++) {
-      hash = hash * 31 + points[i].hashCode();
-    }
-    return hash;
-  }
-
-  @Override
-  public Object clone() throws CloneNotSupportedException {
-    PGpolygon newPGpolygon = (PGpolygon) super.clone();
-    if (newPGpolygon.points != null) {
-      PGpoint[] newPoints = newPGpolygon.points.clone();
-      newPGpolygon.points = newPoints;
-      for (int i = 0; i < newPGpolygon.points.length; i++) {
-        if (newPGpolygon.points[i] != null) {
-          newPoints[i] = (PGpoint) newPGpolygon.points[i].clone();
+    @Override
+    public int hashCode() {
+        int hash = 0;
+        PGpoint[] points = this.points;
+        if (points == null) {
+            return hash;
         }
-      }
+        for (int i = 0; i < points.length && i < 5; i++) {
+            hash = hash * 31 + points[i].hashCode();
+        }
+        return hash;
     }
-    return newPGpolygon;
-  }
 
-  /**
-   * @return the PGpolygon in the syntax expected by org.postgresql
-   */
-  @Override
-  public String getValue() {
-    PGpoint[] points = this.points;
-    if (points == null) {
-      return null;
+    @Override
+    public Object clone() throws CloneNotSupportedException {
+        PGpolygon newPGpolygon = (PGpolygon) super.clone();
+        if (newPGpolygon.points != null) {
+            PGpoint[] newPoints = newPGpolygon.points.clone();
+            newPGpolygon.points = newPoints;
+            for (int i = 0; i < newPGpolygon.points.length; i++) {
+                if (newPGpolygon.points[i] != null) {
+                    newPoints[i] = (PGpoint) newPGpolygon.points[i].clone();
+                }
+            }
+        }
+        return newPGpolygon;
     }
-    StringBuilder b = new StringBuilder();
-    b.append("(");
-    for (int p = 0; p < points.length; p++) {
-      if (p > 0) {
-        b.append(",");
-      }
-      b.append(points[p].toString());
+
+    /**
+     * @return the PGpolygon in the syntax expected by org.postgresql
+     */
+    @Override
+    public String getValue() {
+        PGpoint[] points = this.points;
+        if (points == null) {
+            return null;
+        }
+        StringBuilder b = new StringBuilder();
+        b.append("(");
+        for (int p = 0; p < points.length; p++) {
+            if (p > 0) {
+                b.append(",");
+            }
+            b.append(points[p].toString());
+        }
+        b.append(")");
+        return b.toString();
+    }
+
+    /**
+     * @param s Definition of the polygon in PostgreSQL's syntax
+     * @throws SQLException on conversion failure
+     */
+    @Override
+    public void setValue(String s) throws SQLException {
+        if (s == null) {
+            points = null;
+            return;
+        }
+        PGtokenizer t = new PGtokenizer(PGtokenizer.removePara(s), ',');
+        int npoints = t.getSize();
+        PGpoint[] points = this.points;
+        if (points == null || points.length != npoints) {
+            this.points = points = new PGpoint[npoints];
+        }
+        for (int p = 0; p < npoints; p++) {
+            points[p] = new PGpoint(t.getToken(p));
+        }
     }
-    b.append(")");
-    return b.toString();
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/gss/GSSCallbackHandler.java b/pgjdbc/src/main/java/org/postgresql/gss/GSSCallbackHandler.java
index 9ec36fe..8c09381 100644
--- a/pgjdbc/src/main/java/org/postgresql/gss/GSSCallbackHandler.java
+++ b/pgjdbc/src/main/java/org/postgresql/gss/GSSCallbackHandler.java
@@ -6,7 +6,6 @@
 package org.postgresql.gss;
 
 import java.io.IOException;
-
 import javax.security.auth.callback.Callback;
 import javax.security.auth.callback.CallbackHandler;
 import javax.security.auth.callback.NameCallback;
@@ -20,44 +19,44 @@ import javax.security.auth.callback.UnsupportedCallbackException;
 
 class GSSCallbackHandler implements CallbackHandler {
 
-  private final String user;
-  private final char [] password;
+    private final String user;
+    private final char[] password;
 
-  GSSCallbackHandler(String user, char [] password) {
-    this.user = user;
-    this.password = password;
-  }
-
-  @Override
-  public void handle(Callback[] callbacks) throws IOException, UnsupportedCallbackException {
-    for (Callback callback : callbacks) {
-      if (callback instanceof TextOutputCallback) {
-        TextOutputCallback toc = (TextOutputCallback) callback;
-        switch (toc.getMessageType()) {
-          case TextOutputCallback.INFORMATION:
-            System.out.println("INFO: " + toc.getMessage());
-            break;
-          case TextOutputCallback.ERROR:
-            System.out.println("ERROR: " + toc.getMessage());
-            break;
-          case TextOutputCallback.WARNING:
-            System.out.println("WARNING: " + toc.getMessage());
-            break;
-          default:
-            throw new IOException("Unsupported message type: " + toc.getMessageType());
-        }
-      } else if (callback instanceof NameCallback) {
-        NameCallback nc = (NameCallback) callback;
-        nc.setName(user);
-      } else if (callback instanceof PasswordCallback) {
-        PasswordCallback pc = (PasswordCallback) callback;
-        if (password == null) {
-          throw new IOException("No cached kerberos ticket found and no password supplied.");
-        }
-        pc.setPassword(password);
-      } else {
-        throw new UnsupportedCallbackException(callback, "Unrecognized Callback");
-      }
+    GSSCallbackHandler(String user, char[] password) {
+        this.user = user;
+        this.password = password;
+    }
+
+    @Override
+    public void handle(Callback[] callbacks) throws IOException, UnsupportedCallbackException {
+        for (Callback callback : callbacks) {
+            if (callback instanceof TextOutputCallback) {
+                TextOutputCallback toc = (TextOutputCallback) callback;
+                switch (toc.getMessageType()) {
+                    case TextOutputCallback.INFORMATION:
+                        System.out.println("INFO: " + toc.getMessage());
+                        break;
+                    case TextOutputCallback.ERROR:
+                        System.out.println("ERROR: " + toc.getMessage());
+                        break;
+                    case TextOutputCallback.WARNING:
+                        System.out.println("WARNING: " + toc.getMessage());
+                        break;
+                    default:
+                        throw new IOException("Unsupported message type: " + toc.getMessageType());
+                }
+            } else if (callback instanceof NameCallback) {
+                NameCallback nc = (NameCallback) callback;
+                nc.setName(user);
+            } else if (callback instanceof PasswordCallback) {
+                PasswordCallback pc = (PasswordCallback) callback;
+                if (password == null) {
+                    throw new IOException("No cached kerberos ticket found and no password supplied.");
+                }
+                pc.setPassword(password);
+            } else {
+                throw new UnsupportedCallbackException(callback, "Unrecognized Callback");
+            }
+        }
     }
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/gss/GSSInputStream.java b/pgjdbc/src/main/java/org/postgresql/gss/GSSInputStream.java
index 2007b82..e45d711 100644
--- a/pgjdbc/src/main/java/org/postgresql/gss/GSSInputStream.java
+++ b/pgjdbc/src/main/java/org/postgresql/gss/GSSInputStream.java
@@ -5,69 +5,68 @@
 
 package org.postgresql.gss;
 
+import java.io.IOException;
+import java.io.InputStream;
 import org.ietf.jgss.GSSContext;
 import org.ietf.jgss.GSSException;
 import org.ietf.jgss.MessageProp;
 
-import java.io.IOException;
-import java.io.InputStream;
-
 public class GSSInputStream extends InputStream {
-  private final GSSContext gssContext;
-  private final MessageProp messageProp;
-  private final InputStream wrapped;
-  byte [] unencrypted;
-  int unencryptedPos;
-  int unencryptedLength;
+    private final GSSContext gssContext;
+    private final MessageProp messageProp;
+    private final InputStream wrapped;
+    byte[] unencrypted;
+    int unencryptedPos;
+    int unencryptedLength;
 
-  public GSSInputStream(InputStream wrapped, GSSContext gssContext, MessageProp messageProp) {
-    this.wrapped = wrapped;
-    this.gssContext = gssContext;
-    this.messageProp = messageProp;
-  }
+    public GSSInputStream(InputStream wrapped, GSSContext gssContext, MessageProp messageProp) {
+        this.wrapped = wrapped;
+        this.gssContext = gssContext;
+        this.messageProp = messageProp;
+    }
 
-  @Override
-  public int read() throws IOException {
-    return 0;
-  }
+    @Override
+    public int read() throws IOException {
+        return 0;
+    }
 
-  @Override
-  public int read(byte [] buffer, int pos, int len) throws IOException {
-    byte[] int4Buf = new byte[4];
-    int encryptedLength;
-    int copyLength = 0;
+    @Override
+    public int read(byte[] buffer, int pos, int len) throws IOException {
+        byte[] int4Buf = new byte[4];
+        int encryptedLength;
+        int copyLength = 0;
 
-    if ( unencryptedLength > 0 ) {
-      copyLength = Math.min(len, unencryptedLength);
-      System.arraycopy(unencrypted, unencryptedPos, buffer, pos, copyLength);
-      unencryptedLength -= copyLength;
-      unencryptedPos += copyLength;
-    } else {
-      if (wrapped.read(int4Buf, 0, 4) == 4 ) {
+        if (unencryptedLength > 0) {
+            copyLength = Math.min(len, unencryptedLength);
+            System.arraycopy(unencrypted, unencryptedPos, buffer, pos, copyLength);
+            unencryptedLength -= copyLength;
+            unencryptedPos += copyLength;
+        } else {
+            if (wrapped.read(int4Buf, 0, 4) == 4) {
 
-        encryptedLength = (int4Buf[0] & 0xFF) << 24 | (int4Buf[1] & 0xFF) << 16 | (int4Buf[2] & 0xFF) << 8
-            | int4Buf[3] & 0xFF;
+                encryptedLength = (int4Buf[0] & 0xFF) << 24 | (int4Buf[1] & 0xFF) << 16 | (int4Buf[2] & 0xFF) << 8
+                        | int4Buf[3] & 0xFF;
 
-        byte[] encryptedBuffer = new byte[encryptedLength];
-        wrapped.read(encryptedBuffer, 0, encryptedLength);
+                byte[] encryptedBuffer = new byte[encryptedLength];
+                wrapped.read(encryptedBuffer, 0, encryptedLength);
 
-        try {
-          byte[] unencrypted = gssContext.unwrap(encryptedBuffer, 0, encryptedLength, messageProp);
-          this.unencrypted = unencrypted;
-          unencryptedLength = unencrypted.length;
-          unencryptedPos = 0;
+                try {
+                    byte[] unencrypted = gssContext.unwrap(encryptedBuffer, 0, encryptedLength, messageProp);
+                    this.unencrypted = unencrypted;
+                    unencryptedLength = unencrypted.length;
+                    unencryptedPos = 0;
 
-          copyLength = Math.min(len, unencrypted.length);
-          System.arraycopy(unencrypted, unencryptedPos, buffer, pos, copyLength);
-          unencryptedLength -= copyLength;
-          unencryptedPos += copyLength;
+                    copyLength = Math.min(len, unencrypted.length);
+                    System.arraycopy(unencrypted, unencryptedPos, buffer, pos, copyLength);
+                    unencryptedLength -= copyLength;
+                    unencryptedPos += copyLength;
 
-        } catch (GSSException e) {
-          throw new IOException(e);
+                } catch (GSSException e) {
+                    throw new IOException(e);
+                }
+                return copyLength;
+            }
         }
         return copyLength;
-      }
     }
-    return copyLength;
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/gss/GSSOutputStream.java b/pgjdbc/src/main/java/org/postgresql/gss/GSSOutputStream.java
index 197ddb7..c3a4f31 100644
--- a/pgjdbc/src/main/java/org/postgresql/gss/GSSOutputStream.java
+++ b/pgjdbc/src/main/java/org/postgresql/gss/GSSOutputStream.java
@@ -5,81 +5,80 @@
 
 package org.postgresql.gss;
 
+import java.io.IOException;
+import java.io.OutputStream;
 import org.ietf.jgss.GSSContext;
 import org.ietf.jgss.GSSException;
 import org.ietf.jgss.MessageProp;
 
-import java.io.IOException;
-import java.io.OutputStream;
-
 public class GSSOutputStream extends OutputStream {
-  private final GSSContext gssContext;
-  private final MessageProp messageProp;
-  private final byte[] buffer;
-  private final byte[] int4Buf = new byte[4];
-  private int index;
-  private final OutputStream wrapped;
+    private final GSSContext gssContext;
+    private final MessageProp messageProp;
+    private final byte[] buffer;
+    private final byte[] int4Buf = new byte[4];
+    private final OutputStream wrapped;
+    private int index;
 
-  public GSSOutputStream(OutputStream out, GSSContext gssContext, MessageProp messageProp, int bufferSize) {
-    wrapped = out;
-    this.gssContext = gssContext;
-    this.messageProp = messageProp;
-    buffer = new byte[bufferSize];
-  }
-
-  @Override
-  public void write(int b) throws IOException {
-    buffer[index++] = (byte) b;
-    if (index >= buffer.length) {
-      flush();
+    public GSSOutputStream(OutputStream out, GSSContext gssContext, MessageProp messageProp, int bufferSize) {
+        wrapped = out;
+        this.gssContext = gssContext;
+        this.messageProp = messageProp;
+        buffer = new byte[bufferSize];
     }
-  }
 
-  @Override
-  public void write(byte[] buf) throws IOException {
-    write(buf, 0, buf.length);
-  }
-
-  @Override
-  public void write(byte[] b, int pos, int len) throws IOException {
-    int max;
-
-    while ( len > 0 ) {
-      int roomToWrite = buffer.length - index;
-      if ( len < roomToWrite ) {
-        System.arraycopy(b, pos, buffer, index, len);
-        index += len;
-        len -= roomToWrite;
-      } else {
-        System.arraycopy(b, pos, buffer, index, roomToWrite);
-        index += roomToWrite;
-        len -= roomToWrite;
-      }
-      if (roomToWrite == 0) {
-        flush();
-      }
+    @Override
+    public void write(int b) throws IOException {
+        buffer[index++] = (byte) b;
+        if (index >= buffer.length) {
+            flush();
+        }
     }
-  }
 
-  @Override
-  public void flush() throws IOException {
-    try {
-      byte[] token = gssContext.wrap(buffer, 0, index, messageProp);
-      sendInteger4Raw(token.length);
-      wrapped.write(token, 0, token.length);
-      index = 0;
-    } catch ( GSSException ex ) {
-      throw new IOException(ex);
+    @Override
+    public void write(byte[] buf) throws IOException {
+        write(buf, 0, buf.length);
     }
-    wrapped.flush();
-  }
 
-  private void sendInteger4Raw(int val) throws IOException {
-    int4Buf[0] = (byte) (val >>> 24);
-    int4Buf[1] = (byte) (val >>> 16);
-    int4Buf[2] = (byte) (val >>> 8);
-    int4Buf[3] = (byte) (val);
-    wrapped.write(int4Buf);
-  }
+    @Override
+    public void write(byte[] b, int pos, int len) throws IOException {
+        int max;
+
+        while (len > 0) {
+            int roomToWrite = buffer.length - index;
+            if (len < roomToWrite) {
+                System.arraycopy(b, pos, buffer, index, len);
+                index += len;
+                len -= roomToWrite;
+            } else {
+                System.arraycopy(b, pos, buffer, index, roomToWrite);
+                index += roomToWrite;
+                len -= roomToWrite;
+            }
+            if (roomToWrite == 0) {
+                flush();
+            }
+        }
+    }
+
+    @Override
+    public void flush() throws IOException {
+        try {
+            byte[] token = gssContext.wrap(buffer, 0, index, messageProp);
+            sendInteger4Raw(token.length);
+            wrapped.write(token, 0, token.length);
+            index = 0;
+        } catch (GSSException ex) {
+            throw new IOException(ex);
+        }
+        wrapped.flush();
+    }
+
+    private void sendInteger4Raw(int val) throws IOException {
+        int4Buf[0] = (byte) (val >>> 24);
+        int4Buf[1] = (byte) (val >>> 16);
+        int4Buf[2] = (byte) (val >>> 8);
+        int4Buf[3] = (byte) (val);
+        wrapped.write(int4Buf);
+    }
 
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/gss/GssAction.java b/pgjdbc/src/main/java/org/postgresql/gss/GssAction.java
index b1ad40f..7b4b65d 100644
--- a/pgjdbc/src/main/java/org/postgresql/gss/GssAction.java
+++ b/pgjdbc/src/main/java/org/postgresql/gss/GssAction.java
@@ -5,19 +5,6 @@
 
 package org.postgresql.gss;
 
-import org.postgresql.core.PGStream;
-import org.postgresql.util.GT;
-import org.postgresql.util.PSQLException;
-import org.postgresql.util.PSQLState;
-import org.postgresql.util.ServerErrorMessage;
-
-import org.ietf.jgss.GSSContext;
-import org.ietf.jgss.GSSCredential;
-import org.ietf.jgss.GSSException;
-import org.ietf.jgss.GSSManager;
-import org.ietf.jgss.GSSName;
-import org.ietf.jgss.Oid;
-
 import java.io.IOException;
 import java.security.Principal;
 import java.security.PrivilegedAction;
@@ -26,151 +13,161 @@ import java.util.Set;
 import java.util.concurrent.Callable;
 import java.util.logging.Level;
 import java.util.logging.Logger;
-
 import javax.security.auth.Subject;
+import org.ietf.jgss.GSSContext;
+import org.ietf.jgss.GSSCredential;
+import org.ietf.jgss.GSSException;
+import org.ietf.jgss.GSSManager;
+import org.ietf.jgss.GSSName;
+import org.ietf.jgss.Oid;
+import org.postgresql.core.PGStream;
+import org.postgresql.util.GT;
+import org.postgresql.util.PSQLException;
+import org.postgresql.util.PSQLState;
+import org.postgresql.util.ServerErrorMessage;
 
 class GssAction implements PrivilegedAction<Exception>, Callable<Exception> {
 
-  private static final Logger LOGGER = Logger.getLogger(GssAction.class.getName());
-  private final PGStream pgStream;
-  private final String host;
-  private final String kerberosServerName;
-  private final String user;
-  private final boolean useSpnego;
-  private final Subject subject;
-  private final boolean logServerErrorDetail;
+    private static final Logger LOGGER = Logger.getLogger(GssAction.class.getName());
+    private final PGStream pgStream;
+    private final String host;
+    private final String kerberosServerName;
+    private final String user;
+    private final boolean useSpnego;
+    private final Subject subject;
+    private final boolean logServerErrorDetail;
 
-  GssAction(PGStream pgStream, Subject subject, String host, String user,
-      String kerberosServerName, boolean useSpnego, boolean logServerErrorDetail) {
-    this.pgStream = pgStream;
-    this.subject = subject;
-    this.host = host;
-    this.user = user;
-    this.kerberosServerName = kerberosServerName;
-    this.useSpnego = useSpnego;
-    this.logServerErrorDetail = logServerErrorDetail;
-  }
-
-  private static boolean hasSpnegoSupport(GSSManager manager) throws GSSException {
-    Oid spnego = new Oid("1.3.6.1.5.5.2");
-    Oid[] mechs = manager.getMechs();
-
-    for (Oid mech : mechs) {
-      if (mech.equals(spnego)) {
-        return true;
-      }
+    GssAction(PGStream pgStream, Subject subject, String host, String user,
+              String kerberosServerName, boolean useSpnego, boolean logServerErrorDetail) {
+        this.pgStream = pgStream;
+        this.subject = subject;
+        this.host = host;
+        this.user = user;
+        this.kerberosServerName = kerberosServerName;
+        this.useSpnego = useSpnego;
+        this.logServerErrorDetail = logServerErrorDetail;
     }
 
-    return false;
-  }
+    private static boolean hasSpnegoSupport(GSSManager manager) throws GSSException {
+        Oid spnego = new Oid("1.3.6.1.5.5.2");
+        Oid[] mechs = manager.getMechs();
 
-  @Override
-  public Exception run() {
-    try {
-      GSSManager manager = GSSManager.getInstance();
-      GSSCredential clientCreds = null;
-      Oid[] desiredMechs = new Oid[1];
-
-      //Try to get credential from subject first.
-      GSSCredential gssCredential = null;
-      if (subject != null) {
-        Set<GSSCredential> gssCreds = subject.getPrivateCredentials(GSSCredential.class);
-        if (gssCreds != null && !gssCreds.isEmpty()) {
-          gssCredential = gssCreds.iterator().next();
-        }
-      }
-
-      //If failed to get credential from subject,
-      //then call createCredential to create one.
-      if (gssCredential == null) {
-        if (useSpnego && hasSpnegoSupport(manager)) {
-          desiredMechs[0] = new Oid("1.3.6.1.5.5.2");
-        } else {
-          desiredMechs[0] = new Oid("1.2.840.113554.1.2.2");
-        }
-        String principalName = this.user;
-        if (subject != null) {
-          Set<Principal> principals = subject.getPrincipals();
-          Iterator<Principal> principalIterator = principals.iterator();
-
-          Principal principal = null;
-          if (principalIterator.hasNext()) {
-            principal = principalIterator.next();
-            principalName = principal.getName();
-          }
+        for (Oid mech : mechs) {
+            if (mech.equals(spnego)) {
+                return true;
+            }
         }
 
-        GSSName clientName = manager.createName(principalName, GSSName.NT_USER_NAME);
-        clientCreds = manager.createCredential(clientName, 8 * 3600, desiredMechs,
-            GSSCredential.INITIATE_ONLY);
-      } else {
-        desiredMechs[0] = new Oid("1.2.840.113554.1.2.2");
-        clientCreds = gssCredential;
-      }
-
-      GSSName serverName =
-          manager.createName(kerberosServerName + "@" + host, GSSName.NT_HOSTBASED_SERVICE);
-
-      GSSContext secContext = manager.createContext(serverName, desiredMechs[0], clientCreds,
-          GSSContext.DEFAULT_LIFETIME);
-      secContext.requestMutualAuth(true);
-
-      byte[] inToken = new byte[0];
-      byte[] outToken = null;
-
-      boolean established = false;
-      while (!established) {
-        outToken = secContext.initSecContext(inToken, 0, inToken.length);
-
-        if (outToken != null) {
-          LOGGER.log(Level.FINEST, " FE=> Password(GSS Authentication Token)");
-
-          pgStream.sendChar('p');
-          pgStream.sendInteger4(4 + outToken.length);
-          pgStream.send(outToken);
-          pgStream.flush();
-        }
-
-        if (!secContext.isEstablished()) {
-          int response = pgStream.receiveChar();
-          // Error
-          switch (response) {
-            case 'E':
-              int elen = pgStream.receiveInteger4();
-              ServerErrorMessage errorMsg
-                  = new ServerErrorMessage(pgStream.receiveErrorString(elen - 4));
-
-              LOGGER.log(Level.FINEST, " <=BE ErrorMessage({0})", errorMsg);
-
-              return new PSQLException(errorMsg, logServerErrorDetail);
-            case 'R':
-              LOGGER.log(Level.FINEST, " <=BE AuthenticationGSSContinue");
-              int len = pgStream.receiveInteger4();
-              int type = pgStream.receiveInteger4();
-              // should check type = 8
-              inToken = pgStream.receive(len - 8);
-              break;
-            default:
-              // Unknown/unexpected message type.
-              return new PSQLException(GT.tr("Protocol error.  Session setup failed."),
-                  PSQLState.CONNECTION_UNABLE_TO_CONNECT);
-          }
-        } else {
-          established = true;
-        }
-      }
-
-    } catch (IOException e) {
-      return e;
-    } catch (GSSException gsse) {
-      return new PSQLException(GT.tr("GSS Authentication failed"), PSQLState.CONNECTION_FAILURE,
-          gsse);
+        return false;
     }
-    return null;
-  }
 
-  @Override
-  public Exception call() throws Exception {
-    return run();
-  }
+    @Override
+    public Exception run() {
+        try {
+            GSSManager manager = GSSManager.getInstance();
+            GSSCredential clientCreds = null;
+            Oid[] desiredMechs = new Oid[1];
+
+            // Try to get a credential from the subject first.
+            GSSCredential gssCredential = null;
+            if (subject != null) {
+                Set<GSSCredential> gssCreds = subject.getPrivateCredentials(GSSCredential.class);
+                if (gssCreds != null && !gssCreds.isEmpty()) {
+                    gssCredential = gssCreds.iterator().next();
+                }
+            }
+
+            // If we failed to obtain a credential from the subject,
+            // fall back to createCredential to create one.
+            if (gssCredential == null) {
+                if (useSpnego && hasSpnegoSupport(manager)) {
+                    desiredMechs[0] = new Oid("1.3.6.1.5.5.2");
+                } else {
+                    desiredMechs[0] = new Oid("1.2.840.113554.1.2.2");
+                }
+                String principalName = this.user;
+                if (subject != null) {
+                    Set<Principal> principals = subject.getPrincipals();
+                    Iterator<Principal> principalIterator = principals.iterator();
+
+                    Principal principal = null;
+                    if (principalIterator.hasNext()) {
+                        principal = principalIterator.next();
+                        principalName = principal.getName();
+                    }
+                }
+
+                GSSName clientName = manager.createName(principalName, GSSName.NT_USER_NAME);
+                clientCreds = manager.createCredential(clientName, 8 * 3600, desiredMechs,
+                        GSSCredential.INITIATE_ONLY);
+            } else {
+                desiredMechs[0] = new Oid("1.2.840.113554.1.2.2");
+                clientCreds = gssCredential;
+            }
+
+            GSSName serverName =
+                    manager.createName(kerberosServerName + "@" + host, GSSName.NT_HOSTBASED_SERVICE);
+
+            GSSContext secContext = manager.createContext(serverName, desiredMechs[0], clientCreds,
+                    GSSContext.DEFAULT_LIFETIME);
+            secContext.requestMutualAuth(true);
+
+            byte[] inToken = new byte[0];
+            byte[] outToken = null;
+
+            boolean established = false;
+            while (!established) {
+                outToken = secContext.initSecContext(inToken, 0, inToken.length);
+
+                if (outToken != null) {
+                    LOGGER.log(Level.FINEST, " FE=> Password(GSS Authentication Token)");
+
+                    pgStream.sendChar('p');
+                    pgStream.sendInteger4(4 + outToken.length);
+                    pgStream.send(outToken);
+                    pgStream.flush();
+                }
+
+                if (!secContext.isEstablished()) {
+                    int response = pgStream.receiveChar();
+                    // Error
+                    switch (response) {
+                        case 'E':
+                            int elen = pgStream.receiveInteger4();
+                            ServerErrorMessage errorMsg
+                                    = new ServerErrorMessage(pgStream.receiveErrorString(elen - 4));
+
+                            LOGGER.log(Level.FINEST, " <=BE ErrorMessage({0})", errorMsg);
+
+                            return new PSQLException(errorMsg, logServerErrorDetail);
+                        case 'R':
+                            LOGGER.log(Level.FINEST, " <=BE AuthenticationGSSContinue");
+                            int len = pgStream.receiveInteger4();
+                            int type = pgStream.receiveInteger4();
+                            // TODO: validate that type == 8 (AuthenticationGSSContinue) before reading the token
+                            inToken = pgStream.receive(len - 8);
+                            break;
+                        default:
+                            // Unknown/unexpected message type.
+                            return new PSQLException(GT.tr("Protocol error.  Session setup failed."),
+                                    PSQLState.CONNECTION_UNABLE_TO_CONNECT);
+                    }
+                } else {
+                    established = true;
+                }
+            }
+
+        } catch (IOException e) {
+            return e;
+        } catch (GSSException gsse) {
+            return new PSQLException(GT.tr("GSS Authentication failed"), PSQLState.CONNECTION_FAILURE,
+                    gsse);
+        }
+        return null;
+    }
+
+    @Override
+    public Exception call() throws Exception {
+        return run();
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/gss/GssEncAction.java b/pgjdbc/src/main/java/org/postgresql/gss/GssEncAction.java
index 37cc0d8..525a730 100644
--- a/pgjdbc/src/main/java/org/postgresql/gss/GssEncAction.java
+++ b/pgjdbc/src/main/java/org/postgresql/gss/GssEncAction.java
@@ -5,18 +5,6 @@
 
 package org.postgresql.gss;
 
-import org.postgresql.core.PGStream;
-import org.postgresql.util.GT;
-import org.postgresql.util.PSQLException;
-import org.postgresql.util.PSQLState;
-
-import org.ietf.jgss.GSSContext;
-import org.ietf.jgss.GSSCredential;
-import org.ietf.jgss.GSSException;
-import org.ietf.jgss.GSSManager;
-import org.ietf.jgss.GSSName;
-import org.ietf.jgss.Oid;
-
 import java.io.IOException;
 import java.security.Principal;
 import java.security.PrivilegedAction;
@@ -25,133 +13,142 @@ import java.util.Set;
 import java.util.concurrent.Callable;
 import java.util.logging.Level;
 import java.util.logging.Logger;
-
 import javax.security.auth.Subject;
+import org.ietf.jgss.GSSContext;
+import org.ietf.jgss.GSSCredential;
+import org.ietf.jgss.GSSException;
+import org.ietf.jgss.GSSManager;
+import org.ietf.jgss.GSSName;
+import org.ietf.jgss.Oid;
+import org.postgresql.core.PGStream;
+import org.postgresql.util.GT;
+import org.postgresql.util.PSQLException;
+import org.postgresql.util.PSQLState;
 
 public class GssEncAction implements PrivilegedAction<Exception>, Callable<Exception> {
-  private static final Logger LOGGER = Logger.getLogger(GssAction.class.getName());
-  private final PGStream pgStream;
-  private final String host;
-  private final String user;
-  private final String kerberosServerName;
-  private final boolean useSpnego;
-  private final Subject subject;
-  private final boolean logServerErrorDetail;
+    private static final Logger LOGGER = Logger.getLogger(GssAction.class.getName());
+    private final PGStream pgStream;
+    private final String host;
+    private final String user;
+    private final String kerberosServerName;
+    private final boolean useSpnego;
+    private final Subject subject;
+    private final boolean logServerErrorDetail;
 
-  public GssEncAction(PGStream pgStream, Subject subject,
-      String host, String user,
-      String kerberosServerName, boolean useSpnego, boolean logServerErrorDetail) {
-    this.pgStream = pgStream;
-    this.subject = subject;
-    this.host = host;
-    this.user = user;
-    this.kerberosServerName = kerberosServerName;
-    this.useSpnego = useSpnego;
-    this.logServerErrorDetail = logServerErrorDetail;
-  }
-
-  private static boolean hasSpnegoSupport(GSSManager manager) throws GSSException {
-    Oid spnego = new Oid("1.3.6.1.5.5.2");
-    Oid[] mechs = manager.getMechs();
-
-    for (Oid mech : mechs) {
-      if (mech.equals(spnego)) {
-        return true;
-      }
+    public GssEncAction(PGStream pgStream, Subject subject,
+                        String host, String user,
+                        String kerberosServerName, boolean useSpnego, boolean logServerErrorDetail) {
+        this.pgStream = pgStream;
+        this.subject = subject;
+        this.host = host;
+        this.user = user;
+        this.kerberosServerName = kerberosServerName;
+        this.useSpnego = useSpnego;
+        this.logServerErrorDetail = logServerErrorDetail;
     }
 
-    return false;
-  }
+    private static boolean hasSpnegoSupport(GSSManager manager) throws GSSException {
+        Oid spnego = new Oid("1.3.6.1.5.5.2");
+        Oid[] mechs = manager.getMechs();
 
-  @Override
-  public Exception run() {
-    try {
-      GSSManager manager = GSSManager.getInstance();
-      GSSCredential clientCreds = null;
-      Oid[] desiredMechs = new Oid[1];
-
-      //Try to get credential from subject first.
-      GSSCredential gssCredential = null;
-      if (subject != null) {
-        Set<GSSCredential> gssCreds = subject.getPrivateCredentials(GSSCredential.class);
-        if (gssCreds != null && !gssCreds.isEmpty()) {
-          gssCredential = gssCreds.iterator().next();
-        }
-      }
-
-      //If failed to get credential from subject,
-      //then call createCredential to create one.
-      if (gssCredential == null) {
-        if (useSpnego && hasSpnegoSupport(manager)) {
-          desiredMechs[0] = new Oid("1.3.6.1.5.5.2");
-        } else {
-          desiredMechs[0] = new Oid("1.2.840.113554.1.2.2");
-        }
-        String principalName = this.user;
-        if (subject != null) {
-          Set<Principal> principals = subject.getPrincipals();
-          Iterator<Principal> principalIterator = principals.iterator();
-
-          Principal principal = null;
-          if (principalIterator.hasNext()) {
-            principal = principalIterator.next();
-            principalName = principal.getName();
-          }
+        for (Oid mech : mechs) {
+            if (mech.equals(spnego)) {
+                return true;
+            }
         }
 
-        GSSName clientName = manager.createName(principalName, GSSName.NT_USER_NAME);
-        clientCreds = manager.createCredential(clientName, 8 * 3600, desiredMechs,
-            GSSCredential.INITIATE_ONLY);
-      } else {
-        desiredMechs[0] = new Oid("1.2.840.113554.1.2.2");
-        clientCreds = gssCredential;
-      }
-      GSSName serverName =
-          manager.createName(kerberosServerName + "@" + host, GSSName.NT_HOSTBASED_SERVICE);
-
-      GSSContext secContext = manager.createContext(serverName, desiredMechs[0], clientCreds,
-          GSSContext.DEFAULT_LIFETIME);
-      secContext.requestMutualAuth(true);
-      secContext.requestConf(true);
-      secContext.requestInteg(true);
-
-      byte[] inToken = new byte[0];
-      byte[] outToken = null;
-
-      boolean established = false;
-      while (!established) {
-        outToken = secContext.initSecContext(inToken, 0, inToken.length);
-
-        if (outToken != null) {
-          LOGGER.log(Level.FINEST, " FE=> Password(GSS Authentication Token)");
-
-          pgStream.sendInteger4(outToken.length);
-          pgStream.send(outToken);
-          pgStream.flush();
-        }
-
-        if (!secContext.isEstablished()) {
-          int len = pgStream.receiveInteger4();
-          // should check type = 8
-          inToken = pgStream.receive(len);
-        } else {
-          established = true;
-          pgStream.setSecContext(secContext);
-        }
-      }
-
-    } catch (IOException e) {
-      return e;
-    } catch (GSSException gsse) {
-      return new PSQLException(GT.tr("GSS Authentication failed"), PSQLState.CONNECTION_FAILURE,
-          gsse);
+        return false;
     }
 
-    return null;
-  }
+    @Override
+    public Exception run() {
+        try {
+            GSSManager manager = GSSManager.getInstance();
+            GSSCredential clientCreds = null;
+            Oid[] desiredMechs = new Oid[1];
 
-  @Override
-  public Exception call() throws Exception {
-    return run();
-  }
+            // Try to get a credential from the subject first.
+            GSSCredential gssCredential = null;
+            if (subject != null) {
+                Set<GSSCredential> gssCreds = subject.getPrivateCredentials(GSSCredential.class);
+                if (gssCreds != null && !gssCreds.isEmpty()) {
+                    gssCredential = gssCreds.iterator().next();
+                }
+            }
+
+            // If we failed to obtain a credential from the subject,
+            // fall back to createCredential to create one.
+            if (gssCredential == null) {
+                if (useSpnego && hasSpnegoSupport(manager)) {
+                    desiredMechs[0] = new Oid("1.3.6.1.5.5.2");
+                } else {
+                    desiredMechs[0] = new Oid("1.2.840.113554.1.2.2");
+                }
+                String principalName = this.user;
+                if (subject != null) {
+                    Set<Principal> principals = subject.getPrincipals();
+                    Iterator<Principal> principalIterator = principals.iterator();
+
+                    Principal principal = null;
+                    if (principalIterator.hasNext()) {
+                        principal = principalIterator.next();
+                        principalName = principal.getName();
+                    }
+                }
+
+                GSSName clientName = manager.createName(principalName, GSSName.NT_USER_NAME);
+                clientCreds = manager.createCredential(clientName, 8 * 3600, desiredMechs,
+                        GSSCredential.INITIATE_ONLY);
+            } else {
+                desiredMechs[0] = new Oid("1.2.840.113554.1.2.2");
+                clientCreds = gssCredential;
+            }
+            GSSName serverName =
+                    manager.createName(kerberosServerName + "@" + host, GSSName.NT_HOSTBASED_SERVICE);
+
+            GSSContext secContext = manager.createContext(serverName, desiredMechs[0], clientCreds,
+                    GSSContext.DEFAULT_LIFETIME);
+            secContext.requestMutualAuth(true);
+            secContext.requestConf(true);
+            secContext.requestInteg(true);
+
+            byte[] inToken = new byte[0];
+            byte[] outToken = null;
+
+            boolean established = false;
+            while (!established) {
+                outToken = secContext.initSecContext(inToken, 0, inToken.length);
+
+                if (outToken != null) {
+                    LOGGER.log(Level.FINEST, " FE=> Password(GSS Authentication Token)");
+
+                    pgStream.sendInteger4(outToken.length);
+                    pgStream.send(outToken);
+                    pgStream.flush();
+                }
+
+                if (!secContext.isEstablished()) {
+                    int len = pgStream.receiveInteger4();
+                    // TODO: validate that type == 8 (AuthenticationGSSContinue) before reading the token
+                    inToken = pgStream.receive(len);
+                } else {
+                    established = true;
+                    pgStream.setSecContext(secContext);
+                }
+            }
+
+        } catch (IOException e) {
+            return e;
+        } catch (GSSException gsse) {
+            return new PSQLException(GT.tr("GSS Authentication failed"), PSQLState.CONNECTION_FAILURE,
+                    gsse);
+        }
+
+        return null;
+    }
+
+    @Override
+    public Exception call() throws Exception {
+        return run();
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/gss/MakeGSS.java b/pgjdbc/src/main/java/org/postgresql/gss/MakeGSS.java
index a548275..57b2181 100644
--- a/pgjdbc/src/main/java/org/postgresql/gss/MakeGSS.java
+++ b/pgjdbc/src/main/java/org/postgresql/gss/MakeGSS.java
@@ -5,14 +5,6 @@
 
 package org.postgresql.gss;
 
-import org.postgresql.PGProperty;
-import org.postgresql.core.PGStream;
-import org.postgresql.util.GT;
-import org.postgresql.util.PSQLException;
-import org.postgresql.util.PSQLState;
-
-import org.ietf.jgss.GSSCredential;
-
 import java.io.IOException;
 import java.lang.invoke.MethodHandle;
 import java.lang.invoke.MethodHandles;
@@ -22,160 +14,166 @@ import java.util.Set;
 import java.util.concurrent.Callable;
 import java.util.logging.Level;
 import java.util.logging.Logger;
-
 import javax.security.auth.Subject;
 import javax.security.auth.login.LoginContext;
+import org.ietf.jgss.GSSCredential;
+import org.postgresql.PGProperty;
+import org.postgresql.core.PGStream;
+import org.postgresql.util.GT;
+import org.postgresql.util.PSQLException;
+import org.postgresql.util.PSQLState;
 
 public class MakeGSS {
-  private static final Logger LOGGER = Logger.getLogger(MakeGSS.class.getName());
-  private static final MethodHandle SUBJECT_CURRENT;
-  private static final MethodHandle ACCESS_CONTROLLER_GET_CONTEXT;
-  private static final MethodHandle SUBJECT_GET_SUBJECT;
-  // Java <18
-  private static final MethodHandle SUBJECT_DO_AS;
-  // Java 18+, see https://bugs.openjdk.org/browse/JDK-8267108
-  private static final MethodHandle SUBJECT_CALL_AS;
+    private static final Logger LOGGER = Logger.getLogger(MakeGSS.class.getName());
+    private static final MethodHandle SUBJECT_CURRENT;
+    private static final MethodHandle ACCESS_CONTROLLER_GET_CONTEXT;
+    private static final MethodHandle SUBJECT_GET_SUBJECT;
+    // Java <18
+    private static final MethodHandle SUBJECT_DO_AS;
+    // Java 18+, see https://bugs.openjdk.org/browse/JDK-8267108
+    private static final MethodHandle SUBJECT_CALL_AS;
 
-  static {
-    MethodHandle subjectCurrent = null;
-    try {
-      subjectCurrent = MethodHandles.lookup()
-          .findStatic(Subject.class, "current", MethodType.methodType(Subject.class));
-    } catch (NoSuchMethodException | IllegalAccessException ignore) {
-      // E.g. pre Java 18
-    }
-    SUBJECT_CURRENT = subjectCurrent;
-
-    MethodHandle accessControllerGetContext = null;
-    MethodHandle subjectGetSubject = null;
-
-    try {
-      Class<?> accessControllerClass = Class.forName("java.security.AccessController");
-      Class<?> accessControlContextClass =
-          Class.forName("java.security.AccessControlContext");
-      accessControllerGetContext = MethodHandles.lookup()
-          .findStatic(accessControllerClass, "getContext",
-              MethodType.methodType(accessControlContextClass));
-      subjectGetSubject = MethodHandles.lookup()
-          .findStatic(Subject.class, "getSubject",
-              MethodType.methodType(Subject.class, accessControlContextClass));
-    } catch (NoSuchMethodException | IllegalAccessException | ClassNotFoundException ignore) {
-      // E.g. pre Java 18+
-    }
-
-    ACCESS_CONTROLLER_GET_CONTEXT = accessControllerGetContext;
-    SUBJECT_GET_SUBJECT = subjectGetSubject;
-
-    MethodHandle subjectDoAs = null;
-    try {
-      subjectDoAs = MethodHandles.lookup().findStatic(Subject.class, "doAs",
-          MethodType.methodType(Object.class, Subject.class, PrivilegedAction.class));
-    } catch (NoSuchMethodException | IllegalAccessException ignore) {
-    }
-    SUBJECT_DO_AS = subjectDoAs;
-
-    MethodHandle subjectCallAs = null;
-    try {
-      subjectCallAs = MethodHandles.lookup().findStatic(Subject.class, "callAs",
-          MethodType.methodType(Object.class, Subject.class, Callable.class));
-    } catch (NoSuchMethodException | IllegalAccessException ignore) {
-    }
-    SUBJECT_CALL_AS = subjectCallAs;
-  }
-
-  /**
-   * Use {@code Subject.current()} in Java 18+, and
-   * {@code Subject.getSubject(AccessController.getContext())} in Java before 18.
-   * @return current Subject or null
-   */
-  @SuppressWarnings("deprecation")
-  private static Subject getCurrentSubject() {
-    try {
-      if (SUBJECT_CURRENT != null) {
-        return (Subject) SUBJECT_CURRENT.invokeExact();
-      }
-      if (SUBJECT_GET_SUBJECT == null || ACCESS_CONTROLLER_GET_CONTEXT == null) {
-        return null;
-      }
-      return (Subject) SUBJECT_GET_SUBJECT.invoke(
-          ACCESS_CONTROLLER_GET_CONTEXT.invoke()
-      );
-    } catch (Throwable e) {
-      if (e instanceof RuntimeException) {
-        throw (RuntimeException) e;
-      }
-      if (e instanceof Error) {
-        throw (Error) e;
-      }
-      throw new RuntimeException(e);
-    }
-  }
-
-  public static void authenticate(boolean encrypted,
-      PGStream pgStream, String host, String user, char [] password,
-      String jaasApplicationName, String kerberosServerName,
-      boolean useSpnego, boolean jaasLogin,
-      boolean logServerErrorDetail)
-          throws IOException, PSQLException {
-    LOGGER.log(Level.FINEST, " <=BE AuthenticationReqGSS");
-
-    if (jaasApplicationName == null) {
-      jaasApplicationName = PGProperty.JAAS_APPLICATION_NAME.getDefaultValue();
-    }
-    if (kerberosServerName == null) {
-      kerberosServerName = "postgres";
-    }
-
-    Exception result;
-    try {
-      boolean performAuthentication = jaasLogin;
-
-      //Check if we can get credential from subject to avoid login.
-      Subject sub = getCurrentSubject();
-      if (sub != null) {
-        Set<GSSCredential> gssCreds = sub.getPrivateCredentials(GSSCredential.class);
-        if (gssCreds != null && !gssCreds.isEmpty()) {
-          performAuthentication = false;
+    static {
+        MethodHandle subjectCurrent = null;
+        try {
+            subjectCurrent = MethodHandles.lookup()
+                    .findStatic(Subject.class, "current", MethodType.methodType(Subject.class));
+        } catch (NoSuchMethodException | IllegalAccessException ignore) {
+            // E.g. pre Java 18
         }
-      }
-      if (performAuthentication) {
-        LoginContext lc = new LoginContext(jaasApplicationName, new GSSCallbackHandler(user, password));
-        lc.login();
-        sub = lc.getSubject();
-      }
+        SUBJECT_CURRENT = subjectCurrent;
 
-      PrivilegedAction<Exception> action;
-      if ( encrypted ) {
-        action = new GssEncAction(pgStream, sub, host, user,
-            kerberosServerName, useSpnego, logServerErrorDetail);
-      } else {
-        action = new GssAction(pgStream, sub, host, user,
-            kerberosServerName, useSpnego, logServerErrorDetail);
-      }
-      @SuppressWarnings({"cast.unsafe", "assignment"})
-      Subject subject = sub;
-      if (SUBJECT_DO_AS != null) {
-        result = (Exception) SUBJECT_DO_AS.invoke(subject, action);
-      } else if (SUBJECT_CALL_AS != null) {
-        result = (Exception) SUBJECT_CALL_AS.invoke(subject, action);
-      } else {
-        throw new PSQLException(
-            GT.tr("Neither Subject.doAs (Java before 18) nor Subject.callAs (Java 18+) method found"),
-            PSQLState.OBJECT_NOT_IN_STATE);
-      }
-    } catch (Throwable e) {
-      throw new PSQLException(GT.tr("GSS Authentication failed"), PSQLState.CONNECTION_FAILURE, e);
+        MethodHandle accessControllerGetContext = null;
+        MethodHandle subjectGetSubject = null;
+
+        try {
+            Class<?> accessControllerClass = Class.forName("java.security.AccessController");
+            Class<?> accessControlContextClass =
+                    Class.forName("java.security.AccessControlContext");
+            accessControllerGetContext = MethodHandles.lookup()
+                    .findStatic(accessControllerClass, "getContext",
+                            MethodType.methodType(accessControlContextClass));
+            subjectGetSubject = MethodHandles.lookup()
+                    .findStatic(Subject.class, "getSubject",
+                            MethodType.methodType(Subject.class, accessControlContextClass));
+        } catch (NoSuchMethodException | IllegalAccessException | ClassNotFoundException ignore) {
+            // E.g. Java releases where java.security.AccessController is unavailable
+        }
+
+        ACCESS_CONTROLLER_GET_CONTEXT = accessControllerGetContext;
+        SUBJECT_GET_SUBJECT = subjectGetSubject;
+
+        MethodHandle subjectDoAs = null;
+        try {
+            subjectDoAs = MethodHandles.lookup().findStatic(Subject.class, "doAs",
+                    MethodType.methodType(Object.class, Subject.class, PrivilegedAction.class));
+        } catch (NoSuchMethodException | IllegalAccessException ignore) {
+        }
+        SUBJECT_DO_AS = subjectDoAs;
+
+        MethodHandle subjectCallAs = null;
+        try {
+            subjectCallAs = MethodHandles.lookup().findStatic(Subject.class, "callAs",
+                    MethodType.methodType(Object.class, Subject.class, Callable.class));
+        } catch (NoSuchMethodException | IllegalAccessException ignore) {
+        }
+        SUBJECT_CALL_AS = subjectCallAs;
     }
 
-    if (result instanceof IOException) {
-      throw (IOException) result;
-    } else if (result instanceof PSQLException) {
-      throw (PSQLException) result;
-    } else if (result != null) {
-      throw new PSQLException(GT.tr("GSS Authentication failed"), PSQLState.CONNECTION_FAILURE,
-          result);
+    /**
+     * Use {@code Subject.current()} in Java 18+, and
+     * {@code Subject.getSubject(AccessController.getContext())} in Java before 18.
+     *
+     * @return current Subject or null
+     */
+    @SuppressWarnings("deprecation")
+    private static Subject getCurrentSubject() {
+        try {
+            if (SUBJECT_CURRENT != null) {
+                return (Subject) SUBJECT_CURRENT.invokeExact();
+            }
+            if (SUBJECT_GET_SUBJECT == null || ACCESS_CONTROLLER_GET_CONTEXT == null) {
+                return null;
+            }
+            return (Subject) SUBJECT_GET_SUBJECT.invoke(
+                    ACCESS_CONTROLLER_GET_CONTEXT.invoke()
+            );
+        } catch (Throwable e) {
+            if (e instanceof RuntimeException) {
+                throw (RuntimeException) e;
+            }
+            if (e instanceof Error) {
+                throw (Error) e;
+            }
+            throw new RuntimeException(e);
+        }
     }
 
-  }
+    public static void authenticate(boolean encrypted,
+                                    PGStream pgStream, String host, String user, char[] password,
+                                    String jaasApplicationName, String kerberosServerName,
+                                    boolean useSpnego, boolean jaasLogin,
+                                    boolean logServerErrorDetail)
+            throws IOException, PSQLException {
+        LOGGER.log(Level.FINEST, " <=BE AuthenticationReqGSS");
+
+        if (jaasApplicationName == null) {
+            jaasApplicationName = PGProperty.JAAS_APPLICATION_NAME.getDefaultValue();
+        }
+        if (kerberosServerName == null) {
+            kerberosServerName = "postgres";
+        }
+
+        Exception result;
+        try {
+            boolean performAuthentication = jaasLogin;
+
+            // Check if we can get a credential from the Subject, to avoid an explicit JAAS login.
+            Subject sub = getCurrentSubject();
+            if (sub != null) {
+                Set<GSSCredential> gssCreds = sub.getPrivateCredentials(GSSCredential.class);
+                if (gssCreds != null && !gssCreds.isEmpty()) {
+                    performAuthentication = false;
+                }
+            }
+            if (performAuthentication) {
+                LoginContext lc = new LoginContext(jaasApplicationName, new GSSCallbackHandler(user, password));
+                lc.login();
+                sub = lc.getSubject();
+            }
+
+            PrivilegedAction<Exception> action;
+            if (encrypted) {
+                action = new GssEncAction(pgStream, sub, host, user,
+                        kerberosServerName, useSpnego, logServerErrorDetail);
+            } else {
+                action = new GssAction(pgStream, sub, host, user,
+                        kerberosServerName, useSpnego, logServerErrorDetail);
+            }
+            @SuppressWarnings({"cast.unsafe", "assignment"})
+            Subject subject = sub;
+            if (SUBJECT_DO_AS != null) {
+                result = (Exception) SUBJECT_DO_AS.invoke(subject, action);
+            } else if (SUBJECT_CALL_AS != null) {
+                result = (Exception) SUBJECT_CALL_AS.invoke(subject, action);
+            } else {
+                throw new PSQLException(
+                        GT.tr("Neither Subject.doAs (Java before 18) nor Subject.callAs (Java 18+) method found"),
+                        PSQLState.OBJECT_NOT_IN_STATE);
+            }
+        } catch (Throwable e) {
+            throw new PSQLException(GT.tr("GSS Authentication failed"), PSQLState.CONNECTION_FAILURE, e);
+        }
+
+        if (result instanceof IOException) {
+            throw (IOException) result;
+        } else if (result instanceof PSQLException) {
+            throw (PSQLException) result;
+        } else if (result != null) {
+            throw new PSQLException(GT.tr("GSS Authentication failed"), PSQLState.CONNECTION_FAILURE,
+                    result);
+        }
+
+    }
 
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/hostchooser/CandidateHost.java b/pgjdbc/src/main/java/org/postgresql/hostchooser/CandidateHost.java
index b0303e3..6f44165 100644
--- a/pgjdbc/src/main/java/org/postgresql/hostchooser/CandidateHost.java
+++ b/pgjdbc/src/main/java/org/postgresql/hostchooser/CandidateHost.java
@@ -11,11 +11,11 @@ import org.postgresql.util.HostSpec;
  * Candidate host to be connected.
  */
 public class CandidateHost {
-  public final HostSpec hostSpec;
-  public final HostRequirement targetServerType;
+    public final HostSpec hostSpec;
+    public final HostRequirement targetServerType;
 
-  public CandidateHost(HostSpec hostSpec, HostRequirement targetServerType) {
-    this.hostSpec = hostSpec;
-    this.targetServerType = targetServerType;
-  }
+    public CandidateHost(HostSpec hostSpec, HostRequirement targetServerType) {
+        this.hostSpec = hostSpec;
+        this.targetServerType = targetServerType;
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/hostchooser/GlobalHostStatusTracker.java b/pgjdbc/src/main/java/org/postgresql/hostchooser/GlobalHostStatusTracker.java
index d8a93b7..9b7f0d5 100644
--- a/pgjdbc/src/main/java/org/postgresql/hostchooser/GlobalHostStatusTracker.java
+++ b/pgjdbc/src/main/java/org/postgresql/hostchooser/GlobalHostStatusTracker.java
@@ -18,67 +18,67 @@ import java.util.Map;
  */
 @SuppressWarnings("try")
 public class GlobalHostStatusTracker {
-  private static final Map<HostSpec, HostSpecStatus> hostStatusMap =
-      new HashMap<>();
-  private static final ResourceLock lock = new ResourceLock();
+    private static final Map<HostSpec, HostSpecStatus> hostStatusMap =
+            new HashMap<>();
+    private static final ResourceLock lock = new ResourceLock();
 
-  /**
-   * Store the actual observed host status.
-   *
-   * @param hostSpec The host whose status is known.
-   * @param hostStatus Latest known status for the host.
-   */
-  public static void reportHostStatus(HostSpec hostSpec, HostStatus hostStatus) {
-    long now = System.nanoTime() / 1000000;
-    try (ResourceLock ignore = lock.obtain()) {
-      HostSpecStatus hostSpecStatus = hostStatusMap.get(hostSpec);
-      if (hostSpecStatus == null) {
-        hostSpecStatus = new HostSpecStatus(hostSpec);
-        hostStatusMap.put(hostSpec, hostSpecStatus);
-      }
-      hostSpecStatus.status = hostStatus;
-      hostSpecStatus.lastUpdated = now;
-    }
-  }
-
-  /**
-   * Returns a list of candidate hosts that have the required targetServerType.
-   *
-   * @param hostSpecs The potential list of hosts.
-   * @param targetServerType The required target server type.
-   * @param hostRecheckMillis How stale information is allowed.
-   * @return candidate hosts to connect to.
-   */
-  static List<HostSpec> getCandidateHosts(HostSpec[] hostSpecs,
-      HostRequirement targetServerType, long hostRecheckMillis) {
-    List<HostSpec> candidates = new ArrayList<>(hostSpecs.length);
-    long latestAllowedUpdate = System.nanoTime() / 1000000 - hostRecheckMillis;
-    try (ResourceLock ignore = lock.obtain()) {
-      for (HostSpec hostSpec : hostSpecs) {
-        HostSpecStatus hostInfo = hostStatusMap.get(hostSpec);
-        // candidates are nodes we do not know about and the nodes with correct type
-        if (hostInfo == null
-            || hostInfo.lastUpdated < latestAllowedUpdate
-            || targetServerType.allowConnectingTo(hostInfo.status)) {
-          candidates.add(hostSpec);
+    /**
+     * Store the actual observed host status.
+     *
+     * @param hostSpec   The host whose status is known.
+     * @param hostStatus Latest known status for the host.
+     */
+    public static void reportHostStatus(HostSpec hostSpec, HostStatus hostStatus) {
+        long now = System.nanoTime() / 1000000;
+        try (ResourceLock ignore = lock.obtain()) {
+            HostSpecStatus hostSpecStatus = hostStatusMap.get(hostSpec);
+            if (hostSpecStatus == null) {
+                hostSpecStatus = new HostSpecStatus(hostSpec);
+                hostStatusMap.put(hostSpec, hostSpecStatus);
+            }
+            hostSpecStatus.status = hostStatus;
+            hostSpecStatus.lastUpdated = now;
         }
-      }
-    }
-    return candidates;
-  }
-
-  static class HostSpecStatus {
-    final HostSpec host;
-    HostStatus status;
-    long lastUpdated;
-
-    HostSpecStatus(HostSpec host) {
-      this.host = host;
     }
 
-    @Override
-    public String toString() {
-      return host.toString() + '=' + status;
+    /**
+     * Returns a list of candidate hosts that have the required targetServerType.
+     *
+     * @param hostSpecs         The potential list of hosts.
+     * @param targetServerType  The required target server type.
+     * @param hostRecheckMillis How stale information is allowed.
+     * @return candidate hosts to connect to.
+     */
+    static List<HostSpec> getCandidateHosts(HostSpec[] hostSpecs,
+                                            HostRequirement targetServerType, long hostRecheckMillis) {
+        List<HostSpec> candidates = new ArrayList<>(hostSpecs.length);
+        long latestAllowedUpdate = System.nanoTime() / 1000000 - hostRecheckMillis;
+        try (ResourceLock ignore = lock.obtain()) {
+            for (HostSpec hostSpec : hostSpecs) {
+                HostSpecStatus hostInfo = hostStatusMap.get(hostSpec);
+                // Candidates are nodes we do not know about, plus the nodes with the correct type
+                if (hostInfo == null
+                        || hostInfo.lastUpdated < latestAllowedUpdate
+                        || targetServerType.allowConnectingTo(hostInfo.status)) {
+                    candidates.add(hostSpec);
+                }
+            }
+        }
+        return candidates;
+    }
+
+    static class HostSpecStatus {
+        final HostSpec host;
+        HostStatus status;
+        long lastUpdated;
+
+        HostSpecStatus(HostSpec host) {
+            this.host = host;
+        }
+
+        @Override
+        public String toString() {
+            return host.toString() + '=' + status;
+        }
     }
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/hostchooser/HostChooser.java b/pgjdbc/src/main/java/org/postgresql/hostchooser/HostChooser.java
index a506b7b..862cdf3 100644
--- a/pgjdbc/src/main/java/org/postgresql/hostchooser/HostChooser.java
+++ b/pgjdbc/src/main/java/org/postgresql/hostchooser/HostChooser.java
@@ -11,11 +11,11 @@ import java.util.Iterator;
  * Lists connections in preferred order.
  */
 public interface HostChooser extends Iterable<CandidateHost> {
-  /**
-   * Lists connection hosts in preferred order.
-   *
-   * @return connection hosts in preferred order.
-   */
-  @Override
-  Iterator<CandidateHost> iterator();
+    /**
+     * Lists connection hosts in preferred order.
+     *
+     * @return connection hosts in preferred order.
+     */
+    @Override
+    Iterator<CandidateHost> iterator();
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/hostchooser/HostChooserFactory.java b/pgjdbc/src/main/java/org/postgresql/hostchooser/HostChooserFactory.java
index 4099fa0..bd6c7b5 100644
--- a/pgjdbc/src/main/java/org/postgresql/hostchooser/HostChooserFactory.java
+++ b/pgjdbc/src/main/java/org/postgresql/hostchooser/HostChooserFactory.java
@@ -14,11 +14,11 @@ import java.util.Properties;
  */
 public class HostChooserFactory {
 
-  public static HostChooser createHostChooser(HostSpec[] hostSpecs,
-      HostRequirement targetServerType, Properties info) {
-    if (hostSpecs.length == 1) {
-      return new SingleHostChooser(hostSpecs[0], targetServerType);
+    public static HostChooser createHostChooser(HostSpec[] hostSpecs,
+                                                HostRequirement targetServerType, Properties info) {
+        if (hostSpecs.length == 1) {
+            return new SingleHostChooser(hostSpecs[0], targetServerType);
+        }
+        return new MultiHostChooser(hostSpecs, targetServerType, info);
     }
-    return new MultiHostChooser(hostSpecs, targetServerType, info);
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/hostchooser/HostRequirement.java b/pgjdbc/src/main/java/org/postgresql/hostchooser/HostRequirement.java
index 666bb9f..bef9b68 100644
--- a/pgjdbc/src/main/java/org/postgresql/hostchooser/HostRequirement.java
+++ b/pgjdbc/src/main/java/org/postgresql/hostchooser/HostRequirement.java
@@ -9,68 +9,68 @@ package org.postgresql.hostchooser;
  * Describes the required server type.
  */
 public enum HostRequirement {
-  any {
-    @Override
-    public boolean allowConnectingTo(HostStatus status) {
-      return status != HostStatus.ConnectFail;
-    }
-  },
-  /**
-   * @deprecated we no longer use the terms master or slave in the driver, or the PostgreSQL
-   *        project.
-   */
-  @Deprecated
-  master {
-    @Override
-    public boolean allowConnectingTo(HostStatus status) {
-      return primary.allowConnectingTo(status);
-    }
-  },
-  primary {
-    @Override
-    public boolean allowConnectingTo(HostStatus status) {
-      return status == HostStatus.Primary || status == HostStatus.ConnectOK;
-    }
-  },
-  secondary {
-    @Override
-    public boolean allowConnectingTo(HostStatus status) {
-      return status == HostStatus.Secondary || status == HostStatus.ConnectOK;
-    }
-  },
-  preferSecondary {
-    @Override
-    public boolean allowConnectingTo(HostStatus status) {
-      return status != HostStatus.ConnectFail;
-    }
-  },
-  preferPrimary {
-    @Override
-    public boolean allowConnectingTo(HostStatus status) {
-      return status != HostStatus.ConnectFail;
-    }
-  };
+    any {
+        @Override
+        public boolean allowConnectingTo(HostStatus status) {
+            return status != HostStatus.ConnectFail;
+        }
+    },
+    /**
+     * @deprecated we no longer use the terms master or slave in the driver, or the PostgreSQL
+     * project.
+     */
+    @Deprecated
+    master {
+        @Override
+        public boolean allowConnectingTo(HostStatus status) {
+            return primary.allowConnectingTo(status);
+        }
+    },
+    primary {
+        @Override
+        public boolean allowConnectingTo(HostStatus status) {
+            return status == HostStatus.Primary || status == HostStatus.ConnectOK;
+        }
+    },
+    secondary {
+        @Override
+        public boolean allowConnectingTo(HostStatus status) {
+            return status == HostStatus.Secondary || status == HostStatus.ConnectOK;
+        }
+    },
+    preferSecondary {
+        @Override
+        public boolean allowConnectingTo(HostStatus status) {
+            return status != HostStatus.ConnectFail;
+        }
+    },
+    preferPrimary {
+        @Override
+        public boolean allowConnectingTo(HostStatus status) {
+            return status != HostStatus.ConnectFail;
+        }
+    };
 
-  public abstract boolean allowConnectingTo(HostStatus status);
+    /**
+     * <p>The PostgreSQL project has decided not to use the term slave to refer to alternate servers;
+     * secondary or standby is preferred. We have arbitrarily chosen secondary.
+     * As of Jan 2018 in order not to break existing code we are going to accept both slave or
+     * secondary for names of alternate servers.</p>
+     *
+     * <p>The current policy is to keep accepting this silently but not document slave, or slave preferSlave</p>
+     *
+     * <p>As of Jul 2018 silently deprecate the use of the word master as well</p>
+     *
+     * @param targetServerType the value of {@code targetServerType} connection property
+     * @return HostRequirement
+     */
 
-  /**
-   * <p>The postgreSQL project has decided not to use the term slave to refer to alternate servers.
-   * secondary or standby is preferred. We have arbitrarily chosen secondary.
-   * As of Jan 2018 in order not to break existing code we are going to accept both slave or
-   * secondary for names of alternate servers.</p>
-   *
-   * <p>The current policy is to keep accepting this silently but not document slave, or slave preferSlave</p>
-   *
-   * <p>As of Jul 2018 silently deprecate the use of the word master as well</p>
-   *
-   * @param targetServerType the value of {@code targetServerType} connection property
-   * @return HostRequirement
-   */
+    public static HostRequirement getTargetServerType(String targetServerType) {
 
-  public static HostRequirement getTargetServerType(String targetServerType) {
+        String allowSlave = targetServerType.replace("lave", "econdary").replace("master", "primary");
+        return valueOf(allowSlave);
+    }
 
-    String allowSlave = targetServerType.replace("lave", "econdary").replace("master", "primary");
-    return valueOf(allowSlave);
-  }
+    public abstract boolean allowConnectingTo(HostStatus status);
 
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/hostchooser/HostStatus.java b/pgjdbc/src/main/java/org/postgresql/hostchooser/HostStatus.java
index d303e8d..28a0780 100644
--- a/pgjdbc/src/main/java/org/postgresql/hostchooser/HostStatus.java
+++ b/pgjdbc/src/main/java/org/postgresql/hostchooser/HostStatus.java
@@ -9,8 +9,8 @@ package org.postgresql.hostchooser;
  * Known state of a server.
  */
 public enum HostStatus {
-  ConnectFail,
-  ConnectOK,
-  Primary,
-  Secondary
+    ConnectFail,
+    ConnectOK,
+    Primary,
+    Secondary
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/hostchooser/MultiHostChooser.java b/pgjdbc/src/main/java/org/postgresql/hostchooser/MultiHostChooser.java
index 953417f..db2bb05 100644
--- a/pgjdbc/src/main/java/org/postgresql/hostchooser/MultiHostChooser.java
+++ b/pgjdbc/src/main/java/org/postgresql/hostchooser/MultiHostChooser.java
@@ -22,117 +22,117 @@ import java.util.Properties;
  * HostChooser that keeps track of known host statuses.
  */
 class MultiHostChooser implements HostChooser {
-  private final HostSpec[] hostSpecs;
-  private final HostRequirement targetServerType;
-  private int hostRecheckTime;
-  private boolean loadBalance;
+    private final HostSpec[] hostSpecs;
+    private final HostRequirement targetServerType;
+    private int hostRecheckTime;
+    private boolean loadBalance;
 
-  MultiHostChooser(HostSpec[] hostSpecs, HostRequirement targetServerType,
-      Properties info) {
-    this.hostSpecs = hostSpecs;
-    this.targetServerType = targetServerType;
-    try {
-      hostRecheckTime = PGProperty.HOST_RECHECK_SECONDS.getInt(info) * 1000;
-      loadBalance = PGProperty.LOAD_BALANCE_HOSTS.getBoolean(info);
-    } catch (PSQLException e) {
-      throw new RuntimeException(e);
-    }
-  }
-
-  @Override
-  public Iterator<CandidateHost> iterator() {
-    Iterator<CandidateHost> res = candidateIterator();
-    if (!res.hasNext()) {
-      // In case all the candidate hosts are unavailable or do not match, try all the hosts just in case
-      List<HostSpec> allHosts = Arrays.asList(hostSpecs);
-      if (loadBalance) {
-        allHosts = new ArrayList<>(allHosts);
-        shuffle(allHosts);
-      }
-      res = withReqStatus(targetServerType, allHosts).iterator();
-    }
-    return res;
-  }
-
-  private Iterator<CandidateHost> candidateIterator() {
-    if (   targetServerType != HostRequirement.preferSecondary
-        && targetServerType != HostRequirement.preferPrimary   ) {
-      return getCandidateHosts(targetServerType).iterator();
+    MultiHostChooser(HostSpec[] hostSpecs, HostRequirement targetServerType,
+                     Properties info) {
+        this.hostSpecs = hostSpecs;
+        this.targetServerType = targetServerType;
+        try {
+            hostRecheckTime = PGProperty.HOST_RECHECK_SECONDS.getInt(info) * 1000;
+            loadBalance = PGProperty.LOAD_BALANCE_HOSTS.getBoolean(info);
+        } catch (PSQLException e) {
+            throw new RuntimeException(e);
+        }
     }
 
-    HostRequirement preferredServerType =
-        targetServerType == HostRequirement.preferSecondary
-          ? HostRequirement.secondary
-          : HostRequirement.primary;
-
-    // preferSecondary tries to find secondary hosts first
-    // Note: sort does not work here since there are "unknown" hosts,
-    // and that "unknown" might turn out to be master, so we should discard that
-    // if other secondaries exist
-    // Same logic as the above works for preferPrimary if we replace "secondary"
-    // with "primary" and vice versa
-    List<CandidateHost> preferred = getCandidateHosts(preferredServerType);
-    List<CandidateHost> any = getCandidateHosts(HostRequirement.any);
-
-    if (  !preferred.isEmpty() && !any.isEmpty()
-        && preferred.get(preferred.size() - 1).hostSpec.equals(any.get(0).hostSpec)) {
-      // When the last preferred host's hostspec is the same as the first in "any" list, there's no need
-      // to attempt to connect it as "preferred"
-      // Note: this is only an optimization
-      preferred = rtrim(1, preferred);
+    @Override
+    public Iterator<CandidateHost> iterator() {
+        Iterator<CandidateHost> res = candidateIterator();
+        if (!res.hasNext()) {
+            // In case all the candidate hosts are unavailable or do not match, try all the hosts just in case
+            List<HostSpec> allHosts = Arrays.asList(hostSpecs);
+            if (loadBalance) {
+                allHosts = new ArrayList<>(allHosts);
+                shuffle(allHosts);
+            }
+            res = withReqStatus(targetServerType, allHosts).iterator();
+        }
+        return res;
     }
-    return append(preferred, any).iterator();
-  }
 
-  private List<CandidateHost> getCandidateHosts(HostRequirement hostRequirement) {
-    List<HostSpec> candidates =
-        GlobalHostStatusTracker.getCandidateHosts(hostSpecs, hostRequirement, hostRecheckTime);
-    if (loadBalance) {
-      shuffle(candidates);
+    private Iterator<CandidateHost> candidateIterator() {
+        if (targetServerType != HostRequirement.preferSecondary
+                && targetServerType != HostRequirement.preferPrimary) {
+            return getCandidateHosts(targetServerType).iterator();
+        }
+
+        HostRequirement preferredServerType =
+                targetServerType == HostRequirement.preferSecondary
+                        ? HostRequirement.secondary
+                        : HostRequirement.primary;
+
+        // preferSecondary tries to find secondary hosts first
+        // Note: sort does not work here since there are "unknown" hosts,
+        // and that "unknown" might turn out to be master, so we should discard that
+        // if other secondaries exist
+        // Same logic as the above works for preferPrimary if we replace "secondary"
+        // with "primary" and vice versa
+        List<CandidateHost> preferred = getCandidateHosts(preferredServerType);
+        List<CandidateHost> any = getCandidateHosts(HostRequirement.any);
+
+        if (!preferred.isEmpty() && !any.isEmpty()
+                && preferred.get(preferred.size() - 1).hostSpec.equals(any.get(0).hostSpec)) {
+            // When the last preferred host's hostspec is the same as the first in "any" list, there's no need
+            // to attempt to connect it as "preferred"
+            // Note: this is only an optimization
+            preferred = rtrim(1, preferred);
+        }
+        return append(preferred, any).iterator();
     }
-    return withReqStatus(hostRequirement, candidates);
-  }
 
-  private List<CandidateHost> withReqStatus(final HostRequirement requirement, final List<HostSpec> hosts) {
-    return new AbstractList<CandidateHost>() {
-      @Override
-      public CandidateHost get(int index) {
-        return new CandidateHost(hosts.get(index), requirement);
-      }
+    private List<CandidateHost> getCandidateHosts(HostRequirement hostRequirement) {
+        List<HostSpec> candidates =
+                GlobalHostStatusTracker.getCandidateHosts(hostSpecs, hostRequirement, hostRecheckTime);
+        if (loadBalance) {
+            shuffle(candidates);
+        }
+        return withReqStatus(hostRequirement, candidates);
+    }
 
-      @Override
-      public int size() {
-        return hosts.size();
-      }
-    };
-  }
+    private List<CandidateHost> withReqStatus(final HostRequirement requirement, final List<HostSpec> hosts) {
+        return new AbstractList<CandidateHost>() {
+            @Override
+            public CandidateHost get(int index) {
+                return new CandidateHost(hosts.get(index), requirement);
+            }
 
-  private <T> List<T> append(final List<T> a, final List<T> b) {
-    return new AbstractList<T>() {
-      @Override
-      public T get(int index) {
-        return index < a.size() ? a.get(index) : b.get(index - a.size());
-      }
+            @Override
+            public int size() {
+                return hosts.size();
+            }
+        };
+    }
 
-      @Override
-      public int size() {
-        return a.size() + b.size();
-      }
-    };
-  }
+    private <T> List<T> append(final List<T> a, final List<T> b) {
+        return new AbstractList<T>() {
+            @Override
+            public T get(int index) {
+                return index < a.size() ? a.get(index) : b.get(index - a.size());
+            }
 
-  private <T> List<T> rtrim(final int size, final List<T> a) {
-    return new AbstractList<T>() {
-      @Override
-      public T get(int index) {
-        return a.get(index);
-      }
+            @Override
+            public int size() {
+                return a.size() + b.size();
+            }
+        };
+    }
 
-      @Override
-      public int size() {
-        return Math.max(0, a.size() - size);
-      }
-    };
-  }
+    private <T> List<T> rtrim(final int size, final List<T> a) {
+        return new AbstractList<T>() {
+            @Override
+            public T get(int index) {
+                return a.get(index);
+            }
+
+            @Override
+            public int size() {
+                return Math.max(0, a.size() - size);
+            }
+        };
+    }
 
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/hostchooser/SingleHostChooser.java b/pgjdbc/src/main/java/org/postgresql/hostchooser/SingleHostChooser.java
index e79e834..d80245b 100644
--- a/pgjdbc/src/main/java/org/postgresql/hostchooser/SingleHostChooser.java
+++ b/pgjdbc/src/main/java/org/postgresql/hostchooser/SingleHostChooser.java
@@ -15,14 +15,14 @@ import java.util.Iterator;
  * Host chooser that returns the single host.
  */
 class SingleHostChooser implements HostChooser {
-  private final Collection<CandidateHost> candidateHost;
+    private final Collection<CandidateHost> candidateHost;
 
-  SingleHostChooser(HostSpec hostSpec, HostRequirement targetServerType) {
-    this.candidateHost = Collections.singletonList(new CandidateHost(hostSpec, targetServerType));
-  }
+    SingleHostChooser(HostSpec hostSpec, HostRequirement targetServerType) {
+        this.candidateHost = Collections.singletonList(new CandidateHost(hostSpec, targetServerType));
+    }
 
-  @Override
-  public Iterator<CandidateHost> iterator() {
-    return candidateHost.iterator();
-  }
+    @Override
+    public Iterator<CandidateHost> iterator() {
+        return candidateHost.iterator();
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/AbstractBlobClob.java b/pgjdbc/src/main/java/org/postgresql/jdbc/AbstractBlobClob.java
index d50ebf4..bc0d762 100644
--- a/pgjdbc/src/main/java/org/postgresql/jdbc/AbstractBlobClob.java
+++ b/pgjdbc/src/main/java/org/postgresql/jdbc/AbstractBlobClob.java
@@ -26,269 +26,266 @@ import java.util.ArrayList;
  */
 @SuppressWarnings("try")
 public abstract class AbstractBlobClob {
-  protected BaseConnection conn;
+    protected final ResourceLock lock = new ResourceLock();
+    private final boolean support64bit;
+    private final long oid;
+    protected BaseConnection conn;
+    private LargeObject currentLo;
+    private boolean currentLoIsWriteable;
+    /**
+     * We create separate LargeObjects for methods that use streams so they won't interfere with each
+     * other.
+     */
+    private ArrayList<LargeObject> subLOs = new ArrayList<LargeObject>();
 
-  private LargeObject currentLo;
-  private boolean currentLoIsWriteable;
-  private final boolean support64bit;
+    public AbstractBlobClob(BaseConnection conn, long oid) throws SQLException {
+        this.conn = conn;
+        this.oid = oid;
+        this.currentLoIsWriteable = false;
 
-  /**
-   * We create separate LargeObjects for methods that use streams so they won't interfere with each
-   * other.
-   */
-  private ArrayList<LargeObject> subLOs = new ArrayList<LargeObject>();
+        support64bit = conn.haveMinimumServerVersion(90300);
+    }
 
-  protected final ResourceLock lock = new ResourceLock();
-  private final long oid;
-
-  public AbstractBlobClob(BaseConnection conn, long oid) throws SQLException {
-    this.conn = conn;
-    this.oid = oid;
-    this.currentLoIsWriteable = false;
-
-    support64bit = conn.haveMinimumServerVersion(90300);
-  }
-
-  public void free() throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      if (currentLo != null) {
-        currentLo.close();
-        currentLo = null;
-        currentLoIsWriteable = false;
-      }
-      if (subLOs != null) {
-        for (LargeObject subLO : subLOs) {
-          subLO.close();
+    public void free() throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            if (currentLo != null) {
+                currentLo.close();
+                currentLo = null;
+                currentLoIsWriteable = false;
+            }
+            if (subLOs != null) {
+                for (LargeObject subLO : subLOs) {
+                    subLO.close();
+                }
+            }
+            subLOs = null;
         }
-      }
-      subLOs = null;
     }
-  }
 
-  /**
-   * For Blobs this should be in bytes while for Clobs it should be in characters. Since we really
-   * haven't figured out how to handle character sets for Clobs the current implementation uses
-   * bytes for both Blobs and Clobs.
-   *
-   * @param len maximum length
-   * @throws SQLException if operation fails
-   */
-  public void truncate(long len) throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      checkFreed();
-      if (!conn.haveMinimumServerVersion(ServerVersion.v8_3)) {
-        throw new PSQLException(
-            GT.tr("Truncation of large objects is only implemented in 8.3 and later servers."),
-            PSQLState.NOT_IMPLEMENTED);
-      }
+    /**
+     * For Blobs this should be in bytes while for Clobs it should be in characters. Since we really
+     * haven't figured out how to handle character sets for Clobs the current implementation uses
+     * bytes for both Blobs and Clobs.
+     *
+     * @param len maximum length
+     * @throws SQLException if operation fails
+     */
+    public void truncate(long len) throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            checkFreed();
+            if (!conn.haveMinimumServerVersion(ServerVersion.v8_3)) {
+                throw new PSQLException(
+                        GT.tr("Truncation of large objects is only implemented in 8.3 and later servers."),
+                        PSQLState.NOT_IMPLEMENTED);
+            }
 
-      if (len < 0) {
-        throw new PSQLException(GT.tr("Cannot truncate LOB to a negative length."),
-            PSQLState.INVALID_PARAMETER_VALUE);
-      }
-      if (len > Integer.MAX_VALUE) {
-        if (support64bit) {
-          getLo(true).truncate64(len);
-        } else {
-          throw new PSQLException(GT.tr("PostgreSQL LOBs can only index to: {0}", Integer.MAX_VALUE),
-              PSQLState.INVALID_PARAMETER_VALUE);
+            if (len < 0) {
+                throw new PSQLException(GT.tr("Cannot truncate LOB to a negative length."),
+                        PSQLState.INVALID_PARAMETER_VALUE);
+            }
+            if (len > Integer.MAX_VALUE) {
+                if (support64bit) {
+                    getLo(true).truncate64(len);
+                } else {
+                    throw new PSQLException(GT.tr("PostgreSQL LOBs can only index to: {0}", Integer.MAX_VALUE),
+                            PSQLState.INVALID_PARAMETER_VALUE);
+                }
+            } else {
+                getLo(true).truncate((int) len);
+            }
         }
-      } else {
-        getLo(true).truncate((int) len);
-      }
     }
-  }
 
-  public long length() throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      checkFreed();
-      if (support64bit) {
-        return getLo(false).size64();
-      } else {
-        return getLo(false).size();
-      }
-    }
-  }
-
-  public byte[] getBytes(long pos, int length) throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      assertPosition(pos);
-      getLo(false).seek((int) (pos - 1), LargeObject.SEEK_SET);
-      return getLo(false).read(length);
-    }
-  }
-
-  public InputStream getBinaryStream() throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      checkFreed();
-      LargeObject subLO = getLo(false).copy();
-      addSubLO(subLO);
-      subLO.seek(0, LargeObject.SEEK_SET);
-      return subLO.getInputStream();
-    }
-  }
-
-  public OutputStream setBinaryStream(long pos) throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      assertPosition(pos);
-      LargeObject subLO = getLo(true).copy();
-      addSubLO(subLO);
-      subLO.seek((int) (pos - 1));
-      return subLO.getOutputStream();
-    }
-  }
-
-  /**
-   * Iterate over the buffer looking for the specified pattern.
-   *
-   * @param pattern A pattern of bytes to search the blob for
-   * @param start The position to start reading from
-   * @return position of the specified pattern
-   * @throws SQLException if something wrong happens
-   */
-  public long position(byte[] pattern, long start) throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      assertPosition(start, pattern.length);
-
-      int position = 1;
-      int patternIdx = 0;
-      long result = -1;
-      int tmpPosition = 1;
-
-      for (LOIterator i = new LOIterator(start - 1); i.hasNext(); position++) {
-        byte b = i.next();
-        if (b == pattern[patternIdx]) {
-          if (patternIdx == 0) {
-            tmpPosition = position;
-          }
-          patternIdx++;
-          if (patternIdx == pattern.length) {
-            result = tmpPosition;
-            break;
-          }
-        } else {
-          patternIdx = 0;
+    public long length() throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            checkFreed();
+            if (support64bit) {
+                return getLo(false).size64();
+            } else {
+                return getLo(false).size();
+            }
         }
-      }
-
-      return result;
-    }
-  }
-
-  /**
-   * Iterates over a large object returning byte values. Will buffer the data from the large object.
-   */
-  private class LOIterator  {
-    private static final int BUFFER_SIZE = 8096;
-    private final byte[] buffer = new byte[BUFFER_SIZE];
-    private int idx = BUFFER_SIZE;
-    private int numBytes = BUFFER_SIZE;
-
-    LOIterator(long start) throws SQLException {
-      getLo(false).seek((int) start);
     }
 
-    public boolean hasNext() throws SQLException {
-      boolean result;
-      if (idx < numBytes) {
-        result = true;
-      } else {
-        numBytes = getLo(false).read(buffer, 0, BUFFER_SIZE);
-        idx = 0;
-        result = numBytes > 0;
-      }
-      return result;
+    public byte[] getBytes(long pos, int length) throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            assertPosition(pos);
+            getLo(false).seek((int) (pos - 1), LargeObject.SEEK_SET);
+            return getLo(false).read(length);
+        }
     }
 
-    private byte next() {
-      return buffer[idx++];
+    public InputStream getBinaryStream() throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            checkFreed();
+            LargeObject subLO = getLo(false).copy();
+            addSubLO(subLO);
+            subLO.seek(0, LargeObject.SEEK_SET);
+            return subLO.getInputStream();
+        }
     }
-  }
 
-  /**
-   * This is simply passing the byte value of the pattern Blob.
-   *
-   * @param pattern search pattern
-   * @param start start position
-   * @return position of given pattern
-   * @throws SQLException if something goes wrong
-   */
-  public long position(Blob pattern, long start) throws SQLException {
-    return position(pattern.getBytes(1, (int) pattern.length()), start);
-  }
-
-  /**
-   * Throws an exception if the pos value exceeds the max value by which the large object API can
-   * index.
-   *
-   * @param pos Position to write at.
-   * @throws SQLException if something goes wrong
-   */
-  protected void assertPosition(long pos) throws SQLException {
-    assertPosition(pos, 0);
-  }
-
-  /**
-   * Throws an exception if the pos value exceeds the max value by which the large object API can
-   * index.
-   *
-   * @param pos Position to write at.
-   * @param len number of bytes to write.
-   * @throws SQLException if something goes wrong
-   */
-  protected void assertPosition(long pos, long len) throws SQLException {
-    checkFreed();
-    if (pos < 1) {
-      throw new PSQLException(GT.tr("LOB positioning offsets start at 1."),
-          PSQLState.INVALID_PARAMETER_VALUE);
+    public OutputStream setBinaryStream(long pos) throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            assertPosition(pos);
+            LargeObject subLO = getLo(true).copy();
+            addSubLO(subLO);
+            subLO.seek((int) (pos - 1));
+            return subLO.getOutputStream();
+        }
     }
-    if (pos + len - 1 > Integer.MAX_VALUE) {
-      throw new PSQLException(GT.tr("PostgreSQL LOBs can only index to: {0}", Integer.MAX_VALUE),
-          PSQLState.INVALID_PARAMETER_VALUE);
+
+    /**
+     * Iterate over the buffer looking for the specified pattern.
+     *
+     * @param pattern A pattern of bytes to search the blob for
+     * @param start   The position to start reading from
+     * @return position of the specified pattern
+     * @throws SQLException if something wrong happens
+     */
+    public long position(byte[] pattern, long start) throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            assertPosition(start, pattern.length);
+
+            int position = 1;
+            int patternIdx = 0;
+            long result = -1;
+            int tmpPosition = 1;
+
+            for (LOIterator i = new LOIterator(start - 1); i.hasNext(); position++) {
+                byte b = i.next();
+                if (b == pattern[patternIdx]) {
+                    if (patternIdx == 0) {
+                        tmpPosition = position;
+                    }
+                    patternIdx++;
+                    if (patternIdx == pattern.length) {
+                        result = tmpPosition;
+                        break;
+                    }
+                } else {
+                    patternIdx = 0;
+                }
+            }
+
+            return result;
+        }
     }
-  }
 
-  /**
-   * Checks that this LOB hasn't been free()d already.
-   *
-   * @throws SQLException if LOB has been freed.
-   */
-  protected void checkFreed() throws SQLException {
-    if (subLOs == null) {
-      throw new PSQLException(GT.tr("free() was called on this LOB previously"),
-          PSQLState.OBJECT_NOT_IN_STATE);
+    /**
+     * This is simply passing the byte value of the pattern Blob.
+     *
+     * @param pattern search pattern
+     * @param start   start position
+     * @return position of given pattern
+     * @throws SQLException if something goes wrong
+     */
+    public long position(Blob pattern, long start) throws SQLException {
+        return position(pattern.getBytes(1, (int) pattern.length()), start);
     }
-  }
 
-  protected LargeObject getLo(boolean forWrite) throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      LargeObject currentLo = this.currentLo;
-      if (currentLo != null) {
-        if (forWrite && !currentLoIsWriteable) {
-          // Reopen the stream in read-write, at the same pos.
-          int currentPos = currentLo.tell();
+    /**
+     * Throws an exception if the pos value exceeds the max value by which the large object API can
+     * index.
+     *
+     * @param pos Position to write at.
+     * @throws SQLException if something goes wrong
+     */
+    protected void assertPosition(long pos) throws SQLException {
+        assertPosition(pos, 0);
+    }
 
-          LargeObjectManager lom = conn.getLargeObjectAPI();
-          LargeObject newLo = lom.open(oid, LargeObjectManager.READWRITE);
-          subLOs.add(currentLo);
-          this.currentLo = currentLo = newLo;
+    /**
+     * Throws an exception if the pos value exceeds the max value by which the large object API can
+     * index.
+     *
+     * @param pos Position to write at.
+     * @param len number of bytes to write.
+     * @throws SQLException if something goes wrong
+     */
+    protected void assertPosition(long pos, long len) throws SQLException {
+        checkFreed();
+        if (pos < 1) {
+            throw new PSQLException(GT.tr("LOB positioning offsets start at 1."),
+                    PSQLState.INVALID_PARAMETER_VALUE);
+        }
+        if (pos + len - 1 > Integer.MAX_VALUE) {
+            throw new PSQLException(GT.tr("PostgreSQL LOBs can only index to: {0}", Integer.MAX_VALUE),
+                    PSQLState.INVALID_PARAMETER_VALUE);
+        }
+    }
 
-          if (currentPos != 0) {
-            currentLo.seek(currentPos);
-          }
+    /**
+     * Checks that this LOB hasn't been free()d already.
+     *
+     * @throws SQLException if LOB has been freed.
+     */
+    protected void checkFreed() throws SQLException {
+        if (subLOs == null) {
+            throw new PSQLException(GT.tr("free() was called on this LOB previously"),
+                    PSQLState.OBJECT_NOT_IN_STATE);
+        }
+    }
+
+    protected LargeObject getLo(boolean forWrite) throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            LargeObject currentLo = this.currentLo;
+            if (currentLo != null) {
+                if (forWrite && !currentLoIsWriteable) {
+                    // Reopen the stream in read-write, at the same pos.
+                    int currentPos = currentLo.tell();
+
+                    LargeObjectManager lom = conn.getLargeObjectAPI();
+                    LargeObject newLo = lom.open(oid, LargeObjectManager.READWRITE);
+                    subLOs.add(currentLo);
+                    this.currentLo = currentLo = newLo;
+
+                    if (currentPos != 0) {
+                        currentLo.seek(currentPos);
+                    }
+                }
+
+                return currentLo;
+            }
+            LargeObjectManager lom = conn.getLargeObjectAPI();
+            this.currentLo = currentLo =
+                    lom.open(oid, forWrite ? LargeObjectManager.READWRITE : LargeObjectManager.READ);
+            currentLoIsWriteable = forWrite;
+            return currentLo;
+        }
+    }
+
+    protected void addSubLO(LargeObject subLO) {
+        subLOs.add(subLO);
+    }
+
+    /**
+     * Iterates over a large object returning byte values. Will buffer the data from the large object.
+     */
+    private class LOIterator {
+        private static final int BUFFER_SIZE = 8096;
+        private final byte[] buffer = new byte[BUFFER_SIZE];
+        private int idx = BUFFER_SIZE;
+        private int numBytes = BUFFER_SIZE;
+
+        LOIterator(long start) throws SQLException {
+            getLo(false).seek((int) start);
         }
 
-        return currentLo;
-      }
-      LargeObjectManager lom = conn.getLargeObjectAPI();
-      this.currentLo = currentLo =
-          lom.open(oid, forWrite ? LargeObjectManager.READWRITE : LargeObjectManager.READ);
-      currentLoIsWriteable = forWrite;
-      return currentLo;
-    }
-  }
+        public boolean hasNext() throws SQLException {
+            boolean result;
+            if (idx < numBytes) {
+                result = true;
+            } else {
+                numBytes = getLo(false).read(buffer, 0, BUFFER_SIZE);
+                idx = 0;
+                result = numBytes > 0;
+            }
+            return result;
+        }
 
-  protected void addSubLO(LargeObject subLO) {
-    subLOs.add(subLO);
-  }
+        private byte next() {
+            return buffer[idx++];
+        }
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/ArrayDecoding.java b/pgjdbc/src/main/java/org/postgresql/jdbc/ArrayDecoding.java
index 9bee44d..475ddde 100644
--- a/pgjdbc/src/main/java/org/postgresql/jdbc/ArrayDecoding.java
+++ b/pgjdbc/src/main/java/org/postgresql/jdbc/ArrayDecoding.java
@@ -44,761 +44,732 @@ import java.util.Map;
  */
 public final class ArrayDecoding {
 
-  public ArrayDecoding() {
-  }
+    private static final ArrayDecoder<Long[]> LONG_OBJ_ARRAY = new AbstractObjectArrayDecoder<Long[]>(Long.class) {
 
-  /**
-   * Array list implementation specific for storing PG array elements. If
-   * {@link PgArrayList#dimensionsCount} is {@code 1}, the contents will be
-   * {@link String}. For all larger <i>dimensionsCount</i>, the values will be
-   * {@link PgArrayList} instances.
-   */
-  @SuppressWarnings("serial")
-  public static final class PgArrayList extends ArrayList<Object> {
-
-    /**
-     * How many dimensions.
-     */
-    int dimensionsCount = 1;
-
-    public PgArrayList() {
-    }
-
-  }
-
-  private interface ArrayDecoder<A extends Object> {
-
-    A createArray(int size);
-
-    Object[] createMultiDimensionalArray(int[] sizes);
-
-    boolean supportBinary();
-
-    void populateFromBinary(A array, int index, int count, ByteBuffer bytes, BaseConnection connection)
-        throws SQLException;
-
-    void populateFromString(A array, List<String> strings, BaseConnection connection) throws SQLException;
-  }
-
-  private abstract static class AbstractObjectStringArrayDecoder<A extends Object> implements ArrayDecoder<A> {
-    final Class<?> baseClazz;
-
-    AbstractObjectStringArrayDecoder(Class<?> baseClazz) {
-      this.baseClazz = baseClazz;
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public boolean supportBinary() {
-      return false;
-    }
-
-    @SuppressWarnings("unchecked")
-    @Override
-    public A createArray(int size) {
-      return (A) Array.newInstance(baseClazz, size);
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public Object[] createMultiDimensionalArray(int[] sizes) {
-      return (Object[]) Array.newInstance(baseClazz, sizes);
-    }
-
-    @Override
-    public void populateFromBinary(A arr, int index, int count, ByteBuffer bytes, BaseConnection connection)
-        throws SQLException {
-      throw new SQLFeatureNotSupportedException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public void populateFromString(A arr, List<String> strings, BaseConnection connection) throws SQLException {
-      final Object[] array = (Object[]) arr;
-
-      for (int i = 0, j = strings.size(); i < j; i++) {
-        final String stringVal = strings.get(i);
-        array[i] = stringVal != null ? parseValue(stringVal, connection) : null;
-      }
-    }
-
-    abstract Object parseValue(String stringVal, BaseConnection connection) throws SQLException;
-  }
-
-  private abstract static class AbstractObjectArrayDecoder<A extends Object> extends AbstractObjectStringArrayDecoder<A> {
-
-    AbstractObjectArrayDecoder(Class<?> baseClazz) {
-      super(baseClazz);
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public boolean supportBinary() {
-      return true;
-    }
-
-    @Override
-    public void populateFromBinary(A arr, int index, int count, ByteBuffer bytes, BaseConnection connection)
-        throws SQLException {
-      final Object[] array = (Object[]) arr;
-
-      // skip through to the requested index
-      for (int i = 0; i < index; i++) {
-        final int length = bytes.getInt();
-        if (length > 0) {
-          bytes.position(bytes.position() + length);
+        @Override
+        Object parseValue(int length, ByteBuffer bytes, BaseConnection connection) {
+            return bytes.getLong();
         }
-      }
 
-      for (int i = 0; i < count; i++) {
-        final int length = bytes.getInt();
-        if (length != -1) {
-          array[i] = parseValue(length, bytes, connection);
-        } else {
-          // explicitly set to null for reader's clarity
-          array[i] = null;
+        @Override
+        Object parseValue(String stringVal, BaseConnection connection) throws SQLException {
+            return PgResultSet.toLong(stringVal);
         }
-      }
-    }
+    };
+    private static final ArrayDecoder<Long[]> INT4_UNSIGNED_OBJ_ARRAY = new AbstractObjectArrayDecoder<Long[]>(
+            Long.class) {
 
-    abstract Object parseValue(int length, ByteBuffer bytes, BaseConnection connection) throws SQLException;
-  }
-
-  private static final ArrayDecoder<Long[]> LONG_OBJ_ARRAY = new AbstractObjectArrayDecoder<Long[]>(Long.class) {
-
-    @Override
-    Object parseValue(int length, ByteBuffer bytes, BaseConnection connection) {
-      return bytes.getLong();
-    }
-
-    @Override
-    Object parseValue(String stringVal, BaseConnection connection) throws SQLException {
-      return PgResultSet.toLong(stringVal);
-    }
-  };
-
-  private static final ArrayDecoder<Long[]> INT4_UNSIGNED_OBJ_ARRAY = new AbstractObjectArrayDecoder<Long[]>(
-      Long.class) {
-
-    @Override
-    Object parseValue(int length, ByteBuffer bytes, BaseConnection connection) {
-      return bytes.getInt() & 0xFFFFFFFFL;
-    }
-
-    @Override
-    Object parseValue(String stringVal, BaseConnection connection) throws SQLException {
-      return PgResultSet.toLong(stringVal);
-    }
-  };
-
-  private static final ArrayDecoder<Integer[]> INTEGER_OBJ_ARRAY = new AbstractObjectArrayDecoder<Integer[]>(
-      Integer.class) {
-
-    @Override
-    Object parseValue(int length, ByteBuffer bytes, BaseConnection connection) {
-      return bytes.getInt();
-    }
-
-    @Override
-    Object parseValue(String stringVal, BaseConnection connection) throws SQLException {
-      return PgResultSet.toInt(stringVal);
-    }
-  };
-
-  private static final ArrayDecoder<Short[]> SHORT_OBJ_ARRAY = new AbstractObjectArrayDecoder<Short[]>(Short.class) {
-
-    @Override
-    Object parseValue(int length, ByteBuffer bytes, BaseConnection connection) {
-      return bytes.getShort();
-    }
-
-    @Override
-    Object parseValue(String stringVal, BaseConnection connection) throws SQLException {
-      return PgResultSet.toShort(stringVal);
-    }
-  };
-
-  private static final ArrayDecoder<Double[]> DOUBLE_OBJ_ARRAY = new AbstractObjectArrayDecoder<Double[]>(
-      Double.class) {
-
-    @Override
-    Object parseValue(int length, ByteBuffer bytes, BaseConnection connection) {
-      return bytes.getDouble();
-    }
-
-    @Override
-    Object parseValue(String stringVal, BaseConnection connection) throws SQLException {
-      return PgResultSet.toDouble(stringVal);
-    }
-  };
-
-  private static final ArrayDecoder<Float[]> FLOAT_OBJ_ARRAY = new AbstractObjectArrayDecoder<Float[]>(Float.class) {
-
-    @Override
-    Object parseValue(int length, ByteBuffer bytes, BaseConnection connection) {
-      return bytes.getFloat();
-    }
-
-    @Override
-    Object parseValue(String stringVal, BaseConnection connection) throws SQLException {
-      return PgResultSet.toFloat(stringVal);
-    }
-  };
-
-  private static final ArrayDecoder<Boolean[]> BOOLEAN_OBJ_ARRAY = new AbstractObjectArrayDecoder<Boolean[]>(
-      Boolean.class) {
-
-    @Override
-    Object parseValue(int length, ByteBuffer bytes, BaseConnection connection) {
-      return bytes.get() == 1;
-    }
-
-    @Override
-    Object parseValue(String stringVal, BaseConnection connection) throws SQLException {
-      return BooleanTypeUtil.fromString(stringVal);
-    }
-  };
-
-  private static final ArrayDecoder<String[]> STRING_ARRAY = new AbstractObjectArrayDecoder<>(String.class) {
-
-    @Override
-    Object parseValue(int length, ByteBuffer bytes, BaseConnection connection) throws SQLException {
-      assert bytes.hasArray();
-      final byte[] byteArray = bytes.array();
-      final int offset = bytes.arrayOffset() + bytes.position();
-
-      String val;
-      try {
-        val = connection.getEncoding().decode(byteArray, offset, length);
-      } catch (IOException e) {
-        throw new PSQLException(GT.tr(
-            "Invalid character data was found.  This is most likely caused by stored data containing characters that are invalid for the character set the database was created in.  The most common example of this is storing 8bit data in a SQL_ASCII database."),
-            PSQLState.DATA_ERROR, e);
-      }
-      bytes.position(bytes.position() + length);
-      return val;
-    }
-
-    @Override
-    Object parseValue(String stringVal, BaseConnection connection) throws SQLException {
-      return stringVal;
-    }
-  };
-
-  private static final ArrayDecoder<byte[][]> BYTE_ARRAY_ARRAY = new AbstractObjectArrayDecoder<byte[][]>(
-      byte[].class) {
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    Object parseValue(int length, ByteBuffer bytes, BaseConnection connection) throws SQLException {
-      final byte[] array = new byte[length];
-      bytes.get(array);
-      return array;
-    }
-
-    @Override
-    Object parseValue(String stringVal, BaseConnection connection) throws SQLException {
-      return PGbytea.toBytes(stringVal.getBytes(StandardCharsets.US_ASCII));
-    }
-  };
-
-  private static final ArrayDecoder<BigDecimal[]> BIG_DECIMAL_STRING_DECODER = new AbstractObjectStringArrayDecoder<BigDecimal[]>(
-      BigDecimal.class) {
-
-    @Override
-    Object parseValue(String stringVal, BaseConnection connection) throws SQLException {
-      return PgResultSet.toBigDecimal(stringVal);
-    }
-  };
-
-  private static final ArrayDecoder<String[]> STRING_ONLY_DECODER = new AbstractObjectStringArrayDecoder<String[]>(
-      String.class) {
-
-    @Override
-    Object parseValue(String stringVal, BaseConnection connection) throws SQLException {
-      return stringVal;
-    }
-  };
-
-  private static final ArrayDecoder<Date[]> DATE_DECODER = new AbstractObjectStringArrayDecoder<Date[]>(
-      Date.class) {
-
-    @SuppressWarnings("deprecation")
-    @Override
-    Object parseValue(String stringVal, BaseConnection connection) throws SQLException {
-      return connection.getTimestampUtils().toDate(null, stringVal);
-    }
-  };
-
-  private static final ArrayDecoder<Time[]> TIME_DECODER = new AbstractObjectStringArrayDecoder<Time[]>(
-      Time.class) {
-
-    @SuppressWarnings("deprecation")
-    @Override
-    Object parseValue(String stringVal, BaseConnection connection) throws SQLException {
-      return connection.getTimestampUtils().toTime(null, stringVal);
-    }
-  };
-
-  private static final ArrayDecoder<Timestamp[]> TIMESTAMP_DECODER = new AbstractObjectStringArrayDecoder<Timestamp[]>(
-      Timestamp.class) {
-
-    @SuppressWarnings("deprecation")
-    @Override
-    Object parseValue(String stringVal, BaseConnection connection) throws SQLException {
-      return connection.getTimestampUtils().toTimestamp(null, stringVal);
-    }
-  };
-
-  /**
-   * Maps from base type oid to {@link ArrayDecoder} capable of processing
-   * entries.
-   */
-  @SuppressWarnings("rawtypes")
-  private static final Map<Integer, ArrayDecoder> OID_TO_DECODER = new HashMap<>(
-      (int) (21 / .75) + 1);
-
-  static {
-    OID_TO_DECODER.put(Oid.OID, INT4_UNSIGNED_OBJ_ARRAY);
-    OID_TO_DECODER.put(Oid.INT8, LONG_OBJ_ARRAY);
-    OID_TO_DECODER.put(Oid.INT4, INTEGER_OBJ_ARRAY);
-    OID_TO_DECODER.put(Oid.INT2, SHORT_OBJ_ARRAY);
-    OID_TO_DECODER.put(Oid.MONEY, DOUBLE_OBJ_ARRAY);
-    OID_TO_DECODER.put(Oid.FLOAT8, DOUBLE_OBJ_ARRAY);
-    OID_TO_DECODER.put(Oid.FLOAT4, FLOAT_OBJ_ARRAY);
-    OID_TO_DECODER.put(Oid.TEXT, STRING_ARRAY);
-    OID_TO_DECODER.put(Oid.VARCHAR, STRING_ARRAY);
-    // 42.2.x decodes jsonb array as String rather than PGobject
-    OID_TO_DECODER.put(Oid.JSONB, STRING_ONLY_DECODER);
-    OID_TO_DECODER.put(Oid.BIT, BOOLEAN_OBJ_ARRAY);
-    OID_TO_DECODER.put(Oid.BOOL, BOOLEAN_OBJ_ARRAY);
-    OID_TO_DECODER.put(Oid.BYTEA, BYTE_ARRAY_ARRAY);
-    OID_TO_DECODER.put(Oid.NUMERIC, BIG_DECIMAL_STRING_DECODER);
-    OID_TO_DECODER.put(Oid.BPCHAR, STRING_ONLY_DECODER);
-    OID_TO_DECODER.put(Oid.CHAR, STRING_ONLY_DECODER);
-    OID_TO_DECODER.put(Oid.JSON, STRING_ONLY_DECODER);
-    OID_TO_DECODER.put(Oid.DATE, DATE_DECODER);
-    OID_TO_DECODER.put(Oid.TIME, TIME_DECODER);
-    OID_TO_DECODER.put(Oid.TIMETZ, TIME_DECODER);
-    OID_TO_DECODER.put(Oid.TIMESTAMP, TIMESTAMP_DECODER);
-    OID_TO_DECODER.put(Oid.TIMESTAMPTZ, TIMESTAMP_DECODER);
-  }
-
-  @SuppressWarnings("rawtypes")
-  private static final class ArrayAssistantObjectArrayDecoder extends AbstractObjectArrayDecoder {
-    private final ArrayAssistant arrayAssistant;
-
-    @SuppressWarnings("unchecked")
-    ArrayAssistantObjectArrayDecoder(ArrayAssistant arrayAssistant) {
-      super(arrayAssistant.baseType());
-      this.arrayAssistant = arrayAssistant;
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    Object parseValue(int length, ByteBuffer bytes, BaseConnection connection) throws SQLException {
-
-      assert bytes.hasArray();
-      final byte[] byteArray = bytes.array();
-      final int offset = bytes.arrayOffset() + bytes.position();
-
-      final Object val = arrayAssistant.buildElement(byteArray, offset, length);
-
-      bytes.position(bytes.position() + length);
-      return val;
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    Object parseValue(String stringVal, BaseConnection connection) throws SQLException {
-      return arrayAssistant.buildElement(stringVal);
-    }
-  }
-
-  private static final class MappedTypeObjectArrayDecoder extends AbstractObjectArrayDecoder<Object[]> {
-
-    private final String typeName;
-
-    MappedTypeObjectArrayDecoder(String baseTypeName) {
-      super(Object.class);
-      this.typeName = baseTypeName;
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    Object parseValue(int length, ByteBuffer bytes, BaseConnection connection) throws SQLException {
-      final byte[] copy = new byte[length];
-      bytes.get(copy);
-      return connection.getObject(typeName, null, copy);
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    Object parseValue(String stringVal, BaseConnection connection) throws SQLException {
-      return connection.getObject(typeName, stringVal, null);
-    }
-  }
-
-  @SuppressWarnings("unchecked")
-  private static <A extends Object> ArrayDecoder<A> getDecoder(int oid, BaseConnection connection) throws SQLException {
-    final Integer key = oid;
-    @SuppressWarnings("rawtypes")
-    final ArrayDecoder decoder = OID_TO_DECODER.get(key);
-    if (decoder != null) {
-      return decoder;
-    }
-
-    final ArrayAssistant assistant = ArrayAssistantRegistry.getAssistant(oid);
-
-    if (assistant != null) {
-      return new ArrayAssistantObjectArrayDecoder(assistant);
-    }
-
-    final String typeName = connection.getTypeInfo().getPGType(oid);
-    if (typeName == null) {
-      throw Driver.notImplemented(PgArray.class, "readArray(data,oid)");
-    }
-
-    // 42.2.x should return enums as strings
-    int type = connection.getTypeInfo().getSQLType(typeName);
-    if (type == Types.CHAR || type == Types.VARCHAR) {
-      return (ArrayDecoder<A>) STRING_ONLY_DECODER;
-    }
-    return (ArrayDecoder<A>) new MappedTypeObjectArrayDecoder(typeName);
-  }
-
-  /**
-   * Reads binary representation of array into object model.
-   *
-   * @param index
-   *          1 based index of where to start on outermost array.
-   * @param count
-   *          The number of items to return from outermost array (beginning at
-   *          <i>index</i>).
-   * @param bytes
-   *          The binary representation of the array.
-   * @param connection
-   *          The connection the <i>bytes</i> were retrieved from.
-   * @return The parsed array.
-   * @throws SQLException
-   *           For failures encountered during parsing.
-   */
-  @SuppressWarnings("unchecked")
-  public static Object readBinaryArray(int index, int count, byte[] bytes, BaseConnection connection)
-      throws SQLException {
-    final ByteBuffer buffer = ByteBuffer.wrap(bytes);
-    buffer.order(ByteOrder.BIG_ENDIAN);
-    final int dimensions = buffer.getInt();
-    final boolean hasNulls = buffer.getInt() != 0;
-    final int elementOid = buffer.getInt();
-
-    @SuppressWarnings("rawtypes")
-    final ArrayDecoder decoder = getDecoder(elementOid, connection);
-
-    if (!decoder.supportBinary()) {
-      throw Driver.notImplemented(PgArray.class, "readBinaryArray(data,oid)");
-    }
-
-    if (dimensions == 0) {
-      return decoder.createArray(0);
-    }
-
-    final int adjustedSkipIndex = index > 0 ? index - 1 : 0;
-
-    // optimize for single dimension array
-    if (dimensions == 1) {
-      int length = buffer.getInt();
-      buffer.position(buffer.position() + 4);
-      if (count > 0) {
-        length = Math.min(length, count);
-      }
-      final Object array = decoder.createArray(length);
-      decoder.populateFromBinary(array, adjustedSkipIndex, length, buffer, connection);
-      return array;
-    }
-
-    final int[] dimensionLengths = new int[dimensions];
-    for (int i = 0; i < dimensions; i++) {
-      dimensionLengths[i] = buffer.getInt();
-      buffer.position(buffer.position() + 4);
-    }
-
-    if (count > 0) {
-      dimensionLengths[0] = Math.min(count, dimensionLengths[0]);
-    }
-
-    final Object[] array = decoder.createMultiDimensionalArray(dimensionLengths);
-
-    // TODO: in certain circumstances (no nulls, fixed size data types)
-    // if adjustedSkipIndex is > 0, we could advance through the buffer rather than
-    // parse our way through throwing away the results
-
-    storeValues(array, decoder, buffer, adjustedSkipIndex, dimensionLengths, 0, connection);
-
-    return array;
-  }
-
-  @SuppressWarnings("unchecked")
-  private static <A extends Object> void storeValues(A[] array, ArrayDecoder<A> decoder, ByteBuffer bytes,
-      int skip, int[] dimensionLengths, int dim, BaseConnection connection) throws SQLException {
-    assert dim <= dimensionLengths.length - 2;
-
-    for (int i = 0; i < skip; i++) {
-      if (dim == dimensionLengths.length - 2) {
-        decoder.populateFromBinary(array[0], 0, dimensionLengths[dim + 1], bytes, connection);
-      } else {
-        storeValues((A[]) array[0], decoder, bytes, 0, dimensionLengths, dim + 1, connection);
-      }
-    }
-
-    for (int i = 0; i < dimensionLengths[dim]; i++) {
-      if (dim == dimensionLengths.length - 2) {
-        decoder.populateFromBinary(array[i], 0, dimensionLengths[dim + 1], bytes, connection);
-      } else {
-        storeValues((A[]) array[i], decoder, bytes, 0, dimensionLengths, dim + 1, connection);
-      }
-    }
-  }
-
-  /**
-   * Parses the string representation of an array into a {@link PgArrayList}.
-   *
-   * @param fieldString
-   *          The array value to parse.
-   * @param delim
-   *          The delimiter character appropriate for the data type.
-   * @return A {@link PgArrayList} representing the parsed <i>fieldString</i>.
-   */
-  static PgArrayList buildArrayList(String fieldString, char delim) {
-
-    final PgArrayList arrayList = new PgArrayList();
-
-    if (fieldString == null) {
-      return arrayList;
-    }
-
-    final char[] chars = fieldString.toCharArray();
-    StringBuilder buffer = null;
-    boolean insideString = false;
-
-    // needed for checking if NULL value occurred
-    boolean wasInsideString = false;
-
-    // array dimension arrays
-    final List<PgArrayList> dims = new ArrayList<>();
-
-    // currently processed array
-    PgArrayList curArray = arrayList;
-
-    // Starting with 8.0 non-standard (beginning index
-    // isn't 1) bounds the dimensions are returned in the
-    // data formatted like so "[0:3]={0,1,2,3,4}".
-    // Older versions simply do not return the bounds.
-    //
-    // Right now we ignore these bounds, but we could
-    // consider allowing these index values to be used
-    // even though the JDBC spec says 1 is the first
-    // index. I'm not sure what a client would like
-    // to see, so we just retain the old behavior.
-    int startOffset = 0;
-    {
-      if (chars[0] == '[') {
-        while (chars[startOffset] != '=') {
-          startOffset++;
+        @Override
+        Object parseValue(int length, ByteBuffer bytes, BaseConnection connection) {
+            return bytes.getInt() & 0xFFFFFFFFL;
         }
-        startOffset++; // skip =
-      }
-    }
 
-    for (int i = startOffset; i < chars.length; i++) {
-
-      // escape character that we need to skip
-      if (chars[i] == '\\') {
-        i++;
-      } else if (!insideString && chars[i] == '{') {
-        // subarray start
-        if (dims.isEmpty()) {
-          dims.add(arrayList);
-        } else {
-          PgArrayList a = new PgArrayList();
-          PgArrayList p = dims.get(dims.size() - 1);
-          p.add(a);
-          dims.add(a);
+        @Override
+        Object parseValue(String stringVal, BaseConnection connection) throws SQLException {
+            return PgResultSet.toLong(stringVal);
         }
-        curArray = dims.get(dims.size() - 1);
+    };
+    private static final ArrayDecoder<Integer[]> INTEGER_OBJ_ARRAY = new AbstractObjectArrayDecoder<Integer[]>(
+            Integer.class) {
 
-        // number of dimensions
-        {
-          for (int t = i + 1; t < chars.length; t++) {
-            if (Character.isWhitespace(chars[t])) {
-              continue;
-            } else if (chars[t] == '{') {
-              curArray.dimensionsCount++;
-            } else {
-              break;
+        @Override
+        Object parseValue(int length, ByteBuffer bytes, BaseConnection connection) {
+            return bytes.getInt();
+        }
+
+        @Override
+        Object parseValue(String stringVal, BaseConnection connection) throws SQLException {
+            return PgResultSet.toInt(stringVal);
+        }
+    };
+    private static final ArrayDecoder<Short[]> SHORT_OBJ_ARRAY = new AbstractObjectArrayDecoder<Short[]>(Short.class) {
+
+        @Override
+        Object parseValue(int length, ByteBuffer bytes, BaseConnection connection) {
+            return bytes.getShort();
+        }
+
+        @Override
+        Object parseValue(String stringVal, BaseConnection connection) throws SQLException {
+            return PgResultSet.toShort(stringVal);
+        }
+    };
+    private static final ArrayDecoder<Double[]> DOUBLE_OBJ_ARRAY = new AbstractObjectArrayDecoder<Double[]>(
+            Double.class) {
+
+        @Override
+        Object parseValue(int length, ByteBuffer bytes, BaseConnection connection) {
+            return bytes.getDouble();
+        }
+
+        @Override
+        Object parseValue(String stringVal, BaseConnection connection) throws SQLException {
+            return PgResultSet.toDouble(stringVal);
+        }
+    };
+    private static final ArrayDecoder<Float[]> FLOAT_OBJ_ARRAY = new AbstractObjectArrayDecoder<Float[]>(Float.class) {
+
+        @Override
+        Object parseValue(int length, ByteBuffer bytes, BaseConnection connection) {
+            return bytes.getFloat();
+        }
+
+        @Override
+        Object parseValue(String stringVal, BaseConnection connection) throws SQLException {
+            return PgResultSet.toFloat(stringVal);
+        }
+    };
+    private static final ArrayDecoder<Boolean[]> BOOLEAN_OBJ_ARRAY = new AbstractObjectArrayDecoder<Boolean[]>(
+            Boolean.class) {
+
+        @Override
+        Object parseValue(int length, ByteBuffer bytes, BaseConnection connection) {
+            return bytes.get() == 1;
+        }
+
+        @Override
+        Object parseValue(String stringVal, BaseConnection connection) throws SQLException {
+            return BooleanTypeUtil.fromString(stringVal);
+        }
+    };
+    private static final ArrayDecoder<String[]> STRING_ARRAY = new AbstractObjectArrayDecoder<>(String.class) {
+
+        @Override
+        Object parseValue(int length, ByteBuffer bytes, BaseConnection connection) throws SQLException {
+            assert bytes.hasArray();
+            final byte[] byteArray = bytes.array();
+            final int offset = bytes.arrayOffset() + bytes.position();
+
+            String val;
+            try {
+                val = connection.getEncoding().decode(byteArray, offset, length);
+            } catch (IOException e) {
+                throw new PSQLException(GT.tr(
+                        "Invalid character data was found.  This is most likely caused by stored data containing characters that are invalid for the character set the database was created in.  The most common example of this is storing 8bit data in a SQL_ASCII database."),
+                        PSQLState.DATA_ERROR, e);
             }
-          }
+            bytes.position(bytes.position() + length);
+            return val;
         }
 
-        buffer = new StringBuilder();
-        continue;
-      } else if (chars[i] == '"') {
-        // quoted element
-        insideString = !insideString;
-        wasInsideString = true;
-        continue;
-      } else if (!insideString && Parser.isArrayWhiteSpace(chars[i])) {
-        // white space
-        continue;
-      } else if ((!insideString && (chars[i] == delim || chars[i] == '}')) || i == chars.length - 1) {
-        // array end or element end
-        // when character that is a part of array element
-        if (chars[i] != '"' && chars[i] != '}' && chars[i] != delim && buffer != null) {
-          buffer.append(chars[i]);
+        @Override
+        Object parseValue(String stringVal, BaseConnection connection) throws SQLException {
+            return stringVal;
+        }
+    };
+    private static final ArrayDecoder<byte[][]> BYTE_ARRAY_ARRAY = new AbstractObjectArrayDecoder<byte[][]>(
+            byte[].class) {
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        Object parseValue(int length, ByteBuffer bytes, BaseConnection connection) throws SQLException {
+            final byte[] array = new byte[length];
+            bytes.get(array);
+            return array;
         }
 
-        String b = buffer == null ? null : buffer.toString();
+        @Override
+        Object parseValue(String stringVal, BaseConnection connection) throws SQLException {
+            return PGbytea.toBytes(stringVal.getBytes(StandardCharsets.US_ASCII));
+        }
+    };
+    private static final ArrayDecoder<BigDecimal[]> BIG_DECIMAL_STRING_DECODER = new AbstractObjectStringArrayDecoder<BigDecimal[]>(
+            BigDecimal.class) {
 
-        // add element to current array
-        if (b != null && (!b.isEmpty() || wasInsideString)) {
-          curArray.add(!wasInsideString && "NULL".equals(b) ? null : b);
+        @Override
+        Object parseValue(String stringVal, BaseConnection connection) throws SQLException {
+            return PgResultSet.toBigDecimal(stringVal);
+        }
+    };
+    private static final ArrayDecoder<String[]> STRING_ONLY_DECODER = new AbstractObjectStringArrayDecoder<String[]>(
+            String.class) {
+
+        @Override
+        Object parseValue(String stringVal, BaseConnection connection) throws SQLException {
+            return stringVal;
+        }
+    };
+    private static final ArrayDecoder<Date[]> DATE_DECODER = new AbstractObjectStringArrayDecoder<Date[]>(
+            Date.class) {
+
+        @SuppressWarnings("deprecation")
+        @Override
+        Object parseValue(String stringVal, BaseConnection connection) throws SQLException {
+            return connection.getTimestampUtils().toDate(null, stringVal);
+        }
+    };
+    private static final ArrayDecoder<Time[]> TIME_DECODER = new AbstractObjectStringArrayDecoder<Time[]>(
+            Time.class) {
+
+        @SuppressWarnings("deprecation")
+        @Override
+        Object parseValue(String stringVal, BaseConnection connection) throws SQLException {
+            return connection.getTimestampUtils().toTime(null, stringVal);
+        }
+    };
+    private static final ArrayDecoder<Timestamp[]> TIMESTAMP_DECODER = new AbstractObjectStringArrayDecoder<Timestamp[]>(
+            Timestamp.class) {
+
+        @SuppressWarnings("deprecation")
+        @Override
+        Object parseValue(String stringVal, BaseConnection connection) throws SQLException {
+            return connection.getTimestampUtils().toTimestamp(null, stringVal);
+        }
+    };
+    /**
+     * Maps from base type oid to {@link ArrayDecoder} capable of processing
+     * entries.
+     */
+    @SuppressWarnings("rawtypes")
+    private static final Map<Integer, ArrayDecoder> OID_TO_DECODER = new HashMap<>(
+            (int) (21 / .75) + 1);
+
+    static {
+        OID_TO_DECODER.put(Oid.OID, INT4_UNSIGNED_OBJ_ARRAY);
+        OID_TO_DECODER.put(Oid.INT8, LONG_OBJ_ARRAY);
+        OID_TO_DECODER.put(Oid.INT4, INTEGER_OBJ_ARRAY);
+        OID_TO_DECODER.put(Oid.INT2, SHORT_OBJ_ARRAY);
+        OID_TO_DECODER.put(Oid.MONEY, DOUBLE_OBJ_ARRAY);
+        OID_TO_DECODER.put(Oid.FLOAT8, DOUBLE_OBJ_ARRAY);
+        OID_TO_DECODER.put(Oid.FLOAT4, FLOAT_OBJ_ARRAY);
+        OID_TO_DECODER.put(Oid.TEXT, STRING_ARRAY);
+        OID_TO_DECODER.put(Oid.VARCHAR, STRING_ARRAY);
+        // 42.2.x decodes jsonb array as String rather than PGobject
+        OID_TO_DECODER.put(Oid.JSONB, STRING_ONLY_DECODER);
+        OID_TO_DECODER.put(Oid.BIT, BOOLEAN_OBJ_ARRAY);
+        OID_TO_DECODER.put(Oid.BOOL, BOOLEAN_OBJ_ARRAY);
+        OID_TO_DECODER.put(Oid.BYTEA, BYTE_ARRAY_ARRAY);
+        OID_TO_DECODER.put(Oid.NUMERIC, BIG_DECIMAL_STRING_DECODER);
+        OID_TO_DECODER.put(Oid.BPCHAR, STRING_ONLY_DECODER);
+        OID_TO_DECODER.put(Oid.CHAR, STRING_ONLY_DECODER);
+        OID_TO_DECODER.put(Oid.JSON, STRING_ONLY_DECODER);
+        OID_TO_DECODER.put(Oid.DATE, DATE_DECODER);
+        OID_TO_DECODER.put(Oid.TIME, TIME_DECODER);
+        OID_TO_DECODER.put(Oid.TIMETZ, TIME_DECODER);
+        OID_TO_DECODER.put(Oid.TIMESTAMP, TIMESTAMP_DECODER);
+        OID_TO_DECODER.put(Oid.TIMESTAMPTZ, TIMESTAMP_DECODER);
+    }
+
+    public ArrayDecoding() {
+    }
+
+    @SuppressWarnings("unchecked")
+    private static <A extends Object> ArrayDecoder<A> getDecoder(int oid, BaseConnection connection) throws SQLException {
+        final Integer key = oid;
+        @SuppressWarnings("rawtypes") final ArrayDecoder decoder = OID_TO_DECODER.get(key);
+        if (decoder != null) {
+            return decoder;
         }
 
-        wasInsideString = false;
-        buffer = new StringBuilder();
+        final ArrayAssistant assistant = ArrayAssistantRegistry.getAssistant(oid);
 
-        // when end of an array
-        if (chars[i] == '}') {
-          dims.remove(dims.size() - 1);
-
-          // when multi-dimension
-          if (!dims.isEmpty()) {
-            curArray = dims.get(dims.size() - 1);
-          }
-
-          buffer = null;
+        if (assistant != null) {
+            return new ArrayAssistantObjectArrayDecoder(assistant);
         }
 
-        continue;
-      }
-
-      if (buffer != null) {
-        buffer.append(chars[i]);
-      }
-    }
-
-    return arrayList;
-  }
-
-  /**
-   * Reads {@code String} representation of array into object model.
-   *
-   * @param index
-   *          1 based index of where to start on outermost array.
-   * @param count
-   *          The number of items to return from outermost array (beginning at
-   *          <i>index</i>).
-   * @param oid
-   *          The oid of the base type of the array.
-   * @param list
-   *          The {@code #buildArrayList(String, char) processed} string
-   *          representation of an array.
-   * @param connection
-   *          The connection the <i>bytes</i> were retrieved from.
-   * @return The parsed array.
-   * @throws SQLException
-   *           For failures encountered during parsing.
-   */
-  @SuppressWarnings({"unchecked", "rawtypes"})
-  public static Object readStringArray(int index, int count, int oid, PgArrayList list, BaseConnection connection)
-      throws SQLException {
-
-    final ArrayDecoder decoder = getDecoder(oid, connection);
-
-    final int dims = list.dimensionsCount;
-
-    if (dims == 0) {
-      return decoder.createArray(0);
-    }
-
-    boolean sublist = false;
-
-    int adjustedSkipIndex = 0;
-    if (index > 1) {
-      sublist = true;
-      adjustedSkipIndex = index - 1;
-    }
-
-    int adjustedCount = list.size();
-    if (count > 0 && count != adjustedCount) {
-      sublist = true;
-      adjustedCount = Math.min(adjustedCount, count);
-    }
-
-    final List adjustedList = sublist ? list.subList(adjustedSkipIndex, adjustedSkipIndex + adjustedCount) : list;
-
-    if (dims == 1) {
-      int length = adjustedList.size();
-      if (count > 0) {
-        length = Math.min(length, count);
-      }
-      final Object array = decoder.createArray(length);
-      decoder.populateFromString(array, adjustedList, connection);
-      return array;
-    }
-
-    // dimensions length array (to be used with
-    // java.lang.reflect.Array.newInstance(Class<?>, int[]))
-    final int[] dimensionLengths = new int[dims];
-    dimensionLengths[0] = adjustedCount;
-    {
-      List tmpList = (List) adjustedList.get(0);
-      for (int i = 1; i < dims; i++) {
-        // TODO: tmpList always non-null?
-        dimensionLengths[i] = tmpList.size();
-        if (i != dims - 1) {
-          tmpList = (List) tmpList.get(0);
+        final String typeName = connection.getTypeInfo().getPGType(oid);
+        if (typeName == null) {
+            throw Driver.notImplemented(PgArray.class, "readArray(data,oid)");
         }
-      }
+
+        // 42.2.x should return enums as strings
+        int type = connection.getTypeInfo().getSQLType(typeName);
+        if (type == Types.CHAR || type == Types.VARCHAR) {
+            return (ArrayDecoder<A>) STRING_ONLY_DECODER;
+        }
+        return (ArrayDecoder<A>) new MappedTypeObjectArrayDecoder(typeName);
     }
 
-    final Object[] array = decoder.createMultiDimensionalArray(dimensionLengths);
+    /**
+     * Reads binary representation of array into object model.
+     *
+     * @param index      1 based index of where to start on outermost array.
+     * @param count      The number of items to return from outermost array (beginning at
+     *                   <i>index</i>).
+     * @param bytes      The binary representation of the array.
+     * @param connection The connection the <i>bytes</i> were retrieved from.
+     * @return The parsed array.
+     * @throws SQLException For failures encountered during parsing.
+     */
+    @SuppressWarnings("unchecked")
+    public static Object readBinaryArray(int index, int count, byte[] bytes, BaseConnection connection)
+            throws SQLException {
+        final ByteBuffer buffer = ByteBuffer.wrap(bytes);
+        buffer.order(ByteOrder.BIG_ENDIAN);
+        final int dimensions = buffer.getInt();
+        final boolean hasNulls = buffer.getInt() != 0;
+        final int elementOid = buffer.getInt();
 
-    storeStringValues(array, decoder, adjustedList, dimensionLengths, 0, connection);
+        @SuppressWarnings("rawtypes") final ArrayDecoder decoder = getDecoder(elementOid, connection);
 
-    return array;
-  }
+        if (!decoder.supportBinary()) {
+            throw Driver.notImplemented(PgArray.class, "readBinaryArray(data,oid)");
+        }
 
-  @SuppressWarnings({"unchecked", "rawtypes"})
-  private static <A extends Object> void storeStringValues(A[] array, ArrayDecoder<A> decoder, List list, int [] dimensionLengths,
-      int dim, BaseConnection connection) throws SQLException {
-    assert dim <= dimensionLengths.length - 2;
+        if (dimensions == 0) {
+            return decoder.createArray(0);
+        }
 
-    for (int i = 0; i < dimensionLengths[dim]; i++) {
-      Object element = list.get(i);
-      if (dim == dimensionLengths.length - 2) {
-        decoder.populateFromString(array[i], (List<String>) element, connection);
-      } else {
-        storeStringValues((A[]) array[i], decoder, (List) element, dimensionLengths, dim + 1, connection);
-      }
+        final int adjustedSkipIndex = index > 0 ? index - 1 : 0;
+
+        // optimize for single dimension array
+        if (dimensions == 1) {
+            int length = buffer.getInt();
+            buffer.position(buffer.position() + 4);
+            if (count > 0) {
+                length = Math.min(length, count);
+            }
+            final Object array = decoder.createArray(length);
+            decoder.populateFromBinary(array, adjustedSkipIndex, length, buffer, connection);
+            return array;
+        }
+
+        final int[] dimensionLengths = new int[dimensions];
+        for (int i = 0; i < dimensions; i++) {
+            dimensionLengths[i] = buffer.getInt();
+            buffer.position(buffer.position() + 4);
+        }
+
+        if (count > 0) {
+            dimensionLengths[0] = Math.min(count, dimensionLengths[0]);
+        }
+
+        final Object[] array = decoder.createMultiDimensionalArray(dimensionLengths);
+
+        // TODO: in certain circumstances (no nulls, fixed size data types)
+        // if adjustedSkipIndex is > 0, we could advance through the buffer rather than
+        // parse our way through throwing away the results
+
+        storeValues(array, decoder, buffer, adjustedSkipIndex, dimensionLengths, 0, connection);
+
+        return array;
+    }
+
+    @SuppressWarnings("unchecked")
+    private static <A extends Object> void storeValues(A[] array, ArrayDecoder<A> decoder, ByteBuffer bytes,
+                                                       int skip, int[] dimensionLengths, int dim, BaseConnection connection) throws SQLException {
+        assert dim <= dimensionLengths.length - 2;
+
+        for (int i = 0; i < skip; i++) {
+            if (dim == dimensionLengths.length - 2) {
+                decoder.populateFromBinary(array[0], 0, dimensionLengths[dim + 1], bytes, connection);
+            } else {
+                storeValues((A[]) array[0], decoder, bytes, 0, dimensionLengths, dim + 1, connection);
+            }
+        }
+
+        for (int i = 0; i < dimensionLengths[dim]; i++) {
+            if (dim == dimensionLengths.length - 2) {
+                decoder.populateFromBinary(array[i], 0, dimensionLengths[dim + 1], bytes, connection);
+            } else {
+                storeValues((A[]) array[i], decoder, bytes, 0, dimensionLengths, dim + 1, connection);
+            }
+        }
+    }
+
+    /**
+     * Parses the string representation of an array into a {@link PgArrayList}.
+     *
+     * @param fieldString The array value to parse.
+     * @param delim       The delimiter character appropriate for the data type.
+     * @return A {@link PgArrayList} representing the parsed <i>fieldString</i>.
+     */
+    static PgArrayList buildArrayList(String fieldString, char delim) {
+
+        final PgArrayList arrayList = new PgArrayList();
+
+        if (fieldString == null) {
+            return arrayList;
+        }
+
+        final char[] chars = fieldString.toCharArray();
+        StringBuilder buffer = null;
+        boolean insideString = false;
+
+        // needed for checking if NULL value occurred
+        boolean wasInsideString = false;
+
+        // array dimension arrays
+        final List<PgArrayList> dims = new ArrayList<>();
+
+        // currently processed array
+        PgArrayList curArray = arrayList;
+
+        // Starting with 8.0 non-standard (beginning index
+        // isn't 1) bounds the dimensions are returned in the
+        // data formatted like so "[0:3]={0,1,2,3,4}".
+        // Older versions simply do not return the bounds.
+        //
+        // Right now we ignore these bounds, but we could
+        // consider allowing these index values to be used
+        // even though the JDBC spec says 1 is the first
+        // index. I'm not sure what a client would like
+        // to see, so we just retain the old behavior.
+        int startOffset = 0;
+        {
+            if (chars[0] == '[') {
+                while (chars[startOffset] != '=') {
+                    startOffset++;
+                }
+                startOffset++; // skip =
+            }
+        }
+
+        for (int i = startOffset; i < chars.length; i++) {
+
+            // escape character that we need to skip
+            if (chars[i] == '\\') {
+                i++;
+            } else if (!insideString && chars[i] == '{') {
+                // subarray start
+                if (dims.isEmpty()) {
+                    dims.add(arrayList);
+                } else {
+                    PgArrayList a = new PgArrayList();
+                    PgArrayList p = dims.get(dims.size() - 1);
+                    p.add(a);
+                    dims.add(a);
+                }
+                curArray = dims.get(dims.size() - 1);
+
+                // number of dimensions
+                {
+                    for (int t = i + 1; t < chars.length; t++) {
+                        if (Character.isWhitespace(chars[t])) {
+                            continue;
+                        } else if (chars[t] == '{') {
+                            curArray.dimensionsCount++;
+                        } else {
+                            break;
+                        }
+                    }
+                }
+
+                buffer = new StringBuilder();
+                continue;
+            } else if (chars[i] == '"') {
+                // quoted element
+                insideString = !insideString;
+                wasInsideString = true;
+                continue;
+            } else if (!insideString && Parser.isArrayWhiteSpace(chars[i])) {
+                // white space
+                continue;
+            } else if ((!insideString && (chars[i] == delim || chars[i] == '}')) || i == chars.length - 1) {
+                // array end or element end
+                // when character that is a part of array element
+                if (chars[i] != '"' && chars[i] != '}' && chars[i] != delim && buffer != null) {
+                    buffer.append(chars[i]);
+                }
+
+                String b = buffer == null ? null : buffer.toString();
+
+                // add element to current array
+                if (b != null && (!b.isEmpty() || wasInsideString)) {
+                    curArray.add(!wasInsideString && "NULL".equals(b) ? null : b);
+                }
+
+                wasInsideString = false;
+                buffer = new StringBuilder();
+
+                // when end of an array
+                if (chars[i] == '}') {
+                    dims.remove(dims.size() - 1);
+
+                    // when multi-dimension
+                    if (!dims.isEmpty()) {
+                        curArray = dims.get(dims.size() - 1);
+                    }
+
+                    buffer = null;
+                }
+
+                continue;
+            }
+
+            if (buffer != null) {
+                buffer.append(chars[i]);
+            }
+        }
+
+        return arrayList;
+    }
+
+    /**
+     * Reads {@code String} representation of array into object model.
+     *
+     * @param index      1 based index of where to start on outermost array.
+     * @param count      The number of items to return from outermost array (beginning at
+     *                   <i>index</i>).
+     * @param oid        The oid of the base type of the array.
+     * @param list       The {@link #buildArrayList(String, char) processed} string
+     *                   representation of an array.
+     * @param connection The connection the <i>bytes</i> were retrieved from.
+     * @return The parsed array.
+     * @throws SQLException For failures encountered during parsing.
+     */
+    @SuppressWarnings({"unchecked", "rawtypes"})
+    public static Object readStringArray(int index, int count, int oid, PgArrayList list, BaseConnection connection)
+            throws SQLException {
+
+        final ArrayDecoder decoder = getDecoder(oid, connection);
+
+        final int dims = list.dimensionsCount;
+
+        // A zero-dimension list represents an empty array.
+        if (dims == 0) {
+            return decoder.createArray(0);
+        }
+
+        // Track whether only a sub-range of the outermost dimension was requested.
+        boolean sublist = false;
+
+        int adjustedSkipIndex = 0;
+        if (index > 1) {
+            sublist = true;
+            // JDBC indices are 1 based; convert to a 0 based offset into the list.
+            adjustedSkipIndex = index - 1;
+        }
+
+        int adjustedCount = list.size();
+        if (count > 0 && count != adjustedCount) {
+            sublist = true;
+            adjustedCount = Math.min(adjustedCount, count);
+        }
+
+        final List adjustedList = sublist ? list.subList(adjustedSkipIndex, adjustedSkipIndex + adjustedCount) : list;
+
+        if (dims == 1) {
+            // Single dimension: decode the element strings directly into a flat array.
+            int length = adjustedList.size();
+            if (count > 0) {
+                length = Math.min(length, count);
+            }
+            final Object array = decoder.createArray(length);
+            decoder.populateFromString(array, adjustedList, connection);
+            return array;
+        }
+
+        // dimensions length array (to be used with
+        // java.lang.reflect.Array.newInstance(Class<?>, int[]))
+        final int[] dimensionLengths = new int[dims];
+        dimensionLengths[0] = adjustedCount;
+        {
+            // Inner dimension sizes are read from the first element at each depth,
+            // which assumes the array is rectangular.
+            List tmpList = (List) adjustedList.get(0);
+            for (int i = 1; i < dims; i++) {
+                // TODO: tmpList always non-null?
+                dimensionLengths[i] = tmpList.size();
+                if (i != dims - 1) {
+                    tmpList = (List) tmpList.get(0);
+                }
+            }
+        }
+
+        final Object[] array = decoder.createMultiDimensionalArray(dimensionLengths);
+
+        // Recursively copy the parsed string values into the dimensioned array.
+        storeStringValues(array, decoder, adjustedList, dimensionLengths, 0, connection);
+
+        return array;
+    }
+
+    /**
+     * Recursively populates a multi-dimensional array from the nested list model.
+     *
+     * @param array            The (possibly nested) array to populate at depth {@code dim}.
+     * @param decoder          Decoder used to convert the leaf {@code String} values.
+     * @param list             Nested lists mirroring the array structure; leaves are
+     *                         {@code List<String>}.
+     * @param dimensionLengths Length of each dimension.
+     * @param dim              Current depth; recursion stops one level above the leaves.
+     * @param connection       The connection the values were retrieved from.
+     * @throws SQLException For failures encountered during parsing.
+     */
+    @SuppressWarnings({"unchecked", "rawtypes"})
+    private static <A extends Object> void storeStringValues(A[] array, ArrayDecoder<A> decoder, List list, int[] dimensionLengths,
+                                                             int dim, BaseConnection connection) throws SQLException {
+        // Only invoked for 2+ dimensional arrays; the final dimension is filled by
+        // populateFromString below.
+        assert dim <= dimensionLengths.length - 2;
+
+        for (int i = 0; i < dimensionLengths[dim]; i++) {
+            Object element = list.get(i);
+            if (dim == dimensionLengths.length - 2) {
+                // Second-to-last dimension: the element holds the leaf strings.
+                decoder.populateFromString(array[i], (List<String>) element, connection);
+            } else {
+                storeStringValues((A[]) array[i], decoder, (List) element, dimensionLengths, dim + 1, connection);
+            }
+        }
+    }
+
+    /**
+     * Strategy for decoding a PG array of a particular base type into a Java array.
+     *
+     * @param <A> The Java array type produced.
+     */
+    private interface ArrayDecoder<A extends Object> {
+
+        /** Creates a single-dimension array of the given size. */
+        A createArray(int size);
+
+        /** Creates a (possibly nested) array with the given per-dimension sizes. */
+        Object[] createMultiDimensionalArray(int[] sizes);
+
+        /** Indicates whether {@link #populateFromBinary} is supported by this decoder. */
+        boolean supportBinary();
+
+        /**
+         * Fills {@code array} from the binary representation, skipping {@code index}
+         * leading elements and decoding {@code count} elements from {@code bytes}.
+         */
+        void populateFromBinary(A array, int index, int count, ByteBuffer bytes, BaseConnection connection)
+                throws SQLException;
+
+        /** Fills {@code array} by parsing the string representations of the elements. */
+        void populateFromString(A array, List<String> strings, BaseConnection connection) throws SQLException;
+    }
+
+    /**
+     * Array list implementation specific for storing PG array elements. If
+     * {@link PgArrayList#dimensionsCount} is {@code 1}, the contents will be
+     * {@link String}. For all larger <i>dimensionsCount</i>, the values will be
+     * {@link PgArrayList} instances.
+     */
+    @SuppressWarnings("serial")
+    public static final class PgArrayList extends ArrayList<Object> {
+
+        /**
+         * How many dimensions the parsed array has. Defaults to {@code 1}.
+         */
+        int dimensionsCount = 1;
+
+        public PgArrayList() {
+        }
+
+    }
+
+    /**
+     * Base {@link ArrayDecoder} for decoders that only support the string (text)
+     * representation; binary decoding throws {@link SQLFeatureNotSupportedException}.
+     *
+     * @param <A> The Java array type produced.
+     */
+    private abstract static class AbstractObjectStringArrayDecoder<A extends Object> implements ArrayDecoder<A> {
+        /** Component type used to reflectively instantiate result arrays. */
+        final Class<?> baseClazz;
+
+        AbstractObjectStringArrayDecoder(Class<?> baseClazz) {
+            this.baseClazz = baseClazz;
+        }
+
+        /**
+         * {@inheritDoc}
+         *
+         * <p>Always {@code false} for string-only decoders.</p>
+         */
+        @Override
+        public boolean supportBinary() {
+            return false;
+        }
+
+        @SuppressWarnings("unchecked")
+        @Override
+        public A createArray(int size) {
+            return (A) Array.newInstance(baseClazz, size);
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public Object[] createMultiDimensionalArray(int[] sizes) {
+            return (Object[]) Array.newInstance(baseClazz, sizes);
+        }
+
+        @Override
+        public void populateFromBinary(A arr, int index, int count, ByteBuffer bytes, BaseConnection connection)
+                throws SQLException {
+            // Binary decoding is unavailable here; callers must check supportBinary() first.
+            throw new SQLFeatureNotSupportedException();
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public void populateFromString(A arr, List<String> strings, BaseConnection connection) throws SQLException {
+            final Object[] array = (Object[]) arr;
+
+            for (int i = 0, j = strings.size(); i < j; i++) {
+                final String stringVal = strings.get(i);
+                // A null string represents a SQL NULL element.
+                array[i] = stringVal != null ? parseValue(stringVal, connection) : null;
+            }
+        }
+
+        /** Parses a single non-null element from its string representation. */
+        abstract Object parseValue(String stringVal, BaseConnection connection) throws SQLException;
+    }
+
+    /**
+     * Base {@link ArrayDecoder} adding binary decoding on top of
+     * {@link AbstractObjectStringArrayDecoder}.
+     *
+     * @param <A> The Java array type produced.
+     */
+    private abstract static class AbstractObjectArrayDecoder<A extends Object> extends AbstractObjectStringArrayDecoder<A> {
+
+        AbstractObjectArrayDecoder(Class<?> baseClazz) {
+            super(baseClazz);
+        }
+
+        /**
+         * {@inheritDoc}
+         *
+         * <p>Always {@code true} for this hierarchy.</p>
+         */
+        @Override
+        public boolean supportBinary() {
+            return true;
+        }
+
+        @Override
+        public void populateFromBinary(A arr, int index, int count, ByteBuffer bytes, BaseConnection connection)
+                throws SQLException {
+            final Object[] array = (Object[]) arr;
+
+            // skip through to the requested index
+            for (int i = 0; i < index; i++) {
+                // Each element is a 4-byte length followed by that many payload bytes.
+                final int length = bytes.getInt();
+                if (length > 0) {
+                    bytes.position(bytes.position() + length);
+                }
+            }
+
+            for (int i = 0; i < count; i++) {
+                final int length = bytes.getInt();
+                // A length of -1 marks a SQL NULL element.
+                if (length != -1) {
+                    array[i] = parseValue(length, bytes, connection);
+                } else {
+                    // explicitly set to null for reader's clarity
+                    array[i] = null;
+                }
+            }
+        }
+
+        /** Parses a single non-null element of {@code length} bytes from {@code bytes}. */
+        abstract Object parseValue(int length, ByteBuffer bytes, BaseConnection connection) throws SQLException;
+    }
+
+    /**
+     * Decoder that delegates element construction to a registered
+     * {@link ArrayAssistant}.
+     */
+    @SuppressWarnings("rawtypes")
+    private static final class ArrayAssistantObjectArrayDecoder extends AbstractObjectArrayDecoder {
+        private final ArrayAssistant arrayAssistant;
+
+        @SuppressWarnings("unchecked")
+        ArrayAssistantObjectArrayDecoder(ArrayAssistant arrayAssistant) {
+            super(arrayAssistant.baseType());
+            this.arrayAssistant = arrayAssistant;
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        Object parseValue(int length, ByteBuffer bytes, BaseConnection connection) throws SQLException {
+
+            // The assistant consumes a raw byte[]; requires an array-backed buffer.
+            assert bytes.hasArray();
+            final byte[] byteArray = bytes.array();
+            final int offset = bytes.arrayOffset() + bytes.position();
+
+            final Object val = arrayAssistant.buildElement(byteArray, offset, length);
+
+            // Advance past the consumed element payload.
+            bytes.position(bytes.position() + length);
+            return val;
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        Object parseValue(String stringVal, BaseConnection connection) throws SQLException {
+            return arrayAssistant.buildElement(stringVal);
+        }
+    }
+
+    /**
+     * Decoder for array elements of a named type; element construction is
+     * delegated to {@link BaseConnection#getObject} using the type name.
+     */
+    private static final class MappedTypeObjectArrayDecoder extends AbstractObjectArrayDecoder<Object[]> {
+
+        /** The PG type name passed to the connection when building elements. */
+        private final String typeName;
+
+        MappedTypeObjectArrayDecoder(String baseTypeName) {
+            super(Object.class);
+            this.typeName = baseTypeName;
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        Object parseValue(int length, ByteBuffer bytes, BaseConnection connection) throws SQLException {
+            // Copy out the element payload so the connection can decode it by type name.
+            final byte[] copy = new byte[length];
+            bytes.get(copy);
+            return connection.getObject(typeName, null, copy);
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        Object parseValue(String stringVal, BaseConnection connection) throws SQLException {
+            return connection.getObject(typeName, stringVal, null);
+        }
+    }
     }
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/ArrayEncoding.java b/pgjdbc/src/main/java/org/postgresql/jdbc/ArrayEncoding.java
index 1647c9a..4ef0671 100644
--- a/pgjdbc/src/main/java/org/postgresql/jdbc/ArrayEncoding.java
+++ b/pgjdbc/src/main/java/org/postgresql/jdbc/ArrayEncoding.java
@@ -47,1391 +47,1340 @@ import java.util.Map;
  */
 public final class ArrayEncoding {
 
-  public ArrayEncoding() {
-  }
-
-  public interface ArrayEncoder<A extends Object> {
-
-    /**
-     * The default array type oid supported by this instance.
-     *
-     * @return The default array type oid supported by this instance.
-     */
-    int getDefaultArrayTypeOid();
-
-    /**
-     * Creates {@code String} representation of the <i>array</i>.
-     *
-     * @param delim
-     *          The character to use to delimit between elements.
-     * @param array
-     *          The array to represent as a {@code String}.
-     * @return {@code String} representation of the <i>array</i>.
-     */
-    String toArrayString(char delim, A array);
-
-    /**
-     * Indicates if an array can be encoded in binary form to array <i>oid</i>.
-     *
-     * @param oid
-     *          The array oid to see check for binary support.
-     * @return Indication of whether
-     *         {@link #toBinaryRepresentation(BaseConnection, Object, int)} is
-     *         supported for <i>oid</i>.
-     */
-    boolean supportBinaryRepresentation(int oid);
-
-    /**
-     * Creates binary representation of the <i>array</i>.
-     *
-     * @param connection
-     *          The connection the binary representation will be used on. Attributes
-     *          from the connection might impact how values are translated to
-     *          binary.
-     * @param array
-     *          The array to binary encode. Must not be {@code null}, but may
-     *          contain {@code null} elements.
-     * @param oid
-     *          The array type oid to use. Calls to
-     *          {@link #supportBinaryRepresentation(int)} must have returned
-     *          {@code true}.
-     * @return The binary representation of <i>array</i>.
-     * @throws SQLFeatureNotSupportedException
-     *           If {@link #supportBinaryRepresentation(int)} is false for
-     *           <i>oid</i>.
-     */
-    byte[] toBinaryRepresentation(BaseConnection connection, A array, int oid)
-        throws SQLException, SQLFeatureNotSupportedException;
-
-    /**
-     * Append {@code String} representation of <i>array</i> to <i>sb</i>.
-     *
-     * @param sb
-     *          The {@link StringBuilder} to append to.
-     * @param delim
-     *          The delimiter between elements.
-     * @param array
-     *          The array to represent. Will not be {@code null}, but may contain
-     *          {@code null} elements.
-     */
-    void appendArray(StringBuilder sb, char delim, A array);
-  }
-
-  /**
-   * Base class to implement {@link ArrayEncoding.ArrayEncoder} and provide
-   * multi-dimensional support.
-   *
-   * @param <A>
-   *          Base array type supported.
-   */
-  private abstract static class AbstractArrayEncoder<A extends Object>
-      implements ArrayEncoder<A> {
-
-    private final int oid;
-
-    final int arrayOid;
-
-    /**
-     *
-     * @param oid
-     *          The default/primary base oid type.
-     * @param arrayOid
-     *          The default/primary array oid type.
-     */
-    AbstractArrayEncoder(int oid, int arrayOid) {
-      this.oid = oid;
-      this.arrayOid = arrayOid;
-    }
-
-    /**
-     *
-     * @param arrayOid
-     *          The array oid to get base oid type for.
-     * @return The base oid type for the given array oid type given to
-     *         {@link #toBinaryRepresentation(BaseConnection, Object, int)}.
-     */
-    int getTypeOID(@SuppressWarnings("unused") int arrayOid) {
-      return oid;
-    }
-
-    /**
-     * By default returns the <i>arrayOid</i> this instance was instantiated with.
-     */
-    @Override
-    public int getDefaultArrayTypeOid() {
-      return arrayOid;
-    }
-
-    /**
-     * Counts the number of {@code null} elements in <i>array</i>.
-     *
-     * @param array
-     *          The array to count {@code null} elements in.
-     * @return The number of {@code null} elements in <i>array</i>.
-     */
-    int countNulls(A array) {
-      int nulls = 0;
-      final int arrayLength = Array.getLength(array);
-      for (int i = 0; i < arrayLength; i++) {
-        if (Array.get(array, i) == null) {
-          ++nulls;
-        }
-      }
-      return nulls;
-    }
-
-    /**
-     * Creates {@code byte[]} of just the raw data (no metadata).
-     *
-     * @param connection
-     *          The connection the binary representation will be used on.
-     * @param array
-     *          The array to create binary representation of. Will not be
-     *          {@code null}, but may contain {@code null} elements.
-     * @return {@code byte[]} of just the raw data (no metadata).
-     * @throws SQLFeatureNotSupportedException
-     *           If {@link #supportBinaryRepresentation(int)} is false for
-     *           <i>oid</i>.
-     */
-    abstract byte[] toSingleDimensionBinaryRepresentation(BaseConnection connection, A array)
-        throws SQLException, SQLFeatureNotSupportedException;
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public String toArrayString(char delim, A array) {
-      final StringBuilder sb = new StringBuilder(1024);
-      appendArray(sb, delim, array);
-      return sb.toString();
-    }
-
-    /**
-     * By default returns {@code true} if <i>oid</i> matches the <i>arrayOid</i>
-     * this instance was instantiated with.
-     */
-    @Override
-    public boolean supportBinaryRepresentation(int oid) {
-      return oid == arrayOid;
-    }
-  }
-
-  /**
-   * Base class to provide support for {@code Number} based arrays.
-   *
-   * @param <N>
-   *          The base type of array.
-   */
-  private abstract static class NumberArrayEncoder<N extends Number> extends AbstractArrayEncoder<N[]> {
-
-    private final int fieldSize;
-
-    /**
-     *
-     * @param fieldSize
-     *          The fixed size to represent each value in binary.
-     * @param oid
-     *          The base type oid.
-     * @param arrayOid
-     *          The array type oid.
-     */
-    NumberArrayEncoder(int fieldSize, int oid, int arrayOid) {
-      super(oid, arrayOid);
-      this.fieldSize = fieldSize;
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    final int countNulls(N[] array) {
-      int count = 0;
-      for (int i = 0; i < array.length; i++) {
-        if (array[i] == null) {
-          ++count;
-        }
-      }
-      return count;
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public final byte[] toBinaryRepresentation(BaseConnection connection, N[] array, int oid)
-        throws SQLException, SQLFeatureNotSupportedException {
-      assert oid == this.arrayOid;
-
-      final int nullCount = countNulls(array);
-
-      final byte[] bytes = writeBytes(array, nullCount, 20);
-
-      // 1 dimension
-      ByteConverter.int4(bytes, 0, 1);
-      // no null
-      ByteConverter.int4(bytes, 4, nullCount == 0 ? 0 : 1);
-      // oid
-      ByteConverter.int4(bytes, 8, getTypeOID(oid));
-      // length
-      ByteConverter.int4(bytes, 12, array.length);
-      // postgresql uses 1 base by default
-      ByteConverter.int4(bytes, 16, 1);
-
-      return bytes;
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    final byte[] toSingleDimensionBinaryRepresentation(BaseConnection connection, N[] array)
-        throws SQLException, SQLFeatureNotSupportedException {
-
-      final int nullCount = countNulls(array);
-
-      return writeBytes(array, nullCount, 0);
-    }
-
-    private byte[] writeBytes(final N[] array, final int nullCount, final int offset) {
-      final int length = offset + (4 * array.length) + (fieldSize * (array.length - nullCount));
-      final byte[] bytes = new byte[length];
-
-      int idx = offset;
-      for (int i = 0; i < array.length; i++) {
-        if (array[i] == null) {
-          ByteConverter.int4(bytes, idx, -1);
-          idx += 4;
-        } else {
-          ByteConverter.int4(bytes, idx, fieldSize);
-          idx += 4;
-          write(array[i], bytes, idx);
-          idx += fieldSize;
-        }
-      }
-
-      return bytes;
-    }
-
-    /**
-     * Write single value (<i>number</i>) to <i>bytes</i> beginning at
-     * <i>offset</i>.
-     *
-     * @param number
-     *          The value to write to <i>bytes</i>. This will never be {@code null}.
-     * @param bytes
-     *          The {@code byte[]} to write to.
-     * @param offset
-     *          The offset into <i>bytes</i> to write the <i>number</i> value.
-     */
-    protected abstract void write(N number, byte[] bytes, int offset);
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public final void appendArray(StringBuilder sb, char delim, N[] array) {
-      sb.append('{');
-      for (int i = 0; i < array.length; i++) {
-        if (i != 0) {
-          sb.append(delim);
-        }
-        if (array[i] == null) {
-          sb.append('N').append('U').append('L').append('L');
-        } else {
-          sb.append('"');
-          sb.append(array[i].toString());
-          sb.append('"');
-        }
-      }
-      sb.append('}');
-    }
-  }
-
-  /**
-   * Base support for primitive arrays.
-   *
-   * @param <A>
-   *          The primitive array to support.
-   */
-  private abstract static class FixedSizePrimitiveArrayEncoder<A extends Object>
-      extends AbstractArrayEncoder<A> {
-
-    private final int fieldSize;
-
-    FixedSizePrimitiveArrayEncoder(int fieldSize, int oid, int arrayOid) {
-      super(oid, arrayOid);
-      this.fieldSize = fieldSize;
-    }
-
-    /**
-     * {@inheritDoc}
-     *
-     * <p>
-     * Always returns {@code 0}.
-     * </p>
-     */
-    @Override
-    final int countNulls(A array) {
-      return 0;
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public final byte[] toBinaryRepresentation(BaseConnection connection, A array, int oid)
-        throws SQLException, SQLFeatureNotSupportedException {
-      assert oid == arrayOid;
-
-      final int arrayLength = Array.getLength(array);
-      final int length = 20 + ((fieldSize + 4) * arrayLength);
-      final byte[] bytes = new byte[length];
-
-      // 1 dimension
-      ByteConverter.int4(bytes, 0, 1);
-      // no null
-      ByteConverter.int4(bytes, 4, 0);
-      // oid
-      ByteConverter.int4(bytes, 8, getTypeOID(oid));
-      // length
-      ByteConverter.int4(bytes, 12, arrayLength);
-      // postgresql uses 1 base by default
-      ByteConverter.int4(bytes, 16, 1);
-
-      write(array, bytes, 20);
-
-      return bytes;
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    final byte[] toSingleDimensionBinaryRepresentation(BaseConnection connection, A array)
-        throws SQLException, SQLFeatureNotSupportedException {
-      final int length = (fieldSize + 4) * Array.getLength(array);
-      final byte[] bytes = new byte[length];
-
-      write(array, bytes, 0);
-      return bytes;
-    }
-
-    /**
-     * Write the entire contents of <i>array</i> to <i>bytes</i> starting at
-     * <i>offset</i> without metadata describing type or length.
-     *
-     * @param array
-     *          The array to write.
-     * @param bytes
-     *          The {@code byte[]} to write to.
-     * @param offset
-     *          The offset into <i>bytes</i> to start writing.
-     */
-    protected abstract void write(A array, byte[] bytes, int offset);
-  }
-
-  private static final AbstractArrayEncoder<long[]> LONG_ARRAY = new FixedSizePrimitiveArrayEncoder<long[]>(8, Oid.INT8,
-      Oid.INT8_ARRAY) {
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public void appendArray(StringBuilder sb, char delim, long[] array) {
-      sb.append('{');
-      for (int i = 0; i < array.length; i++) {
-        if (i > 0) {
-          sb.append(delim);
-        }
-        sb.append(array[i]);
-      }
-      sb.append('}');
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    protected void write(long[] array, byte[] bytes, int offset) {
-      int idx = offset;
-      for (int i = 0; i < array.length; i++) {
-        bytes[idx + 3] = 8;
-        ByteConverter.int8(bytes, idx + 4, array[i]);
-        idx += 12;
-      }
-    }
-  };
-
-  private static final AbstractArrayEncoder<Long[]> LONG_OBJ_ARRAY = new NumberArrayEncoder<Long>(8, Oid.INT8,
-      Oid.INT8_ARRAY) {
-
-    @Override
-    protected void write(Long number, byte[] bytes, int offset) {
-      ByteConverter.int8(bytes, offset, number.longValue());
-    }
-  };
-
-  private static final AbstractArrayEncoder<int[]> INT_ARRAY = new FixedSizePrimitiveArrayEncoder<int[]>(4, Oid.INT4,
-      Oid.INT4_ARRAY) {
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public void appendArray(StringBuilder sb, char delim, int[] array) {
-      sb.append('{');
-      for (int i = 0; i < array.length; i++) {
-        if (i > 0) {
-          sb.append(delim);
-        }
-        sb.append(array[i]);
-      }
-      sb.append('}');
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    protected void write(int[] array, byte[] bytes, int offset) {
-      int idx = offset;
-      for (int i = 0; i < array.length; i++) {
-        bytes[idx + 3] = 4;
-        ByteConverter.int4(bytes, idx + 4, array[i]);
-        idx += 8;
-      }
-    }
-  };
-
-  private static final AbstractArrayEncoder<Integer[]> INT_OBJ_ARRAY = new NumberArrayEncoder<Integer>(4, Oid.INT4,
-      Oid.INT4_ARRAY) {
-
-    @Override
-    protected void write(Integer number, byte[] bytes, int offset) {
-      ByteConverter.int4(bytes, offset, number.intValue());
-    }
-  };
-
-  private static final AbstractArrayEncoder<short[]> SHORT_ARRAY = new FixedSizePrimitiveArrayEncoder<short[]>(2,
-      Oid.INT2, Oid.INT2_ARRAY) {
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public void appendArray(StringBuilder sb, char delim, short[] array) {
-      sb.append('{');
-      for (int i = 0; i < array.length; i++) {
-        if (i > 0) {
-          sb.append(delim);
-        }
-        sb.append(array[i]);
-      }
-      sb.append('}');
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    protected void write(short[] array, byte[] bytes, int offset) {
-      int idx = offset;
-      for (int i = 0; i < array.length; i++) {
-        bytes[idx + 3] = 2;
-        ByteConverter.int2(bytes, idx + 4, array[i]);
-        idx += 6;
-      }
-    }
-  };
-
-  private static final AbstractArrayEncoder<Short[]> SHORT_OBJ_ARRAY = new NumberArrayEncoder<Short>(2, Oid.INT2,
-      Oid.INT2_ARRAY) {
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    protected void write(Short number, byte[] bytes, int offset) {
-      ByteConverter.int2(bytes, offset, number.shortValue());
-    }
-  };
-
-  private static final AbstractArrayEncoder<double[]> DOUBLE_ARRAY = new FixedSizePrimitiveArrayEncoder<double[]>(8,
-      Oid.FLOAT8, Oid.FLOAT8_ARRAY) {
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public void appendArray(StringBuilder sb, char delim, double[] array) {
-      sb.append('{');
-      for (int i = 0; i < array.length; i++) {
-        if (i > 0) {
-          sb.append(delim);
-        }
-        // use quotes to account for any issues with scientific notation
-        sb.append('"');
-        sb.append(array[i]);
-        sb.append('"');
-      }
-      sb.append('}');
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    protected void write(double[] array, byte[] bytes, int offset) {
-      int idx = offset;
-      for (int i = 0; i < array.length; i++) {
-        bytes[idx + 3] = 8;
-        ByteConverter.float8(bytes, idx + 4, array[i]);
-        idx += 12;
-      }
-    }
-  };
-
-  private static final AbstractArrayEncoder<Double[]> DOUBLE_OBJ_ARRAY = new NumberArrayEncoder<Double>(8, Oid.FLOAT8,
-      Oid.FLOAT8_ARRAY) {
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    protected void write(Double number, byte[] bytes, int offset) {
-      ByteConverter.float8(bytes, offset, number.doubleValue());
-    }
-  };
-
-  private static final AbstractArrayEncoder<float[]> FLOAT_ARRAY = new FixedSizePrimitiveArrayEncoder<float[]>(4,
-      Oid.FLOAT4, Oid.FLOAT4_ARRAY) {
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public void appendArray(StringBuilder sb, char delim, float[] array) {
-      sb.append('{');
-      for (int i = 0; i < array.length; i++) {
-        if (i > 0) {
-          sb.append(delim);
-        }
-        // use quotes to account for any issues with scientific notation
-        sb.append('"');
-        sb.append(array[i]);
-        sb.append('"');
-      }
-      sb.append('}');
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    protected void write(float[] array, byte[] bytes, int offset) {
-      int idx = offset;
-      for (int i = 0; i < array.length; i++) {
-        bytes[idx + 3] = 4;
-        ByteConverter.float4(bytes, idx + 4, array[i]);
-        idx += 8;
-      }
-    }
-  };
-
-  private static final AbstractArrayEncoder<Float[]> FLOAT_OBJ_ARRAY = new NumberArrayEncoder<Float>(4, Oid.FLOAT4,
-      Oid.FLOAT4_ARRAY) {
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    protected void write(Float number, byte[] bytes, int offset) {
-      ByteConverter.float4(bytes, offset, number.floatValue());
-    }
-  };
-
-  private static final AbstractArrayEncoder<boolean[]> BOOLEAN_ARRAY = new FixedSizePrimitiveArrayEncoder<boolean[]>(1,
-      Oid.BOOL, Oid.BOOL_ARRAY) {
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public void appendArray(StringBuilder sb, char delim, boolean[] array) {
-      sb.append('{');
-      for (int i = 0; i < array.length; i++) {
-        if (i > 0) {
-          sb.append(delim);
-        }
-        sb.append(array[i] ? '1' : '0');
-      }
-      sb.append('}');
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    protected void write(boolean[] array, byte[] bytes, int offset) {
-      int idx = offset;
-      for (int i = 0; i < array.length; i++) {
-        bytes[idx + 3] = 1;
-        ByteConverter.bool(bytes, idx + 4, array[i]);
-        idx += 5;
-      }
-    }
-  };
-
-  private static final AbstractArrayEncoder<Boolean[]> BOOLEAN_OBJ_ARRAY = new AbstractArrayEncoder<Boolean[]>(Oid.BOOL,
-      Oid.BOOL_ARRAY) {
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public byte[] toBinaryRepresentation(BaseConnection connection, Boolean[] array, int oid)
-        throws SQLException, SQLFeatureNotSupportedException {
-      assert oid == arrayOid;
-
-      final int nullCount = countNulls(array);
-
-      final byte[] bytes = writeBytes(array, nullCount, 20);
-
-      // 1 dimension
-      ByteConverter.int4(bytes, 0, 1);
-      // no null
-      ByteConverter.int4(bytes, 4, nullCount == 0 ? 0 : 1);
-      // oid
-      ByteConverter.int4(bytes, 8, getTypeOID(oid));
-      // length
-      ByteConverter.int4(bytes, 12, array.length);
-      // postgresql uses 1 base by default
-      ByteConverter.int4(bytes, 16, 1);
-
-      return bytes;
-    }
-
-    private byte[] writeBytes(final Boolean[] array, final int nullCount, final int offset) {
-      final int length = offset + (4 * array.length) + (array.length - nullCount);
-      final byte[] bytes = new byte[length];
-
-      int idx = offset;
-      for (int i = 0; i < array.length; i++) {
-        if (array[i] == null) {
-          ByteConverter.int4(bytes, idx, -1);
-          idx += 4;
-        } else {
-          ByteConverter.int4(bytes, idx, 1);
-          idx += 4;
-          write(array[i], bytes, idx);
-          ++idx;
-        }
-      }
-
-      return bytes;
-    }
-
-    private void write(Boolean bool, byte[] bytes, int idx) {
-      ByteConverter.bool(bytes, idx, bool.booleanValue());
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    byte[] toSingleDimensionBinaryRepresentation(BaseConnection connection, Boolean[] array)
-        throws SQLException, SQLFeatureNotSupportedException {
-      final int nullCount = countNulls(array);
-      return writeBytes(array, nullCount, 0);
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public void appendArray(StringBuilder sb, char delim, Boolean[] array) {
-      sb.append('{');
-      for (int i = 0; i < array.length; i++) {
-        if (i != 0) {
-          sb.append(delim);
-        }
-        if (array[i] == null) {
-          sb.append('N').append('U').append('L').append('L');
-        } else {
-          sb.append(array[i].booleanValue() ? '1' : '0');
-        }
-      }
-      sb.append('}');
-    }
-  };
-
-  private static final AbstractArrayEncoder<String[]> STRING_ARRAY = new AbstractArrayEncoder<String[]>(Oid.VARCHAR,
-      Oid.VARCHAR_ARRAY) {
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    int countNulls(String[] array) {
-      int count = 0;
-      for (int i = 0; i < array.length; i++) {
-        if (array[i] == null) {
-          ++count;
-        }
-      }
-      return count;
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public boolean supportBinaryRepresentation(int oid) {
-      return oid == Oid.VARCHAR_ARRAY || oid == Oid.TEXT_ARRAY;
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    int getTypeOID(int arrayOid) {
-      if (arrayOid == Oid.VARCHAR_ARRAY) {
-        return Oid.VARCHAR;
-      }
-
-      if (arrayOid == Oid.TEXT_ARRAY) {
-        return Oid.TEXT;
-      }
-
-      // this should not be possible based on supportBinaryRepresentation returning
-      // false for all other types
-      throw new IllegalStateException("Invalid array oid: " + arrayOid);
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public void appendArray(StringBuilder sb, char delim, String[] array) {
-      sb.append('{');
-      for (int i = 0; i < array.length; i++) {
-        if (i > 0) {
-          sb.append(delim);
-        }
-        if (array[i] == null) {
-          sb.append('N').append('U').append('L').append('L');
-        } else {
-          PgArray.escapeArrayElement(sb, array[i]);
-        }
-      }
-      sb.append('}');
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public byte[] toBinaryRepresentation(BaseConnection connection, String[] array, int oid) throws SQLException {
-      final ByteArrayOutputStream baos = new ByteArrayOutputStream(Math.min(1024, (array.length * 32) + 20));
-
-      assert supportBinaryRepresentation(oid);
-
-      final byte[] buffer = new byte[4];
-
-      try {
-        // 1 dimension
-        ByteConverter.int4(buffer, 0, 1);
-        baos.write(buffer);
-        // null
-        ByteConverter.int4(buffer, 0, countNulls(array) > 0 ? 1 : 0);
-        baos.write(buffer);
-        // oid
-        ByteConverter.int4(buffer, 0, getTypeOID(oid));
-        baos.write(buffer);
-        // length
-        ByteConverter.int4(buffer, 0, array.length);
-        baos.write(buffer);
-
-        // postgresql uses 1 base by default
-        ByteConverter.int4(buffer, 0, 1);
-        baos.write(buffer);
-
-        final Encoding encoding = connection.getEncoding();
-        for (int i = 0; i < array.length; i++) {
-          final String string = array[i];
-          if (string != null) {
-            final byte[] encoded;
-            try {
-              encoded = encoding.encode(string);
-            } catch (IOException e) {
-              throw new PSQLException(GT.tr("Unable to translate data into the desired encoding."),
-                  PSQLState.DATA_ERROR, e);
+    private static final AbstractArrayEncoder<long[]> LONG_ARRAY = new FixedSizePrimitiveArrayEncoder<long[]>(8, Oid.INT8,
+            Oid.INT8_ARRAY) {
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public void appendArray(StringBuilder sb, char delim, long[] array) {
+            sb.append('{');
+            for (int i = 0; i < array.length; i++) {
+                if (i > 0) {
+                    sb.append(delim);
+                }
+                sb.append(array[i]);
             }
-            ByteConverter.int4(buffer, 0, encoded.length);
-            baos.write(buffer);
-            baos.write(encoded);
-          } else {
-            ByteConverter.int4(buffer, 0, -1);
-            baos.write(buffer);
-          }
+            sb.append('}');
         }
 
-        return baos.toByteArray();
-      } catch (IOException e) {
-        // this IO exception is from writing to baos, which will never throw an
-        // IOException
-        throw new java.lang.AssertionError(e);
-      }
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    byte[] toSingleDimensionBinaryRepresentation(BaseConnection connection, String[] array)
-        throws SQLException, SQLFeatureNotSupportedException {
-      try {
-        final ByteArrayOutputStream baos = new ByteArrayOutputStream(Math.min(1024, (array.length * 32) + 20));
-        final byte[] buffer = new byte[4];
-        final Encoding encoding = connection.getEncoding();
-        for (int i = 0; i < array.length; i++) {
-          final String string = array[i];
-          if (string != null) {
-            final byte[] encoded;
-            try {
-              encoded = encoding.encode(string);
-            } catch (IOException e) {
-              throw new PSQLException(GT.tr("Unable to translate data into the desired encoding."),
-                  PSQLState.DATA_ERROR, e);
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        protected void write(long[] array, byte[] bytes, int offset) {
+            int idx = offset;
+            for (int i = 0; i < array.length; i++) {
+                bytes[idx + 3] = 8;
+                ByteConverter.int8(bytes, idx + 4, array[i]);
+                idx += 12;
             }
-            ByteConverter.int4(buffer, 0, encoded.length);
-            baos.write(buffer);
-            baos.write(encoded);
-          } else {
-            ByteConverter.int4(buffer, 0, -1);
-            baos.write(buffer);
-          }
+        }
+    };
+    private static final AbstractArrayEncoder<Long[]> LONG_OBJ_ARRAY = new NumberArrayEncoder<Long>(8, Oid.INT8,
+            Oid.INT8_ARRAY) {
+
+        @Override
+        protected void write(Long number, byte[] bytes, int offset) {
+            ByteConverter.int8(bytes, offset, number.longValue());
+        }
+    };
+    private static final AbstractArrayEncoder<int[]> INT_ARRAY = new FixedSizePrimitiveArrayEncoder<int[]>(4, Oid.INT4,
+            Oid.INT4_ARRAY) {
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public void appendArray(StringBuilder sb, char delim, int[] array) {
+            sb.append('{');
+            for (int i = 0; i < array.length; i++) {
+                if (i > 0) {
+                    sb.append(delim);
+                }
+                sb.append(array[i]);
+            }
+            sb.append('}');
         }
 
-        return baos.toByteArray();
-      } catch (IOException e) {
-        // this IO exception is from writing to baos, which will never throw an
-        // IOException
-        throw new java.lang.AssertionError(e);
-      }
-    }
-  };
-
-  private static final AbstractArrayEncoder<byte[][]> BYTEA_ARRAY = new AbstractArrayEncoder<byte[][]>(Oid.BYTEA,
-      Oid.BYTEA_ARRAY) {
-
-    /**
-     * The possible characters to use for representing hex binary data.
-     */
-    private final char[] hexDigits = new char[]{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd',
-        'e', 'f'};
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public byte[] toBinaryRepresentation(BaseConnection connection, byte[][] array, int oid)
-        throws SQLException, SQLFeatureNotSupportedException {
-
-      assert oid == arrayOid;
-
-      int length = 20;
-      for (int i = 0; i < array.length; i++) {
-        length += 4;
-        if (array[i] != null) {
-          length += array[i].length;
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        protected void write(int[] array, byte[] bytes, int offset) {
+            int idx = offset;
+            for (int i = 0; i < array.length; i++) {
+                bytes[idx + 3] = 4;
+                ByteConverter.int4(bytes, idx + 4, array[i]);
+                idx += 8;
+            }
         }
-      }
-      final byte[] bytes = new byte[length];
+    };
+    private static final AbstractArrayEncoder<Integer[]> INT_OBJ_ARRAY = new NumberArrayEncoder<Integer>(4, Oid.INT4,
+            Oid.INT4_ARRAY) {
 
-      // 1 dimension
-      ByteConverter.int4(bytes, 0, 1);
-      // no null
-      ByteConverter.int4(bytes, 4, 0);
-      // oid
-      ByteConverter.int4(bytes, 8, getTypeOID(oid));
-      // length
-      ByteConverter.int4(bytes, 12, array.length);
-      // postgresql uses 1 base by default
-      ByteConverter.int4(bytes, 16, 1);
-
-      write(array, bytes, 20);
-
-      return bytes;
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    byte[] toSingleDimensionBinaryRepresentation(BaseConnection connection, byte[][] array)
-        throws SQLException, SQLFeatureNotSupportedException {
-      int length = 0;
-      for (int i = 0; i < array.length; i++) {
-        length += 4;
-        if (array[i] != null) {
-          length += array[i].length;
+        @Override
+        protected void write(Integer number, byte[] bytes, int offset) {
+            ByteConverter.int4(bytes, offset, number.intValue());
         }
-      }
-      final byte[] bytes = new byte[length];
+    };
+    private static final AbstractArrayEncoder<short[]> SHORT_ARRAY = new FixedSizePrimitiveArrayEncoder<short[]>(2,
+            Oid.INT2, Oid.INT2_ARRAY) {
 
-      write(array, bytes, 0);
-      return bytes;
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    int countNulls(byte[][] array) {
-      int nulls = 0;
-      for (int i = 0; i < array.length; i++) {
-        if (array[i] == null) {
-          ++nulls;
-        }
-      }
-      return nulls;
-    }
-
-    private void write(byte[][] array, byte[] bytes, int offset) {
-      int idx = offset;
-      for (int i = 0; i < array.length; i++) {
-        if (array[i] != null) {
-          ByteConverter.int4(bytes, idx, array[i].length);
-          idx += 4;
-          System.arraycopy(array[i], 0, bytes, idx, array[i].length);
-          idx += array[i].length;
-        } else {
-          ByteConverter.int4(bytes, idx, -1);
-          idx += 4;
-        }
-      }
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public void appendArray(StringBuilder sb, char delim, byte[][] array) {
-      sb.append('{');
-      for (int i = 0; i < array.length; i++) {
-        if (i > 0) {
-          sb.append(delim);
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public void appendArray(StringBuilder sb, char delim, short[] array) {
+            sb.append('{');
+            for (int i = 0; i < array.length; i++) {
+                if (i > 0) {
+                    sb.append(delim);
+                }
+                sb.append(array[i]);
+            }
+            sb.append('}');
         }
 
-        if (array[i] != null) {
-          sb.append("\"\\\\x");
-          for (int j = 0; j < array[i].length; j++) {
-            byte b = array[i][j];
-
-            // get the value for the left 4 bits (drop sign)
-            sb.append(hexDigits[(b & 0xF0) >>> 4]);
-            // get the value for the right 4 bits
-            sb.append(hexDigits[b & 0x0F]);
-          }
-          sb.append('"');
-        } else {
-          sb.append("NULL");
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        protected void write(short[] array, byte[] bytes, int offset) {
+            int idx = offset;
+            for (int i = 0; i < array.length; i++) {
+                bytes[idx + 3] = 2;
+                ByteConverter.int2(bytes, idx + 4, array[i]);
+                idx += 6;
+            }
         }
-      }
-      sb.append('}');
-    }
-  };
+    };
+    private static final AbstractArrayEncoder<Short[]> SHORT_OBJ_ARRAY = new NumberArrayEncoder<Short>(2, Oid.INT2,
+            Oid.INT2_ARRAY) {
 
-  private static final AbstractArrayEncoder<Object[]> OBJECT_ARRAY = new AbstractArrayEncoder<Object[]>(0, 0) {
-
-    @Override
-    public int getDefaultArrayTypeOid() {
-      return 0;
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public boolean supportBinaryRepresentation(int oid) {
-      return false;
-    }
-
-    @Override
-    public byte[] toBinaryRepresentation(BaseConnection connection, Object[] array, int oid)
-        throws SQLException, SQLFeatureNotSupportedException {
-      throw new SQLFeatureNotSupportedException();
-    }
-
-    @Override
-    byte[] toSingleDimensionBinaryRepresentation(BaseConnection connection, Object[] array)
-        throws SQLException, SQLFeatureNotSupportedException {
-      throw new SQLFeatureNotSupportedException();
-    }
-
-    @Override
-    public void appendArray(StringBuilder sb, char delim, Object[] array) {
-      sb.append('{');
-      for (int i = 0; i < array.length; i++) {
-        if (i > 0) {
-          sb.append(delim);
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        protected void write(Short number, byte[] bytes, int offset) {
+            ByteConverter.int2(bytes, offset, number.shortValue());
         }
-        if (array[i] == null) {
-          sb.append('N').append('U').append('L').append('L');
-        } else if (array[i].getClass().isArray()) {
-          if (array[i] instanceof byte[]) {
-            throw new UnsupportedOperationException("byte[] nested inside Object[]");
-          }
-          try {
-            getArrayEncoder(array[i]).appendArray(sb, delim, array[i]);
-          } catch (PSQLException e) {
-            // this should never happen
-            throw new IllegalStateException(e);
-          }
-        } else {
-          PgArray.escapeArrayElement(sb, array[i].toString());
-        }
-      }
-      sb.append('}');
-    }
-  };
+    };
+    private static final AbstractArrayEncoder<double[]> DOUBLE_ARRAY = new FixedSizePrimitiveArrayEncoder<double[]>(8,
+            Oid.FLOAT8, Oid.FLOAT8_ARRAY) {
 
-  @SuppressWarnings("rawtypes")
-  private static final Map<Class, AbstractArrayEncoder> ARRAY_CLASS_TO_ENCODER = new HashMap<>(
-      (int) (14 / .75) + 1);
-
-  static {
-    ARRAY_CLASS_TO_ENCODER.put(long.class, LONG_ARRAY);
-    ARRAY_CLASS_TO_ENCODER.put(Long.class, LONG_OBJ_ARRAY);
-    ARRAY_CLASS_TO_ENCODER.put(int.class, INT_ARRAY);
-    ARRAY_CLASS_TO_ENCODER.put(Integer.class, INT_OBJ_ARRAY);
-    ARRAY_CLASS_TO_ENCODER.put(short.class, SHORT_ARRAY);
-    ARRAY_CLASS_TO_ENCODER.put(Short.class, SHORT_OBJ_ARRAY);
-    ARRAY_CLASS_TO_ENCODER.put(double.class, DOUBLE_ARRAY);
-    ARRAY_CLASS_TO_ENCODER.put(Double.class, DOUBLE_OBJ_ARRAY);
-    ARRAY_CLASS_TO_ENCODER.put(float.class, FLOAT_ARRAY);
-    ARRAY_CLASS_TO_ENCODER.put(Float.class, FLOAT_OBJ_ARRAY);
-    ARRAY_CLASS_TO_ENCODER.put(boolean.class, BOOLEAN_ARRAY);
-    ARRAY_CLASS_TO_ENCODER.put(Boolean.class, BOOLEAN_OBJ_ARRAY);
-    ARRAY_CLASS_TO_ENCODER.put(byte[].class, BYTEA_ARRAY);
-    ARRAY_CLASS_TO_ENCODER.put(String.class, STRING_ARRAY);
-  }
-
-  /**
-   * Returns support for encoding <i>array</i>.
-   *
-   * @param array
-   *          The array to encode. Must not be {@code null}.
-   * @return An instance capable of encoding <i>array</i> as a {@code String} at
-   *         minimum. Some types may support binary encoding.
-   * @throws PSQLException
-   *           if <i>array</i> is not a supported type.
-   * @see ArrayEncoding.ArrayEncoder#supportBinaryRepresentation(int)
-   */
-  @SuppressWarnings({"unchecked", "rawtypes"})
-  public static <A extends Object> ArrayEncoder<A> getArrayEncoder(A array) throws PSQLException {
-    final Class<?> arrayClazz = array.getClass();
-    Class<?> subClazz = arrayClazz.getComponentType();
-    if (subClazz == null) {
-      throw new PSQLException(GT.tr("Invalid elements {0}", array), PSQLState.INVALID_PARAMETER_TYPE);
-    }
-    AbstractArrayEncoder<A> support = ARRAY_CLASS_TO_ENCODER.get(subClazz);
-    if (support != null) {
-      return support;
-    }
-    Class<?> subSubClazz = subClazz.getComponentType();
-    if (subSubClazz == null) {
-      if (Object.class.isAssignableFrom(subClazz)) {
-        return (ArrayEncoder<A>) OBJECT_ARRAY;
-      }
-      throw new PSQLException(GT.tr("Invalid elements {0}", array), PSQLState.INVALID_PARAMETER_TYPE);
-    }
-
-    subClazz = subSubClazz;
-    int dimensions = 2;
-    while (subClazz != null) {
-      support = ARRAY_CLASS_TO_ENCODER.get(subClazz);
-      if (support != null) {
-        if (dimensions == 2) {
-          return new TwoDimensionPrimitiveArrayEncoder(support);
-        }
-        return new RecursiveArrayEncoder(support, dimensions);
-      }
-      subSubClazz = subClazz.getComponentType();
-      if (subSubClazz == null) {
-        if (Object.class.isAssignableFrom(subClazz)) {
-          if (dimensions == 2) {
-            return new TwoDimensionPrimitiveArrayEncoder(OBJECT_ARRAY);
-          }
-          return new RecursiveArrayEncoder(OBJECT_ARRAY, dimensions);
-        }
-      }
-      ++dimensions;
-      subClazz = subSubClazz;
-    }
-
-    throw new PSQLException(GT.tr("Invalid elements {0}", array), PSQLState.INVALID_PARAMETER_TYPE);
-  }
-
-  /**
-   * Wraps an {@link AbstractArrayEncoder} implementation and provides optimized
-   * support for 2 dimensions.
-   */
-  private static final class TwoDimensionPrimitiveArrayEncoder<A extends Object> implements ArrayEncoder<A[]> {
-    private final AbstractArrayEncoder<A> support;
-
-    /**
-     * @param support
-     *          The instance providing support for the base array type.
-     */
-    TwoDimensionPrimitiveArrayEncoder(AbstractArrayEncoder<A> support) {
-      super();
-      this.support = support;
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public int getDefaultArrayTypeOid() {
-      return support.getDefaultArrayTypeOid();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public String toArrayString(char delim, A[] array) {
-      final StringBuilder sb = new StringBuilder(1024);
-      appendArray(sb, delim, array);
-      return sb.toString();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public void appendArray(StringBuilder sb, char delim, A[] array) {
-      sb.append('{');
-      for (int i = 0; i < array.length; i++) {
-        if (i > 0) {
-          sb.append(delim);
-        }
-        support.appendArray(sb, delim, array[i]);
-      }
-      sb.append('}');
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public boolean supportBinaryRepresentation(int oid) {
-      return support.supportBinaryRepresentation(oid);
-    }
-
-    /**
-     * {@inheritDoc} 4 bytes - dimension 4 bytes - oid 4 bytes - ? 8*d bytes -
-     * dimension length
-     */
-    @Override
-    public byte[] toBinaryRepresentation(BaseConnection connection, A[] array, int oid)
-        throws SQLException, SQLFeatureNotSupportedException {
-      final ByteArrayOutputStream baos = new ByteArrayOutputStream(Math.min(1024, (array.length * 32) + 20));
-      final byte[] buffer = new byte[4];
-
-      boolean hasNulls = false;
-      for (int i = 0; !hasNulls && i < array.length; i++) {
-        if (support.countNulls(array[i]) > 0) {
-          hasNulls = true;
-        }
-      }
-
-      try {
-        // 2 dimension
-        ByteConverter.int4(buffer, 0, 2);
-        baos.write(buffer);
-        // nulls
-        ByteConverter.int4(buffer, 0, hasNulls ? 1 : 0);
-        baos.write(buffer);
-        // oid
-        ByteConverter.int4(buffer, 0, support.getTypeOID(oid));
-        baos.write(buffer);
-
-        // length
-        ByteConverter.int4(buffer, 0, array.length);
-        baos.write(buffer);
-        // postgres defaults to 1 based lower bound
-        ByteConverter.int4(buffer, 0, 1);
-        baos.write(buffer);
-
-        ByteConverter.int4(buffer, 0, array.length > 0 ? Array.getLength(array[0]) : 0);
-        baos.write(buffer);
-        // postgresql uses 1 base by default
-        ByteConverter.int4(buffer, 0, 1);
-        baos.write(buffer);
-
-        for (int i = 0; i < array.length; i++) {
-          baos.write(support.toSingleDimensionBinaryRepresentation(connection, array[i]));
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public void appendArray(StringBuilder sb, char delim, double[] array) {
+            sb.append('{');
+            for (int i = 0; i < array.length; i++) {
+                if (i > 0) {
+                    sb.append(delim);
+                }
+                // use quotes to account for any issues with scientific notation
+                sb.append('"');
+                sb.append(array[i]);
+                sb.append('"');
+            }
+            sb.append('}');
         }
 
-        return baos.toByteArray();
-
-      } catch (IOException e) {
-        // this IO exception is from writing to baos, which will never throw an
-        // IOException
-        throw new java.lang.AssertionError(e);
-      }
-    }
-  }
-
-  /**
-   * Wraps an {@link AbstractArrayEncoder} implementation and provides support for
-   * 2 or more dimensions using recursion.
-   */
-  @SuppressWarnings({"unchecked", "rawtypes"})
-  private static final class RecursiveArrayEncoder implements ArrayEncoder {
-
-    private final AbstractArrayEncoder support;
-    private final int dimensions;
-
-    /**
-     * @param support
-     *          The instance providing support for the base array type.
-     */
-    RecursiveArrayEncoder(AbstractArrayEncoder support, int dimensions) {
-      super();
-      this.support = support;
-      this.dimensions = dimensions;
-      assert dimensions >= 2;
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public int getDefaultArrayTypeOid() {
-      return support.getDefaultArrayTypeOid();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public String toArrayString(char delim, Object array) {
-      final StringBuilder sb = new StringBuilder(2048);
-      arrayString(sb, array, delim, dimensions);
-      return sb.toString();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public void appendArray(StringBuilder sb, char delim, Object array) {
-      arrayString(sb, array, delim, dimensions);
-    }
-
-    private void arrayString(StringBuilder sb, Object array, char delim, int depth) {
-
-      if (depth > 1) {
-        sb.append('{');
-        for (int i = 0, j = Array.getLength(array); i < j; i++) {
-          if (i > 0) {
-            sb.append(delim);
-          }
-          arrayString(sb, Array.get(array, i), delim, depth - 1);
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        protected void write(double[] array, byte[] bytes, int offset) {
+            int idx = offset;
+            for (int i = 0; i < array.length; i++) {
+                bytes[idx + 3] = 8;
+                ByteConverter.float8(bytes, idx + 4, array[i]);
+                idx += 12;
+            }
         }
-        sb.append('}');
-      } else {
-        support.appendArray(sb, delim, array);
-      }
+    };
+    private static final AbstractArrayEncoder<Double[]> DOUBLE_OBJ_ARRAY = new NumberArrayEncoder<Double>(8, Oid.FLOAT8,
+            Oid.FLOAT8_ARRAY) {
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        protected void write(Double number, byte[] bytes, int offset) {
+            ByteConverter.float8(bytes, offset, number.doubleValue());
+        }
+    };
+    private static final AbstractArrayEncoder<float[]> FLOAT_ARRAY = new FixedSizePrimitiveArrayEncoder<float[]>(4,
+            Oid.FLOAT4, Oid.FLOAT4_ARRAY) {
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public void appendArray(StringBuilder sb, char delim, float[] array) {
+            sb.append('{');
+            for (int i = 0; i < array.length; i++) {
+                if (i > 0) {
+                    sb.append(delim);
+                }
+                // use quotes to account for any issues with scientific notation
+                sb.append('"');
+                sb.append(array[i]);
+                sb.append('"');
+            }
+            sb.append('}');
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        protected void write(float[] array, byte[] bytes, int offset) {
+            int idx = offset;
+            for (int i = 0; i < array.length; i++) {
+                bytes[idx + 3] = 4;
+                ByteConverter.float4(bytes, idx + 4, array[i]);
+                idx += 8;
+            }
+        }
+    };
+    private static final AbstractArrayEncoder<Float[]> FLOAT_OBJ_ARRAY = new NumberArrayEncoder<Float>(4, Oid.FLOAT4,
+            Oid.FLOAT4_ARRAY) {
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        protected void write(Float number, byte[] bytes, int offset) {
+            ByteConverter.float4(bytes, offset, number.floatValue());
+        }
+    };
+    private static final AbstractArrayEncoder<boolean[]> BOOLEAN_ARRAY = new FixedSizePrimitiveArrayEncoder<boolean[]>(1,
+            Oid.BOOL, Oid.BOOL_ARRAY) {
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public void appendArray(StringBuilder sb, char delim, boolean[] array) {
+            sb.append('{');
+            for (int i = 0; i < array.length; i++) {
+                if (i > 0) {
+                    sb.append(delim);
+                }
+                sb.append(array[i] ? '1' : '0');
+            }
+            sb.append('}');
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        protected void write(boolean[] array, byte[] bytes, int offset) {
+            int idx = offset;
+            for (int i = 0; i < array.length; i++) {
+                bytes[idx + 3] = 1;
+                ByteConverter.bool(bytes, idx + 4, array[i]);
+                idx += 5;
+            }
+        }
+    };
+    private static final AbstractArrayEncoder<Boolean[]> BOOLEAN_OBJ_ARRAY = new AbstractArrayEncoder<Boolean[]>(Oid.BOOL,
+            Oid.BOOL_ARRAY) {
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public byte[] toBinaryRepresentation(BaseConnection connection, Boolean[] array, int oid)
+                throws SQLException, SQLFeatureNotSupportedException {
+            assert oid == arrayOid;
+
+            final int nullCount = countNulls(array);
+
+            final byte[] bytes = writeBytes(array, nullCount, 20);
+
+            // 1 dimension
+            ByteConverter.int4(bytes, 0, 1);
+            // has-nulls flag: 1 if the array contains any null element, else 0
+            ByteConverter.int4(bytes, 4, nullCount == 0 ? 0 : 1);
+            // oid
+            ByteConverter.int4(bytes, 8, getTypeOID(oid));
+            // length
+            ByteConverter.int4(bytes, 12, array.length);
+            // postgresql uses 1 base by default
+            ByteConverter.int4(bytes, 16, 1);
+
+            return bytes;
+        }
+
+        private byte[] writeBytes(final Boolean[] array, final int nullCount, final int offset) {
+            final int length = offset + (4 * array.length) + (array.length - nullCount);
+            final byte[] bytes = new byte[length];
+
+            int idx = offset;
+            for (int i = 0; i < array.length; i++) {
+                if (array[i] == null) {
+                    ByteConverter.int4(bytes, idx, -1);
+                    idx += 4;
+                } else {
+                    ByteConverter.int4(bytes, idx, 1);
+                    idx += 4;
+                    write(array[i], bytes, idx);
+                    ++idx;
+                }
+            }
+
+            return bytes;
+        }
+
+        private void write(Boolean bool, byte[] bytes, int idx) {
+            ByteConverter.bool(bytes, idx, bool.booleanValue());
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        byte[] toSingleDimensionBinaryRepresentation(BaseConnection connection, Boolean[] array)
+                throws SQLException, SQLFeatureNotSupportedException {
+            final int nullCount = countNulls(array);
+            return writeBytes(array, nullCount, 0);
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public void appendArray(StringBuilder sb, char delim, Boolean[] array) {
+            sb.append('{');
+            for (int i = 0; i < array.length; i++) {
+                if (i != 0) {
+                    sb.append(delim);
+                }
+                if (array[i] == null) {
+                    sb.append('N').append('U').append('L').append('L');
+                } else {
+                    sb.append(array[i].booleanValue() ? '1' : '0');
+                }
+            }
+            sb.append('}');
+        }
+    };
+    private static final AbstractArrayEncoder<String[]> STRING_ARRAY = new AbstractArrayEncoder<String[]>(Oid.VARCHAR,
+            Oid.VARCHAR_ARRAY) {
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        int countNulls(String[] array) {
+            int count = 0;
+            for (int i = 0; i < array.length; i++) {
+                if (array[i] == null) {
+                    ++count;
+                }
+            }
+            return count;
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public boolean supportBinaryRepresentation(int oid) {
+            return oid == Oid.VARCHAR_ARRAY || oid == Oid.TEXT_ARRAY;
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        int getTypeOID(int arrayOid) {
+            if (arrayOid == Oid.VARCHAR_ARRAY) {
+                return Oid.VARCHAR;
+            }
+
+            if (arrayOid == Oid.TEXT_ARRAY) {
+                return Oid.TEXT;
+            }
+
+            // this should not be possible based on supportBinaryRepresentation returning
+            // false for all other types
+            throw new IllegalStateException("Invalid array oid: " + arrayOid);
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public void appendArray(StringBuilder sb, char delim, String[] array) {
+            sb.append('{');
+            for (int i = 0; i < array.length; i++) {
+                if (i > 0) {
+                    sb.append(delim);
+                }
+                if (array[i] == null) {
+                    sb.append('N').append('U').append('L').append('L');
+                } else {
+                    PgArray.escapeArrayElement(sb, array[i]);
+                }
+            }
+            sb.append('}');
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public byte[] toBinaryRepresentation(BaseConnection connection, String[] array, int oid) throws SQLException {
+            final ByteArrayOutputStream baos = new ByteArrayOutputStream(Math.min(1024, (array.length * 32) + 20));
+
+            assert supportBinaryRepresentation(oid);
+
+            final byte[] buffer = new byte[4];
+
+            try {
+                // 1 dimension
+                ByteConverter.int4(buffer, 0, 1);
+                baos.write(buffer);
+                // has-nulls flag: 1 if any element is null, else 0
+                ByteConverter.int4(buffer, 0, countNulls(array) > 0 ? 1 : 0);
+                baos.write(buffer);
+                // oid
+                ByteConverter.int4(buffer, 0, getTypeOID(oid));
+                baos.write(buffer);
+                // length
+                ByteConverter.int4(buffer, 0, array.length);
+                baos.write(buffer);
+
+                // postgresql uses 1 base by default
+                ByteConverter.int4(buffer, 0, 1);
+                baos.write(buffer);
+
+                final Encoding encoding = connection.getEncoding();
+                for (int i = 0; i < array.length; i++) {
+                    final String string = array[i];
+                    if (string != null) {
+                        final byte[] encoded;
+                        try {
+                            encoded = encoding.encode(string);
+                        } catch (IOException e) {
+                            throw new PSQLException(GT.tr("Unable to translate data into the desired encoding."),
+                                    PSQLState.DATA_ERROR, e);
+                        }
+                        ByteConverter.int4(buffer, 0, encoded.length);
+                        baos.write(buffer);
+                        baos.write(encoded);
+                    } else {
+                        ByteConverter.int4(buffer, 0, -1);
+                        baos.write(buffer);
+                    }
+                }
+
+                return baos.toByteArray();
+            } catch (IOException e) {
+                // this IO exception is from writing to baos, which will never throw an
+                // IOException
+                throw new java.lang.AssertionError(e);
+            }
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        byte[] toSingleDimensionBinaryRepresentation(BaseConnection connection, String[] array)
+                throws SQLException, SQLFeatureNotSupportedException {
+            try {
+                final ByteArrayOutputStream baos = new ByteArrayOutputStream(Math.min(1024, (array.length * 32) + 20));
+                final byte[] buffer = new byte[4];
+                final Encoding encoding = connection.getEncoding();
+                for (int i = 0; i < array.length; i++) {
+                    final String string = array[i];
+                    if (string != null) {
+                        final byte[] encoded;
+                        try {
+                            encoded = encoding.encode(string);
+                        } catch (IOException e) {
+                            throw new PSQLException(GT.tr("Unable to translate data into the desired encoding."),
+                                    PSQLState.DATA_ERROR, e);
+                        }
+                        ByteConverter.int4(buffer, 0, encoded.length);
+                        baos.write(buffer);
+                        baos.write(encoded);
+                    } else {
+                        ByteConverter.int4(buffer, 0, -1);
+                        baos.write(buffer);
+                    }
+                }
+
+                return baos.toByteArray();
+            } catch (IOException e) {
+                // this IO exception is from writing to baos, which will never throw an
+                // IOException
+                throw new java.lang.AssertionError(e);
+            }
+        }
+    };
+    /**
+     * Encoder for {@code byte[][]} values targeting {@code bytea[]}. Binary form
+     * writes each element as a 4-byte length prefix followed by the raw bytes;
+     * the text form uses the PostgreSQL hex bytea representation ("\\x...").
+     */
+    private static final AbstractArrayEncoder<byte[][]> BYTEA_ARRAY = new AbstractArrayEncoder<byte[][]>(Oid.BYTEA,
+            Oid.BYTEA_ARRAY) {
+
+        /**
+         * The possible characters to use for representing hex binary data.
+         */
+        private final char[] hexDigits = new char[]{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd',
+                'e', 'f'};
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public byte[] toBinaryRepresentation(BaseConnection connection, byte[][] array, int oid)
+                throws SQLException, SQLFeatureNotSupportedException {
+
+            assert oid == arrayOid;
+
+            // 20-byte array header plus, per element, a 4-byte length prefix and the data
+            int length = 20;
+            for (int i = 0; i < array.length; i++) {
+                length += 4;
+                if (array[i] != null) {
+                    length += array[i].length;
+                }
+            }
+            final byte[] bytes = new byte[length];
+
+            // 1 dimension
+            ByteConverter.int4(bytes, 0, 1);
+            // has-null flag: always written as 0 here even though elements may be
+            // null (encoded below as -1 lengths) — NOTE(review): confirm the server
+            // tolerates a 0 flag alongside null elements
+            ByteConverter.int4(bytes, 4, 0);
+            // oid
+            ByteConverter.int4(bytes, 8, getTypeOID(oid));
+            // length
+            ByteConverter.int4(bytes, 12, array.length);
+            // postgresql uses 1 base by default
+            ByteConverter.int4(bytes, 16, 1);
+
+            write(array, bytes, 20);
+
+            return bytes;
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        byte[] toSingleDimensionBinaryRepresentation(BaseConnection connection, byte[][] array)
+                throws SQLException, SQLFeatureNotSupportedException {
+            // no header: 4-byte length prefix per element plus the element bytes
+            int length = 0;
+            for (int i = 0; i < array.length; i++) {
+                length += 4;
+                if (array[i] != null) {
+                    length += array[i].length;
+                }
+            }
+            final byte[] bytes = new byte[length];
+
+            write(array, bytes, 0);
+            return bytes;
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        int countNulls(byte[][] array) {
+            int nulls = 0;
+            for (int i = 0; i < array.length; i++) {
+                if (array[i] == null) {
+                    ++nulls;
+                }
+            }
+            return nulls;
+        }
+
+        // Writes each element as a 4-byte length (-1 for SQL NULL) followed by the raw bytes.
+        private void write(byte[][] array, byte[] bytes, int offset) {
+            int idx = offset;
+            for (int i = 0; i < array.length; i++) {
+                if (array[i] != null) {
+                    ByteConverter.int4(bytes, idx, array[i].length);
+                    idx += 4;
+                    System.arraycopy(array[i], 0, bytes, idx, array[i].length);
+                    idx += array[i].length;
+                } else {
+                    ByteConverter.int4(bytes, idx, -1);
+                    idx += 4;
+                }
+            }
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public void appendArray(StringBuilder sb, char delim, byte[][] array) {
+            sb.append('{');
+            for (int i = 0; i < array.length; i++) {
+                if (i > 0) {
+                    sb.append(delim);
+                }
+
+                if (array[i] != null) {
+                    // hex bytea format: quoted "\x" followed by two hex digits per byte
+                    sb.append("\"\\\\x");
+                    for (int j = 0; j < array[i].length; j++) {
+                        byte b = array[i][j];
+
+                        // get the value for the left 4 bits (drop sign)
+                        sb.append(hexDigits[(b & 0xF0) >>> 4]);
+                        // get the value for the right 4 bits
+                        sb.append(hexDigits[b & 0x0F]);
+                    }
+                    sb.append('"');
+                } else {
+                    sb.append("NULL");
+                }
+            }
+            sb.append('}');
+        }
+    };
+    // Maps an array's component type to the encoder supporting it. Initial
+    // capacity is sized for the 14 entries registered below at the default 0.75
+    // load factor, so the map never rehashes.
+    @SuppressWarnings("rawtypes")
+    private static final Map<Class, AbstractArrayEncoder> ARRAY_CLASS_TO_ENCODER = new HashMap<>(
+            (int) (14 / .75) + 1);
+
+    // Registers the directly supported component types (primitives, their
+    // wrappers, byte[] for bytea, and String). Multi-dimensional and other
+    // Object[] arrays are resolved dynamically in getArrayEncoder.
+    static {
+        ARRAY_CLASS_TO_ENCODER.put(long.class, LONG_ARRAY);
+        ARRAY_CLASS_TO_ENCODER.put(Long.class, LONG_OBJ_ARRAY);
+        ARRAY_CLASS_TO_ENCODER.put(int.class, INT_ARRAY);
+        ARRAY_CLASS_TO_ENCODER.put(Integer.class, INT_OBJ_ARRAY);
+        ARRAY_CLASS_TO_ENCODER.put(short.class, SHORT_ARRAY);
+        ARRAY_CLASS_TO_ENCODER.put(Short.class, SHORT_OBJ_ARRAY);
+        ARRAY_CLASS_TO_ENCODER.put(double.class, DOUBLE_ARRAY);
+        ARRAY_CLASS_TO_ENCODER.put(Double.class, DOUBLE_OBJ_ARRAY);
+        ARRAY_CLASS_TO_ENCODER.put(float.class, FLOAT_ARRAY);
+        ARRAY_CLASS_TO_ENCODER.put(Float.class, FLOAT_OBJ_ARRAY);
+        ARRAY_CLASS_TO_ENCODER.put(boolean.class, BOOLEAN_ARRAY);
+        ARRAY_CLASS_TO_ENCODER.put(Boolean.class, BOOLEAN_OBJ_ARRAY);
+        ARRAY_CLASS_TO_ENCODER.put(byte[].class, BYTEA_ARRAY);
+        ARRAY_CLASS_TO_ENCODER.put(String.class, STRING_ARRAY);
+    }
+
+    /**
+     * Public no-argument constructor. Callers are expected to use the static
+     * {@link #getArrayEncoder(Object)} factory instead.
+     */
+    public ArrayEncoding() {
     }
 
     /**
-     * {@inheritDoc}
+     * Returns support for encoding <i>array</i>.
+     *
+     * @param array The array to encode. Must not be {@code null}.
+     * @return An instance capable of encoding <i>array</i> as a {@code String} at
+     * minimum. Some types may support binary encoding.
+     * @throws PSQLException if <i>array</i> is not a supported type.
+     * @see ArrayEncoding.ArrayEncoder#supportBinaryRepresentation(int)
      */
-    @Override
-    public boolean supportBinaryRepresentation(int oid) {
-      return support.supportBinaryRepresentation(oid);
+    @SuppressWarnings({"unchecked", "rawtypes"})
+    public static <A extends Object> ArrayEncoder<A> getArrayEncoder(A array) throws PSQLException {
+        final Class<?> arrayClazz = array.getClass();
+        Class<?> subClazz = arrayClazz.getComponentType();
+        if (subClazz == null) {
+            // the argument is not an array at all
+            throw new PSQLException(GT.tr("Invalid elements {0}", array), PSQLState.INVALID_PARAMETER_TYPE);
+        }
+        // direct hit: one-dimensional array of a registered component type
+        AbstractArrayEncoder<A> support = ARRAY_CLASS_TO_ENCODER.get(subClazz);
+        if (support != null) {
+            return support;
+        }
+        Class<?> subSubClazz = subClazz.getComponentType();
+        if (subSubClazz == null) {
+            // one-dimensional array of an unregistered reference type: fall back
+            // to the generic Object[] (text-only) encoder
+            if (Object.class.isAssignableFrom(subClazz)) {
+                return (ArrayEncoder<A>) OBJECT_ARRAY;
+            }
+            throw new PSQLException(GT.tr("Invalid elements {0}", array), PSQLState.INVALID_PARAMETER_TYPE);
+        }
+
+        // two or more dimensions: descend through the component types, counting
+        // depth, until a registered (or Object) element type is found
+        subClazz = subSubClazz;
+        int dimensions = 2;
+        while (subClazz != null) {
+            support = ARRAY_CLASS_TO_ENCODER.get(subClazz);
+            if (support != null) {
+                if (dimensions == 2) {
+                    return new TwoDimensionPrimitiveArrayEncoder(support);
+                }
+                return new RecursiveArrayEncoder(support, dimensions);
+            }
+            subSubClazz = subClazz.getComponentType();
+            if (subSubClazz == null) {
+                if (Object.class.isAssignableFrom(subClazz)) {
+                    if (dimensions == 2) {
+                        return new TwoDimensionPrimitiveArrayEncoder(OBJECT_ARRAY);
+                    }
+                    return new RecursiveArrayEncoder(OBJECT_ARRAY, dimensions);
+                }
+            }
+            ++dimensions;
+            subClazz = subSubClazz;
+        }
+
+        throw new PSQLException(GT.tr("Invalid elements {0}", array), PSQLState.INVALID_PARAMETER_TYPE);
     }
 
-    private boolean hasNulls(Object array, int depth) {
-      if (depth > 1) {
-        for (int i = 0, j = Array.getLength(array); i < j; i++) {
-          if (hasNulls(Array.get(array, i), depth - 1)) {
-            return true;
-          }
-        }
-        return false;
-      }
+    /**
+     * Contract for producing text and (optionally) binary representations of a
+     * Java array for transfer to PostgreSQL.
+     *
+     * @param <A> The array type supported by the implementation.
+     */
+    public interface ArrayEncoder<A extends Object> {
 
-      return support.countNulls(array) > 0;
+        /**
+         * The default array type oid supported by this instance.
+         *
+         * @return The default array type oid supported by this instance.
+         */
+        int getDefaultArrayTypeOid();
+
+        /**
+         * Creates {@code String} representation of the <i>array</i>.
+         *
+         * @param delim The character to use to delimit between elements.
+         * @param array The array to represent as a {@code String}.
+         * @return {@code String} representation of the <i>array</i>.
+         */
+        String toArrayString(char delim, A array);
+
+        /**
+         * Indicates if an array can be encoded in binary form to array <i>oid</i>.
+         *
+         * @param oid The array oid to see check for binary support.
+         * @return Indication of whether
+         * {@link #toBinaryRepresentation(BaseConnection, Object, int)} is
+         * supported for <i>oid</i>.
+         */
+        boolean supportBinaryRepresentation(int oid);
+
+        /**
+         * Creates binary representation of the <i>array</i>.
+         *
+         * @param connection The connection the binary representation will be used on. Attributes
+         *                   from the connection might impact how values are translated to
+         *                   binary.
+         * @param array      The array to binary encode. Must not be {@code null}, but may
+         *                   contain {@code null} elements.
+         * @param oid        The array type oid to use. Calls to
+         *                   {@link #supportBinaryRepresentation(int)} must have returned
+         *                   {@code true}.
+         * @return The binary representation of <i>array</i>.
+         * @throws SQLFeatureNotSupportedException If {@link #supportBinaryRepresentation(int)} is false for
+         *                                         <i>oid</i>.
+         */
+        byte[] toBinaryRepresentation(BaseConnection connection, A array, int oid)
+                throws SQLException, SQLFeatureNotSupportedException;
+
+        /**
+         * Append {@code String} representation of <i>array</i> to <i>sb</i>.
+         *
+         * @param sb    The {@link StringBuilder} to append to.
+         * @param delim The delimiter between elements.
+         * @param array The array to represent. Will not be {@code null}, but may contain
+         *              {@code null} elements.
+         */
+        void appendArray(StringBuilder sb, char delim, A array);
     }
 
     /**
-     * {@inheritDoc}
+     * Base class to implement {@link ArrayEncoding.ArrayEncoder} and provide
+     * multi-dimensional support.
+     *
+     * @param <A> Base array type supported.
      */
-    @Override
-    public byte[] toBinaryRepresentation(BaseConnection connection, Object array, int oid)
-        throws SQLException, SQLFeatureNotSupportedException {
+    private abstract static class AbstractArrayEncoder<A extends Object>
+            implements ArrayEncoder<A> {
 
-      final boolean hasNulls = hasNulls(array, dimensions);
+        final int arrayOid;
+        private final int oid;
 
-      final ByteArrayOutputStream baos = new ByteArrayOutputStream(1024 * dimensions);
-      final byte[] buffer = new byte[4];
-
-      try {
-        // dimensions
-        ByteConverter.int4(buffer, 0, dimensions);
-        baos.write(buffer);
-        // nulls
-        ByteConverter.int4(buffer, 0, hasNulls ? 1 : 0);
-        baos.write(buffer);
-        // oid
-        ByteConverter.int4(buffer, 0, support.getTypeOID(oid));
-        baos.write(buffer);
-
-        // length
-        ByteConverter.int4(buffer, 0, Array.getLength(array));
-        baos.write(buffer);
-        // postgresql uses 1 base by default
-        ByteConverter.int4(buffer, 0, 1);
-        baos.write(buffer);
-
-        writeArray(connection, buffer, baos, array, dimensions, true);
-
-        return baos.toByteArray();
-
-      } catch (IOException e) {
-        // this IO exception is from writing to baos, which will never throw an
-        // IOException
-        throw new java.lang.AssertionError(e);
-      }
-    }
-
-    private void writeArray(BaseConnection connection, byte[] buffer, ByteArrayOutputStream baos,
-        Object array, int depth, boolean first) throws IOException, SQLException {
-      final int length = Array.getLength(array);
-
-      if (first) {
-        ByteConverter.int4(buffer, 0, length > 0 ? Array.getLength(Array.get(array, 0)) : 0);
-        baos.write(buffer);
-        // postgresql uses 1 base by default
-        ByteConverter.int4(buffer, 0, 1);
-        baos.write(buffer);
-      }
-
-      for (int i = 0; i < length; i++) {
-        final Object subArray = Array.get(array, i);
-        if (depth > 2) {
-          writeArray(connection, buffer, baos, subArray, depth - 1, i == 0);
-        } else {
-          baos.write(support.toSingleDimensionBinaryRepresentation(connection, subArray));
+        /**
+         * @param oid      The default/primary base oid type.
+         * @param arrayOid The default/primary array oid type.
+         */
+        AbstractArrayEncoder(int oid, int arrayOid) {
+            this.oid = oid;
+            this.arrayOid = arrayOid;
+        }
+
+        /**
+         * @param arrayOid The array oid to get base oid type for.
+         * @return The base oid type for the given array oid type given to
+         * {@link #toBinaryRepresentation(BaseConnection, Object, int)}.
+         */
+        int getTypeOID(@SuppressWarnings("unused") int arrayOid) {
+            return oid;
+        }
+
+        /**
+         * By default returns the <i>arrayOid</i> this instance was instantiated with.
+         */
+        @Override
+        public int getDefaultArrayTypeOid() {
+            return arrayOid;
+        }
+
+        /**
+         * Counts the number of {@code null} elements in <i>array</i>.
+         *
+         * @param array The array to count {@code null} elements in.
+         * @return The number of {@code null} elements in <i>array</i>.
+         */
+        int countNulls(A array) {
+            int nulls = 0;
+            final int arrayLength = Array.getLength(array);
+            for (int i = 0; i < arrayLength; i++) {
+                if (Array.get(array, i) == null) {
+                    ++nulls;
+                }
+            }
+            return nulls;
+        }
+
+        /**
+         * Creates {@code byte[]} of just the raw data (no metadata).
+         *
+         * @param connection The connection the binary representation will be used on.
+         * @param array      The array to create binary representation of. Will not be
+         *                   {@code null}, but may contain {@code null} elements.
+         * @return {@code byte[]} of just the raw data (no metadata).
+         * @throws SQLFeatureNotSupportedException If {@link #supportBinaryRepresentation(int)} is false for
+         *                                         <i>oid</i>.
+         */
+        abstract byte[] toSingleDimensionBinaryRepresentation(BaseConnection connection, A array)
+                throws SQLException, SQLFeatureNotSupportedException;
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public String toArrayString(char delim, A array) {
+            final StringBuilder sb = new StringBuilder(1024);
+            appendArray(sb, delim, array);
+            return sb.toString();
+        }
+
+        /**
+         * By default returns {@code true} if <i>oid</i> matches the <i>arrayOid</i>
+         * this instance was instantiated with.
+         */
+        @Override
+        public boolean supportBinaryRepresentation(int oid) {
+            return oid == arrayOid;
+        }
+    }
+
+    /**
+     * Fallback encoder for {@code Object[]} component types without a dedicated
+     * encoder. Only the text representation is supported (oid 0, no binary form).
+     */
+    private static final AbstractArrayEncoder<Object[]> OBJECT_ARRAY = new AbstractArrayEncoder<Object[]>(0, 0) {
+
+        @Override
+        public int getDefaultArrayTypeOid() {
+            return 0;
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public boolean supportBinaryRepresentation(int oid) {
+            return false;
+        }
+
+        @Override
+        public byte[] toBinaryRepresentation(BaseConnection connection, Object[] array, int oid)
+                throws SQLException, SQLFeatureNotSupportedException {
+            throw new SQLFeatureNotSupportedException();
+        }
+
+        @Override
+        byte[] toSingleDimensionBinaryRepresentation(BaseConnection connection, Object[] array)
+                throws SQLException, SQLFeatureNotSupportedException {
+            throw new SQLFeatureNotSupportedException();
+        }
+
+        @Override
+        public void appendArray(StringBuilder sb, char delim, Object[] array) {
+            sb.append('{');
+            for (int i = 0; i < array.length; i++) {
+                if (i > 0) {
+                    sb.append(delim);
+                }
+                if (array[i] == null) {
+                    sb.append('N').append('U').append('L').append('L');
+                } else if (array[i].getClass().isArray()) {
+                    if (array[i] instanceof byte[]) {
+                        throw new UnsupportedOperationException("byte[] nested inside Object[]");
+                    }
+                    try {
+                        getArrayEncoder(array[i]).appendArray(sb, delim, array[i]);
+                    } catch (PSQLException e) {
+                        // this should never happen
+                        throw new IllegalStateException(e);
+                    }
+                } else {
+                    PgArray.escapeArrayElement(sb, array[i].toString());
+                }
+            }
+            sb.append('}');
+        }
+    };
+
+    /**
+     * Base class to provide support for {@code Number} based arrays.
+     *
+     * @param <N> The base type of array.
+     */
+    private abstract static class NumberArrayEncoder<N extends Number> extends AbstractArrayEncoder<N[]> {
+
+        private final int fieldSize;
+
+        /**
+         * @param fieldSize The fixed size to represent each value in binary.
+         * @param oid       The base type oid.
+         * @param arrayOid  The array type oid.
+         */
+        NumberArrayEncoder(int fieldSize, int oid, int arrayOid) {
+            super(oid, arrayOid);
+            this.fieldSize = fieldSize;
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        final int countNulls(N[] array) {
+            int count = 0;
+            for (int i = 0; i < array.length; i++) {
+                if (array[i] == null) {
+                    ++count;
+                }
+            }
+            return count;
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public final byte[] toBinaryRepresentation(BaseConnection connection, N[] array, int oid)
+                throws SQLException, SQLFeatureNotSupportedException {
+            assert oid == this.arrayOid;
+
+            final int nullCount = countNulls(array);
+
+            // element data is written first, leaving 20 bytes for the header below
+            final byte[] bytes = writeBytes(array, nullCount, 20);
+
+            // 1 dimension
+            ByteConverter.int4(bytes, 0, 1);
+            // has-null flag: 1 if any element is null
+            ByteConverter.int4(bytes, 4, nullCount == 0 ? 0 : 1);
+            // oid
+            ByteConverter.int4(bytes, 8, getTypeOID(oid));
+            // length
+            ByteConverter.int4(bytes, 12, array.length);
+            // postgresql uses 1 base by default
+            ByteConverter.int4(bytes, 16, 1);
+
+            return bytes;
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        final byte[] toSingleDimensionBinaryRepresentation(BaseConnection connection, N[] array)
+                throws SQLException, SQLFeatureNotSupportedException {
+
+            final int nullCount = countNulls(array);
+
+            return writeBytes(array, nullCount, 0);
+        }
+
+        // Writes each element as a 4-byte length prefix (-1 for SQL NULL) followed
+        // by the fieldSize-byte value, starting at offset (space reserved for a header).
+        private byte[] writeBytes(final N[] array, final int nullCount, final int offset) {
+            final int length = offset + (4 * array.length) + (fieldSize * (array.length - nullCount));
+            final byte[] bytes = new byte[length];
+
+            int idx = offset;
+            for (int i = 0; i < array.length; i++) {
+                if (array[i] == null) {
+                    ByteConverter.int4(bytes, idx, -1);
+                    idx += 4;
+                } else {
+                    ByteConverter.int4(bytes, idx, fieldSize);
+                    idx += 4;
+                    write(array[i], bytes, idx);
+                    idx += fieldSize;
+                }
+            }
+
+            return bytes;
+        }
+
+        /**
+         * Write single value (<i>number</i>) to <i>bytes</i> beginning at
+         * <i>offset</i>.
+         *
+         * @param number The value to write to <i>bytes</i>. This will never be {@code null}.
+         * @param bytes  The {@code byte[]} to write to.
+         * @param offset The offset into <i>bytes</i> to write the <i>number</i> value.
+         */
+        protected abstract void write(N number, byte[] bytes, int offset);
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public final void appendArray(StringBuilder sb, char delim, N[] array) {
+            sb.append('{');
+            for (int i = 0; i < array.length; i++) {
+                if (i != 0) {
+                    sb.append(delim);
+                }
+                if (array[i] == null) {
+                    sb.append('N').append('U').append('L').append('L');
+                } else {
+                    // non-null values are rendered quoted using toString()
+                    sb.append('"');
+                    sb.append(array[i].toString());
+                    sb.append('"');
+                }
+            }
+            sb.append('}');
         }
-      }
     }
 
-  }
+    /**
+     * Base support for primitive arrays.
+     *
+     * @param <A> The primitive array to support.
+     */
+    private abstract static class FixedSizePrimitiveArrayEncoder<A extends Object>
+            extends AbstractArrayEncoder<A> {
+
+        private final int fieldSize;
+
+        FixedSizePrimitiveArrayEncoder(int fieldSize, int oid, int arrayOid) {
+            super(oid, arrayOid);
+            this.fieldSize = fieldSize;
+        }
+
+        /**
+         * {@inheritDoc}
+         *
+         * <p>
+         * Always returns {@code 0}.
+         * </p>
+         */
+        @Override
+        final int countNulls(A array) {
+            return 0;
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public final byte[] toBinaryRepresentation(BaseConnection connection, A array, int oid)
+                throws SQLException, SQLFeatureNotSupportedException {
+            assert oid == arrayOid;
+
+            // 20-byte header plus (4-byte length prefix + fieldSize) per element
+            final int arrayLength = Array.getLength(array);
+            final int length = 20 + ((fieldSize + 4) * arrayLength);
+            final byte[] bytes = new byte[length];
+
+            // 1 dimension
+            ByteConverter.int4(bytes, 0, 1);
+            // no null
+            ByteConverter.int4(bytes, 4, 0);
+            // oid
+            ByteConverter.int4(bytes, 8, getTypeOID(oid));
+            // length
+            ByteConverter.int4(bytes, 12, arrayLength);
+            // postgresql uses 1 base by default
+            ByteConverter.int4(bytes, 16, 1);
+
+            write(array, bytes, 20);
+
+            return bytes;
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        final byte[] toSingleDimensionBinaryRepresentation(BaseConnection connection, A array)
+                throws SQLException, SQLFeatureNotSupportedException {
+            final int length = (fieldSize + 4) * Array.getLength(array);
+            final byte[] bytes = new byte[length];
+
+            write(array, bytes, 0);
+            return bytes;
+        }
+
+        /**
+         * Write the entire contents of <i>array</i> to <i>bytes</i> starting at
+         * <i>offset</i> without metadata describing type or length.
+         *
+         * <p>
+         * NOTE(review): the {@code (fieldSize + 4) * n} allocations above reserve a
+         * 4-byte per-element length prefix, so implementations appear expected to
+         * emit it — confirm in subclasses (not visible in this chunk).
+         * </p>
+         *
+         * @param array  The array to write.
+         * @param bytes  The {@code byte[]} to write to.
+         * @param offset The offset into <i>bytes</i> to start writing.
+         */
+        protected abstract void write(A array, byte[] bytes, int offset);
+    }
+
+    /**
+     * Wraps an {@link AbstractArrayEncoder} implementation and provides optimized
+     * support for 2 dimensions.
+     */
+    private static final class TwoDimensionPrimitiveArrayEncoder<A extends Object> implements ArrayEncoder<A[]> {
+        private final AbstractArrayEncoder<A> support;
+
+        /**
+         * @param support The instance providing support for the base array type.
+         */
+        TwoDimensionPrimitiveArrayEncoder(AbstractArrayEncoder<A> support) {
+            super();
+            this.support = support;
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public int getDefaultArrayTypeOid() {
+            return support.getDefaultArrayTypeOid();
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public String toArrayString(char delim, A[] array) {
+            final StringBuilder sb = new StringBuilder(1024);
+            appendArray(sb, delim, array);
+            return sb.toString();
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public void appendArray(StringBuilder sb, char delim, A[] array) {
+            sb.append('{');
+            for (int i = 0; i < array.length; i++) {
+                if (i > 0) {
+                    sb.append(delim);
+                }
+                support.appendArray(sb, delim, array[i]);
+            }
+            sb.append('}');
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public boolean supportBinaryRepresentation(int oid) {
+            return support.supportBinaryRepresentation(oid);
+        }
+
+        /**
+         * {@inheritDoc}
+         *
+         * <p>
+         * Binary layout: 4-byte dimension count (2), 4-byte has-null flag, 4-byte
+         * element oid, then a 4-byte length and 4-byte lower bound (always 1) for
+         * each of the two dimensions, followed by the encoded elements of each
+         * sub-array.
+         * </p>
+         */
+        @Override
+        public byte[] toBinaryRepresentation(BaseConnection connection, A[] array, int oid)
+                throws SQLException, SQLFeatureNotSupportedException {
+            final ByteArrayOutputStream baos = new ByteArrayOutputStream(Math.min(1024, (array.length * 32) + 20));
+            final byte[] buffer = new byte[4];
+
+            boolean hasNulls = false;
+            for (int i = 0; !hasNulls && i < array.length; i++) {
+                if (support.countNulls(array[i]) > 0) {
+                    hasNulls = true;
+                }
+            }
+
+            try {
+                // 2 dimension
+                ByteConverter.int4(buffer, 0, 2);
+                baos.write(buffer);
+                // nulls
+                ByteConverter.int4(buffer, 0, hasNulls ? 1 : 0);
+                baos.write(buffer);
+                // oid
+                ByteConverter.int4(buffer, 0, support.getTypeOID(oid));
+                baos.write(buffer);
+
+                // length
+                ByteConverter.int4(buffer, 0, array.length);
+                baos.write(buffer);
+                // postgres defaults to 1 based lower bound
+                ByteConverter.int4(buffer, 0, 1);
+                baos.write(buffer);
+
+                // second-dimension length taken from the first sub-array —
+                // NOTE(review): assumes a rectangular (non-jagged) array
+                ByteConverter.int4(buffer, 0, array.length > 0 ? Array.getLength(array[0]) : 0);
+                baos.write(buffer);
+                // postgresql uses 1 base by default
+                ByteConverter.int4(buffer, 0, 1);
+                baos.write(buffer);
+
+                for (int i = 0; i < array.length; i++) {
+                    baos.write(support.toSingleDimensionBinaryRepresentation(connection, array[i]));
+                }
+
+                return baos.toByteArray();
+
+            } catch (IOException e) {
+                // this IO exception is from writing to baos, which will never throw an
+                // IOException
+                throw new java.lang.AssertionError(e);
+            }
+        }
+    }
+
+    /**
+     * Wraps an {@link AbstractArrayEncoder} implementation and provides support for
+     * 2 or more dimensions using recursion.
+     */
+    @SuppressWarnings({"unchecked", "rawtypes"})
+    private static final class RecursiveArrayEncoder implements ArrayEncoder {
+
+        private final AbstractArrayEncoder support;
+        private final int dimensions;
+
+        /**
+         * @param support The instance providing support for the base array type.
+         */
+        RecursiveArrayEncoder(AbstractArrayEncoder support, int dimensions) {
+            super();
+            this.support = support;
+            this.dimensions = dimensions;
+            assert dimensions >= 2;
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public int getDefaultArrayTypeOid() {
+            return support.getDefaultArrayTypeOid();
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public String toArrayString(char delim, Object array) {
+            final StringBuilder sb = new StringBuilder(2048);
+            arrayString(sb, array, delim, dimensions);
+            return sb.toString();
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public void appendArray(StringBuilder sb, char delim, Object array) {
+            arrayString(sb, array, delim, dimensions);
+        }
+
+        // Recursively renders nested arrays in braces; the innermost dimension is
+        // delegated to the element-type encoder.
+        private void arrayString(StringBuilder sb, Object array, char delim, int depth) {
+
+            if (depth > 1) {
+                sb.append('{');
+                for (int i = 0, j = Array.getLength(array); i < j; i++) {
+                    if (i > 0) {
+                        sb.append(delim);
+                    }
+                    arrayString(sb, Array.get(array, i), delim, depth - 1);
+                }
+                sb.append('}');
+            } else {
+                support.appendArray(sb, delim, array);
+            }
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public boolean supportBinaryRepresentation(int oid) {
+            return support.supportBinaryRepresentation(oid);
+        }
+
+        // Recursively checks the innermost dimension for null elements.
+        private boolean hasNulls(Object array, int depth) {
+            if (depth > 1) {
+                for (int i = 0, j = Array.getLength(array); i < j; i++) {
+                    if (hasNulls(Array.get(array, i), depth - 1)) {
+                        return true;
+                    }
+                }
+                return false;
+            }
+
+            return support.countNulls(array) > 0;
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public byte[] toBinaryRepresentation(BaseConnection connection, Object array, int oid)
+                throws SQLException, SQLFeatureNotSupportedException {
+
+            final boolean hasNulls = hasNulls(array, dimensions);
+
+            final ByteArrayOutputStream baos = new ByteArrayOutputStream(1024 * dimensions);
+            final byte[] buffer = new byte[4];
+
+            try {
+                // dimensions
+                ByteConverter.int4(buffer, 0, dimensions);
+                baos.write(buffer);
+                // nulls
+                ByteConverter.int4(buffer, 0, hasNulls ? 1 : 0);
+                baos.write(buffer);
+                // oid
+                ByteConverter.int4(buffer, 0, support.getTypeOID(oid));
+                baos.write(buffer);
+
+                // length
+                ByteConverter.int4(buffer, 0, Array.getLength(array));
+                baos.write(buffer);
+                // postgresql uses 1 base by default
+                ByteConverter.int4(buffer, 0, 1);
+                baos.write(buffer);
+
+                writeArray(connection, buffer, baos, array, dimensions, true);
+
+                return baos.toByteArray();
+
+            } catch (IOException e) {
+                // this IO exception is from writing to baos, which will never throw an
+                // IOException
+                throw new java.lang.AssertionError(e);
+            }
+        }
+
+        private void writeArray(BaseConnection connection, byte[] buffer, ByteArrayOutputStream baos,
+                                Object array, int depth, boolean first) throws IOException, SQLException {
+            final int length = Array.getLength(array);
+
+            if (first) {
+                // dimension header (length + lower bound) is emitted only for the
+                // first sub-array at each depth — NOTE(review): assumes a
+                // rectangular (non-jagged) array
+                ByteConverter.int4(buffer, 0, length > 0 ? Array.getLength(Array.get(array, 0)) : 0);
+                baos.write(buffer);
+                // postgresql uses 1 base by default
+                ByteConverter.int4(buffer, 0, 1);
+                baos.write(buffer);
+            }
+
+            for (int i = 0; i < length; i++) {
+                final Object subArray = Array.get(array, i);
+                if (depth > 2) {
+                    writeArray(connection, buffer, baos, subArray, depth - 1, i == 0);
+                } else {
+                    baos.write(support.toSingleDimensionBinaryRepresentation(connection, subArray));
+                }
+            }
+        }
+
+    }
+
+
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/AutoSave.java b/pgjdbc/src/main/java/org/postgresql/jdbc/AutoSave.java
index f4588c7..2ace852 100644
--- a/pgjdbc/src/main/java/org/postgresql/jdbc/AutoSave.java
+++ b/pgjdbc/src/main/java/org/postgresql/jdbc/AutoSave.java
@@ -8,21 +8,21 @@ package org.postgresql.jdbc;
 import java.util.Locale;
 
 public enum AutoSave {
-  NEVER,
-  ALWAYS,
-  CONSERVATIVE;
+    NEVER,
+    ALWAYS,
+    CONSERVATIVE;
 
-  private final String value;
+    private final String value;
 
-  AutoSave() {
-    value = this.name().toLowerCase(Locale.ROOT);
-  }
+    AutoSave() {
+        value = this.name().toLowerCase(Locale.ROOT);
+    }
 
-  public String value() {
-    return value;
-  }
+    public static AutoSave of(String value) {
+        return valueOf(value.toUpperCase(Locale.ROOT));
+    }
 
-  public static AutoSave of(String value) {
-    return valueOf(value.toUpperCase(Locale.ROOT));
-  }
+    public String value() {
+        return value;
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/BatchResultHandler.java b/pgjdbc/src/main/java/org/postgresql/jdbc/BatchResultHandler.java
index 1d98932..0835cc5 100644
--- a/pgjdbc/src/main/java/org/postgresql/jdbc/BatchResultHandler.java
+++ b/pgjdbc/src/main/java/org/postgresql/jdbc/BatchResultHandler.java
@@ -30,228 +30,227 @@ import java.util.List;
  */
 public class BatchResultHandler extends ResultHandlerBase {
 
-  private final PgStatement pgStatement;
-  private int resultIndex;
+    private final PgStatement pgStatement;
+    private final Query[] queries;
+    private final long[] longUpdateCounts;
+    private final ParameterList[] parameterLists;
+    private final boolean expectGeneratedKeys;
+    private final List<List<Tuple>> allGeneratedRows;
+    private int resultIndex;
+    private PgResultSet generatedKeys;
+    private int committedRows; // 0 means no rows committed. 1 means row 0 was committed, and so on
+    private List<Tuple> latestGeneratedRows;
+    private PgResultSet latestGeneratedKeysRs;
 
-  private final Query[] queries;
-  private final long[] longUpdateCounts;
-  private final ParameterList [] parameterLists;
-  private final boolean expectGeneratedKeys;
-  private PgResultSet generatedKeys;
-  private int committedRows; // 0 means no rows committed. 1 means row 0 was committed, and so on
-  private final List<List<Tuple>> allGeneratedRows;
-  private List<Tuple> latestGeneratedRows;
-  private PgResultSet latestGeneratedKeysRs;
-
-  BatchResultHandler(PgStatement pgStatement, Query[] queries,
-      ParameterList [] parameterLists,
-      boolean expectGeneratedKeys) {
-    this.pgStatement = pgStatement;
-    this.queries = queries;
-    this.parameterLists = parameterLists;
-    this.longUpdateCounts = new long[queries.length];
-    this.expectGeneratedKeys = expectGeneratedKeys;
-    this.allGeneratedRows = !expectGeneratedKeys ? null : new ArrayList<List<Tuple>>();
-  }
-
-  @Override
-  public void handleResultRows(Query fromQuery, Field[] fields, List<Tuple> tuples,
-      ResultCursor cursor) {
-    // If SELECT, then handleCommandStatus call would just be missing
-    resultIndex++;
-    if (!expectGeneratedKeys) {
-      // No rows expected -> just ignore rows
-      return;
+    BatchResultHandler(PgStatement pgStatement, Query[] queries,
+                       ParameterList[] parameterLists,
+                       boolean expectGeneratedKeys) {
+        this.pgStatement = pgStatement;
+        this.queries = queries;
+        this.parameterLists = parameterLists;
+        this.longUpdateCounts = new long[queries.length];
+        this.expectGeneratedKeys = expectGeneratedKeys;
+        this.allGeneratedRows = !expectGeneratedKeys ? null : new ArrayList<List<Tuple>>();
     }
-    if (generatedKeys == null) {
-      try {
-        // If SELECT, the resulting ResultSet is not valid
-        // Thus it is up to handleCommandStatus to decide if resultSet is good enough
-        latestGeneratedKeysRs = (PgResultSet) pgStatement.createResultSet(fromQuery, fields,
-            new ArrayList<>(), cursor);
-      } catch (SQLException e) {
-        handleError(e);
-      }
-    }
-    latestGeneratedRows = tuples;
-  }
 
-  @Override
-  public void handleCommandStatus(String status, long updateCount, long insertOID) {
-    List<Tuple> latestGeneratedRows = this.latestGeneratedRows;
-    if (latestGeneratedRows != null) {
-      // We have DML. Decrease resultIndex that was just increased in handleResultRows
-      resultIndex--;
-      // If exception thrown, no need to collect generated keys
-      // Note: some generated keys might be secured in generatedKeys
-      if (updateCount > 0 && (getException() == null || isAutoCommit())) {
-        List<List<Tuple>> allGeneratedRows = this.allGeneratedRows;
-        allGeneratedRows.add(latestGeneratedRows);
+    @Override
+    public void handleResultRows(Query fromQuery, Field[] fields, List<Tuple> tuples,
+                                 ResultCursor cursor) {
+        // If SELECT, then handleCommandStatus call would just be missing
+        resultIndex++;
+        if (!expectGeneratedKeys) {
+            // No rows expected -> just ignore rows
+            return;
+        }
         if (generatedKeys == null) {
-          generatedKeys = latestGeneratedKeysRs;
+            try {
+                // If SELECT, the resulting ResultSet is not valid
+                // Thus it is up to handleCommandStatus to decide if resultSet is good enough
+                latestGeneratedKeysRs = (PgResultSet) pgStatement.createResultSet(fromQuery, fields,
+                        new ArrayList<>(), cursor);
+            } catch (SQLException e) {
+                handleError(e);
+            }
         }
-      }
-      this.latestGeneratedRows = null;
+        latestGeneratedRows = tuples;
     }
 
-    if (resultIndex >= queries.length) {
-      handleError(new PSQLException(GT.tr("Too many update results were returned."),
-          PSQLState.TOO_MANY_RESULTS));
-      return;
+    @Override
+    public void handleCommandStatus(String status, long updateCount, long insertOID) {
+        List<Tuple> latestGeneratedRows = this.latestGeneratedRows;
+        if (latestGeneratedRows != null) {
+            // We have DML. Decrease resultIndex that was just increased in handleResultRows
+            resultIndex--;
+            // If exception thrown, no need to collect generated keys
+            // Note: some generated keys might be secured in generatedKeys
+            if (updateCount > 0 && (getException() == null || isAutoCommit())) {
+                List<List<Tuple>> allGeneratedRows = this.allGeneratedRows;
+                allGeneratedRows.add(latestGeneratedRows);
+                if (generatedKeys == null) {
+                    generatedKeys = latestGeneratedKeysRs;
+                }
+            }
+            this.latestGeneratedRows = null;
+        }
+
+        if (resultIndex >= queries.length) {
+            handleError(new PSQLException(GT.tr("Too many update results were returned."),
+                    PSQLState.TOO_MANY_RESULTS));
+            return;
+        }
+        latestGeneratedKeysRs = null;
+
+        longUpdateCounts[resultIndex++] = updateCount;
     }
-    latestGeneratedKeysRs = null;
 
-    longUpdateCounts[resultIndex++] = updateCount;
-  }
-
-  private boolean isAutoCommit() {
-    try {
-      return pgStatement.getConnection().getAutoCommit();
-    } catch (SQLException e) {
-      assert false : "pgStatement.getConnection().getAutoCommit() should not throw";
-      return false;
+    private boolean isAutoCommit() {
+        try {
+            return pgStatement.getConnection().getAutoCommit();
+        } catch (SQLException e) {
+            assert false : "pgStatement.getConnection().getAutoCommit() should not throw";
+            return false;
+        }
     }
-  }
 
-  @Override
-  public void secureProgress() {
-    if (isAutoCommit()) {
-      committedRows = resultIndex;
-      updateGeneratedKeys();
+    @Override
+    public void secureProgress() {
+        if (isAutoCommit()) {
+            committedRows = resultIndex;
+            updateGeneratedKeys();
+        }
     }
-  }
 
-  private void updateGeneratedKeys() {
-    List<List<Tuple>> allGeneratedRows = this.allGeneratedRows;
-    if (allGeneratedRows == null || allGeneratedRows.isEmpty()) {
-      return;
-    }
-    PgResultSet generatedKeys = this.generatedKeys;
-    for (List<Tuple> rows : allGeneratedRows) {
-      generatedKeys.addRows(rows);
-    }
-    allGeneratedRows.clear();
-  }
-
-  @Override
-  public void handleWarning(SQLWarning warning) {
-    pgStatement.addWarning(warning);
-  }
-
-  @Override
-  public void handleError(SQLException newError) {
-    if (getException() == null) {
-      Arrays.fill(longUpdateCounts, committedRows, longUpdateCounts.length, Statement.EXECUTE_FAILED);
-      if (allGeneratedRows != null) {
+    private void updateGeneratedKeys() {
+        List<List<Tuple>> allGeneratedRows = this.allGeneratedRows;
+        if (allGeneratedRows == null || allGeneratedRows.isEmpty()) {
+            return;
+        }
+        PgResultSet generatedKeys = this.generatedKeys;
+        for (List<Tuple> rows : allGeneratedRows) {
+            generatedKeys.addRows(rows);
+        }
         allGeneratedRows.clear();
-      }
+    }
 
-      String queryString = "<unknown>";
-      if (pgStatement.getPGConnection().getLogServerErrorDetail()) {
-        if (resultIndex < queries.length) {
-          queryString = queries[resultIndex].toString(
-             parameterLists == null ? null : parameterLists[resultIndex]);
+    @Override
+    public void handleWarning(SQLWarning warning) {
+        pgStatement.addWarning(warning);
+    }
+
+    @Override
+    public void handleError(SQLException newError) {
+        if (getException() == null) {
+            Arrays.fill(longUpdateCounts, committedRows, longUpdateCounts.length, Statement.EXECUTE_FAILED);
+            if (allGeneratedRows != null) {
+                allGeneratedRows.clear();
+            }
+
+            String queryString = "<unknown>";
+            if (pgStatement.getPGConnection().getLogServerErrorDetail()) {
+                if (resultIndex < queries.length) {
+                    queryString = queries[resultIndex].toString(
+                            parameterLists == null ? null : parameterLists[resultIndex]);
+                }
+            }
+
+            BatchUpdateException batchException;
+            batchException = new BatchUpdateException(
+                    GT.tr("Batch entry {0} {1} was aborted: {2}  Call getNextException to see other errors in the batch.",
+                            resultIndex, queryString, newError.getMessage()),
+                    newError.getSQLState(), 0, uncompressLongUpdateCount(), newError);
+
+            super.handleError(batchException);
         }
-      }
+        resultIndex++;
 
-      BatchUpdateException batchException;
-      batchException = new BatchUpdateException(
-          GT.tr("Batch entry {0} {1} was aborted: {2}  Call getNextException to see other errors in the batch.",
-              resultIndex, queryString, newError.getMessage()),
-          newError.getSQLState(), 0, uncompressLongUpdateCount(), newError);
-
-      super.handleError(batchException);
+        super.handleError(newError);
     }
-    resultIndex++;
 
-    super.handleError(newError);
-  }
+    @Override
+    public void handleCompletion() throws SQLException {
+        updateGeneratedKeys();
+        SQLException batchException = getException();
+        if (batchException != null) {
+            if (isAutoCommit()) {
+                // Re-create batch exception since rows after exception might indeed succeed.
+                BatchUpdateException newException;
+                newException = new BatchUpdateException(
+                        batchException.getMessage(),
+                        batchException.getSQLState(), 0,
+                        uncompressLongUpdateCount(),
+                        batchException.getCause()
+                );
 
-  @Override
-  public void handleCompletion() throws SQLException {
-    updateGeneratedKeys();
-    SQLException batchException = getException();
-    if (batchException != null) {
-      if (isAutoCommit()) {
-        // Re-create batch exception since rows after exception might indeed succeed.
-        BatchUpdateException newException;
-        newException = new BatchUpdateException(
-            batchException.getMessage(),
-            batchException.getSQLState(), 0,
-            uncompressLongUpdateCount(),
-            batchException.getCause()
-        );
-
-        SQLException next = batchException.getNextException();
-        if (next != null) {
-          newException.setNextException(next);
+                SQLException next = batchException.getNextException();
+                if (next != null) {
+                    newException.setNextException(next);
+                }
+                batchException = newException;
+            }
+            throw batchException;
         }
-        batchException = newException;
-      }
-      throw batchException;
-    }
-  }
-
-  public ResultSet getGeneratedKeys() {
-    return generatedKeys;
-  }
-
-  private int[] uncompressUpdateCount() {
-    long[] original = uncompressLongUpdateCount();
-    int[] copy = new int[original.length];
-    for (int i = 0; i < original.length; i++) {
-      copy[i] = original[i] > Integer.MAX_VALUE ? Statement.SUCCESS_NO_INFO : (int) original[i];
-    }
-    return copy;
-  }
-
-  public int[] getUpdateCount() {
-    return uncompressUpdateCount();
-  }
-
-  private long[] uncompressLongUpdateCount() {
-    if (!(queries[0] instanceof BatchedQuery)) {
-      return longUpdateCounts;
-    }
-    int totalRows = 0;
-    boolean hasRewrites = false;
-    for (Query query : queries) {
-      int batchSize = query.getBatchSize();
-      totalRows += batchSize;
-      hasRewrites |= batchSize > 1;
-    }
-    if (!hasRewrites) {
-      return longUpdateCounts;
     }
 
-    /* In this situation there is a batch that has been rewritten. Substitute
-     * the running total returned by the database with a status code to
-     * indicate successful completion for each row the driver client added
-     * to the batch.
-     */
-    long[] newUpdateCounts = new long[totalRows];
-    int offset = 0;
-    for (int i = 0; i < queries.length; i++) {
-      Query query = queries[i];
-      int batchSize = query.getBatchSize();
-      long superBatchResult = longUpdateCounts[i];
-      if (batchSize == 1) {
-        newUpdateCounts[offset++] = superBatchResult;
-        continue;
-      }
-      if (superBatchResult > 0) {
-        // If some rows inserted, we do not really know how did they spread over individual
-        // statements
-        superBatchResult = Statement.SUCCESS_NO_INFO;
-      }
-      Arrays.fill(newUpdateCounts, offset, offset + batchSize, superBatchResult);
-      offset += batchSize;
+    public ResultSet getGeneratedKeys() {
+        return generatedKeys;
     }
-    return newUpdateCounts;
-  }
 
-  public long[] getLargeUpdateCount() {
-    return uncompressLongUpdateCount();
-  }
+    private int[] uncompressUpdateCount() {
+        long[] original = uncompressLongUpdateCount();
+        int[] copy = new int[original.length];
+        for (int i = 0; i < original.length; i++) {
+            copy[i] = original[i] > Integer.MAX_VALUE ? Statement.SUCCESS_NO_INFO : (int) original[i];
+        }
+        return copy;
+    }
+
+    public int[] getUpdateCount() {
+        return uncompressUpdateCount();
+    }
+
+    private long[] uncompressLongUpdateCount() {
+        if (!(queries[0] instanceof BatchedQuery)) {
+            return longUpdateCounts;
+        }
+        int totalRows = 0;
+        boolean hasRewrites = false;
+        for (Query query : queries) {
+            int batchSize = query.getBatchSize();
+            totalRows += batchSize;
+            hasRewrites |= batchSize > 1;
+        }
+        if (!hasRewrites) {
+            return longUpdateCounts;
+        }
+
+        /* In this situation there is a batch that has been rewritten. Substitute
+         * the running total returned by the database with a status code to
+         * indicate successful completion for each row the driver client added
+         * to the batch.
+         */
+        long[] newUpdateCounts = new long[totalRows];
+        int offset = 0;
+        for (int i = 0; i < queries.length; i++) {
+            Query query = queries[i];
+            int batchSize = query.getBatchSize();
+            long superBatchResult = longUpdateCounts[i];
+            if (batchSize == 1) {
+                newUpdateCounts[offset++] = superBatchResult;
+                continue;
+            }
+            if (superBatchResult > 0) {
+                // If some rows were inserted, we do not really know how they were
+                // distributed over the individual statements
+                superBatchResult = Statement.SUCCESS_NO_INFO;
+            }
+            Arrays.fill(newUpdateCounts, offset, offset + batchSize, superBatchResult);
+            offset += batchSize;
+        }
+        return newUpdateCounts;
+    }
+
+    public long[] getLargeUpdateCount() {
+        return uncompressLongUpdateCount();
+    }
 
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/BooleanTypeUtil.java b/pgjdbc/src/main/java/org/postgresql/jdbc/BooleanTypeUtil.java
index b7b2225..e4854af 100644
--- a/pgjdbc/src/main/java/org/postgresql/jdbc/BooleanTypeUtil.java
+++ b/pgjdbc/src/main/java/org/postgresql/jdbc/BooleanTypeUtil.java
@@ -20,84 +20,84 @@ import java.util.logging.Logger;
  */
 class BooleanTypeUtil {
 
-  private static final Logger LOGGER = Logger.getLogger(BooleanTypeUtil.class.getName());
+    private static final Logger LOGGER = Logger.getLogger(BooleanTypeUtil.class.getName());
 
-  private BooleanTypeUtil() {
-  }
+    private BooleanTypeUtil() {
+    }
 
-  /**
-   * Cast an Object value to the corresponding boolean value.
-   *
-   * @param in Object to cast into boolean
-   * @return boolean value corresponding to the cast of the object
-   * @throws PSQLException PSQLState.CANNOT_COERCE
-   */
-  static boolean castToBoolean(final Object in) throws PSQLException {
-    if (LOGGER.isLoggable(Level.FINE)) {
-      LOGGER.log(Level.FINE, "Cast to boolean: \"{0}\"", String.valueOf(in));
+    /**
+     * Cast an Object value to the corresponding boolean value.
+     *
+     * @param in Object to cast into boolean
+     * @return boolean value corresponding to the cast of the object
+     * @throws PSQLException PSQLState.CANNOT_COERCE
+     */
+    static boolean castToBoolean(final Object in) throws PSQLException {
+        if (LOGGER.isLoggable(Level.FINE)) {
+            LOGGER.log(Level.FINE, "Cast to boolean: \"{0}\"", String.valueOf(in));
+        }
+        if (in instanceof Boolean) {
+            return (Boolean) in;
+        }
+        if (in instanceof String) {
+            return fromString((String) in);
+        }
+        if (in instanceof Character) {
+            return fromCharacter((Character) in);
+        }
+        if (in instanceof Number) {
+            return fromNumber((Number) in);
+        }
+        throw new PSQLException("Cannot cast to boolean", PSQLState.CANNOT_COERCE);
     }
-    if (in instanceof Boolean) {
-      return (Boolean) in;
-    }
-    if (in instanceof String) {
-      return fromString((String) in);
-    }
-    if (in instanceof Character) {
-      return fromCharacter((Character) in);
-    }
-    if (in instanceof Number) {
-      return fromNumber((Number) in);
-    }
-    throw new PSQLException("Cannot cast to boolean", PSQLState.CANNOT_COERCE);
-  }
 
-  static boolean fromString(final String strval) throws PSQLException {
-    // Leading or trailing whitespace is ignored, and case does not matter.
-    final String val = strval.trim();
-    if ("1".equals(val) || "true".equalsIgnoreCase(val)
-        || "t".equalsIgnoreCase(val) || "yes".equalsIgnoreCase(val)
-        || "y".equalsIgnoreCase(val) || "on".equalsIgnoreCase(val)) {
-      return true;
+    static boolean fromString(final String strval) throws PSQLException {
+        // Leading or trailing whitespace is ignored, and case does not matter.
+        final String val = strval.trim();
+        if ("1".equals(val) || "true".equalsIgnoreCase(val)
+                || "t".equalsIgnoreCase(val) || "yes".equalsIgnoreCase(val)
+                || "y".equalsIgnoreCase(val) || "on".equalsIgnoreCase(val)) {
+            return true;
+        }
+        if ("0".equals(val) || "false".equalsIgnoreCase(val)
+                || "f".equalsIgnoreCase(val) || "no".equalsIgnoreCase(val)
+                || "n".equalsIgnoreCase(val) || "off".equalsIgnoreCase(val)) {
+            return false;
+        }
+        throw cannotCoerceException(strval);
     }
-    if ("0".equals(val) || "false".equalsIgnoreCase(val)
-        || "f".equalsIgnoreCase(val) || "no".equalsIgnoreCase(val)
-        || "n".equalsIgnoreCase(val) || "off".equalsIgnoreCase(val)) {
-      return false;
-    }
-    throw cannotCoerceException(strval);
-  }
 
-  private static boolean fromCharacter(final Character charval) throws PSQLException {
-    if ('1' == charval || 't' == charval || 'T' == charval
-        || 'y' == charval || 'Y' == charval) {
-      return true;
+    private static boolean fromCharacter(final Character charval) throws PSQLException {
+        if ('1' == charval || 't' == charval || 'T' == charval
+                || 'y' == charval || 'Y' == charval) {
+            return true;
+        }
+        if ('0' == charval || 'f' == charval || 'F' == charval
+                || 'n' == charval || 'N' == charval) {
+            return false;
+        }
+        throw cannotCoerceException(charval);
     }
-    if ('0' == charval || 'f' == charval || 'F' == charval
-        || 'n' == charval || 'N' == charval) {
-      return false;
-    }
-    throw cannotCoerceException(charval);
-  }
 
-  private static boolean fromNumber(final Number numval) throws PSQLException {
-    // Handles BigDecimal, Byte, Short, Integer, Long Float, Double
-    // based on the widening primitive conversions.
-    final double value = numval.doubleValue();
-    if (value == 1.0d) {
-      return true;
+    private static boolean fromNumber(final Number numval) throws PSQLException {
+        // Handles BigDecimal, Byte, Short, Integer, Long Float, Double
+        // based on the widening primitive conversions.
+        final double value = numval.doubleValue();
+        if (value == 1.0d) {
+            return true;
+        }
+        if (value == 0.0d) {
+            return false;
+        }
+        throw cannotCoerceException(numval);
     }
-    if (value == 0.0d) {
-      return false;
-    }
-    throw cannotCoerceException(numval);
-  }
 
-  private static PSQLException cannotCoerceException(final Object value) {
-    if (LOGGER.isLoggable(Level.FINE)) {
-      LOGGER.log(Level.FINE, "Cannot cast to boolean: \"{0}\"", String.valueOf(value));
+    private static PSQLException cannotCoerceException(final Object value) {
+        if (LOGGER.isLoggable(Level.FINE)) {
+            LOGGER.log(Level.FINE, "Cannot cast to boolean: \"{0}\"", String.valueOf(value));
+        }
+        return new PSQLException(GT.tr("Cannot cast to boolean: \"{0}\"", String.valueOf(value)),
+                PSQLState.CANNOT_COERCE);
     }
-    return new PSQLException(GT.tr("Cannot cast to boolean: \"{0}\"", String.valueOf(value)),
-        PSQLState.CANNOT_COERCE);
-  }
 
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/CallableBatchResultHandler.java b/pgjdbc/src/main/java/org/postgresql/jdbc/CallableBatchResultHandler.java
index 6ec60ca..18530cc 100644
--- a/pgjdbc/src/main/java/org/postgresql/jdbc/CallableBatchResultHandler.java
+++ b/pgjdbc/src/main/java/org/postgresql/jdbc/CallableBatchResultHandler.java
@@ -14,14 +14,14 @@ import org.postgresql.core.Tuple;
 import java.util.List;
 
 class CallableBatchResultHandler extends BatchResultHandler {
-  CallableBatchResultHandler(PgStatement statement, Query[] queries,
-      ParameterList[] parameterLists) {
-    super(statement, queries, parameterLists, false);
-  }
+    CallableBatchResultHandler(PgStatement statement, Query[] queries,
+                               ParameterList[] parameterLists) {
+        super(statement, queries, parameterLists, false);
+    }
 
-  @Override
-  public void handleResultRows(Query fromQuery, Field[] fields, List<Tuple> tuples,
-      ResultCursor cursor) {
-    /* ignore */
-  }
+    @Override
+    public void handleResultRows(Query fromQuery, Field[] fields, List<Tuple> tuples,
+                                 ResultCursor cursor) {
+        /* ignore */
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/EscapeSyntaxCallMode.java b/pgjdbc/src/main/java/org/postgresql/jdbc/EscapeSyntaxCallMode.java
index 00200b1..6a57aea 100644
--- a/pgjdbc/src/main/java/org/postgresql/jdbc/EscapeSyntaxCallMode.java
+++ b/pgjdbc/src/main/java/org/postgresql/jdbc/EscapeSyntaxCallMode.java
@@ -13,26 +13,26 @@ package org.postgresql.jdbc;
  * @see org.postgresql.PGProperty#ESCAPE_SYNTAX_CALL_MODE
  */
 public enum EscapeSyntaxCallMode {
-  SELECT("select"),
-  CALL_IF_NO_RETURN("callIfNoReturn"),
-  CALL("call");
+    SELECT("select"),
+    CALL_IF_NO_RETURN("callIfNoReturn"),
+    CALL("call");
 
-  private final String value;
+    private final String value;
 
-  EscapeSyntaxCallMode(String value) {
-    this.value = value;
-  }
-
-  public static EscapeSyntaxCallMode of(String mode) {
-    for (EscapeSyntaxCallMode escapeSyntaxCallMode : values()) {
-      if (escapeSyntaxCallMode.value.equals(mode)) {
-        return escapeSyntaxCallMode;
-      }
+    EscapeSyntaxCallMode(String value) {
+        this.value = value;
     }
-    return SELECT;
-  }
 
-  public String value() {
-    return value;
-  }
+    public static EscapeSyntaxCallMode of(String mode) {
+        for (EscapeSyntaxCallMode escapeSyntaxCallMode : values()) {
+            if (escapeSyntaxCallMode.value.equals(mode)) {
+                return escapeSyntaxCallMode;
+            }
+        }
+        return SELECT;
+    }
+
+    public String value() {
+        return value;
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/EscapedFunctions.java b/pgjdbc/src/main/java/org/postgresql/jdbc/EscapedFunctions.java
index 9729315..78bb4cd 100644
--- a/pgjdbc/src/main/java/org/postgresql/jdbc/EscapedFunctions.java
+++ b/pgjdbc/src/main/java/org/postgresql/jdbc/EscapedFunctions.java
@@ -24,727 +24,727 @@ import java.util.Map;
  */
 @Deprecated
 public class EscapedFunctions {
-  // numeric functions names
-  public static final String ABS = "abs";
-  public static final String ACOS = "acos";
-  public static final String ASIN = "asin";
-  public static final String ATAN = "atan";
-  public static final String ATAN2 = "atan2";
-  public static final String CEILING = "ceiling";
-  public static final String COS = "cos";
-  public static final String COT = "cot";
-  public static final String DEGREES = "degrees";
-  public static final String EXP = "exp";
-  public static final String FLOOR = "floor";
-  public static final String LOG = "log";
-  public static final String LOG10 = "log10";
-  public static final String MOD = "mod";
-  public static final String PI = "pi";
-  public static final String POWER = "power";
-  public static final String RADIANS = "radians";
-  public static final String ROUND = "round";
-  public static final String SIGN = "sign";
-  public static final String SIN = "sin";
-  public static final String SQRT = "sqrt";
-  public static final String TAN = "tan";
-  public static final String TRUNCATE = "truncate";
+    // numeric functions names
+    public static final String ABS = "abs";
+    public static final String ACOS = "acos";
+    public static final String ASIN = "asin";
+    public static final String ATAN = "atan";
+    public static final String ATAN2 = "atan2";
+    public static final String CEILING = "ceiling";
+    public static final String COS = "cos";
+    public static final String COT = "cot";
+    public static final String DEGREES = "degrees";
+    public static final String EXP = "exp";
+    public static final String FLOOR = "floor";
+    public static final String LOG = "log";
+    public static final String LOG10 = "log10";
+    public static final String MOD = "mod";
+    public static final String PI = "pi";
+    public static final String POWER = "power";
+    public static final String RADIANS = "radians";
+    public static final String ROUND = "round";
+    public static final String SIGN = "sign";
+    public static final String SIN = "sin";
+    public static final String SQRT = "sqrt";
+    public static final String TAN = "tan";
+    public static final String TRUNCATE = "truncate";
 
-  // string function names
-  public static final String ASCII = "ascii";
-  public static final String CHAR = "char";
-  public static final String CONCAT = "concat";
-  public static final String INSERT = "insert"; // change arguments order
-  public static final String LCASE = "lcase";
-  public static final String LEFT = "left";
-  public static final String LENGTH = "length";
-  public static final String LOCATE = "locate"; // the 3 args version duplicate args
-  public static final String LTRIM = "ltrim";
-  public static final String REPEAT = "repeat";
-  public static final String REPLACE = "replace";
-  public static final String RIGHT = "right"; // duplicate args
-  public static final String RTRIM = "rtrim";
-  public static final String SPACE = "space";
-  public static final String SUBSTRING = "substring";
-  public static final String UCASE = "ucase";
-  // soundex is implemented on the server side by
-  // the contrib/fuzzystrmatch module. We provide a translation
-  // for this in the driver, but since we don't want to bother with run
-  // time detection of this module's installation we don't report this
-  // method as supported in DatabaseMetaData.
-  // difference is currently unsupported entirely.
+    // string function names
+    public static final String ASCII = "ascii";
+    public static final String CHAR = "char";
+    public static final String CONCAT = "concat";
+    public static final String INSERT = "insert"; // changes argument order
+    public static final String LCASE = "lcase";
+    public static final String LEFT = "left";
+    public static final String LENGTH = "length";
+    public static final String LOCATE = "locate"; // the 3-arg version duplicates args
+    public static final String LTRIM = "ltrim";
+    public static final String REPEAT = "repeat";
+    public static final String REPLACE = "replace";
+    public static final String RIGHT = "right"; // duplicate args
+    public static final String RTRIM = "rtrim";
+    public static final String SPACE = "space";
+    public static final String SUBSTRING = "substring";
+    public static final String UCASE = "ucase";
+    // soundex is implemented on the server side by
+    // the contrib/fuzzystrmatch module. We provide a translation
+    // for this in the driver, but since we don't want to bother with run
+    // time detection of this module's installation we don't report this
+    // method as supported in DatabaseMetaData.
+    // difference is currently entirely unsupported.
 
-  // date time function names
-  public static final String CURDATE = "curdate";
-  public static final String CURTIME = "curtime";
-  public static final String DAYNAME = "dayname";
-  public static final String DAYOFMONTH = "dayofmonth";
-  public static final String DAYOFWEEK = "dayofweek";
-  public static final String DAYOFYEAR = "dayofyear";
-  public static final String HOUR = "hour";
-  public static final String MINUTE = "minute";
-  public static final String MONTH = "month";
-  public static final String MONTHNAME = "monthname";
-  public static final String NOW = "now";
-  public static final String QUARTER = "quarter";
-  public static final String SECOND = "second";
-  public static final String WEEK = "week";
-  public static final String YEAR = "year";
-  // for timestampadd and timestampdiff the fractional part of second is not supported
-  // by the backend
-  // timestampdiff is very partially supported
-  public static final String TIMESTAMPADD = "timestampadd";
-  public static final String TIMESTAMPDIFF = "timestampdiff";
+    // date time function names
+    public static final String CURDATE = "curdate";
+    public static final String CURTIME = "curtime";
+    public static final String DAYNAME = "dayname";
+    public static final String DAYOFMONTH = "dayofmonth";
+    public static final String DAYOFWEEK = "dayofweek";
+    public static final String DAYOFYEAR = "dayofyear";
+    public static final String HOUR = "hour";
+    public static final String MINUTE = "minute";
+    public static final String MONTH = "month";
+    public static final String MONTHNAME = "monthname";
+    public static final String NOW = "now";
+    public static final String QUARTER = "quarter";
+    public static final String SECOND = "second";
+    public static final String WEEK = "week";
+    public static final String YEAR = "year";
+    // for timestampadd and timestampdiff the fractional part of second is not supported
+    // by the backend
+    // timestampdiff is very partially supported
+    public static final String TIMESTAMPADD = "timestampadd";
+    public static final String TIMESTAMPDIFF = "timestampdiff";
 
-  // constants for timestampadd and timestampdiff
-  public static final String SQL_TSI_ROOT = "SQL_TSI_";
-  public static final String SQL_TSI_DAY = "DAY";
-  public static final String SQL_TSI_FRAC_SECOND = "FRAC_SECOND";
-  public static final String SQL_TSI_HOUR = "HOUR";
-  public static final String SQL_TSI_MINUTE = "MINUTE";
-  public static final String SQL_TSI_MONTH = "MONTH";
-  public static final String SQL_TSI_QUARTER = "QUARTER";
-  public static final String SQL_TSI_SECOND = "SECOND";
-  public static final String SQL_TSI_WEEK = "WEEK";
-  public static final String SQL_TSI_YEAR = "YEAR";
+    // constants for timestampadd and timestampdiff
+    public static final String SQL_TSI_ROOT = "SQL_TSI_";
+    public static final String SQL_TSI_DAY = "DAY";
+    public static final String SQL_TSI_FRAC_SECOND = "FRAC_SECOND";
+    public static final String SQL_TSI_HOUR = "HOUR";
+    public static final String SQL_TSI_MINUTE = "MINUTE";
+    public static final String SQL_TSI_MONTH = "MONTH";
+    public static final String SQL_TSI_QUARTER = "QUARTER";
+    public static final String SQL_TSI_SECOND = "SECOND";
+    public static final String SQL_TSI_WEEK = "WEEK";
+    public static final String SQL_TSI_YEAR = "YEAR";
 
-  // system functions
-  public static final String DATABASE = "database";
-  public static final String IFNULL = "ifnull";
-  public static final String USER = "user";
+    // system functions
+    public static final String DATABASE = "database";
+    public static final String IFNULL = "ifnull";
+    public static final String USER = "user";
 
-  /**
-   * storage for functions implementations.
-   */
-  private static final Map<String, Method> functionMap = createFunctionMap();
-
-  public EscapedFunctions() {
-  }
-
-  private static Map<String, Method> createFunctionMap() {
-    Method[] arrayMeths = EscapedFunctions.class.getDeclaredMethods();
-    Map<String, Method> functionMap = new HashMap<>(arrayMeths.length * 2);
-    for (Method meth : arrayMeths) {
-      if (meth.getName().startsWith("sql")) {
-        functionMap.put(meth.getName().toLowerCase(Locale.US), meth);
-      }
-    }
-    return functionMap;
-  }
-
-  /**
-   * get Method object implementing the given function.
-   *
-   * @param functionName name of the searched function
-   * @return a Method object or null if not found
-   */
-  public static Method getFunction(String functionName) {
-    return functionMap.get("sql" + functionName.toLowerCase(Locale.US));
-  }
-
-  // ** numeric functions translations **
-
-  /**
-   * ceiling to ceil translation.
-   *
-   * @param parsedArgs arguments
-   * @return sql call
-   * @throws SQLException if something wrong happens
-   */
-  public static String sqlceiling(List<?> parsedArgs) throws SQLException {
-    return singleArgumentFunctionCall("ceil(", "ceiling", parsedArgs);
-  }
-
-  /**
-   * log to ln translation.
-   *
-   * @param parsedArgs arguments
-   * @return sql call
-   * @throws SQLException if something wrong happens
-   */
-  public static String sqllog(List<?> parsedArgs) throws SQLException {
-    return singleArgumentFunctionCall("ln(", "log", parsedArgs);
-  }
-
-  /**
-   * log10 to log translation.
-   *
-   * @param parsedArgs arguments
-   * @return sql call
-   * @throws SQLException if something wrong happens
-   */
-  public static String sqllog10(List<?> parsedArgs) throws SQLException {
-    return singleArgumentFunctionCall("log(", "log10", parsedArgs);
-  }
-
-  /**
-   * power to pow translation.
-   *
-   * @param parsedArgs arguments
-   * @return sql call
-   * @throws SQLException if something wrong happens
-   */
-  public static String sqlpower(List<?> parsedArgs) throws SQLException {
-    return twoArgumentsFunctionCall("pow(", "power", parsedArgs);
-  }
-
-  /**
-   * truncate to trunc translation.
-   *
-   * @param parsedArgs arguments
-   * @return sql call
-   * @throws SQLException if something wrong happens
-   */
-  public static String sqltruncate(List<?> parsedArgs) throws SQLException {
-    return twoArgumentsFunctionCall("trunc(", "truncate", parsedArgs);
-  }
-
-  // ** string functions translations **
-
-  /**
-   * char to chr translation.
-   *
-   * @param parsedArgs arguments
-   * @return sql call
-   * @throws SQLException if something wrong happens
-   */
-  public static String sqlchar(List<?> parsedArgs) throws SQLException {
-    return singleArgumentFunctionCall("chr(", "char", parsedArgs);
-  }
-
-  /**
-   * concat translation.
-   *
-   * @param parsedArgs arguments
-   * @return sql call
-   */
-  public static String sqlconcat(List<?> parsedArgs) {
-    StringBuilder buf = new StringBuilder();
-    buf.append('(');
-    for (int iArg = 0; iArg < parsedArgs.size(); iArg++) {
-      buf.append(parsedArgs.get(iArg));
-      if (iArg != (parsedArgs.size() - 1)) {
-        buf.append(" || ");
-      }
-    }
-    return buf.append(')').toString();
-  }
-
-  /**
-   * insert to overlay translation.
-   *
-   * @param parsedArgs arguments
-   * @return sql call
-   * @throws SQLException if something wrong happens
-   */
-  public static String sqlinsert(List<?> parsedArgs) throws SQLException {
-    if (parsedArgs.size() != 4) {
-      throw new PSQLException(GT.tr("{0} function takes four and only four argument.", "insert"),
-          PSQLState.SYNTAX_ERROR);
-    }
-    StringBuilder buf = new StringBuilder();
-    buf.append("overlay(");
-    buf.append(parsedArgs.get(0)).append(" placing ").append(parsedArgs.get(3));
-    buf.append(" from ").append(parsedArgs.get(1)).append(" for ").append(parsedArgs.get(2));
-    return buf.append(')').toString();
-  }
-
-  /**
-   * lcase to lower translation.
-   *
-   * @param parsedArgs arguments
-   * @return sql call
-   * @throws SQLException if something wrong happens
-   */
-  public static String sqllcase(List<?> parsedArgs) throws SQLException {
-    return singleArgumentFunctionCall("lower(", "lcase", parsedArgs);
-  }
-
-  /**
-   * left to substring translation.
-   *
-   * @param parsedArgs arguments
-   * @return sql call
-   * @throws SQLException if something wrong happens
-   */
-  public static String sqlleft(List<?> parsedArgs) throws SQLException {
-    if (parsedArgs.size() != 2) {
-      throw new PSQLException(GT.tr("{0} function takes two and only two arguments.", "left"),
-          PSQLState.SYNTAX_ERROR);
-    }
-    StringBuilder buf = new StringBuilder();
-    buf.append("substring(");
-    buf.append(parsedArgs.get(0)).append(" for ").append(parsedArgs.get(1));
-    return buf.append(')').toString();
-  }
-
-  /**
-   * length translation.
-   *
-   * @param parsedArgs arguments
-   * @return sql call
-   * @throws SQLException if something wrong happens
-   */
-  public static String sqllength(List<?> parsedArgs) throws SQLException {
-    if (parsedArgs.size() != 1) {
-      throw new PSQLException(GT.tr("{0} function takes one and only one argument.", "length"),
-          PSQLState.SYNTAX_ERROR);
-    }
-    StringBuilder buf = new StringBuilder();
-    buf.append("length(trim(trailing from ");
-    buf.append(parsedArgs.get(0));
-    return buf.append("))").toString();
-  }
-
-  /**
-   * locate translation.
-   *
-   * @param parsedArgs arguments
-   * @return sql call
-   * @throws SQLException if something wrong happens
-   */
-  public static String sqllocate(List<?> parsedArgs) throws SQLException {
-    if (parsedArgs.size() == 2) {
-      return "position(" + parsedArgs.get(0) + " in " + parsedArgs.get(1) + ")";
-    } else if (parsedArgs.size() == 3) {
-      String tmp = "position(" + parsedArgs.get(0) + " in substring(" + parsedArgs.get(1) + " from "
-          + parsedArgs.get(2) + "))";
-      return "(" + parsedArgs.get(2) + "*sign(" + tmp + ")+" + tmp + ")";
-    } else {
-      throw new PSQLException(GT.tr("{0} function takes two or three arguments.", "locate"),
-          PSQLState.SYNTAX_ERROR);
-    }
-  }
-
-  /**
-   * ltrim translation.
-   *
-   * @param parsedArgs arguments
-   * @return sql call
-   * @throws SQLException if something wrong happens
-   */
-  public static String sqlltrim(List<?> parsedArgs) throws SQLException {
-    return singleArgumentFunctionCall("trim(leading from ", "ltrim", parsedArgs);
-  }
-
-  /**
-   * right to substring translation.
-   *
-   * @param parsedArgs arguments
-   * @return sql call
-   * @throws SQLException if something wrong happens
-   */
-  public static String sqlright(List<?> parsedArgs) throws SQLException {
-    if (parsedArgs.size() != 2) {
-      throw new PSQLException(GT.tr("{0} function takes two and only two arguments.", "right"),
-          PSQLState.SYNTAX_ERROR);
-    }
-    StringBuilder buf = new StringBuilder();
-    buf.append("substring(");
-    buf.append(parsedArgs.get(0))
-        .append(" from (length(")
-        .append(parsedArgs.get(0))
-        .append(")+1-")
-        .append(parsedArgs.get(1));
-    return buf.append("))").toString();
-  }
-
-  /**
-   * rtrim translation.
-   *
-   * @param parsedArgs arguments
-   * @return sql call
-   * @throws SQLException if something wrong happens
-   */
-  public static String sqlrtrim(List<?> parsedArgs) throws SQLException {
-    return singleArgumentFunctionCall("trim(trailing from ", "rtrim", parsedArgs);
-  }
-
-  /**
-   * space translation.
-   *
-   * @param parsedArgs arguments
-   * @return sql call
-   * @throws SQLException if something wrong happens
-   */
-  public static String sqlspace(List<?> parsedArgs) throws SQLException {
-    return singleArgumentFunctionCall("repeat(' ',", "space", parsedArgs);
-  }
-
-  /**
-   * substring to substr translation.
-   *
-   * @param parsedArgs arguments
-   * @return sql call
-   * @throws SQLException if something wrong happens
-   */
-  public static String sqlsubstring(List<?> parsedArgs) throws SQLException {
-    if (parsedArgs.size() == 2) {
-      return "substr(" + parsedArgs.get(0) + "," + parsedArgs.get(1) + ")";
-    } else if (parsedArgs.size() == 3) {
-      return "substr(" + parsedArgs.get(0) + "," + parsedArgs.get(1) + "," + parsedArgs.get(2)
-          + ")";
-    } else {
-      throw new PSQLException(GT.tr("{0} function takes two or three arguments.", "substring"),
-          PSQLState.SYNTAX_ERROR);
-    }
-  }
-
-  /**
-   * ucase to upper translation.
-   *
-   * @param parsedArgs arguments
-   * @return sql call
-   * @throws SQLException if something wrong happens
-   */
-  public static String sqlucase(List<?> parsedArgs) throws SQLException {
-    return singleArgumentFunctionCall("upper(", "ucase", parsedArgs);
-  }
-
-  /**
-   * curdate to current_date translation.
-   *
-   * @param parsedArgs arguments
-   * @return sql call
-   * @throws SQLException if something wrong happens
-   */
-  public static String sqlcurdate(List<?> parsedArgs) throws SQLException {
-    if (!parsedArgs.isEmpty()) {
-      throw new PSQLException(GT.tr("{0} function doesn''t take any argument.", "curdate"),
-          PSQLState.SYNTAX_ERROR);
-    }
-    return "current_date";
-  }
-
-  /**
-   * curtime to current_time translation.
-   *
-   * @param parsedArgs arguments
-   * @return sql call
-   * @throws SQLException if something wrong happens
-   */
-  public static String sqlcurtime(List<?> parsedArgs) throws SQLException {
-    if (!parsedArgs.isEmpty()) {
-      throw new PSQLException(GT.tr("{0} function doesn''t take any argument.", "curtime"),
-          PSQLState.SYNTAX_ERROR);
-    }
-    return "current_time";
-  }
-
-  /**
-   * dayname translation.
-   *
-   * @param parsedArgs arguments
-   * @return sql call
-   * @throws SQLException if something wrong happens
-   */
-  public static String sqldayname(List<?> parsedArgs) throws SQLException {
-    if (parsedArgs.size() != 1) {
-      throw new PSQLException(GT.tr("{0} function takes one and only one argument.", "dayname"),
-          PSQLState.SYNTAX_ERROR);
-    }
-    return "to_char(" + parsedArgs.get(0) + ",'Day')";
-  }
-
-  /**
-   * dayofmonth translation.
-   *
-   * @param parsedArgs arguments
-   * @return sql call
-   * @throws SQLException if something wrong happens
-   */
-  public static String sqldayofmonth(List<?> parsedArgs) throws SQLException {
-    return singleArgumentFunctionCall("extract(day from ", "dayofmonth", parsedArgs);
-  }
-
-  /**
-   * dayofweek translation adding 1 to postgresql function since we expect values from 1 to 7.
-   *
-   * @param parsedArgs arguments
-   * @return sql call
-   * @throws SQLException if something wrong happens
-   */
-  public static String sqldayofweek(List<?> parsedArgs) throws SQLException {
-    if (parsedArgs.size() != 1) {
-      throw new PSQLException(GT.tr("{0} function takes one and only one argument.", "dayofweek"),
-          PSQLState.SYNTAX_ERROR);
-    }
-    return "extract(dow from " + parsedArgs.get(0) + ")+1";
-  }
-
-  /**
-   * dayofyear translation.
-   *
-   * @param parsedArgs arguments
-   * @return sql call
-   * @throws SQLException if something wrong happens
-   */
-  public static String sqldayofyear(List<?> parsedArgs) throws SQLException {
-    return singleArgumentFunctionCall("extract(doy from ", "dayofyear", parsedArgs);
-  }
-
-  /**
-   * hour translation.
-   *
-   * @param parsedArgs arguments
-   * @return sql call
-   * @throws SQLException if something wrong happens
-   */
-  public static String sqlhour(List<?> parsedArgs) throws SQLException {
-    return singleArgumentFunctionCall("extract(hour from ", "hour", parsedArgs);
-  }
-
-  /**
-   * minute translation.
-   *
-   * @param parsedArgs arguments
-   * @return sql call
-   * @throws SQLException if something wrong happens
-   */
-  public static String sqlminute(List<?> parsedArgs) throws SQLException {
-    return singleArgumentFunctionCall("extract(minute from ", "minute", parsedArgs);
-  }
-
-  /**
-   * month translation.
-   *
-   * @param parsedArgs arguments
-   * @return sql call
-   * @throws SQLException if something wrong happens
-   */
-  public static String sqlmonth(List<?> parsedArgs) throws SQLException {
-    return singleArgumentFunctionCall("extract(month from ", "month", parsedArgs);
-  }
-
-  /**
-   * monthname translation.
-   *
-   * @param parsedArgs arguments
-   * @return sql call
-   * @throws SQLException if something wrong happens
-   */
-  public static String sqlmonthname(List<?> parsedArgs) throws SQLException {
-    if (parsedArgs.size() != 1) {
-      throw new PSQLException(GT.tr("{0} function takes one and only one argument.", "monthname"),
-          PSQLState.SYNTAX_ERROR);
-    }
-    return "to_char(" + parsedArgs.get(0) + ",'Month')";
-  }
-
-  /**
-   * quarter translation.
-   *
-   * @param parsedArgs arguments
-   * @return sql call
-   * @throws SQLException if something wrong happens
-   */
-  public static String sqlquarter(List<?> parsedArgs) throws SQLException {
-    return singleArgumentFunctionCall("extract(quarter from ", "quarter", parsedArgs);
-  }
-
-  /**
-   * second translation.
-   *
-   * @param parsedArgs arguments
-   * @return sql call
-   * @throws SQLException if something wrong happens
-   */
-  public static String sqlsecond(List<?> parsedArgs) throws SQLException {
-    return singleArgumentFunctionCall("extract(second from ", "second", parsedArgs);
-  }
-
-  /**
-   * week translation.
-   *
-   * @param parsedArgs arguments
-   * @return sql call
-   * @throws SQLException if something wrong happens
-   */
-  public static String sqlweek(List<?> parsedArgs) throws SQLException {
-    return singleArgumentFunctionCall("extract(week from ", "week", parsedArgs);
-  }
-
-  /**
-   * year translation.
-   *
-   * @param parsedArgs arguments
-   * @return sql call
-   * @throws SQLException if something wrong happens
-   */
-  public static String sqlyear(List<?> parsedArgs) throws SQLException {
-    return singleArgumentFunctionCall("extract(year from ", "year", parsedArgs);
-  }
-
-  /**
-   * time stamp add.
-   *
-   * @param parsedArgs arguments
-   * @return sql call
-   * @throws SQLException if something wrong happens
-   */
-  @SuppressWarnings("TypeParameterExplicitlyExtendsObject")
-  public static String sqltimestampadd(List<? extends Object> parsedArgs) throws SQLException {
-    if (parsedArgs.size() != 3) {
-      throw new PSQLException(
-          GT.tr("{0} function takes three and only three arguments.", "timestampadd"),
-          PSQLState.SYNTAX_ERROR);
-    }
-    String interval = EscapedFunctions.constantToInterval(parsedArgs.get(0).toString(),
-        parsedArgs.get(1).toString());
-    StringBuilder buf = new StringBuilder();
-    buf.append("(").append(interval).append("+");
-    buf.append(parsedArgs.get(2)).append(")");
-    return buf.toString();
-  }
-
-  private static String constantToInterval(String type, String value) throws SQLException {
-    if (!type.startsWith(SQL_TSI_ROOT)) {
-      throw new PSQLException(GT.tr("Interval {0} not yet implemented", type),
-          PSQLState.SYNTAX_ERROR);
-    }
-    String shortType = type.substring(SQL_TSI_ROOT.length());
-    if (SQL_TSI_DAY.equalsIgnoreCase(shortType)) {
-      return "CAST(" + value + " || ' day' as interval)";
-    } else if (SQL_TSI_SECOND.equalsIgnoreCase(shortType)) {
-      return "CAST(" + value + " || ' second' as interval)";
-    } else if (SQL_TSI_HOUR.equalsIgnoreCase(shortType)) {
-      return "CAST(" + value + " || ' hour' as interval)";
-    } else if (SQL_TSI_MINUTE.equalsIgnoreCase(shortType)) {
-      return "CAST(" + value + " || ' minute' as interval)";
-    } else if (SQL_TSI_MONTH.equalsIgnoreCase(shortType)) {
-      return "CAST(" + value + " || ' month' as interval)";
-    } else if (SQL_TSI_QUARTER.equalsIgnoreCase(shortType)) {
-      return "CAST((" + value + "::int * 3) || ' month' as interval)";
-    } else if (SQL_TSI_WEEK.equalsIgnoreCase(shortType)) {
-      return "CAST(" + value + " || ' week' as interval)";
-    } else if (SQL_TSI_YEAR.equalsIgnoreCase(shortType)) {
-      return "CAST(" + value + " || ' year' as interval)";
-    } else if (SQL_TSI_FRAC_SECOND.equalsIgnoreCase(shortType)) {
-      throw new PSQLException(GT.tr("Interval {0} not yet implemented", "SQL_TSI_FRAC_SECOND"),
-          PSQLState.SYNTAX_ERROR);
-    } else {
-      throw new PSQLException(GT.tr("Interval {0} not yet implemented", type),
-          PSQLState.SYNTAX_ERROR);
-    }
-  }
-
-  /**
-   * time stamp diff.
-   *
-   * @param parsedArgs arguments
-   * @return sql call
-   * @throws SQLException if something wrong happens
-   */
-  @SuppressWarnings("TypeParameterExplicitlyExtendsObject")
-  public static String sqltimestampdiff(List<? extends Object> parsedArgs) throws SQLException {
-    if (parsedArgs.size() != 3) {
-      throw new PSQLException(
-          GT.tr("{0} function takes three and only three arguments.", "timestampdiff"),
-          PSQLState.SYNTAX_ERROR);
-    }
-    String datePart = EscapedFunctions.constantToDatePart(parsedArgs.get(0).toString());
-    StringBuilder buf = new StringBuilder();
-    buf.append("extract( ")
-        .append(datePart)
-        .append(" from (")
-        .append(parsedArgs.get(2))
-        .append("-")
-        .append(parsedArgs.get(1))
-        .append("))");
-    return buf.toString();
-  }
-
-  private static String constantToDatePart(String type) throws SQLException {
-    if (!type.startsWith(SQL_TSI_ROOT)) {
-      throw new PSQLException(GT.tr("Interval {0} not yet implemented", type),
-          PSQLState.SYNTAX_ERROR);
-    }
-    String shortType = type.substring(SQL_TSI_ROOT.length());
-    if (SQL_TSI_DAY.equalsIgnoreCase(shortType)) {
-      return "day";
-    } else if (SQL_TSI_SECOND.equalsIgnoreCase(shortType)) {
-      return "second";
-    } else if (SQL_TSI_HOUR.equalsIgnoreCase(shortType)) {
-      return "hour";
-    } else if (SQL_TSI_MINUTE.equalsIgnoreCase(shortType)) {
-      return "minute";
-    } else if (SQL_TSI_FRAC_SECOND.equalsIgnoreCase(shortType)) {
-      throw new PSQLException(GT.tr("Interval {0} not yet implemented", "SQL_TSI_FRAC_SECOND"),
-          PSQLState.SYNTAX_ERROR);
-    } else {
-      throw new PSQLException(GT.tr("Interval {0} not yet implemented", type),
-          PSQLState.SYNTAX_ERROR);
-    }
-    // See http://archives.postgresql.org/pgsql-jdbc/2006-03/msg00096.php
-    /*
-     * else if (SQL_TSI_MONTH.equalsIgnoreCase(shortType)) return "month"; else if
-     * (SQL_TSI_QUARTER.equalsIgnoreCase(shortType)) return "quarter"; else if
-     * (SQL_TSI_WEEK.equalsIgnoreCase(shortType)) return "week"; else if
-     * (SQL_TSI_YEAR.equalsIgnoreCase(shortType)) return "year";
+    /**
+     * Storage for function implementations.
      */
-  }
+    private static final Map<String, Method> functionMap = createFunctionMap();
 
-  /**
-   * database translation.
-   *
-   * @param parsedArgs arguments
-   * @return sql call
-   * @throws SQLException if something wrong happens
-   */
-  public static String sqldatabase(List<?> parsedArgs) throws SQLException {
-    if (!parsedArgs.isEmpty()) {
-      throw new PSQLException(GT.tr("{0} function doesn''t take any argument.", "database"),
-          PSQLState.SYNTAX_ERROR);
+    public EscapedFunctions() {
     }
-    return "current_database()";
-  }
 
-  /**
-   * ifnull translation.
-   *
-   * @param parsedArgs arguments
-   * @return sql call
-   * @throws SQLException if something wrong happens
-   */
-  public static String sqlifnull(List<?> parsedArgs) throws SQLException {
-    return twoArgumentsFunctionCall("coalesce(", "ifnull", parsedArgs);
-  }
-
-  /**
-   * user translation.
-   *
-   * @param parsedArgs arguments
-   * @return sql call
-   * @throws SQLException if something wrong happens
-   */
-  public static String sqluser(List<?> parsedArgs) throws SQLException {
-    if (!parsedArgs.isEmpty()) {
-      throw new PSQLException(GT.tr("{0} function doesn''t take any argument.", "user"),
-          PSQLState.SYNTAX_ERROR);
+    private static Map<String, Method> createFunctionMap() {
+        Method[] arrayMeths = EscapedFunctions.class.getDeclaredMethods();
+        Map<String, Method> functionMap = new HashMap<>(arrayMeths.length * 2);
+        for (Method meth : arrayMeths) {
+            if (meth.getName().startsWith("sql")) {
+                functionMap.put(meth.getName().toLowerCase(Locale.US), meth);
+            }
+        }
+        return functionMap;
     }
-    return "user";
-  }
 
-  private static String singleArgumentFunctionCall(String call, String functionName,
-      List<?> parsedArgs) throws PSQLException {
-    if (parsedArgs.size() != 1) {
-      throw new PSQLException(GT.tr("{0} function takes one and only one argument.", functionName),
-          PSQLState.SYNTAX_ERROR);
+    /**
+     * Get the Method object implementing the given function.
+     *
+     * @param functionName name of the searched function
+     * @return a Method object or null if not found
+     */
+    public static Method getFunction(String functionName) {
+        return functionMap.get("sql" + functionName.toLowerCase(Locale.US));
     }
-    StringBuilder buf = new StringBuilder();
-    buf.append(call);
-    buf.append(parsedArgs.get(0));
-    return buf.append(')').toString();
-  }
 
-  private static String twoArgumentsFunctionCall(String call, String functionName,
-      List<?> parsedArgs) throws PSQLException {
-    if (parsedArgs.size() != 2) {
-      throw new PSQLException(GT.tr("{0} function takes two and only two arguments.", functionName),
-          PSQLState.SYNTAX_ERROR);
+    // ** numeric functions translations **
+
+    /**
+     * ceiling to ceil translation.
+     *
+     * @param parsedArgs arguments
+     * @return sql call
+     * @throws SQLException if something wrong happens
+     */
+    public static String sqlceiling(List<?> parsedArgs) throws SQLException {
+        return singleArgumentFunctionCall("ceil(", "ceiling", parsedArgs);
+    }
+
+    /**
+     * log to ln translation.
+     *
+     * @param parsedArgs arguments
+     * @return sql call
+     * @throws SQLException if something wrong happens
+     */
+    public static String sqllog(List<?> parsedArgs) throws SQLException {
+        return singleArgumentFunctionCall("ln(", "log", parsedArgs);
+    }
+
+    /**
+     * log10 to log translation.
+     *
+     * @param parsedArgs arguments
+     * @return sql call
+     * @throws SQLException if something wrong happens
+     */
+    public static String sqllog10(List<?> parsedArgs) throws SQLException {
+        return singleArgumentFunctionCall("log(", "log10", parsedArgs);
+    }
+
+    /**
+     * power to pow translation.
+     *
+     * @param parsedArgs arguments
+     * @return sql call
+     * @throws SQLException if something wrong happens
+     */
+    public static String sqlpower(List<?> parsedArgs) throws SQLException {
+        return twoArgumentsFunctionCall("pow(", "power", parsedArgs);
+    }
+
+    /**
+     * truncate to trunc translation.
+     *
+     * @param parsedArgs arguments
+     * @return sql call
+     * @throws SQLException if the argument list does not hold exactly two arguments
+     */
+    public static String sqltruncate(List<?> parsedArgs) throws SQLException {
+        return twoArgumentsFunctionCall("trunc(", "truncate", parsedArgs);
+    }
+
+    // ** string functions translations **
+
+    /**
+     * char to chr translation.
+     *
+     * @param parsedArgs arguments
+     * @return sql call
+     * @throws SQLException if the argument list does not hold exactly one argument
+     */
+    public static String sqlchar(List<?> parsedArgs) throws SQLException {
+        return singleArgumentFunctionCall("chr(", "char", parsedArgs);
+    }
+
+    /**
+     * concat translation: joins all arguments with the SQL {@code ||} operator,
+     * wrapped in parentheses. Accepts any number of arguments, including none.
+     *
+     * @param parsedArgs arguments
+     * @return sql call
+     */
+    public static String sqlconcat(List<?> parsedArgs) {
+        StringBuilder result = new StringBuilder("(");
+        String separator = "";
+        for (Object arg : parsedArgs) {
+            result.append(separator).append(arg);
+            separator = " || ";
+        }
+        return result.append(')').toString();
+    }
+
+    /**
+     * insert to overlay translation:
+     * {@code overlay(arg0 placing arg3 from arg1 for arg2)}.
+     *
+     * @param parsedArgs arguments (string, start, length, replacement)
+     * @return sql call
+     * @throws SQLException if the argument list does not hold exactly four arguments
+     */
+    public static String sqlinsert(List<?> parsedArgs) throws SQLException {
+        if (parsedArgs.size() != 4) {
+            throw new PSQLException(GT.tr("{0} function takes four and only four argument.", "insert"),
+                    PSQLState.SYNTAX_ERROR);
+        }
+        return "overlay(" + parsedArgs.get(0) + " placing " + parsedArgs.get(3)
+                + " from " + parsedArgs.get(1) + " for " + parsedArgs.get(2) + ')';
+    }
+
+    /**
+     * lcase to lower translation.
+     *
+     * @param parsedArgs arguments
+     * @return sql call
+     * @throws SQLException if the argument list does not hold exactly one argument
+     */
+    public static String sqllcase(List<?> parsedArgs) throws SQLException {
+        return singleArgumentFunctionCall("lower(", "lcase", parsedArgs);
+    }
+
+    /**
+     * left to substring translation: {@code substring(arg0 for arg1)}.
+     *
+     * @param parsedArgs arguments (string, length)
+     * @return sql call
+     * @throws SQLException if the argument list does not hold exactly two arguments
+     */
+    public static String sqlleft(List<?> parsedArgs) throws SQLException {
+        if (parsedArgs.size() != 2) {
+            throw new PSQLException(GT.tr("{0} function takes two and only two arguments.", "left"),
+                    PSQLState.SYNTAX_ERROR);
+        }
+        return "substring(" + parsedArgs.get(0) + " for " + parsedArgs.get(1) + ')';
+    }
+
+    /**
+     * length translation: measures the string with trailing blanks removed,
+     * i.e. {@code length(trim(trailing from arg0))}.
+     *
+     * @param parsedArgs arguments
+     * @return sql call
+     * @throws SQLException if the argument list does not hold exactly one argument
+     */
+    public static String sqllength(List<?> parsedArgs) throws SQLException {
+        if (parsedArgs.size() != 1) {
+            throw new PSQLException(GT.tr("{0} function takes one and only one argument.", "length"),
+                    PSQLState.SYNTAX_ERROR);
+        }
+        return "length(trim(trailing from " + parsedArgs.get(0) + "))";
+    }
+
+    /**
+     * locate translation.
+     *
+     * @param parsedArgs arguments
+     * @return sql call
+     * @throws SQLException if something wrong happens
+     */
+    public static String sqllocate(List<?> parsedArgs) throws SQLException {
+        if (parsedArgs.size() == 2) {
+            return "position(" + parsedArgs.get(0) + " in " + parsedArgs.get(1) + ")";
+        } else if (parsedArgs.size() == 3) {
+            String tmp = "position(" + parsedArgs.get(0) + " in substring(" + parsedArgs.get(1) + " from "
+                    + parsedArgs.get(2) + "))";
+            return "(" + parsedArgs.get(2) + "*sign(" + tmp + ")+" + tmp + ")";
+        } else {
+            throw new PSQLException(GT.tr("{0} function takes two or three arguments.", "locate"),
+                    PSQLState.SYNTAX_ERROR);
+        }
+    }
+
+    /**
+     * ltrim translation.
+     *
+     * @param parsedArgs arguments
+     * @return sql call
+     * @throws SQLException if the argument list does not hold exactly one argument
+     */
+    public static String sqlltrim(List<?> parsedArgs) throws SQLException {
+        return singleArgumentFunctionCall("trim(leading from ", "ltrim", parsedArgs);
+    }
+
+    /**
+     * right to substring translation:
+     * {@code substring(arg0 from (length(arg0)+1-arg1))}.
+     *
+     * @param parsedArgs arguments (string, length)
+     * @return sql call
+     * @throws SQLException if the argument list does not hold exactly two arguments
+     */
+    public static String sqlright(List<?> parsedArgs) throws SQLException {
+        if (parsedArgs.size() != 2) {
+            throw new PSQLException(GT.tr("{0} function takes two and only two arguments.", "right"),
+                    PSQLState.SYNTAX_ERROR);
+        }
+        Object source = parsedArgs.get(0);
+        return "substring(" + source + " from (length(" + source + ")+1-" + parsedArgs.get(1) + "))";
+    }
+
+    /**
+     * rtrim translation.
+     *
+     * @param parsedArgs arguments
+     * @return sql call
+     * @throws SQLException if the argument list does not hold exactly one argument
+     */
+    public static String sqlrtrim(List<?> parsedArgs) throws SQLException {
+        return singleArgumentFunctionCall("trim(trailing from ", "rtrim", parsedArgs);
+    }
+
+    /**
+     * space translation: {@code repeat(' ', n)}.
+     *
+     * @param parsedArgs arguments
+     * @return sql call
+     * @throws SQLException if the argument list does not hold exactly one argument
+     */
+    public static String sqlspace(List<?> parsedArgs) throws SQLException {
+        return singleArgumentFunctionCall("repeat(' ',", "space", parsedArgs);
+    }
+
+    /**
+     * substring to substr translation.
+     *
+     * @param parsedArgs arguments
+     * @return sql call
+     * @throws SQLException if something wrong happens
+     */
+    public static String sqlsubstring(List<?> parsedArgs) throws SQLException {
+        if (parsedArgs.size() == 2) {
+            return "substr(" + parsedArgs.get(0) + "," + parsedArgs.get(1) + ")";
+        } else if (parsedArgs.size() == 3) {
+            return "substr(" + parsedArgs.get(0) + "," + parsedArgs.get(1) + "," + parsedArgs.get(2)
+                    + ")";
+        } else {
+            throw new PSQLException(GT.tr("{0} function takes two or three arguments.", "substring"),
+                    PSQLState.SYNTAX_ERROR);
+        }
+    }
+
+    /**
+     * ucase to upper translation.
+     *
+     * @param parsedArgs arguments
+     * @return sql call
+     * @throws SQLException if the argument list does not hold exactly one argument
+     */
+    public static String sqlucase(List<?> parsedArgs) throws SQLException {
+        return singleArgumentFunctionCall("upper(", "ucase", parsedArgs);
+    }
+
+    /**
+     * curdate to current_date translation.
+     *
+     * @param parsedArgs arguments (must be empty)
+     * @return sql call
+     * @throws SQLException if any argument is supplied
+     */
+    public static String sqlcurdate(List<?> parsedArgs) throws SQLException {
+        if (!parsedArgs.isEmpty()) {
+            throw new PSQLException(GT.tr("{0} function doesn''t take any argument.", "curdate"),
+                    PSQLState.SYNTAX_ERROR);
+        }
+        return "current_date";
+    }
+
+    /**
+     * curtime to current_time translation.
+     *
+     * @param parsedArgs arguments (must be empty)
+     * @return sql call
+     * @throws SQLException if any argument is supplied
+     */
+    public static String sqlcurtime(List<?> parsedArgs) throws SQLException {
+        if (!parsedArgs.isEmpty()) {
+            throw new PSQLException(GT.tr("{0} function doesn''t take any argument.", "curtime"),
+                    PSQLState.SYNTAX_ERROR);
+        }
+        return "current_time";
+    }
+
+    /**
+     * dayname translation via {@code to_char(arg, 'Day')}.
+     *
+     * @param parsedArgs arguments
+     * @return sql call
+     * @throws SQLException if the argument list does not hold exactly one argument
+     */
+    public static String sqldayname(List<?> parsedArgs) throws SQLException {
+        if (parsedArgs.size() != 1) {
+            throw new PSQLException(GT.tr("{0} function takes one and only one argument.", "dayname"),
+                    PSQLState.SYNTAX_ERROR);
+        }
+        return "to_char(" + parsedArgs.get(0) + ",'Day')";
+    }
+
+    /**
+     * dayofmonth translation.
+     *
+     * @param parsedArgs arguments
+     * @return sql call
+     * @throws SQLException if the argument list does not hold exactly one argument
+     */
+    public static String sqldayofmonth(List<?> parsedArgs) throws SQLException {
+        return singleArgumentFunctionCall("extract(day from ", "dayofmonth", parsedArgs);
+    }
+
+    /**
+     * dayofweek translation. PostgreSQL's {@code extract(dow ...)} is 0-based,
+     * so 1 is added to yield the JDBC range 1..7.
+     *
+     * @param parsedArgs arguments
+     * @return sql call
+     * @throws SQLException if the argument list does not hold exactly one argument
+     */
+    public static String sqldayofweek(List<?> parsedArgs) throws SQLException {
+        if (parsedArgs.size() != 1) {
+            throw new PSQLException(GT.tr("{0} function takes one and only one argument.", "dayofweek"),
+                    PSQLState.SYNTAX_ERROR);
+        }
+        return "extract(dow from " + parsedArgs.get(0) + ")+1";
+    }
+
+    /**
+     * dayofyear translation.
+     *
+     * @param parsedArgs arguments
+     * @return sql call
+     * @throws SQLException if the argument list does not hold exactly one argument
+     */
+    public static String sqldayofyear(List<?> parsedArgs) throws SQLException {
+        return singleArgumentFunctionCall("extract(doy from ", "dayofyear", parsedArgs);
+    }
+
+    /**
+     * hour translation.
+     *
+     * @param parsedArgs arguments
+     * @return sql call
+     * @throws SQLException if the argument list does not hold exactly one argument
+     */
+    public static String sqlhour(List<?> parsedArgs) throws SQLException {
+        return singleArgumentFunctionCall("extract(hour from ", "hour", parsedArgs);
+    }
+
+    /**
+     * minute translation.
+     *
+     * @param parsedArgs arguments
+     * @return sql call
+     * @throws SQLException if the argument list does not hold exactly one argument
+     */
+    public static String sqlminute(List<?> parsedArgs) throws SQLException {
+        return singleArgumentFunctionCall("extract(minute from ", "minute", parsedArgs);
+    }
+
+    /**
+     * month translation.
+     *
+     * @param parsedArgs arguments
+     * @return sql call
+     * @throws SQLException if the argument list does not hold exactly one argument
+     */
+    public static String sqlmonth(List<?> parsedArgs) throws SQLException {
+        return singleArgumentFunctionCall("extract(month from ", "month", parsedArgs);
+    }
+
+    /**
+     * monthname translation via {@code to_char(arg, 'Month')}.
+     *
+     * @param parsedArgs arguments
+     * @return sql call
+     * @throws SQLException if the argument list does not hold exactly one argument
+     */
+    public static String sqlmonthname(List<?> parsedArgs) throws SQLException {
+        if (parsedArgs.size() != 1) {
+            throw new PSQLException(GT.tr("{0} function takes one and only one argument.", "monthname"),
+                    PSQLState.SYNTAX_ERROR);
+        }
+        return "to_char(" + parsedArgs.get(0) + ",'Month')";
+    }
+
+    /**
+     * quarter translation.
+     *
+     * @param parsedArgs arguments
+     * @return sql call
+     * @throws SQLException if the argument list does not hold exactly one argument
+     */
+    public static String sqlquarter(List<?> parsedArgs) throws SQLException {
+        return singleArgumentFunctionCall("extract(quarter from ", "quarter", parsedArgs);
+    }
+
+    /**
+     * second translation.
+     *
+     * @param parsedArgs arguments
+     * @return sql call
+     * @throws SQLException if the argument list does not hold exactly one argument
+     */
+    public static String sqlsecond(List<?> parsedArgs) throws SQLException {
+        return singleArgumentFunctionCall("extract(second from ", "second", parsedArgs);
+    }
+
+    /**
+     * week translation.
+     *
+     * @param parsedArgs arguments
+     * @return sql call
+     * @throws SQLException if the argument list does not hold exactly one argument
+     */
+    public static String sqlweek(List<?> parsedArgs) throws SQLException {
+        return singleArgumentFunctionCall("extract(week from ", "week", parsedArgs);
+    }
+
+    /**
+     * year translation.
+     *
+     * @param parsedArgs arguments
+     * @return sql call
+     * @throws SQLException if the argument list does not hold exactly one argument
+     */
+    public static String sqlyear(List<?> parsedArgs) throws SQLException {
+        return singleArgumentFunctionCall("extract(year from ", "year", parsedArgs);
+    }
+
+    /**
+     * timestampadd translation: builds {@code (interval + ts)} where the interval
+     * is derived from the SQL_TSI_* constant via constantToInterval.
+     *
+     * @param parsedArgs arguments (SQL_TSI_* constant, count, timestamp)
+     * @return sql call
+     * @throws SQLException if the argument count is not three or the interval is unsupported
+     */
+    @SuppressWarnings("TypeParameterExplicitlyExtendsObject")
+    public static String sqltimestampadd(List<? extends Object> parsedArgs) throws SQLException {
+        if (parsedArgs.size() != 3) {
+            throw new PSQLException(
+                    GT.tr("{0} function takes three and only three arguments.", "timestampadd"),
+                    PSQLState.SYNTAX_ERROR);
+        }
+        String interval = EscapedFunctions.constantToInterval(parsedArgs.get(0).toString(),
+                parsedArgs.get(1).toString());
+        return "(" + interval + "+" + parsedArgs.get(2) + ")";
+    }
+
+    /**
+     * Converts a JDBC SQL_TSI_* interval constant plus a magnitude expression into
+     * a PostgreSQL interval cast, e.g. {@code CAST(n || ' day' as interval)}.
+     * Quarters are expressed as three months.
+     *
+     * @param type the SQL_TSI_* constant name
+     * @param value the interval magnitude expression
+     * @return a PostgreSQL interval expression
+     * @throws SQLException for unknown types and for SQL_TSI_FRAC_SECOND (unsupported)
+     */
+    private static String constantToInterval(String type, String value) throws SQLException {
+        if (!type.startsWith(SQL_TSI_ROOT)) {
+            throw new PSQLException(GT.tr("Interval {0} not yet implemented", type),
+                    PSQLState.SYNTAX_ERROR);
+        }
+        String shortType = type.substring(SQL_TSI_ROOT.length());
+        if (SQL_TSI_DAY.equalsIgnoreCase(shortType)) {
+            return "CAST(" + value + " || ' day' as interval)";
+        } else if (SQL_TSI_SECOND.equalsIgnoreCase(shortType)) {
+            return "CAST(" + value + " || ' second' as interval)";
+        } else if (SQL_TSI_HOUR.equalsIgnoreCase(shortType)) {
+            return "CAST(" + value + " || ' hour' as interval)";
+        } else if (SQL_TSI_MINUTE.equalsIgnoreCase(shortType)) {
+            return "CAST(" + value + " || ' minute' as interval)";
+        } else if (SQL_TSI_MONTH.equalsIgnoreCase(shortType)) {
+            return "CAST(" + value + " || ' month' as interval)";
+        } else if (SQL_TSI_QUARTER.equalsIgnoreCase(shortType)) {
+            return "CAST((" + value + "::int * 3) || ' month' as interval)";
+        } else if (SQL_TSI_WEEK.equalsIgnoreCase(shortType)) {
+            return "CAST(" + value + " || ' week' as interval)";
+        } else if (SQL_TSI_YEAR.equalsIgnoreCase(shortType)) {
+            return "CAST(" + value + " || ' year' as interval)";
+        } else if (SQL_TSI_FRAC_SECOND.equalsIgnoreCase(shortType)) {
+            throw new PSQLException(GT.tr("Interval {0} not yet implemented", "SQL_TSI_FRAC_SECOND"),
+                    PSQLState.SYNTAX_ERROR);
+        } else {
+            throw new PSQLException(GT.tr("Interval {0} not yet implemented", type),
+                    PSQLState.SYNTAX_ERROR);
+        }
+    }
+
+    /**
+     * timestampdiff translation: extracts the requested date part from the
+     * difference {@code (arg2 - arg1)}.
+     *
+     * @param parsedArgs arguments (SQL_TSI_* constant, start, end)
+     * @return sql call
+     * @throws SQLException if the argument count is not three or the date part is unsupported
+     */
+    @SuppressWarnings("TypeParameterExplicitlyExtendsObject")
+    public static String sqltimestampdiff(List<? extends Object> parsedArgs) throws SQLException {
+        if (parsedArgs.size() != 3) {
+            throw new PSQLException(
+                    GT.tr("{0} function takes three and only three arguments.", "timestampdiff"),
+                    PSQLState.SYNTAX_ERROR);
+        }
+        String datePart = EscapedFunctions.constantToDatePart(parsedArgs.get(0).toString());
+        return "extract( " + datePart + " from (" + parsedArgs.get(2) + "-" + parsedArgs.get(1) + "))";
+    }
+
+    /**
+     * Maps a JDBC SQL_TSI_* constant to the PostgreSQL {@code extract} field name
+     * used by sqltimestampdiff. Only day/second/hour/minute are supported; the
+     * commented-out block below records why month/quarter/week/year are not
+     * (extract on an interval difference does not yield calendar-aware results).
+     *
+     * @param type the SQL_TSI_* constant name
+     * @return the extract field name
+     * @throws SQLException for unsupported or unknown interval types
+     */
+    private static String constantToDatePart(String type) throws SQLException {
+        if (!type.startsWith(SQL_TSI_ROOT)) {
+            throw new PSQLException(GT.tr("Interval {0} not yet implemented", type),
+                    PSQLState.SYNTAX_ERROR);
+        }
+        String shortType = type.substring(SQL_TSI_ROOT.length());
+        if (SQL_TSI_DAY.equalsIgnoreCase(shortType)) {
+            return "day";
+        } else if (SQL_TSI_SECOND.equalsIgnoreCase(shortType)) {
+            return "second";
+        } else if (SQL_TSI_HOUR.equalsIgnoreCase(shortType)) {
+            return "hour";
+        } else if (SQL_TSI_MINUTE.equalsIgnoreCase(shortType)) {
+            return "minute";
+        } else if (SQL_TSI_FRAC_SECOND.equalsIgnoreCase(shortType)) {
+            throw new PSQLException(GT.tr("Interval {0} not yet implemented", "SQL_TSI_FRAC_SECOND"),
+                    PSQLState.SYNTAX_ERROR);
+        } else {
+            throw new PSQLException(GT.tr("Interval {0} not yet implemented", type),
+                    PSQLState.SYNTAX_ERROR);
+        }
+        // See http://archives.postgresql.org/pgsql-jdbc/2006-03/msg00096.php
+        /*
+         * else if (SQL_TSI_MONTH.equalsIgnoreCase(shortType)) return "month"; else if
+         * (SQL_TSI_QUARTER.equalsIgnoreCase(shortType)) return "quarter"; else if
+         * (SQL_TSI_WEEK.equalsIgnoreCase(shortType)) return "week"; else if
+         * (SQL_TSI_YEAR.equalsIgnoreCase(shortType)) return "year";
+         */
+    }
+
+    /**
+     * database to current_database() translation.
+     *
+     * @param parsedArgs arguments (must be empty)
+     * @return sql call
+     * @throws SQLException if any argument is supplied
+     */
+    public static String sqldatabase(List<?> parsedArgs) throws SQLException {
+        if (!parsedArgs.isEmpty()) {
+            throw new PSQLException(GT.tr("{0} function doesn''t take any argument.", "database"),
+                    PSQLState.SYNTAX_ERROR);
+        }
+        return "current_database()";
+    }
+
+    /**
+     * ifnull to coalesce translation.
+     *
+     * @param parsedArgs arguments
+     * @return sql call
+     * @throws SQLException if the argument list does not hold exactly two arguments
+     */
+    public static String sqlifnull(List<?> parsedArgs) throws SQLException {
+        return twoArgumentsFunctionCall("coalesce(", "ifnull", parsedArgs);
+    }
+
+    /**
+     * user translation (maps to the SQL {@code user} pseudo-function).
+     *
+     * @param parsedArgs arguments (must be empty)
+     * @return sql call
+     * @throws SQLException if any argument is supplied
+     */
+    public static String sqluser(List<?> parsedArgs) throws SQLException {
+        if (!parsedArgs.isEmpty()) {
+            throw new PSQLException(GT.tr("{0} function doesn''t take any argument.", "user"),
+                    PSQLState.SYNTAX_ERROR);
+        }
+        return "user";
+    }
+
+    /**
+     * Renders {@code call + arg0 + ')'} after checking that exactly one
+     * argument was supplied; the caller provides the open parenthesis in call.
+     *
+     * @param call the target function name including the opening parenthesis
+     * @param functionName the escape-function name used in error messages
+     * @param parsedArgs arguments
+     * @return sql call
+     * @throws PSQLException if the argument list does not hold exactly one argument
+     */
+    private static String singleArgumentFunctionCall(String call, String functionName,
+                                                     List<?> parsedArgs) throws PSQLException {
+        if (parsedArgs.size() != 1) {
+            throw new PSQLException(GT.tr("{0} function takes one and only one argument.", functionName),
+                    PSQLState.SYNTAX_ERROR);
+        }
+        return call + parsedArgs.get(0) + ')';
+    }
+
+    /**
+     * Renders {@code call + arg0 + ',' + arg1 + ')'} after checking that exactly
+     * two arguments were supplied; the caller provides the open parenthesis in call.
+     *
+     * @param call the target function name including the opening parenthesis
+     * @param functionName the escape-function name used in error messages
+     * @param parsedArgs arguments
+     * @return sql call
+     * @throws PSQLException if the argument list does not hold exactly two arguments
+     */
+    private static String twoArgumentsFunctionCall(String call, String functionName,
+                                                   List<?> parsedArgs) throws PSQLException {
+        if (parsedArgs.size() != 2) {
+            throw new PSQLException(GT.tr("{0} function takes two and only two arguments.", functionName),
+                    PSQLState.SYNTAX_ERROR);
+        }
+        return call + parsedArgs.get(0) + ',' + parsedArgs.get(1) + ')';
    }
-    StringBuilder buf = new StringBuilder();
-    buf.append(call);
-    buf.append(parsedArgs.get(0)).append(',').append(parsedArgs.get(1));
-    return buf.append(')').toString();
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/EscapedFunctions2.java b/pgjdbc/src/main/java/org/postgresql/jdbc/EscapedFunctions2.java
index aa8a748..7aab8f1 100644
--- a/pgjdbc/src/main/java/org/postgresql/jdbc/EscapedFunctions2.java
+++ b/pgjdbc/src/main/java/org/postgresql/jdbc/EscapedFunctions2.java
@@ -21,685 +21,688 @@ import java.util.concurrent.ConcurrentMap;
  * Note: this is a pgjdbc-internal class, so it is not supposed to be used outside of the driver.
  */
 public final class EscapedFunctions2 {
-  // constants for timestampadd and timestampdiff
-  private static final String SQL_TSI_ROOT = "SQL_TSI_";
-  private static final String SQL_TSI_DAY = "SQL_TSI_DAY";
-  private static final String SQL_TSI_FRAC_SECOND = "SQL_TSI_FRAC_SECOND";
-  private static final String SQL_TSI_HOUR = "SQL_TSI_HOUR";
-  private static final String SQL_TSI_MINUTE = "SQL_TSI_MINUTE";
-  private static final String SQL_TSI_MONTH = "SQL_TSI_MONTH";
-  private static final String SQL_TSI_QUARTER = "SQL_TSI_QUARTER";
-  private static final String SQL_TSI_SECOND = "SQL_TSI_SECOND";
-  private static final String SQL_TSI_WEEK = "SQL_TSI_WEEK";
-  private static final String SQL_TSI_YEAR = "SQL_TSI_YEAR";
+    // constants for timestampadd and timestampdiff
+    private static final String SQL_TSI_ROOT = "SQL_TSI_";
+    private static final String SQL_TSI_DAY = "SQL_TSI_DAY";
+    private static final String SQL_TSI_FRAC_SECOND = "SQL_TSI_FRAC_SECOND";
+    private static final String SQL_TSI_HOUR = "SQL_TSI_HOUR";
+    private static final String SQL_TSI_MINUTE = "SQL_TSI_MINUTE";
+    private static final String SQL_TSI_MONTH = "SQL_TSI_MONTH";
+    private static final String SQL_TSI_QUARTER = "SQL_TSI_QUARTER";
+    private static final String SQL_TSI_SECOND = "SQL_TSI_SECOND";
+    private static final String SQL_TSI_WEEK = "SQL_TSI_WEEK";
+    private static final String SQL_TSI_YEAR = "SQL_TSI_YEAR";
 
-  /**
-   * storage for functions implementations
-   */
-  private static final ConcurrentMap<String, Method> FUNCTION_MAP = createFunctionMap("sql");
-
-  public EscapedFunctions2() {
-  }
-
-  private static ConcurrentMap<String, Method> createFunctionMap(String prefix) {
-    Method[] methods = EscapedFunctions2.class.getMethods();
-    ConcurrentMap<String, Method> functionMap = new ConcurrentHashMap<>(methods.length * 2);
-    for (Method method : methods) {
-      if (method.getName().startsWith(prefix)) {
-        functionMap.put(method.getName().substring(prefix.length()).toLowerCase(Locale.US), method);
-      }
-    }
-    return functionMap;
-  }
-
-  /**
-   * get Method object implementing the given function
-   *
-   * @param functionName name of the searched function
-   * @return a Method object or null if not found
-   */
-  public static Method getFunction(String functionName) {
-    Method method = FUNCTION_MAP.get(functionName);
-    if (method != null) {
-      return method;
-    }
-    //FIXME: this probably should not use the US locale
-    String nameLower = functionName.toLowerCase(Locale.US);
-    if (nameLower.equals(functionName)) {
-      // Input name was in lower case, the function is not there
-      return null;
-    }
-    method = FUNCTION_MAP.get(nameLower);
-    if (method != null && FUNCTION_MAP.size() < 1000) {
-      // Avoid OutOfMemoryError in case input function names are randomized
-      // The number of methods is finite, however the number of upper-lower case combinations
-      // is quite a few (e.g. substr, Substr, sUbstr, SUbstr, etc).
-      FUNCTION_MAP.putIfAbsent(functionName, method);
-    }
-    return method;
-  }
-
-  // ** numeric functions translations **
-
-  /**
-   * ceiling to ceil translation
-   *
-   * @param buf The buffer to append into
-   * @param parsedArgs arguments
-   * @throws SQLException if something wrong happens
-   */
-  public static void sqlceiling(StringBuilder buf, List<? extends CharSequence> parsedArgs) throws SQLException {
-    singleArgumentFunctionCall(buf, "ceil(", "ceiling", parsedArgs);
-  }
-
-  /**
-   * log to ln translation
-   *
-   * @param buf The buffer to append into
-   * @param parsedArgs arguments
-   * @throws SQLException if something wrong happens
-   */
-  public static void sqllog(StringBuilder buf, List<? extends CharSequence> parsedArgs) throws SQLException {
-    singleArgumentFunctionCall(buf, "ln(", "log", parsedArgs);
-  }
-
-  /**
-   * log10 to log translation
-   *
-   * @param buf The buffer to append into
-   * @param parsedArgs arguments
-   * @throws SQLException if something wrong happens
-   */
-  public static void sqllog10(StringBuilder buf, List<? extends CharSequence> parsedArgs) throws SQLException {
-    singleArgumentFunctionCall(buf, "log(", "log10", parsedArgs);
-  }
-
-  /**
-   * power to pow translation
-   *
-   * @param buf The buffer to append into
-   * @param parsedArgs arguments
-   * @throws SQLException if something wrong happens
-   */
-  public static void sqlpower(StringBuilder buf, List<? extends CharSequence> parsedArgs) throws SQLException {
-    twoArgumentsFunctionCall(buf, "pow(", "power", parsedArgs);
-  }
-
-  /**
-   * truncate to trunc translation
-   *
-   * @param buf The buffer to append into
-   * @param parsedArgs arguments
-   * @throws SQLException if something wrong happens
-   */
-  public static void sqltruncate(StringBuilder buf, List<? extends CharSequence> parsedArgs) throws SQLException {
-    twoArgumentsFunctionCall(buf, "trunc(", "truncate", parsedArgs);
-  }
-
-  // ** string functions translations **
-
-  /**
-   * char to chr translation
-   *
-   * @param buf The buffer to append into
-   * @param parsedArgs arguments
-   * @throws SQLException if something wrong happens
-   */
-  public static void sqlchar(StringBuilder buf, List<? extends CharSequence> parsedArgs) throws SQLException {
-    singleArgumentFunctionCall(buf, "chr(", "char", parsedArgs);
-  }
-
-  /**
-   * concat translation
-   *
-   * @param buf The buffer to append into
-   * @param parsedArgs arguments
-   */
-  public static void sqlconcat(StringBuilder buf, List<? extends CharSequence> parsedArgs) {
-    appendCall(buf, "(", "||", ")", parsedArgs);
-  }
-
-  /**
-   * insert to overlay translation
-   *
-   * @param buf The buffer to append into
-   * @param parsedArgs arguments
-   * @throws SQLException if something wrong happens
-   */
-  public static void sqlinsert(StringBuilder buf, List<? extends CharSequence> parsedArgs) throws SQLException {
-    if (parsedArgs.size() != 4) {
-      throw new PSQLException(GT.tr("{0} function takes four and only four argument.", "insert"),
-          PSQLState.SYNTAX_ERROR);
-    }
-    buf.append("overlay(");
-    buf.append(parsedArgs.get(0)).append(" placing ").append(parsedArgs.get(3));
-    buf.append(" from ").append(parsedArgs.get(1)).append(" for ").append(parsedArgs.get(2));
-    buf.append(')');
-  }
-
-  /**
-   * lcase to lower translation
-   *
-   * @param buf The buffer to append into
-   * @param parsedArgs arguments
-   * @throws SQLException if something wrong happens
-   */
-  public static void sqllcase(StringBuilder buf, List<? extends CharSequence> parsedArgs) throws SQLException {
-    singleArgumentFunctionCall(buf, "lower(", "lcase", parsedArgs);
-  }
-
-  /**
-   * left to substring translation
-   *
-   * @param buf The buffer to append into
-   * @param parsedArgs arguments
-   * @throws SQLException if something wrong happens
-   */
-  public static void sqlleft(StringBuilder buf, List<? extends CharSequence> parsedArgs) throws SQLException {
-    if (parsedArgs.size() != 2) {
-      throw new PSQLException(GT.tr("{0} function takes two and only two arguments.", "left"),
-          PSQLState.SYNTAX_ERROR);
-    }
-    appendCall(buf, "substring(", " for ", ")", parsedArgs);
-  }
-
-  /**
-   * length translation
-   *
-   * @param buf The buffer to append into
-   * @param parsedArgs arguments
-   * @throws SQLException if something wrong happens
-   */
-  public static void sqllength(StringBuilder buf, List<? extends CharSequence> parsedArgs) throws SQLException {
-    if (parsedArgs.size() != 1) {
-      throw new PSQLException(GT.tr("{0} function takes one and only one argument.", "length"),
-          PSQLState.SYNTAX_ERROR);
-    }
-    appendCall(buf, "length(trim(trailing from ", "", "))", parsedArgs);
-  }
-
-  /**
-   * locate translation
-   *
-   * @param buf The buffer to append into
-   * @param parsedArgs arguments
-   * @throws SQLException if something wrong happens
-   */
-  public static void sqllocate(StringBuilder buf, List<? extends CharSequence> parsedArgs) throws SQLException {
-    if (parsedArgs.size() == 2) {
-      appendCall(buf, "position(", " in ", ")", parsedArgs);
-    } else if (parsedArgs.size() == 3) {
-      String tmp = "position(" + parsedArgs.get(0) + " in substring(" + parsedArgs.get(1) + " from "
-          + parsedArgs.get(2) + "))";
-      buf.append("(")
-          .append(parsedArgs.get(2))
-          .append("*sign(")
-          .append(tmp)
-          .append(")+")
-          .append(tmp)
-          .append(")");
-    } else {
-      throw new PSQLException(GT.tr("{0} function takes two or three arguments.", "locate"),
-          PSQLState.SYNTAX_ERROR);
-    }
-  }
-
-  /**
-   * ltrim translation
-   *
-   * @param buf The buffer to append into
-   * @param parsedArgs arguments
-   * @throws SQLException if something wrong happens
-   */
-  public static void sqlltrim(StringBuilder buf, List<? extends CharSequence> parsedArgs) throws SQLException {
-    singleArgumentFunctionCall(buf, "trim(leading from ", "ltrim", parsedArgs);
-  }
-
-  /**
-   * right to substring translation
-   *
-   * @param buf The buffer to append into
-   * @param parsedArgs arguments
-   * @throws SQLException if something wrong happens
-   */
-  public static void sqlright(StringBuilder buf, List<? extends CharSequence> parsedArgs) throws SQLException {
-    if (parsedArgs.size() != 2) {
-      throw new PSQLException(GT.tr("{0} function takes two and only two arguments.", "right"),
-          PSQLState.SYNTAX_ERROR);
-    }
-    buf.append("substring(");
-    buf.append(parsedArgs.get(0))
-        .append(" from (length(")
-        .append(parsedArgs.get(0))
-        .append(")+1-")
-        .append(parsedArgs.get(1));
-    buf.append("))");
-  }
-
-  /**
-   * rtrim translation
-   *
-   * @param buf The buffer to append into
-   * @param parsedArgs arguments
-   * @throws SQLException if something wrong happens
-   */
-  public static void sqlrtrim(StringBuilder buf, List<? extends CharSequence> parsedArgs) throws SQLException {
-    singleArgumentFunctionCall(buf, "trim(trailing from ", "rtrim", parsedArgs);
-  }
-
-  /**
-   * space translation
-   *
-   * @param buf The buffer to append into
-   * @param parsedArgs arguments
-   * @throws SQLException if something wrong happens
-   */
-  public static void sqlspace(StringBuilder buf, List<? extends CharSequence> parsedArgs) throws SQLException {
-    singleArgumentFunctionCall(buf, "repeat(' ',", "space", parsedArgs);
-  }
-
-  /**
-   * substring to substr translation
-   *
-   * @param buf The buffer to append into
-   * @param parsedArgs arguments
-   * @throws SQLException if something wrong happens
-   */
-  public static void sqlsubstring(StringBuilder buf, List<? extends CharSequence> parsedArgs) throws SQLException {
-    int argSize = parsedArgs.size();
-    if (argSize != 2 && argSize != 3) {
-      throw new PSQLException(GT.tr("{0} function takes two or three arguments.", "substring"),
-          PSQLState.SYNTAX_ERROR);
-    }
-    appendCall(buf, "substr(", ",", ")", parsedArgs);
-  }
-
-  /**
-   * ucase to upper translation
-   *
-   * @param buf The buffer to append into
-   * @param parsedArgs arguments
-   * @throws SQLException if something wrong happens
-   */
-  public static void sqlucase(StringBuilder buf, List<? extends CharSequence> parsedArgs) throws SQLException {
-    singleArgumentFunctionCall(buf, "upper(", "ucase", parsedArgs);
-  }
-
-  /**
-   * curdate to current_date translation
-   *
-   * @param buf The buffer to append into
-   * @param parsedArgs arguments
-   * @throws SQLException if something wrong happens
-   */
-  public static void sqlcurdate(StringBuilder buf, List<? extends CharSequence> parsedArgs) throws SQLException {
-    zeroArgumentFunctionCall(buf, "current_date", "curdate", parsedArgs);
-  }
-
-  /**
-   * curtime to current_time translation
-   *
-   * @param buf The buffer to append into
-   * @param parsedArgs arguments
-   * @throws SQLException if something wrong happens
-   */
-  public static void sqlcurtime(StringBuilder buf, List<? extends CharSequence> parsedArgs) throws SQLException {
-    zeroArgumentFunctionCall(buf, "current_time", "curtime", parsedArgs);
-  }
-
-  /**
-   * dayname translation
-   *
-   * @param buf The buffer to append into
-   * @param parsedArgs arguments
-   * @throws SQLException if something wrong happens
-   */
-  public static void sqldayname(StringBuilder buf, List<? extends CharSequence> parsedArgs) throws SQLException {
-    if (parsedArgs.size() != 1) {
-      throw new PSQLException(GT.tr("{0} function takes one and only one argument.", "dayname"),
-          PSQLState.SYNTAX_ERROR);
-    }
-    appendCall(buf, "to_char(", ",", ",'Day')", parsedArgs);
-  }
-
-  /**
-   * dayofmonth translation
-   *
-   * @param buf The buffer to append into
-   * @param parsedArgs arguments
-   * @throws SQLException if something wrong happens
-   */
-  public static void sqldayofmonth(StringBuilder buf, List<? extends CharSequence> parsedArgs) throws SQLException {
-    singleArgumentFunctionCall(buf, "extract(day from ", "dayofmonth", parsedArgs);
-  }
-
-  /**
-   * dayofweek translation adding 1 to postgresql function since we expect values from 1 to 7
-   *
-   * @param buf The buffer to append into
-   * @param parsedArgs arguments
-   * @throws SQLException if something wrong happens
-   */
-  public static void sqldayofweek(StringBuilder buf, List<? extends CharSequence> parsedArgs) throws SQLException {
-    if (parsedArgs.size() != 1) {
-      throw new PSQLException(GT.tr("{0} function takes one and only one argument.", "dayofweek"),
-          PSQLState.SYNTAX_ERROR);
-    }
-    appendCall(buf, "extract(dow from ", ",", ")+1", parsedArgs);
-  }
-
-  /**
-   * dayofyear translation
-   *
-   * @param buf The buffer to append into
-   * @param parsedArgs arguments
-   * @throws SQLException if something wrong happens
-   */
-  public static void sqldayofyear(StringBuilder buf, List<? extends CharSequence> parsedArgs) throws SQLException {
-    singleArgumentFunctionCall(buf, "extract(doy from ", "dayofyear", parsedArgs);
-  }
-
-  /**
-   * hour translation
-   *
-   * @param buf The buffer to append into
-   * @param parsedArgs arguments
-   * @throws SQLException if something wrong happens
-   */
-  public static void sqlhour(StringBuilder buf, List<? extends CharSequence> parsedArgs) throws SQLException {
-    singleArgumentFunctionCall(buf, "extract(hour from ", "hour", parsedArgs);
-  }
-
-  /**
-   * minute translation
-   *
-   * @param buf The buffer to append into
-   * @param parsedArgs arguments
-   * @throws SQLException if something wrong happens
-   */
-  public static void sqlminute(StringBuilder buf, List<? extends CharSequence> parsedArgs) throws SQLException {
-    singleArgumentFunctionCall(buf, "extract(minute from ", "minute", parsedArgs);
-  }
-
-  /**
-   * month translation
-   *
-   * @param buf The buffer to append into
-   * @param parsedArgs arguments
-   * @throws SQLException if something wrong happens
-   */
-  public static void sqlmonth(StringBuilder buf, List<? extends CharSequence> parsedArgs) throws SQLException {
-    singleArgumentFunctionCall(buf, "extract(month from ", "month", parsedArgs);
-  }
-
-  /**
-   * monthname translation
-   *
-   * @param buf The buffer to append into
-   * @param parsedArgs arguments
-   * @throws SQLException if something wrong happens
-   */
-  public static void sqlmonthname(StringBuilder buf, List<? extends CharSequence> parsedArgs) throws SQLException {
-    if (parsedArgs.size() != 1) {
-      throw new PSQLException(GT.tr("{0} function takes one and only one argument.", "monthname"),
-          PSQLState.SYNTAX_ERROR);
-    }
-    appendCall(buf, "to_char(", ",", ",'Month')", parsedArgs);
-  }
-
-  /**
-   * quarter translation
-   *
-   * @param buf The buffer to append into
-   * @param parsedArgs arguments
-   * @throws SQLException if something wrong happens
-   */
-  public static void sqlquarter(StringBuilder buf, List<? extends CharSequence> parsedArgs) throws SQLException {
-    singleArgumentFunctionCall(buf, "extract(quarter from ", "quarter", parsedArgs);
-  }
-
-  /**
-   * second translation
-   *
-   * @param buf The buffer to append into
-   * @param parsedArgs arguments
-   * @throws SQLException if something wrong happens
-   */
-  public static void sqlsecond(StringBuilder buf, List<? extends CharSequence> parsedArgs) throws SQLException {
-    singleArgumentFunctionCall(buf, "extract(second from ", "second", parsedArgs);
-  }
-
-  /**
-   * week translation
-   *
-   * @param buf The buffer to append into
-   * @param parsedArgs arguments
-   * @throws SQLException if something wrong happens
-   */
-  public static void sqlweek(StringBuilder buf, List<? extends CharSequence> parsedArgs) throws SQLException {
-    singleArgumentFunctionCall(buf, "extract(week from ", "week", parsedArgs);
-  }
-
-  /**
-   * year translation
-   *
-   * @param buf The buffer to append into
-   * @param parsedArgs arguments
-   * @throws SQLException if something wrong happens
-   */
-  public static void sqlyear(StringBuilder buf, List<? extends CharSequence> parsedArgs) throws SQLException {
-    singleArgumentFunctionCall(buf, "extract(year from ", "year", parsedArgs);
-  }
-
-  /**
-   * time stamp add
-   *
-   * @param buf The buffer to append into
-   * @param parsedArgs arguments
-   * @throws SQLException if something wrong happens
-   */
-  public static void sqltimestampadd(StringBuilder buf, List<? extends CharSequence> parsedArgs) throws SQLException {
-    if (parsedArgs.size() != 3) {
-      throw new PSQLException(
-          GT.tr("{0} function takes three and only three arguments.", "timestampadd"),
-          PSQLState.SYNTAX_ERROR);
-    }
-    buf.append('(');
-    appendInterval(buf, parsedArgs.get(0).toString(), parsedArgs.get(1).toString());
-    buf.append('+').append(parsedArgs.get(2)).append(')');
-  }
-
-  private static void appendInterval(StringBuilder buf, String type, String value) throws SQLException {
-    if (!isTsi(type)) {
-      throw new PSQLException(GT.tr("Interval {0} not yet implemented", type),
-          PSQLState.SYNTAX_ERROR);
-    }
-    if (appendSingleIntervalCast(buf, SQL_TSI_DAY, type, value, "day")
-        || appendSingleIntervalCast(buf, SQL_TSI_SECOND, type, value, "second")
-        || appendSingleIntervalCast(buf, SQL_TSI_HOUR, type, value, "hour")
-        || appendSingleIntervalCast(buf, SQL_TSI_MINUTE, type, value, "minute")
-        || appendSingleIntervalCast(buf, SQL_TSI_MONTH, type, value, "month")
-        || appendSingleIntervalCast(buf, SQL_TSI_WEEK, type, value, "week")
-        || appendSingleIntervalCast(buf, SQL_TSI_YEAR, type, value, "year")
-    ) {
-      return;
-    }
-    if (areSameTsi(SQL_TSI_QUARTER, type)) {
-      buf.append("CAST((").append(value).append("::int * 3) || ' month' as interval)");
-      return;
-    }
-    throw new PSQLException(GT.tr("Interval {0} not yet implemented", type),
-        PSQLState.NOT_IMPLEMENTED);
-  }
-
-  private static boolean appendSingleIntervalCast(StringBuilder buf, String cmp, String type, String value, String pgType) {
-    if (!areSameTsi(type, cmp)) {
-      return false;
-    }
-    buf.ensureCapacity(buf.length() + 5 + 4 + 14 + value.length() + pgType.length());
-    buf.append("CAST(").append(value).append("||' ").append(pgType).append("' as interval)");
-    return true;
-  }
-
-  /**
-   * Compares two TSI intervals. It is
-   * @param a first interval to compare
-   * @param b second interval to compare
-   * @return true when both intervals are equal (case insensitive)
-   */
-  private static boolean areSameTsi(String a, String b) {
-    return a.length() == b.length() && b.length() > SQL_TSI_ROOT.length()
-        && a.regionMatches(true, SQL_TSI_ROOT.length(), b, SQL_TSI_ROOT.length(), b.length() - SQL_TSI_ROOT.length());
-  }
-
-  /**
-   * Checks if given input starts with {@link #SQL_TSI_ROOT}
-   * @param interval input string
-   * @return true if interval.startsWithIgnoreCase(SQL_TSI_ROOT)
-   */
-  private static boolean isTsi(String interval) {
-    return interval.regionMatches(true, 0, SQL_TSI_ROOT, 0, SQL_TSI_ROOT.length());
-  }
-
-  /**
-   * time stamp diff
-   *
-   * @param buf The buffer to append into
-   * @param parsedArgs arguments
-   * @throws SQLException if something wrong happens
-   */
-  public static void sqltimestampdiff(StringBuilder buf, List<? extends CharSequence> parsedArgs) throws SQLException {
-    if (parsedArgs.size() != 3) {
-      throw new PSQLException(
-          GT.tr("{0} function takes three and only three arguments.", "timestampdiff"),
-          PSQLState.SYNTAX_ERROR);
-    }
-    buf.append("extract( ")
-        .append(constantToDatePart(buf, parsedArgs.get(0).toString()))
-        .append(" from (")
-        .append(parsedArgs.get(2))
-        .append("-")
-        .append(parsedArgs.get(1))
-        .append("))");
-  }
-
-  private static String constantToDatePart(StringBuilder buf, String type) throws SQLException {
-    if (!isTsi(type)) {
-      throw new PSQLException(GT.tr("Interval {0} not yet implemented", type),
-          PSQLState.SYNTAX_ERROR);
-    }
-    if (areSameTsi(SQL_TSI_DAY, type)) {
-      return "day";
-    } else if (areSameTsi(SQL_TSI_SECOND, type)) {
-      return "second";
-    } else if (areSameTsi(SQL_TSI_HOUR, type)) {
-      return "hour";
-    } else if (areSameTsi(SQL_TSI_MINUTE, type)) {
-      return "minute";
-    } else {
-      throw new PSQLException(GT.tr("Interval {0} not yet implemented", type),
-          PSQLState.SYNTAX_ERROR);
-    }
-    // See http://archives.postgresql.org/pgsql-jdbc/2006-03/msg00096.php
-    /*
-     * else if (SQL_TSI_MONTH.equalsIgnoreCase(shortType)) return "month"; else if
-     * (SQL_TSI_QUARTER.equalsIgnoreCase(shortType)) return "quarter"; else if
-     * (SQL_TSI_WEEK.equalsIgnoreCase(shortType)) return "week"; else if
-     * (SQL_TSI_YEAR.equalsIgnoreCase(shortType)) return "year";
+    /**
+     * storage for functions implementations
      */
-  }
+    private static final ConcurrentMap<String, Method> FUNCTION_MAP = createFunctionMap("sql");
 
-  /**
-   * database translation
-   *
-   * @param buf The buffer to append into
-   * @param parsedArgs arguments
-   * @throws SQLException if something wrong happens
-   */
-  public static void sqldatabase(StringBuilder buf, List<? extends CharSequence> parsedArgs) throws SQLException {
-    zeroArgumentFunctionCall(buf, "current_database()", "database", parsedArgs);
-  }
-
-  /**
-   * ifnull translation
-   *
-   * @param buf The buffer to append into
-   * @param parsedArgs arguments
-   * @throws SQLException if something wrong happens
-   */
-  public static void sqlifnull(StringBuilder buf, List<? extends CharSequence> parsedArgs) throws SQLException {
-    twoArgumentsFunctionCall(buf, "coalesce(", "ifnull", parsedArgs);
-  }
-
-  /**
-   * user translation
-   *
-   * @param buf The buffer to append into
-   * @param parsedArgs arguments
-   * @throws SQLException if something wrong happens
-   */
-  public static void sqluser(StringBuilder buf, List<? extends CharSequence> parsedArgs) throws SQLException {
-    zeroArgumentFunctionCall(buf, "user", "user", parsedArgs);
-  }
-
-  private static void zeroArgumentFunctionCall(StringBuilder buf, String call, String functionName,
-      List<? extends CharSequence> parsedArgs) throws PSQLException {
-    if (!parsedArgs.isEmpty()) {
-      throw new PSQLException(GT.tr("{0} function doesn''t take any argument.", functionName),
-          PSQLState.SYNTAX_ERROR);
+    public EscapedFunctions2() {
     }
-    buf.append(call);
-  }
 
-  private static void singleArgumentFunctionCall(StringBuilder buf, String call, String functionName,
-      List<? extends CharSequence> parsedArgs) throws PSQLException {
-    if (parsedArgs.size() != 1) {
-      throw new PSQLException(GT.tr("{0} function takes one and only one argument.", functionName),
-          PSQLState.SYNTAX_ERROR);
+    private static ConcurrentMap<String, Method> createFunctionMap(String prefix) {
+        Method[] methods = EscapedFunctions2.class.getMethods();
+        ConcurrentMap<String, Method> functionMap = new ConcurrentHashMap<>(methods.length * 2);
+        for (Method method : methods) {
+            if (method.getName().startsWith(prefix)) {
+                functionMap.put(method.getName().substring(prefix.length()).toLowerCase(Locale.US), method);
+            }
+        }
+        return functionMap;
     }
-    CharSequence arg0 = parsedArgs.get(0);
-    buf.ensureCapacity(buf.length() + call.length() + arg0.length() + 1);
-    buf.append(call).append(arg0).append(')');
-  }
 
-  private static void twoArgumentsFunctionCall(StringBuilder buf, String call, String functionName,
-      List<? extends CharSequence> parsedArgs) throws PSQLException {
-    if (parsedArgs.size() != 2) {
-      throw new PSQLException(GT.tr("{0} function takes two and only two arguments.", functionName),
-          PSQLState.SYNTAX_ERROR);
+    /**
+     * get Method object implementing the given function
+     *
+     * @param functionName name of the searched function
+     * @return a Method object or null if not found
+     */
+    public static Method getFunction(String functionName) {
+        Method method = FUNCTION_MAP.get(functionName);
+        if (method != null) {
+            return method;
+        }
+        // FIXME: confirm Locale.US is intended here; it is used only to case-fold ASCII
+        //  function names, but Locale.ROOT would make that intent explicit.
+        String nameLower = functionName.toLowerCase(Locale.US);
+        if (nameLower.equals(functionName)) {
+            // Input name was in lower case, the function is not there
+            return null;
+        }
+        method = FUNCTION_MAP.get(nameLower);
+        if (method != null && FUNCTION_MAP.size() < 1000) {
+            // Avoid OutOfMemoryError in case input function names are randomized
+            // The number of methods is finite, however the number of upper-lower case combinations
+            // is quite a few (e.g. substr, Substr, sUbstr, SUbstr, etc).
+            FUNCTION_MAP.putIfAbsent(functionName, method);
+        }
+        return method;
     }
-    appendCall(buf, call, ",", ")", parsedArgs);
-  }
 
-  /**
-   * Appends {@code begin arg0 separator arg1 separator end} sequence to the input {@link StringBuilder}
-   * @param sb destination StringBuilder
-   * @param begin begin string
-   * @param separator separator string
-   * @param end end string
-   * @param args arguments
-   */
-  public static void appendCall(StringBuilder sb, String begin, String separator,
-      String end, List<? extends CharSequence> args) {
-    int size = begin.length();
-    // Typically just-in-time compiler would eliminate Iterator in case foreach is used,
-    // however the code below uses indexed iteration to keep the code independent from
-    // various JIT implementations (== avoid Iterator allocations even for not-so-smart JITs)
-    // see https://bugs.openjdk.java.net/browse/JDK-8166840
-    // see http://2016.jpoint.ru/talks/cheremin/ (video and slides)
-    int numberOfArguments = args.size();
-    for (int i = 0; i < numberOfArguments; i++) {
-      size += args.get(i).length();
+    // ** numeric functions translations **
+
+    /**
+     * ceiling to ceil translation
+     *
+     * @param buf        The buffer to append into
+     * @param parsedArgs arguments
+     * @throws SQLException if something wrong happens
+     */
+    public static void sqlceiling(StringBuilder buf, List<? extends CharSequence> parsedArgs) throws SQLException {
+        singleArgumentFunctionCall(buf, "ceil(", "ceiling", parsedArgs);
     }
-    size += separator.length() * (numberOfArguments - 1);
-    sb.ensureCapacity(sb.length() + size + 1);
-    sb.append(begin);
-    for (int i = 0; i < numberOfArguments; i++) {
-      if (i > 0) {
-        sb.append(separator);
-      }
-      sb.append(args.get(i));
+
+    /**
+     * log to ln translation
+     *
+     * @param buf        The buffer to append into
+     * @param parsedArgs arguments
+     * @throws SQLException if something wrong happens
+     */
+    public static void sqllog(StringBuilder buf, List<? extends CharSequence> parsedArgs) throws SQLException {
+        singleArgumentFunctionCall(buf, "ln(", "log", parsedArgs);
+    }
+
+    /**
+     * log10 to log translation
+     *
+     * @param buf        The buffer to append into
+     * @param parsedArgs arguments
+     * @throws SQLException if something wrong happens
+     */
+    public static void sqllog10(StringBuilder buf, List<? extends CharSequence> parsedArgs) throws SQLException {
+        singleArgumentFunctionCall(buf, "log(", "log10", parsedArgs);
+    }
+
+    /**
+     * power to pow translation
+     *
+     * @param buf        The buffer to append into
+     * @param parsedArgs arguments
+     * @throws SQLException if something wrong happens
+     */
+    public static void sqlpower(StringBuilder buf, List<? extends CharSequence> parsedArgs) throws SQLException {
+        twoArgumentsFunctionCall(buf, "pow(", "power", parsedArgs);
+    }
+
+    /**
+     * truncate to trunc translation
+     *
+     * @param buf        The buffer to append into
+     * @param parsedArgs arguments
+     * @throws SQLException if something wrong happens
+     */
+    public static void sqltruncate(StringBuilder buf, List<? extends CharSequence> parsedArgs) throws SQLException {
+        twoArgumentsFunctionCall(buf, "trunc(", "truncate", parsedArgs);
+    }
+
+    // ** string functions translations **
+
+    /**
+     * char to chr translation
+     *
+     * @param buf        The buffer to append into
+     * @param parsedArgs arguments
+     * @throws SQLException if something wrong happens
+     */
+    public static void sqlchar(StringBuilder buf, List<? extends CharSequence> parsedArgs) throws SQLException {
+        singleArgumentFunctionCall(buf, "chr(", "char", parsedArgs);
+    }
+
+    /**
+     * concat translation
+     *
+     * @param buf        The buffer to append into
+     * @param parsedArgs arguments
+     */
+    public static void sqlconcat(StringBuilder buf, List<? extends CharSequence> parsedArgs) {
+        appendCall(buf, "(", "||", ")", parsedArgs);
+    }
+
+    /**
+     * insert to overlay translation
+     *
+     * @param buf        The buffer to append into
+     * @param parsedArgs arguments
+     * @throws SQLException if something wrong happens
+     */
+    public static void sqlinsert(StringBuilder buf, List<? extends CharSequence> parsedArgs) throws SQLException {
+        if (parsedArgs.size() != 4) {
+            throw new PSQLException(GT.tr("{0} function takes four and only four argument.", "insert"),
+                    PSQLState.SYNTAX_ERROR);
+        }
+        buf.append("overlay(");
+        buf.append(parsedArgs.get(0)).append(" placing ").append(parsedArgs.get(3));
+        buf.append(" from ").append(parsedArgs.get(1)).append(" for ").append(parsedArgs.get(2));
+        buf.append(')');
+    }
+
+    /**
+     * lcase to lower translation
+     *
+     * @param buf        The buffer to append into
+     * @param parsedArgs arguments
+     * @throws SQLException if something wrong happens
+     */
+    public static void sqllcase(StringBuilder buf, List<? extends CharSequence> parsedArgs) throws SQLException {
+        singleArgumentFunctionCall(buf, "lower(", "lcase", parsedArgs);
+    }
+
+    /**
+     * left to substring translation
+     *
+     * @param buf        The buffer to append into
+     * @param parsedArgs arguments
+     * @throws SQLException if something wrong happens
+     */
+    public static void sqlleft(StringBuilder buf, List<? extends CharSequence> parsedArgs) throws SQLException {
+        if (parsedArgs.size() != 2) {
+            throw new PSQLException(GT.tr("{0} function takes two and only two arguments.", "left"),
+                    PSQLState.SYNTAX_ERROR);
+        }
+        appendCall(buf, "substring(", " for ", ")", parsedArgs);
+    }
+
+    /**
+     * length translation
+     *
+     * @param buf        The buffer to append into
+     * @param parsedArgs arguments
+     * @throws SQLException if something wrong happens
+     */
+    public static void sqllength(StringBuilder buf, List<? extends CharSequence> parsedArgs) throws SQLException {
+        if (parsedArgs.size() != 1) {
+            throw new PSQLException(GT.tr("{0} function takes one and only one argument.", "length"),
+                    PSQLState.SYNTAX_ERROR);
+        }
+        appendCall(buf, "length(trim(trailing from ", "", "))", parsedArgs);
+    }
+
+    /**
+     * locate translation
+     *
+     * @param buf        The buffer to append into
+     * @param parsedArgs arguments
+     * @throws SQLException if something wrong happens
+     */
+    public static void sqllocate(StringBuilder buf, List<? extends CharSequence> parsedArgs) throws SQLException {
+        if (parsedArgs.size() == 2) {
+            appendCall(buf, "position(", " in ", ")", parsedArgs);
+        } else if (parsedArgs.size() == 3) {
+            String tmp = "position(" + parsedArgs.get(0) + " in substring(" + parsedArgs.get(1) + " from "
+                    + parsedArgs.get(2) + "))";
+            buf.append("(")
+                    .append(parsedArgs.get(2))
+                    .append("*sign(")
+                    .append(tmp)
+                    .append(")+")
+                    .append(tmp)
+                    .append(")");
+        } else {
+            throw new PSQLException(GT.tr("{0} function takes two or three arguments.", "locate"),
+                    PSQLState.SYNTAX_ERROR);
+        }
+    }
+
+    /**
+     * ltrim translation
+     *
+     * @param buf        The buffer to append into
+     * @param parsedArgs arguments
+     * @throws SQLException if something wrong happens
+     */
+    public static void sqlltrim(StringBuilder buf, List<? extends CharSequence> parsedArgs) throws SQLException {
+        singleArgumentFunctionCall(buf, "trim(leading from ", "ltrim", parsedArgs);
+    }
+
+    /**
+     * right to substring translation
+     *
+     * @param buf        The buffer to append into
+     * @param parsedArgs arguments
+     * @throws SQLException if something wrong happens
+     */
+    public static void sqlright(StringBuilder buf, List<? extends CharSequence> parsedArgs) throws SQLException {
+        if (parsedArgs.size() != 2) {
+            throw new PSQLException(GT.tr("{0} function takes two and only two arguments.", "right"),
+                    PSQLState.SYNTAX_ERROR);
+        }
+        buf.append("substring(");
+        buf.append(parsedArgs.get(0))
+                .append(" from (length(")
+                .append(parsedArgs.get(0))
+                .append(")+1-")
+                .append(parsedArgs.get(1));
+        buf.append("))");
+    }
+
+    /**
+     * rtrim translation
+     *
+     * @param buf        The buffer to append into
+     * @param parsedArgs arguments
+     * @throws SQLException if something wrong happens
+     */
+    public static void sqlrtrim(StringBuilder buf, List<? extends CharSequence> parsedArgs) throws SQLException {
+        singleArgumentFunctionCall(buf, "trim(trailing from ", "rtrim", parsedArgs);
+    }
+
+    /**
+     * space translation
+     *
+     * @param buf        The buffer to append into
+     * @param parsedArgs arguments
+     * @throws SQLException if something wrong happens
+     */
+    public static void sqlspace(StringBuilder buf, List<? extends CharSequence> parsedArgs) throws SQLException {
+        singleArgumentFunctionCall(buf, "repeat(' ',", "space", parsedArgs);
+    }
+
+    /**
+     * substring to substr translation
+     *
+     * @param buf        The buffer to append into
+     * @param parsedArgs arguments
+     * @throws SQLException if something wrong happens
+     */
+    public static void sqlsubstring(StringBuilder buf, List<? extends CharSequence> parsedArgs) throws SQLException {
+        int argSize = parsedArgs.size();
+        if (argSize != 2 && argSize != 3) {
+            throw new PSQLException(GT.tr("{0} function takes two or three arguments.", "substring"),
+                    PSQLState.SYNTAX_ERROR);
+        }
+        appendCall(buf, "substr(", ",", ")", parsedArgs);
+    }
+
+    /**
+     * ucase to upper translation
+     *
+     * @param buf        The buffer to append into
+     * @param parsedArgs arguments
+     * @throws SQLException if something wrong happens
+     */
+    public static void sqlucase(StringBuilder buf, List<? extends CharSequence> parsedArgs) throws SQLException {
+        singleArgumentFunctionCall(buf, "upper(", "ucase", parsedArgs);
+    }
+
+    /**
+     * curdate to current_date translation
+     *
+     * @param buf        The buffer to append into
+     * @param parsedArgs arguments
+     * @throws SQLException if something wrong happens
+     */
+    public static void sqlcurdate(StringBuilder buf, List<? extends CharSequence> parsedArgs) throws SQLException {
+        zeroArgumentFunctionCall(buf, "current_date", "curdate", parsedArgs);
+    }
+
+    /**
+     * curtime to current_time translation
+     *
+     * @param buf        The buffer to append into
+     * @param parsedArgs arguments
+     * @throws SQLException if something wrong happens
+     */
+    public static void sqlcurtime(StringBuilder buf, List<? extends CharSequence> parsedArgs) throws SQLException {
+        zeroArgumentFunctionCall(buf, "current_time", "curtime", parsedArgs);
+    }
+
+    /**
+     * dayname translation
+     *
+     * @param buf        The buffer to append into
+     * @param parsedArgs arguments
+     * @throws SQLException if something wrong happens
+     */
+    public static void sqldayname(StringBuilder buf, List<? extends CharSequence> parsedArgs) throws SQLException {
+        if (parsedArgs.size() != 1) {
+            throw new PSQLException(GT.tr("{0} function takes one and only one argument.", "dayname"),
+                    PSQLState.SYNTAX_ERROR);
+        }
+        appendCall(buf, "to_char(", ",", ",'Day')", parsedArgs);
+    }
+
+    /**
+     * dayofmonth translation
+     *
+     * @param buf        The buffer to append into
+     * @param parsedArgs arguments
+     * @throws SQLException if something wrong happens
+     */
+    public static void sqldayofmonth(StringBuilder buf, List<? extends CharSequence> parsedArgs) throws SQLException {
+        singleArgumentFunctionCall(buf, "extract(day from ", "dayofmonth", parsedArgs);
+    }
+
+    /**
+     * dayofweek translation adding 1 to postgresql function since we expect values from 1 to 7
+     *
+     * @param buf        The buffer to append into
+     * @param parsedArgs arguments
+     * @throws SQLException if something wrong happens
+     */
+    public static void sqldayofweek(StringBuilder buf, List<? extends CharSequence> parsedArgs) throws SQLException {
+        if (parsedArgs.size() != 1) {
+            throw new PSQLException(GT.tr("{0} function takes one and only one argument.", "dayofweek"),
+                    PSQLState.SYNTAX_ERROR);
+        }
+        appendCall(buf, "extract(dow from ", ",", ")+1", parsedArgs);
+    }
+
+    /**
+     * dayofyear translation
+     *
+     * @param buf        The buffer to append into
+     * @param parsedArgs arguments
+     * @throws SQLException if something wrong happens
+     */
+    public static void sqldayofyear(StringBuilder buf, List<? extends CharSequence> parsedArgs) throws SQLException {
+        singleArgumentFunctionCall(buf, "extract(doy from ", "dayofyear", parsedArgs);
+    }
+
+    /**
+     * hour translation
+     *
+     * @param buf        The buffer to append into
+     * @param parsedArgs arguments
+     * @throws SQLException if something wrong happens
+     */
+    public static void sqlhour(StringBuilder buf, List<? extends CharSequence> parsedArgs) throws SQLException {
+        singleArgumentFunctionCall(buf, "extract(hour from ", "hour", parsedArgs);
+    }
+
+    /**
+     * minute translation
+     *
+     * @param buf        The buffer to append into
+     * @param parsedArgs arguments
+     * @throws SQLException if something wrong happens
+     */
+    public static void sqlminute(StringBuilder buf, List<? extends CharSequence> parsedArgs) throws SQLException {
+        singleArgumentFunctionCall(buf, "extract(minute from ", "minute", parsedArgs);
+    }
+
+    /**
+     * month translation
+     *
+     * @param buf        The buffer to append into
+     * @param parsedArgs arguments
+     * @throws SQLException if something wrong happens
+     */
+    public static void sqlmonth(StringBuilder buf, List<? extends CharSequence> parsedArgs) throws SQLException {
+        singleArgumentFunctionCall(buf, "extract(month from ", "month", parsedArgs);
+    }
+
+    /**
+     * monthname translation
+     *
+     * @param buf        The buffer to append into
+     * @param parsedArgs arguments
+     * @throws SQLException if something wrong happens
+     */
+    public static void sqlmonthname(StringBuilder buf, List<? extends CharSequence> parsedArgs) throws SQLException {
+        if (parsedArgs.size() != 1) {
+            throw new PSQLException(GT.tr("{0} function takes one and only one argument.", "monthname"),
+                    PSQLState.SYNTAX_ERROR);
+        }
+        appendCall(buf, "to_char(", ",", ",'Month')", parsedArgs);
+    }
+
+    /**
+     * quarter translation
+     *
+     * @param buf        The buffer to append into
+     * @param parsedArgs arguments
+     * @throws SQLException if something wrong happens
+     */
+    public static void sqlquarter(StringBuilder buf, List<? extends CharSequence> parsedArgs) throws SQLException {
+        singleArgumentFunctionCall(buf, "extract(quarter from ", "quarter", parsedArgs);
+    }
+
+    /**
+     * Translates the {@code second} escape function into {@code extract(second from arg)}.
+     *
+     * @param buf        The buffer to append the translated expression into
+     * @param parsedArgs the parsed arguments; exactly one is required
+     * @throws SQLException if the argument list does not contain exactly one element
+     */
+    public static void sqlsecond(StringBuilder buf, List<? extends CharSequence> parsedArgs) throws SQLException {
+        singleArgumentFunctionCall(buf, "extract(second from ", "second", parsedArgs);
+    }
+
+    /**
+     * Translates the {@code week} escape function into {@code extract(week from arg)}.
+     *
+     * @param buf        The buffer to append the translated expression into
+     * @param parsedArgs the parsed arguments; exactly one is required
+     * @throws SQLException if the argument list does not contain exactly one element
+     */
+    public static void sqlweek(StringBuilder buf, List<? extends CharSequence> parsedArgs) throws SQLException {
+        singleArgumentFunctionCall(buf, "extract(week from ", "week", parsedArgs);
+    }
+
+    /**
+     * Translates the {@code year} escape function into {@code extract(year from arg)}.
+     *
+     * @param buf        The buffer to append the translated expression into
+     * @param parsedArgs the parsed arguments; exactly one is required
+     * @throws SQLException if the argument list does not contain exactly one element
+     */
+    public static void sqlyear(StringBuilder buf, List<? extends CharSequence> parsedArgs) throws SQLException {
+        singleArgumentFunctionCall(buf, "extract(year from ", "year", parsedArgs);
+    }
+
+    /**
+     * Translates {@code timestampadd(interval, count, timestamp)} into
+     * {@code (<interval cast of count> + timestamp)}; see {@code appendInterval}.
+     *
+     * @param buf        The buffer to append the translated expression into
+     * @param parsedArgs the parsed arguments: SQL_TSI_* interval name, count, timestamp
+     * @throws SQLException if there are not exactly three arguments, or the interval type is unsupported
+     */
+    public static void sqltimestampadd(StringBuilder buf, List<? extends CharSequence> parsedArgs) throws SQLException {
+        if (parsedArgs.size() != 3) {
+            throw new PSQLException(
+                    GT.tr("{0} function takes three and only three arguments.", "timestampadd"),
+                    PSQLState.SYNTAX_ERROR);
+        }
+        buf.append('(');
+        appendInterval(buf, parsedArgs.get(0).toString(), parsedArgs.get(1).toString());
+        buf.append('+').append(parsedArgs.get(2)).append(')');
+    }
+
+    private static void appendInterval(StringBuilder buf, String type, String value) throws SQLException {
+        if (!isTsi(type)) { // reject anything that does not start with the SQL_TSI_ prefix
+            throw new PSQLException(GT.tr("Interval {0} not yet implemented", type),
+                    PSQLState.SYNTAX_ERROR);
+        }
+        if (appendSingleIntervalCast(buf, SQL_TSI_DAY, type, value, "day")
+                || appendSingleIntervalCast(buf, SQL_TSI_SECOND, type, value, "second")
+                || appendSingleIntervalCast(buf, SQL_TSI_HOUR, type, value, "hour")
+                || appendSingleIntervalCast(buf, SQL_TSI_MINUTE, type, value, "minute")
+                || appendSingleIntervalCast(buf, SQL_TSI_MONTH, type, value, "month")
+                || appendSingleIntervalCast(buf, SQL_TSI_WEEK, type, value, "week")
+                || appendSingleIntervalCast(buf, SQL_TSI_YEAR, type, value, "year")
+        ) { // first matching unit wins; each call appends CAST(value||' unit' as interval)
+            return;
+        }
+        if (areSameTsi(SQL_TSI_QUARTER, type)) { // quarter has no direct pg unit: emit (value * 3) months
+            buf.append("CAST((").append(value).append("::int * 3) || ' month' as interval)");
+            return;
+        }
+        throw new PSQLException(GT.tr("Interval {0} not yet implemented", type),
+                PSQLState.NOT_IMPLEMENTED); // any remaining SQL_TSI_* unit is unsupported
+    }
+
+    private static boolean appendSingleIntervalCast(StringBuilder buf, String cmp, String type, String value, String pgType) {
+        if (!areSameTsi(type, cmp)) { // not the unit this call handles
+            return false;
+        }
+        buf.ensureCapacity(buf.length() + 5 + 4 + 14 + value.length() + pgType.length()); // presize for "CAST(" + "||' " + "' as interval)" + operands
+        buf.append("CAST(").append(value).append("||' ").append(pgType).append("' as interval)");
+        return true;
+    }
+
+    /**
+     * Compares two TSI interval names for equality, ignoring case. Only the part after
+     * the common {@code SQL_TSI_} prefix is compared; both names must have the same length.
+     *
+     * @param a first interval to compare
+     * @param b second interval to compare
+     * @return true when both intervals are equal (case insensitive)
+     */
+    private static boolean areSameTsi(String a, String b) {
+        return a.length() == b.length() && b.length() > SQL_TSI_ROOT.length()
+                && a.regionMatches(true, SQL_TSI_ROOT.length(), b, SQL_TSI_ROOT.length(), b.length() - SQL_TSI_ROOT.length());
+    }
+
+    /**
+     * Checks whether the given interval name starts with {@link #SQL_TSI_ROOT} (case insensitive).
+     *
+     * @param interval input string
+     * @return true if interval starts with SQL_TSI_ROOT ignoring case
+     */
+    private static boolean isTsi(String interval) {
+        return interval.regionMatches(true, 0, SQL_TSI_ROOT, 0, SQL_TSI_ROOT.length());
+    }
+
+    /**
+     * Translates {@code timestampdiff(interval, ts1, ts2)} into
+     * {@code extract(<unit> from (ts2 - ts1))}; see {@code constantToDatePart} for supported units.
+     *
+     * @param buf        The buffer to append the translated expression into
+     * @param parsedArgs the parsed arguments: SQL_TSI_* interval name, start timestamp, end timestamp
+     * @throws SQLException if there are not exactly three arguments, or the interval type is unsupported
+     */
+    public static void sqltimestampdiff(StringBuilder buf, List<? extends CharSequence> parsedArgs) throws SQLException {
+        if (parsedArgs.size() != 3) {
+            throw new PSQLException(
+                    GT.tr("{0} function takes three and only three arguments.", "timestampdiff"),
+                    PSQLState.SYNTAX_ERROR);
+        }
+        buf.append("extract( ")
+                .append(constantToDatePart(buf, parsedArgs.get(0).toString()))
+                .append(" from (")
+                .append(parsedArgs.get(2))
+                .append("-")
+                .append(parsedArgs.get(1))
+                .append("))");
+    }
+
+    private static String constantToDatePart(StringBuilder buf, String type) throws SQLException {
+        if (!isTsi(type)) { // reject anything that does not start with the SQL_TSI_ prefix
+            throw new PSQLException(GT.tr("Interval {0} not yet implemented", type),
+                    PSQLState.SYNTAX_ERROR);
+        }
+        if (areSameTsi(SQL_TSI_DAY, type)) {
+            return "day";
+        } else if (areSameTsi(SQL_TSI_SECOND, type)) {
+            return "second";
+        } else if (areSameTsi(SQL_TSI_HOUR, type)) {
+            return "hour";
+        } else if (areSameTsi(SQL_TSI_MINUTE, type)) {
+            return "minute";
+        } else {
+            throw new PSQLException(GT.tr("Interval {0} not yet implemented", type),
+                    PSQLState.SYNTAX_ERROR);
+        }
+        // month/quarter/week/year are deliberately unsupported for timestampdiff; see
+        // http://archives.postgresql.org/pgsql-jdbc/2006-03/msg00096.php
+        /*
+         * The removed branches were: SQL_TSI_MONTH -> "month",
+         * SQL_TSI_QUARTER -> "quarter", SQL_TSI_WEEK -> "week",
+         * SQL_TSI_YEAR -> "year".
+         */
+    }
+
+    /**
+     * Translates the {@code database} escape function into {@code current_database()}.
+     *
+     * @param buf        The buffer to append the translated expression into
+     * @param parsedArgs the parsed arguments; must be empty
+     * @throws SQLException if any argument is supplied
+     */
+    public static void sqldatabase(StringBuilder buf, List<? extends CharSequence> parsedArgs) throws SQLException {
+        zeroArgumentFunctionCall(buf, "current_database()", "database", parsedArgs);
+    }
+
+    /**
+     * Translates the {@code ifnull} escape function into {@code coalesce(arg0,arg1)}.
+     *
+     * @param buf        The buffer to append the translated expression into
+     * @param parsedArgs the parsed arguments; exactly two are required
+     * @throws SQLException if the argument list does not contain exactly two elements
+     */
+    public static void sqlifnull(StringBuilder buf, List<? extends CharSequence> parsedArgs) throws SQLException {
+        twoArgumentsFunctionCall(buf, "coalesce(", "ifnull", parsedArgs);
+    }
+
+    /**
+     * Translates the {@code user} escape function into the {@code user} keyword.
+     *
+     * @param buf        The buffer to append the translated expression into
+     * @param parsedArgs the parsed arguments; must be empty
+     * @throws SQLException if any argument is supplied
+     */
+    public static void sqluser(StringBuilder buf, List<? extends CharSequence> parsedArgs) throws SQLException {
+        zeroArgumentFunctionCall(buf, "user", "user", parsedArgs);
+    }
+
+    private static void zeroArgumentFunctionCall(StringBuilder buf, String call, String functionName,
+                                                 List<? extends CharSequence> parsedArgs) throws PSQLException {
+        if (!parsedArgs.isEmpty()) { // this escape takes no arguments at all
+            throw new PSQLException(GT.tr("{0} function doesn''t take any argument.", functionName),
+                    PSQLState.SYNTAX_ERROR);
+        }
+        buf.append(call); // call already contains the complete replacement expression
+    }
+
+    private static void singleArgumentFunctionCall(StringBuilder buf, String call, String functionName,
+                                                   List<? extends CharSequence> parsedArgs) throws PSQLException {
+        if (parsedArgs.size() != 1) { // this escape takes exactly one argument
+            throw new PSQLException(GT.tr("{0} function takes one and only one argument.", functionName),
+                    PSQLState.SYNTAX_ERROR);
+        }
+        CharSequence arg0 = parsedArgs.get(0);
+        buf.ensureCapacity(buf.length() + call.length() + arg0.length() + 1); // presize for call + argument + closing ')'
+        buf.append(call).append(arg0).append(')');
+    }
+
+    private static void twoArgumentsFunctionCall(StringBuilder buf, String call, String functionName,
+                                                 List<? extends CharSequence> parsedArgs) throws PSQLException {
+        if (parsedArgs.size() != 2) { // this escape takes exactly two arguments
+            throw new PSQLException(GT.tr("{0} function takes two and only two arguments.", functionName),
+                    PSQLState.SYNTAX_ERROR);
+        }
+        appendCall(buf, call, ",", ")", parsedArgs); // emits call + arg0 + ',' + arg1 + ')'
+    }
+
+    /**
+     * Appends the {@code begin arg0 separator arg1 ... end} sequence to the input
+     * {@link StringBuilder} (the separator goes between arguments, not before {@code end}).
+     *
+     * @param sb        destination StringBuilder
+     * @param begin     begin string
+     * @param separator separator string placed between consecutive arguments
+     * @param end       end string
+     * @param args      arguments
+     */
+    public static void appendCall(StringBuilder sb, String begin, String separator,
+                                  String end, List<? extends CharSequence> args) {
+        int size = begin.length();
+        // Typically just-in-time compiler would eliminate Iterator in case foreach is used,
+        // however the code below uses indexed iteration to keep the code independent from
+        // various JIT implementations (== avoid Iterator allocations even for not-so-smart JITs)
+        // see https://bugs.openjdk.java.net/browse/JDK-8166840
+        // see http://2016.jpoint.ru/talks/cheremin/ (video and slides)
+        int numberOfArguments = args.size();
+        for (int i = 0; i < numberOfArguments; i++) {
+            size += args.get(i).length();
+        }
+        size += separator.length() * (numberOfArguments - 1); // separators only between args
+        sb.ensureCapacity(sb.length() + size + 1); // one extra char covers a short `end`
+        sb.append(begin);
+        for (int i = 0; i < numberOfArguments; i++) {
+            if (i > 0) {
+                sb.append(separator);
+            }
+            sb.append(args.get(i));
+        }
+        sb.append(end);
     }
-    sb.append(end);
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/FieldMetadata.java b/pgjdbc/src/main/java/org/postgresql/jdbc/FieldMetadata.java
index dac04f7..3cc0a07 100644
--- a/pgjdbc/src/main/java/org/postgresql/jdbc/FieldMetadata.java
+++ b/pgjdbc/src/main/java/org/postgresql/jdbc/FieldMetadata.java
@@ -12,84 +12,83 @@ import org.postgresql.util.CanEstimateSize;
  * This class is not meant to be used outside of pgjdbc.
  */
 public class FieldMetadata implements CanEstimateSize {
-  public static class Key {
-    final int tableOid;
-    final int positionInTable;
+    final String columnName;
+    final String tableName;
+    final String schemaName;
+    final int nullable;
+    final boolean autoIncrement;
+    public FieldMetadata(String columnName) {
+        this(columnName, "", "", PgResultSetMetaData.columnNullableUnknown, false);
+    }
 
-    Key(int tableOid, int positionInTable) {
-      this.positionInTable = positionInTable;
-      this.tableOid = tableOid;
+    FieldMetadata(String columnName, String tableName, String schemaName, int nullable,
+                  boolean autoIncrement) {
+        this.columnName = columnName;
+        this.tableName = tableName;
+        this.schemaName = schemaName;
+        this.nullable = nullable;
+        this.autoIncrement = autoIncrement;
     }
 
     @Override
-    public boolean equals(Object o) {
-      if (this == o) {
-        return true;
-      }
-      if (o == null || getClass() != o.getClass()) {
-        return false;
-      }
-
-      Key key = (Key) o;
-
-      if (tableOid != key.tableOid) {
-        return false;
-      }
-      return positionInTable == key.positionInTable;
-    }
-
-    @Override
-    public int hashCode() {
-      int result = tableOid;
-      result = 31 * result + positionInTable;
-      return result;
+    public long getSize() {
+        return columnName.length() * 2
+                + tableName.length() * 2
+                + schemaName.length() * 2
+                + 4L
+                + 1L;
     }
 
     @Override
     public String toString() {
-      return "Key{"
-          + "tableOid=" + tableOid
-          + ", positionInTable=" + positionInTable
-          + '}';
+        return "FieldMetadata{"
+                + "columnName='" + columnName + '\''
+                + ", tableName='" + tableName + '\''
+                + ", schemaName='" + schemaName + '\''
+                + ", nullable=" + nullable
+                + ", autoIncrement=" + autoIncrement
+                + '}';
     }
-  }
 
-  final String columnName;
-  final String tableName;
-  final String schemaName;
-  final int nullable;
-  final boolean autoIncrement;
+    public static class Key {
+        final int tableOid;
+        final int positionInTable;
 
-  public FieldMetadata(String columnName) {
-    this(columnName, "", "", PgResultSetMetaData.columnNullableUnknown, false);
-  }
+        Key(int tableOid, int positionInTable) {
+            this.positionInTable = positionInTable;
+            this.tableOid = tableOid;
+        }
 
-  FieldMetadata(String columnName, String tableName, String schemaName, int nullable,
-      boolean autoIncrement) {
-    this.columnName = columnName;
-    this.tableName = tableName;
-    this.schemaName = schemaName;
-    this.nullable = nullable;
-    this.autoIncrement = autoIncrement;
-  }
+        @Override
+        public boolean equals(Object o) {
+            if (this == o) {
+                return true;
+            }
+            if (o == null || getClass() != o.getClass()) {
+                return false;
+            }
 
-  @Override
-  public long getSize() {
-    return columnName.length() * 2
-        + tableName.length() * 2
-        + schemaName.length() * 2
-        + 4L
-        + 1L;
-  }
+            Key key = (Key) o;
 
-  @Override
-  public String toString() {
-    return "FieldMetadata{"
-        + "columnName='" + columnName + '\''
-        + ", tableName='" + tableName + '\''
-        + ", schemaName='" + schemaName + '\''
-        + ", nullable=" + nullable
-        + ", autoIncrement=" + autoIncrement
-        + '}';
-  }
+            if (tableOid != key.tableOid) {
+                return false;
+            }
+            return positionInTable == key.positionInTable;
+        }
+
+        @Override
+        public int hashCode() {
+            int result = tableOid;
+            result = 31 * result + positionInTable;
+            return result;
+        }
+
+        @Override
+        public String toString() {
+            return "Key{"
+                    + "tableOid=" + tableOid
+                    + ", positionInTable=" + positionInTable
+                    + '}';
+        }
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/GSSEncMode.java b/pgjdbc/src/main/java/org/postgresql/jdbc/GSSEncMode.java
index ec755ba..faf17bb 100644
--- a/pgjdbc/src/main/java/org/postgresql/jdbc/GSSEncMode.java
+++ b/pgjdbc/src/main/java/org/postgresql/jdbc/GSSEncMode.java
@@ -14,52 +14,52 @@ import java.util.Properties;
 
 public enum GSSEncMode {
 
-  /**
-   * Do not use encrypted connections.
-   */
-  DISABLE("disable"),
+    /**
+     * Do not use encrypted connections.
+     */
+    DISABLE("disable"),
 
-  /**
-   * Start with non-encrypted connection, then try encrypted one.
-   */
-  ALLOW("allow"),
+    /**
+     * Start with non-encrypted connection, then try encrypted one.
+     */
+    ALLOW("allow"),
 
-  /**
-   * Start with encrypted connection, fallback to non-encrypted (default).
-   */
-  PREFER("prefer"),
+    /**
+     * Start with encrypted connection, fallback to non-encrypted (default).
+     */
+    PREFER("prefer"),
 
-  /**
-   * Ensure connection is encrypted.
-   */
-  REQUIRE("require");
+    /**
+     * Ensure connection is encrypted.
+     */
+    REQUIRE("require");
 
-  private static final GSSEncMode[] VALUES = values();
+    private static final GSSEncMode[] VALUES = values();
 
-  public final String value;
+    public final String value;
 
-  GSSEncMode(String value) {
-    this.value = value;
-  }
-
-  public boolean requireEncryption() {
-    return this.compareTo(REQUIRE) >= 0;
-  }
-
-  public static GSSEncMode of(Properties info) throws PSQLException {
-    String gssEncMode = PGProperty.GSS_ENC_MODE.getOrDefault(info);
-    // If gssEncMode is not set, fallback to allow
-    if (gssEncMode == null) {
-      return ALLOW;
+    GSSEncMode(String value) {
+        this.value = value;
     }
 
-    for (GSSEncMode mode : VALUES) {
-      if (mode.value.equalsIgnoreCase(gssEncMode)) {
-        return mode;
-      }
+    public static GSSEncMode of(Properties info) throws PSQLException {
+        String gssEncMode = PGProperty.GSS_ENC_MODE.getOrDefault(info);
+        // Property absent: default to ALLOW (start non-encrypted, then try encrypted)
+        if (gssEncMode == null) {
+            return ALLOW;
+        }
+
+        for (GSSEncMode mode : VALUES) { // case-insensitive match against the declared mode values
+            if (mode.value.equalsIgnoreCase(gssEncMode)) {
+                return mode;
+            }
+        }
+        throw new PSQLException(GT.tr("Invalid gssEncMode value: {0}", gssEncMode),
+                PSQLState.CONNECTION_UNABLE_TO_CONNECT); // unrecognized value is a connection error
+    }
+
+    public boolean requireEncryption() {
+        return this.compareTo(REQUIRE) >= 0;
     }
-    throw new PSQLException(GT.tr("Invalid gssEncMode value: {0}", gssEncMode),
-        PSQLState.CONNECTION_UNABLE_TO_CONNECT);
-  }
 
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/PSQLSavepoint.java b/pgjdbc/src/main/java/org/postgresql/jdbc/PSQLSavepoint.java
index f4d3949..f2cb9fc 100644
--- a/pgjdbc/src/main/java/org/postgresql/jdbc/PSQLSavepoint.java
+++ b/pgjdbc/src/main/java/org/postgresql/jdbc/PSQLSavepoint.java
@@ -15,70 +15,70 @@ import java.sql.Savepoint;
 
 public class PSQLSavepoint implements Savepoint {
 
-  private boolean isValid;
-  private final boolean isNamed;
-  private int id;
-  private String name;
+    private final boolean isNamed;
+    private boolean isValid;
+    private int id;
+    private String name;
 
-  public PSQLSavepoint(int id) {
-    this.isValid = true;
-    this.isNamed = false;
-    this.id = id;
-  }
-
-  public PSQLSavepoint(String name) {
-    this.isValid = true;
-    this.isNamed = true;
-    this.name = name;
-  }
-
-  @Override
-  public int getSavepointId() throws SQLException {
-    if (!isValid) {
-      throw new PSQLException(GT.tr("Cannot reference a savepoint after it has been released."),
-          PSQLState.INVALID_SAVEPOINT_SPECIFICATION);
+    public PSQLSavepoint(int id) {
+        this.isValid = true;
+        this.isNamed = false;
+        this.id = id;
     }
 
-    if (isNamed) {
-      throw new PSQLException(GT.tr("Cannot retrieve the id of a named savepoint."),
-          PSQLState.WRONG_OBJECT_TYPE);
+    public PSQLSavepoint(String name) {
+        this.isValid = true;
+        this.isNamed = true;
+        this.name = name;
     }
 
-    return id;
-  }
+    @Override
+    public int getSavepointId() throws SQLException {
+        if (!isValid) {
+            throw new PSQLException(GT.tr("Cannot reference a savepoint after it has been released."),
+                    PSQLState.INVALID_SAVEPOINT_SPECIFICATION);
+        }
 
-  @Override
-  public String getSavepointName() throws SQLException {
-    if (!isValid) {
-      throw new PSQLException(GT.tr("Cannot reference a savepoint after it has been released."),
-          PSQLState.INVALID_SAVEPOINT_SPECIFICATION);
+        if (isNamed) {
+            throw new PSQLException(GT.tr("Cannot retrieve the id of a named savepoint."),
+                    PSQLState.WRONG_OBJECT_TYPE);
+        }
+
+        return id;
     }
 
-    if (!isNamed || name == null) {
-      throw new PSQLException(GT.tr("Cannot retrieve the name of an unnamed savepoint."),
-          PSQLState.WRONG_OBJECT_TYPE);
+    @Override
+    public String getSavepointName() throws SQLException {
+        if (!isValid) { // a released savepoint must not be referenced again
+            throw new PSQLException(GT.tr("Cannot reference a savepoint after it has been released."),
+                    PSQLState.INVALID_SAVEPOINT_SPECIFICATION);
+        }
+
+        if (!isNamed || name == null) { // only savepoints created with a name carry one
+            throw new PSQLException(GT.tr("Cannot retrieve the name of an unnamed savepoint."),
+                    PSQLState.WRONG_OBJECT_TYPE);
+        }
+
+        return name;
+    }
 
-    return name;
-  }
-
-  public void invalidate() {
-    isValid = false;
-  }
-
-  public String getPGName() throws SQLException {
-    if (!isValid) {
-      throw new PSQLException(GT.tr("Cannot reference a savepoint after it has been released."),
-          PSQLState.INVALID_SAVEPOINT_SPECIFICATION);
+    public void invalidate() {
+        isValid = false;
     }
 
-    if (isNamed && name != null) {
-      // We need to quote and escape the name in case it
-      // contains spaces/quotes/etc.
-      //
-      return Utils.escapeIdentifier(null, name).toString();
-    }
+    public String getPGName() throws SQLException {
+        if (!isValid) {
+            throw new PSQLException(GT.tr("Cannot reference a savepoint after it has been released."),
+                    PSQLState.INVALID_SAVEPOINT_SPECIFICATION);
+        }
 
-    return "JDBC_SAVEPOINT_" + id;
-  }
+        if (isNamed && name != null) {
+            // We need to quote and escape the name in case it
+            // contains spaces/quotes/etc.
+            //
+            return Utils.escapeIdentifier(null, name).toString();
+        }
+
+        return "JDBC_SAVEPOINT_" + id;
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/PSQLWarningWrapper.java b/pgjdbc/src/main/java/org/postgresql/jdbc/PSQLWarningWrapper.java
index f93b92d..01ca80a 100644
--- a/pgjdbc/src/main/java/org/postgresql/jdbc/PSQLWarningWrapper.java
+++ b/pgjdbc/src/main/java/org/postgresql/jdbc/PSQLWarningWrapper.java
@@ -15,21 +15,21 @@ import java.sql.SQLWarning;
  */
 public class PSQLWarningWrapper {
 
-  private final SQLWarning firstWarning;
-  private SQLWarning lastWarning;
+    private final SQLWarning firstWarning;
+    private SQLWarning lastWarning;
 
-  public PSQLWarningWrapper(SQLWarning warning) {
-    firstWarning = warning;
-    lastWarning = warning;
-  }
+    public PSQLWarningWrapper(SQLWarning warning) {
+        firstWarning = warning;
+        lastWarning = warning;
+    }
 
-  void addWarning(SQLWarning sqlWarning) {
-    lastWarning.setNextWarning(sqlWarning);
-    lastWarning = sqlWarning;
-  }
+    void addWarning(SQLWarning sqlWarning) {
+        lastWarning.setNextWarning(sqlWarning);
+        lastWarning = sqlWarning;
+    }
 
-  SQLWarning getFirstWarning() {
-    return firstWarning;
-  }
+    SQLWarning getFirstWarning() {
+        return firstWarning;
+    }
 
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/PgArray.java b/pgjdbc/src/main/java/org/postgresql/jdbc/PgArray.java
index a7eeed5..bc41e86 100644
--- a/pgjdbc/src/main/java/org/postgresql/jdbc/PgArray.java
+++ b/pgjdbc/src/main/java/org/postgresql/jdbc/PgArray.java
@@ -39,462 +39,457 @@ import java.util.Map;
  */
 public class PgArray implements Array {
 
-  static {
-    ArrayAssistantRegistry.register(Oid.UUID, new UUIDArrayAssistant());
-    ArrayAssistantRegistry.register(Oid.UUID_ARRAY, new UUIDArrayAssistant());
-  }
-
-  /**
-   * A database connection.
-   */
-  protected BaseConnection connection;
-
-  /**
-   * The OID of this field.
-   */
-  private final int oid;
-
-  /**
-   * Field value as String.
-   */
-  protected String fieldString;
-
-  /**
-   * Value of field as {@link PgArrayList}. Will be initialized only once within
-   * {@link #buildArrayList(String)}.
-   */
-  protected ArrayDecoding.PgArrayList arrayList;
-
-  protected byte [] fieldBytes;
-
-  private final ResourceLock lock = new ResourceLock();
-
-  private PgArray(BaseConnection connection, int oid) throws SQLException {
-    this.connection = connection;
-    this.oid = oid;
-  }
-
-  /**
-   * Create a new Array.
-   *
-   * @param connection a database connection
-   * @param oid the oid of the array datatype
-   * @param fieldString the array data in string form
-   * @throws SQLException if something wrong happens
-   */
-  public PgArray(BaseConnection connection, int oid, String fieldString)
-      throws SQLException {
-    this(connection, oid);
-    this.fieldString = fieldString;
-  }
-
-  /**
-   * Create a new Array.
-   *
-   * @param connection a database connection
-   * @param oid the oid of the array datatype
-   * @param fieldBytes the array data in byte form
-   * @throws SQLException if something wrong happens
-   */
-  public PgArray(BaseConnection connection, int oid, byte [] fieldBytes)
-      throws SQLException {
-    this(connection, oid);
-    this.fieldBytes = fieldBytes;
-  }
-
-  private BaseConnection getConnection() {
-    return connection;
-  }
-
-  @Override
-  public Object getArray() throws SQLException {
-    return getArrayImpl(1, 0, null);
-  }
-
-  @Override
-  public Object getArray(long index, int count) throws SQLException {
-    return getArrayImpl(index, count, null);
-  }
-
-  public Object getArrayImpl(Map<String, Class<?>> map) throws SQLException {
-    return getArrayImpl(1, 0, map);
-  }
-
-  @Override
-  public Object getArray(Map<String, Class<?>> map) throws SQLException {
-    return getArrayImpl(map);
-  }
-
-  @Override
-  public Object getArray(long index, int count, Map<String, Class<?>> map)
-      throws SQLException {
-    return getArrayImpl(index, count, map);
-  }
-
-  public Object getArrayImpl(long index, int count, Map<String, Class<?>> map)
-      throws SQLException {
-
-    // for now maps aren't supported.
-    if (map != null && !map.isEmpty()) {
-      throw Driver.notImplemented(this.getClass(), "getArrayImpl(long,int,Map)");
+    static {
+        ArrayAssistantRegistry.register(Oid.UUID, new UUIDArrayAssistant());
+        ArrayAssistantRegistry.register(Oid.UUID_ARRAY, new UUIDArrayAssistant());
     }
 
-    // array index is out of range
-    if (index < 1) {
-      throw new PSQLException(GT.tr("The array index is out of range: {0}", index),
-          PSQLState.DATA_ERROR);
+    /**
+     * The OID of this field.
+     */
+    private final int oid;
+    private final ResourceLock lock = new ResourceLock();
+    /**
+     * A database connection.
+     */
+    protected BaseConnection connection;
+    /**
+     * Field value as String.
+     */
+    protected String fieldString;
+    /**
+     * Value of field as {@link PgArrayList}. Will be initialized only once within
+     * {@link #buildArrayList(String)}.
+     */
+    protected ArrayDecoding.PgArrayList arrayList;
+    protected byte[] fieldBytes;
+
+    private PgArray(BaseConnection connection, int oid) throws SQLException {
+        this.connection = connection;
+        this.oid = oid;
     }
 
-    if (fieldBytes != null) {
-      return readBinaryArray(fieldBytes, (int) index, count);
+    /**
+     * Create a new Array.
+     *
+     * @param connection  a database connection
+     * @param oid         the oid of the array datatype
+     * @param fieldString the array data in string form
+     * @throws SQLException if something wrong happens
+     */
+    public PgArray(BaseConnection connection, int oid, String fieldString)
+            throws SQLException {
+        this(connection, oid);
+        this.fieldString = fieldString;
     }
 
-    if (fieldString == null) {
-      return null;
+    /**
+     * Create a new Array.
+     *
+     * @param connection a database connection
+     * @param oid        the oid of the array datatype
+     * @param fieldBytes the array data in byte form
+     * @throws SQLException if something wrong happens
+     */
+    public PgArray(BaseConnection connection, int oid, byte[] fieldBytes)
+            throws SQLException {
+        this(connection, oid);
+        this.fieldBytes = fieldBytes;
     }
 
-    final PgArrayList arrayList = buildArrayList(fieldString);
+    public static void escapeArrayElement(StringBuilder b, String s) {
+        b.append('"');
+        for (int j = 0; j < s.length(); j++) {
+            char c = s.charAt(j);
+            if (c == '"' || c == '\\') {
+                b.append('\\');
+            }
 
-    if (count == 0) {
-      count = arrayList.size();
-    }
-
-    // array index out of range
-    if ((index - 1) + count > arrayList.size()) {
-      throw new PSQLException(
-          GT.tr("The array index is out of range: {0}, number of elements: {1}.",
-              index + count, (long) arrayList.size()),
-          PSQLState.DATA_ERROR);
-    }
-
-    return buildArray(arrayList, (int) index, count);
-  }
-
-  private Object readBinaryArray(byte[] fieldBytes, int index, int count) throws SQLException {
-    return ArrayDecoding.readBinaryArray(index, count, fieldBytes, getConnection());
-  }
-
-  private ResultSet readBinaryResultSet(byte[] fieldBytes, int index, int count)
-      throws SQLException {
-    int dimensions = ByteConverter.int4(fieldBytes, 0);
-    // int flags = ByteConverter.int4(fieldBytes, 4); // bit 0: 0=no-nulls, 1=has-nulls
-    int elementOid = ByteConverter.int4(fieldBytes, 8);
-    int pos = 12;
-    int[] dims = new int[dimensions];
-    for (int d = 0; d < dimensions; d++) {
-      dims[d] = ByteConverter.int4(fieldBytes, pos);
-      pos += 4;
-      /* int lbound = ByteConverter.int4(fieldBytes, pos); */
-      pos += 4;
-    }
-    if (count > 0 && dimensions > 0) {
-      dims[0] = Math.min(count, dims[0]);
-    }
-    List<Tuple> rows = new ArrayList<>();
-    Field[] fields = new Field[2];
-
-    storeValues(fieldBytes, rows, fields, elementOid, dims, pos, 0, index);
-
-    BaseStatement stat = (BaseStatement) getConnection()
-        .createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY);
-    return stat.createDriverResultSet(fields, rows);
-  }
-
-  private int storeValues(byte[] fieldBytes, List<Tuple> rows, Field[] fields, int elementOid,
-      final int[] dims,
-      int pos, final int thisDimension, int index) throws SQLException {
-    // handle an empty array
-    if (dims.length == 0) {
-      fields[0] = new Field("INDEX", Oid.INT4);
-      fields[0].setFormat(Field.BINARY_FORMAT);
-      fields[1] = new Field("VALUE", elementOid);
-      fields[1].setFormat(Field.BINARY_FORMAT);
-      for (int i = 1; i < index; i++) {
-        int len = ByteConverter.int4(fieldBytes, pos);
-        pos += 4;
-        if (len != -1) {
-          pos += len;
+            b.append(c);
         }
-      }
-    } else if (thisDimension == dims.length - 1) {
-      fields[0] = new Field("INDEX", Oid.INT4);
-      fields[0].setFormat(Field.BINARY_FORMAT);
-      fields[1] = new Field("VALUE", elementOid);
-      fields[1].setFormat(Field.BINARY_FORMAT);
-      for (int i = 1; i < index; i++) {
-        int len = ByteConverter.int4(fieldBytes, pos);
-        pos += 4;
-        if (len != -1) {
-          pos += len;
+        b.append('"');
+    }
+
+    private BaseConnection getConnection() {
+        return connection;
+    }
+
+    @Override
+    public Object getArray() throws SQLException {
+        return getArrayImpl(1, 0, null);
+    }
+
+    @Override
+    public Object getArray(long index, int count) throws SQLException {
+        return getArrayImpl(index, count, null);
+    }
+
+    public Object getArrayImpl(Map<String, Class<?>> map) throws SQLException {
+        return getArrayImpl(1, 0, map);
+    }
+
+    @Override
+    public Object getArray(Map<String, Class<?>> map) throws SQLException {
+        return getArrayImpl(map);
+    }
+
+    @Override
+    public Object getArray(long index, int count, Map<String, Class<?>> map)
+            throws SQLException {
+        return getArrayImpl(index, count, map);
+    }
+
+    public Object getArrayImpl(long index, int count, Map<String, Class<?>> map)
+            throws SQLException {
+
+        // for now maps aren't supported.
+        if (map != null && !map.isEmpty()) {
+            throw Driver.notImplemented(this.getClass(), "getArrayImpl(long,int,Map)");
         }
-      }
-      for (int i = 0; i < dims[thisDimension]; i++) {
-        byte[][] rowData = new byte[2][];
-        rowData[0] = new byte[4];
-        ByteConverter.int4(rowData[0], 0, i + index);
-        rows.add(new Tuple(rowData));
-        int len = ByteConverter.int4(fieldBytes, pos);
-        pos += 4;
-        if (len == -1) {
-          continue;
+
+        // array index is out of range
+        if (index < 1) {
+            throw new PSQLException(GT.tr("The array index is out of range: {0}", index),
+                    PSQLState.DATA_ERROR);
         }
-        rowData[1] = new byte[len];
-        System.arraycopy(fieldBytes, pos, rowData[1], 0, rowData[1].length);
-        pos += len;
-      }
-    } else {
-      fields[0] = new Field("INDEX", Oid.INT4);
-      fields[0].setFormat(Field.BINARY_FORMAT);
-      fields[1] = new Field("VALUE", oid);
-      fields[1].setFormat(Field.BINARY_FORMAT);
-      int nextDimension = thisDimension + 1;
-      int dimensionsLeft = dims.length - nextDimension;
-      for (int i = 1; i < index; i++) {
-        pos = calcRemainingDataLength(fieldBytes, dims, pos, elementOid, nextDimension);
-      }
-      for (int i = 0; i < dims[thisDimension]; i++) {
-        byte[][] rowData = new byte[2][];
-        rowData[0] = new byte[4];
-        ByteConverter.int4(rowData[0], 0, i + index);
-        rows.add(new Tuple(rowData));
-        int dataEndPos = calcRemainingDataLength(fieldBytes, dims, pos, elementOid, nextDimension);
-        int dataLength = dataEndPos - pos;
-        rowData[1] = new byte[12 + 8 * dimensionsLeft + dataLength];
-        ByteConverter.int4(rowData[1], 0, dimensionsLeft);
-        System.arraycopy(fieldBytes, 4, rowData[1], 4, 8);
-        System.arraycopy(fieldBytes, 12 + nextDimension * 8, rowData[1], 12, dimensionsLeft * 8);
-        System.arraycopy(fieldBytes, pos, rowData[1], 12 + dimensionsLeft * 8, dataLength);
-        pos = dataEndPos;
-      }
-    }
-    return pos;
-  }
 
-  private int calcRemainingDataLength(byte[] fieldBytes,
-      int[] dims, int pos, int elementOid, int thisDimension) {
-    if (thisDimension == dims.length - 1) {
-      for (int i = 0; i < dims[thisDimension]; i++) {
-        int len = ByteConverter.int4(fieldBytes, pos);
-        pos += 4;
-        if (len == -1) {
-          continue;
+        if (fieldBytes != null) {
+            return readBinaryArray(fieldBytes, (int) index, count);
         }
-        pos += len;
-      }
-    } else {
-      pos = calcRemainingDataLength(fieldBytes, dims, elementOid, pos, thisDimension + 1);
-    }
-    return pos;
-  }
 
-  /**
-   * Build {@link ArrayList} from field's string input. As a result of this method
-   * {@link #arrayList} is build. Method can be called many times in order to make sure that array
-   * list is ready to use, however {@link #arrayList} will be set only once during first call.
-   */
-  @SuppressWarnings("try")
-  private PgArrayList buildArrayList(String fieldString) throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      if (arrayList == null) {
-        arrayList = ArrayDecoding.buildArrayList(fieldString, getConnection().getTypeInfo().getArrayDelimiter(oid));
-      }
-      return arrayList;
-    }
-  }
+        if (fieldString == null) {
+            return null;
+        }
 
-  /**
-   * Convert {@link ArrayList} to array.
-   *
-   * @param input list to be converted into array
-   */
-  private Object buildArray(ArrayDecoding.PgArrayList input, int index, int count) throws SQLException {
-    final BaseConnection connection = getConnection();
-    return ArrayDecoding.readStringArray(index, count, connection.getTypeInfo().getPGArrayElement(oid), input, connection);
-  }
+        final PgArrayList arrayList = buildArrayList(fieldString);
 
-  @Override
-  public int getBaseType() throws SQLException {
-    return getConnection().getTypeInfo().getSQLType(getBaseTypeName());
-  }
+        if (count == 0) {
+            count = arrayList.size();
+        }
 
-  @Override
-  public String getBaseTypeName() throws SQLException {
-    int elementOID = getConnection().getTypeInfo().getPGArrayElement(oid);
-    return getConnection().getTypeInfo().getPGType(elementOID);
-  }
+        // array index out of range
+        if ((index - 1) + count > arrayList.size()) {
+            throw new PSQLException(
+                    GT.tr("The array index is out of range: {0}, number of elements: {1}.",
+                            (index - 1) + count, (long) arrayList.size()),
+                    PSQLState.DATA_ERROR);
+        }
 
-  @Override
-  public ResultSet getResultSet() throws SQLException {
-    return getResultSetImpl(1, 0, null);
-  }
-
-  @Override
-  public ResultSet getResultSet(long index, int count) throws SQLException {
-    return getResultSetImpl(index, count, null);
-  }
-
-  @Override
-  public ResultSet getResultSet(Map<String, Class<?>> map) throws SQLException {
-    return getResultSetImpl(map);
-  }
-
-  @Override
-  public ResultSet getResultSet(long index, int count, Map<String, Class<?>> map)
-      throws SQLException {
-    return getResultSetImpl(index, count, map);
-  }
-
-  public ResultSet getResultSetImpl(Map<String, Class<?>> map) throws SQLException {
-    return getResultSetImpl(1, 0, map);
-  }
-
-  public ResultSet getResultSetImpl(long index, int count, Map<String, Class<?>> map)
-      throws SQLException {
-
-    // for now maps aren't supported.
-    if (map != null && !map.isEmpty()) {
-      throw Driver.notImplemented(this.getClass(), "getResultSetImpl(long,int,Map)");
+        return buildArray(arrayList, (int) index - 1, count);
     }
 
-    // array index is out of range
-    if (index < 1) {
-      throw new PSQLException(GT.tr("The array index is out of range: {0}", index),
-          PSQLState.DATA_ERROR);
+    private Object readBinaryArray(byte[] fieldBytes, int index, int count) throws SQLException {
+        return ArrayDecoding.readBinaryArray(index, count, fieldBytes, getConnection());
     }
 
-    if (fieldBytes != null) {
-      return readBinaryResultSet(fieldBytes, (int) index, count);
+    private ResultSet readBinaryResultSet(byte[] fieldBytes, int index, int count)
+            throws SQLException {
+        int dimensions = ByteConverter.int4(fieldBytes, 0);
+        // int flags = ByteConverter.int4(fieldBytes, 4); // bit 0: 0=no-nulls, 1=has-nulls
+        int elementOid = ByteConverter.int4(fieldBytes, 8);
+        int pos = 12;
+        int[] dims = new int[dimensions];
+        for (int d = 0; d < dimensions; d++) {
+            dims[d] = ByteConverter.int4(fieldBytes, pos);
+            pos += 4;
+            /* int lbound = ByteConverter.int4(fieldBytes, pos); */
+            pos += 4;
+        }
+        if (count > 0 && dimensions > 0) {
+            dims[0] = Math.min(count, dims[0]);
+        }
+        List<Tuple> rows = new ArrayList<>();
+        Field[] fields = new Field[2];
+
+        storeValues(fieldBytes, rows, fields, elementOid, dims, pos, 0, index);
+
+        BaseStatement stat = (BaseStatement) getConnection()
+                .createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY);
+        return stat.createDriverResultSet(fields, rows);
     }
 
-    final PgArrayList arrayList = buildArrayList(fieldString);
-
-    if (count == 0) {
-      count = arrayList.size();
+    private int storeValues(byte[] fieldBytes, List<Tuple> rows, Field[] fields, int elementOid,
+                            final int[] dims,
+                            int pos, final int thisDimension, int index) throws SQLException {
+        // handle an empty array
+        if (dims.length == 0) {
+            fields[0] = new Field("INDEX", Oid.INT4);
+            fields[0].setFormat(Field.BINARY_FORMAT);
+            fields[1] = new Field("VALUE", elementOid);
+            fields[1].setFormat(Field.BINARY_FORMAT);
+            for (int i = 1; i < index; i++) {
+                int len = ByteConverter.int4(fieldBytes, pos);
+                pos += 4;
+                if (len != -1) {
+                    pos += len;
+                }
+            }
+        } else if (thisDimension == dims.length - 1) {
+            fields[0] = new Field("INDEX", Oid.INT4);
+            fields[0].setFormat(Field.BINARY_FORMAT);
+            fields[1] = new Field("VALUE", elementOid);
+            fields[1].setFormat(Field.BINARY_FORMAT);
+            for (int i = 1; i < index; i++) {
+                int len = ByteConverter.int4(fieldBytes, pos);
+                pos += 4;
+                if (len != -1) {
+                    pos += len;
+                }
+            }
+            for (int i = 0; i < dims[thisDimension]; i++) {
+                byte[][] rowData = new byte[2][];
+                rowData[0] = new byte[4];
+                ByteConverter.int4(rowData[0], 0, i + index);
+                rows.add(new Tuple(rowData));
+                int len = ByteConverter.int4(fieldBytes, pos);
+                pos += 4;
+                if (len == -1) {
+                    continue;
+                }
+                rowData[1] = new byte[len];
+                System.arraycopy(fieldBytes, pos, rowData[1], 0, rowData[1].length);
+                pos += len;
+            }
+        } else {
+            fields[0] = new Field("INDEX", Oid.INT4);
+            fields[0].setFormat(Field.BINARY_FORMAT);
+            fields[1] = new Field("VALUE", oid);
+            fields[1].setFormat(Field.BINARY_FORMAT);
+            int nextDimension = thisDimension + 1;
+            int dimensionsLeft = dims.length - nextDimension;
+            for (int i = 1; i < index; i++) {
+                pos = calcRemainingDataLength(fieldBytes, dims, pos, elementOid, nextDimension);
+            }
+            for (int i = 0; i < dims[thisDimension]; i++) {
+                byte[][] rowData = new byte[2][];
+                rowData[0] = new byte[4];
+                ByteConverter.int4(rowData[0], 0, i + index);
+                rows.add(new Tuple(rowData));
+                int dataEndPos = calcRemainingDataLength(fieldBytes, dims, pos, elementOid, nextDimension);
+                int dataLength = dataEndPos - pos;
+                rowData[1] = new byte[12 + 8 * dimensionsLeft + dataLength];
+                ByteConverter.int4(rowData[1], 0, dimensionsLeft);
+                System.arraycopy(fieldBytes, 4, rowData[1], 4, 8);
+                System.arraycopy(fieldBytes, 12 + nextDimension * 8, rowData[1], 12, dimensionsLeft * 8);
+                System.arraycopy(fieldBytes, pos, rowData[1], 12 + dimensionsLeft * 8, dataLength);
+                pos = dataEndPos;
+            }
+        }
+        return pos;
     }
 
-    // array index out of range
-    if ((--index) + count > arrayList.size()) {
-      throw new PSQLException(
-          GT.tr("The array index is out of range: {0}, number of elements: {1}.",
-                  index + count, (long) arrayList.size()),
-          PSQLState.DATA_ERROR);
+    private int calcRemainingDataLength(byte[] fieldBytes,
+                                        int[] dims, int pos, int elementOid, int thisDimension) {
+        if (thisDimension == dims.length - 1) {
+            for (int i = 0; i < dims[thisDimension]; i++) {
+                int len = ByteConverter.int4(fieldBytes, pos);
+                pos += 4;
+                if (len == -1) {
+                    continue;
+                }
+                pos += len;
+            }
+        } else {
+            pos = calcRemainingDataLength(fieldBytes, dims, elementOid, pos, thisDimension + 1);
+        }
+        return pos;
     }
 
-    List<Tuple> rows = new ArrayList<>();
-
-    Field[] fields = new Field[2];
-
-    // one dimensional array
-    if (arrayList.dimensionsCount <= 1) {
-      // array element type
-      final int baseOid = getConnection().getTypeInfo().getPGArrayElement(oid);
-      fields[0] = new Field("INDEX", Oid.INT4);
-      fields[1] = new Field("VALUE", baseOid);
-
-      for (int i = 0; i < count; i++) {
-        int offset = (int) index + i;
-        byte[] [] t = new byte[2][0];
-        String v = (String) arrayList.get(offset);
-        t[0] = getConnection().encodeString(Integer.toString(offset + 1));
-        t[1] = v == null ? null : getConnection().encodeString(v);
-        rows.add(new Tuple(t));
-      }
-    } else {
-      // when multi-dimensional
-      fields[0] = new Field("INDEX", Oid.INT4);
-      fields[1] = new Field("VALUE", oid);
-      for (int i = 0; i < count; i++) {
-        int offset = (int) index + i;
-        byte[] [] t = new byte[2][0];
-        Object v = arrayList.get(offset);
-
-        t[0] = getConnection().encodeString(Integer.toString(offset + 1));
-        t[1] = v == null ? null : getConnection().encodeString(toString((ArrayDecoding.PgArrayList) v));
-        rows.add(new Tuple(t));
-      }
+    /**
+     * Build {@link ArrayList} from field's string input. As a result of this method
+     * {@link #arrayList} is build. Method can be called many times in order to make sure that array
+     * list is ready to use, however {@link #arrayList} will be set only once during first call.
+     */
+    @SuppressWarnings("try")
+    private PgArrayList buildArrayList(String fieldString) throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            if (arrayList == null) {
+                arrayList = ArrayDecoding.buildArrayList(fieldString, getConnection().getTypeInfo().getArrayDelimiter(oid));
+            }
+            return arrayList;
+        }
     }
 
-    BaseStatement stat = (BaseStatement) getConnection()
-        .createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY);
-    return stat.createDriverResultSet(fields, rows);
-  }
-
-  @Override
-  @SuppressWarnings({"rawtypes", "unchecked"})
-  public String toString() {
-    if (fieldString == null && fieldBytes != null) {
-      try {
-        Object array = readBinaryArray(fieldBytes, 1, 0);
-
-        final ArrayEncoding.ArrayEncoder arraySupport = ArrayEncoding.getArrayEncoder(array);
-        assert arraySupport != null;
-        fieldString = arraySupport.toArrayString(connection.getTypeInfo().getArrayDelimiter(oid), array);
-      } catch (SQLException e) {
-        fieldString = "NULL"; // punt
-      }
-    }
-    return fieldString;
-  }
-
-  /**
-   * Convert array list to PG String representation (e.g. {0,1,2}).
-   */
-  private String toString(ArrayDecoding.PgArrayList list) throws SQLException {
-    if (list == null) {
-      return "NULL";
+    /**
+     * Convert {@link ArrayList} to array.
+     *
+     * @param input list to be converted into array
+     */
+    private Object buildArray(ArrayDecoding.PgArrayList input, int index, int count) throws SQLException {
+        final BaseConnection connection = getConnection();
+        return ArrayDecoding.readStringArray(index, count, connection.getTypeInfo().getPGArrayElement(oid), input, connection);
     }
 
-    StringBuilder b = new StringBuilder().append('{');
-
-    char delim = getConnection().getTypeInfo().getArrayDelimiter(oid);
-
-    for (int i = 0; i < list.size(); i++) {
-      Object v = list.get(i);
-
-      if (i > 0) {
-        b.append(delim);
-      }
-
-      if (v == null) {
-        b.append("NULL");
-      } else if (v instanceof ArrayDecoding.PgArrayList) {
-        b.append(toString((ArrayDecoding.PgArrayList) v));
-      } else {
-        escapeArrayElement(b, (String) v);
-      }
+    @Override
+    public int getBaseType() throws SQLException {
+        return getConnection().getTypeInfo().getSQLType(getBaseTypeName());
     }
 
-    b.append('}');
-
-    return b.toString();
-  }
-
-  public static void escapeArrayElement(StringBuilder b, String s) {
-    b.append('"');
-    for (int j = 0; j < s.length(); j++) {
-      char c = s.charAt(j);
-      if (c == '"' || c == '\\') {
-        b.append('\\');
-      }
-
-      b.append(c);
+    @Override
+    public String getBaseTypeName() throws SQLException {
+        int elementOID = getConnection().getTypeInfo().getPGArrayElement(oid);
+        return getConnection().getTypeInfo().getPGType(elementOID);
     }
-    b.append('"');
-  }
 
-  public boolean isBinary() {
-    return fieldBytes != null;
-  }
+    @Override
+    public ResultSet getResultSet() throws SQLException {
+        return getResultSetImpl(1, 0, null);
+    }
 
-  public byte [] toBytes() {
-    return fieldBytes;
-  }
+    @Override
+    public ResultSet getResultSet(long index, int count) throws SQLException {
+        return getResultSetImpl(index, count, null);
+    }
 
-  @Override
-  public void free() throws SQLException {
-    connection = null;
-    fieldString = null;
-    fieldBytes = null;
-    arrayList = null;
-  }
+    @Override
+    public ResultSet getResultSet(Map<String, Class<?>> map) throws SQLException {
+        return getResultSetImpl(map);
+    }
+
+    @Override
+    public ResultSet getResultSet(long index, int count, Map<String, Class<?>> map)
+            throws SQLException {
+        return getResultSetImpl(index, count, map);
+    }
+
+    public ResultSet getResultSetImpl(Map<String, Class<?>> map) throws SQLException {
+        return getResultSetImpl(1, 0, map);
+    }
+
+    public ResultSet getResultSetImpl(long index, int count, Map<String, Class<?>> map)
+            throws SQLException {
+
+        // for now maps aren't supported.
+        if (map != null && !map.isEmpty()) {
+            throw Driver.notImplemented(this.getClass(), "getResultSetImpl(long,int,Map)");
+        }
+
+        // array index is out of range
+        if (index < 1) {
+            throw new PSQLException(GT.tr("The array index is out of range: {0}", index),
+                    PSQLState.DATA_ERROR);
+        }
+
+        if (fieldBytes != null) {
+            return readBinaryResultSet(fieldBytes, (int) index, count);
+        }
+
+        final PgArrayList arrayList = buildArrayList(fieldString);
+
+        if (count == 0) {
+            count = arrayList.size();
+        }
+
+        // array index out of range
+        if ((--index) + count > arrayList.size()) {
+            throw new PSQLException(
+                    GT.tr("The array index is out of range: {0}, number of elements: {1}.",
+                            index + count, (long) arrayList.size()),
+                    PSQLState.DATA_ERROR);
+        }
+
+        List<Tuple> rows = new ArrayList<>();
+
+        Field[] fields = new Field[2];
+
+        // one dimensional array
+        if (arrayList.dimensionsCount <= 1) {
+            // array element type
+            final int baseOid = getConnection().getTypeInfo().getPGArrayElement(oid);
+            fields[0] = new Field("INDEX", Oid.INT4);
+            fields[1] = new Field("VALUE", baseOid);
+
+            for (int i = 0; i < count; i++) {
+                int offset = (int) index + i;
+                byte[][] t = new byte[2][0];
+                String v = (String) arrayList.get(offset);
+                t[0] = getConnection().encodeString(Integer.toString(offset + 1));
+                t[1] = v == null ? null : getConnection().encodeString(v);
+                rows.add(new Tuple(t));
+            }
+        } else {
+            // when multi-dimensional
+            fields[0] = new Field("INDEX", Oid.INT4);
+            fields[1] = new Field("VALUE", oid);
+            for (int i = 0; i < count; i++) {
+                int offset = (int) index + i;
+                byte[][] t = new byte[2][0];
+                Object v = arrayList.get(offset);
+
+                t[0] = getConnection().encodeString(Integer.toString(offset + 1));
+                t[1] = v == null ? null : getConnection().encodeString(toString((ArrayDecoding.PgArrayList) v));
+                rows.add(new Tuple(t));
+            }
+        }
+
+        BaseStatement stat = (BaseStatement) getConnection()
+                .createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY);
+        return stat.createDriverResultSet(fields, rows);
+    }
+
+    @Override
+    @SuppressWarnings({"rawtypes", "unchecked"})
+    public String toString() {
+        if (fieldString == null && fieldBytes != null) {
+            try {
+                Object array = readBinaryArray(fieldBytes, 1, 0);
+
+                final ArrayEncoding.ArrayEncoder arraySupport = ArrayEncoding.getArrayEncoder(array);
+                assert arraySupport != null;
+                fieldString = arraySupport.toArrayString(connection.getTypeInfo().getArrayDelimiter(oid), array);
+            } catch (SQLException e) {
+                fieldString = "NULL"; // punt
+            }
+        }
+        return fieldString;
+    }
+
+    /**
+     * Convert array list to PG String representation (e.g. {0,1,2}).
+     */
+    private String toString(ArrayDecoding.PgArrayList list) throws SQLException {
+        if (list == null) {
+            return "NULL";
+        }
+
+        StringBuilder b = new StringBuilder().append('{');
+
+        char delim = getConnection().getTypeInfo().getArrayDelimiter(oid);
+
+        for (int i = 0; i < list.size(); i++) {
+            Object v = list.get(i);
+
+            if (i > 0) {
+                b.append(delim);
+            }
+
+            if (v == null) {
+                b.append("NULL");
+            } else if (v instanceof ArrayDecoding.PgArrayList) {
+                b.append(toString((ArrayDecoding.PgArrayList) v));
+            } else {
+                escapeArrayElement(b, (String) v);
+            }
+        }
+
+        b.append('}');
+
+        return b.toString();
+    }
+
+    public boolean isBinary() {
+        return fieldBytes != null;
+    }
+
+    public byte[] toBytes() {
+        return fieldBytes;
+    }
+
+    @Override
+    public void free() throws SQLException {
+        connection = null;
+        fieldString = null;
+        fieldBytes = null;
+        arrayList = null;
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/PgBlob.java b/pgjdbc/src/main/java/org/postgresql/jdbc/PgBlob.java
index c7800af..ece51d6 100644
--- a/pgjdbc/src/main/java/org/postgresql/jdbc/PgBlob.java
+++ b/pgjdbc/src/main/java/org/postgresql/jdbc/PgBlob.java
@@ -15,39 +15,39 @@ import java.sql.SQLException;
 @SuppressWarnings("try")
 public class PgBlob extends AbstractBlobClob implements Blob {
 
-  public PgBlob(BaseConnection conn, long oid) throws SQLException {
-    super(conn, oid);
-  }
-
-  @Override
-  public InputStream getBinaryStream(long pos, long length)
-      throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      checkFreed();
-      LargeObject subLO = getLo(false).copy();
-      addSubLO(subLO);
-      if (pos > Integer.MAX_VALUE) {
-        subLO.seek64(pos - 1, LargeObject.SEEK_SET);
-      } else {
-        subLO.seek((int) pos - 1, LargeObject.SEEK_SET);
-      }
-      return subLO.getInputStream(length);
+    public PgBlob(BaseConnection conn, long oid) throws SQLException {
+        super(conn, oid);
     }
-  }
 
-  @Override
-  public int setBytes(long pos, byte[] bytes) throws SQLException {
-    return setBytes(pos, bytes, 0, bytes.length);
-  }
-
-  @Override
-  public int setBytes(long pos, byte[] bytes, int offset, int len)
-      throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      assertPosition(pos);
-      getLo(true).seek((int) (pos - 1));
-      getLo(true).write(bytes, offset, len);
-      return len;
+    @Override
+    public InputStream getBinaryStream(long pos, long length)
+            throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            checkFreed();
+            LargeObject subLO = getLo(false).copy();
+            addSubLO(subLO);
+            if (pos > Integer.MAX_VALUE) {
+                subLO.seek64(pos - 1, LargeObject.SEEK_SET);
+            } else {
+                subLO.seek((int) pos - 1, LargeObject.SEEK_SET);
+            }
+            return subLO.getInputStream(length);
+        }
+    }
+
+    @Override
+    public int setBytes(long pos, byte[] bytes) throws SQLException {
+        return setBytes(pos, bytes, 0, bytes.length);
+    }
+
+    @Override
+    public int setBytes(long pos, byte[] bytes, int offset, int len)
+            throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            assertPosition(pos);
+            getLo(true).seek((int) (pos - 1));
+            getLo(true).write(bytes, offset, len);
+            return len;
+        }
     }
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/PgCallableStatement.java b/pgjdbc/src/main/java/org/postgresql/jdbc/PgCallableStatement.java
index 55b7222..b232ac3 100644
--- a/pgjdbc/src/main/java/org/postgresql/jdbc/PgCallableStatement.java
+++ b/pgjdbc/src/main/java/org/postgresql/jdbc/PgCallableStatement.java
@@ -36,973 +36,973 @@ import java.util.Map;
 
 @SuppressWarnings("try")
 class PgCallableStatement extends PgPreparedStatement implements CallableStatement {
-  // Used by the callablestatement style methods
-  private final boolean isFunction;
-  // functionReturnType contains the user supplied value to check
-  // testReturn contains a modified version to make it easier to
-  // check the getXXX methods..
-  private int [] functionReturnType;
-  private int [] testReturn;
-  // returnTypeSet is true when a proper call to registerOutParameter has been made
-  private boolean returnTypeSet;
-  protected Object [] callResult;
-  private int lastIndex;
-
-  PgCallableStatement(PgConnection connection, String sql, int rsType, int rsConcurrency,
-      int rsHoldability) throws SQLException {
-    super(connection, connection.borrowCallableQuery(sql), rsType, rsConcurrency, rsHoldability);
-    this.isFunction = preparedQuery.isFunction;
-
-    if (this.isFunction) {
-      int inParamCount = this.preparedParameters.getInParameterCount() + 1;
-      this.testReturn = new int[inParamCount];
-      this.functionReturnType = new int[inParamCount];
-    }
-  }
-
-  @Override
-  public int executeUpdate() throws SQLException {
-    if (isFunction) {
-      executeWithFlags(0);
-      return 0;
-    }
-    return super.executeUpdate();
-  }
-
-  @Override
-  public Object getObject(int i, Map<String, Class<?>> map)
-      throws SQLException {
-    return getObjectImpl(i, map);
-  }
-
-  @Override
-  public Object getObject(String s, Map<String, Class<?>> map) throws SQLException {
-    return getObjectImpl(s, map);
-  }
-
-  @Override
-  public boolean executeWithFlags(int flags) throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      boolean hasResultSet = super.executeWithFlags(flags);
-      int[] functionReturnType = this.functionReturnType;
-      if (!isFunction || !returnTypeSet || functionReturnType == null) {
-        return hasResultSet;
-      }
-
-      // If we are executing and there are out parameters
-      // callable statement function set the return data
-      if (!hasResultSet) {
-        throw new PSQLException(GT.tr("A CallableStatement was executed with nothing returned."),
-            PSQLState.NO_DATA);
-      }
-
-      ResultSet rs = getResultSet();
-      if (!rs.next()) {
-        throw new PSQLException(GT.tr("A CallableStatement was executed with nothing returned."),
-            PSQLState.NO_DATA);
-      }
-
-      // figure out how many columns
-      int cols = rs.getMetaData().getColumnCount();
-
-      int outParameterCount = preparedParameters.getOutParameterCount();
-
-      if (cols != outParameterCount) {
-        throw new PSQLException(
-            GT.tr("A CallableStatement was executed with an invalid number of parameters"),
-            PSQLState.SYNTAX_ERROR);
-      }
-
-      // reset last result fetched (for wasNull)
-      lastIndex = 0;
-
-      // allocate enough space for all possible parameters without regard to in/out
-      Object[] callResult = new Object[preparedParameters.getParameterCount() + 1];
-      this.callResult = callResult;
-
-      // move them into the result set
-      for (int i = 0, j = 0; i < cols; i++, j++) {
-        // find the next out parameter, the assumption is that the functionReturnType
-        // array will be initialized with 0 and only out parameters will have values
-        // other than 0. 0 is the value for java.sql.Types.NULL, which should not
-        // conflict
-        while (j < functionReturnType.length && functionReturnType[j] == 0) {
-          j++;
-        }
-
-        callResult[j] = rs.getObject(i + 1);
-        int columnType = rs.getMetaData().getColumnType(i + 1);
-
-        if (columnType != functionReturnType[j]) {
-          // this is here for the sole purpose of passing the cts
-          if (columnType == Types.DOUBLE && functionReturnType[j] == Types.REAL) {
-            // return it as a float
-            Object result = callResult[j];
-            if (result != null) {
-              callResult[j] = ((Double) result).floatValue();
-            }
-          } else if (columnType == Types.REF_CURSOR && functionReturnType[j] == Types.OTHER) {
-            // For backwards compatibility reasons we support that ref cursors can be
-            // registered with both Types.OTHER and Types.REF_CURSOR so we allow
-            // this specific mismatch
-          } else {
-            throw new PSQLException(GT.tr(
-                "A CallableStatement function was executed and the out parameter {0} was of type {1} however type {2} was registered.",
-                i + 1, "java.sql.Types=" + columnType, "java.sql.Types=" + functionReturnType[j]),
-                PSQLState.DATA_TYPE_MISMATCH);
-          }
-        }
-
-      }
-      rs.close();
-      result = null;
-    }
-    return false;
-  }
-
-  /**
-   * {@inheritDoc}
-   *
-   * <p>Before executing a stored procedure call you must explicitly call registerOutParameter to
-   * register the java.sql.Type of each out parameter.</p>
-   *
-   * <p>Note: When reading the value of an out parameter, you must use the getXXX method whose Java
-   * type XXX corresponds to the parameter's registered SQL type.</p>
-   *
-   * <p>ONLY 1 RETURN PARAMETER if {?= call ..} syntax is used</p>
-   *
-   * @param parameterIndex the first parameter is 1, the second is 2,...
-   * @param sqlType SQL type code defined by java.sql.Types; for parameters of type Numeric or
-   *        Decimal use the version of registerOutParameter that accepts a scale value
-   * @throws SQLException if a database-access error occurs.
-   */
-  @Override
-  public void registerOutParameter(int parameterIndex, int sqlType)
-      throws SQLException {
-    checkClosed();
-    switch (sqlType) {
-      case Types.TINYINT:
-        // we don't have a TINYINT type use SMALLINT
-        sqlType = Types.SMALLINT;
-        break;
-      case Types.LONGVARCHAR:
-        sqlType = Types.VARCHAR;
-        break;
-      case Types.DECIMAL:
-        sqlType = Types.NUMERIC;
-        break;
-      case Types.FLOAT:
-        // float is the same as double
-        sqlType = Types.DOUBLE;
-        break;
-      case Types.VARBINARY:
-      case Types.LONGVARBINARY:
-        sqlType = Types.BINARY;
-        break;
-      case Types.BOOLEAN:
-        sqlType = Types.BIT;
-        break;
-      default:
-        break;
-    }
-    int[] functionReturnType = this.functionReturnType;
-    int[] testReturn = this.testReturn;
-    if (!isFunction || functionReturnType == null || testReturn == null) {
-      throw new PSQLException(
-          GT.tr(
-              "This statement does not declare an OUT parameter.  Use '{' ?= call ... '}' to declare one."),
-          PSQLState.STATEMENT_NOT_ALLOWED_IN_FUNCTION_CALL);
-    }
-
-    preparedParameters.registerOutParameter(parameterIndex, sqlType);
+    // Used by the callablestatement style methods
+    private final boolean isFunction;
+    protected Object[] callResult;
     // functionReturnType contains the user supplied value to check
     // testReturn contains a modified version to make it easier to
     // check the getXXX methods..
-    functionReturnType[parameterIndex - 1] = sqlType;
-    testReturn[parameterIndex - 1] = sqlType;
+    private int[] functionReturnType;
+    private int[] testReturn;
+    // returnTypeSet is true when a proper call to registerOutParameter has been made
+    private boolean returnTypeSet;
+    private int lastIndex;
 
-    if (functionReturnType[parameterIndex - 1] == Types.CHAR
-        || functionReturnType[parameterIndex - 1] == Types.LONGVARCHAR) {
-      testReturn[parameterIndex - 1] = Types.VARCHAR;
-    } else if (functionReturnType[parameterIndex - 1] == Types.FLOAT) {
-      testReturn[parameterIndex - 1] = Types.REAL; // changes to streamline later error checking
-    }
-    returnTypeSet = true;
-  }
+    PgCallableStatement(PgConnection connection, String sql, int rsType, int rsConcurrency,
+                        int rsHoldability) throws SQLException {
+        super(connection, connection.borrowCallableQuery(sql), rsType, rsConcurrency, rsHoldability);
+        this.isFunction = preparedQuery.isFunction;
 
-  @Override
-  public boolean wasNull() throws SQLException {
-    if (lastIndex == 0 || callResult == null) {
-      throw new PSQLException(GT.tr("wasNull cannot be call before fetching a result."),
-          PSQLState.OBJECT_NOT_IN_STATE);
+        if (this.isFunction) {
+            int inParamCount = this.preparedParameters.getInParameterCount() + 1;
+            this.testReturn = new int[inParamCount];
+            this.functionReturnType = new int[inParamCount];
+        }
     }
 
-    // check to see if the last access threw an exception
-    return callResult[lastIndex - 1] == null;
-  }
-
-  @Override
-  public String getString(int parameterIndex) throws SQLException {
-    Object result = checkIndex(parameterIndex, Types.VARCHAR, "String");
-    return (String) result;
-  }
-
-  @Override
-  public boolean getBoolean(int parameterIndex) throws SQLException {
-    Object result = checkIndex(parameterIndex, Types.BIT, "Boolean");
-    if (result == null) {
-      return false;
-    }
-    return BooleanTypeUtil.castToBoolean(result);
-  }
-
-  @Override
-  public byte getByte(int parameterIndex) throws SQLException {
-    // fake tiny int with smallint
-    Object result = checkIndex(parameterIndex, Types.SMALLINT, "Byte");
-
-    if (result == null) {
-      return 0;
+    @Override
+    public int executeUpdate() throws SQLException {
+        if (isFunction) {
+            executeWithFlags(0);
+            return 0;
+        }
+        return super.executeUpdate();
     }
 
-    return ((Integer) result).byteValue();
-
-  }
-
-  @Override
-  public short getShort(int parameterIndex) throws SQLException {
-    Object result = checkIndex(parameterIndex, Types.SMALLINT, "Short");
-    if (result == null) {
-      return 0;
-    }
-    return ((Integer) result).shortValue();
-  }
-
-  @Override
-  public int getInt(int parameterIndex) throws SQLException {
-    Object result = checkIndex(parameterIndex, Types.INTEGER, "Int");
-    if (result == null) {
-      return 0;
+    @Override
+    public Object getObject(int i, Map<String, Class<?>> map)
+            throws SQLException {
+        return getObjectImpl(i, map);
     }
 
-    return (Integer) result;
-  }
-
-  @Override
-  public long getLong(int parameterIndex) throws SQLException {
-    Object result = checkIndex(parameterIndex, Types.BIGINT, "Long");
-    if (result == null) {
-      return 0;
+    @Override
+    public Object getObject(String s, Map<String, Class<?>> map) throws SQLException {
+        return getObjectImpl(s, map);
     }
 
-    return (Long) result;
-  }
+    @Override
+    public boolean executeWithFlags(int flags) throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            boolean hasResultSet = super.executeWithFlags(flags);
+            int[] functionReturnType = this.functionReturnType;
+            if (!isFunction || !returnTypeSet || functionReturnType == null) {
+                return hasResultSet;
+            }
 
-  @Override
-  public float getFloat(int parameterIndex) throws SQLException {
-    Object result = checkIndex(parameterIndex, Types.REAL, "Float");
-    if (result == null) {
-      return 0;
+            // If we are executing and there are out parameters
+            // callable statement function set the return data
+            if (!hasResultSet) {
+                throw new PSQLException(GT.tr("A CallableStatement was executed with nothing returned."),
+                        PSQLState.NO_DATA);
+            }
+
+            ResultSet rs = getResultSet();
+            if (!rs.next()) {
+                throw new PSQLException(GT.tr("A CallableStatement was executed with nothing returned."),
+                        PSQLState.NO_DATA);
+            }
+
+            // figure out how many columns
+            int cols = rs.getMetaData().getColumnCount();
+
+            int outParameterCount = preparedParameters.getOutParameterCount();
+
+            if (cols != outParameterCount) {
+                throw new PSQLException(
+                        GT.tr("A CallableStatement was executed with an invalid number of parameters"),
+                        PSQLState.SYNTAX_ERROR);
+            }
+
+            // reset last result fetched (for wasNull)
+            lastIndex = 0;
+
+            // allocate enough space for all possible parameters without regard to in/out
+            Object[] callResult = new Object[preparedParameters.getParameterCount() + 1];
+            this.callResult = callResult;
+
+            // move them into the result set
+            for (int i = 0, j = 0; i < cols; i++, j++) {
+                // find the next out parameter, the assumption is that the functionReturnType
+                // array will be initialized with 0 and only out parameters will have values
+                // other than 0. 0 is the value for java.sql.Types.NULL, which should not
+                // conflict
+                while (j < functionReturnType.length && functionReturnType[j] == 0) {
+                    j++;
+                }
+
+                callResult[j] = rs.getObject(i + 1);
+                int columnType = rs.getMetaData().getColumnType(i + 1);
+
+                if (columnType != functionReturnType[j]) {
+                    // this is here for the sole purpose of passing the cts
+                    if (columnType == Types.DOUBLE && functionReturnType[j] == Types.REAL) {
+                        // return it as a float
+                        Object result = callResult[j];
+                        if (result != null) {
+                            callResult[j] = ((Double) result).floatValue();
+                        }
+                    } else if (columnType == Types.REF_CURSOR && functionReturnType[j] == Types.OTHER) {
+                        // For backwards compatibility reasons we support that ref cursors can be
+                        // registered with both Types.OTHER and Types.REF_CURSOR so we allow
+                        // this specific mismatch
+                    } else {
+                        throw new PSQLException(GT.tr(
+                                "A CallableStatement function was executed and the out parameter {0} was of type {1} however type {2} was registered.",
+                                i + 1, "java.sql.Types=" + columnType, "java.sql.Types=" + functionReturnType[j]),
+                                PSQLState.DATA_TYPE_MISMATCH);
+                    }
+                }
+
+            }
+            rs.close();
+            result = null;
+        }
+        return false;
     }
 
-    return (Float) result;
-  }
+    /**
+     * {@inheritDoc}
+     *
+     * <p>Before executing a stored procedure call you must explicitly call registerOutParameter to
+     * register the java.sql.Type of each out parameter.</p>
+     *
+     * <p>Note: When reading the value of an out parameter, you must use the getXXX method whose Java
+     * type XXX corresponds to the parameter's registered SQL type.</p>
+     *
+     * <p>ONLY 1 RETURN PARAMETER if {?= call ..} syntax is used</p>
+     *
+     * @param parameterIndex the first parameter is 1, the second is 2,...
+     * @param sqlType        SQL type code defined by java.sql.Types; for parameters of type Numeric or
+     *                       Decimal use the version of registerOutParameter that accepts a scale value
+     * @throws SQLException if a database-access error occurs.
+     */
+    @Override
+    public void registerOutParameter(int parameterIndex, int sqlType)
+            throws SQLException {
+        checkClosed();
+        switch (sqlType) {
+            case Types.TINYINT:
+                // we don't have a TINYINT type use SMALLINT
+                sqlType = Types.SMALLINT;
+                break;
+            case Types.LONGVARCHAR:
+                sqlType = Types.VARCHAR;
+                break;
+            case Types.DECIMAL:
+                sqlType = Types.NUMERIC;
+                break;
+            case Types.FLOAT:
+                // float is the same as double
+                sqlType = Types.DOUBLE;
+                break;
+            case Types.VARBINARY:
+            case Types.LONGVARBINARY:
+                sqlType = Types.BINARY;
+                break;
+            case Types.BOOLEAN:
+                sqlType = Types.BIT;
+                break;
+            default:
+                break;
+        }
+        int[] functionReturnType = this.functionReturnType;
+        int[] testReturn = this.testReturn;
+        if (!isFunction || functionReturnType == null || testReturn == null) {
+            throw new PSQLException(
+                    GT.tr(
+                            "This statement does not declare an OUT parameter.  Use '{' ?= call ... '}' to declare one."),
+                    PSQLState.STATEMENT_NOT_ALLOWED_IN_FUNCTION_CALL);
+        }
 
-  @Override
-  public double getDouble(int parameterIndex) throws SQLException {
-    Object result = checkIndex(parameterIndex, Types.DOUBLE, "Double");
-    if (result == null) {
-      return 0;
+        preparedParameters.registerOutParameter(parameterIndex, sqlType);
+        // functionReturnType contains the user supplied value to check
+        // testReturn contains a modified version to make it easier to
+        // check the getXXX methods.
+        functionReturnType[parameterIndex - 1] = sqlType;
+        testReturn[parameterIndex - 1] = sqlType;
+
+        if (functionReturnType[parameterIndex - 1] == Types.CHAR
+                || functionReturnType[parameterIndex - 1] == Types.LONGVARCHAR) {
+            testReturn[parameterIndex - 1] = Types.VARCHAR;
+        } else if (functionReturnType[parameterIndex - 1] == Types.FLOAT) {
+            testReturn[parameterIndex - 1] = Types.REAL; // changes to streamline later error checking
+        }
+        returnTypeSet = true;
     }
 
-    return (Double) result;
-  }
+    @Override
+    public boolean wasNull() throws SQLException {
+        if (lastIndex == 0 || callResult == null) {
+            throw new PSQLException(GT.tr("wasNull cannot be call before fetching a result."),
+                    PSQLState.OBJECT_NOT_IN_STATE);
+        }
 
-  @Override
-  @SuppressWarnings("deprecation")
-  public BigDecimal getBigDecimal(int parameterIndex, int scale) throws SQLException {
-    Object result = checkIndex(parameterIndex, Types.NUMERIC, "BigDecimal");
-    return (BigDecimal) result;
-  }
-
-  @Override
-  public byte [] getBytes(int parameterIndex) throws SQLException {
-    Object result = checkIndex(parameterIndex, Types.VARBINARY, Types.BINARY, "Bytes");
-    return (byte []) result;
-  }
-
-  @Override
-  public Date getDate(int parameterIndex) throws SQLException {
-    Object result = checkIndex(parameterIndex, Types.DATE, "Date");
-    return (Date) result;
-  }
-
-  @Override
-  public Time getTime(int parameterIndex) throws SQLException {
-    Object result = checkIndex(parameterIndex, Types.TIME, "Time");
-    return (Time) result;
-  }
-
-  @Override
-  public Timestamp getTimestamp(int parameterIndex) throws SQLException {
-    Object result = checkIndex(parameterIndex, Types.TIMESTAMP, "Timestamp");
-    return (Timestamp) result;
-  }
-
-  @Override
-  public Object getObject(int parameterIndex) throws SQLException {
-    return getCallResult(parameterIndex);
-  }
-
-  /**
-   * helperfunction for the getXXX calls to check isFunction and index == 1 Compare BOTH type fields
-   * against the return type.
-   *
-   * @param parameterIndex parameter index (1-based)
-   * @param type1 type 1
-   * @param type2 type 2
-   * @param getName getter name
-   * @throws SQLException if something goes wrong
-   */
-  protected Object checkIndex(int parameterIndex, int type1, int type2, String getName)
-      throws SQLException {
-    Object result = getCallResult(parameterIndex);
-    int testReturn = this.testReturn != null ? this.testReturn[parameterIndex - 1] : -1;
-    if (type1 != testReturn && type2 != testReturn) {
-      throw new PSQLException(
-          GT.tr("Parameter of type {0} was registered, but call to get{1} (sqltype={2}) was made.",
-                  "java.sql.Types=" + testReturn, getName,
-                  "java.sql.Types=" + type1),
-          PSQLState.MOST_SPECIFIC_TYPE_DOES_NOT_MATCH);
-    }
-    return result;
-  }
-
-  /**
-   * Helper function for the getXXX calls to check isFunction and index == 1.
-   *
-   * @param parameterIndex parameter index (1-based)
-   * @param type type
-   * @param getName getter name
-   * @throws SQLException if given index is not valid
-   */
-  protected Object checkIndex(int parameterIndex,
-      int type, String getName) throws SQLException {
-    Object result = getCallResult(parameterIndex);
-    int testReturn = this.testReturn != null ? this.testReturn[parameterIndex - 1] : -1;
-    if (type != testReturn) {
-      throw new PSQLException(
-          GT.tr("Parameter of type {0} was registered, but call to get{1} (sqltype={2}) was made.",
-              "java.sql.Types=" + testReturn, getName,
-                  "java.sql.Types=" + type),
-          PSQLState.MOST_SPECIFIC_TYPE_DOES_NOT_MATCH);
-    }
-    return result;
-  }
-
-  private Object getCallResult(int parameterIndex) throws SQLException {
-    checkClosed();
-
-    if (!isFunction) {
-      throw new PSQLException(
-          GT.tr(
-              "A CallableStatement was declared, but no call to registerOutParameter(1, <some type>) was made."),
-          PSQLState.STATEMENT_NOT_ALLOWED_IN_FUNCTION_CALL);
+        // check to see if the last access threw an exception
+        return callResult[lastIndex - 1] == null;
     }
 
-    if (!returnTypeSet) {
-      throw new PSQLException(GT.tr("No function outputs were registered."),
-          PSQLState.OBJECT_NOT_IN_STATE);
+    @Override
+    public String getString(int parameterIndex) throws SQLException {
+        Object result = checkIndex(parameterIndex, Types.VARCHAR, "String");
+        return (String) result;
     }
 
-    Object [] callResult = this.callResult;
-    if (callResult == null) {
-      throw new PSQLException(
-          GT.tr("Results cannot be retrieved from a CallableStatement before it is executed."),
-          PSQLState.NO_DATA);
+    @Override
+    public boolean getBoolean(int parameterIndex) throws SQLException {
+        Object result = checkIndex(parameterIndex, Types.BIT, "Boolean");
+        if (result == null) {
+            return false;
+        }
+        return BooleanTypeUtil.castToBoolean(result);
     }
 
-    lastIndex = parameterIndex;
-    return callResult[parameterIndex - 1];
-  }
+    @Override
+    public byte getByte(int parameterIndex) throws SQLException {
+        // fake tiny int with smallint
+        Object result = checkIndex(parameterIndex, Types.SMALLINT, "Byte");
 
-  @Override
-  protected BatchResultHandler createBatchHandler(Query[] queries,
-      ParameterList[] parameterLists) {
-    return new CallableBatchResultHandler(this, queries, parameterLists);
-  }
+        if (result == null) {
+            return 0;
+        }
 
-  @Override
-  public Array getArray(int i) throws SQLException {
-    Object result = checkIndex(i, Types.ARRAY, "Array");
-    return (Array) result;
-  }
+        return ((Integer) result).byteValue();
 
-  @Override
-  public BigDecimal getBigDecimal(int parameterIndex) throws SQLException {
-    Object result = checkIndex(parameterIndex, Types.NUMERIC, "BigDecimal");
-    return (BigDecimal) result;
-  }
-
-  @Override
-  public Blob getBlob(int i) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "getBlob(int)");
-  }
-
-  @Override
-  public Clob getClob(int i) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "getClob(int)");
-  }
-
-  public Object getObjectImpl(int i, Map<String, Class<?>> map) throws SQLException {
-    if (map == null || map.isEmpty()) {
-      return getObject(i);
-    }
-    throw Driver.notImplemented(this.getClass(), "getObjectImpl(int,Map)");
-  }
-
-  @Override
-  public Ref getRef(int i) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "getRef(int)");
-  }
-
-  @Override
-  public Date getDate(int i, Calendar cal) throws SQLException {
-    Object result = checkIndex(i, Types.DATE, "Date");
-
-    if (result == null) {
-      return null;
     }
 
-    String value = result.toString();
-    return getTimestampUtils().toDate(cal, value);
-  }
-
-  @Override
-  public Time getTime(int i, Calendar cal) throws SQLException {
-    Object result = checkIndex(i, Types.TIME, "Time");
-
-    if (result == null) {
-      return null;
+    @Override
+    public short getShort(int parameterIndex) throws SQLException {
+        Object result = checkIndex(parameterIndex, Types.SMALLINT, "Short");
+        if (result == null) {
+            return 0;
+        }
+        return ((Integer) result).shortValue();
     }
 
-    String value = result.toString();
-    return getTimestampUtils().toTime(cal, value);
-  }
+    @Override
+    public int getInt(int parameterIndex) throws SQLException {
+        Object result = checkIndex(parameterIndex, Types.INTEGER, "Int");
+        if (result == null) {
+            return 0;
+        }
 
-  @Override
-  public Timestamp getTimestamp(int i, Calendar cal) throws SQLException {
-    Object result = checkIndex(i, Types.TIMESTAMP, "Timestamp");
-
-    if (result == null) {
-      return null;
+        return (Integer) result;
     }
 
-    String value = result.toString();
-    return getTimestampUtils().toTimestamp(cal, value);
-  }
+    @Override
+    public long getLong(int parameterIndex) throws SQLException {
+        Object result = checkIndex(parameterIndex, Types.BIGINT, "Long");
+        if (result == null) {
+            return 0;
+        }
 
-  @Override
-  public void registerOutParameter(int parameterIndex, int sqlType, String typeName)
-      throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "registerOutParameter(int,int,String)");
-  }
-
-  @Override
-  public void setObject(String parameterName, Object x, SQLType targetSqlType,
-      int scaleOrLength) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "setObject");
-  }
-
-  @Override
-  public void setObject(String parameterName, Object x, SQLType targetSqlType)
-      throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "setObject");
-  }
-
-  @Override
-  public void registerOutParameter(int parameterIndex, SQLType sqlType)
-      throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "registerOutParameter");
-  }
-
-  @Override
-  public void registerOutParameter(int parameterIndex, SQLType sqlType, int scale)
-      throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "registerOutParameter");
-  }
-
-  @Override
-  public void registerOutParameter(int parameterIndex, SQLType sqlType, String typeName)
-      throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "registerOutParameter");
-  }
-
-  @Override
-  public void registerOutParameter(String parameterName, SQLType sqlType)
-      throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "registerOutParameter");
-  }
-
-  @Override
-  public void registerOutParameter(String parameterName, SQLType sqlType, int scale)
-      throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "registerOutParameter");
-  }
-
-  @Override
-  public void registerOutParameter(String parameterName, SQLType sqlType, String typeName)
-      throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "registerOutParameter");
-  }
-
-  @Override
-  public RowId getRowId(int parameterIndex) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "getRowId(int)");
-  }
-
-  @Override
-  public RowId getRowId(String parameterName) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "getRowId(String)");
-  }
-
-  @Override
-  public void setRowId(String parameterName, RowId x) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "setRowId(String, RowId)");
-  }
-
-  @Override
-  public void setNString(String parameterName, String value) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "setNString(String, String)");
-  }
-
-  @Override
-  public void setNCharacterStream(String parameterName, Reader value, long length)
-      throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "setNCharacterStream(String, Reader, long)");
-  }
-
-  @Override
-  public void setNCharacterStream(String parameterName, Reader value) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "setNCharacterStream(String, Reader)");
-  }
-
-  @Override
-  public void setCharacterStream(String parameterName, Reader value, long length)
-      throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "setCharacterStream(String, Reader, long)");
-  }
-
-  @Override
-  public void setCharacterStream(String parameterName, Reader value) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "setCharacterStream(String, Reader)");
-  }
-
-  @Override
-  public void setBinaryStream(String parameterName, InputStream value, long length)
-      throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "setBinaryStream(String, InputStream, long)");
-  }
-
-  @Override
-  public void setBinaryStream(String parameterName, InputStream value) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "setBinaryStream(String, InputStream)");
-  }
-
-  @Override
-  public void setAsciiStream(String parameterName, InputStream value, long length)
-      throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "setAsciiStream(String, InputStream, long)");
-  }
-
-  @Override
-  public void setAsciiStream(String parameterName, InputStream value) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "setAsciiStream(String, InputStream)");
-  }
-
-  @Override
-  public void setNClob(String parameterName, NClob value) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "setNClob(String, NClob)");
-  }
-
-  @Override
-  public void setClob(String parameterName, Reader reader, long length) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "setClob(String, Reader, long)");
-  }
-
-  @Override
-  public void setClob(String parameterName, Reader reader) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "setClob(String, Reader)");
-  }
-
-  @Override
-  public void setBlob(String parameterName, InputStream inputStream, long length)
-      throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "setBlob(String, InputStream, long)");
-  }
-
-  @Override
-  public void setBlob(String parameterName, InputStream inputStream) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "setBlob(String, InputStream)");
-  }
-
-  @Override
-  public void setBlob(String parameterName, Blob x) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "setBlob(String, Blob)");
-  }
-
-  @Override
-  public void setClob(String parameterName, Clob x) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "setClob(String, Clob)");
-  }
-
-  @Override
-  public void setNClob(String parameterName, Reader reader, long length) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "setNClob(String, Reader, long)");
-  }
-
-  @Override
-  public void setNClob(String parameterName, Reader reader) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "setNClob(String, Reader)");
-  }
-
-  @Override
-  public NClob getNClob(int parameterIndex) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "getNClob(int)");
-  }
-
-  @Override
-  public NClob getNClob(String parameterName) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "getNClob(String)");
-  }
-
-  @Override
-  public void setSQLXML(String parameterName, SQLXML xmlObject) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "setSQLXML(String, SQLXML)");
-  }
-
-  @Override
-  public SQLXML getSQLXML(int parameterIndex) throws SQLException {
-    Object result = checkIndex(parameterIndex, Types.SQLXML, "SQLXML");
-    return (SQLXML) result;
-  }
-
-  @Override
-  public SQLXML getSQLXML(String parameterIndex) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "getSQLXML(String)");
-  }
-
-  @Override
-  public String getNString(int parameterIndex) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "getNString(int)");
-  }
-
-  @Override
-  public String getNString(String parameterName) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "getNString(String)");
-  }
-
-  @Override
-  public Reader getNCharacterStream(int parameterIndex) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "getNCharacterStream(int)");
-  }
-
-  @Override
-  public Reader getNCharacterStream(String parameterName) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "getNCharacterStream(String)");
-  }
-
-  @Override
-  public Reader getCharacterStream(int parameterIndex) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "getCharacterStream(int)");
-  }
-
-  @Override
-  public Reader getCharacterStream(String parameterName) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "getCharacterStream(String)");
-  }
-
-  @Override
-  public <T> T getObject(int parameterIndex, Class<T> type)
-      throws SQLException {
-    if (type == ResultSet.class) {
-      return type.cast(getObject(parameterIndex));
+        return (Long) result;
     }
-    throw new PSQLException(GT.tr("Unsupported type conversion to {1}.", type),
-            PSQLState.INVALID_PARAMETER_VALUE);
-  }
 
-  @Override
-  public <T> T getObject(String parameterName, Class<T> type) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "getObject(String, Class<T>)");
-  }
+    @Override
+    public float getFloat(int parameterIndex) throws SQLException {
+        Object result = checkIndex(parameterIndex, Types.REAL, "Float");
+        if (result == null) {
+            return 0;
+        }
 
-  @Override
-  public void registerOutParameter(String parameterName, int sqlType) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "registerOutParameter(String,int)");
-  }
+        return (Float) result;
+    }
 
-  @Override
-  public void registerOutParameter(String parameterName, int sqlType, int scale)
-      throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "registerOutParameter(String,int,int)");
-  }
+    @Override
+    public double getDouble(int parameterIndex) throws SQLException {
+        Object result = checkIndex(parameterIndex, Types.DOUBLE, "Double");
+        if (result == null) {
+            return 0;
+        }
 
-  @Override
-  public void registerOutParameter(String parameterName, int sqlType, String typeName)
-      throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "registerOutParameter(String,int,String)");
-  }
+        return (Double) result;
+    }
 
-  @Override
-  public URL getURL(int parameterIndex) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "getURL(String)");
-  }
+    @Override
+    @SuppressWarnings("deprecation")
+    public BigDecimal getBigDecimal(int parameterIndex, int scale) throws SQLException {
+        Object result = checkIndex(parameterIndex, Types.NUMERIC, "BigDecimal");
+        return (BigDecimal) result;
+    }
 
-  @Override
-  public void setURL(String parameterName, URL val) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "setURL(String,URL)");
-  }
+    @Override
+    public byte[] getBytes(int parameterIndex) throws SQLException {
+        Object result = checkIndex(parameterIndex, Types.VARBINARY, Types.BINARY, "Bytes");
+        return (byte[]) result;
+    }
 
-  @Override
-  public void setNull(String parameterName, int sqlType) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "setNull(String,int)");
-  }
+    @Override
+    public Date getDate(int parameterIndex) throws SQLException {
+        Object result = checkIndex(parameterIndex, Types.DATE, "Date");
+        return (Date) result;
+    }
 
-  @Override
-  public void setBoolean(String parameterName, boolean x) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "setBoolean(String,boolean)");
-  }
+    @Override
+    public Time getTime(int parameterIndex) throws SQLException {
+        Object result = checkIndex(parameterIndex, Types.TIME, "Time");
+        return (Time) result;
+    }
 
-  @Override
-  public void setByte(String parameterName, byte x) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "setByte(String,byte)");
-  }
+    @Override
+    public Timestamp getTimestamp(int parameterIndex) throws SQLException {
+        Object result = checkIndex(parameterIndex, Types.TIMESTAMP, "Timestamp");
+        return (Timestamp) result;
+    }
 
-  @Override
-  public void setShort(String parameterName, short x) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "setShort(String,short)");
-  }
+    @Override
+    public Object getObject(int parameterIndex) throws SQLException {
+        return getCallResult(parameterIndex);
+    }
 
-  @Override
-  public void setInt(String parameterName, int x) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "setInt(String,int)");
-  }
+    /**
+     * Helper function for the getXXX calls to check isFunction and index == 1.
+     * Compares BOTH type fields against the return type.
+     *
+     * @param parameterIndex parameter index (1-based)
+     * @param type1          type 1
+     * @param type2          type 2
+     * @param getName        getter name
+     * @throws SQLException if the parameter was not registered with a compatible type
+     */
+    protected Object checkIndex(int parameterIndex, int type1, int type2, String getName)
+            throws SQLException {
+        Object result = getCallResult(parameterIndex);
+        int testReturn = this.testReturn != null ? this.testReturn[parameterIndex - 1] : -1;
+        if (type1 != testReturn && type2 != testReturn) {
+            throw new PSQLException(
+                    GT.tr("Parameter of type {0} was registered, but call to get{1} (sqltype={2}) was made.",
+                            "java.sql.Types=" + testReturn, getName,
+                            "java.sql.Types=" + type1),
+                    PSQLState.MOST_SPECIFIC_TYPE_DOES_NOT_MATCH);
+        }
+        return result;
+    }
 
-  @Override
-  public void setLong(String parameterName, long x) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "setLong(String,long)");
-  }
+    /**
+     * Helper function for the getXXX calls to check isFunction and index == 1.
+     *
+     * @param parameterIndex parameter index (1-based)
+     * @param type           type
+     * @param getName        getter name
+     * @throws SQLException if given index is not valid
+     */
+    protected Object checkIndex(int parameterIndex,
+                                int type, String getName) throws SQLException {
+        Object result = getCallResult(parameterIndex);
+        int testReturn = this.testReturn != null ? this.testReturn[parameterIndex - 1] : -1;
+        if (type != testReturn) {
+            throw new PSQLException(
+                    GT.tr("Parameter of type {0} was registered, but call to get{1} (sqltype={2}) was made.",
+                            "java.sql.Types=" + testReturn, getName,
+                            "java.sql.Types=" + type),
+                    PSQLState.MOST_SPECIFIC_TYPE_DOES_NOT_MATCH);
+        }
+        return result;
+    }
 
-  @Override
-  public void setFloat(String parameterName, float x) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "setFloat(String,float)");
-  }
+    private Object getCallResult(int parameterIndex) throws SQLException {
+        checkClosed();
 
-  @Override
-  public void setDouble(String parameterName, double x) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "setDouble(String,double)");
-  }
+        if (!isFunction) {
+            throw new PSQLException(
+                    GT.tr(
+                            "A CallableStatement was declared, but no call to registerOutParameter(1, <some type>) was made."),
+                    PSQLState.STATEMENT_NOT_ALLOWED_IN_FUNCTION_CALL);
+        }
 
-  @Override
-  public void setBigDecimal(String parameterName, BigDecimal x) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "setBigDecimal(String,BigDecimal)");
-  }
+        if (!returnTypeSet) {
+            throw new PSQLException(GT.tr("No function outputs were registered."),
+                    PSQLState.OBJECT_NOT_IN_STATE);
+        }
 
-  @Override
-  public void setString(String parameterName, String x) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "setString(String,String)");
-  }
+        Object[] callResult = this.callResult;
+        if (callResult == null) {
+            throw new PSQLException(
+                    GT.tr("Results cannot be retrieved from a CallableStatement before it is executed."),
+                    PSQLState.NO_DATA);
+        }
 
-  @Override
-  public void setBytes(String parameterName, byte [] x) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "setBytes(String,byte)");
-  }
+        lastIndex = parameterIndex;
+        return callResult[parameterIndex - 1];
+    }
 
-  @Override
-  public void setDate(String parameterName, Date x) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "setDate(String,Date)");
-  }
+    @Override
+    protected BatchResultHandler createBatchHandler(Query[] queries,
+                                                    ParameterList[] parameterLists) {
+        return new CallableBatchResultHandler(this, queries, parameterLists);
+    }
 
-  @Override
-  public void setTime(String parameterName, Time x) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "setTime(String,Time)");
-  }
+    @Override
+    public Array getArray(int i) throws SQLException {
+        Object result = checkIndex(i, Types.ARRAY, "Array");
+        return (Array) result;
+    }
 
-  @Override
-  public void setTimestamp(String parameterName, Timestamp x) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "setTimestamp(String,Timestamp)");
-  }
+    @Override
+    public BigDecimal getBigDecimal(int parameterIndex) throws SQLException {
+        Object result = checkIndex(parameterIndex, Types.NUMERIC, "BigDecimal");
+        return (BigDecimal) result;
+    }
 
-  @Override
-  public void setAsciiStream(String parameterName, InputStream x, int length) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "setAsciiStream(String,InputStream,int)");
-  }
+    @Override
+    public Blob getBlob(int i) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "getBlob(int)");
+    }
 
-  @Override
-  public void setBinaryStream(String parameterName, InputStream x, int length) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "setBinaryStream(String,InputStream,int)");
-  }
+    @Override
+    public Clob getClob(int i) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "getClob(int)");
+    }
 
-  @Override
-  public void setObject(String parameterName, Object x, int targetSqlType, int scale)
-      throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "setObject(String,Object,int,int)");
-  }
+    public Object getObjectImpl(int i, Map<String, Class<?>> map) throws SQLException {
+        if (map == null || map.isEmpty()) {
+            return getObject(i);
+        }
+        throw Driver.notImplemented(this.getClass(), "getObjectImpl(int,Map)");
+    }
 
-  @Override
-  public void setObject(String parameterName, Object x, int targetSqlType) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "setObject(String,Object,int)");
-  }
+    @Override
+    public Ref getRef(int i) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "getRef(int)");
+    }
 
-  @Override
-  public void setObject(String parameterName, Object x) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "setObject(String,Object)");
-  }
+    @Override
+    public Date getDate(int i, Calendar cal) throws SQLException {
+        Object result = checkIndex(i, Types.DATE, "Date");
 
-  @Override
-  public void setCharacterStream(String parameterName, Reader reader, int length)
-      throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "setCharacterStream(String,Reader,int)");
-  }
+        if (result == null) {
+            return null;
+        }
 
-  @Override
-  public void setDate(String parameterName, Date x, Calendar cal) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "setDate(String,Date,Calendar)");
-  }
+        String value = result.toString();
+        return getTimestampUtils().toDate(cal, value);
+    }
 
-  @Override
-  public void setTime(String parameterName, Time x, Calendar cal) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "setTime(String,Time,Calendar)");
-  }
+    @Override
+    public Time getTime(int i, Calendar cal) throws SQLException {
+        Object result = checkIndex(i, Types.TIME, "Time");
 
-  @Override
-  public void setTimestamp(String parameterName, Timestamp x, Calendar cal) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "setTimestamp(String,Timestamp,Calendar)");
-  }
+        if (result == null) {
+            return null;
+        }
 
-  @Override
-  public void setNull(String parameterName, int sqlType, String typeName) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "setNull(String,int,String)");
-  }
+        String value = result.toString();
+        return getTimestampUtils().toTime(cal, value);
+    }
 
-  @Override
-  public String getString(String parameterName) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "getString(String)");
-  }
+    @Override
+    public Timestamp getTimestamp(int i, Calendar cal) throws SQLException {
+        Object result = checkIndex(i, Types.TIMESTAMP, "Timestamp");
 
-  @Override
-  public boolean getBoolean(String parameterName) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "getBoolean(String)");
-  }
+        if (result == null) {
+            return null;
+        }
 
-  @Override
-  public byte getByte(String parameterName) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "getByte(String)");
-  }
+        String value = result.toString();
+        return getTimestampUtils().toTimestamp(cal, value);
+    }
 
-  @Override
-  public short getShort(String parameterName) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "getShort(String)");
-  }
+    @Override
+    public void registerOutParameter(int parameterIndex, int sqlType, String typeName)
+            throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "registerOutParameter(int,int,String)");
+    }
 
-  @Override
-  public int getInt(String parameterName) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "getInt(String)");
-  }
+    @Override
+    public void setObject(String parameterName, Object x, SQLType targetSqlType,
+                          int scaleOrLength) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "setObject");
+    }
 
-  @Override
-  public long getLong(String parameterName) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "getLong(String)");
-  }
+    @Override
+    public void setObject(String parameterName, Object x, SQLType targetSqlType)
+            throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "setObject");
+    }
 
-  @Override
-  public float getFloat(String parameterName) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "getFloat(String)");
-  }
+    @Override
+    public void registerOutParameter(int parameterIndex, SQLType sqlType)
+            throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "registerOutParameter");
+    }
 
-  @Override
-  public double getDouble(String parameterName) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "getDouble(String)");
-  }
+    @Override
+    public void registerOutParameter(int parameterIndex, SQLType sqlType, int scale)
+            throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "registerOutParameter");
+    }
 
-  @Override
-  public byte [] getBytes(String parameterName) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "getBytes(String)");
-  }
+    @Override
+    public void registerOutParameter(int parameterIndex, SQLType sqlType, String typeName)
+            throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "registerOutParameter");
+    }
 
-  @Override
-  public Date getDate(String parameterName) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "getDate(String)");
-  }
+    @Override
+    public void registerOutParameter(String parameterName, SQLType sqlType)
+            throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "registerOutParameter");
+    }
 
-  @Override
-  public Time getTime(String parameterName) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "getTime(String)");
-  }
+    @Override
+    public void registerOutParameter(String parameterName, SQLType sqlType, int scale)
+            throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "registerOutParameter");
+    }
 
-  @Override
-  public Timestamp getTimestamp(String parameterName) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "getTimestamp(String)");
-  }
+    @Override
+    public void registerOutParameter(String parameterName, SQLType sqlType, String typeName)
+            throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "registerOutParameter");
+    }
 
-  @Override
-  public Object getObject(String parameterName) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "getObject(String)");
-  }
+    @Override
+    public RowId getRowId(int parameterIndex) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "getRowId(int)");
+    }
 
-  @Override
-  public BigDecimal getBigDecimal(String parameterName) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "getBigDecimal(String)");
-  }
+    @Override
+    public RowId getRowId(String parameterName) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "getRowId(String)");
+    }
 
-  public Object getObjectImpl(String parameterName, Map<String, Class<?>> map) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "getObject(String,Map)");
-  }
+    @Override
+    public void setRowId(String parameterName, RowId x) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "setRowId(String, RowId)");
+    }
 
-  @Override
-  public Ref getRef(String parameterName) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "getRef(String)");
-  }
+    @Override
+    public void setNString(String parameterName, String value) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "setNString(String, String)");
+    }
 
-  @Override
-  public Blob getBlob(String parameterName) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "getBlob(String)");
-  }
+    @Override
+    public void setNCharacterStream(String parameterName, Reader value, long length)
+            throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "setNCharacterStream(String, Reader, long)");
+    }
 
-  @Override
-  public Clob getClob(String parameterName) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "getClob(String)");
-  }
+    @Override
+    public void setNCharacterStream(String parameterName, Reader value) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "setNCharacterStream(String, Reader)");
+    }
 
-  @Override
-  public Array getArray(String parameterName) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "getArray(String)");
-  }
+    @Override
+    public void setCharacterStream(String parameterName, Reader value, long length)
+            throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "setCharacterStream(String, Reader, long)");
+    }
 
-  @Override
-  public Date getDate(String parameterName, Calendar cal) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "getDate(String,Calendar)");
-  }
+    @Override
+    public void setCharacterStream(String parameterName, Reader value) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "setCharacterStream(String, Reader)");
+    }
 
-  @Override
-  public Time getTime(String parameterName, Calendar cal) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "getTime(String,Calendar)");
-  }
+    @Override
+    public void setBinaryStream(String parameterName, InputStream value, long length)
+            throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "setBinaryStream(String, InputStream, long)");
+    }
 
-  @Override
-  public Timestamp getTimestamp(String parameterName, Calendar cal) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "getTimestamp(String,Calendar)");
-  }
+    @Override
+    public void setBinaryStream(String parameterName, InputStream value) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "setBinaryStream(String, InputStream)");
+    }
 
-  @Override
-  public URL getURL(String parameterName) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "getURL(String)");
-  }
+    @Override
+    public void setAsciiStream(String parameterName, InputStream value, long length)
+            throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "setAsciiStream(String, InputStream, long)");
+    }
 
-  @Override
-  public void registerOutParameter(int parameterIndex, int sqlType, int scale) throws SQLException {
-    // ignore scale for now
-    registerOutParameter(parameterIndex, sqlType);
-  }
+    @Override
+    public void setAsciiStream(String parameterName, InputStream value) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "setAsciiStream(String, InputStream)");
+    }
+
+    @Override
+    public void setNClob(String parameterName, NClob value) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "setNClob(String, NClob)");
+    }
+
+    @Override
+    public void setClob(String parameterName, Reader reader, long length) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "setClob(String, Reader, long)");
+    }
+
+    @Override
+    public void setClob(String parameterName, Reader reader) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "setClob(String, Reader)");
+    }
+
+    @Override
+    public void setBlob(String parameterName, InputStream inputStream, long length)
+            throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "setBlob(String, InputStream, long)");
+    }
+
+    @Override
+    public void setBlob(String parameterName, InputStream inputStream) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "setBlob(String, InputStream)");
+    }
+
+    @Override
+    public void setBlob(String parameterName, Blob x) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "setBlob(String, Blob)");
+    }
+
+    @Override
+    public void setClob(String parameterName, Clob x) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "setClob(String, Clob)");
+    }
+
+    @Override
+    public void setNClob(String parameterName, Reader reader, long length) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "setNClob(String, Reader, long)");
+    }
+
+    @Override
+    public void setNClob(String parameterName, Reader reader) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "setNClob(String, Reader)");
+    }
+
+    @Override
+    public NClob getNClob(int parameterIndex) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "getNClob(int)");
+    }
+
+    @Override
+    public NClob getNClob(String parameterName) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "getNClob(String)");
+    }
+
+    @Override
+    public void setSQLXML(String parameterName, SQLXML xmlObject) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "setSQLXML(String, SQLXML)");
+    }
+
+    @Override
+    public SQLXML getSQLXML(int parameterIndex) throws SQLException {
+        Object result = checkIndex(parameterIndex, Types.SQLXML, "SQLXML");
+        return (SQLXML) result;
+    }
+
+    @Override
+    public SQLXML getSQLXML(String parameterName) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "getSQLXML(String)");
+    }
+
+    @Override
+    public String getNString(int parameterIndex) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "getNString(int)");
+    }
+
+    @Override
+    public String getNString(String parameterName) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "getNString(String)");
+    }
+
+    @Override
+    public Reader getNCharacterStream(int parameterIndex) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "getNCharacterStream(int)");
+    }
+
+    @Override
+    public Reader getNCharacterStream(String parameterName) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "getNCharacterStream(String)");
+    }
+
+    @Override
+    public Reader getCharacterStream(int parameterIndex) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "getCharacterStream(int)");
+    }
+
+    @Override
+    public Reader getCharacterStream(String parameterName) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "getCharacterStream(String)");
+    }
+
+    @Override
+    public <T> T getObject(int parameterIndex, Class<T> type)
+            throws SQLException {
+        if (type == ResultSet.class) {
+            return type.cast(getObject(parameterIndex));
+        }
+        throw new PSQLException(GT.tr("Unsupported type conversion to {0}.", type),
+                PSQLState.INVALID_PARAMETER_VALUE);
+    }
+
+    @Override
+    public <T> T getObject(String parameterName, Class<T> type) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "getObject(String, Class<T>)");
+    }
+
+    @Override
+    public void registerOutParameter(String parameterName, int sqlType) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "registerOutParameter(String,int)");
+    }
+
+    @Override
+    public void registerOutParameter(String parameterName, int sqlType, int scale)
+            throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "registerOutParameter(String,int,int)");
+    }
+
+    @Override
+    public void registerOutParameter(String parameterName, int sqlType, String typeName)
+            throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "registerOutParameter(String,int,String)");
+    }
+
+    @Override
+    public URL getURL(int parameterIndex) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "getURL(int)");
+    }
+
+    @Override
+    public void setURL(String parameterName, URL val) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "setURL(String,URL)");
+    }
+
+    @Override
+    public void setNull(String parameterName, int sqlType) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "setNull(String,int)");
+    }
+
+    @Override
+    public void setBoolean(String parameterName, boolean x) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "setBoolean(String,boolean)");
+    }
+
+    @Override
+    public void setByte(String parameterName, byte x) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "setByte(String,byte)");
+    }
+
+    @Override
+    public void setShort(String parameterName, short x) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "setShort(String,short)");
+    }
+
+    @Override
+    public void setInt(String parameterName, int x) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "setInt(String,int)");
+    }
+
+    @Override
+    public void setLong(String parameterName, long x) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "setLong(String,long)");
+    }
+
+    @Override
+    public void setFloat(String parameterName, float x) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "setFloat(String,float)");
+    }
+
+    @Override
+    public void setDouble(String parameterName, double x) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "setDouble(String,double)");
+    }
+
+    @Override
+    public void setBigDecimal(String parameterName, BigDecimal x) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "setBigDecimal(String,BigDecimal)");
+    }
+
+    @Override
+    public void setString(String parameterName, String x) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "setString(String,String)");
+    }
+
+    @Override
+    public void setBytes(String parameterName, byte[] x) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "setBytes(String,byte)");
+    }
+
+    @Override
+    public void setDate(String parameterName, Date x) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "setDate(String,Date)");
+    }
+
+    @Override
+    public void setTime(String parameterName, Time x) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "setTime(String,Time)");
+    }
+
+    @Override
+    public void setTimestamp(String parameterName, Timestamp x) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "setTimestamp(String,Timestamp)");
+    }
+
+    @Override
+    public void setAsciiStream(String parameterName, InputStream x, int length) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "setAsciiStream(String,InputStream,int)");
+    }
+
+    @Override
+    public void setBinaryStream(String parameterName, InputStream x, int length) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "setBinaryStream(String,InputStream,int)");
+    }
+
+    @Override
+    public void setObject(String parameterName, Object x, int targetSqlType, int scale)
+            throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "setObject(String,Object,int,int)");
+    }
+
+    @Override
+    public void setObject(String parameterName, Object x, int targetSqlType) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "setObject(String,Object,int)");
+    }
+
+    @Override
+    public void setObject(String parameterName, Object x) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "setObject(String,Object)");
+    }
+
+    @Override
+    public void setCharacterStream(String parameterName, Reader reader, int length)
+            throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "setCharacterStream(String,Reader,int)");
+    }
+
+    @Override
+    public void setDate(String parameterName, Date x, Calendar cal) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "setDate(String,Date,Calendar)");
+    }
+
+    @Override
+    public void setTime(String parameterName, Time x, Calendar cal) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "setTime(String,Time,Calendar)");
+    }
+
+    @Override
+    public void setTimestamp(String parameterName, Timestamp x, Calendar cal) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "setTimestamp(String,Timestamp,Calendar)");
+    }
+
+    @Override
+    public void setNull(String parameterName, int sqlType, String typeName) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "setNull(String,int,String)");
+    }
+
+    @Override
+    public String getString(String parameterName) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "getString(String)");
+    }
+
+    @Override
+    public boolean getBoolean(String parameterName) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "getBoolean(String)");
+    }
+
+    @Override
+    public byte getByte(String parameterName) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "getByte(String)");
+    }
+
+    @Override
+    public short getShort(String parameterName) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "getShort(String)");
+    }
+
+    @Override
+    public int getInt(String parameterName) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "getInt(String)");
+    }
+
+    @Override
+    public long getLong(String parameterName) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "getLong(String)");
+    }
+
+    @Override
+    public float getFloat(String parameterName) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "getFloat(String)");
+    }
+
+    @Override
+    public double getDouble(String parameterName) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "getDouble(String)");
+    }
+
+    @Override
+    public byte[] getBytes(String parameterName) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "getBytes(String)");
+    }
+
+    @Override
+    public Date getDate(String parameterName) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "getDate(String)");
+    }
+
+    @Override
+    public Time getTime(String parameterName) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "getTime(String)");
+    }
+
+    @Override
+    public Timestamp getTimestamp(String parameterName) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "getTimestamp(String)");
+    }
+
+    @Override
+    public Object getObject(String parameterName) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "getObject(String)");
+    }
+
+    @Override
+    public BigDecimal getBigDecimal(String parameterName) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "getBigDecimal(String)");
+    }
+
+    public Object getObjectImpl(String parameterName, Map<String, Class<?>> map) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "getObject(String,Map)");
+    }
+
+    @Override
+    public Ref getRef(String parameterName) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "getRef(String)");
+    }
+
+    @Override
+    public Blob getBlob(String parameterName) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "getBlob(String)");
+    }
+
+    @Override
+    public Clob getClob(String parameterName) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "getClob(String)");
+    }
+
+    @Override
+    public Array getArray(String parameterName) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "getArray(String)");
+    }
+
+    @Override
+    public Date getDate(String parameterName, Calendar cal) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "getDate(String,Calendar)");
+    }
+
+    @Override
+    public Time getTime(String parameterName, Calendar cal) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "getTime(String,Calendar)");
+    }
+
+    @Override
+    public Timestamp getTimestamp(String parameterName, Calendar cal) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "getTimestamp(String,Calendar)");
+    }
+
+    @Override
+    public URL getURL(String parameterName) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "getURL(String)");
+    }
+
+    @Override
+    public void registerOutParameter(int parameterIndex, int sqlType, int scale) throws SQLException {
+        // ignore scale for now
+        registerOutParameter(parameterIndex, sqlType);
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/PgClob.java b/pgjdbc/src/main/java/org/postgresql/jdbc/PgClob.java
index b4784e6..4413b90 100644
--- a/pgjdbc/src/main/java/org/postgresql/jdbc/PgClob.java
+++ b/pgjdbc/src/main/java/org/postgresql/jdbc/PgClob.java
@@ -21,92 +21,92 @@ import java.sql.SQLException;
 @SuppressWarnings("try")
 public class PgClob extends AbstractBlobClob implements Clob {
 
-  public PgClob(BaseConnection conn, long oid) throws SQLException {
-    super(conn, oid);
-  }
-
-  @Override
-  public Reader getCharacterStream(long pos, long length) throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      checkFreed();
-      throw Driver.notImplemented(this.getClass(), "getCharacterStream(long, long)");
+    public PgClob(BaseConnection conn, long oid) throws SQLException {
+        super(conn, oid);
     }
-  }
 
-  @Override
-  public int setString(long pos, String str) throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      checkFreed();
-      throw Driver.notImplemented(this.getClass(), "setString(long,str)");
+    @Override
+    public Reader getCharacterStream(long pos, long length) throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            checkFreed();
+            throw Driver.notImplemented(this.getClass(), "getCharacterStream(long, long)");
+        }
     }
-  }
 
-  @Override
-  public int setString(long pos, String str, int offset, int len) throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      checkFreed();
-      throw Driver.notImplemented(this.getClass(), "setString(long,String,int,int)");
+    @Override
+    public int setString(long pos, String str) throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            checkFreed();
+            throw Driver.notImplemented(this.getClass(), "setString(long,str)");
+        }
     }
-  }
 
-  @Override
-  public OutputStream setAsciiStream(long pos) throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      checkFreed();
-      throw Driver.notImplemented(this.getClass(), "setAsciiStream(long)");
+    @Override
+    public int setString(long pos, String str, int offset, int len) throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            checkFreed();
+            throw Driver.notImplemented(this.getClass(), "setString(long,String,int,int)");
+        }
     }
-  }
 
-  @Override
-  public Writer setCharacterStream(long pos) throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      checkFreed();
-      throw Driver.notImplemented(this.getClass(), "setCharacterStream(long)");
+    @Override
+    public OutputStream setAsciiStream(long pos) throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            checkFreed();
+            throw Driver.notImplemented(this.getClass(), "setAsciiStream(long)");
+        }
     }
-  }
 
-  @Override
-  public InputStream getAsciiStream() throws SQLException {
-    return getBinaryStream();
-  }
-
-  @Override
-  public Reader getCharacterStream() throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      Charset connectionCharset = Charset.forName(conn.getEncoding().name());
-      return new InputStreamReader(getBinaryStream(), connectionCharset);
+    @Override
+    public Writer setCharacterStream(long pos) throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            checkFreed();
+            throw Driver.notImplemented(this.getClass(), "setCharacterStream(long)");
+        }
     }
-  }
 
-  @Override
-  public String getSubString(long i, int j) throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      assertPosition(i, j);
-      LargeObject lo = getLo(false);
-      lo.seek((int) i - 1);
-      return new String(lo.read(j));
+    @Override
+    public InputStream getAsciiStream() throws SQLException {
+        return getBinaryStream();
     }
-  }
 
-  /**
-   * For now, this is not implemented.
-   */
-  @Override
-  public long position(String pattern, long start) throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      checkFreed();
-      throw Driver.notImplemented(this.getClass(), "position(String,long)");
+    @Override
+    public Reader getCharacterStream() throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            Charset connectionCharset = Charset.forName(conn.getEncoding().name());
+            return new InputStreamReader(getBinaryStream(), connectionCharset);
+        }
     }
-  }
 
-  /**
-   * This should be simply passing the byte value of the pattern Blob.
-   */
-  @Override
-  public long position(Clob pattern, long start) throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      checkFreed();
-      throw Driver.notImplemented(this.getClass(), "position(Clob,start)");
+    @Override
+    public String getSubString(long i, int j) throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            assertPosition(i, j);
+            LargeObject lo = getLo(false);
+            lo.seek((int) i - 1);
+            return new String(lo.read(j));
+        }
+    }
+
+    /**
+     * For now, this is not implemented.
+     */
+    @Override
+    public long position(String pattern, long start) throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            checkFreed();
+            throw Driver.notImplemented(this.getClass(), "position(String,long)");
+        }
+    }
+
+    /**
+     * This should be simply passing the byte value of the pattern Blob.
+     */
+    @Override
+    public long position(Clob pattern, long start) throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            checkFreed();
+            throw Driver.notImplemented(this.getClass(), "position(Clob,start)");
+        }
     }
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/PgConnection.java b/pgjdbc/src/main/java/org/postgresql/jdbc/PgConnection.java
index 1b9de33..3c8a01f 100644
--- a/pgjdbc/src/main/java/org/postgresql/jdbc/PgConnection.java
+++ b/pgjdbc/src/main/java/org/postgresql/jdbc/PgConnection.java
@@ -96,1869 +96,1843 @@ import java.util.logging.Logger;
 @SuppressWarnings("try")
 public class PgConnection implements BaseConnection {
 
-  private static final Logger LOGGER = Logger.getLogger(PgConnection.class.getName());
-  private static final Set<Integer> SUPPORTED_BINARY_OIDS = getSupportedBinaryOids();
-  private static final SQLPermission SQL_PERMISSION_ABORT = new SQLPermission("callAbort");
-  private static final SQLPermission SQL_PERMISSION_NETWORK_TIMEOUT = new SQLPermission("setNetworkTimeout");
+    private static final Logger LOGGER = Logger.getLogger(PgConnection.class.getName());
+    private static final Set<Integer> SUPPORTED_BINARY_OIDS = getSupportedBinaryOids();
+    private static final SQLPermission SQL_PERMISSION_ABORT = new SQLPermission("callAbort");
+    private static final SQLPermission SQL_PERMISSION_NETWORK_TIMEOUT = new SQLPermission("setNetworkTimeout");
 
-  private static final MethodHandle SYSTEM_GET_SECURITY_MANAGER;
-  private static final MethodHandle SECURITY_MANAGER_CHECK_PERMISSION;
-
-  static {
-    MethodHandle systemGetSecurityManagerHandle = null;
-    MethodHandle securityManagerCheckPermission = null;
-    try {
-      Class<?> securityManagerClass = Class.forName("java.lang.SecurityManager");
-      systemGetSecurityManagerHandle =
-          MethodHandles.lookup().findStatic(System.class, "getSecurityManager",
-              MethodType.methodType(securityManagerClass));
-      securityManagerCheckPermission =
-          MethodHandles.lookup().findVirtual(securityManagerClass, "checkPermission",
-              MethodType.methodType(void.class, Permission.class));
-    } catch (NoSuchMethodException | IllegalAccessException | ClassNotFoundException ignore) {
-    }
-    SYSTEM_GET_SECURITY_MANAGER = systemGetSecurityManagerHandle;
-    SECURITY_MANAGER_CHECK_PERMISSION = securityManagerCheckPermission;
-  }
-
-  private enum ReadOnlyBehavior {
-    ignore,
-    transaction,
-    always
-  }
-
-  private final ResourceLock lock = new ResourceLock();
-  private final Condition lockCondition = lock.newCondition();
-
-  //
-  // Data initialized on construction:
-  //
-  private final Properties clientInfo;
-
-  /* URL we were created via */
-  private final String creatingURL;
-
-  private final ReadOnlyBehavior readOnlyBehavior;
-
-  private Throwable openStackTrace;
-
-  /**
-   * This field keeps finalize action alive, so its .finalize() method is called only
-   * when the connection itself becomes unreachable.
-   * Moving .finalize() to a different object allows JVM to release all the other objects
-   * referenced in PgConnection early.
-   */
-  private final PgConnectionCleaningAction finalizeAction;
-  private final Object leakHandle = new Object();
-
-  /* Actual network handler */
-  private final QueryExecutor queryExecutor;
-
-  /* Query that runs COMMIT */
-  private final Query commitQuery;
-  /* Query that runs ROLLBACK */
-  private final Query rollbackQuery;
-
-  private final CachedQuery setSessionReadOnly;
-
-  private final CachedQuery setSessionNotReadOnly;
-
-  private final TypeInfo typeCache;
-
-  private boolean disableColumnSanitiser;
-
-  // Default statement prepare threshold.
-  protected int prepareThreshold;
-
-  /**
-   * Default fetch size for statement.
-   *
-   * @see PGProperty#DEFAULT_ROW_FETCH_SIZE
-   */
-  protected int defaultFetchSize;
-
-  // Default forcebinary option.
-  protected boolean forcebinary;
-
-  /**
-   * Oids for which binary transfer should be disabled.
-   */
-  private final Set<? extends Integer> binaryDisabledOids;
-
-  private int rsHoldability = ResultSet.CLOSE_CURSORS_AT_COMMIT;
-  private int savepointId;
-  // Connection's autocommit state.
-  private boolean autoCommit = true;
-  // Connection's readonly state.
-  private boolean readOnly;
-  // Filter out database objects for which the current user has no privileges granted from the DatabaseMetaData
-  private final boolean  hideUnprivilegedObjects ;
-  // Whether to include error details in logging and exceptions
-  private final boolean logServerErrorDetail;
-  // Bind String to UNSPECIFIED or VARCHAR?
-  private final boolean bindStringAsVarchar;
-
-  // Current warnings; there might be more on queryExecutor too.
-  private SQLWarning firstWarning;
-
-  /**
-   * Replication protocol in current version postgresql(10devel) supports a limited number of
-   * commands.
-   */
-  private final boolean replicationConnection;
-
-  private final LruCache<FieldMetadata.Key, FieldMetadata> fieldMetadataCache;
-
-  private final String xmlFactoryFactoryClass;
-  private PGXmlFactoryFactory xmlFactoryFactory;
-  private final LazyCleaner.Cleanable<IOException> cleanable;
-
-  final CachedQuery borrowQuery(String sql) throws SQLException {
-    return queryExecutor.borrowQuery(sql);
-  }
-
-  final CachedQuery borrowCallableQuery(String sql) throws SQLException {
-    return queryExecutor.borrowCallableQuery(sql);
-  }
-
-  private CachedQuery borrowReturningQuery(String sql, String [] columnNames)
-      throws SQLException {
-    return queryExecutor.borrowReturningQuery(sql, columnNames);
-  }
-
-  @Override
-  public CachedQuery createQuery(String sql, boolean escapeProcessing, boolean isParameterized,
-      String... columnNames)
-      throws SQLException {
-    return queryExecutor.createQuery(sql, escapeProcessing, isParameterized, columnNames);
-  }
-
-  void releaseQuery(CachedQuery cachedQuery) {
-    queryExecutor.releaseQuery(cachedQuery);
-  }
-
-  @Override
-  public void setFlushCacheOnDeallocate(boolean flushCacheOnDeallocate) {
-    queryExecutor.setFlushCacheOnDeallocate(flushCacheOnDeallocate);
-    LOGGER.log(Level.FINE, "  setFlushCacheOnDeallocate = {0}", flushCacheOnDeallocate);
-  }
-
-  //
-  // Ctor.
-  //
-  @SuppressWarnings("this-escape")
-  public PgConnection(HostSpec[] hostSpecs,
-                      Properties info,
-                      String url) throws SQLException {
-    // Print out the driver version number
-    LOGGER.log(Level.FINE, DriverInfo.DRIVER_FULL_NAME);
-
-    this.creatingURL = url;
-
-    this.readOnlyBehavior = getReadOnlyBehavior(PGProperty.READ_ONLY_MODE.getOrDefault(info));
-
-    setDefaultFetchSize(PGProperty.DEFAULT_ROW_FETCH_SIZE.getInt(info));
-
-    setPrepareThreshold(PGProperty.PREPARE_THRESHOLD.getInt(info));
-    if (prepareThreshold == -1) {
-      setForceBinary(true);
-    }
-
-    // Now make the initial connection and set up local state
-    this.queryExecutor = ConnectionFactory.openConnection(hostSpecs, info);
-
-    // WARNING for unsupported servers (8.1 and lower are not supported)
-    if (LOGGER.isLoggable(Level.WARNING) && !haveMinimumServerVersion(ServerVersion.v8_2)) {
-      LOGGER.log(Level.WARNING, "Unsupported Server Version: {0}", queryExecutor.getServerVersion());
-    }
-
-    setSessionReadOnly = createQuery("SET SESSION CHARACTERISTICS AS TRANSACTION READ ONLY", false, true);
-    setSessionNotReadOnly = createQuery("SET SESSION CHARACTERISTICS AS TRANSACTION READ WRITE", false, true);
-
-    // Set read-only early if requested
-    if (PGProperty.READ_ONLY.getBoolean(info)) {
-      setReadOnly(true);
-    }
-
-    this.hideUnprivilegedObjects = PGProperty.HIDE_UNPRIVILEGED_OBJECTS.getBoolean(info);
-
-    // get oids that support binary transfer
-    Set<Integer> binaryOids = getBinaryEnabledOids(info);
-    // get oids that should be disabled from transfer
-    binaryDisabledOids = getBinaryDisabledOids(info);
-    // if there are any, remove them from the enabled ones
-    if (!binaryDisabledOids.isEmpty()) {
-      binaryOids.removeAll(binaryDisabledOids);
-    }
-
-    // split for receive and send for better control
-    Set<Integer> useBinarySendForOids = new HashSet<>(binaryOids);
-
-    Set<Integer> useBinaryReceiveForOids = new HashSet<>(binaryOids);
-
-    /*
-     * Does not pass unit tests because unit tests expect setDate to have millisecond accuracy
-     * whereas the binary transfer only supports date accuracy.
-     */
-    useBinarySendForOids.remove(Oid.DATE);
-
-    queryExecutor.setBinaryReceiveOids(useBinaryReceiveForOids);
-    queryExecutor.setBinarySendOids(useBinarySendForOids);
-
-    if (LOGGER.isLoggable(Level.FINEST)) {
-      LOGGER.log(Level.FINEST, "    types using binary send = {0}", oidsToString(useBinarySendForOids));
-      LOGGER.log(Level.FINEST, "    types using binary receive = {0}", oidsToString(useBinaryReceiveForOids));
-      LOGGER.log(Level.FINEST, "    integer date/time = {0}", queryExecutor.getIntegerDateTimes());
-    }
-
-    //
-    // String -> text or unknown?
-    //
-
-    String stringType = PGProperty.STRING_TYPE.getOrDefault(info);
-    if (stringType != null) {
-      if ("unspecified".equalsIgnoreCase(stringType)) {
-        bindStringAsVarchar = false;
-      } else if ("varchar".equalsIgnoreCase(stringType)) {
-        bindStringAsVarchar = true;
-      } else {
-        throw new PSQLException(
-            GT.tr("Unsupported value for stringtype parameter: {0}", stringType),
-            PSQLState.INVALID_PARAMETER_VALUE);
-      }
-    } else {
-      bindStringAsVarchar = true;
-    }
-
-    // Initialize timestamp stuff
-    timestampUtils = new TimestampUtils(!queryExecutor.getIntegerDateTimes(),
-        new QueryExecutorTimeZoneProvider(queryExecutor));
-
-    // Initialize common queries.
-    // isParameterized==true so full parse is performed and the engine knows the query
-    // is not a compound query with ; inside, so it could use parse/bind/exec messages
-    commitQuery = createQuery("COMMIT", false, true).query;
-    rollbackQuery = createQuery("ROLLBACK", false, true).query;
-
-    int unknownLength = PGProperty.UNKNOWN_LENGTH.getInt(info);
-
-    // Initialize object handling
-    TypeInfo typeCache = createTypeInfo(this, unknownLength);
-    this.typeCache = typeCache;
-    initObjectTypes(info);
-
-    if (PGProperty.LOG_UNCLOSED_CONNECTIONS.getBoolean(info)) {
-      openStackTrace = new Throwable("Connection was created at this point:");
-    }
-    finalizeAction = new PgConnectionCleaningAction(lock, openStackTrace, queryExecutor.getCloseAction());
-    this.logServerErrorDetail = PGProperty.LOG_SERVER_ERROR_DETAIL.getBoolean(info);
-    this.disableColumnSanitiser = PGProperty.DISABLE_COLUMN_SANITISER.getBoolean(info);
-
-    if (haveMinimumServerVersion(ServerVersion.v8_3)) {
-      typeCache.addCoreType("uuid", Oid.UUID, Types.OTHER, "java.util.UUID", Oid.UUID_ARRAY);
-      typeCache.addCoreType("xml", Oid.XML, Types.SQLXML, "java.sql.SQLXML", Oid.XML_ARRAY);
-    }
-
-    this.clientInfo = new Properties();
-    if (haveMinimumServerVersion(ServerVersion.v9_0)) {
-      String appName = PGProperty.APPLICATION_NAME.getOrDefault(info);
-      if (appName == null) {
-        appName = "";
-      }
-      this.clientInfo.put("ApplicationName", appName);
-    }
-
-    fieldMetadataCache = new LruCache<>(
-        Math.max(0, PGProperty.DATABASE_METADATA_CACHE_FIELDS.getInt(info)),
-        Math.max(0, PGProperty.DATABASE_METADATA_CACHE_FIELDS_MIB.getInt(info) * 1024L * 1024L),
-        false);
-
-    replicationConnection = PGProperty.REPLICATION.getOrDefault(info) != null;
-
-    xmlFactoryFactoryClass = PGProperty.XML_FACTORY_FACTORY.getOrDefault(info);
-    cleanable = LazyCleaner.getInstance().register(leakHandle, finalizeAction);
-  }
-
-  private static ReadOnlyBehavior getReadOnlyBehavior(String property) {
-    if (property == null) {
-      return ReadOnlyBehavior.transaction;
-    }
-    try {
-      return ReadOnlyBehavior.valueOf(property);
-    } catch (IllegalArgumentException e) {
-      try {
-        return ReadOnlyBehavior.valueOf(property.toLowerCase(Locale.US));
-      } catch (IllegalArgumentException e2) {
-        return ReadOnlyBehavior.transaction;
-      }
-    }
-  }
-
-  private static Set<Integer> getSupportedBinaryOids() {
-    return new HashSet<>(Arrays.asList(
-        Oid.BYTEA,
-        Oid.INT2,
-        Oid.INT4,
-        Oid.INT8,
-        Oid.FLOAT4,
-        Oid.FLOAT8,
-        Oid.NUMERIC,
-        Oid.TIME,
-        Oid.DATE,
-        Oid.TIMETZ,
-        Oid.TIMESTAMP,
-        Oid.TIMESTAMPTZ,
-        Oid.BYTEA_ARRAY,
-        Oid.INT2_ARRAY,
-        Oid.INT4_ARRAY,
-        Oid.INT8_ARRAY,
-        Oid.OID_ARRAY,
-        Oid.FLOAT4_ARRAY,
-        Oid.FLOAT8_ARRAY,
-        Oid.VARCHAR_ARRAY,
-        Oid.TEXT_ARRAY,
-        Oid.POINT,
-        Oid.BOX,
-        Oid.UUID));
-  }
-
-  /**
-   * Gets all oids for which binary transfer can be enabled.
-   *
-   * @param info properties
-   * @return oids for which binary transfer can be enabled
-   * @throws PSQLException if any oid is not valid
-   */
-  private static Set<Integer> getBinaryEnabledOids(Properties info) throws PSQLException {
-    // check if binary transfer should be enabled for built-in types
-    boolean binaryTransfer = PGProperty.BINARY_TRANSFER.getBoolean(info);
-    // get formats that currently have binary protocol support
-    Set<Integer> binaryOids = new HashSet<>(32);
-    if (binaryTransfer) {
-      binaryOids.addAll(SUPPORTED_BINARY_OIDS);
-    }
-    // add all oids which are enabled for binary transfer by the creator of the connection
-    String oids = PGProperty.BINARY_TRANSFER_ENABLE.getOrDefault(info);
-    if (oids != null) {
-      binaryOids.addAll(getOidSet(oids));
-    }
-    return binaryOids;
-  }
-
-  /**
-   * Gets all oids for which binary transfer should be disabled.
-   *
-   * @param info properties
-   * @return oids for which binary transfer should be disabled
-   * @throws PSQLException if any oid is not valid
-   */
-  private static Set<? extends Integer> getBinaryDisabledOids(Properties info)
-      throws PSQLException {
-    // check for oids that should explicitly be disabled
-    String oids = PGProperty.BINARY_TRANSFER_DISABLE.getOrDefault(info);
-    if (oids == null) {
-      return Collections.emptySet();
-    }
-    return getOidSet(oids);
-  }
-
-  private static Set<? extends Integer> getOidSet(String oidList) throws PSQLException {
-    if (oidList.isEmpty()) {
-      return Collections.emptySet();
-    }
-    Set<Integer> oids = new HashSet<>();
-    StringTokenizer tokenizer = new StringTokenizer(oidList, ",");
-    while (tokenizer.hasMoreTokens()) {
-      String oid = tokenizer.nextToken();
-      oids.add(Oid.valueOf(oid));
-    }
-    return oids;
-  }
-
-  private String oidsToString(Set<Integer> oids) {
-    StringBuilder sb = new StringBuilder();
-    for (Integer oid : oids) {
-      sb.append(Oid.toString(oid));
-      sb.append(',');
-    }
-    if (sb.length() > 0) {
-      sb.setLength(sb.length() - 1);
-    } else {
-      sb.append(" <none>");
-    }
-    return sb.toString();
-  }
-
-  private final TimestampUtils timestampUtils;
-
-  @Deprecated
-  @Override
-  public TimestampUtils getTimestampUtils() {
-    return timestampUtils;
-  }
-
-  /**
-   * The current type mappings.
-   */
-  protected Map<String, Class<?>> typemap = new HashMap<>();
-
-  /**
-   * Obtain the connection lock and return it. Callers must use try-with-resources to ensure that
-   * unlock() is performed on the lock.
-   */
-  final ResourceLock obtainLock() {
-    return lock.obtain();
-  }
-
-  /**
-   * Return the lock condition for this connection.
-   */
-  final Condition lockCondition() {
-    return lockCondition;
-  }
-
-  @Override
-  public Statement createStatement() throws SQLException {
-    // We now follow the spec and default to TYPE_FORWARD_ONLY.
-    return createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
-  }
-
-  @Override
-  public PreparedStatement prepareStatement(String sql) throws SQLException {
-    return prepareStatement(sql, ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
-  }
-
-  @Override
-  public CallableStatement prepareCall(String sql) throws SQLException {
-    return prepareCall(sql, ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
-  }
-
-  @Override
-  public Map<String, Class<?>> getTypeMap() throws SQLException {
-    checkClosed();
-    return typemap;
-  }
-
-  @Override
-  public QueryExecutor getQueryExecutor() {
-    return queryExecutor;
-  }
-
-  @Override
-  public ReplicationProtocol getReplicationProtocol() {
-    return queryExecutor.getReplicationProtocol();
-  }
-
-  /**
-   * This adds a warning to the warning chain.
-   *
-   * @param warn warning to add
-   */
-  public void addWarning(SQLWarning warn) {
-    // Add the warning to the chain
-    if (firstWarning != null) {
-      firstWarning.setNextWarning(warn);
-    } else {
-      firstWarning = warn;
-    }
-
-  }
-
-  @Override
-  public ResultSet execSQLQuery(String s) throws SQLException {
-    return execSQLQuery(s, ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
-  }
-
-  @Override
-  public ResultSet execSQLQuery(String s, int resultSetType, int resultSetConcurrency)
-      throws SQLException {
-    BaseStatement stat = (BaseStatement) createStatement(resultSetType, resultSetConcurrency);
-    boolean hasResultSet = stat.executeWithFlags(s, QueryExecutor.QUERY_SUPPRESS_BEGIN);
-
-    while (!hasResultSet && stat.getUpdateCount() != -1) {
-      hasResultSet = stat.getMoreResults();
-    }
-
-    if (!hasResultSet) {
-      throw new PSQLException(GT.tr("No results were returned by the query."), PSQLState.NO_DATA);
-    }
-
-    // Transfer warnings to the connection, since the user never
-    // has a chance to see the statement itself.
-    SQLWarning warnings = stat.getWarnings();
-    if (warnings != null) {
-      addWarning(warnings);
-    }
-
-    return stat.getResultSet();
-  }
-
-  @Override
-  public void execSQLUpdate(String s) throws SQLException {
-    try (BaseStatement stmt = (BaseStatement) createStatement()) {
-      if (stmt.executeWithFlags(s, QueryExecutor.QUERY_NO_METADATA | QueryExecutor.QUERY_NO_RESULTS
-          | QueryExecutor.QUERY_SUPPRESS_BEGIN)) {
-        throw new PSQLException(GT.tr("A result was returned when none was expected."),
-            PSQLState.TOO_MANY_RESULTS);
-      }
-
-      // Transfer warnings to the connection, since the user never
-      // has a chance to see the statement itself.
-      SQLWarning warnings = stmt.getWarnings();
-      if (warnings != null) {
-        addWarning(warnings);
-      }
-    }
-  }
-
-  void execSQLUpdate(CachedQuery query) throws SQLException {
-    try (BaseStatement stmt = (BaseStatement) createStatement()) {
-      if (stmt.executeWithFlags(query, QueryExecutor.QUERY_NO_METADATA | QueryExecutor.QUERY_NO_RESULTS
-          | QueryExecutor.QUERY_SUPPRESS_BEGIN)) {
-        throw new PSQLException(GT.tr("A result was returned when none was expected."),
-            PSQLState.TOO_MANY_RESULTS);
-      }
-
-      // Transfer warnings to the connection, since the user never
-      // has a chance to see the statement itself.
-      SQLWarning warnings = stmt.getWarnings();
-      if (warnings != null) {
-        addWarning(warnings);
-      }
-    }
-  }
-
-  /**
-   * <p>In SQL, a result table can be retrieved through a cursor that is named. The current row of a
-   * result can be updated or deleted using a positioned update/delete statement that references the
-   * cursor name.</p>
-   *
-   * <p>We do not support positioned update/delete, so this is a no-op.</p>
-   *
-   * @param cursor the cursor name
-   * @throws SQLException if a database access error occurs
-   */
-  public void setCursorName(String cursor) throws SQLException {
-    checkClosed();
-    // No-op.
-  }
-
-  /**
-   * getCursorName gets the cursor name.
-   *
-   * @return the current cursor name
-   * @throws SQLException if a database access error occurs
-   */
-  public String getCursorName() throws SQLException {
-    checkClosed();
-    return null;
-  }
-
-  /**
-   * <p>We are required to bring back certain information by the DatabaseMetaData class. These
-   * functions do that.</p>
-   *
-   * <p>Method getURL() brings back the URL (good job we saved it)</p>
-   *
-   * @return the url
-   * @throws SQLException just in case...
-   */
-  public String getURL() throws SQLException {
-    return creatingURL;
-  }
-
-  /**
-   * Method getUserName() brings back the User Name (again, we saved it).
-   *
-   * @return the user name
-   * @throws SQLException just in case...
-   */
-  public String getUserName() throws SQLException {
-    return queryExecutor.getUser();
-  }
-
-  @SuppressWarnings("deprecation")
-  @Override
-  public Fastpath getFastpathAPI() throws SQLException {
-    checkClosed();
-    if (fastpath == null) {
-      fastpath = new Fastpath(this);
-    }
-    return fastpath;
-  }
-
-  // This holds a reference to the Fastpath API if already open
-  @SuppressWarnings("deprecation")
-  private Fastpath fastpath;
-
-  @Override
-  public LargeObjectManager getLargeObjectAPI() throws SQLException {
-    checkClosed();
-    if (largeobject == null) {
-      largeobject = new LargeObjectManager(this);
-    }
-    return largeobject;
-  }
-
-  // This holds a reference to the LargeObject API if already open
-  private LargeObjectManager largeobject;
-
-  /*
-   * This method is used internally to return an object based around org.postgresql's more unique
-   * data types.
-   *
-   * <p>It uses an internal HashMap to get the handling class. If the type is not supported, then an
-   * instance of org.postgresql.util.PGobject is returned.
-   *
-   * You can use the getValue() or setValue() methods to handle the returned object. Custom objects
-   * can have their own methods.
-   *
-   * @return PGobject for this type, and set to value
-   *
-   * @exception SQLException if value is not correct for this type
-   */
-  @Override
-  public Object getObject(String type, String value, byte [] byteValue)
-      throws SQLException {
-    if (typemap != null) {
-      Class<?> c = typemap.get(type);
-      if (c != null) {
-        // Handle the type (requires SQLInput & SQLOutput classes to be implemented)
-        throw new PSQLException(GT.tr("Custom type maps are not supported."),
-            PSQLState.NOT_IMPLEMENTED);
-      }
-    }
-
-    PGobject obj = null;
-
-    if (LOGGER.isLoggable(Level.FINEST)) {
-      LOGGER.log(Level.FINEST, "Constructing object from type={0} value=<{1}>", new Object[]{type, value});
-    }
-
-    try {
-      Class<? extends PGobject> klass = typeCache.getPGobject(type);
-
-      // If className is not null, then try to instantiate it,
-      // It must be basetype PGobject
-
-      // This is used to implement the org.postgresql unique types (like lseg,
-      // point, etc).
-
-      if (klass != null) {
-        obj = klass.getDeclaredConstructor().newInstance();
-        obj.setType(type);
-        if (byteValue != null && obj instanceof PGBinaryObject) {
-          PGBinaryObject binObj = (PGBinaryObject) obj;
-          binObj.setByteValue(byteValue, 0);
-        } else {
-          obj.setValue(value);
-        }
-      } else {
-        // If className is null, then the type is unknown.
-        // so return a PGobject with the type set, and the value set
-        obj = new PGobject();
-        obj.setType(type);
-        obj.setValue(value);
-      }
-
-      return obj;
-    } catch (SQLException sx) {
-      // rethrow the exception. Done because we capture any others next
-      throw sx;
-    } catch (Exception ex) {
-      throw new PSQLException(GT.tr("Failed to create object for: {0}.", type),
-          PSQLState.CONNECTION_FAILURE, ex);
-    }
-  }
-
-  protected TypeInfo createTypeInfo(BaseConnection conn, int unknownLength) {
-    return new TypeInfoCache(conn, unknownLength);
-  }
-
-  @Override
-  public TypeInfo getTypeInfo() {
-    return typeCache;
-  }
-
-  @Deprecated
-  @Override
-  public void addDataType(String type, String name) {
-    try {
-      addDataType(type, Class.forName(name).asSubclass(PGobject.class));
-    } catch (Exception e) {
-      throw new RuntimeException("Cannot register new type " + type, e);
-    }
-  }
-
-  @Override
-  public void addDataType(String type, Class<? extends PGobject> klass) throws SQLException {
-    checkClosed();
-    // first add the data type to the type cache
-    typeCache.addDataType(type, klass);
-    // then check if this type supports binary transfer
-    if (PGBinaryObject.class.isAssignableFrom(klass) && getPreferQueryMode() != PreferQueryMode.SIMPLE) {
-      // try to get an oid for this type (will return 0 if the type does not exist in the database)
-      int oid = typeCache.getPGType(type);
-      // check if oid is there and if it is not disabled for binary transfer
-      if (oid > 0 && !binaryDisabledOids.contains(oid)) {
-        // allow using binary transfer for receiving and sending of this type
-        queryExecutor.addBinaryReceiveOid(oid);
-        queryExecutor.addBinarySendOid(oid);
-      }
-    }
-  }
-
-  // This initialises the objectTypes hash map
-  private void initObjectTypes(Properties info) throws SQLException {
-    // Add in the types that come packaged with the driver.
-    // These can be overridden later if desired.
-    addDataType("box", PGbox.class);
-    addDataType("circle", PGcircle.class);
-    addDataType("line", PGline.class);
-    addDataType("lseg", PGlseg.class);
-    addDataType("path", PGpath.class);
-    addDataType("point", PGpoint.class);
-    addDataType("polygon", PGpolygon.class);
-    addDataType("money", PGmoney.class);
-    addDataType("interval", PGInterval.class);
-
-    Enumeration<?> e = info.propertyNames();
-    while (e.hasMoreElements()) {
-      String propertyName = (String) e.nextElement();
-      if (propertyName != null && propertyName.startsWith("datatype.")) {
-        String typeName = propertyName.substring(9);
-        String className = info.getProperty(propertyName);
-        Class<?> klass;
    // MethodHandles for java.lang.SecurityManager, resolved reflectively so this
    // class keeps no compile-time reference to the deprecated SecurityManager API.
    // NOTE(review): both handles remain null on a JVM where the lookup fails;
    // any caller must check for null before invoking.
    private static final MethodHandle SYSTEM_GET_SECURITY_MANAGER;
    private static final MethodHandle SECURITY_MANAGER_CHECK_PERMISSION;

    static {
        MethodHandle systemGetSecurityManagerHandle = null;
        MethodHandle securityManagerCheckPermission = null;
        try {
            // Look the class up by name to avoid referencing SecurityManager directly.
            Class<?> securityManagerClass = Class.forName("java.lang.SecurityManager");
            systemGetSecurityManagerHandle =
                    MethodHandles.lookup().findStatic(System.class, "getSecurityManager",
                            MethodType.methodType(securityManagerClass));
            securityManagerCheckPermission =
                    MethodHandles.lookup().findVirtual(securityManagerClass, "checkPermission",
                            MethodType.methodType(void.class, Permission.class));
        } catch (NoSuchMethodException | IllegalAccessException | ClassNotFoundException ignore) {
            // Deliberately ignored: when SecurityManager is unavailable the
            // handles simply stay null.
        }
        SYSTEM_GET_SECURITY_MANAGER = systemGetSecurityManagerHandle;
        SECURITY_MANAGER_CHECK_PERMISSION = securityManagerCheckPermission;
    }

    // Guards connection-level state; always obtained through obtainLock() with
    // try-with-resources.
    private final ResourceLock lock = new ResourceLock();
    // Condition bound to {@link #lock}; exposed to collaborators via lockCondition().
    private final Condition lockCondition = lock.newCondition();
    //
    // Data initialized on construction:
    //
    private final Properties clientInfo;
    /* URL we were created via */
    private final String creatingURL;
    // How the readOnly flag is enforced (ignore / transaction / always).
    private final ReadOnlyBehavior readOnlyBehavior;
    /**
     * This field keeps finalize action alive, so its .finalize() method is called only
     * when the connection itself becomes unreachable.
     * Moving .finalize() to a different object allows JVM to release all the other objects
     * referenced in PgConnection early.
     */
    private final PgConnectionCleaningAction finalizeAction;
    // Sentinel registered with LazyCleaner; its unreachability triggers cleanup.
    private final Object leakHandle = new Object();
    /* Actual network handler */
    private final QueryExecutor queryExecutor;
    /* Query that runs COMMIT */
    private final Query commitQuery;
    /* Query that runs ROLLBACK */
    private final Query rollbackQuery;
    // Pre-parsed SET SESSION ... READ ONLY / READ WRITE statements.
    private final CachedQuery setSessionReadOnly;
    private final CachedQuery setSessionNotReadOnly;
    private final TypeInfo typeCache;
    /**
     * Oids for which binary transfer should be disabled.
     */
    private final Set<? extends Integer> binaryDisabledOids;
    // Filter out database objects for which the current user has no privileges granted from the DatabaseMetaData
    private final boolean hideUnprivilegedObjects;
    // Whether to include error details in logging and exceptions
    private final boolean logServerErrorDetail;
    // Bind String to UNSPECIFIED or VARCHAR?
    private final boolean bindStringAsVarchar;
    /**
     * Replication protocol in current version postgresql(10devel) supports a limited number of
     * commands.
     */
    private final boolean replicationConnection;
    // Size/weight-bounded cache of column metadata used by DatabaseMetaData.
    private final LruCache<FieldMetadata.Key, FieldMetadata> fieldMetadataCache;
    private final String xmlFactoryFactoryClass;
    private final LazyCleaner.Cleanable<IOException> cleanable;
    private final TimestampUtils timestampUtils;
    // Default statement prepare threshold.
    protected int prepareThreshold;
    /**
     * Default fetch size for statement.
     *
     * @see PGProperty#DEFAULT_ROW_FETCH_SIZE
     */
    protected int defaultFetchSize;
    // Default forcebinary option.
    protected boolean forcebinary;
    /**
     * The current type mappings.
     */
    protected Map<String, Class<?>> typemap = new HashMap<>();
    // This is a cache of the DatabaseMetaData instance for this connection
    protected DatabaseMetaData metadata;
    // Creation-site stack trace, kept only when logUnclosedConnections is set.
    private Throwable openStackTrace;
    private boolean disableColumnSanitiser;
    // Result-set holdability default per JDBC.
    private int rsHoldability = ResultSet.CLOSE_CURSORS_AT_COMMIT;
    // Counter used to generate unique unnamed-savepoint ids.
    private int savepointId;
    // Connection's autocommit state.
    private boolean autoCommit = true;
    // Connection's readonly state.
    private boolean readOnly;
    // Current warnings; there might be more on queryExecutor too.
    private SQLWarning firstWarning;
    private PGXmlFactoryFactory xmlFactoryFactory;
    // This holds a reference to the Fastpath API if already open
    @SuppressWarnings("deprecation")
    private Fastpath fastpath;
    // This holds a reference to the LargeObject API if already open
    private LargeObjectManager largeobject;
    private CopyManager copyManager;
+
    //
    // Ctor.
    //
    /**
     * Opens a connection to one of the given hosts and initializes all
     * connection-local state (binary-transfer oid sets, type cache, client
     * info, caches, leak-detection cleaner).
     *
     * <p>Initialization order matters: the network connection is established
     * before any server-version-dependent setup runs.</p>
     *
     * @param hostSpecs candidate host/port pairs handed to ConnectionFactory
     * @param info connection properties driving all configuration below
     * @param url the JDBC URL this connection was created from (reported by getURL())
     * @throws SQLException if the connection cannot be established or a
     *         property value is invalid
     */
    @SuppressWarnings("this-escape")
    public PgConnection(HostSpec[] hostSpecs,
                        Properties info,
                        String url) throws SQLException {
        // Print out the driver version number
        LOGGER.log(Level.FINE, DriverInfo.DRIVER_FULL_NAME);

        this.creatingURL = url;

        this.readOnlyBehavior = getReadOnlyBehavior(PGProperty.READ_ONLY_MODE.getOrDefault(info));

        setDefaultFetchSize(PGProperty.DEFAULT_ROW_FETCH_SIZE.getInt(info));

        setPrepareThreshold(PGProperty.PREPARE_THRESHOLD.getInt(info));
        // prepareThreshold == -1 is the documented "force binary" sentinel.
        if (prepareThreshold == -1) {
            setForceBinary(true);
        }

        // Now make the initial connection and set up local state
        this.queryExecutor = ConnectionFactory.openConnection(hostSpecs, info);

        // WARNING for unsupported servers (8.1 and lower are not supported)
        if (LOGGER.isLoggable(Level.WARNING) && !haveMinimumServerVersion(ServerVersion.v8_2)) {
            LOGGER.log(Level.WARNING, "Unsupported Server Version: {0}", queryExecutor.getServerVersion());
        }

        setSessionReadOnly = createQuery("SET SESSION CHARACTERISTICS AS TRANSACTION READ ONLY", false, true);
        setSessionNotReadOnly = createQuery("SET SESSION CHARACTERISTICS AS TRANSACTION READ WRITE", false, true);

        // Set read-only early if requested
        if (PGProperty.READ_ONLY.getBoolean(info)) {
            setReadOnly(true);
        }

        this.hideUnprivilegedObjects = PGProperty.HIDE_UNPRIVILEGED_OBJECTS.getBoolean(info);

        // get oids that support binary transfer
        Set<Integer> binaryOids = getBinaryEnabledOids(info);
        // get oids that should be disabled from transfer
        binaryDisabledOids = getBinaryDisabledOids(info);
        // if there are any, remove them from the enabled ones
        if (!binaryDisabledOids.isEmpty()) {
            binaryOids.removeAll(binaryDisabledOids);
        }

        // split for receive and send for better control
        Set<Integer> useBinarySendForOids = new HashSet<>(binaryOids);

        Set<Integer> useBinaryReceiveForOids = new HashSet<>(binaryOids);

        /*
         * Does not pass unit tests because unit tests expect setDate to have millisecond accuracy
         * whereas the binary transfer only supports date accuracy.
         */
        useBinarySendForOids.remove(Oid.DATE);

        queryExecutor.setBinaryReceiveOids(useBinaryReceiveForOids);
        queryExecutor.setBinarySendOids(useBinarySendForOids);

        if (LOGGER.isLoggable(Level.FINEST)) {
            LOGGER.log(Level.FINEST, "    types using binary send = {0}", oidsToString(useBinarySendForOids));
            LOGGER.log(Level.FINEST, "    types using binary receive = {0}", oidsToString(useBinaryReceiveForOids));
            LOGGER.log(Level.FINEST, "    integer date/time = {0}", queryExecutor.getIntegerDateTimes());
        }

        //
        // String -> text or unknown?
        //

        String stringType = PGProperty.STRING_TYPE.getOrDefault(info);
        if (stringType != null) {
            if ("unspecified".equalsIgnoreCase(stringType)) {
                bindStringAsVarchar = false;
            } else if ("varchar".equalsIgnoreCase(stringType)) {
                bindStringAsVarchar = true;
            } else {
                throw new PSQLException(
                        GT.tr("Unsupported value for stringtype parameter: {0}", stringType),
                        PSQLState.INVALID_PARAMETER_VALUE);
            }
        } else {
            // Default: bind String parameters as varchar.
            bindStringAsVarchar = true;
        }

        // Initialize timestamp stuff
        timestampUtils = new TimestampUtils(!queryExecutor.getIntegerDateTimes(),
                new QueryExecutorTimeZoneProvider(queryExecutor));

        // Initialize common queries.
        // isParameterized==true so full parse is performed and the engine knows the query
        // is not a compound query with ; inside, so it could use parse/bind/exec messages
        commitQuery = createQuery("COMMIT", false, true).query;
        rollbackQuery = createQuery("ROLLBACK", false, true).query;

        int unknownLength = PGProperty.UNKNOWN_LENGTH.getInt(info);

        // Initialize object handling
        TypeInfo typeCache = createTypeInfo(this, unknownLength);
        this.typeCache = typeCache;
        initObjectTypes(info);

        if (PGProperty.LOG_UNCLOSED_CONNECTIONS.getBoolean(info)) {
            openStackTrace = new Throwable("Connection was created at this point:");
        }
        finalizeAction = new PgConnectionCleaningAction(lock, openStackTrace, queryExecutor.getCloseAction());
        this.logServerErrorDetail = PGProperty.LOG_SERVER_ERROR_DETAIL.getBoolean(info);
        this.disableColumnSanitiser = PGProperty.DISABLE_COLUMN_SANITISER.getBoolean(info);

        // uuid and xml types only exist on 8.3+ servers.
        if (haveMinimumServerVersion(ServerVersion.v8_3)) {
            typeCache.addCoreType("uuid", Oid.UUID, Types.OTHER, "java.util.UUID", Oid.UUID_ARRAY);
            typeCache.addCoreType("xml", Oid.XML, Types.SQLXML, "java.sql.SQLXML", Oid.XML_ARRAY);
        }

        this.clientInfo = new Properties();
        // ApplicationName is a 9.0+ server setting.
        if (haveMinimumServerVersion(ServerVersion.v9_0)) {
            String appName = PGProperty.APPLICATION_NAME.getOrDefault(info);
            if (appName == null) {
                appName = "";
            }
            this.clientInfo.put("ApplicationName", appName);
        }

        fieldMetadataCache = new LruCache<>(
                Math.max(0, PGProperty.DATABASE_METADATA_CACHE_FIELDS.getInt(info)),
                Math.max(0, PGProperty.DATABASE_METADATA_CACHE_FIELDS_MIB.getInt(info) * 1024L * 1024L),
                false);

        // Presence of the replication property (any value) marks this as a
        // replication connection.
        replicationConnection = PGProperty.REPLICATION.getOrDefault(info) != null;

        xmlFactoryFactoryClass = PGProperty.XML_FACTORY_FACTORY.getOrDefault(info);
        // Register last so cleanup only ever sees a fully-constructed action.
        cleanable = LazyCleaner.getInstance().register(leakHandle, finalizeAction);
    }
 
-    if (readOnly != this.readOnly && autoCommit && this.readOnlyBehavior == ReadOnlyBehavior.always) {
-      execSQLUpdate(readOnly ? setSessionReadOnly : setSessionNotReadOnly);
+    private static ReadOnlyBehavior getReadOnlyBehavior(String property) {
+        if (property == null) {
+            return ReadOnlyBehavior.transaction;
+        }
+        try {
+            return ReadOnlyBehavior.valueOf(property);
+        } catch (IllegalArgumentException e) {
+            try {
+                return ReadOnlyBehavior.valueOf(property.toLowerCase(Locale.US));
+            } catch (IllegalArgumentException e2) {
+                return ReadOnlyBehavior.transaction;
+            }
+        }
     }
 
-    this.readOnly = readOnly;
-    LOGGER.log(Level.FINE, "  setReadOnly = {0}", readOnly);
-  }
-
-  @Override
-  public boolean isReadOnly() throws SQLException {
-    checkClosed();
-    return readOnly;
-  }
-
-  @Override
-  public boolean hintReadOnly() {
-    return readOnly && readOnlyBehavior != ReadOnlyBehavior.ignore;
-  }
-
-  @Override
-  public void setAutoCommit(boolean autoCommit) throws SQLException {
-    checkClosed();
-
-    if (this.autoCommit == autoCommit) {
-      return;
+    private static Set<Integer> getSupportedBinaryOids() {
+        return new HashSet<>(Arrays.asList(
+                Oid.BYTEA,
+                Oid.INT2,
+                Oid.INT4,
+                Oid.INT8,
+                Oid.FLOAT4,
+                Oid.FLOAT8,
+                Oid.NUMERIC,
+                Oid.TIME,
+                Oid.DATE,
+                Oid.TIMETZ,
+                Oid.TIMESTAMP,
+                Oid.TIMESTAMPTZ,
+                Oid.BYTEA_ARRAY,
+                Oid.INT2_ARRAY,
+                Oid.INT4_ARRAY,
+                Oid.INT8_ARRAY,
+                Oid.OID_ARRAY,
+                Oid.FLOAT4_ARRAY,
+                Oid.FLOAT8_ARRAY,
+                Oid.VARCHAR_ARRAY,
+                Oid.TEXT_ARRAY,
+                Oid.POINT,
+                Oid.BOX,
+                Oid.UUID));
     }
 
-    if (!this.autoCommit) {
-      commit();
+    /**
+     * Gets all oids for which binary transfer can be enabled.
+     *
+     * @param info properties
+     * @return oids for which binary transfer can be enabled
+     * @throws PSQLException if any oid is not valid
+     */
+    private static Set<Integer> getBinaryEnabledOids(Properties info) throws PSQLException {
+        // check if binary transfer should be enabled for built-in types
+        boolean binaryTransfer = PGProperty.BINARY_TRANSFER.getBoolean(info);
+        // get formats that currently have binary protocol support
+        Set<Integer> binaryOids = new HashSet<>(32);
+        if (binaryTransfer) {
+            binaryOids.addAll(SUPPORTED_BINARY_OIDS);
+        }
+        // add all oids which are enabled for binary transfer by the creator of the connection
+        String oids = PGProperty.BINARY_TRANSFER_ENABLE.getOrDefault(info);
+        if (oids != null) {
+            binaryOids.addAll(getOidSet(oids));
+        }
+        return binaryOids;
     }
 
-    // if the connection is read only, we need to make sure session settings are
-    // correct when autocommit status changed
-    if (this.readOnly && readOnlyBehavior == ReadOnlyBehavior.always) {
-      // if we are turning on autocommit, we need to set session
-      // to read only
-      if (autoCommit) {
-        this.autoCommit = true;
-        execSQLUpdate(setSessionReadOnly);
-      } else {
-        // if we are turning auto commit off, we need to
-        // disable session
-        execSQLUpdate(setSessionNotReadOnly);
-      }
+    /**
+     * Gets all oids for which binary transfer should be disabled.
+     *
+     * @param info properties
+     * @return oids for which binary transfer should be disabled
+     * @throws PSQLException if any oid is not valid
+     */
+    private static Set<? extends Integer> getBinaryDisabledOids(Properties info)
+            throws PSQLException {
+        // check for oids that should explicitly be disabled
+        String oids = PGProperty.BINARY_TRANSFER_DISABLE.getOrDefault(info);
+        if (oids == null) {
+            return Collections.emptySet();
+        }
+        return getOidSet(oids);
     }
 
-    this.autoCommit = autoCommit;
-    LOGGER.log(Level.FINE, "  setAutoCommit = {0}", autoCommit);
-  }
-
-  @Override
-  public boolean getAutoCommit() throws SQLException {
-    checkClosed();
-    return this.autoCommit;
-  }
-
-  private void executeTransactionCommand(Query query) throws SQLException {
-    int flags = QueryExecutor.QUERY_NO_METADATA | QueryExecutor.QUERY_NO_RESULTS
-        | QueryExecutor.QUERY_SUPPRESS_BEGIN;
-    if (prepareThreshold == 0) {
-      flags |= QueryExecutor.QUERY_ONESHOT;
+    private static Set<? extends Integer> getOidSet(String oidList) throws PSQLException {
+        if (oidList.isEmpty()) {
+            return Collections.emptySet();
+        }
+        Set<Integer> oids = new HashSet<>();
+        StringTokenizer tokenizer = new StringTokenizer(oidList, ",");
+        while (tokenizer.hasMoreTokens()) {
+            String oid = tokenizer.nextToken();
+            oids.add(Oid.valueOf(oid));
+        }
+        return oids;
     }
 
-    try {
-      getQueryExecutor().execute(query, null, new TransactionCommandHandler(), 0, 0, flags);
-    } catch (SQLException e) {
-      // Don't retry composite queries as it might get partially executed
-      if (query.getSubqueries() != null || !queryExecutor.willHealOnRetry(e)) {
-        throw e;
-      }
-      query.close();
-      // retry
-      getQueryExecutor().execute(query, null, new TransactionCommandHandler(), 0, 0, flags);
-    }
-  }
+    // Parse a "dirty" integer surrounded by non-numeric characters
+    private static int integerPart(String dirtyString) {
+        int start = 0;
 
-  @Override
-  public void commit() throws SQLException {
-    checkClosed();
+        while (start < dirtyString.length() && !Character.isDigit(dirtyString.charAt(start))) {
+            ++start;
+        }
 
-    if (autoCommit) {
-      throw new PSQLException(GT.tr("Cannot commit when autoCommit is enabled."),
-          PSQLState.NO_ACTIVE_SQL_TRANSACTION);
+        int end = start;
+        while (end < dirtyString.length() && Character.isDigit(dirtyString.charAt(end))) {
+            ++end;
+        }
+
+        if (start == end) {
+            return 0;
+        }
+
+        return Integer.parseInt(dirtyString.substring(start, end));
     }
 
-    if (queryExecutor.getTransactionState() != TransactionState.IDLE) {
-      executeTransactionCommand(commitQuery);
-    }
-  }
-
-  protected void checkClosed() throws SQLException {
-    if (isClosed()) {
-      throw new PSQLException(GT.tr("This connection has been closed."),
-          PSQLState.CONNECTION_DOES_NOT_EXIST);
-    }
-  }
-
-  @Override
-  public void rollback() throws SQLException {
-    checkClosed();
-
-    if (autoCommit) {
-      throw new PSQLException(GT.tr("Cannot rollback when autoCommit is enabled."),
-          PSQLState.NO_ACTIVE_SQL_TRANSACTION);
    /**
     * Borrows a parsed (possibly cached) query for the given SQL from the
     * query executor. Return it with {@link #releaseQuery}.
     *
     * @param sql SQL text
     * @return a cached query handle
     * @throws SQLException if parsing fails
     */
    final CachedQuery borrowQuery(String sql) throws SQLException {
        return queryExecutor.borrowQuery(sql);
    }

    /**
     * Borrows a parsed query for callable-statement (escape-call) SQL from the
     * query executor. Return it with {@link #releaseQuery}.
     *
     * @param sql SQL text in JDBC call-escape form
     * @return a cached query handle
     * @throws SQLException if parsing fails
     */
    final CachedQuery borrowCallableQuery(String sql) throws SQLException {
        return queryExecutor.borrowCallableQuery(sql);
    }

    /**
     * Borrows a parsed query that returns the given generated-key columns.
     * Return it with {@link #releaseQuery}.
     *
     * @param sql SQL text
     * @param columnNames generated-key column names to return
     * @return a cached query handle
     * @throws SQLException if parsing fails
     */
    private CachedQuery borrowReturningQuery(String sql, String[] columnNames)
            throws SQLException {
        return queryExecutor.borrowReturningQuery(sql, columnNames);
    }
 
-    return Connection.TRANSACTION_READ_COMMITTED; // Best guess.
-  }
-
-  @Override
-  public void setTransactionIsolation(int level) throws SQLException {
-    checkClosed();
-
-    if (queryExecutor.getTransactionState() != TransactionState.IDLE) {
-      throw new PSQLException(
-          GT.tr("Cannot change transaction isolation level in the middle of a transaction."),
-          PSQLState.ACTIVE_SQL_TRANSACTION);
    /**
     * Creates a (non-cached) query object for the given SQL.
     *
     * @param sql SQL text
     * @param escapeProcessing whether JDBC escape processing should be applied
     * @param isParameterized whether ? placeholders should be treated as bind parameters
     * @param columnNames generated-key columns to return, if any
     * @throws SQLException if parsing fails
     */
    @Override
    public CachedQuery createQuery(String sql, boolean escapeProcessing, boolean isParameterized,
                                   String... columnNames)
            throws SQLException {
        return queryExecutor.createQuery(sql, escapeProcessing, isParameterized, columnNames);
    }

    /**
     * Returns a borrowed query to the query executor's cache.
     *
     * @param cachedQuery handle previously obtained from a borrow* method
     */
    void releaseQuery(CachedQuery cachedQuery) {
        queryExecutor.releaseQuery(cachedQuery);
    }

    /**
     * Enables or disables flushing the statement cache when DEALLOCATE ALL is
     * observed; delegated to the query executor.
     *
     * @param flushCacheOnDeallocate new setting
     */
    @Override
    public void setFlushCacheOnDeallocate(boolean flushCacheOnDeallocate) {
        queryExecutor.setFlushCacheOnDeallocate(flushCacheOnDeallocate);
        LOGGER.log(Level.FINE, "  setFlushCacheOnDeallocate = {0}", flushCacheOnDeallocate);
    }
 
-  protected String getIsolationLevelName(int level) {
-    switch (level) {
-      case Connection.TRANSACTION_READ_COMMITTED:
-        return "READ COMMITTED";
-      case Connection.TRANSACTION_SERIALIZABLE:
-        return "SERIALIZABLE";
-      case Connection.TRANSACTION_READ_UNCOMMITTED:
-        return "READ UNCOMMITTED";
-      case Connection.TRANSACTION_REPEATABLE_READ:
-        return "REPEATABLE READ";
-      default:
+    private String oidsToString(Set<Integer> oids) {
+        StringBuilder sb = new StringBuilder();
+        for (Integer oid : oids) {
+            sb.append(Oid.toString(oid));
+            sb.append(',');
+        }
+        if (sb.length() > 0) {
+            sb.setLength(sb.length() - 1);
+        } else {
+            sb.append(" <none>");
+        }
+        return sb.toString();
+    }
+
    /**
     * Returns the connection's timestamp conversion helper.
     *
     * @return the shared TimestampUtils instance for this connection
     * @deprecated exposed for legacy callers only
     */
    @Deprecated
    @Override
    public TimestampUtils getTimestampUtils() {
        return timestampUtils;
    }

    /**
     * Obtain the connection lock and return it. Callers must use try-with-resources to ensure that
     * unlock() is performed on the lock.
     */
    final ResourceLock obtainLock() {
        return lock.obtain();
    }

    /**
     * Return the lock condition for this connection.
     */
    final Condition lockCondition() {
        return lockCondition;
    }
+
    /**
     * Creates a default statement: forward-only, read-only concurrency.
     */
    @Override
    public Statement createStatement() throws SQLException {
        // We now follow the spec and default to TYPE_FORWARD_ONLY.
        return createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
    }

    /**
     * Prepares a statement with the default forward-only, read-only result type.
     */
    @Override
    public PreparedStatement prepareStatement(String sql) throws SQLException {
        return prepareStatement(sql, ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
    }

    /**
     * Prepares a callable statement with the default forward-only, read-only result type.
     */
    @Override
    public CallableStatement prepareCall(String sql) throws SQLException {
        return prepareCall(sql, ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
    }
+
    /**
     * Returns the current custom type map.
     *
     * <p>NOTE(review): this returns the live internal map, not a copy — callers
     * can observe later changes; confirm whether that is intended.</p>
     */
    @Override
    public Map<String, Class<?>> getTypeMap() throws SQLException {
        checkClosed();
        return typemap;
    }

    /**
     * Replaces the connection's custom type map.
     *
     * @param map new type map
     */
    @Override
    public void setTypeMap(Map<String, Class<?>> map) throws SQLException {
        setTypeMapImpl(map);
        LOGGER.log(Level.FINE, "  setTypeMap = {0}", map);
    }

    /**
     * Returns the low-level protocol handler backing this connection.
     */
    @Override
    public QueryExecutor getQueryExecutor() {
        return queryExecutor;
    }

    /**
     * Returns the replication protocol handler from the query executor.
     */
    @Override
    public ReplicationProtocol getReplicationProtocol() {
        return queryExecutor.getReplicationProtocol();
    }
+
    /**
     * This adds a warning to the warning chain.
     *
     * <p>{@code SQLWarning.setNextWarning} appends to the end of the chain, so
     * warnings are preserved in arrival order.</p>
     *
     * @param warn warning to add
     */
    public void addWarning(SQLWarning warn) {
        // Add the warning to the chain
        if (firstWarning != null) {
            firstWarning.setNextWarning(warn);
        } else {
            firstWarning = warn;
        }

    }

    /**
     * Runs an internal query with a default forward-only, read-only result set.
     *
     * @param s SQL to execute
     * @return the query's result set
     * @throws SQLException if execution fails or no result set is produced
     */
    @Override
    public ResultSet execSQLQuery(String s) throws SQLException {
        return execSQLQuery(s, ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
    }
+
+    @Override
+    public ResultSet execSQLQuery(String s, int resultSetType, int resultSetConcurrency)
+            throws SQLException {
+        BaseStatement stat = (BaseStatement) createStatement(resultSetType, resultSetConcurrency);
+        boolean hasResultSet = stat.executeWithFlags(s, QueryExecutor.QUERY_SUPPRESS_BEGIN);
+
+        while (!hasResultSet && stat.getUpdateCount() != -1) {
+            hasResultSet = stat.getMoreResults();
+        }
+
+        if (!hasResultSet) {
+            throw new PSQLException(GT.tr("No results were returned by the query."), PSQLState.NO_DATA);
+        }
+
+        // Transfer warnings to the connection, since the user never
+        // has a chance to see the statement itself.
+        SQLWarning warnings = stat.getWarnings();
+        if (warnings != null) {
+            addWarning(warnings);
+        }
+
+        return stat.getResultSet();
+    }
+
+    @Override
+    public void execSQLUpdate(String s) throws SQLException {
+        try (BaseStatement stmt = (BaseStatement) createStatement()) {
+            if (stmt.executeWithFlags(s, QueryExecutor.QUERY_NO_METADATA | QueryExecutor.QUERY_NO_RESULTS
+                    | QueryExecutor.QUERY_SUPPRESS_BEGIN)) {
+                throw new PSQLException(GT.tr("A result was returned when none was expected."),
+                        PSQLState.TOO_MANY_RESULTS);
+            }
+
+            // Transfer warnings to the connection, since the user never
+            // has a chance to see the statement itself.
+            SQLWarning warnings = stmt.getWarnings();
+            if (warnings != null) {
+                addWarning(warnings);
+            }
+        }
+    }
+
+    void execSQLUpdate(CachedQuery query) throws SQLException {
+        try (BaseStatement stmt = (BaseStatement) createStatement()) {
+            if (stmt.executeWithFlags(query, QueryExecutor.QUERY_NO_METADATA | QueryExecutor.QUERY_NO_RESULTS
+                    | QueryExecutor.QUERY_SUPPRESS_BEGIN)) {
+                throw new PSQLException(GT.tr("A result was returned when none was expected."),
+                        PSQLState.TOO_MANY_RESULTS);
+            }
+
+            // Transfer warnings to the connection, since the user never
+            // has a chance to see the statement itself.
+            SQLWarning warnings = stmt.getWarnings();
+            if (warnings != null) {
+                addWarning(warnings);
+            }
+        }
+    }
+
    /**
     * getCursorName gets the cursor name.
     *
     * <p>Positioned update/delete is not supported by this driver, so there is
     * no current cursor and this always returns {@code null}.</p>
     *
     * @return the current cursor name (always {@code null})
     * @throws SQLException if a database access error occurs
     */
    public String getCursorName() throws SQLException {
        checkClosed();
        return null;
    }
-  }
 
-  @Override
-  public void setCatalog(String catalog) throws SQLException {
-    checkClosed();
-    // no-op
-  }
-
-  @Override
-  public String getCatalog() throws SQLException {
-    checkClosed();
-    return queryExecutor.getDatabase();
-  }
-
-  public boolean getHideUnprivilegedObjects() {
-    return hideUnprivilegedObjects;
-  }
-
-  /**
-   * Get server version number.
-   *
-   * @return server version number
-   */
-  public String getDBVersionNumber() {
-    return queryExecutor.getServerVersion();
-  }
-
-  /**
-   * Get server major version.
-   *
-   * @return server major version
-   */
-  public int getServerMajorVersion() {
-    try {
-      StringTokenizer versionTokens = new StringTokenizer(queryExecutor.getServerVersion(), "."); // aaXbb.ccYdd
-      return integerPart(versionTokens.nextToken()); // return X
-    } catch (NoSuchElementException e) {
-      return 0;
    /**
     * <p>In SQL, a result table can be retrieved through a cursor that is named. The current row of a
     * result can be updated or deleted using a positioned update/delete statement that references the
     * cursor name.</p>
     *
     * <p>We do not support positioned update/delete, so this is a no-op.</p>
     *
     * @param cursor the cursor name
     * @throws SQLException if a database access error occurs
     */
    public void setCursorName(String cursor) throws SQLException {
        checkClosed();
        // No-op.
    }
-  }
 
-  /**
-   * Get server minor version.
-   *
-   * @return server minor version
-   */
-  public int getServerMinorVersion() {
-    try {
-      StringTokenizer versionTokens = new StringTokenizer(queryExecutor.getServerVersion(), "."); // aaXbb.ccYdd
-      versionTokens.nextToken(); // Skip aaXbb
-      return integerPart(versionTokens.nextToken()); // return Y
-    } catch (NoSuchElementException e) {
-      return 0;
    /**
     * <p>We are required to bring back certain information by the DatabaseMetaData class. These
     * functions do that.</p>
     *
     * <p>Method getURL() brings back the URL (good job we saved it)</p>
     *
     * @return the url this connection was created from
     * @throws SQLException just in case...
     */
    public String getURL() throws SQLException {
        return creatingURL;
    }

    /**
     * Method getUserName() brings back the User Name (again, we saved it).
     *
     * @return the user name, as reported by the query executor
     * @throws SQLException just in case...
     */
    public String getUserName() throws SQLException {
        return queryExecutor.getUser();
    }
-  }
 
-  @Override
-  public String escapeString(String str) throws SQLException {
-    return Utils.escapeLiteral(null, str, queryExecutor.getStandardConformingStrings())
-        .toString();
-  }
-
-  @Override
-  public boolean getStandardConformingStrings() {
-    return queryExecutor.getStandardConformingStrings();
-  }
-
-  // This is a cache of the DatabaseMetaData instance for this connection
-  protected DatabaseMetaData metadata;
-
-  @Override
-  public boolean isClosed() throws SQLException {
-    return queryExecutor.isClosed();
-  }
-
-  @Override
-  public void cancelQuery() throws SQLException {
-    checkClosed();
-    queryExecutor.sendQueryCancel();
-  }
-
-  @Override
-  public PGNotification[] getNotifications() throws SQLException {
-    return getNotifications(-1);
-  }
-
-  @Override
-  public PGNotification[] getNotifications(int timeoutMillis) throws SQLException {
-    checkClosed();
-    getQueryExecutor().processNotifies(timeoutMillis);
-    // Backwards-compatibility hand-holding.
-    PGNotification[] notifications = queryExecutor.getNotifications();
-    return notifications;
-  }
-
-  /**
-   * Handler for transaction queries.
-   */
-  private class TransactionCommandHandler extends ResultHandlerBase {
+    @SuppressWarnings("deprecation")
     @Override
-    public void handleCompletion() throws SQLException {
-      SQLWarning warning = getWarning();
-      if (warning != null) {
-        PgConnection.this.addWarning(warning);
-      }
-      super.handleCompletion();
-    }
-  }
-
-  @Override
-  public int getPrepareThreshold() {
-    return prepareThreshold;
-  }
-
-  @Override
-  public void setDefaultFetchSize(int fetchSize) throws SQLException {
-    if (fetchSize < 0) {
-      throw new PSQLException(GT.tr("Fetch size must be a value greater to or equal to 0."),
-          PSQLState.INVALID_PARAMETER_VALUE);
-    }
-
-    this.defaultFetchSize = fetchSize;
-    LOGGER.log(Level.FINE, "  setDefaultFetchSize = {0}", fetchSize);
-  }
-
-  @Override
-  public int getDefaultFetchSize() {
-    return defaultFetchSize;
-  }
-
-  @Override
-  public void setPrepareThreshold(int newThreshold) {
-    this.prepareThreshold = newThreshold;
-    LOGGER.log(Level.FINE, "  setPrepareThreshold = {0}", newThreshold);
-  }
-
-  public boolean getForceBinary() {
-    return forcebinary;
-  }
-
-  public void setForceBinary(boolean newValue) {
-    this.forcebinary = newValue;
-    LOGGER.log(Level.FINE, "  setForceBinary = {0}", newValue);
-  }
-
-  public void setTypeMapImpl(Map<String, Class<?>> map) throws SQLException {
-    typemap = map;
-  }
-
-  @Override
-  public Logger getLogger() {
-    return LOGGER;
-  }
-
-  public int getProtocolVersion() {
-    return queryExecutor.getProtocolVersion();
-  }
-
-  @Override
-  public boolean getStringVarcharFlag() {
-    return bindStringAsVarchar;
-  }
-
-  private CopyManager copyManager;
-
-  @Override
-  public CopyManager getCopyAPI() throws SQLException {
-    checkClosed();
-    if (copyManager == null) {
-      copyManager = new CopyManager(this);
-    }
-    return copyManager;
-  }
-
-  @Override
-  public boolean binaryTransferSend(int oid) {
-    return queryExecutor.useBinaryForSend(oid);
-  }
-
-  @Override
-  public int getBackendPID() {
-    return queryExecutor.getBackendPID();
-  }
-
-  @Override
-  public boolean isColumnSanitiserDisabled() {
-    return this.disableColumnSanitiser;
-  }
-
-  public void setDisableColumnSanitiser(boolean disableColumnSanitiser) {
-    this.disableColumnSanitiser = disableColumnSanitiser;
-    LOGGER.log(Level.FINE, "  setDisableColumnSanitiser = {0}", disableColumnSanitiser);
-  }
-
-  @Override
-  public PreferQueryMode getPreferQueryMode() {
-    return queryExecutor.getPreferQueryMode();
-  }
-
-  @Override
-  public AutoSave getAutosave() {
-    return queryExecutor.getAutoSave();
-  }
-
-  @Override
-  public void setAutosave(AutoSave autoSave) {
-    queryExecutor.setAutoSave(autoSave);
-    LOGGER.log(Level.FINE, "  setAutosave = {0}", autoSave.value());
-  }
-
-  protected void abort() {
-    queryExecutor.abort();
-  }
-
-  private Timer getTimer() {
-    return finalizeAction.getTimer();
-  }
-
-  @Override
-  public void addTimerTask(TimerTask timerTask, long milliSeconds) {
-    Timer timer = getTimer();
-    timer.schedule(timerTask, milliSeconds);
-  }
-
-  @Override
-  public void purgeTimerTasks() {
-    finalizeAction.purgeTimerTasks();
-  }
-
-  @Override
-  public String escapeIdentifier(String identifier) throws SQLException {
-    return Utils.escapeIdentifier(null, identifier).toString();
-  }
-
-  @Override
-  public String escapeLiteral(String literal) throws SQLException {
-    return Utils.escapeLiteral(null, literal, queryExecutor.getStandardConformingStrings())
-        .toString();
-  }
-
-  @Override
-  public LruCache<FieldMetadata.Key, FieldMetadata> getFieldMetadataCache() {
-    return fieldMetadataCache;
-  }
-
-  @Override
-  public PGReplicationConnection getReplicationAPI() {
-    return new PGReplicationConnectionImpl(this);
-  }
-
-  // Parse a "dirty" integer surrounded by non-numeric characters
-  private static int integerPart(String dirtyString) {
-    int start = 0;
-
-    while (start < dirtyString.length() && !Character.isDigit(dirtyString.charAt(start))) {
-      ++start;
-    }
-
-    int end = start;
-    while (end < dirtyString.length() && Character.isDigit(dirtyString.charAt(end))) {
-      ++end;
-    }
-
-    if (start == end) {
-      return 0;
-    }
-
-    return Integer.parseInt(dirtyString.substring(start, end));
-  }
-
-  @Override
-  public Statement createStatement(int resultSetType, int resultSetConcurrency,
-      int resultSetHoldability) throws SQLException {
-    checkClosed();
-    return new PgStatement(this, resultSetType, resultSetConcurrency, resultSetHoldability);
-  }
-
-  @Override
-  public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency,
-      int resultSetHoldability) throws SQLException {
-    checkClosed();
-    return new PgPreparedStatement(this, sql, resultSetType, resultSetConcurrency, resultSetHoldability);
-  }
-
-  @Override
-  public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency,
-      int resultSetHoldability) throws SQLException {
-    checkClosed();
-    return new PgCallableStatement(this, sql, resultSetType, resultSetConcurrency, resultSetHoldability);
-  }
-
-  @Override
-  public DatabaseMetaData getMetaData() throws SQLException {
-    checkClosed();
-    if (metadata == null) {
-      metadata = new PgDatabaseMetaData(this);
-    }
-    return metadata;
-  }
-
-  @Override
-  public void setTypeMap(Map<String, Class<?>> map) throws SQLException {
-    setTypeMapImpl(map);
-    LOGGER.log(Level.FINE, "  setTypeMap = {0}", map);
-  }
-
-  protected Array makeArray(int oid, String fieldString) throws SQLException {
-    return new PgArray(this, oid, fieldString);
-  }
-
-  protected Blob makeBlob(long oid) throws SQLException {
-    return new PgBlob(this, oid);
-  }
-
-  protected Clob makeClob(long oid) throws SQLException {
-    return new PgClob(this, oid);
-  }
-
-  protected SQLXML makeSQLXML() throws SQLException {
-    return new PgSQLXML(this);
-  }
-
-  @Override
-  public Clob createClob() throws SQLException {
-    checkClosed();
-    throw Driver.notImplemented(this.getClass(), "createClob()");
-  }
-
-  @Override
-  public Blob createBlob() throws SQLException {
-    checkClosed();
-    throw Driver.notImplemented(this.getClass(), "createBlob()");
-  }
-
-  @Override
-  public NClob createNClob() throws SQLException {
-    checkClosed();
-    throw Driver.notImplemented(this.getClass(), "createNClob()");
-  }
-
-  @Override
-  public SQLXML createSQLXML() throws SQLException {
-    checkClosed();
-    return makeSQLXML();
-  }
-
-  @Override
-  public Struct createStruct(String typeName, Object[] attributes) throws SQLException {
-    checkClosed();
-    throw Driver.notImplemented(this.getClass(), "createStruct(String, Object[])");
-  }
-
-  @SuppressWarnings({"rawtypes", "unchecked"})
-  @Override
-  public Array createArrayOf(String typeName, Object elements) throws SQLException {
-    checkClosed();
-
-    final TypeInfo typeInfo = getTypeInfo();
-
-    final int oid = typeInfo.getPGArrayType(typeName);
-    final char delim = typeInfo.getArrayDelimiter(oid);
-
-    if (oid == Oid.UNSPECIFIED) {
-      throw new PSQLException(GT.tr("Unable to find server array type for provided name {0}.", typeName),
-          PSQLState.INVALID_NAME);
-    }
-
-    if (elements == null) {
-      return makeArray(oid, null);
-    }
-
-    final ArrayEncoding.ArrayEncoder arraySupport = ArrayEncoding.getArrayEncoder(elements);
-    if (arraySupport.supportBinaryRepresentation(oid) && getPreferQueryMode() != PreferQueryMode.SIMPLE) {
-      return new PgArray(this, oid, arraySupport.toBinaryRepresentation(this, elements, oid));
-    }
-
-    final String arrayString = arraySupport.toArrayString(delim, elements);
-    return makeArray(oid, arrayString);
-  }
-
-  @Override
-  public Array createArrayOf(String typeName, Object [] elements)
-      throws SQLException {
-    return createArrayOf(typeName, (Object) elements);
-  }
-
-  @Override
-  public boolean isValid(int timeout) throws SQLException {
-    if (timeout < 0) {
-      throw new PSQLException(GT.tr("Invalid timeout ({0}<0).", timeout),
-          PSQLState.INVALID_PARAMETER_VALUE);
-    }
-    if (isClosed()) {
-      return false;
-    }
-    boolean changedNetworkTimeout = false;
-    try {
-      int oldNetworkTimeout = getNetworkTimeout();
-      int newNetworkTimeout = (int) Math.min(timeout * 1000L, Integer.MAX_VALUE);
-      try {
-        // change network timeout only if the new value is less than the current
-        // (zero means infinite timeout)
-        if (newNetworkTimeout != 0 && (oldNetworkTimeout == 0 || newNetworkTimeout < oldNetworkTimeout)) {
-          changedNetworkTimeout = true;
-          setNetworkTimeout(null, newNetworkTimeout);
+    public Fastpath getFastpathAPI() throws SQLException {
+        checkClosed();
+        if (fastpath == null) {
+            fastpath = new Fastpath(this);
         }
-        if (replicationConnection) {
-          try (Statement statement = createStatement()) {
-            statement.execute("IDENTIFY_SYSTEM");
-          }
+        return fastpath;
+    }
+
+    @Override
+    public LargeObjectManager getLargeObjectAPI() throws SQLException {
+        checkClosed();
+        if (largeobject == null) {
+            largeobject = new LargeObjectManager(this);
+        }
+        return largeobject;
+    }
+
+    /*
+     * This method is used internally to return an object based around org.postgresql's more unique
+     * data types.
+     *
+     * <p>It uses an internal HashMap to get the handling class. If the type is not supported, then an
+     * instance of org.postgresql.util.PGobject is returned.
+     *
+     * You can use the getValue() or setValue() methods to handle the returned object. Custom objects
+     * can have their own methods.
+     *
+     * @return PGobject for this type, and set to value
+     *
+     * @exception SQLException if value is not correct for this type
+     */
+    @Override
+    public Object getObject(String type, String value, byte[] byteValue)
+            throws SQLException {
+        if (typemap != null) {
+            Class<?> c = typemap.get(type);
+            if (c != null) {
+                // Handle the type (requires SQLInput & SQLOutput classes to be implemented)
+                throw new PSQLException(GT.tr("Custom type maps are not supported."),
+                        PSQLState.NOT_IMPLEMENTED);
+            }
+        }
+
+        PGobject obj = null;
+
+        if (LOGGER.isLoggable(Level.FINEST)) {
+            LOGGER.log(Level.FINEST, "Constructing object from type={0} value=<{1}>", new Object[]{type, value});
+        }
+
+        try {
+            Class<? extends PGobject> klass = typeCache.getPGobject(type);
+
+            // If klass is not null, then try to instantiate it.
+            // It must be a subclass of PGobject.
+
+            // This is used to implement the org.postgresql unique types (like lseg,
+            // point, etc).
+
+            if (klass != null) {
+                obj = klass.getDeclaredConstructor().newInstance();
+                obj.setType(type);
+                if (byteValue != null && obj instanceof PGBinaryObject) {
+                    PGBinaryObject binObj = (PGBinaryObject) obj;
+                    binObj.setByteValue(byteValue, 0);
+                } else {
+                    obj.setValue(value);
+                }
+            } else {
+                // If klass is null, then the type is unknown,
+                // so return a PGobject with the type set, and the value set.
+                obj = new PGobject();
+                obj.setType(type);
+                obj.setValue(value);
+            }
+
+            return obj;
+        } catch (SQLException sx) {
+            // rethrow the exception. Done because we capture any others next
+            throw sx;
+        } catch (Exception ex) {
+            throw new PSQLException(GT.tr("Failed to create object for: {0}.", type),
+                    PSQLState.CONNECTION_FAILURE, ex);
+        }
+    }
+
+    protected TypeInfo createTypeInfo(BaseConnection conn, int unknownLength) {
+        return new TypeInfoCache(conn, unknownLength);
+    }
+
+    @Override
+    public TypeInfo getTypeInfo() {
+        return typeCache;
+    }
+
+    @Deprecated
+    @Override
+    public void addDataType(String type, String name) {
+        try {
+            addDataType(type, Class.forName(name).asSubclass(PGobject.class));
+        } catch (Exception e) {
+            throw new RuntimeException("Cannot register new type " + type, e);
+        }
+    }
+
+    @Override
+    public void addDataType(String type, Class<? extends PGobject> klass) throws SQLException {
+        checkClosed();
+        // first add the data type to the type cache
+        typeCache.addDataType(type, klass);
+        // then check if this type supports binary transfer
+        if (PGBinaryObject.class.isAssignableFrom(klass) && getPreferQueryMode() != PreferQueryMode.SIMPLE) {
+            // try to get an oid for this type (will return 0 if the type does not exist in the database)
+            int oid = typeCache.getPGType(type);
+            // check if oid is there and if it is not disabled for binary transfer
+            if (oid > 0 && !binaryDisabledOids.contains(oid)) {
+                // allow using binary transfer for receiving and sending of this type
+                queryExecutor.addBinaryReceiveOid(oid);
+                queryExecutor.addBinarySendOid(oid);
+            }
+        }
+    }
+
+    // This initialises the objectTypes hash map
+    private void initObjectTypes(Properties info) throws SQLException {
+        // Add in the types that come packaged with the driver.
+        // These can be overridden later if desired.
+        addDataType("box", PGbox.class);
+        addDataType("circle", PGcircle.class);
+        addDataType("line", PGline.class);
+        addDataType("lseg", PGlseg.class);
+        addDataType("path", PGpath.class);
+        addDataType("point", PGpoint.class);
+        addDataType("polygon", PGpolygon.class);
+        addDataType("money", PGmoney.class);
+        addDataType("interval", PGInterval.class);
+
+        Enumeration<?> e = info.propertyNames();
+        while (e.hasMoreElements()) {
+            String propertyName = (String) e.nextElement();
+            if (propertyName != null && propertyName.startsWith("datatype.")) {
+                String typeName = propertyName.substring(9);
+                String className = info.getProperty(propertyName);
+                Class<?> klass;
+
+                try {
+                    klass = Class.forName(className);
+                } catch (ClassNotFoundException cnfe) {
+                    throw new PSQLException(
+                            GT.tr("Unable to load the class {0} responsible for the datatype {1}",
+                                    className, typeName),
+                            PSQLState.SYSTEM_ERROR, cnfe);
+                }
+
+                addDataType(typeName, klass.asSubclass(PGobject.class));
+            }
+        }
+    }
+
+    /**
+     * <B>Note:</B> even though {@code Statement} is automatically closed when it is garbage
+     * collected, it is better to close it explicitly to lower resource consumption.
+     * The spec says that calling close on a closed connection is a no-op.
+     * {@inheritDoc}
+     */
+    @Override
+    public void close() throws SQLException {
+        if (queryExecutor == null) {
+            // This might happen in case constructor throws an exception (e.g. host being not available).
+            // When that happens the connection is still registered in the finalizer queue, so it gets finalized
+            return;
+        }
+        openStackTrace = null;
+        try {
+            cleanable.clean();
+        } catch (IOException e) {
+            throw new PSQLException(
+                    GT.tr("Unable to close connection properly"),
+                    PSQLState.UNKNOWN_STATE, e);
+        }
+    }
+
+    @Override
+    public String nativeSQL(String sql) throws SQLException {
+        checkClosed();
+        CachedQuery cachedQuery = queryExecutor.createQuery(sql, false, true);
+
+        return cachedQuery.query.getNativeSql();
+    }
+
+    @Override
+    public SQLWarning getWarnings() throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            checkClosed();
+            SQLWarning newWarnings = queryExecutor.getWarnings(); // NB: also clears them.
+            if (firstWarning == null) {
+                firstWarning = newWarnings;
+            } else if (newWarnings != null) {
+                firstWarning.setNextWarning(newWarnings); // Chain them on.
+            }
+
+            return firstWarning;
+        }
+    }
+
+    @Override
+    public void clearWarnings() throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            checkClosed();
+            queryExecutor.getWarnings(); // Clear and discard.
+            firstWarning = null;
+        }
+    }
+
+    @Override
+    public boolean isReadOnly() throws SQLException {
+        checkClosed();
+        return readOnly;
+    }
+
+    @Override
+    public void setReadOnly(boolean readOnly) throws SQLException {
+        checkClosed();
+        if (queryExecutor.getTransactionState() != TransactionState.IDLE) {
+            throw new PSQLException(
+                    GT.tr("Cannot change transaction read-only property in the middle of a transaction."),
+                    PSQLState.ACTIVE_SQL_TRANSACTION);
+        }
+
+        if (readOnly != this.readOnly && autoCommit && this.readOnlyBehavior == ReadOnlyBehavior.always) {
+            execSQLUpdate(readOnly ? setSessionReadOnly : setSessionNotReadOnly);
+        }
+
+        this.readOnly = readOnly;
+        LOGGER.log(Level.FINE, "  setReadOnly = {0}", readOnly);
+    }
+
+    @Override
+    public boolean hintReadOnly() {
+        return readOnly && readOnlyBehavior != ReadOnlyBehavior.ignore;
+    }
+
+    @Override
+    public boolean getAutoCommit() throws SQLException {
+        checkClosed();
+        return this.autoCommit;
+    }
+
+    @Override
+    public void setAutoCommit(boolean autoCommit) throws SQLException {
+        checkClosed();
+
+        if (this.autoCommit == autoCommit) {
+            return;
+        }
+
+        if (!this.autoCommit) {
+            commit();
+        }
+
+        // if the connection is read only, we need to make sure session settings are
+        // correct when autocommit status changed
+        if (this.readOnly && readOnlyBehavior == ReadOnlyBehavior.always) {
+            // if we are turning on autocommit, we need to set session
+            // to read only
+            if (autoCommit) {
+                this.autoCommit = true;
+                execSQLUpdate(setSessionReadOnly);
+            } else {
+                // if we are turning auto commit off, we need to
+                // disable session
+                execSQLUpdate(setSessionNotReadOnly);
+            }
+        }
+
+        this.autoCommit = autoCommit;
+        LOGGER.log(Level.FINE, "  setAutoCommit = {0}", autoCommit);
+    }
+
+    private void executeTransactionCommand(Query query) throws SQLException {
+        int flags = QueryExecutor.QUERY_NO_METADATA | QueryExecutor.QUERY_NO_RESULTS
+                | QueryExecutor.QUERY_SUPPRESS_BEGIN;
+        if (prepareThreshold == 0) {
+            flags |= QueryExecutor.QUERY_ONESHOT;
+        }
+
+        try {
+            getQueryExecutor().execute(query, null, new TransactionCommandHandler(), 0, 0, flags);
+        } catch (SQLException e) {
+            // Don't retry composite queries as it might get partially executed
+            if (query.getSubqueries() != null || !queryExecutor.willHealOnRetry(e)) {
+                throw e;
+            }
+            query.close();
+            // retry
+            getQueryExecutor().execute(query, null, new TransactionCommandHandler(), 0, 0, flags);
+        }
+    }
+
+    @Override
+    public void commit() throws SQLException {
+        checkClosed();
+
+        if (autoCommit) {
+            throw new PSQLException(GT.tr("Cannot commit when autoCommit is enabled."),
+                    PSQLState.NO_ACTIVE_SQL_TRANSACTION);
+        }
+
+        if (queryExecutor.getTransactionState() != TransactionState.IDLE) {
+            executeTransactionCommand(commitQuery);
+        }
+    }
+
+    protected void checkClosed() throws SQLException {
+        if (isClosed()) {
+            throw new PSQLException(GT.tr("This connection has been closed."),
+                    PSQLState.CONNECTION_DOES_NOT_EXIST);
+        }
+    }
+
+    @Override
+    public void rollback() throws SQLException {
+        checkClosed();
+
+        if (autoCommit) {
+            throw new PSQLException(GT.tr("Cannot rollback when autoCommit is enabled."),
+                    PSQLState.NO_ACTIVE_SQL_TRANSACTION);
+        }
+
+        if (queryExecutor.getTransactionState() != TransactionState.IDLE) {
+            executeTransactionCommand(rollbackQuery);
         } else {
-          try (Statement checkConnectionQuery = createStatement()) {
-            ((PgStatement)checkConnectionQuery).execute("", QueryExecutor.QUERY_EXECUTE_AS_SIMPLE);
-          }
+            // just log for debugging
+            LOGGER.log(Level.FINE, "Rollback requested but no transaction in progress");
         }
-        return true;
-      } finally {
-        if (changedNetworkTimeout) {
-          setNetworkTimeout(null, oldNetworkTimeout);
-        }
-      }
-    } catch (SQLException e) {
-      if (PSQLState.IN_FAILED_SQL_TRANSACTION.getState().equals(e.getSQLState())) {
-        // "current transaction aborted", assume the connection is up and running
-        return true;
-      }
-      LOGGER.log(Level.FINE, GT.tr("Validating connection."), e);
-    }
-    return false;
-  }
-
-  @Override
-  public void setClientInfo(String name, String value) throws SQLClientInfoException {
-    try {
-      checkClosed();
-    } catch (final SQLException cause) {
-      Map<String, ClientInfoStatus> failures = new HashMap<>();
-      failures.put(name, ClientInfoStatus.REASON_UNKNOWN);
-      throw new SQLClientInfoException(GT.tr("This connection has been closed."), failures, cause);
-    }
-
-    if (haveMinimumServerVersion(ServerVersion.v9_0) && "ApplicationName".equals(name)) {
-      if (value == null) {
-        value = "";
-      }
-      final String oldValue = queryExecutor.getApplicationName();
-      if (value.equals(oldValue)) {
-        return;
-      }
-
-      try {
-        StringBuilder sql = new StringBuilder("SET application_name = '");
-        Utils.escapeLiteral(sql, value, getStandardConformingStrings());
-        sql.append("'");
-        execSQLUpdate(sql.toString());
-      } catch (SQLException sqle) {
-        Map<String, ClientInfoStatus> failures = new HashMap<>();
-        failures.put(name, ClientInfoStatus.REASON_UNKNOWN);
-        throw new SQLClientInfoException(
-            GT.tr("Failed to set ClientInfo property: {0}", "ApplicationName"), sqle.getSQLState(),
-            failures, sqle);
-      }
-      if (LOGGER.isLoggable(Level.FINE)) {
-        LOGGER.log(Level.FINE, "  setClientInfo = {0} {1}", new Object[]{name, value});
-      }
-      clientInfo.put(name, value);
-      return;
-    }
-
-    addWarning(new SQLWarning(GT.tr("ClientInfo property not supported."),
-        PSQLState.NOT_IMPLEMENTED.getState()));
-  }
-
-  @Override
-  public void setClientInfo(Properties properties) throws SQLClientInfoException {
-    try {
-      checkClosed();
-    } catch (final SQLException cause) {
-      Map<String, ClientInfoStatus> failures = new HashMap<>();
-      for (Map.Entry<Object, Object> e : properties.entrySet()) {
-        failures.put((String) e.getKey(), ClientInfoStatus.REASON_UNKNOWN);
-      }
-      throw new SQLClientInfoException(GT.tr("This connection has been closed."), failures, cause);
-    }
-
-    Map<String, ClientInfoStatus> failures = new HashMap<>();
-    for (String name : new String[]{"ApplicationName"}) {
-      try {
-        setClientInfo(name, properties.getProperty(name, null));
-      } catch (SQLClientInfoException e) {
-        failures.putAll(e.getFailedProperties());
-      }
-    }
-
-    if (!failures.isEmpty()) {
-      throw new SQLClientInfoException(GT.tr("One or more ClientInfo failed."),
-          PSQLState.NOT_IMPLEMENTED.getState(), failures);
-    }
-  }
-
-  @Override
-  public String getClientInfo(String name) throws SQLException {
-    checkClosed();
-    clientInfo.put("ApplicationName", queryExecutor.getApplicationName());
-    return clientInfo.getProperty(name);
-  }
-
-  @Override
-  public Properties getClientInfo() throws SQLException {
-    checkClosed();
-    clientInfo.put("ApplicationName", queryExecutor.getApplicationName());
-    return clientInfo;
-  }
-
-  public <T> T createQueryObject(Class<T> ifc) throws SQLException {
-    checkClosed();
-    throw Driver.notImplemented(this.getClass(), "createQueryObject(Class<T>)");
-  }
-
-  @Override
-  public boolean getLogServerErrorDetail() {
-    return logServerErrorDetail;
-  }
-
-  @Override
-  public boolean isWrapperFor(Class<?> iface) throws SQLException {
-    checkClosed();
-    return iface.isAssignableFrom(getClass());
-  }
-
-  @Override
-  public <T> T unwrap(Class<T> iface) throws SQLException {
-    checkClosed();
-    if (iface.isAssignableFrom(getClass())) {
-      return iface.cast(this);
-    }
-    throw new SQLException("Cannot unwrap to " + iface.getName());
-  }
-
-  @Override
-  public String getSchema() throws SQLException {
-    checkClosed();
-    try (Statement stmt = createStatement()) {
-      try (ResultSet rs = stmt.executeQuery("select current_schema()")) {
-        if (!rs.next()) {
-          return null; // Is it ever possible?
-        }
-        return rs.getString(1);
-      }
-    }
-  }
-
-  @Override
-  public void setSchema(String schema) throws SQLException {
-    checkClosed();
-    try (Statement stmt = createStatement()) {
-      if (schema == null) {
-        stmt.executeUpdate("SET SESSION search_path TO DEFAULT");
-      } else {
-        StringBuilder sb = new StringBuilder();
-        sb.append("SET SESSION search_path TO '");
-        Utils.escapeLiteral(sb, schema, getStandardConformingStrings());
-        sb.append("'");
-        stmt.executeUpdate(sb.toString());
-        LOGGER.log(Level.FINE, "  setSchema = {0}", schema);
-      }
-    }
-  }
-
-  public class AbortCommand implements Runnable {
-
-    public AbortCommand() {
     }
 
     @Override
-    public void run() {
-      abort();
-    }
-  }
-
-  @Override
-  public void abort(Executor executor) throws SQLException {
-    if (executor == null) {
-      throw new SQLException("executor is null");
-    }
-    if (isClosed()) {
-      return;
+    public TransactionState getTransactionState() {
+        return queryExecutor.getTransactionState();
     }
 
-    SQL_PERMISSION_ABORT.checkGuard(this);
+    @Override
+    public int getTransactionIsolation() throws SQLException {
+        checkClosed();
 
-    AbortCommand command = new AbortCommand();
-    executor.execute(command);
-  }
-
-  @Override
-  public void setNetworkTimeout(Executor executor /*not used*/, int milliseconds)
-      throws SQLException {
-    checkClosed();
-
-    if (milliseconds < 0) {
-      throw new PSQLException(GT.tr("Network timeout must be a value greater than or equal to 0."),
-              PSQLState.INVALID_PARAMETER_VALUE);
-    }
-
-    checkPermission(SQL_PERMISSION_NETWORK_TIMEOUT);
-
-    try {
-      queryExecutor.setNetworkTimeout(milliseconds);
-    } catch (IOException ioe) {
-      throw new PSQLException(GT.tr("Unable to set network timeout."),
-              PSQLState.COMMUNICATION_ERROR, ioe);
-    }
-  }
-
-  private void checkPermission(SQLPermission sqlPermissionNetworkTimeout) {
-    if (SYSTEM_GET_SECURITY_MANAGER != null && SECURITY_MANAGER_CHECK_PERMISSION != null) {
-      try {
-        Object securityManager = SYSTEM_GET_SECURITY_MANAGER.invoke();
-        if (securityManager != null) {
-          SECURITY_MANAGER_CHECK_PERMISSION.invoke(securityManager, sqlPermissionNetworkTimeout);
+        String level = null;
+        final ResultSet rs = execSQLQuery("SHOW TRANSACTION ISOLATION LEVEL"); // nb: no BEGIN triggered
+        if (rs.next()) {
+            level = rs.getString(1);
         }
-      } catch (Throwable e) {
-        throw new RuntimeException(e);
-      }
-    }
-  }
+        rs.close();
 
-  @Override
-  public int getNetworkTimeout() throws SQLException {
-    checkClosed();
+        // TODO revisit: throw exception instead of silently eating the error in unknown cases?
+        if (level == null) {
+            return Connection.TRANSACTION_READ_COMMITTED; // Best guess.
+        }
 
-    try {
-      return queryExecutor.getNetworkTimeout();
-    } catch (IOException ioe) {
-      throw new PSQLException(GT.tr("Unable to get network timeout."),
-              PSQLState.COMMUNICATION_ERROR, ioe);
-    }
-  }
+        level = level.toUpperCase(Locale.US);
+        if ("READ COMMITTED".equals(level)) {
+            return Connection.TRANSACTION_READ_COMMITTED;
+        }
+        if ("READ UNCOMMITTED".equals(level)) {
+            return Connection.TRANSACTION_READ_UNCOMMITTED;
+        }
+        if ("REPEATABLE READ".equals(level)) {
+            return Connection.TRANSACTION_REPEATABLE_READ;
+        }
+        if ("SERIALIZABLE".equals(level)) {
+            return Connection.TRANSACTION_SERIALIZABLE;
+        }
 
-  @Override
-  public void setHoldability(int holdability) throws SQLException {
-    checkClosed();
-
-    switch (holdability) {
-      case ResultSet.CLOSE_CURSORS_AT_COMMIT:
-      case ResultSet.HOLD_CURSORS_OVER_COMMIT:
-        rsHoldability = holdability;
-        break;
-      default:
-        throw new PSQLException(GT.tr("Unknown ResultSet holdability setting: {0}.", holdability),
-            PSQLState.INVALID_PARAMETER_VALUE);
-    }
-    LOGGER.log(Level.FINE, "  setHoldability = {0}", holdability);
-  }
-
-  @Override
-  public int getHoldability() throws SQLException {
-    checkClosed();
-    return rsHoldability;
-  }
-
-  @Override
-  public Savepoint setSavepoint() throws SQLException {
-    checkClosed();
-
-    String pgName;
-    if (getAutoCommit()) {
-      throw new PSQLException(GT.tr("Cannot establish a savepoint in auto-commit mode."),
-          PSQLState.NO_ACTIVE_SQL_TRANSACTION);
+        return Connection.TRANSACTION_READ_COMMITTED; // Best guess.
     }
 
-    PSQLSavepoint savepoint = new PSQLSavepoint(savepointId++);
-    pgName = savepoint.getPGName();
+    @Override
+    public void setTransactionIsolation(int level) throws SQLException {
+        checkClosed();
 
-    // Note we can't use execSQLUpdate because we don't want
-    // to suppress BEGIN.
-    Statement stmt = createStatement();
-    stmt.executeUpdate("SAVEPOINT " + pgName);
-    stmt.close();
+        if (queryExecutor.getTransactionState() != TransactionState.IDLE) {
+            throw new PSQLException(
+                    GT.tr("Cannot change transaction isolation level in the middle of a transaction."),
+                    PSQLState.ACTIVE_SQL_TRANSACTION);
+        }
 
-    return savepoint;
-  }
+        String isolationLevelName = getIsolationLevelName(level);
+        if (isolationLevelName == null) {
+            throw new PSQLException(GT.tr("Transaction isolation level {0} not supported.", level),
+                    PSQLState.NOT_IMPLEMENTED);
+        }
 
-  @Override
-  public Savepoint setSavepoint(String name) throws SQLException {
-    checkClosed();
-
-    if (getAutoCommit()) {
-      throw new PSQLException(GT.tr("Cannot establish a savepoint in auto-commit mode."),
-          PSQLState.NO_ACTIVE_SQL_TRANSACTION);
+        String isolationLevelSQL =
+                "SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL " + isolationLevelName;
+        execSQLUpdate(isolationLevelSQL); // nb: no BEGIN triggered
+        LOGGER.log(Level.FINE, "  setTransactionIsolation = {0}", isolationLevelName);
     }
 
-    PSQLSavepoint savepoint = new PSQLSavepoint(name);
-
-    // Note we can't use execSQLUpdate because we don't want
-    // to suppress BEGIN.
-    Statement stmt = createStatement();
-    stmt.executeUpdate("SAVEPOINT " + savepoint.getPGName());
-    stmt.close();
-
-    return savepoint;
-  }
-
-  @Override
-  public void rollback(Savepoint savepoint) throws SQLException {
-    checkClosed();
-
-    PSQLSavepoint pgSavepoint = (PSQLSavepoint) savepoint;
-    execSQLUpdate("ROLLBACK TO SAVEPOINT " + pgSavepoint.getPGName());
-  }
-
-  @Override
-  public void releaseSavepoint(Savepoint savepoint) throws SQLException {
-    checkClosed();
-
-    PSQLSavepoint pgSavepoint = (PSQLSavepoint) savepoint;
-    execSQLUpdate("RELEASE SAVEPOINT " + pgSavepoint.getPGName());
-    pgSavepoint.invalidate();
-  }
-
-  @Override
-  public Statement createStatement(int resultSetType, int resultSetConcurrency)
-      throws SQLException {
-    checkClosed();
-    return createStatement(resultSetType, resultSetConcurrency, getHoldability());
-  }
-
-  @Override
-  public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency)
-      throws SQLException {
-    checkClosed();
-    return prepareStatement(sql, resultSetType, resultSetConcurrency, getHoldability());
-  }
-
-  @Override
-  public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency)
-      throws SQLException {
-    checkClosed();
-    return prepareCall(sql, resultSetType, resultSetConcurrency, getHoldability());
-  }
-
-  @Override
-  public PreparedStatement prepareStatement(String sql, int autoGeneratedKeys) throws SQLException {
-    if (autoGeneratedKeys != Statement.RETURN_GENERATED_KEYS) {
-      return prepareStatement(sql);
+    protected String getIsolationLevelName(int level) {
+        switch (level) {
+            case Connection.TRANSACTION_READ_COMMITTED:
+                return "READ COMMITTED";
+            case Connection.TRANSACTION_SERIALIZABLE:
+                return "SERIALIZABLE";
+            case Connection.TRANSACTION_READ_UNCOMMITTED:
+                return "READ UNCOMMITTED";
+            case Connection.TRANSACTION_REPEATABLE_READ:
+                return "REPEATABLE READ";
+            default:
+                return null;
+        }
     }
 
-    return prepareStatement(sql, (String[]) null);
-  }
-
-  @Override
-  public PreparedStatement prepareStatement(String sql, int [] columnIndexes) throws SQLException {
-    if (columnIndexes != null && columnIndexes.length == 0) {
-      return prepareStatement(sql);
    /**
     * Returns the name of the database this connection is attached to.
     *
     * @throws SQLException if the connection is closed
     */
    @Override
    public String getCatalog() throws SQLException {
        checkClosed();
        return queryExecutor.getDatabase();
    }
 
-    checkClosed();
-    throw new PSQLException(GT.tr("Returning autogenerated keys is not supported."),
-        PSQLState.NOT_IMPLEMENTED);
-  }
-
-  @Override
-  public PreparedStatement prepareStatement(String sql, String[] columnNames) throws SQLException {
-    if (columnNames != null && columnNames.length == 0) {
-      return prepareStatement(sql);
    /**
     * No-op: the driver does not support switching databases on an open
     * connection, so the requested catalog is ignored.
     *
     * @param catalog ignored
     * @throws SQLException if the connection is closed
     */
    @Override
    public void setCatalog(String catalog) throws SQLException {
        checkClosed();
        // no-op
    }
 
-    CachedQuery cachedQuery = borrowReturningQuery(sql, columnNames);
-    PgPreparedStatement ps =
-        new PgPreparedStatement(this, cachedQuery,
-            ResultSet.TYPE_FORWARD_ONLY,
-            ResultSet.CONCUR_READ_ONLY,
-            getHoldability());
-    Query query = cachedQuery.query;
-    SqlCommand sqlCommand = query.getSqlCommand();
-    if (sqlCommand != null) {
-      ps.wantsGeneratedKeysAlways = sqlCommand.isReturningKeywordPresent();
-    } else {
-      // If composite query is given, just ignore "generated keys" arguments
    // Value of the hideUnprivilegedObjects connection setting.
    public boolean getHideUnprivilegedObjects() {
        return hideUnprivilegedObjects;
    }
-    return ps;
-  }
 
-  @Override
-  public final Map<String, String> getParameterStatuses() {
-    return queryExecutor.getParameterStatuses();
-  }
-
-  @Override
-  public final String getParameterStatus(String parameterName) {
-    return queryExecutor.getParameterStatus(parameterName);
-  }
-
-  @Override
-  public boolean getAdaptiveFetch() {
-    return queryExecutor.getAdaptiveFetch();
-  }
-
-  @Override
-  public void setAdaptiveFetch(boolean adaptiveFetch) {
-    queryExecutor.setAdaptiveFetch(adaptiveFetch);
-  }
-
-  @Override
-  public PGXmlFactoryFactory getXmlFactoryFactory() throws SQLException {
-    PGXmlFactoryFactory xmlFactoryFactory = this.xmlFactoryFactory;
-    if (xmlFactoryFactory != null) {
-      return xmlFactoryFactory;
    /**
     * Get server version number.
     *
     * @return the raw server version string as reported by the backend
     */
    public String getDBVersionNumber() {
        return queryExecutor.getServerVersion();
    }
-    if (xmlFactoryFactoryClass == null || "".equals(xmlFactoryFactoryClass)) {
-      xmlFactoryFactory = DefaultPGXmlFactoryFactory.INSTANCE;
-    } else if ("LEGACY_INSECURE".equals(xmlFactoryFactoryClass)) {
-      xmlFactoryFactory = LegacyInsecurePGXmlFactoryFactory.INSTANCE;
-    } else {
-      Class<?> clazz;
-      try {
-        clazz = Class.forName(xmlFactoryFactoryClass);
-      } catch (ClassNotFoundException ex) {
-        throw new PSQLException(
-            GT.tr("Could not instantiate xmlFactoryFactory: {0}", xmlFactoryFactoryClass),
-            PSQLState.INVALID_PARAMETER_VALUE, ex);
-      }
-      if (!clazz.isAssignableFrom(PGXmlFactoryFactory.class)) {
-        throw new PSQLException(
-            GT.tr("Connection property xmlFactoryFactory must implement PGXmlFactoryFactory: {0}", xmlFactoryFactoryClass),
-            PSQLState.INVALID_PARAMETER_VALUE);
-      }
-      try {
-        xmlFactoryFactory = clazz.asSubclass(PGXmlFactoryFactory.class)
-            .getDeclaredConstructor()
-            .newInstance();
-      } catch (Exception ex) {
-        throw new PSQLException(
-            GT.tr("Could not instantiate xmlFactoryFactory: {0}", xmlFactoryFactoryClass),
-            PSQLState.INVALID_PARAMETER_VALUE, ex);
-      }
+
    /**
     * Get server major version.
     *
     * @return server major version, or 0 if the version string cannot be parsed
     */
    public int getServerMajorVersion() {
        try {
            StringTokenizer versionTokens = new StringTokenizer(queryExecutor.getServerVersion(), "."); // aaXbb.ccYdd
            return integerPart(versionTokens.nextToken()); // return X
        } catch (NoSuchElementException e) {
            // No dot-separated components at all: report "unknown" as 0.
            return 0;
        }
    }

    /**
     * Get server minor version.
     *
     * @return server minor version, or 0 if the version string cannot be parsed
     */
    public int getServerMinorVersion() {
        try {
            StringTokenizer versionTokens = new StringTokenizer(queryExecutor.getServerVersion(), "."); // aaXbb.ccYdd
            versionTokens.nextToken(); // Skip aaXbb
            return integerPart(versionTokens.nextToken()); // return Y
        } catch (NoSuchElementException e) {
            // Fewer than two dot-separated components: report "unknown" as 0.
            return 0;
        }
    }
+
    /** Reports whether the server version is at least {@code ver} (numeric form). */
    @Override
    public boolean haveMinimumServerVersion(int ver) {
        return queryExecutor.getServerVersionNum() >= ver;
    }

    /** Reports whether the server version is at least {@code ver}. */
    @Override
    public boolean haveMinimumServerVersion(Version ver) {
        return haveMinimumServerVersion(ver.getVersionNum());
    }

    /** Returns the character encoding used on this connection. */
    @Override
    public Encoding getEncoding() {
        return queryExecutor.getEncoding();
    }
+
    /**
     * Encodes a string using the connection's character encoding.
     *
     * @param str the string to encode
     * @return the encoded bytes
     * @throws SQLException if the string cannot be represented in the encoding
     */
    @Override
    public byte[] encodeString(String str) throws SQLException {
        try {
            return getEncoding().encode(str);
        } catch (IOException ioe) {
            throw new PSQLException(GT.tr("Unable to translate data into the desired encoding."),
                    PSQLState.DATA_ERROR, ioe);
        }
    }

    /**
     * Escapes a string for safe inclusion as a SQL literal, honoring the
     * server's standard_conforming_strings setting.
     */
    @Override
    public String escapeString(String str) throws SQLException {
        return Utils.escapeLiteral(null, str, queryExecutor.getStandardConformingStrings())
                .toString();
    }

    /** Reports whether the server uses standard-conforming string literals. */
    @Override
    public boolean getStandardConformingStrings() {
        return queryExecutor.getStandardConformingStrings();
    }
+
    /** Reports whether the underlying protocol connection is closed. */
    @Override
    public boolean isClosed() throws SQLException {
        return queryExecutor.isClosed();
    }

    /**
     * Sends a cancel request for the query currently running on this connection.
     *
     * @throws SQLException if the connection is closed
     */
    @Override
    public void cancelQuery() throws SQLException {
        checkClosed();
        queryExecutor.sendQueryCancel();
    }

    /** Returns pending asynchronous notifications without blocking. */
    @Override
    public PGNotification[] getNotifications() throws SQLException {
        return getNotifications(-1);
    }

    /**
     * Processes and returns pending asynchronous notifications.
     *
     * @param timeoutMillis passed through to processNotifies — TODO confirm
     *        semantics of negative values against the QueryExecutor contract
     */
    @Override
    public PGNotification[] getNotifications(int timeoutMillis) throws SQLException {
        checkClosed();
        getQueryExecutor().processNotifies(timeoutMillis);
        // Backwards-compatibility hand-holding.
        PGNotification[] notifications = queryExecutor.getNotifications();
        return notifications;
    }
+
    /** Returns the prepareThreshold setting. */
    @Override
    public int getPrepareThreshold() {
        return prepareThreshold;
    }

    /** Updates the prepareThreshold setting. */
    @Override
    public void setPrepareThreshold(int newThreshold) {
        this.prepareThreshold = newThreshold;
        LOGGER.log(Level.FINE, "  setPrepareThreshold = {0}", newThreshold);
    }

    /** Returns the default fetch size applied to newly created statements. */
    @Override
    public int getDefaultFetchSize() {
        return defaultFetchSize;
    }
+
+    @Override
+    public void setDefaultFetchSize(int fetchSize) throws SQLException {
+        if (fetchSize < 0) {
+            throw new PSQLException(GT.tr("Fetch size must be a value greater to or equal to 0."),
+                    PSQLState.INVALID_PARAMETER_VALUE);
+        }
+
+        this.defaultFetchSize = fetchSize;
+        LOGGER.log(Level.FINE, "  setDefaultFetchSize = {0}", fetchSize);
+    }
+
    // Value of the forcebinary setting.
    public boolean getForceBinary() {
        return forcebinary;
    }

    // Updates the forcebinary setting.
    public void setForceBinary(boolean newValue) {
        this.forcebinary = newValue;
        LOGGER.log(Level.FINE, "  setForceBinary = {0}", newValue);
    }

    // Replaces the connection's user type map.
    public void setTypeMapImpl(Map<String, Class<?>> map) throws SQLException {
        typemap = map;
    }
+
    /** Returns the driver's shared logger. */
    @Override
    public Logger getLogger() {
        return LOGGER;
    }

    // Frontend/backend protocol version in use on this connection.
    public int getProtocolVersion() {
        return queryExecutor.getProtocolVersion();
    }

    /** Value of the bindStringAsVarchar setting. */
    @Override
    public boolean getStringVarcharFlag() {
        return bindStringAsVarchar;
    }

    /**
     * Returns the COPY API entry point, created lazily on first use.
     *
     * <p>NOTE(review): the lazy init is unsynchronized — assumes the connection
     * is used from a single thread at a time.</p>
     *
     * @throws SQLException if the connection is closed
     */
    @Override
    public CopyManager getCopyAPI() throws SQLException {
        checkClosed();
        if (copyManager == null) {
            copyManager = new CopyManager(this);
        }
        return copyManager;
    }

    /** Reports whether values of the given OID are sent to the server in binary form. */
    @Override
    public boolean binaryTransferSend(int oid) {
        return queryExecutor.useBinaryForSend(oid);
    }

    /** Returns the process ID of the backend serving this connection. */
    @Override
    public int getBackendPID() {
        return queryExecutor.getBackendPID();
    }
+
    /** Value of the disableColumnSanitiser setting. */
    @Override
    public boolean isColumnSanitiserDisabled() {
        return this.disableColumnSanitiser;
    }

    /** Updates the disableColumnSanitiser setting. */
    public void setDisableColumnSanitiser(boolean disableColumnSanitiser) {
        this.disableColumnSanitiser = disableColumnSanitiser;
        LOGGER.log(Level.FINE, "  setDisableColumnSanitiser = {0}", disableColumnSanitiser);
    }

    /** Returns the query mode preferred for this connection. */
    @Override
    public PreferQueryMode getPreferQueryMode() {
        return queryExecutor.getPreferQueryMode();
    }

    /** Returns the current autosave mode. */
    @Override
    public AutoSave getAutosave() {
        return queryExecutor.getAutoSave();
    }

    /** Sets the autosave mode. */
    @Override
    public void setAutosave(AutoSave autoSave) {
        queryExecutor.setAutoSave(autoSave);
        LOGGER.log(Level.FINE, "  setAutosave = {0}", autoSave.value());
    }

    // Forcibly aborts the underlying protocol connection.
    protected void abort() {
        queryExecutor.abort();
    }
+
    // Shared Timer owned by finalizeAction; obtained on demand.
    private Timer getTimer() {
        return finalizeAction.getTimer();
    }

    /** Schedules {@code timerTask} on the connection's shared timer. */
    @Override
    public void addTimerTask(TimerTask timerTask, long milliSeconds) {
        Timer timer = getTimer();
        timer.schedule(timerTask, milliSeconds);
    }

    /** Purges cancelled tasks from the shared timer. */
    @Override
    public void purgeTimerTasks() {
        finalizeAction.purgeTimerTasks();
    }

    /** Escapes an SQL identifier for safe embedding in a statement. */
    @Override
    public String escapeIdentifier(String identifier) throws SQLException {
        return Utils.escapeIdentifier(null, identifier).toString();
    }

    /**
     * Escapes a literal value for safe embedding in a statement, honoring
     * standard_conforming_strings.
     */
    @Override
    public String escapeLiteral(String literal) throws SQLException {
        return Utils.escapeLiteral(null, literal, queryExecutor.getStandardConformingStrings())
                .toString();
    }

    /** Returns the per-connection field metadata cache. */
    @Override
    public LruCache<FieldMetadata.Key, FieldMetadata> getFieldMetadataCache() {
        return fieldMetadataCache;
    }

    /** Returns a new replication API facade bound to this connection. */
    @Override
    public PGReplicationConnection getReplicationAPI() {
        return new PGReplicationConnectionImpl(this);
    }
+
    /** Creates a statement with explicit result-set type, concurrency and holdability. */
    @Override
    public Statement createStatement(int resultSetType, int resultSetConcurrency,
                                     int resultSetHoldability) throws SQLException {
        checkClosed();
        return new PgStatement(this, resultSetType, resultSetConcurrency, resultSetHoldability);
    }

    /** Prepares a statement with explicit result-set type, concurrency and holdability. */
    @Override
    public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency,
                                              int resultSetHoldability) throws SQLException {
        checkClosed();
        return new PgPreparedStatement(this, sql, resultSetType, resultSetConcurrency, resultSetHoldability);
    }

    /** Prepares a callable statement with explicit result-set type, concurrency and holdability. */
    @Override
    public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency,
                                         int resultSetHoldability) throws SQLException {
        checkClosed();
        return new PgCallableStatement(this, sql, resultSetType, resultSetConcurrency, resultSetHoldability);
    }

    /**
     * Returns the metadata helper, created lazily on first use.
     *
     * <p>NOTE(review): lazy init is unsynchronized — assumes single-threaded use.</p>
     */
    @Override
    public DatabaseMetaData getMetaData() throws SQLException {
        checkClosed();
        if (metadata == null) {
            metadata = new PgDatabaseMetaData(this);
        }
        return metadata;
    }
+
    // Factory hook for PgArray; overridable by subclasses.
    protected Array makeArray(int oid, String fieldString) throws SQLException {
        return new PgArray(this, oid, fieldString);
    }

    // Factory hook for PgBlob addressing the given oid.
    protected Blob makeBlob(long oid) throws SQLException {
        return new PgBlob(this, oid);
    }

    // Factory hook for PgClob addressing the given oid.
    protected Clob makeClob(long oid) throws SQLException {
        return new PgClob(this, oid);
    }

    // Factory hook for PgSQLXML.
    protected SQLXML makeSQLXML() throws SQLException {
        return new PgSQLXML(this);
    }
+
    /** Not implemented; always throws once the connection is verified open. */
    @Override
    public Clob createClob() throws SQLException {
        checkClosed();
        throw Driver.notImplemented(this.getClass(), "createClob()");
    }

    /** Not implemented; always throws once the connection is verified open. */
    @Override
    public Blob createBlob() throws SQLException {
        checkClosed();
        throw Driver.notImplemented(this.getClass(), "createBlob()");
    }

    /** Not implemented; always throws once the connection is verified open. */
    @Override
    public NClob createNClob() throws SQLException {
        checkClosed();
        throw Driver.notImplemented(this.getClass(), "createNClob()");
    }

    /** Creates a new SQLXML object bound to this connection. */
    @Override
    public SQLXML createSQLXML() throws SQLException {
        checkClosed();
        return makeSQLXML();
    }

    /** Not implemented; always throws once the connection is verified open. */
    @Override
    public Struct createStruct(String typeName, Object[] attributes) throws SQLException {
        checkClosed();
        throw Driver.notImplemented(this.getClass(), "createStruct(String, Object[])");
    }
+
    /**
     * Creates an Array for the given server type name from the supplied
     * elements object.
     *
     * <p>Uses the binary representation when the encoder supports it and the
     * connection is not in simple query mode; otherwise builds the text
     * array-literal form.</p>
     *
     * @param typeName server-side element type name
     * @param elements the elements, or {@code null} for a NULL array
     * @throws SQLException if no array type is known for {@code typeName}
     */
    @SuppressWarnings({"rawtypes", "unchecked"})
    @Override
    public Array createArrayOf(String typeName, Object elements) throws SQLException {
        checkClosed();

        final TypeInfo typeInfo = getTypeInfo();

        final int oid = typeInfo.getPGArrayType(typeName);
        // NOTE(review): the delimiter is looked up before the UNSPECIFIED check —
        // confirm getArrayDelimiter is safe for unknown OIDs.
        final char delim = typeInfo.getArrayDelimiter(oid);

        if (oid == Oid.UNSPECIFIED) {
            throw new PSQLException(GT.tr("Unable to find server array type for provided name {0}.", typeName),
                    PSQLState.INVALID_NAME);
        }

        if (elements == null) {
            return makeArray(oid, null);
        }

        final ArrayEncoding.ArrayEncoder arraySupport = ArrayEncoding.getArrayEncoder(elements);
        if (arraySupport.supportBinaryRepresentation(oid) && getPreferQueryMode() != PreferQueryMode.SIMPLE) {
            return new PgArray(this, oid, arraySupport.toBinaryRepresentation(this, elements, oid));
        }

        final String arrayString = arraySupport.toArrayString(delim, elements);
        return makeArray(oid, arrayString);
    }

    /** JDBC variant delegating to the Object-accepting overload. */
    @Override
    public Array createArrayOf(String typeName, Object[] elements)
            throws SQLException {
        return createArrayOf(typeName, (Object) elements);
    }
+
    /**
     * Validates the connection by performing a trivial server round trip,
     * temporarily tightening the network timeout to the requested bound.
     *
     * @param timeout seconds to wait; 0 means no additional limit
     * @return true if the round trip succeeds (or the transaction is merely
     *         aborted), false otherwise
     * @throws SQLException if {@code timeout} is negative
     */
    @Override
    public boolean isValid(int timeout) throws SQLException {
        if (timeout < 0) {
            throw new PSQLException(GT.tr("Invalid timeout ({0}<0).", timeout),
                    PSQLState.INVALID_PARAMETER_VALUE);
        }
        if (isClosed()) {
            return false;
        }
        boolean changedNetworkTimeout = false;
        try {
            int oldNetworkTimeout = getNetworkTimeout();
            // Convert to millis, clamped so large timeouts don't overflow int.
            int newNetworkTimeout = (int) Math.min(timeout * 1000L, Integer.MAX_VALUE);
            try {
                // change network timeout only if the new value is less than the current
                // (zero means infinite timeout)
                if (newNetworkTimeout != 0 && (oldNetworkTimeout == 0 || newNetworkTimeout < oldNetworkTimeout)) {
                    changedNetworkTimeout = true;
                    setNetworkTimeout(null, newNetworkTimeout);
                }
                if (replicationConnection) {
                    // Replication connections don't accept plain SQL; use a protocol command.
                    try (Statement statement = createStatement()) {
                        statement.execute("IDENTIFY_SYSTEM");
                    }
                } else {
                    // Empty simple query: cheapest possible round trip.
                    try (Statement checkConnectionQuery = createStatement()) {
                        ((PgStatement) checkConnectionQuery).execute("", QueryExecutor.QUERY_EXECUTE_AS_SIMPLE);
                    }
                }
                return true;
            } finally {
                // Always restore the caller-visible timeout.
                if (changedNetworkTimeout) {
                    setNetworkTimeout(null, oldNetworkTimeout);
                }
            }
        } catch (SQLException e) {
            if (PSQLState.IN_FAILED_SQL_TRANSACTION.getState().equals(e.getSQLState())) {
                // "current transaction aborted", assume the connection is up and running
                return true;
            }
            LOGGER.log(Level.FINE, GT.tr("Validating connection."), e);
        }
        return false;
    }
+
    /**
     * Sets a single ClientInfo property. Only ApplicationName is supported
     * (and only on server 9.0+); unsupported properties produce a warning
     * rather than an error, per JDBC.
     *
     * @throws SQLClientInfoException if the connection is closed or the
     *         server rejects the update
     */
    @Override
    public void setClientInfo(String name, String value) throws SQLClientInfoException {
        try {
            checkClosed();
        } catch (final SQLException cause) {
            Map<String, ClientInfoStatus> failures = new HashMap<>();
            failures.put(name, ClientInfoStatus.REASON_UNKNOWN);
            throw new SQLClientInfoException(GT.tr("This connection has been closed."), failures, cause);
        }

        if (haveMinimumServerVersion(ServerVersion.v9_0) && "ApplicationName".equals(name)) {
            if (value == null) {
                value = "";
            }
            final String oldValue = queryExecutor.getApplicationName();
            if (value.equals(oldValue)) {
                // Unchanged: skip the server round trip.
                return;
            }

            try {
                StringBuilder sql = new StringBuilder("SET application_name = '");
                Utils.escapeLiteral(sql, value, getStandardConformingStrings());
                sql.append("'");
                execSQLUpdate(sql.toString());
            } catch (SQLException sqle) {
                Map<String, ClientInfoStatus> failures = new HashMap<>();
                failures.put(name, ClientInfoStatus.REASON_UNKNOWN);
                throw new SQLClientInfoException(
                        GT.tr("Failed to set ClientInfo property: {0}", "ApplicationName"), sqle.getSQLState(),
                        failures, sqle);
            }
            if (LOGGER.isLoggable(Level.FINE)) {
                LOGGER.log(Level.FINE, "  setClientInfo = {0} {1}", new Object[]{name, value});
            }
            clientInfo.put(name, value);
            return;
        }

        addWarning(new SQLWarning(GT.tr("ClientInfo property not supported."),
                PSQLState.NOT_IMPLEMENTED.getState()));
    }
+
    /**
     * Returns a single ClientInfo property; ApplicationName is refreshed from
     * the server-reported value first.
     */
    @Override
    public String getClientInfo(String name) throws SQLException {
        checkClosed();
        clientInfo.put("ApplicationName", queryExecutor.getApplicationName());
        return clientInfo.getProperty(name);
    }

    /**
     * Returns all ClientInfo properties; ApplicationName is refreshed from
     * the server-reported value first.
     */
    @Override
    public Properties getClientInfo() throws SQLException {
        checkClosed();
        clientInfo.put("ApplicationName", queryExecutor.getApplicationName());
        return clientInfo;
    }
+
    /**
     * Applies the supported ClientInfo properties (currently only
     * ApplicationName) from {@code properties}.
     *
     * @throws SQLClientInfoException if the connection is closed or any
     *         property fails to apply; failed properties are reported in
     *         the exception
     */
    @Override
    public void setClientInfo(Properties properties) throws SQLClientInfoException {
        try {
            checkClosed();
        } catch (final SQLException cause) {
            Map<String, ClientInfoStatus> failures = new HashMap<>();
            for (Map.Entry<Object, Object> e : properties.entrySet()) {
                failures.put((String) e.getKey(), ClientInfoStatus.REASON_UNKNOWN);
            }
            throw new SQLClientInfoException(GT.tr("This connection has been closed."), failures, cause);
        }

        Map<String, ClientInfoStatus> failures = new HashMap<>();
        for (String name : new String[]{"ApplicationName"}) {
            try {
                setClientInfo(name, properties.getProperty(name, null));
            } catch (SQLClientInfoException e) {
                failures.putAll(e.getFailedProperties());
            }
        }

        if (!failures.isEmpty()) {
            throw new SQLClientInfoException(GT.tr("One or more ClientInfo failed."),
                    PSQLState.NOT_IMPLEMENTED.getState(), failures);
        }
    }
+
    // Not implemented; always throws once the connection is verified open.
    public <T> T createQueryObject(Class<T> ifc) throws SQLException {
        checkClosed();
        throw Driver.notImplemented(this.getClass(), "createQueryObject(Class<T>)");
    }

    /** Value of the logServerErrorDetail setting. */
    @Override
    public boolean getLogServerErrorDetail() {
        return logServerErrorDetail;
    }
+
    /** Reports whether this object can be unwrapped to {@code iface}. */
    @Override
    public boolean isWrapperFor(Class<?> iface) throws SQLException {
        checkClosed();
        return iface.isAssignableFrom(getClass());
    }

    /**
     * Unwraps this connection to the requested interface.
     *
     * @throws SQLException if the connection is closed or the cast is impossible
     */
    @Override
    public <T> T unwrap(Class<T> iface) throws SQLException {
        checkClosed();
        if (iface.isAssignableFrom(getClass())) {
            return iface.cast(this);
        }
        throw new SQLException("Cannot unwrap to " + iface.getName());
    }
+
    /**
     * Returns the current schema by querying {@code current_schema()}.
     *
     * @return the current schema, or {@code null} if the query returns no row
     */
    @Override
    public String getSchema() throws SQLException {
        checkClosed();
        try (Statement stmt = createStatement()) {
            try (ResultSet rs = stmt.executeQuery("select current_schema()")) {
                if (!rs.next()) {
                    return null; // Is it ever possible?
                }
                return rs.getString(1);
            }
        }
    }

    /**
     * Sets the session search_path. A {@code null} schema resets it to the
     * default; otherwise the schema name is escaped and applied.
     */
    @Override
    public void setSchema(String schema) throws SQLException {
        checkClosed();
        try (Statement stmt = createStatement()) {
            if (schema == null) {
                stmt.executeUpdate("SET SESSION search_path TO DEFAULT");
            } else {
                StringBuilder sb = new StringBuilder();
                sb.append("SET SESSION search_path TO '");
                Utils.escapeLiteral(sb, schema, getStandardConformingStrings());
                sb.append("'");
                stmt.executeUpdate(sb.toString());
                LOGGER.log(Level.FINE, "  setSchema = {0}", schema);
            }
        }
    }
+
    /**
     * Aborts the connection using the supplied executor, per JDBC semantics.
     * Returns silently if the connection is already closed.
     *
     * @throws SQLException if {@code executor} is null
     */
    @Override
    public void abort(Executor executor) throws SQLException {
        if (executor == null) {
            throw new SQLException("executor is null");
        }
        if (isClosed()) {
            return;
        }

        // Permission guard for environments with a security manager.
        SQL_PERMISSION_ABORT.checkGuard(this);

        AbortCommand command = new AbortCommand();
        executor.execute(command);
    }
+
    /**
     * Sets the network timeout in milliseconds; 0 means infinite timeout.
     *
     * @param executor not used by this implementation
     * @param milliseconds new timeout; must be {@code >= 0}
     * @throws SQLException if the value is negative or the socket cannot be updated
     */
    @Override
    public void setNetworkTimeout(Executor executor /*not used*/, int milliseconds)
            throws SQLException {
        checkClosed();

        if (milliseconds < 0) {
            throw new PSQLException(GT.tr("Network timeout must be a value greater than or equal to 0."),
                    PSQLState.INVALID_PARAMETER_VALUE);
        }

        checkPermission(SQL_PERMISSION_NETWORK_TIMEOUT);

        try {
            queryExecutor.setNetworkTimeout(milliseconds);
        } catch (IOException ioe) {
            throw new PSQLException(GT.tr("Unable to set network timeout."),
                    PSQLState.COMMUNICATION_ERROR, ioe);
        }
    }
+
+    private void checkPermission(SQLPermission sqlPermissionNetworkTimeout) {
+        if (SYSTEM_GET_SECURITY_MANAGER != null && SECURITY_MANAGER_CHECK_PERMISSION != null) {
+            try {
+                Object securityManager = SYSTEM_GET_SECURITY_MANAGER.invoke();
+                if (securityManager != null) {
+                    SECURITY_MANAGER_CHECK_PERMISSION.invoke(securityManager, sqlPermissionNetworkTimeout);
+                }
+            } catch (Throwable e) {
+                throw new RuntimeException(e);
+            }
+        }
+    }
+
    /**
     * Returns the current network timeout in milliseconds.
     *
     * @throws SQLException if the connection is closed or the socket cannot be read
     */
    @Override
    public int getNetworkTimeout() throws SQLException {
        checkClosed();

        try {
            return queryExecutor.getNetworkTimeout();
        } catch (IOException ioe) {
            throw new PSQLException(GT.tr("Unable to get network timeout."),
                    PSQLState.COMMUNICATION_ERROR, ioe);
        }
    }
+
    /** Returns the default holdability for result sets created by this connection. */
    @Override
    public int getHoldability() throws SQLException {
        checkClosed();
        return rsHoldability;
    }

    /**
     * Sets the default result-set holdability.
     *
     * @throws SQLException for values other than CLOSE_CURSORS_AT_COMMIT or
     *         HOLD_CURSORS_OVER_COMMIT
     */
    @Override
    public void setHoldability(int holdability) throws SQLException {
        checkClosed();

        switch (holdability) {
            case ResultSet.CLOSE_CURSORS_AT_COMMIT:
            case ResultSet.HOLD_CURSORS_OVER_COMMIT:
                rsHoldability = holdability;
                break;
            default:
                throw new PSQLException(GT.tr("Unknown ResultSet holdability setting: {0}.", holdability),
                        PSQLState.INVALID_PARAMETER_VALUE);
        }
        LOGGER.log(Level.FINE, "  setHoldability = {0}", holdability);
    }
+
+    @Override
+    public Savepoint setSavepoint() throws SQLException {
+        checkClosed();
+
+        String pgName;
+        if (getAutoCommit()) {
+            throw new PSQLException(GT.tr("Cannot establish a savepoint in auto-commit mode."),
+                    PSQLState.NO_ACTIVE_SQL_TRANSACTION);
+        }
+
+        PSQLSavepoint savepoint = new PSQLSavepoint(savepointId++);
+        pgName = savepoint.getPGName();
+
+        // Note we can't use execSQLUpdate because we don't want
+        // to suppress BEGIN.
+        Statement stmt = createStatement();
+        stmt.executeUpdate("SAVEPOINT " + pgName);
+        stmt.close();
+
+        return savepoint;
+    }
+
+    @Override
+    public Savepoint setSavepoint(String name) throws SQLException {
+        checkClosed();
+
+        if (getAutoCommit()) {
+            throw new PSQLException(GT.tr("Cannot establish a savepoint in auto-commit mode."),
+                    PSQLState.NO_ACTIVE_SQL_TRANSACTION);
+        }
+
+        PSQLSavepoint savepoint = new PSQLSavepoint(name);
+
+        // Note we can't use execSQLUpdate because we don't want
+        // to suppress BEGIN.
+        Statement stmt = createStatement();
+        stmt.executeUpdate("SAVEPOINT " + savepoint.getPGName());
+        stmt.close();
+
+        return savepoint;
+    }
+
    /** Rolls the transaction back to the given savepoint. */
    @Override
    public void rollback(Savepoint savepoint) throws SQLException {
        checkClosed();

        PSQLSavepoint pgSavepoint = (PSQLSavepoint) savepoint;
        execSQLUpdate("ROLLBACK TO SAVEPOINT " + pgSavepoint.getPGName());
    }

    /** Releases the savepoint and invalidates the handle so it cannot be reused. */
    @Override
    public void releaseSavepoint(Savepoint savepoint) throws SQLException {
        checkClosed();

        PSQLSavepoint pgSavepoint = (PSQLSavepoint) savepoint;
        execSQLUpdate("RELEASE SAVEPOINT " + pgSavepoint.getPGName());
        pgSavepoint.invalidate();
    }
+
    /** Two-argument variant; uses the connection's default holdability. */
    @Override
    public Statement createStatement(int resultSetType, int resultSetConcurrency)
            throws SQLException {
        checkClosed();
        return createStatement(resultSetType, resultSetConcurrency, getHoldability());
    }

    /** Two-argument variant; uses the connection's default holdability. */
    @Override
    public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency)
            throws SQLException {
        checkClosed();
        return prepareStatement(sql, resultSetType, resultSetConcurrency, getHoldability());
    }

    /** Two-argument variant; uses the connection's default holdability. */
    @Override
    public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency)
            throws SQLException {
        checkClosed();
        return prepareCall(sql, resultSetType, resultSetConcurrency, getHoldability());
    }
+
+    @Override
+    public PreparedStatement prepareStatement(String sql, int autoGeneratedKeys) throws SQLException {
+        if (autoGeneratedKeys != Statement.RETURN_GENERATED_KEYS) { // NO_GENERATED_KEYS (or any other value): plain statement, no key retrieval
+            return prepareStatement(sql);
+        }
+
+        return prepareStatement(sql, (String[]) null); // null column list delegates to the String[] overload — presumably "all columns"; confirm against that overload
+    }
+
+    @Override
+    public PreparedStatement prepareStatement(String sql, int[] columnIndexes) throws SQLException {
+        if (columnIndexes != null && columnIndexes.length == 0) { // empty array: caller wants no generated keys, treat as a plain statement
+            return prepareStatement(sql);
+        }
+
+        checkClosed();
+        throw new PSQLException(GT.tr("Returning autogenerated keys is not supported."),
+                PSQLState.NOT_IMPLEMENTED); // key retrieval by column index is not implemented; only the String[] (column name) overload works
+    }
+
+    @Override
+    public PreparedStatement prepareStatement(String sql, String[] columnNames) throws SQLException {
+        if (columnNames != null && columnNames.length == 0) {
+            return prepareStatement(sql);
+        }
+
+        CachedQuery cachedQuery = borrowReturningQuery(sql, columnNames);
+        PgPreparedStatement ps =
+                new PgPreparedStatement(this, cachedQuery,
+                        ResultSet.TYPE_FORWARD_ONLY,
+                        ResultSet.CONCUR_READ_ONLY,
+                        getHoldability());
+        Query query = cachedQuery.query;
+        SqlCommand sqlCommand = query.getSqlCommand();
+        if (sqlCommand != null) {
+            ps.wantsGeneratedKeysAlways = sqlCommand.isReturningKeywordPresent();
+        } else {
+            // If composite query is given, just ignore "generated keys" arguments
+        }
+        return ps;
+    }
+
+    @Override
+    public final Map<String, String> getParameterStatuses() {
+        return queryExecutor.getParameterStatuses();
+    }
+
+    @Override
+    public final String getParameterStatus(String parameterName) {
+        return queryExecutor.getParameterStatus(parameterName);
+    }
+
+    @Override
+    public boolean getAdaptiveFetch() {
+        return queryExecutor.getAdaptiveFetch();
+    }
+
+    @Override
+    public void setAdaptiveFetch(boolean adaptiveFetch) {
+        queryExecutor.setAdaptiveFetch(adaptiveFetch);
+    }
+
+    @Override
+    public PGXmlFactoryFactory getXmlFactoryFactory() throws SQLException { // lazily resolves and caches the XML factory configured via the xmlFactoryFactory property
+        PGXmlFactoryFactory xmlFactoryFactory = this.xmlFactoryFactory;
+        if (xmlFactoryFactory != null) {
+            return xmlFactoryFactory; // already resolved — reuse cached instance
+        }
+        if (xmlFactoryFactoryClass == null || "".equals(xmlFactoryFactoryClass)) {
+            xmlFactoryFactory = DefaultPGXmlFactoryFactory.INSTANCE; // unset: secure default factory
+        } else if ("LEGACY_INSECURE".equals(xmlFactoryFactoryClass)) {
+            xmlFactoryFactory = LegacyInsecurePGXmlFactoryFactory.INSTANCE; // explicit opt-in to pre-hardening behavior
+        } else {
+            Class<?> clazz;
+            try {
+                clazz = Class.forName(xmlFactoryFactoryClass);
+            } catch (ClassNotFoundException ex) {
+                throw new PSQLException(
+                        GT.tr("Could not instantiate xmlFactoryFactory: {0}", xmlFactoryFactoryClass),
+                        PSQLState.INVALID_PARAMETER_VALUE, ex);
+            }
+            if (!PGXmlFactoryFactory.class.isAssignableFrom(clazz)) { // fixed: was clazz.isAssignableFrom(PGXmlFactoryFactory.class), which tests the wrong direction and never rejects non-implementors
+                throw new PSQLException(
+                        GT.tr("Connection property xmlFactoryFactory must implement PGXmlFactoryFactory: {0}", xmlFactoryFactoryClass),
+                        PSQLState.INVALID_PARAMETER_VALUE);
+            }
+            try {
+                xmlFactoryFactory = clazz.asSubclass(PGXmlFactoryFactory.class)
+                        .getDeclaredConstructor()
+                        .newInstance(); // requires a no-arg constructor on the user-supplied class
+            } catch (Exception ex) {
+                throw new PSQLException(
+                        GT.tr("Could not instantiate xmlFactoryFactory: {0}", xmlFactoryFactoryClass),
+                        PSQLState.INVALID_PARAMETER_VALUE, ex);
+            }
+        }
+        this.xmlFactoryFactory = xmlFactoryFactory; // benign race: concurrent callers may each build one, last write wins
+        return xmlFactoryFactory;
+    }
+
+    private enum ReadOnlyBehavior {
+        ignore,
+        transaction,
+        always
+    }
+
+    /**
+     * Handler for transaction queries.
+     */
+    private class TransactionCommandHandler extends ResultHandlerBase {
+        @Override
+        public void handleCompletion() throws SQLException {
+            SQLWarning warning = getWarning();
+            if (warning != null) {
+                PgConnection.this.addWarning(warning);
+            }
+            super.handleCompletion();
+        }
+    }
+
+    public class AbortCommand implements Runnable {
+
+        public AbortCommand() {
+        }
+
+        @Override
+        public void run() {
+            abort();
+        }
     }
-    this.xmlFactoryFactory = xmlFactoryFactory;
-    return xmlFactoryFactory;
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/PgConnectionCleaningAction.java b/pgjdbc/src/main/java/org/postgresql/jdbc/PgConnectionCleaningAction.java
index 7ac5e42..b92d5e3 100644
--- a/pgjdbc/src/main/java/org/postgresql/jdbc/PgConnectionCleaningAction.java
+++ b/pgjdbc/src/main/java/org/postgresql/jdbc/PgConnectionCleaningAction.java
@@ -26,65 +26,63 @@ import java.util.logging.Logger;
  */
 @SuppressWarnings("try")
 class PgConnectionCleaningAction implements LazyCleaner.CleaningAction<IOException> {
-  private static final Logger LOGGER = Logger.getLogger(PgConnection.class.getName());
+    private static final Logger LOGGER = Logger.getLogger(PgConnection.class.getName());
 
-  private final ResourceLock lock;
+    private final ResourceLock lock;
+    private final Closeable queryExecutorCloseAction;
+    private Throwable openStackTrace;
+    /**
+     * Timer for scheduling TimerTasks for the connection.
+     * Only instantiated if a task is actually scheduled.
+     * Access should be guarded with {@link #lock}
+     */
+    private Timer cancelTimer;
 
-  private Throwable openStackTrace;
-  private final Closeable queryExecutorCloseAction;
-
-  /**
-   * Timer for scheduling TimerTasks for the connection.
-   * Only instantiated if a task is actually scheduled.
-   * Access should be guarded with {@link #lock}
-   */
-  private Timer cancelTimer;
-
-  PgConnectionCleaningAction(
-      ResourceLock lock,
-      Throwable openStackTrace,
-      Closeable queryExecutorCloseAction) {
-    this.lock = lock;
-    this.openStackTrace = openStackTrace;
-    this.queryExecutorCloseAction = queryExecutorCloseAction;
-  }
-
-  public Timer getTimer() {
-    try (ResourceLock ignore = lock.obtain()) {
-      Timer cancelTimer = this.cancelTimer;
-      if (cancelTimer == null) {
-        cancelTimer = Driver.getSharedTimer().getTimer();
-        this.cancelTimer = cancelTimer;
-      }
-      return cancelTimer;
+    PgConnectionCleaningAction(
+            ResourceLock lock,
+            Throwable openStackTrace,
+            Closeable queryExecutorCloseAction) {
+        this.lock = lock;
+        this.openStackTrace = openStackTrace;
+        this.queryExecutorCloseAction = queryExecutorCloseAction;
     }
-  }
 
-  public void releaseTimer() {
-    try (ResourceLock ignore = lock.obtain()) {
-      if (cancelTimer != null) {
-        cancelTimer = null;
-        Driver.getSharedTimer().releaseTimer();
-      }
+    public Timer getTimer() {
+        try (ResourceLock ignore = lock.obtain()) {
+            Timer cancelTimer = this.cancelTimer;
+            if (cancelTimer == null) {
+                cancelTimer = Driver.getSharedTimer().getTimer();
+                this.cancelTimer = cancelTimer;
+            }
+            return cancelTimer;
+        }
     }
-  }
 
-  public void purgeTimerTasks() {
-    try (ResourceLock ignore = lock.obtain()) {
-      Timer timer = cancelTimer;
-      if (timer != null) {
-        timer.purge();
-      }
+    public void releaseTimer() {
+        try (ResourceLock ignore = lock.obtain()) {
+            if (cancelTimer != null) {
+                cancelTimer = null;
+                Driver.getSharedTimer().releaseTimer();
+            }
+        }
     }
-  }
 
-  @Override
-  public void onClean(boolean leak) throws IOException {
-    if (leak && openStackTrace != null) {
-      LOGGER.log(Level.WARNING, GT.tr("Leak detected: Connection.close() was not called"), openStackTrace);
+    public void purgeTimerTasks() {
+        try (ResourceLock ignore = lock.obtain()) {
+            Timer timer = cancelTimer;
+            if (timer != null) {
+                timer.purge();
+            }
+        }
+    }
+
+    @Override
+    public void onClean(boolean leak) throws IOException {
+        if (leak && openStackTrace != null) {
+            LOGGER.log(Level.WARNING, GT.tr("Leak detected: Connection.close() was not called"), openStackTrace);
+        }
+        openStackTrace = null;
+        releaseTimer();
+        queryExecutorCloseAction.close();
     }
-    openStackTrace = null;
-    releaseTimer();
-    queryExecutorCloseAction.close();
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/PgDatabaseMetaData.java b/pgjdbc/src/main/java/org/postgresql/jdbc/PgDatabaseMetaData.java
index 1184b79..19839ef 100644
--- a/pgjdbc/src/main/java/org/postgresql/jdbc/PgDatabaseMetaData.java
+++ b/pgjdbc/src/main/java/org/postgresql/jdbc/PgDatabaseMetaData.java
@@ -41,1737 +41,1868 @@ import java.util.StringTokenizer;
 
 public class PgDatabaseMetaData implements DatabaseMetaData {
 
-  public PgDatabaseMetaData(PgConnection conn) {
-    this.connection = conn;
-  }
+    private static final Map<String, Map<String, String>> tableTypeClauses;
 
-  private String keywords;
+    static { // maps JDBC table types to pg_catalog filter clauses; "SCHEMAS"/"NOSCHEMAS" select the variant with/without namespace joins
+        tableTypeClauses = new HashMap<>();
+        Map<String, String> ht = new HashMap<>();
+        tableTypeClauses.put("TABLE", ht);
+        ht.put("SCHEMAS", "c.relkind = 'r' AND n.nspname !~ '^pg_' AND n.nspname <> 'information_schema'");
+        ht.put("NOSCHEMAS", "c.relkind = 'r' AND c.relname !~ '^pg_'");
+        ht = new HashMap<>();
+        tableTypeClauses.put("PARTITIONED TABLE", ht);
+        ht.put("SCHEMAS", "c.relkind = 'p' AND n.nspname !~ '^pg_' AND n.nspname <> 'information_schema'");
+        ht.put("NOSCHEMAS", "c.relkind = 'p' AND c.relname !~ '^pg_'");
+        ht = new HashMap<>();
+        tableTypeClauses.put("VIEW", ht);
+        ht.put("SCHEMAS",
+                "c.relkind = 'v' AND n.nspname <> 'pg_catalog' AND n.nspname <> 'information_schema'");
+        ht.put("NOSCHEMAS", "c.relkind = 'v' AND c.relname !~ '^pg_'");
+        ht = new HashMap<>();
+        tableTypeClauses.put("INDEX", ht);
+        ht.put("SCHEMAS",
+                "c.relkind = 'i' AND n.nspname !~ '^pg_' AND n.nspname <> 'information_schema'");
+        ht.put("NOSCHEMAS", "c.relkind = 'i' AND c.relname !~ '^pg_'");
+        ht = new HashMap<>();
+        tableTypeClauses.put("PARTITIONED INDEX", ht);
+        ht.put("SCHEMAS", "c.relkind = 'I' AND n.nspname !~ '^pg_' AND n.nspname <> 'information_schema'");
+        ht.put("NOSCHEMAS", "c.relkind = 'I' AND c.relname !~ '^pg_'");
+        ht = new HashMap<>();
+        tableTypeClauses.put("SEQUENCE", ht);
+        ht.put("SCHEMAS", "c.relkind = 'S'");
+        ht.put("NOSCHEMAS", "c.relkind = 'S'");
+        ht = new HashMap<>();
+        tableTypeClauses.put("TYPE", ht);
+        ht.put("SCHEMAS",
+                "c.relkind = 'c' AND n.nspname !~ '^pg_' AND n.nspname <> 'information_schema'");
+        ht.put("NOSCHEMAS", "c.relkind = 'c' AND c.relname !~ '^pg_'");
+        ht = new HashMap<>();
+        tableTypeClauses.put("SYSTEM TABLE", ht);
+        ht.put("SCHEMAS",
+                "c.relkind = 'r' AND (n.nspname = 'pg_catalog' OR n.nspname = 'information_schema')");
+        ht.put("NOSCHEMAS",
+                "c.relkind = 'r' AND c.relname ~ '^pg_' AND c.relname !~ '^pg_toast_' AND c.relname !~ '^pg_temp_'");
+        ht = new HashMap<>();
+        tableTypeClauses.put("SYSTEM TOAST TABLE", ht);
+        ht.put("SCHEMAS", "c.relkind = 'r' AND n.nspname = 'pg_toast'");
+        ht.put("NOSCHEMAS", "c.relkind = 'r' AND c.relname ~ '^pg_toast_'");
+        ht = new HashMap<>();
+        tableTypeClauses.put("SYSTEM TOAST INDEX", ht);
+        ht.put("SCHEMAS", "c.relkind = 'i' AND n.nspname = 'pg_toast'");
+        ht.put("NOSCHEMAS", "c.relkind = 'i' AND c.relname ~ '^pg_toast_'");
+        ht = new HashMap<>();
+        tableTypeClauses.put("SYSTEM VIEW", ht);
+        ht.put("SCHEMAS",
+                "c.relkind = 'v' AND (n.nspname = 'pg_catalog' OR n.nspname = 'information_schema') ");
+        ht.put("NOSCHEMAS", "c.relkind = 'v' AND c.relname ~ '^pg_'");
+        ht = new HashMap<>();
+        tableTypeClauses.put("SYSTEM INDEX", ht);
+        ht.put("SCHEMAS",
+                "c.relkind = 'i' AND (n.nspname = 'pg_catalog' OR n.nspname = 'information_schema') ");
+        ht.put("NOSCHEMAS",
+                "c.relkind = 'i' AND c.relname ~ '^pg_' AND c.relname !~ '^pg_toast_' AND c.relname !~ '^pg_temp_'"); // fixed: was relkind 'v' (view), evidently copy-pasted from SYSTEM VIEW; indexes have relkind 'i'
+        ht = new HashMap<>();
+        tableTypeClauses.put("TEMPORARY TABLE", ht);
+        ht.put("SCHEMAS", "c.relkind IN ('r','p') AND n.nspname ~ '^pg_temp_' ");
+        ht.put("NOSCHEMAS", "c.relkind IN ('r','p') AND c.relname ~ '^pg_temp_' ");
+        ht = new HashMap<>();
+        tableTypeClauses.put("TEMPORARY INDEX", ht);
+        ht.put("SCHEMAS", "c.relkind = 'i' AND n.nspname ~ '^pg_temp_' ");
+        ht.put("NOSCHEMAS", "c.relkind = 'i' AND c.relname ~ '^pg_temp_' ");
+        ht = new HashMap<>();
+        tableTypeClauses.put("TEMPORARY VIEW", ht);
+        ht.put("SCHEMAS", "c.relkind = 'v' AND n.nspname ~ '^pg_temp_' ");
+        ht.put("NOSCHEMAS", "c.relkind = 'v' AND c.relname ~ '^pg_temp_' ");
+        ht = new HashMap<>();
+        tableTypeClauses.put("TEMPORARY SEQUENCE", ht);
+        ht.put("SCHEMAS", "c.relkind = 'S' AND n.nspname ~ '^pg_temp_' ");
+        ht.put("NOSCHEMAS", "c.relkind = 'S' AND c.relname ~ '^pg_temp_' ");
+        ht = new HashMap<>();
+        tableTypeClauses.put("FOREIGN TABLE", ht);
+        ht.put("SCHEMAS", "c.relkind = 'f'");
+        ht.put("NOSCHEMAS", "c.relkind = 'f'");
+        ht = new HashMap<>();
+        tableTypeClauses.put("MATERIALIZED VIEW", ht);
+        ht.put("SCHEMAS", "c.relkind = 'm'");
+        ht.put("NOSCHEMAS", "c.relkind = 'm'");
+    }
 
-  protected final PgConnection connection; // The connection association
+    protected final PgConnection connection; // The connection association
+    private String keywords;
+    private int nameDataLength; // length for name datatype
+    private int indexMaxKeys; // maximum number of keys in an index.
 
-  private int nameDataLength; // length for name datatype
-  private int indexMaxKeys; // maximum number of keys in an index.
+    public PgDatabaseMetaData(PgConnection conn) {
+        this.connection = conn;
+    }
 
-  protected int getMaxIndexKeys() throws SQLException {
-    if (indexMaxKeys == 0) {
-      String sql;
-      sql = "SELECT setting FROM pg_catalog.pg_settings WHERE name='max_index_keys'";
-
-      Statement stmt = connection.createStatement();
-      ResultSet rs = null;
-      try {
-        rs = stmt.executeQuery(sql);
-        if (!rs.next()) {
-          stmt.close();
-          throw new PSQLException(
-              GT.tr(
-                  "Unable to determine a value for MaxIndexKeys due to missing system catalog data."),
-              PSQLState.UNEXPECTED_ERROR);
+    /**
+     * Parse an String of ACLs into a List of ACLs.
+     */
+    private static List<String> parseACLArray(String aclString) {
+        List<String> acls = new ArrayList<>();
+        if (aclString == null || aclString.isEmpty()) {
+            return acls;
         }
-        indexMaxKeys = rs.getInt(1);
-      } finally {
-        JdbcBlackHole.close(rs);
-        JdbcBlackHole.close(stmt);
-      }
-    }
-    return indexMaxKeys;
-  }
+        boolean inQuotes = false;
+        // start at 1 because of leading "{"
+        int beginIndex = 1;
+        char prevChar = ' ';
+        for (int i = beginIndex; i < aclString.length(); i++) {
 
-  protected int getMaxNameLength() throws SQLException {
-    if (nameDataLength == 0) {
-      String sql;
-      sql = "SELECT t.typlen FROM pg_catalog.pg_type t, pg_catalog.pg_namespace n "
-            + "WHERE t.typnamespace=n.oid AND t.typname='name' AND n.nspname='pg_catalog'";
-
-      Statement stmt = connection.createStatement();
-      ResultSet rs = null;
-      try {
-        rs = stmt.executeQuery(sql);
-        if (!rs.next()) {
-          throw new PSQLException(GT.tr("Unable to find name datatype in the system catalogs."),
-              PSQLState.UNEXPECTED_ERROR);
+            char c = aclString.charAt(i);
+            if (c == '"' && prevChar != '\\') {
+                inQuotes = !inQuotes;
+            } else if (c == ',' && !inQuotes) {
+                acls.add(aclString.substring(beginIndex, i));
+                beginIndex = i + 1;
+            }
+            prevChar = c;
         }
-        nameDataLength = rs.getInt("typlen");
-      } finally {
-        JdbcBlackHole.close(rs);
-        JdbcBlackHole.close(stmt);
-      }
-    }
-    return nameDataLength - 1;
-  }
+        // add last element removing the trailing "}"
+        acls.add(aclString.substring(beginIndex, aclString.length() - 1));
 
-  @Override
-  public boolean allProceduresAreCallable() throws SQLException {
-    return true; // For now...
-  }
-
-  @Override
-  public boolean allTablesAreSelectable() throws SQLException {
-    return true; // For now...
-  }
-
-  @Override
-  public String getURL() throws SQLException {
-    return connection.getURL();
-  }
-
-  @Override
-  public String getUserName() throws SQLException {
-    return connection.getUserName();
-  }
-
-  @Override
-  public boolean isReadOnly() throws SQLException {
-    return connection.isReadOnly();
-  }
-
-  @Override
-  public boolean nullsAreSortedHigh() throws SQLException {
-    return true;
-  }
-
-  @Override
-  public boolean nullsAreSortedLow() throws SQLException {
-    return false;
-  }
-
-  @Override
-  public boolean nullsAreSortedAtStart() throws SQLException {
-    return false;
-  }
-
-  @Override
-  public boolean nullsAreSortedAtEnd() throws SQLException {
-    return false;
-  }
-
-  /**
-   * Retrieves the name of this database product. We hope that it is PostgreSQL, so we return that
-   * explicitly.
-   *
-   * @return "PostgreSQL"
-   */
-  @Override
-  public String getDatabaseProductName() throws SQLException {
-    return "PostgreSQL";
-  }
-
-  @Override
-  public String getDatabaseProductVersion() throws SQLException {
-    return connection.getDBVersionNumber();
-  }
-
-  @Override
-  public String getDriverName() {
-    return DriverInfo.DRIVER_NAME;
-  }
-
-  @Override
-  public String getDriverVersion() {
-    return DriverInfo.DRIVER_VERSION;
-  }
-
-  @Override
-  public int getDriverMajorVersion() {
-    return DriverInfo.MAJOR_VERSION;
-  }
-
-  @Override
-  public int getDriverMinorVersion() {
-    return DriverInfo.MINOR_VERSION;
-  }
-
-  /**
-   * Does the database store tables in a local file? No - it stores them in a file on the server.
-   *
-   * @return true if so
-   * @throws SQLException if a database access error occurs
-   */
-  @Override
-  public boolean usesLocalFiles() throws SQLException {
-    return false;
-  }
-
-  /**
-   * Does the database use a file for each table? Well, not really, since it doesn't use local files.
-   *
-   * @return true if so
-   * @throws SQLException if a database access error occurs
-   */
-  @Override
-  public boolean usesLocalFilePerTable() throws SQLException {
-    return false;
-  }
-
-  /**
-   * Does the database treat mixed case unquoted SQL identifiers as case sensitive and as a result
-   * store them in mixed case? A JDBC-Compliant driver will always return false.
-   *
-   * @return true if so
-   * @throws SQLException if a database access error occurs
-   */
-  @Override
-  public boolean supportsMixedCaseIdentifiers() throws SQLException {
-    return false;
-  }
-
-  @Override
-  public boolean storesUpperCaseIdentifiers() throws SQLException {
-    return false;
-  }
-
-  @Override
-  public boolean storesLowerCaseIdentifiers() throws SQLException {
-    return true;
-  }
-
-  @Override
-  public boolean storesMixedCaseIdentifiers() throws SQLException {
-    return false;
-  }
-
-  /**
-   * Does the database treat mixed case quoted SQL identifiers as case sensitive and as a result
-   * store them in mixed case? A JDBC compliant driver will always return true.
-   *
-   * @return true if so
-   * @throws SQLException if a database access error occurs
-   */
-  @Override
-  public boolean supportsMixedCaseQuotedIdentifiers() throws SQLException {
-    return true;
-  }
-
-  @Override
-  public boolean storesUpperCaseQuotedIdentifiers() throws SQLException {
-    return false;
-  }
-
-  @Override
-  public boolean storesLowerCaseQuotedIdentifiers() throws SQLException {
-    return false;
-  }
-
-  @Override
-  public boolean storesMixedCaseQuotedIdentifiers() throws SQLException {
-    return false;
-  }
-
-  /**
-   * What is the string used to quote SQL identifiers? This returns a space if identifier quoting
-   * isn't supported. A JDBC Compliant driver will always use a double quote character.
-   *
-   * @return the quoting string
-   * @throws SQLException if a database access error occurs
-   */
-  @Override
-  public String getIdentifierQuoteString() throws SQLException {
-    return "\"";
-  }
-
-  /**
-   * {@inheritDoc}
-   *
-   * <p>From PostgreSQL 9.0+ return the keywords from pg_catalog.pg_get_keywords()</p>
-   *
-   * @return a comma separated list of keywords we use
-   * @throws SQLException if a database access error occurs
-   */
-  @Override
-  public String getSQLKeywords() throws SQLException {
-    connection.checkClosed();
-    String keywords = this.keywords;
-    if (keywords == null) {
-      if (connection.haveMinimumServerVersion(ServerVersion.v9_0)) {
-        // Exclude SQL:2003 keywords (https://github.com/ronsavage/SQL/blob/master/sql-2003-2.bnf)
-        // from the returned list, ugly but required by jdbc spec.
-        String sql = "select string_agg(word, ',') from pg_catalog.pg_get_keywords() "
-            + "where word <> ALL ('{a,abs,absolute,action,ada,add,admin,after,all,allocate,alter,"
-            + "always,and,any,are,array,as,asc,asensitive,assertion,assignment,asymmetric,at,atomic,"
-            + "attribute,attributes,authorization,avg,before,begin,bernoulli,between,bigint,binary,"
-            + "blob,boolean,both,breadth,by,c,call,called,cardinality,cascade,cascaded,case,cast,"
-            + "catalog,catalog_name,ceil,ceiling,chain,char,char_length,character,character_length,"
-            + "character_set_catalog,character_set_name,character_set_schema,characteristics,"
-            + "characters,check,checked,class_origin,clob,close,coalesce,cobol,code_units,collate,"
-            + "collation,collation_catalog,collation_name,collation_schema,collect,column,"
-            + "column_name,command_function,command_function_code,commit,committed,condition,"
-            + "condition_number,connect,connection_name,constraint,constraint_catalog,constraint_name,"
-            + "constraint_schema,constraints,constructors,contains,continue,convert,corr,"
-            + "corresponding,count,covar_pop,covar_samp,create,cross,cube,cume_dist,current,"
-            + "current_collation,current_date,current_default_transform_group,current_path,"
-            + "current_role,current_time,current_timestamp,current_transform_group_for_type,current_user,"
-            + "cursor,cursor_name,cycle,data,date,datetime_interval_code,datetime_interval_precision,"
-            + "day,deallocate,dec,decimal,declare,default,defaults,deferrable,deferred,defined,definer,"
-            + "degree,delete,dense_rank,depth,deref,derived,desc,describe,descriptor,deterministic,"
-            + "diagnostics,disconnect,dispatch,distinct,domain,double,drop,dynamic,dynamic_function,"
-            + "dynamic_function_code,each,element,else,end,end-exec,equals,escape,every,except,"
-            + "exception,exclude,excluding,exec,execute,exists,exp,external,extract,false,fetch,filter,"
-            + "final,first,float,floor,following,for,foreign,fortran,found,free,from,full,function,"
-            + "fusion,g,general,get,global,go,goto,grant,granted,group,grouping,having,hierarchy,hold,"
-            + "hour,identity,immediate,implementation,in,including,increment,indicator,initially,"
-            + "inner,inout,input,insensitive,insert,instance,instantiable,int,integer,intersect,"
-            + "intersection,interval,into,invoker,is,isolation,join,k,key,key_member,key_type,language,"
-            + "large,last,lateral,leading,left,length,level,like,ln,local,localtime,localtimestamp,"
-            + "locator,lower,m,map,match,matched,max,maxvalue,member,merge,message_length,"
-            + "message_octet_length,message_text,method,min,minute,minvalue,mod,modifies,module,month,"
-            + "more,multiset,mumps,name,names,national,natural,nchar,nclob,nesting,new,next,no,none,"
-            + "normalize,normalized,not,\"null\",nullable,nullif,nulls,number,numeric,object,"
-            + "octet_length,octets,of,old,on,only,open,option,options,or,order,ordering,ordinality,"
-            + "others,out,outer,output,over,overlaps,overlay,overriding,pad,parameter,parameter_mode,"
-            + "parameter_name,parameter_ordinal_position,parameter_specific_catalog,"
-            + "parameter_specific_name,parameter_specific_schema,partial,partition,pascal,path,"
-            + "percent_rank,percentile_cont,percentile_disc,placing,pli,position,power,preceding,"
-            + "precision,prepare,preserve,primary,prior,privileges,procedure,public,range,rank,read,"
-            + "reads,real,recursive,ref,references,referencing,regr_avgx,regr_avgy,regr_count,"
-            + "regr_intercept,regr_r2,regr_slope,regr_sxx,regr_sxy,regr_syy,relative,release,"
-            + "repeatable,restart,result,return,returned_cardinality,returned_length,"
-            + "returned_octet_length,returned_sqlstate,returns,revoke,right,role,rollback,rollup,"
-            + "routine,routine_catalog,routine_name,routine_schema,row,row_count,row_number,rows,"
-            + "savepoint,scale,schema,schema_name,scope_catalog,scope_name,scope_schema,scroll,"
-            + "search,second,section,security,select,self,sensitive,sequence,serializable,server_name,"
-            + "session,session_user,set,sets,similar,simple,size,smallint,some,source,space,specific,"
-            + "specific_name,specifictype,sql,sqlexception,sqlstate,sqlwarning,sqrt,start,state,"
-            + "statement,static,stddev_pop,stddev_samp,structure,style,subclass_origin,submultiset,"
-            + "substring,sum,symmetric,system,system_user,table,table_name,tablesample,temporary,then,"
-            + "ties,time,timestamp,timezone_hour,timezone_minute,to,top_level_count,trailing,"
-            + "transaction,transaction_active,transactions_committed,transactions_rolled_back,"
-            + "transform,transforms,translate,translation,treat,trigger,trigger_catalog,trigger_name,"
-            + "trigger_schema,trim,true,type,uescape,unbounded,uncommitted,under,union,unique,unknown,"
-            + "unnamed,unnest,update,upper,usage,user,user_defined_type_catalog,user_defined_type_code,"
-            + "user_defined_type_name,user_defined_type_schema,using,value,values,var_pop,var_samp,"
-            + "varchar,varying,view,when,whenever,where,width_bucket,window,with,within,without,work,"
-            + "write,year,zone}'::text[])";
-
-        Statement stmt = null;
-        ResultSet rs = null;
-        try {
-          stmt = connection.createStatement();
-          rs = stmt.executeQuery(sql);
-          if (!rs.next()) {
-            throw new PSQLException(GT.tr("Unable to find keywords in the system catalogs."),
-                PSQLState.UNEXPECTED_ERROR);
-          }
-          keywords = rs.getString(1);
-        } finally {
-          JdbcBlackHole.close(rs);
-          JdbcBlackHole.close(stmt);
+        // Strip out enclosing quotes, if any.
+        for (int i = 0; i < acls.size(); i++) {
+            String acl = acls.get(i);
+            if (acl.startsWith("\"") && acl.endsWith("\"")) {
+                acl = acl.substring(1, acl.length() - 1);
+                acls.set(i, acl);
+            }
         }
-      } else {
-        // Static list from PG8.2 src/backend/parser/keywords.c with SQL:2003 excluded.
-        keywords = "abort,access,aggregate,also,analyse,analyze,backward,bit,cache,checkpoint,class,"
-            + "cluster,comment,concurrently,connection,conversion,copy,csv,database,delimiter,"
-            + "delimiters,disable,do,enable,encoding,encrypted,exclusive,explain,force,forward,freeze,"
-            + "greatest,handler,header,if,ilike,immutable,implicit,index,indexes,inherit,inherits,"
-            + "instead,isnull,least,limit,listen,load,location,lock,mode,move,nothing,notify,notnull,"
-            + "nowait,off,offset,oids,operator,owned,owner,password,prepared,procedural,quote,reassign,"
-            + "recheck,reindex,rename,replace,reset,restrict,returning,rule,setof,share,show,stable,"
-            + "statistics,stdin,stdout,storage,strict,sysid,tablespace,temp,template,truncate,trusted,"
-            + "unencrypted,unlisten,until,vacuum,valid,validator,verbose,volatile";
-      }
-      this.keywords = keywords;
+        return acls;
     }
-    return keywords;
-  }
 
-  @Override
-  @SuppressWarnings("deprecation")
-  public String getNumericFunctions() throws SQLException {
-    return EscapedFunctions.ABS + ',' + EscapedFunctions.ACOS + ',' + EscapedFunctions.ASIN + ','
-        + EscapedFunctions.ATAN + ',' + EscapedFunctions.ATAN2 + ',' + EscapedFunctions.CEILING
-        + ',' + EscapedFunctions.COS + ',' + EscapedFunctions.COT + ',' + EscapedFunctions.DEGREES
-        + ',' + EscapedFunctions.EXP + ',' + EscapedFunctions.FLOOR + ',' + EscapedFunctions.LOG
-        + ',' + EscapedFunctions.LOG10 + ',' + EscapedFunctions.MOD + ',' + EscapedFunctions.PI
-        + ',' + EscapedFunctions.POWER + ',' + EscapedFunctions.RADIANS + ','
-        + EscapedFunctions.ROUND + ',' + EscapedFunctions.SIGN + ',' + EscapedFunctions.SIN + ','
-        + EscapedFunctions.SQRT + ',' + EscapedFunctions.TAN + ',' + EscapedFunctions.TRUNCATE;
-
-  }
-
-  @Override
-  @SuppressWarnings("deprecation")
-  public String getStringFunctions() throws SQLException {
-    String funcs = EscapedFunctions.ASCII + ',' + EscapedFunctions.CHAR + ','
-        + EscapedFunctions.CONCAT + ',' + EscapedFunctions.LCASE + ',' + EscapedFunctions.LEFT + ','
-        + EscapedFunctions.LENGTH + ',' + EscapedFunctions.LTRIM + ',' + EscapedFunctions.REPEAT
-        + ',' + EscapedFunctions.RTRIM + ',' + EscapedFunctions.SPACE + ','
-        + EscapedFunctions.SUBSTRING + ',' + EscapedFunctions.UCASE;
-
-    // Currently these don't work correctly with parameterized
-    // arguments, so leave them out. They reorder the arguments
-    // when rewriting the query, but no translation layer is provided,
-    // so a setObject(N, obj) will not go to the correct parameter.
-    // ','+EscapedFunctions.INSERT+','+EscapedFunctions.LOCATE+
-    // ','+EscapedFunctions.RIGHT+
-
-    funcs += ',' + EscapedFunctions.REPLACE;
-
-    return funcs;
-  }
-
-  @Override
-  @SuppressWarnings("deprecation")
-  public String getSystemFunctions() throws SQLException {
-    return EscapedFunctions.DATABASE + ',' + EscapedFunctions.IFNULL + ',' + EscapedFunctions.USER;
-  }
-
-  @Override
-  @SuppressWarnings("deprecation")
-  public String getTimeDateFunctions() throws SQLException {
-    String timeDateFuncs = EscapedFunctions.CURDATE + ',' + EscapedFunctions.CURTIME + ','
-        + EscapedFunctions.DAYNAME + ',' + EscapedFunctions.DAYOFMONTH + ','
-        + EscapedFunctions.DAYOFWEEK + ',' + EscapedFunctions.DAYOFYEAR + ','
-        + EscapedFunctions.HOUR + ',' + EscapedFunctions.MINUTE + ',' + EscapedFunctions.MONTH + ','
-        + EscapedFunctions.MONTHNAME + ',' + EscapedFunctions.NOW + ',' + EscapedFunctions.QUARTER
-        + ',' + EscapedFunctions.SECOND + ',' + EscapedFunctions.WEEK + ',' + EscapedFunctions.YEAR;
-
-    timeDateFuncs += ',' + EscapedFunctions.TIMESTAMPADD;
-
-    // +','+EscapedFunctions.TIMESTAMPDIFF;
-
-    return timeDateFuncs;
-  }
-
-  @Override
-  public String getSearchStringEscape() throws SQLException {
-    // This method originally returned "\\\\" assuming that it
-    // would be fed directly into pg's input parser so it would
-    // need two backslashes. This isn't how it's supposed to be
-    // used though. If passed as a PreparedStatement parameter
-    // or fed to a DatabaseMetaData method then double backslashes
-    // are incorrect. If you're feeding something directly into
-    // a query you are responsible for correctly escaping it.
-    // With 8.2+ this escaping is a little trickier because you
-    // must know the setting of standard_conforming_strings, but
-    // that's not our problem.
-
-    return "\\";
-  }
-
-  /**
-   * {@inheritDoc}
-   *
-   * <p>Postgresql allows any high-bit character to be used in an unquoted identifier, so we can't
-   * possibly list them all.</p>
-   *
-   * <p>From the file src/backend/parser/scan.l, an identifier is ident_start [A-Za-z\200-\377_]
-   * ident_cont [A-Za-z\200-\377_0-9\$] identifier {ident_start}{ident_cont}*</p>
-   *
-   * @return a string containing the extra characters
-   * @throws SQLException if a database access error occurs
-   */
-  @Override
-  public String getExtraNameCharacters() throws SQLException {
-    return "";
-  }
-
-  /**
-   * {@inheritDoc}
-   *
-   * @return true if connected to PostgreSQL 6.1+
-   */
-  @Override
-  public boolean supportsAlterTableWithAddColumn() throws SQLException {
-    return true;
-  }
-
-  /**
-   * {@inheritDoc}
-   *
-   * @return true if connected to PostgreSQL 7.3+
-   */
-  @Override
-  public boolean supportsAlterTableWithDropColumn() throws SQLException {
-    return true;
-  }
-
-  @Override
-  public boolean supportsColumnAliasing() throws SQLException {
-    return true;
-  }
-
-  @Override
-  public boolean nullPlusNonNullIsNull() throws SQLException {
-    return true;
-  }
-
-  @Override
-  public boolean supportsConvert() throws SQLException {
-    return false;
-  }
-
-  @Override
-  public boolean supportsConvert(int fromType, int toType) throws SQLException {
-    return false;
-  }
-
-  @Override
-  public boolean supportsTableCorrelationNames() throws SQLException {
-    return true;
-  }
-
-  @Override
-  public boolean supportsDifferentTableCorrelationNames() throws SQLException {
-    return false;
-  }
-
-  @Override
-  public boolean supportsExpressionsInOrderBy() throws SQLException {
-    return true;
-  }
-
-  /**
-   * {@inheritDoc}
-   *
-   * @return true if connected to PostgreSQL 6.4+
-   */
-  @Override
-  public boolean supportsOrderByUnrelated() throws SQLException {
-    return true;
-  }
-
-  @Override
-  public boolean supportsGroupBy() throws SQLException {
-    return true;
-  }
-
-  /**
-   * {@inheritDoc}
-   *
-   * @return true if connected to PostgreSQL 6.4+
-   */
-  @Override
-  public boolean supportsGroupByUnrelated() throws SQLException {
-    return true;
-  }
-
-  /*
-   * {@inheritDoc}
-   *
-   * @return true if connected to PostgreSQL 6.4+
-   */
-  @Override
-  public boolean supportsGroupByBeyondSelect() throws SQLException {
-    return true;
-  }
-
-  /*
-   * {@inheritDoc}
-   *
-   * @return true if connected to PostgreSQL 7.1+
-   */
-  @Override
-  public boolean supportsLikeEscapeClause() throws SQLException {
-    return true;
-  }
-
-  @Override
-  public boolean supportsMultipleResultSets() throws SQLException {
-    return true;
-  }
-
-  @Override
-  public boolean supportsMultipleTransactions() throws SQLException {
-    return true;
-  }
-
-  @Override
-  public boolean supportsNonNullableColumns() throws SQLException {
-    return true;
-  }
-
-  /**
-   * {@inheritDoc}
-   *
-   * <p>This grammar is defined at:
-   * <a href="http://www.microsoft.com/msdn/sdk/platforms/doc/odbc/src/intropr.htm">
-   *     http://www.microsoft.com/msdn/sdk/platforms/doc/odbc/src/intropr.htm</a></p>
-   *
-   * <p>In Appendix C. From this description, we seem to support the ODBC minimal (Level 0) grammar.</p>
-   *
-   * @return true
-   */
-  @Override
-  public boolean supportsMinimumSQLGrammar() throws SQLException {
-    return true;
-  }
-
-  /**
-   * Does this driver support the Core ODBC SQL grammar. We need SQL-92 conformance for this.
-   *
-   * @return false
-   * @throws SQLException if a database access error occurs
-   */
-  @Override
-  public boolean supportsCoreSQLGrammar() throws SQLException {
-    return false;
-  }
-
-  /**
-   * Does this driver support the Extended (Level 2) ODBC SQL grammar. We don't conform to the Core
-   * (Level 1), so we can't conform to the Extended SQL Grammar.
-   *
-   * @return false
-   * @throws SQLException if a database access error occurs
-   */
-  @Override
-  public boolean supportsExtendedSQLGrammar() throws SQLException {
-    return false;
-  }
-
-  /**
-   * Does this driver support the ANSI-92 entry level SQL grammar? All JDBC Compliant drivers must
-   * return true. We currently report false until 'schema' support is added. Then this should be
-   * changed to return true, since we will be mostly compliant (probably more compliant than many
-   * other databases) And since this is a requirement for all JDBC drivers we need to get to the
-   * point where we can return true.
-   *
-   * @return true if connected to PostgreSQL 7.3+
-   * @throws SQLException if a database access error occurs
-   */
-  @Override
-  public boolean supportsANSI92EntryLevelSQL() throws SQLException {
-    return true;
-  }
-
-  /**
-   * {@inheritDoc}
-   *
-   * @return false
-   */
-  @Override
-  public boolean supportsANSI92IntermediateSQL() throws SQLException {
-    return false;
-  }
-
-  /**
-   * {@inheritDoc}
-   *
-   * @return false
-   */
-  @Override
-  public boolean supportsANSI92FullSQL() throws SQLException {
-    return false;
-  }
-
-  /*
-   * Is the SQL Integrity Enhancement Facility supported? Our best guess is that this means support
-   * for constraints
-   *
-   * @return true
-   *
-   * @exception SQLException if a database access error occurs
-   */
-  @Override
-  public boolean supportsIntegrityEnhancementFacility() throws SQLException {
-    return true;
-  }
-
-  /**
-   * {@inheritDoc}
-   *
-   * @return true if connected to PostgreSQL 7.1+
-   */
-  @Override
-  public boolean supportsOuterJoins() throws SQLException {
-    return true;
-  }
-
-  /**
-   * {@inheritDoc}
-   *
-   * @return true if connected to PostgreSQL 7.1+
-   */
-  @Override
-  public boolean supportsFullOuterJoins() throws SQLException {
-    return true;
-  }
-
-  /**
-   * {@inheritDoc}
-   *
-   * @return true if connected to PostgreSQL 7.1+
-   */
-  @Override
-  public boolean supportsLimitedOuterJoins() throws SQLException {
-    return true;
-  }
-
-  /**
-   * {@inheritDoc}
-   * <p>PostgreSQL doesn't have schemas, but when it does, we'll use the term "schema".</p>
-   *
-   * @return {@code "schema"}
-   */
-  @Override
-  public String getSchemaTerm() throws SQLException {
-    return "schema";
-  }
-
-  /**
-   * {@inheritDoc}
-   *
-   * @return {@code "function"}
-   */
-  @Override
-  public String getProcedureTerm() throws SQLException {
-    return "function";
-  }
-
-  /**
-   * {@inheritDoc}
-   *
-   * @return {@code "database"}
-   */
-  @Override
-  public String getCatalogTerm() throws SQLException {
-    return "database";
-  }
-
-  @Override
-  public boolean isCatalogAtStart() throws SQLException {
-    return true;
-  }
-
-  @Override
-  public String getCatalogSeparator() throws SQLException {
-    return ".";
-  }
-
-  /**
-   * {@inheritDoc}
-   *
-   * @return true if connected to PostgreSQL 7.3+
-   */
-  @Override
-  public boolean supportsSchemasInDataManipulation() throws SQLException {
-    return true;
-  }
-
-  /**
-   * {@inheritDoc}
-   *
-   * @return true if connected to PostgreSQL 7.3+
-   */
-  @Override
-  public boolean supportsSchemasInProcedureCalls() throws SQLException {
-    return true;
-  }
-
-  /**
-   * {@inheritDoc}
-   *
-   * @return true if connected to PostgreSQL 7.3+
-   */
-  @Override
-  public boolean supportsSchemasInTableDefinitions() throws SQLException {
-    return true;
-  }
-
-  /**
-   * {@inheritDoc}
-   *
-   * @return true if connected to PostgreSQL 7.3+
-   */
-  @Override
-  public boolean supportsSchemasInIndexDefinitions() throws SQLException {
-    return true;
-  }
-
-  /**
-   * {@inheritDoc}
-   *
-   * @return true if connected to PostgreSQL 7.3+
-   */
-  @Override
-  public boolean supportsSchemasInPrivilegeDefinitions() throws SQLException {
-    return true;
-  }
-
-  @Override
-  public boolean supportsCatalogsInDataManipulation() throws SQLException {
-    return false;
-  }
-
-  @Override
-  public boolean supportsCatalogsInProcedureCalls() throws SQLException {
-    return false;
-  }
-
-  @Override
-  public boolean supportsCatalogsInTableDefinitions() throws SQLException {
-    return false;
-  }
-
-  @Override
-  public boolean supportsCatalogsInIndexDefinitions() throws SQLException {
-    return false;
-  }
-
-  @Override
-  public boolean supportsCatalogsInPrivilegeDefinitions() throws SQLException {
-    return false;
-  }
-
-  /**
-   * We support cursors for gets only it seems. I dont see a method to get a positioned delete.
-   *
-   * @return false
-   * @throws SQLException if a database access error occurs
-   */
-  @Override
-  public boolean supportsPositionedDelete() throws SQLException {
-    return false; // For now...
-  }
-
-  @Override
-  public boolean supportsPositionedUpdate() throws SQLException {
-    return false; // For now...
-  }
-
-  /**
-   * {@inheritDoc}
-   *
-   * @return true if connected to PostgreSQL 6.5+
-   */
-  @Override
-  public boolean supportsSelectForUpdate() throws SQLException {
-    return true;
-  }
-
-  @Override
-  public boolean supportsStoredProcedures() throws SQLException {
-    return true;
-  }
-
-  @Override
-  public boolean supportsSubqueriesInComparisons() throws SQLException {
-    return true;
-  }
-
-  @Override
-  public boolean supportsSubqueriesInExists() throws SQLException {
-    return true;
-  }
-
-  @Override
-  public boolean supportsSubqueriesInIns() throws SQLException {
-    return true;
-  }
-
-  @Override
-  public boolean supportsSubqueriesInQuantifieds() throws SQLException {
-    return true;
-  }
-
-  /**
-   * {@inheritDoc}
-   *
-   * @return true if connected to PostgreSQL 7.1+
-   */
-  @Override
-  public boolean supportsCorrelatedSubqueries() throws SQLException {
-    return true;
-  }
-
-  /**
-   * {@inheritDoc}
-   *
-   * @return true if connected to PostgreSQL 6.3+
-   */
-  @Override
-  public boolean supportsUnion() throws SQLException {
-    return true; // since 6.3
-  }
-
-  /**
-   * {@inheritDoc}
-   *
-   * @return true if connected to PostgreSQL 7.1+
-   */
-  @Override
-  public boolean supportsUnionAll() throws SQLException {
-    return true;
-  }
-
-  /**
-   * {@inheritDoc} In PostgreSQL, Cursors are only open within transactions.
-   */
-  @Override
-  public boolean supportsOpenCursorsAcrossCommit() throws SQLException {
-    return false;
-  }
-
-  @Override
-  public boolean supportsOpenCursorsAcrossRollback() throws SQLException {
-    return false;
-  }
-
-  /**
-   * {@inheritDoc}
-   * <p>Can statements remain open across commits? They may, but this driver cannot guarantee that. In
-   * further reflection. we are talking a Statement object here, so the answer is yes, since the
-   * Statement is only a vehicle to ExecSQL()</p>
-   *
-   * @return true
-   */
-  @Override
-  public boolean supportsOpenStatementsAcrossCommit() throws SQLException {
-    return true;
-  }
-
-  /**
-   * {@inheritDoc}
-   * <p>Can statements remain open across rollbacks? They may, but this driver cannot guarantee that.
-   * In further contemplation, we are talking a Statement object here, so the answer is yes, since
-   * the Statement is only a vehicle to ExecSQL() in Connection</p>
-   *
-   * @return true
-   */
-  @Override
-  public boolean supportsOpenStatementsAcrossRollback() throws SQLException {
-    return true;
-  }
-
-  @Override
-  public int getMaxCharLiteralLength() throws SQLException {
-    return 0; // no limit
-  }
-
-  @Override
-  public int getMaxBinaryLiteralLength() throws SQLException {
-    return 0; // no limit
-  }
-
-  @Override
-  public int getMaxColumnNameLength() throws SQLException {
-    return getMaxNameLength();
-  }
-
-  @Override
-  public int getMaxColumnsInGroupBy() throws SQLException {
-    return 0; // no limit
-  }
-
-  @Override
-  public int getMaxColumnsInIndex() throws SQLException {
-    return getMaxIndexKeys();
-  }
-
-  @Override
-  public int getMaxColumnsInOrderBy() throws SQLException {
-    return 0; // no limit
-  }
-
-  @Override
-  public int getMaxColumnsInSelect() throws SQLException {
-    return 0; // no limit
-  }
-
-  /**
-   * {@inheritDoc} What is the maximum number of columns in a table? From the CREATE TABLE reference
-   * page...
-   *
-   * <p>"The new class is created as a heap with no initial data. A class can have no more than 1600
-   * attributes (realistically, this is limited by the fact that tuple sizes must be less than 8192
-   * bytes)..."</p>
-   *
-   * @return the max columns
-   * @throws SQLException if a database access error occurs
-   */
-  @Override
-  public int getMaxColumnsInTable() throws SQLException {
-    return 1600;
-  }
-
-  /**
-   * {@inheritDoc} How many active connection can we have at a time to this database? Well, since it
-   * depends on postmaster, which just does a listen() followed by an accept() and fork(), its
-   * basically very high. Unless the system runs out of processes, it can be 65535 (the number of
-   * aux. ports on a TCP/IP system). I will return 8192 since that is what even the largest system
-   * can realistically handle,
-   *
-   * @return the maximum number of connections
-   * @throws SQLException if a database access error occurs
-   */
-  @Override
-  public int getMaxConnections() throws SQLException {
-    return 8192;
-  }
-
-  @Override
-  public int getMaxCursorNameLength() throws SQLException {
-    return getMaxNameLength();
-  }
-
-  @Override
-  public int getMaxIndexLength() throws SQLException {
-    return 0; // no limit (larger than an int anyway)
-  }
-
-  @Override
-  public int getMaxSchemaNameLength() throws SQLException {
-    return getMaxNameLength();
-  }
-
-  @Override
-  public int getMaxProcedureNameLength() throws SQLException {
-    return getMaxNameLength();
-  }
-
-  @Override
-  public int getMaxCatalogNameLength() throws SQLException {
-    return getMaxNameLength();
-  }
-
-  @Override
-  public int getMaxRowSize() throws SQLException {
-    return 1073741824; // 1 GB
-  }
-
-  @Override
-  public boolean doesMaxRowSizeIncludeBlobs() throws SQLException {
-    return false;
-  }
-
-  @Override
-  public int getMaxStatementLength() throws SQLException {
-    return 0; // actually whatever fits in size_t
-  }
-
-  @Override
-  public int getMaxStatements() throws SQLException {
-    return 0;
-  }
-
-  @Override
-  public int getMaxTableNameLength() throws SQLException {
-    return getMaxNameLength();
-  }
-
-  @Override
-  public int getMaxTablesInSelect() throws SQLException {
-    return 0; // no limit
-  }
-
-  @Override
-  public int getMaxUserNameLength() throws SQLException {
-    return getMaxNameLength();
-  }
-
-  @Override
-  public int getDefaultTransactionIsolation() throws SQLException {
-    String sql =
-        "SELECT setting FROM pg_catalog.pg_settings WHERE name='default_transaction_isolation'";
-
-    try (Statement stmt = connection.createStatement();
-         ResultSet rs = stmt.executeQuery(sql)) {
-      String level = null;
-      if (rs.next()) {
-        level = rs.getString(1);
-      }
-      if (level == null) {
-        throw new PSQLException(
-            GT.tr(
-                "Unable to determine a value for DefaultTransactionIsolation due to missing "
-                    + " entry in pg_catalog.pg_settings WHERE name='default_transaction_isolation'."),
-            PSQLState.UNEXPECTED_ERROR);
-      }
-      // PostgreSQL returns the value in lower case, so using "toLowerCase" here would be
-      // slightly more efficient.
-      switch (level.toLowerCase(Locale.ROOT)) {
-        case "read uncommitted":
-          return Connection.TRANSACTION_READ_UNCOMMITTED;
-        case "repeatable read":
-          return Connection.TRANSACTION_REPEATABLE_READ;
-        case "serializable":
-          return Connection.TRANSACTION_SERIALIZABLE;
-        case "read committed":
-        default: // Best guess.
-          return Connection.TRANSACTION_READ_COMMITTED;
-      }
+    /**
+     * Add the user described by the given acl to the Lists of users with the privileges described by
+     * the acl.
+     */
+    private static void addACLPrivileges(String acl,
+                                         Map<String, Map<String, List<String[]>>> privileges) {
+        int equalIndex = acl.lastIndexOf("=");
+        int slashIndex = acl.lastIndexOf("/");
+        if (equalIndex == -1) {
+            return;
+        }
+
+        String user = acl.substring(0, equalIndex);
+        String grantor = null;
+        if (user.isEmpty()) {
+            user = "PUBLIC";
+        }
+        String privs;
+        if (slashIndex != -1) {
+            privs = acl.substring(equalIndex + 1, slashIndex);
+            grantor = acl.substring(slashIndex + 1, acl.length());
+        } else {
+            privs = acl.substring(equalIndex + 1, acl.length());
+        }
+
+        for (int i = 0; i < privs.length(); i++) {
+            char c = privs.charAt(i);
+            if (c != '*') {
+                String sqlpriv;
+                String grantable;
+                if (i < privs.length() - 1 && privs.charAt(i + 1) == '*') {
+                    grantable = "YES";
+                } else {
+                    grantable = "NO";
+                }
+                switch (c) {
+                    case 'a':
+                        sqlpriv = "INSERT";
+                        break;
+                    case 'r':
+                    case 'p':
+                        sqlpriv = "SELECT";
+                        break;
+                    case 'w':
+                        sqlpriv = "UPDATE";
+                        break;
+                    case 'd':
+                        sqlpriv = "DELETE";
+                        break;
+                    case 'D':
+                        sqlpriv = "TRUNCATE";
+                        break;
+                    case 'R':
+                        sqlpriv = "RULE";
+                        break;
+                    case 'x':
+                        sqlpriv = "REFERENCES";
+                        break;
+                    case 't':
+                        sqlpriv = "TRIGGER";
+                        break;
+                    // the following can't be granted to a table, but
+                    // we'll keep them for completeness.
+                    case 'X':
+                        sqlpriv = "EXECUTE";
+                        break;
+                    case 'U':
+                        sqlpriv = "USAGE";
+                        break;
+                    case 'C':
+                        sqlpriv = "CREATE";
+                        break;
+                    case 'T':
+                        sqlpriv = "CREATE TEMP";
+                        break;
+                    default:
+                        sqlpriv = "UNKNOWN";
+                }
+
+                Map<String, List<String[]>> usersWithPermission = privileges.get(sqlpriv);
+                if (usersWithPermission == null) {
+                    usersWithPermission = new HashMap<>();
+                    privileges.put(sqlpriv, usersWithPermission);
+                }
+
+                List<String[]> permissionByGrantor = usersWithPermission.get(user);
+                if (permissionByGrantor == null) {
+                    permissionByGrantor = new ArrayList<>();
+                    usersWithPermission.put(user, permissionByGrantor);
+                }
+
+                String[] grant = {grantor, grantable};
+                permissionByGrantor.add(grant);
+            }
+        }
     }
-  }
 
-  @Override
-  public boolean supportsTransactions() throws SQLException {
-    return true;
-  }
+    protected int getMaxIndexKeys() throws SQLException {
+        if (indexMaxKeys == 0) {
+            String sql;
+            sql = "SELECT setting FROM pg_catalog.pg_settings WHERE name='max_index_keys'";
 
-  /**
-   * {@inheritDoc}
-   * <p>We only support TRANSACTION_SERIALIZABLE and TRANSACTION_READ_COMMITTED before 8.0; from 8.0
-   * READ_UNCOMMITTED and REPEATABLE_READ are accepted aliases for READ_COMMITTED.</p>
-   */
-  @Override
-  public boolean supportsTransactionIsolationLevel(int level) throws SQLException {
-    switch (level) {
-      case Connection.TRANSACTION_READ_UNCOMMITTED:
-      case Connection.TRANSACTION_READ_COMMITTED:
-      case Connection.TRANSACTION_REPEATABLE_READ:
-      case Connection.TRANSACTION_SERIALIZABLE:
+            Statement stmt = connection.createStatement();
+            ResultSet rs = null;
+            try {
+                rs = stmt.executeQuery(sql);
+                if (!rs.next()) {
+                    stmt.close();
+                    throw new PSQLException(
+                            GT.tr(
+                                    "Unable to determine a value for MaxIndexKeys due to missing system catalog data."),
+                            PSQLState.UNEXPECTED_ERROR);
+                }
+                indexMaxKeys = rs.getInt(1);
+            } finally {
+                JdbcBlackHole.close(rs);
+                JdbcBlackHole.close(stmt);
+            }
+        }
+        return indexMaxKeys;
+    }
+
+    protected int getMaxNameLength() throws SQLException {
+        if (nameDataLength == 0) {
+            String sql;
+            sql = "SELECT t.typlen FROM pg_catalog.pg_type t, pg_catalog.pg_namespace n "
+                    + "WHERE t.typnamespace=n.oid AND t.typname='name' AND n.nspname='pg_catalog'";
+
+            Statement stmt = connection.createStatement();
+            ResultSet rs = null;
+            try {
+                rs = stmt.executeQuery(sql);
+                if (!rs.next()) {
+                    throw new PSQLException(GT.tr("Unable to find name datatype in the system catalogs."),
+                            PSQLState.UNEXPECTED_ERROR);
+                }
+                nameDataLength = rs.getInt("typlen");
+            } finally {
+                JdbcBlackHole.close(rs);
+                JdbcBlackHole.close(stmt);
+            }
+        }
+        return nameDataLength - 1;
+    }
+
+    @Override
+    public boolean allProceduresAreCallable() throws SQLException {
+        return true; // For now...
+    }
+
+    @Override
+    public boolean allTablesAreSelectable() throws SQLException {
+        return true; // For now...
+    }
+
+    @Override
+    public String getURL() throws SQLException {
+        return connection.getURL();
+    }
+
+    @Override
+    public String getUserName() throws SQLException {
+        return connection.getUserName();
+    }
+
+    @Override
+    public boolean isReadOnly() throws SQLException {
+        return connection.isReadOnly();
+    }
+
+    @Override
+    public boolean nullsAreSortedHigh() throws SQLException {
         return true;
-      default:
+    }
+
+    @Override
+    public boolean nullsAreSortedLow() throws SQLException {
         return false;
     }
-  }
 
-  @Override
-  public boolean supportsDataDefinitionAndDataManipulationTransactions() throws SQLException {
-    return true;
-  }
-
-  @Override
-  public boolean supportsDataManipulationTransactionsOnly() throws SQLException {
-    return false;
-  }
-
-  /**
-   * <p>Does a data definition statement within a transaction force the transaction to commit? It seems
-   * to mean something like:</p>
-   *
-   * <pre>
-   * CREATE TABLE T (A INT);
-   * INSERT INTO T (A) VALUES (2);
-   * BEGIN;
-   * UPDATE T SET A = A + 1;
-   * CREATE TABLE X (A INT);
-   * SELECT A FROM T INTO X;
-   * COMMIT;
-   * </pre>
-   *
-   * <p>Does the CREATE TABLE call cause a commit? The answer is no.</p>
-   *
-   * @return true if so
-   * @throws SQLException if a database access error occurs
-   */
-  @Override
-  public boolean dataDefinitionCausesTransactionCommit() throws SQLException {
-    return false;
-  }
-
-  @Override
-  public boolean dataDefinitionIgnoredInTransactions() throws SQLException {
-    return false;
-  }
-
-  /**
-   * Turn the provided value into a valid string literal for direct inclusion into a query. This
-   * includes the single quotes needed around it.
-   *
-   * @param s input value
-   *
-   * @return string literal for direct inclusion into a query
-   * @throws SQLException if something wrong happens
-   */
-  protected String escapeQuotes(String s) throws SQLException {
-    StringBuilder sb = new StringBuilder();
-    if (!connection.getStandardConformingStrings()) {
-      sb.append("E");
+    @Override
+    public boolean nullsAreSortedAtStart() throws SQLException {
+        return false;
     }
-    sb.append("'");
-    sb.append(connection.escapeString(s));
-    sb.append("'");
-    return sb.toString();
-  }
 
-  @Override
-  public ResultSet getProcedures(String catalog, String schemaPattern,
-      String procedureNamePattern)
-      throws SQLException {
-    String sql;
-    sql = "SELECT NULL AS PROCEDURE_CAT, n.nspname AS PROCEDURE_SCHEM, p.proname AS PROCEDURE_NAME, "
-          + "NULL, NULL, NULL, d.description AS REMARKS, "
-          + DatabaseMetaData.procedureReturnsResult + " AS PROCEDURE_TYPE, "
-          + " p.proname || '_' || p.oid AS SPECIFIC_NAME "
-          + " FROM pg_catalog.pg_namespace n, pg_catalog.pg_proc p "
-          + " LEFT JOIN pg_catalog.pg_description d ON (p.oid=d.objoid) "
-          + " LEFT JOIN pg_catalog.pg_class c ON (d.classoid=c.oid AND c.relname='pg_proc') "
-          + " LEFT JOIN pg_catalog.pg_namespace pn ON (c.relnamespace=pn.oid AND pn.nspname='pg_catalog') "
-          + " WHERE p.pronamespace=n.oid ";
-
-    if (connection.haveMinimumServerVersion(ServerVersion.v11)) {
-      sql += " AND p.prokind='p'";
+    @Override
+    public boolean nullsAreSortedAtEnd() throws SQLException {
+        return false;
     }
-    if (schemaPattern != null && !schemaPattern.isEmpty()) {
-      sql += " AND n.nspname LIKE " + escapeQuotes(schemaPattern);
+
+    /**
+     * Retrieves the name of this database product. We hope that it is PostgreSQL, so we return that
+     * explicitly.
+     *
+     * @return "PostgreSQL"
+     */
+    @Override
+    public String getDatabaseProductName() throws SQLException {
+        return "PostgreSQL";
     }
-    if (procedureNamePattern != null && !procedureNamePattern.isEmpty()) {
-      sql += " AND p.proname LIKE " + escapeQuotes(procedureNamePattern);
+
+    @Override
+    public String getDatabaseProductVersion() throws SQLException {
+        return connection.getDBVersionNumber();
     }
-    if (connection.getHideUnprivilegedObjects()) {
-      sql += " AND has_function_privilege(p.oid,'EXECUTE')";
+
+    @Override
+    public String getDriverName() {
+        return DriverInfo.DRIVER_NAME;
     }
-    sql += " ORDER BY PROCEDURE_SCHEM, PROCEDURE_NAME, p.oid::text ";
 
-    return createMetaDataStatement().executeQuery(sql);
-  }
-
-  @Override
-  public ResultSet getProcedureColumns(String catalog, String schemaPattern,
-      String procedureNamePattern, String columnNamePattern)
-      throws SQLException {
-    int columns = 20;
-
-    Field[] f = new Field[columns];
-    List<Tuple> v = new ArrayList<>(); // The new ResultSet tuple stuff
-
-    f[0] = new Field("PROCEDURE_CAT", Oid.VARCHAR);
-    f[1] = new Field("PROCEDURE_SCHEM", Oid.VARCHAR);
-    f[2] = new Field("PROCEDURE_NAME", Oid.VARCHAR);
-    f[3] = new Field("COLUMN_NAME", Oid.VARCHAR);
-    f[4] = new Field("COLUMN_TYPE", Oid.INT2);
-    f[5] = new Field("DATA_TYPE", Oid.INT2);
-    f[6] = new Field("TYPE_NAME", Oid.VARCHAR);
-    f[7] = new Field("PRECISION", Oid.INT4);
-    f[8] = new Field("LENGTH", Oid.INT4);
-    f[9] = new Field("SCALE", Oid.INT2);
-    f[10] = new Field("RADIX", Oid.INT2);
-    f[11] = new Field("NULLABLE", Oid.INT2);
-    f[12] = new Field("REMARKS", Oid.VARCHAR);
-    f[13] = new Field("COLUMN_DEF", Oid.VARCHAR);
-    f[14] = new Field("SQL_DATA_TYPE", Oid.INT4);
-    f[15] = new Field("SQL_DATETIME_SUB", Oid.INT4);
-    f[16] = new Field("CHAR_OCTET_LENGTH", Oid.INT4);
-    f[17] = new Field("ORDINAL_POSITION", Oid.INT4);
-    f[18] = new Field("IS_NULLABLE", Oid.VARCHAR);
-    f[19] = new Field("SPECIFIC_NAME", Oid.VARCHAR);
-
-    String sql;
-    sql = "SELECT n.nspname,p.proname,p.prorettype,p.proargtypes, t.typtype,t.typrelid, "
-          + " p.proargnames, p.proargmodes, p.proallargtypes, p.oid "
-          + " FROM pg_catalog.pg_proc p, pg_catalog.pg_namespace n, pg_catalog.pg_type t "
-          + " WHERE p.pronamespace=n.oid AND p.prorettype=t.oid ";
-    if (schemaPattern != null && !schemaPattern.isEmpty()) {
-      sql += " AND n.nspname LIKE " + escapeQuotes(schemaPattern);
+    @Override
+    public String getDriverVersion() {
+        return DriverInfo.DRIVER_VERSION;
     }
-    if (procedureNamePattern != null && !procedureNamePattern.isEmpty()) {
-      sql += " AND p.proname LIKE " + escapeQuotes(procedureNamePattern);
+
+    @Override
+    public int getDriverMajorVersion() {
+        return DriverInfo.MAJOR_VERSION;
     }
-    sql += " ORDER BY n.nspname, p.proname, p.oid::text ";
 
-    byte[] isnullableUnknown = new byte[0];
+    @Override
+    public int getDriverMinorVersion() {
+        return DriverInfo.MINOR_VERSION;
+    }
 
-    Statement stmt = connection.createStatement();
-    ResultSet rs = stmt.executeQuery(sql);
-    while (rs.next()) {
-      byte[] schema = rs.getBytes("nspname");
-      byte[] procedureName = rs.getBytes("proname");
-      byte[] specificName =
-                connection.encodeString(rs.getString("proname") + "_" + rs.getString("oid"));
-      int returnType = (int) rs.getLong("prorettype");
-      String returnTypeType = rs.getString("typtype");
-      int returnTypeRelid = (int) rs.getLong("typrelid");
+    /**
+     * Does the database store tables in a local file? No - it stores them in a file on the server.
+     *
+     * @return true if so
+     * @throws SQLException if a database access error occurs
+     */
+    @Override
+    public boolean usesLocalFiles() throws SQLException {
+        return false;
+    }
 
-      String strArgTypes = rs.getString("proargtypes");
-      StringTokenizer st = new StringTokenizer(strArgTypes);
-      List<Long> argTypes = new ArrayList<>();
-      while (st.hasMoreTokens()) {
-        argTypes.add(Long.valueOf(st.nextToken()));
-      }
+    /**
+     * Does the database use a file for each table? Well, not really, since it doesn't use local files.
+     *
+     * @return true if so
+     * @throws SQLException if a database access error occurs
+     */
+    @Override
+    public boolean usesLocalFilePerTable() throws SQLException {
+        return false;
+    }
 
-      String[] argNames = null;
-      Array argNamesArray = rs.getArray("proargnames");
-      if (argNamesArray != null) {
-        argNames = (String[]) argNamesArray.getArray();
-      }
+    /**
+     * Does the database treat mixed case unquoted SQL identifiers as case sensitive and as a result
+     * store them in mixed case? A JDBC-Compliant driver will always return false.
+     *
+     * @return false; unquoted identifiers are case folded
+     * @throws SQLException if a database access error occurs
+     */
+    @Override
+    public boolean supportsMixedCaseIdentifiers() throws SQLException {
+        return false;
+    }
 
-      String[] argModes = null;
-      Array argModesArray = rs.getArray("proargmodes");
-      if (argModesArray != null) {
-        argModes = (String[]) argModesArray.getArray();
-      }
+    /** Unquoted identifiers are not stored in upper case. */
+    @Override
+    public boolean storesUpperCaseIdentifiers() throws SQLException {
+        return false;
+    }

+    /** Unquoted identifiers are folded to and stored in lower case. */
+    @Override
+    public boolean storesLowerCaseIdentifiers() throws SQLException {
+        return true;
+    }

+    /** Unquoted identifiers are not stored in mixed case. */
+    @Override
+    public boolean storesMixedCaseIdentifiers() throws SQLException {
+        return false;
+    }
 
-      // decide if we are returning a single column result.
-      if ("b".equals(returnTypeType) || "d".equals(returnTypeType) || "e".equals(returnTypeType)
-          || ("p".equals(returnTypeType) && argModesArray == null)) {
-        byte[] [] tuple = new byte[columns][];
-        tuple[0] = null;
-        tuple[1] = schema;
-        tuple[2] = procedureName;
-        tuple[3] = connection.encodeString("returnValue");
-        tuple[4] = connection
-            .encodeString(Integer.toString(DatabaseMetaData.procedureColumnReturn));
-        tuple[5] = connection
-            .encodeString(Integer.toString(connection.getTypeInfo().getSQLType(returnType)));
-        tuple[6] = connection.encodeString(connection.getTypeInfo().getPGType(returnType));
-        tuple[7] = null;
-        tuple[8] = null;
-        tuple[9] = null;
-        tuple[10] = null;
-        tuple[11] = connection
-            .encodeString(Integer.toString(DatabaseMetaData.procedureNullableUnknown));
-        tuple[12] = null;
-        tuple[17] = connection.encodeString(Integer.toString(0));
-        tuple[18] = isnullableUnknown;
-        tuple[19] = specificName;
+    /**
+     * Does the database treat mixed case quoted SQL identifiers as case sensitive and as a result
+     * store them in mixed case? A JDBC compliant driver will always return true.
+     *
+     * @return true; quoted identifiers preserve their case
+     * @throws SQLException if a database access error occurs
+     */
+    @Override
+    public boolean supportsMixedCaseQuotedIdentifiers() throws SQLException {
+        return true;
+    }
 
-        v.add(new Tuple(tuple));
-      }
+    /** Quoted identifiers are stored as written, so they are not treated as stored in upper case. */
+    @Override
+    public boolean storesUpperCaseQuotedIdentifiers() throws SQLException {
+        return false;
+    }

+    /** Quoted identifiers are stored as written, so they are not treated as stored in lower case. */
+    @Override
+    public boolean storesLowerCaseQuotedIdentifiers() throws SQLException {
+        return false;
+    }

+    /** Quoted identifiers are case sensitive, so the "case-insensitive, stored mixed" notion does not apply. */
+    @Override
+    public boolean storesMixedCaseQuotedIdentifiers() throws SQLException {
+        return false;
+    }
+
+    /**
+     * What is the string used to quote SQL identifiers? This returns a space if identifier quoting
+     * isn't supported. A JDBC Compliant driver will always use a double quote character.
+     *
+     * @return the quoting string, a double quote
+     * @throws SQLException if a database access error occurs
+     */
+    @Override
+    public String getIdentifierQuoteString() throws SQLException {
+        return "\"";
+    }
+
+    /**
+     * {@inheritDoc}
+     *
+     * <p>From PostgreSQL 9.0+ return the keywords from pg_catalog.pg_get_keywords()</p>
+     *
+     * @return a comma separated list of keywords we use
+     * @throws SQLException if a database access error occurs
+     */
+    @Override
+    public String getSQLKeywords() throws SQLException {
+        connection.checkClosed();
+        // Read the lazily-computed cache once into a local.
+        String keywords = this.keywords;
+        if (keywords == null) {
+            if (connection.haveMinimumServerVersion(ServerVersion.v9_0)) {
+                // Exclude SQL:2003 keywords (https://github.com/ronsavage/SQL/blob/master/sql-2003-2.bnf)
+                // from the returned list, ugly but required by jdbc spec.
+                String sql = "select string_agg(word, ',') from pg_catalog.pg_get_keywords() "
+                        + "where word <> ALL ('{a,abs,absolute,action,ada,add,admin,after,all,allocate,alter,"
+                        + "always,and,any,are,array,as,asc,asensitive,assertion,assignment,asymmetric,at,atomic,"
+                        + "attribute,attributes,authorization,avg,before,begin,bernoulli,between,bigint,binary,"
+                        + "blob,boolean,both,breadth,by,c,call,called,cardinality,cascade,cascaded,case,cast,"
+                        + "catalog,catalog_name,ceil,ceiling,chain,char,char_length,character,character_length,"
+                        + "character_set_catalog,character_set_name,character_set_schema,characteristics,"
+                        + "characters,check,checked,class_origin,clob,close,coalesce,cobol,code_units,collate,"
+                        + "collation,collation_catalog,collation_name,collation_schema,collect,column,"
+                        + "column_name,command_function,command_function_code,commit,committed,condition,"
+                        + "condition_number,connect,connection_name,constraint,constraint_catalog,constraint_name,"
+                        + "constraint_schema,constraints,constructors,contains,continue,convert,corr,"
+                        + "corresponding,count,covar_pop,covar_samp,create,cross,cube,cume_dist,current,"
+                        + "current_collation,current_date,current_default_transform_group,current_path,"
+                        + "current_role,current_time,current_timestamp,current_transform_group_for_type,current_user,"
+                        + "cursor,cursor_name,cycle,data,date,datetime_interval_code,datetime_interval_precision,"
+                        + "day,deallocate,dec,decimal,declare,default,defaults,deferrable,deferred,defined,definer,"
+                        + "degree,delete,dense_rank,depth,deref,derived,desc,describe,descriptor,deterministic,"
+                        + "diagnostics,disconnect,dispatch,distinct,domain,double,drop,dynamic,dynamic_function,"
+                        + "dynamic_function_code,each,element,else,end,end-exec,equals,escape,every,except,"
+                        + "exception,exclude,excluding,exec,execute,exists,exp,external,extract,false,fetch,filter,"
+                        + "final,first,float,floor,following,for,foreign,fortran,found,free,from,full,function,"
+                        + "fusion,g,general,get,global,go,goto,grant,granted,group,grouping,having,hierarchy,hold,"
+                        + "hour,identity,immediate,implementation,in,including,increment,indicator,initially,"
+                        + "inner,inout,input,insensitive,insert,instance,instantiable,int,integer,intersect,"
+                        + "intersection,interval,into,invoker,is,isolation,join,k,key,key_member,key_type,language,"
+                        + "large,last,lateral,leading,left,length,level,like,ln,local,localtime,localtimestamp,"
+                        + "locator,lower,m,map,match,matched,max,maxvalue,member,merge,message_length,"
+                        + "message_octet_length,message_text,method,min,minute,minvalue,mod,modifies,module,month,"
+                        + "more,multiset,mumps,name,names,national,natural,nchar,nclob,nesting,new,next,no,none,"
+                        + "normalize,normalized,not,\"null\",nullable,nullif,nulls,number,numeric,object,"
+                        + "octet_length,octets,of,old,on,only,open,option,options,or,order,ordering,ordinality,"
+                        + "others,out,outer,output,over,overlaps,overlay,overriding,pad,parameter,parameter_mode,"
+                        + "parameter_name,parameter_ordinal_position,parameter_specific_catalog,"
+                        + "parameter_specific_name,parameter_specific_schema,partial,partition,pascal,path,"
+                        + "percent_rank,percentile_cont,percentile_disc,placing,pli,position,power,preceding,"
+                        + "precision,prepare,preserve,primary,prior,privileges,procedure,public,range,rank,read,"
+                        + "reads,real,recursive,ref,references,referencing,regr_avgx,regr_avgy,regr_count,"
+                        + "regr_intercept,regr_r2,regr_slope,regr_sxx,regr_sxy,regr_syy,relative,release,"
+                        + "repeatable,restart,result,return,returned_cardinality,returned_length,"
+                        + "returned_octet_length,returned_sqlstate,returns,revoke,right,role,rollback,rollup,"
+                        + "routine,routine_catalog,routine_name,routine_schema,row,row_count,row_number,rows,"
+                        + "savepoint,scale,schema,schema_name,scope_catalog,scope_name,scope_schema,scroll,"
+                        + "search,second,section,security,select,self,sensitive,sequence,serializable,server_name,"
+                        + "session,session_user,set,sets,similar,simple,size,smallint,some,source,space,specific,"
+                        + "specific_name,specifictype,sql,sqlexception,sqlstate,sqlwarning,sqrt,start,state,"
+                        + "statement,static,stddev_pop,stddev_samp,structure,style,subclass_origin,submultiset,"
+                        + "substring,sum,symmetric,system,system_user,table,table_name,tablesample,temporary,then,"
+                        + "ties,time,timestamp,timezone_hour,timezone_minute,to,top_level_count,trailing,"
+                        + "transaction,transaction_active,transactions_committed,transactions_rolled_back,"
+                        + "transform,transforms,translate,translation,treat,trigger,trigger_catalog,trigger_name,"
+                        + "trigger_schema,trim,true,type,uescape,unbounded,uncommitted,under,union,unique,unknown,"
+                        + "unnamed,unnest,update,upper,usage,user,user_defined_type_catalog,user_defined_type_code,"
+                        + "user_defined_type_name,user_defined_type_schema,using,value,values,var_pop,var_samp,"
+                        + "varchar,varying,view,when,whenever,where,width_bucket,window,with,within,without,work,"
+                        + "write,year,zone}'::text[])";
+
+                Statement stmt = null;
+                ResultSet rs = null;
+                try {
+                    stmt = connection.createStatement();
+                    rs = stmt.executeQuery(sql);
+                    if (!rs.next()) {
+                        throw new PSQLException(GT.tr("Unable to find keywords in the system catalogs."),
+                                PSQLState.UNEXPECTED_ERROR);
+                    }
+                    keywords = rs.getString(1);
+                } finally {
+                    // NOTE(review): JdbcBlackHole.close appears to be a null-safe close helper that
+                    // swallows close() failures — verify before relying on that.
+                    JdbcBlackHole.close(rs);
+                    JdbcBlackHole.close(stmt);
+                }
+            } else {
+                // Static list from PG8.2 src/backend/parser/keywords.c with SQL:2003 excluded.
+                keywords = "abort,access,aggregate,also,analyse,analyze,backward,bit,cache,checkpoint,class,"
+                        + "cluster,comment,concurrently,connection,conversion,copy,csv,database,delimiter,"
+                        + "delimiters,disable,do,enable,encoding,encrypted,exclusive,explain,force,forward,freeze,"
+                        + "greatest,handler,header,if,ilike,immutable,implicit,index,indexes,inherit,inherits,"
+                        + "instead,isnull,least,limit,listen,load,location,lock,mode,move,nothing,notify,notnull,"
+                        + "nowait,off,offset,oids,operator,owned,owner,password,prepared,procedural,quote,reassign,"
+                        + "recheck,reindex,rename,replace,reset,restrict,returning,rule,setof,share,show,stable,"
+                        + "statistics,stdin,stdout,storage,strict,sysid,tablespace,temp,template,truncate,trusted,"
+                        + "unencrypted,unlisten,until,vacuum,valid,validator,verbose,volatile";
+            }
+            // Cache the computed list for subsequent calls.
+            this.keywords = keywords;
+        }
+        return keywords;
+    }
+
+    /** Comma separated list of numeric functions supported via the {fn ...} escape syntax. */
+    @Override
+    @SuppressWarnings("deprecation")
+    public String getNumericFunctions() throws SQLException {
+        return String.join(",",
+                EscapedFunctions.ABS, EscapedFunctions.ACOS, EscapedFunctions.ASIN,
+                EscapedFunctions.ATAN, EscapedFunctions.ATAN2, EscapedFunctions.CEILING,
+                EscapedFunctions.COS, EscapedFunctions.COT, EscapedFunctions.DEGREES,
+                EscapedFunctions.EXP, EscapedFunctions.FLOOR, EscapedFunctions.LOG,
+                EscapedFunctions.LOG10, EscapedFunctions.MOD, EscapedFunctions.PI,
+                EscapedFunctions.POWER, EscapedFunctions.RADIANS, EscapedFunctions.ROUND,
+                EscapedFunctions.SIGN, EscapedFunctions.SIN, EscapedFunctions.SQRT,
+                EscapedFunctions.TAN, EscapedFunctions.TRUNCATE);
+    }
+
+    /** Comma separated list of string functions supported via the {fn ...} escape syntax. */
+    @Override
+    @SuppressWarnings("deprecation")
+    public String getStringFunctions() throws SQLException {
+        // INSERT, LOCATE and RIGHT are deliberately omitted: they currently don't work
+        // correctly with parameterized arguments. The query rewrite reorders the arguments
+        // but provides no translation layer, so a setObject(N, obj) would bind to the
+        // wrong parameter.
+        return String.join(",",
+                EscapedFunctions.ASCII, EscapedFunctions.CHAR, EscapedFunctions.CONCAT,
+                EscapedFunctions.LCASE, EscapedFunctions.LEFT, EscapedFunctions.LENGTH,
+                EscapedFunctions.LTRIM, EscapedFunctions.REPEAT, EscapedFunctions.RTRIM,
+                EscapedFunctions.SPACE, EscapedFunctions.SUBSTRING, EscapedFunctions.UCASE,
+                EscapedFunctions.REPLACE);
+    }
+
+    /** Comma separated list of system functions supported via the {fn ...} escape syntax. */
+    @Override
+    @SuppressWarnings("deprecation")
+    public String getSystemFunctions() throws SQLException {
+        return EscapedFunctions.DATABASE + ',' + EscapedFunctions.IFNULL + ',' + EscapedFunctions.USER;
+    }
+
+    /** Comma separated list of time/date functions supported via the {fn ...} escape syntax. */
+    @Override
+    @SuppressWarnings("deprecation")
+    public String getTimeDateFunctions() throws SQLException {
+        // TIMESTAMPDIFF is deliberately not listed.
+        return String.join(",",
+                EscapedFunctions.CURDATE, EscapedFunctions.CURTIME, EscapedFunctions.DAYNAME,
+                EscapedFunctions.DAYOFMONTH, EscapedFunctions.DAYOFWEEK, EscapedFunctions.DAYOFYEAR,
+                EscapedFunctions.HOUR, EscapedFunctions.MINUTE, EscapedFunctions.MONTH,
+                EscapedFunctions.MONTHNAME, EscapedFunctions.NOW, EscapedFunctions.QUARTER,
+                EscapedFunctions.SECOND, EscapedFunctions.WEEK, EscapedFunctions.YEAR,
+                EscapedFunctions.TIMESTAMPADD);
+    }
+
+    /**
+     * {@inheritDoc}
+     *
+     * @return a single backslash
+     */
+    @Override
+    public String getSearchStringEscape() throws SQLException {
+        // This method originally returned "\\\\" assuming that it
+        // would be fed directly into pg's input parser so it would
+        // need two backslashes. This isn't how it's supposed to be
+        // used though. If passed as a PreparedStatement parameter
+        // or fed to a DatabaseMetaData method then double backslashes
+        // are incorrect. If you're feeding something directly into
+        // a query you are responsible for correctly escaping it.
+        // With 8.2+ this escaping is a little trickier because you
+        // must know the setting of standard_conforming_strings, but
+        // that's not our problem.
+
+        return "\\";
+    }
+
+    /**
+     * {@inheritDoc}
+     *
+     * <p>PostgreSQL allows any high-bit character to be used in an unquoted identifier, so we can't
+     * possibly list them all.</p>
+     *
+     * <p>From the file src/backend/parser/scan.l, an identifier is ident_start [A-Za-z\200-\377_]
+     * ident_cont [A-Za-z\200-\377_0-9\$] identifier {ident_start}{ident_cont}*</p>
+     *
+     * @return a string containing the extra characters
+     * @throws SQLException if a database access error occurs
+     */
+    @Override
+    public String getExtraNameCharacters() throws SQLException {
+        return "";
+    }
+
+    /**
+     * {@inheritDoc}
+     *
+     * @return true; supported since PostgreSQL 6.1
+     */
+    @Override
+    public boolean supportsAlterTableWithAddColumn() throws SQLException {
+        return true;
+    }
+
+    /**
+     * {@inheritDoc}
+     *
+     * @return true; supported since PostgreSQL 7.3
+     */
+    @Override
+    public boolean supportsAlterTableWithDropColumn() throws SQLException {
+        return true;
+    }
+
+    /** Column aliasing with {@code AS} is supported. */
+    @Override
+    public boolean supportsColumnAliasing() throws SQLException {
+        return true;
+    }
+
+    /** Concatenating NULL with a non-NULL value yields NULL. */
+    @Override
+    public boolean nullPlusNonNullIsNull() throws SQLException {
+        return true;
+    }
+
+    /** The JDBC {@code CONVERT} scalar escape is not supported. */
+    @Override
+    public boolean supportsConvert() throws SQLException {
+        return false;
+    }
+
+    /** The JDBC {@code CONVERT} scalar escape is not supported for any type pair. */
+    @Override
+    public boolean supportsConvert(int fromType, int toType) throws SQLException {
+        return false;
+    }
+
+    /** Table correlation names (aliases) are supported. */
+    @Override
+    public boolean supportsTableCorrelationNames() throws SQLException {
+        return true;
+    }
+
+    /** Correlation names are not restricted to differ from table names. */
+    @Override
+    public boolean supportsDifferentTableCorrelationNames() throws SQLException {
+        return false;
+    }
+
+    /** Expressions are allowed in {@code ORDER BY}. */
+    @Override
+    public boolean supportsExpressionsInOrderBy() throws SQLException {
+        return true;
+    }
+
+    /**
+     * {@inheritDoc}
+     *
+     * @return true; supported since PostgreSQL 6.4
+     */
+    @Override
+    public boolean supportsOrderByUnrelated() throws SQLException {
+        return true;
+    }
+
+    /** {@code GROUP BY} is supported. */
+    @Override
+    public boolean supportsGroupBy() throws SQLException {
+        return true;
+    }
+
+    /**
+     * {@inheritDoc}
+     *
+     * @return true; supported since PostgreSQL 6.4
+     */
+    @Override
+    public boolean supportsGroupByUnrelated() throws SQLException {
+        return true;
+    }
+
+    /**
+     * {@inheritDoc}
+     *
+     * @return true; supported since PostgreSQL 6.4
+     */
+    @Override
+    public boolean supportsGroupByBeyondSelect() throws SQLException {
+        return true;
+    }
+
+    /**
+     * {@inheritDoc}
+     *
+     * @return true; supported since PostgreSQL 7.1
+     */
+    @Override
+    public boolean supportsLikeEscapeClause() throws SQLException {
+        return true;
+    }
+
+    /** Multiple ResultSets from a single execute are supported. */
+    @Override
+    public boolean supportsMultipleResultSets() throws SQLException {
+        return true;
+    }
+
+    /** Multiple transactions on different connections can be open at once. */
+    @Override
+    public boolean supportsMultipleTransactions() throws SQLException {
+        return true;
+    }
+
+    /** Columns can be defined {@code NOT NULL}. */
+    @Override
+    public boolean supportsNonNullableColumns() throws SQLException {
+        return true;
+    }
+
+    /**
+     * {@inheritDoc}
+     *
+     * <p>This grammar is defined at:
+     * <a href="http://www.microsoft.com/msdn/sdk/platforms/doc/odbc/src/intropr.htm">
+     * http://www.microsoft.com/msdn/sdk/platforms/doc/odbc/src/intropr.htm</a>
+     * (historical link; see the ODBC documentation for the current location).</p>
+     *
+     * <p>In Appendix C. From this description, we seem to support the ODBC minimal (Level 0) grammar.</p>
+     *
+     * @return true
+     */
+    @Override
+    public boolean supportsMinimumSQLGrammar() throws SQLException {
+        return true;
+    }
+
+    /**
+     * Does this driver support the Core ODBC SQL grammar? We need SQL-92 conformance for this.
+     *
+     * @return false
+     * @throws SQLException if a database access error occurs
+     */
+    @Override
+    public boolean supportsCoreSQLGrammar() throws SQLException {
+        return false;
+    }
+
+    /**
+     * Does this driver support the Extended (Level 2) ODBC SQL grammar? We don't conform to the Core
+     * (Level 1), so we can't conform to the Extended SQL Grammar.
+     *
+     * @return false
+     * @throws SQLException if a database access error occurs
+     */
+    @Override
+    public boolean supportsExtendedSQLGrammar() throws SQLException {
+        return false;
+    }
+
+    /**
+     * Does this driver support the ANSI-92 entry level SQL grammar? All JDBC Compliant drivers must
+     * return true.
+     *
+     * @return true
+     * @throws SQLException if a database access error occurs
+     */
+    @Override
+    public boolean supportsANSI92EntryLevelSQL() throws SQLException {
+        return true;
+    }
+
+    /**
+     * {@inheritDoc}
+     *
+     * @return false
+     */
+    @Override
+    public boolean supportsANSI92IntermediateSQL() throws SQLException {
+        return false;
+    }
+
+    /**
+     * {@inheritDoc}
+     *
+     * @return false
+     */
+    @Override
+    public boolean supportsANSI92FullSQL() throws SQLException {
+        return false;
+    }
+
+    /**
+     * Is the SQL Integrity Enhancement Facility supported? Our best guess is that this means support
+     * for constraints.
+     *
+     * @return true
+     * @throws SQLException if a database access error occurs
+     */
+    @Override
+    public boolean supportsIntegrityEnhancementFacility() throws SQLException {
+        return true;
+    }
+
+    /**
+     * {@inheritDoc}
+     *
+     * @return true; outer joins are supported since PostgreSQL 7.1
+     */
+    @Override
+    public boolean supportsOuterJoins() throws SQLException {
+        return true;
+    }
+
+    /**
+     * {@inheritDoc}
+     *
+     * @return true; full outer joins are supported since PostgreSQL 7.1
+     */
+    @Override
+    public boolean supportsFullOuterJoins() throws SQLException {
+        return true;
+    }
+
+    /**
+     * {@inheritDoc}
+     *
+     * @return true; supported since PostgreSQL 7.1
+     */
+    @Override
+    public boolean supportsLimitedOuterJoins() throws SQLException {
+        return true;
+    }
+
+    /**
+     * {@inheritDoc}
+     * <p>PostgreSQL supports schemas; the vendor term for them is "schema".</p>
+     *
+     * @return {@code "schema"}
+     */
+    @Override
+    public String getSchemaTerm() throws SQLException {
+        return "schema";
+    }
+
+    /**
+     * {@inheritDoc}
+     *
+     * @return {@code "function"}
+     */
+    @Override
+    public String getProcedureTerm() throws SQLException {
+        return "function";
+    }
+
+    /**
+     * {@inheritDoc}
+     *
+     * @return {@code "database"}
+     */
+    @Override
+    public String getCatalogTerm() throws SQLException {
+        return "database";
+    }
+
+    /** A catalog name, when used, appears at the start of a fully qualified name. */
+    @Override
+    public boolean isCatalogAtStart() throws SQLException {
+        return true;
+    }
+
+    /** The separator between a catalog name and the rest of a qualified name. */
+    @Override
+    public String getCatalogSeparator() throws SQLException {
+        return ".";
+    }
+
+    /**
+     * {@inheritDoc}
+     *
+     * @return true; schemas are supported since PostgreSQL 7.3
+     */
+    @Override
+    public boolean supportsSchemasInDataManipulation() throws SQLException {
+        return true;
+    }
+
+    /**
+     * {@inheritDoc}
+     *
+     * @return true; schemas are supported since PostgreSQL 7.3
+     */
+    @Override
+    public boolean supportsSchemasInProcedureCalls() throws SQLException {
+        return true;
+    }
+
+    /**
+     * {@inheritDoc}
+     *
+     * @return true; schemas are supported since PostgreSQL 7.3
+     */
+    @Override
+    public boolean supportsSchemasInTableDefinitions() throws SQLException {
+        return true;
+    }
+
+    /**
+     * {@inheritDoc}
+     *
+     * @return true; schemas are supported since PostgreSQL 7.3
+     */
+    @Override
+    public boolean supportsSchemasInIndexDefinitions() throws SQLException {
+        return true;
+    }
+
+    /**
+     * {@inheritDoc}
+     *
+     * @return true; schemas are supported since PostgreSQL 7.3
+     */
+    @Override
+    public boolean supportsSchemasInPrivilegeDefinitions() throws SQLException {
+        return true;
+    }
+
+    /** Catalog (database) names cannot qualify objects in data manipulation statements. */
+    @Override
+    public boolean supportsCatalogsInDataManipulation() throws SQLException {
+        return false;
+    }
+
+    /** Catalog names cannot qualify procedure calls. */
+    @Override
+    public boolean supportsCatalogsInProcedureCalls() throws SQLException {
+        return false;
+    }
+
+    /** Catalog names cannot qualify table definitions. */
+    @Override
+    public boolean supportsCatalogsInTableDefinitions() throws SQLException {
+        return false;
+    }
+
+    /** Catalog names cannot qualify index definitions. */
+    @Override
+    public boolean supportsCatalogsInIndexDefinitions() throws SQLException {
+        return false;
+    }
+
+    /** Catalog names cannot qualify privilege definitions. */
+    @Override
+    public boolean supportsCatalogsInPrivilegeDefinitions() throws SQLException {
+        return false;
+    }
+
+    /**
+     * We support cursors for gets only it seems. I don't see a method to get a positioned delete.
+     *
+     * @return false
+     * @throws SQLException if a database access error occurs
+     */
+    @Override
+    public boolean supportsPositionedDelete() throws SQLException {
+        return false; // For now...
+    }
+
+    /** Positioned UPDATE is not supported, same as positioned DELETE. */
+    @Override
+    public boolean supportsPositionedUpdate() throws SQLException {
+        return false; // For now...
+    }
+    }
+
+    /**
+     * {@inheritDoc}
+     *
+     * @return true; supported since PostgreSQL 6.5
+     */
+    @Override
+    public boolean supportsSelectForUpdate() throws SQLException {
+        return true;
+    }
+
+    /** Stored procedures (server-side functions) are supported. */
+    @Override
+    public boolean supportsStoredProcedures() throws SQLException {
+        return true;
+    }
+
+    /** Subqueries in comparison expressions are supported. */
+    @Override
+    public boolean supportsSubqueriesInComparisons() throws SQLException {
+        return true;
+    }
+
+    /** {@code EXISTS} subqueries are supported. */
+    @Override
+    public boolean supportsSubqueriesInExists() throws SQLException {
+        return true;
+    }
+
+    /** {@code IN} subqueries are supported. */
+    @Override
+    public boolean supportsSubqueriesInIns() throws SQLException {
+        return true;
+    }
+
+    /** Quantified ({@code ANY}/{@code ALL}) subqueries are supported. */
+    @Override
+    public boolean supportsSubqueriesInQuantifieds() throws SQLException {
+        return true;
+    }
+
+    /**
+     * {@inheritDoc}
+     *
+     * @return true; supported since PostgreSQL 7.1
+     */
+    @Override
+    public boolean supportsCorrelatedSubqueries() throws SQLException {
+        return true;
+    }
+
+    /**
+     * {@inheritDoc}
+     *
+     * @return true; supported since PostgreSQL 6.3
+     */
+    @Override
+    public boolean supportsUnion() throws SQLException {
+        return true;
+    }
+
+    /**
+     * {@inheritDoc}
+     *
+     * @return true; supported since PostgreSQL 7.1
+     */
+    @Override
+    public boolean supportsUnionAll() throws SQLException {
+        return true;
+    }
+
+    /**
+     * {@inheritDoc} In PostgreSQL, Cursors are only open within transactions.
+     */
+    @Override
+    public boolean supportsOpenCursorsAcrossCommit() throws SQLException {
+        return false;
+    }
+
+    /** Cursors do not survive a rollback either; see supportsOpenCursorsAcrossCommit(). */
+    @Override
+    public boolean supportsOpenCursorsAcrossRollback() throws SQLException {
+        return false;
+    }
+
+    /**
+     * {@inheritDoc}
+     * <p>Can statements remain open across commits? They may, but this driver cannot guarantee that.
+     * On further reflection, we are talking a Statement object here, so the answer is yes, since the
+     * Statement is only a vehicle to ExecSQL()</p>
+     *
+     * @return true
+     */
+    @Override
+    public boolean supportsOpenStatementsAcrossCommit() throws SQLException {
+        return true;
+    }
+
+    /**
+     * {@inheritDoc}
+     * <p>Can statements remain open across rollbacks? They may, but this driver cannot guarantee that.
+     * On further contemplation, we are talking a Statement object here, so the answer is yes, since
+     * the Statement is only a vehicle to ExecSQL() in Connection</p>
+     *
+     * @return true
+     */
+    @Override
+    public boolean supportsOpenStatementsAcrossRollback() throws SQLException {
+        return true;
+    }
+
+    @Override
+    public int getMaxCharLiteralLength() throws SQLException {
+        return 0; // no limit
+    }
+
+    @Override
+    public int getMaxBinaryLiteralLength() throws SQLException {
+        return 0; // no limit
+    }
+
+    @Override
+    public int getMaxColumnNameLength() throws SQLException {
+        return getMaxNameLength();
+    }
+
+    @Override
+    public int getMaxColumnsInGroupBy() throws SQLException {
+        return 0; // no limit
+    }
+
+    @Override
+    public int getMaxColumnsInIndex() throws SQLException {
+        return getMaxIndexKeys();
+    }
+
+    @Override
+    public int getMaxColumnsInOrderBy() throws SQLException {
+        return 0; // no limit
+    }
+
+    @Override
+    public int getMaxColumnsInSelect() throws SQLException {
+        return 0; // no limit
+    }
+
+    /**
+     * {@inheritDoc} What is the maximum number of columns in a table? From the CREATE TABLE reference
+     * page...
+     *
+     * <p>"The new class is created as a heap with no initial data. A class can have no more than 1600
+     * attributes (realistically, this is limited by the fact that tuple sizes must be less than 8192
+     * bytes)..."</p>
+     *
+     * @return the max columns
+     * @throws SQLException if a database access error occurs
+     */
+    @Override
+    public int getMaxColumnsInTable() throws SQLException {
+        return 1600;
+    }
+
+    /**
+     * {@inheritDoc} How many active connection can we have at a time to this database? Well, since it
+     * depends on postmaster, which just does a listen() followed by an accept() and fork(), its
+     * basically very high. Unless the system runs out of processes, it can be 65535 (the number of
+     * aux. ports on a TCP/IP system). I will return 8192 since that is what even the largest system
+     * can realistically handle,
+     *
+     * @return the maximum number of connections
+     * @throws SQLException if a database access error occurs
+     */
+    @Override
+    public int getMaxConnections() throws SQLException {
+        return 8192;
+    }
+
+    @Override
+    public int getMaxCursorNameLength() throws SQLException {
+        return getMaxNameLength();
+    }
+
+    @Override
+    public int getMaxIndexLength() throws SQLException {
+        return 0; // no limit (larger than an int anyway)
+    }
+
+    @Override
+    public int getMaxSchemaNameLength() throws SQLException {
+        return getMaxNameLength();
+    }
+
+    @Override
+    public int getMaxProcedureNameLength() throws SQLException {
+        return getMaxNameLength();
+    }
+
+    @Override
+    public int getMaxCatalogNameLength() throws SQLException {
+        return getMaxNameLength();
+    }
+
+    @Override
+    public int getMaxRowSize() throws SQLException {
+        return 1073741824; // 1 GB
+    }
+
+    @Override
+    public boolean doesMaxRowSizeIncludeBlobs() throws SQLException {
+        return false;
+    }
+
+    /**
+     * {@inheritDoc}
+     *
+     * @return 0, meaning no limit is reported
+     */
+    @Override
+    public int getMaxStatementLength() throws SQLException {
+        return 0; // actually whatever fits in size_t
+    }
+
+    /**
+     * {@inheritDoc}
+     *
+     * @return 0, meaning no limit on concurrently open statements is reported
+     */
+    @Override
+    public int getMaxStatements() throws SQLException {
+        return 0;
+    }
+
+    /**
+     * {@inheritDoc}
+     *
+     * @return the shared identifier length limit, delegated to {@code getMaxNameLength()}
+     */
+    @Override
+    public int getMaxTableNameLength() throws SQLException {
+        return getMaxNameLength();
+    }
+
+    /**
+     * {@inheritDoc}
+     *
+     * @return 0, meaning no limit on tables per SELECT is reported
+     */
+    @Override
+    public int getMaxTablesInSelect() throws SQLException {
+        return 0; // no limit
+    }
+
+    /**
+     * {@inheritDoc}
+     *
+     * @return the shared identifier length limit, delegated to {@code getMaxNameLength()}
+     */
+    @Override
+    public int getMaxUserNameLength() throws SQLException {
+        return getMaxNameLength();
+    }
+
+    /**
+     * {@inheritDoc}
+     *
+     * <p>Reads {@code default_transaction_isolation} from pg_catalog.pg_settings and maps it to
+     * the corresponding {@link Connection} isolation constant. Unknown values fall back to
+     * READ_COMMITTED as the best guess.</p>
+     *
+     * @return one of the {@code Connection.TRANSACTION_*} constants
+     * @throws SQLException if the setting cannot be read or is missing
+     */
+    @Override
+    public int getDefaultTransactionIsolation() throws SQLException {
+        String sql =
+                "SELECT setting FROM pg_catalog.pg_settings WHERE name='default_transaction_isolation'";
+
+        try (Statement stmt = connection.createStatement();
+             ResultSet rs = stmt.executeQuery(sql)) {
+            String setting = rs.next() ? rs.getString(1) : null;
+            if (setting == null) {
+                throw new PSQLException(
+                        GT.tr(
+                                "Unable to determine a value for DefaultTransactionIsolation due to missing "
+                                        + " entry in pg_catalog.pg_settings WHERE name='default_transaction_isolation'."),
+                        PSQLState.UNEXPECTED_ERROR);
+            }
+            // The server reports the value in lower case; normalize defensively anyway.
+            String normalized = setting.toLowerCase(Locale.ROOT);
+            if ("read uncommitted".equals(normalized)) {
+                return Connection.TRANSACTION_READ_UNCOMMITTED;
+            }
+            if ("repeatable read".equals(normalized)) {
+                return Connection.TRANSACTION_REPEATABLE_READ;
+            }
+            if ("serializable".equals(normalized)) {
+                return Connection.TRANSACTION_SERIALIZABLE;
+            }
+            // "read committed", or anything unrecognized: best guess.
+            return Connection.TRANSACTION_READ_COMMITTED;
+        }
+    }
+
+    /**
+     * {@inheritDoc}
+     *
+     * @return true; transactions are always supported
+     */
+    @Override
+    public boolean supportsTransactions() throws SQLException {
+        return true;
+    }
+
+    /**
+     * {@inheritDoc}
+     * <p>All four standard JDBC levels are accepted. Before 8.0 only SERIALIZABLE and
+     * READ_COMMITTED were distinct; from 8.0 READ_UNCOMMITTED and REPEATABLE_READ are accepted
+     * aliases for READ_COMMITTED.</p>
+     */
+    @Override
+    public boolean supportsTransactionIsolationLevel(int level) throws SQLException {
+        return level == Connection.TRANSACTION_READ_UNCOMMITTED
+                || level == Connection.TRANSACTION_READ_COMMITTED
+                || level == Connection.TRANSACTION_REPEATABLE_READ
+                || level == Connection.TRANSACTION_SERIALIZABLE;
+    }
+
+    /**
+     * {@inheritDoc}
+     *
+     * @return true; DDL and DML may be mixed in one transaction
+     */
+    @Override
+    public boolean supportsDataDefinitionAndDataManipulationTransactions() throws SQLException {
+        return true;
+    }
+
+    /**
+     * {@inheritDoc}
+     *
+     * @return false; transactions are not restricted to DML statements
+     */
+    @Override
+    public boolean supportsDataManipulationTransactionsOnly() throws SQLException {
+        return false;
+    }
+
+    /**
+     * <p>Does a data definition statement within a transaction force the transaction to commit? It
+     * seems to mean something like:</p>
+     *
+     * <pre>
+     * CREATE TABLE T (A INT);
+     * INSERT INTO T (A) VALUES (2);
+     * BEGIN;
+     * UPDATE T SET A = A + 1;
+     * CREATE TABLE X (A INT);
+     * SELECT A FROM T INTO X;
+     * COMMIT;
+     * </pre>
+     *
+     * <p>Does the CREATE TABLE call cause a commit? The answer is no.</p>
+     *
+     * @return false; DDL inside a transaction does not force a commit
+     * @throws SQLException if a database access error occurs
+     */
+    @Override
+    public boolean dataDefinitionCausesTransactionCommit() throws SQLException {
+        return false;
+    }
+
+    /**
+     * {@inheritDoc}
+     *
+     * @return false; DDL inside a transaction is executed, not ignored
+     */
+    @Override
+    public boolean dataDefinitionIgnoredInTransactions() throws SQLException {
+        return false;
+    }
+
+    /**
+     * Turn the provided value into a valid string literal for direct inclusion into a query,
+     * including the surrounding single quotes. When the connection does not use
+     * standard-conforming strings, the literal is prefixed with {@code E} so backslash escapes
+     * are interpreted.
+     *
+     * @param s input value
+     * @return string literal for direct inclusion into a query
+     * @throws SQLException if something wrong happens
+     */
+    protected String escapeQuotes(String s) throws SQLException {
+        String prefix = connection.getStandardConformingStrings() ? "" : "E";
+        return prefix + "'" + connection.escapeString(s) + "'";
+    }
+
+    /**
+     * {@inheritDoc}
+     *
+     * <p>Queries pg_catalog.pg_proc joined with pg_namespace and pg_description. On server
+     * versions 11 and later the result is restricted to true procedures ({@code prokind = 'p'}).</p>
+     *
+     * @param catalog ignored
+     * @param schemaPattern schema name LIKE pattern, or null/empty for all schemas
+     * @param procedureNamePattern procedure name LIKE pattern, or null/empty for all
+     * @return one row per procedure, ordered by schema, name and oid
+     * @throws SQLException if a database access error occurs
+     */
+    @Override
+    public ResultSet getProcedures(String catalog, String schemaPattern,
+                                   String procedureNamePattern)
+            throws SQLException {
+        StringBuilder sql = new StringBuilder(
+                "SELECT NULL AS PROCEDURE_CAT, n.nspname AS PROCEDURE_SCHEM, p.proname AS PROCEDURE_NAME, "
+                        + "NULL, NULL, NULL, d.description AS REMARKS, "
+                        + DatabaseMetaData.procedureReturnsResult + " AS PROCEDURE_TYPE, "
+                        + " p.proname || '_' || p.oid AS SPECIFIC_NAME "
+                        + " FROM pg_catalog.pg_namespace n, pg_catalog.pg_proc p "
+                        + " LEFT JOIN pg_catalog.pg_description d ON (p.oid=d.objoid) "
+                        + " LEFT JOIN pg_catalog.pg_class c ON (d.classoid=c.oid AND c.relname='pg_proc') "
+                        + " LEFT JOIN pg_catalog.pg_namespace pn ON (c.relnamespace=pn.oid AND pn.nspname='pg_catalog') "
+                        + " WHERE p.pronamespace=n.oid ");
+
+        if (connection.haveMinimumServerVersion(ServerVersion.v11)) {
+            sql.append(" AND p.prokind='p'");
+        }
+        if (schemaPattern != null && !schemaPattern.isEmpty()) {
+            sql.append(" AND n.nspname LIKE ").append(escapeQuotes(schemaPattern));
+        }
+        if (procedureNamePattern != null && !procedureNamePattern.isEmpty()) {
+            sql.append(" AND p.proname LIKE ").append(escapeQuotes(procedureNamePattern));
+        }
+        if (connection.getHideUnprivilegedObjects()) {
+            sql.append(" AND has_function_privilege(p.oid,'EXECUTE')");
+        }
+        sql.append(" ORDER BY PROCEDURE_SCHEM, PROCEDURE_NAME, p.oid::text ");
+
+        return createMetaDataStatement().executeQuery(sql.toString());
+    }
+
+    @Override
+    public ResultSet getProcedureColumns(String catalog, String schemaPattern,
+                                         String procedureNamePattern, String columnNamePattern)
+            throws SQLException {
+        int columns = 20;
+
+        Field[] f = new Field[columns];
+        List<Tuple> v = new ArrayList<>(); // The new ResultSet tuple stuff
+
+        f[0] = new Field("PROCEDURE_CAT", Oid.VARCHAR);
+        f[1] = new Field("PROCEDURE_SCHEM", Oid.VARCHAR);
+        f[2] = new Field("PROCEDURE_NAME", Oid.VARCHAR);
+        f[3] = new Field("COLUMN_NAME", Oid.VARCHAR);
+        f[4] = new Field("COLUMN_TYPE", Oid.INT2);
+        f[5] = new Field("DATA_TYPE", Oid.INT2);
+        f[6] = new Field("TYPE_NAME", Oid.VARCHAR);
+        f[7] = new Field("PRECISION", Oid.INT4);
+        f[8] = new Field("LENGTH", Oid.INT4);
+        f[9] = new Field("SCALE", Oid.INT2);
+        f[10] = new Field("RADIX", Oid.INT2);
+        f[11] = new Field("NULLABLE", Oid.INT2);
+        f[12] = new Field("REMARKS", Oid.VARCHAR);
+        f[13] = new Field("COLUMN_DEF", Oid.VARCHAR);
+        f[14] = new Field("SQL_DATA_TYPE", Oid.INT4);
+        f[15] = new Field("SQL_DATETIME_SUB", Oid.INT4);
+        f[16] = new Field("CHAR_OCTET_LENGTH", Oid.INT4);
+        f[17] = new Field("ORDINAL_POSITION", Oid.INT4);
+        f[18] = new Field("IS_NULLABLE", Oid.VARCHAR);
+        f[19] = new Field("SPECIFIC_NAME", Oid.VARCHAR);
+
+        String sql;
+        sql = "SELECT n.nspname,p.proname,p.prorettype,p.proargtypes, t.typtype,t.typrelid, "
+                + " p.proargnames, p.proargmodes, p.proallargtypes, p.oid "
+                + " FROM pg_catalog.pg_proc p, pg_catalog.pg_namespace n, pg_catalog.pg_type t "
+                + " WHERE p.pronamespace=n.oid AND p.prorettype=t.oid ";
+        if (schemaPattern != null && !schemaPattern.isEmpty()) {
+            sql += " AND n.nspname LIKE " + escapeQuotes(schemaPattern);
+        }
+        if (procedureNamePattern != null && !procedureNamePattern.isEmpty()) {
+            sql += " AND p.proname LIKE " + escapeQuotes(procedureNamePattern);
+        }
+        sql += " ORDER BY n.nspname, p.proname, p.oid::text ";
+
+        byte[] isnullableUnknown = new byte[0];
+
+        Statement stmt = connection.createStatement();
+        ResultSet rs = stmt.executeQuery(sql);
+        while (rs.next()) {
+            byte[] schema = rs.getBytes("nspname");
+            byte[] procedureName = rs.getBytes("proname");
+            byte[] specificName =
+                    connection.encodeString(rs.getString("proname") + "_" + rs.getString("oid"));
+            int returnType = (int) rs.getLong("prorettype");
+            String returnTypeType = rs.getString("typtype");
+            int returnTypeRelid = (int) rs.getLong("typrelid");
+
+            String strArgTypes = rs.getString("proargtypes");
+            StringTokenizer st = new StringTokenizer(strArgTypes);
+            List<Long> argTypes = new ArrayList<>();
+            while (st.hasMoreTokens()) {
+                argTypes.add(Long.valueOf(st.nextToken()));
+            }
+
+            String[] argNames = null;
+            Array argNamesArray = rs.getArray("proargnames");
+            if (argNamesArray != null) {
+                argNames = (String[]) argNamesArray.getArray();
+            }
+
+            String[] argModes = null;
+            Array argModesArray = rs.getArray("proargmodes");
+            if (argModesArray != null) {
+                argModes = (String[]) argModesArray.getArray();
+            }
+
+            int numArgs = argTypes.size();
+
+            Long[] allArgTypes = null;
+            Array allArgTypesArray = rs.getArray("proallargtypes");
+            if (allArgTypesArray != null) {
+                allArgTypes = (Long[]) allArgTypesArray.getArray();
+                numArgs = allArgTypes.length;
+            }
+
+            // decide if we are returning a single column result.
+            if ("b".equals(returnTypeType) || "d".equals(returnTypeType) || "e".equals(returnTypeType)
+                    || ("p".equals(returnTypeType) && argModesArray == null)) {
+                byte[][] tuple = new byte[columns][];
+                tuple[0] = null;
+                tuple[1] = schema;
+                tuple[2] = procedureName;
+                tuple[3] = connection.encodeString("returnValue");
+                tuple[4] = connection
+                        .encodeString(Integer.toString(DatabaseMetaData.procedureColumnReturn));
+                tuple[5] = connection
+                        .encodeString(Integer.toString(connection.getTypeInfo().getSQLType(returnType)));
+                tuple[6] = connection.encodeString(connection.getTypeInfo().getPGType(returnType));
+                tuple[7] = null;
+                tuple[8] = null;
+                tuple[9] = null;
+                tuple[10] = null;
+                tuple[11] = connection
+                        .encodeString(Integer.toString(DatabaseMetaData.procedureNullableUnknown));
+                tuple[12] = null;
+                tuple[17] = connection.encodeString(Integer.toString(0));
+                tuple[18] = isnullableUnknown;
+                tuple[19] = specificName;
+
+                v.add(new Tuple(tuple));
+            }
+
+            // Add a row for each argument.
+            for (int i = 0; i < numArgs; i++) {
+                byte[][] tuple = new byte[columns][];
+                tuple[0] = null;
+                tuple[1] = schema;
+                tuple[2] = procedureName;
+
+                if (argNames != null) {
+                    tuple[3] = connection.encodeString(argNames[i]);
+                } else {
+                    tuple[3] = connection.encodeString("$" + (i + 1));
+                }
+
+                int columnMode = DatabaseMetaData.procedureColumnIn;
+                if (argModes != null && "o".equals(argModes[i])) {
+                    columnMode = DatabaseMetaData.procedureColumnOut;
+                } else if (argModes != null && "b".equals(argModes[i])) {
+                    columnMode = DatabaseMetaData.procedureColumnInOut;
+                } else if (argModes != null && "t".equals(argModes[i])) {
+                    columnMode = DatabaseMetaData.procedureColumnReturn;
+                }
+
+                tuple[4] = connection.encodeString(Integer.toString(columnMode));
+
+                int argOid;
+                if (allArgTypes != null) {
+                    argOid = allArgTypes[i].intValue();
+                } else {
+                    argOid = argTypes.get(i).intValue();
+                }
+
+                tuple[5] =
+                        connection.encodeString(Integer.toString(connection.getTypeInfo().getSQLType(argOid)));
+                tuple[6] = connection.encodeString(connection.getTypeInfo().getPGType(argOid));
+                tuple[7] = null;
+                tuple[8] = null;
+                tuple[9] = null;
+                tuple[10] = null;
+                tuple[11] =
+                        connection.encodeString(Integer.toString(DatabaseMetaData.procedureNullableUnknown));
+                tuple[12] = null;
+                tuple[17] = connection.encodeString(Integer.toString(i + 1));
+                tuple[18] = isnullableUnknown;
+                tuple[19] = specificName;
+
+                v.add(new Tuple(tuple));
+            }
+
+            // if we are returning a multi-column result.
+            if ("c".equals(returnTypeType) || ("p".equals(returnTypeType) && argModesArray != null)) {
+                String columnsql = "SELECT a.attname,a.atttypid FROM pg_catalog.pg_attribute a "
+                        + " WHERE a.attrelid = " + returnTypeRelid
+                        + " AND NOT a.attisdropped AND a.attnum > 0 ORDER BY a.attnum ";
+                Statement columnstmt = connection.createStatement();
+                ResultSet columnrs = columnstmt.executeQuery(columnsql);
+                while (columnrs.next()) {
+                    int columnTypeOid = (int) columnrs.getLong("atttypid");
+                    byte[][] tuple = new byte[columns][];
+                    tuple[0] = null;
+                    tuple[1] = schema;
+                    tuple[2] = procedureName;
+                    tuple[3] = columnrs.getBytes("attname");
+                    tuple[4] = connection
+                            .encodeString(Integer.toString(DatabaseMetaData.procedureColumnResult));
+                    tuple[5] = connection
+                            .encodeString(Integer.toString(connection.getTypeInfo().getSQLType(columnTypeOid)));
+                    tuple[6] = connection.encodeString(connection.getTypeInfo().getPGType(columnTypeOid));
+                    tuple[7] = null;
+                    tuple[8] = null;
+                    tuple[9] = null;
+                    tuple[10] = null;
+                    tuple[11] = connection
+                            .encodeString(Integer.toString(DatabaseMetaData.procedureNullableUnknown));
+                    tuple[12] = null;
+                    tuple[17] = connection.encodeString(Integer.toString(0));
+                    tuple[18] = isnullableUnknown;
+                    tuple[19] = specificName;
+
+                    v.add(new Tuple(tuple));
+                }
+                columnrs.close();
+                columnstmt.close();
+            }
+        }
+        rs.close();
+        stmt.close();
+
+        return ((BaseStatement) createMetaDataStatement()).createDriverResultSet(f, v);
+    }
+
+    /**
+     * {@inheritDoc}
+     *
+     * <p>The giant CASE expression classifies each pg_class entry into a JDBC TABLE_TYPE based on
+     * its schema (system / toast / temp / user) and relkind. Filtering by {@code types} is done by
+     * OR-ing the per-type clauses from {@code tableTypeClauses}.</p>
+     *
+     * @param catalog ignored
+     * @param schemaPattern schema name LIKE pattern, or null/empty for all
+     * @param tableNamePattern table name LIKE pattern, or null/empty for all
+     * @param types table types to include, or null for all
+     * @return one row per matching relation
+     * @throws SQLException if a database access error occurs
+     */
+    @Override
+    public ResultSet getTables(String catalog, String schemaPattern,
+                               String tableNamePattern, String[] types) throws SQLException {
+        String select;
+        String orderby;
+        String useSchemas = "SCHEMAS";
+        select = "SELECT NULL AS TABLE_CAT, n.nspname AS TABLE_SCHEM, c.relname AS TABLE_NAME, "
+                + " CASE n.nspname ~ '^pg_' OR n.nspname = 'information_schema' "
+                + " WHEN true THEN CASE "
+                + " WHEN n.nspname = 'pg_catalog' OR n.nspname = 'information_schema' THEN CASE c.relkind "
+                + "  WHEN 'r' THEN 'SYSTEM TABLE' "
+                + "  WHEN 'v' THEN 'SYSTEM VIEW' "
+                + "  WHEN 'i' THEN 'SYSTEM INDEX' "
+                + "  ELSE NULL "
+                + "  END "
+                + " WHEN n.nspname = 'pg_toast' THEN CASE c.relkind "
+                + "  WHEN 'r' THEN 'SYSTEM TOAST TABLE' "
+                + "  WHEN 'i' THEN 'SYSTEM TOAST INDEX' "
+                + "  ELSE NULL "
+                + "  END "
+                + " ELSE CASE c.relkind "
+                + "  WHEN 'r' THEN 'TEMPORARY TABLE' "
+                + "  WHEN 'p' THEN 'TEMPORARY TABLE' "
+                + "  WHEN 'i' THEN 'TEMPORARY INDEX' "
+                + "  WHEN 'S' THEN 'TEMPORARY SEQUENCE' "
+                + "  WHEN 'v' THEN 'TEMPORARY VIEW' "
+                + "  ELSE NULL "
+                + "  END "
+                + " END "
+                + " WHEN false THEN CASE c.relkind "
+                + " WHEN 'r' THEN 'TABLE' "
+                + " WHEN 'p' THEN 'PARTITIONED TABLE' "
+                + " WHEN 'i' THEN 'INDEX' "
+                + " WHEN 'P' then 'PARTITIONED INDEX' "
+                + " WHEN 'S' THEN 'SEQUENCE' "
+                + " WHEN 'v' THEN 'VIEW' "
+                + " WHEN 'c' THEN 'TYPE' "
+                + " WHEN 'f' THEN 'FOREIGN TABLE' "
+                + " WHEN 'm' THEN 'MATERIALIZED VIEW' "
+                + " ELSE NULL "
+                + " END "
+                + " ELSE NULL "
+                + " END "
+                + " AS TABLE_TYPE, d.description AS REMARKS, "
+                + " '' as TYPE_CAT, '' as TYPE_SCHEM, '' as TYPE_NAME, "
+                + "'' AS SELF_REFERENCING_COL_NAME, '' AS REF_GENERATION "
+                + " FROM pg_catalog.pg_namespace n, pg_catalog.pg_class c "
+                + " LEFT JOIN pg_catalog.pg_description d ON (c.oid = d.objoid AND d.objsubid = 0  and d.classoid = 'pg_class'::regclass) "
+                + " WHERE c.relnamespace = n.oid ";
+
+        if (schemaPattern != null && !schemaPattern.isEmpty()) {
+            select += " AND n.nspname LIKE " + escapeQuotes(schemaPattern);
+        }
+        if (connection.getHideUnprivilegedObjects()) {
+            select += " AND has_table_privilege(c.oid, "
+                    + " 'SELECT, INSERT, UPDATE, DELETE, RULE, REFERENCES, TRIGGER')";
+        }
+        orderby = " ORDER BY TABLE_TYPE,TABLE_SCHEM,TABLE_NAME ";
+
+        if (tableNamePattern != null && !tableNamePattern.isEmpty()) {
+            select += " AND c.relname LIKE " + escapeQuotes(tableNamePattern);
+        }
+        if (types != null) {
+            // "false OR (clause) OR (clause)..." keeps the SQL valid when no type matches.
+            select += " AND (false ";
+            StringBuilder orclause = new StringBuilder();
+            for (String type : types) {
+                Map<String, String> clauses = tableTypeClauses.get(type);
+                if (clauses != null) {
+                    String clause = clauses.get(useSchemas);
+                    orclause.append(" OR ( ").append(clause).append(" ) ");
+                }
+            }
+            select += orclause.toString() + ") ";
+        }
+        String sql = select + orderby;
+
+        return ((PgResultSet) createMetaDataStatement().executeQuery(sql)).upperCaseFieldLabels();
+    }
+
+    /**
+     * {@inheritDoc}
+     *
+     * @return all schemas, delegated to {@link #getSchemas(String, String)} with no filters
+     */
+    @Override
+    public ResultSet getSchemas() throws SQLException {
+        return getSchemas(null, null);
+    }
+
+    /**
+     * {@inheritDoc}
+     *
+     * <p>Lists namespaces from pg_catalog.pg_namespace, hiding pg_toast and other sessions'
+     * temporary schemas; only this session's pg_temp_N / pg_toast_temp_N are reported.</p>
+     *
+     * @param catalog ignored
+     * @param schemaPattern schema name LIKE pattern, or null/empty for all
+     * @return schemas ordered by TABLE_SCHEM
+     * @throws SQLException if a database access error occurs
+     */
+    @Override
+    public ResultSet getSchemas(String catalog, String schemaPattern)
+            throws SQLException {
+        StringBuilder sql = new StringBuilder(
+                "SELECT nspname AS TABLE_SCHEM, NULL AS TABLE_CATALOG FROM pg_catalog.pg_namespace "
+                        + " WHERE nspname <> 'pg_toast' AND (nspname !~ '^pg_temp_' "
+                        + " OR nspname = (pg_catalog.current_schemas(true))[1]) AND (nspname !~ '^pg_toast_temp_' "
+                        + " OR nspname = replace((pg_catalog.current_schemas(true))[1], 'pg_temp_', 'pg_toast_temp_')) ");
+        if (schemaPattern != null && !schemaPattern.isEmpty()) {
+            sql.append(" AND nspname LIKE ").append(escapeQuotes(schemaPattern));
+        }
+        if (connection.getHideUnprivilegedObjects()) {
+            sql.append(" AND has_schema_privilege(nspname, 'USAGE, CREATE')");
+        }
+        sql.append(" ORDER BY TABLE_SCHEM");
+
+        return createMetaDataStatement().executeQuery(sql.toString());
+    }
+
+    /**
+     * {@inheritDoc}
+     *
+     * @return all connectable databases (datallowconn) from pg_database, ordered by name
+     * @throws SQLException if a database access error occurs
+     */
+    @Override
+    public ResultSet getCatalogs() throws SQLException {
+        String sql = "SELECT datname AS TABLE_CAT FROM pg_catalog.pg_database"
+                + " WHERE datallowconn = true"
+                + " ORDER BY datname";
+        return createMetaDataStatement().executeQuery(sql);
+    }
+
+    /**
+     * {@inheritDoc}
+     *
+     * <p>Produces a one-column in-memory result set listing every table type this driver
+     * reports (the keys of {@code tableTypeClauses}), sorted alphabetically.</p>
+     *
+     * @return a single-column (TABLE_TYPE) result set
+     * @throws SQLException if a database access error occurs
+     */
+    @Override
+    public ResultSet getTableTypes() throws SQLException {
+        String[] sortedTypes = tableTypeClauses.keySet().toArray(new String[0]);
+        Arrays.sort(sortedTypes);
+
+        Field[] fields = new Field[1];
+        fields[0] = new Field("TABLE_TYPE", Oid.VARCHAR);
+        List<Tuple> rows = new ArrayList<>(sortedTypes.length);
+        for (int i = 0; i < sortedTypes.length; i++) {
+            byte[][] row = new byte[1][];
+            row[0] = connection.encodeString(sortedTypes[i]);
+            rows.add(new Tuple(row));
+        }
+
+        return ((BaseStatement) createMetaDataStatement()).createDriverResultSet(fields, rows);
+    }
+
+    @Override
+    public ResultSet getColumns(String catalog, String schemaPattern,
+                                String tableNamePattern,
+                                String columnNamePattern) throws SQLException {
+
+        int numberOfFields = 24; // JDBC4
+        List<Tuple> v = new ArrayList<>(); // The new ResultSet tuple stuff
+        Field[] f = new Field[numberOfFields]; // The field descriptors for the new ResultSet
+
+        f[0] = new Field("TABLE_CAT", Oid.VARCHAR);
+        f[1] = new Field("TABLE_SCHEM", Oid.VARCHAR);
+        f[2] = new Field("TABLE_NAME", Oid.VARCHAR);
+        f[3] = new Field("COLUMN_NAME", Oid.VARCHAR);
+        f[4] = new Field("DATA_TYPE", Oid.INT2);
+        f[5] = new Field("TYPE_NAME", Oid.VARCHAR);
+        f[6] = new Field("COLUMN_SIZE", Oid.INT4);
+        f[7] = new Field("BUFFER_LENGTH", Oid.VARCHAR);
+        f[8] = new Field("DECIMAL_DIGITS", Oid.INT4);
+        f[9] = new Field("NUM_PREC_RADIX", Oid.INT4);
+        f[10] = new Field("NULLABLE", Oid.INT4);
+        f[11] = new Field("REMARKS", Oid.VARCHAR);
+        f[12] = new Field("COLUMN_DEF", Oid.VARCHAR);
+        f[13] = new Field("SQL_DATA_TYPE", Oid.INT4);
+        f[14] = new Field("SQL_DATETIME_SUB", Oid.INT4);
+        f[15] = new Field("CHAR_OCTET_LENGTH", Oid.VARCHAR);
+        f[16] = new Field("ORDINAL_POSITION", Oid.INT4);
+        f[17] = new Field("IS_NULLABLE", Oid.VARCHAR);
+        f[18] = new Field("SCOPE_CATALOG", Oid.VARCHAR);
+        f[19] = new Field("SCOPE_SCHEMA", Oid.VARCHAR);
+        f[20] = new Field("SCOPE_TABLE", Oid.VARCHAR);
+        f[21] = new Field("SOURCE_DATA_TYPE", Oid.INT2);
+        f[22] = new Field("IS_AUTOINCREMENT", Oid.VARCHAR);
+        f[23] = new Field("IS_GENERATEDCOLUMN", Oid.VARCHAR);
+
+        String sql;
+        // a.attnum isn't decremented when preceding columns are dropped,
+        // so the only way to calculate the correct column number is with
+        // window functions, new in 8.4.
+        //
+        // We want to push as much predicate information below the window
+        // function as possible (schema/table names), but must leave
+        // column name outside so we correctly count the other columns.
+        //
+        if (connection.haveMinimumServerVersion(ServerVersion.v8_4)) {
+            sql = "SELECT * FROM (";
         } else {
-          tuple[3] = connection.encodeString("$" + (i + 1));
+            sql = "";
         }
 
-        int columnMode = DatabaseMetaData.procedureColumnIn;
-        if (argModes != null && "o".equals(argModes[i])) {
-          columnMode = DatabaseMetaData.procedureColumnOut;
-        } else if (argModes != null && "b".equals(argModes[i])) {
-          columnMode = DatabaseMetaData.procedureColumnInOut;
-        } else if (argModes != null && "t".equals(argModes[i])) {
-          columnMode = DatabaseMetaData.procedureColumnReturn;
-        }
+        sql += "SELECT n.nspname,c.relname,a.attname,a.atttypid,a.attnotnull "
+                + "OR (t.typtype = 'd' AND t.typnotnull) AS attnotnull,a.atttypmod,a.attlen,t.typtypmod,";
 
-        tuple[4] = connection.encodeString(Integer.toString(columnMode));
-
-        int argOid;
-        if (allArgTypes != null) {
-          argOid = allArgTypes[i].intValue();
+        if (connection.haveMinimumServerVersion(ServerVersion.v8_4)) {
+            sql += "row_number() OVER (PARTITION BY a.attrelid ORDER BY a.attnum) AS attnum, ";
         } else {
-          argOid = argTypes.get(i).intValue();
+            sql += "a.attnum,";
         }
 
-        tuple[5] =
-            connection.encodeString(Integer.toString(connection.getTypeInfo().getSQLType(argOid)));
-        tuple[6] = connection.encodeString(connection.getTypeInfo().getPGType(argOid));
-        tuple[7] = null;
-        tuple[8] = null;
-        tuple[9] = null;
-        tuple[10] = null;
-        tuple[11] =
-            connection.encodeString(Integer.toString(DatabaseMetaData.procedureNullableUnknown));
-        tuple[12] = null;
-        tuple[17] = connection.encodeString(Integer.toString(i + 1));
-        tuple[18] = isnullableUnknown;
-        tuple[19] = specificName;
-
-        v.add(new Tuple(tuple));
-      }
-
-      // if we are returning a multi-column result.
-      if ("c".equals(returnTypeType) || ("p".equals(returnTypeType) && argModesArray != null)) {
-        String columnsql = "SELECT a.attname,a.atttypid FROM pg_catalog.pg_attribute a "
-                           + " WHERE a.attrelid = " + returnTypeRelid
-                           + " AND NOT a.attisdropped AND a.attnum > 0 ORDER BY a.attnum ";
-        Statement columnstmt = connection.createStatement();
-        ResultSet columnrs = columnstmt.executeQuery(columnsql);
-        while (columnrs.next()) {
-          int columnTypeOid = (int) columnrs.getLong("atttypid");
-          byte[] [] tuple = new byte[columns][];
-          tuple[0] = null;
-          tuple[1] = schema;
-          tuple[2] = procedureName;
-          tuple[3] = columnrs.getBytes("attname");
-          tuple[4] = connection
-              .encodeString(Integer.toString(DatabaseMetaData.procedureColumnResult));
-          tuple[5] = connection
-              .encodeString(Integer.toString(connection.getTypeInfo().getSQLType(columnTypeOid)));
-          tuple[6] = connection.encodeString(connection.getTypeInfo().getPGType(columnTypeOid));
-          tuple[7] = null;
-          tuple[8] = null;
-          tuple[9] = null;
-          tuple[10] = null;
-          tuple[11] = connection
-              .encodeString(Integer.toString(DatabaseMetaData.procedureNullableUnknown));
-          tuple[12] = null;
-          tuple[17] = connection.encodeString(Integer.toString(0));
-          tuple[18] = isnullableUnknown;
-          tuple[19] = specificName;
-
-          v.add(new Tuple(tuple));
+        if (connection.haveMinimumServerVersion(ServerVersion.v10)) {
+            sql += "nullif(a.attidentity, '') as attidentity,";
+        } else {
+            sql += "null as attidentity,";
         }
-        columnrs.close();
-        columnstmt.close();
-      }
-    }
-    rs.close();
-    stmt.close();
 
-    return ((BaseStatement) createMetaDataStatement()).createDriverResultSet(f, v);
-  }
-
-  @Override
-  public ResultSet getTables(String catalog, String schemaPattern,
-      String tableNamePattern, String [] types) throws SQLException {
-    String select;
-    String orderby;
-    String useSchemas = "SCHEMAS";
-    select = "SELECT NULL AS TABLE_CAT, n.nspname AS TABLE_SCHEM, c.relname AS TABLE_NAME, "
-             + " CASE n.nspname ~ '^pg_' OR n.nspname = 'information_schema' "
-             + " WHEN true THEN CASE "
-             + " WHEN n.nspname = 'pg_catalog' OR n.nspname = 'information_schema' THEN CASE c.relkind "
-             + "  WHEN 'r' THEN 'SYSTEM TABLE' "
-             + "  WHEN 'v' THEN 'SYSTEM VIEW' "
-             + "  WHEN 'i' THEN 'SYSTEM INDEX' "
-             + "  ELSE NULL "
-             + "  END "
-             + " WHEN n.nspname = 'pg_toast' THEN CASE c.relkind "
-             + "  WHEN 'r' THEN 'SYSTEM TOAST TABLE' "
-             + "  WHEN 'i' THEN 'SYSTEM TOAST INDEX' "
-             + "  ELSE NULL "
-             + "  END "
-             + " ELSE CASE c.relkind "
-             + "  WHEN 'r' THEN 'TEMPORARY TABLE' "
-             + "  WHEN 'p' THEN 'TEMPORARY TABLE' "
-             + "  WHEN 'i' THEN 'TEMPORARY INDEX' "
-             + "  WHEN 'S' THEN 'TEMPORARY SEQUENCE' "
-             + "  WHEN 'v' THEN 'TEMPORARY VIEW' "
-             + "  ELSE NULL "
-             + "  END "
-             + " END "
-             + " WHEN false THEN CASE c.relkind "
-             + " WHEN 'r' THEN 'TABLE' "
-             + " WHEN 'p' THEN 'PARTITIONED TABLE' "
-             + " WHEN 'i' THEN 'INDEX' "
-             + " WHEN 'P' then 'PARTITIONED INDEX' "
-             + " WHEN 'S' THEN 'SEQUENCE' "
-             + " WHEN 'v' THEN 'VIEW' "
-             + " WHEN 'c' THEN 'TYPE' "
-             + " WHEN 'f' THEN 'FOREIGN TABLE' "
-             + " WHEN 'm' THEN 'MATERIALIZED VIEW' "
-             + " ELSE NULL "
-             + " END "
-             + " ELSE NULL "
-             + " END "
-             + " AS TABLE_TYPE, d.description AS REMARKS, "
-             + " '' as TYPE_CAT, '' as TYPE_SCHEM, '' as TYPE_NAME, "
-             + "'' AS SELF_REFERENCING_COL_NAME, '' AS REF_GENERATION "
-             + " FROM pg_catalog.pg_namespace n, pg_catalog.pg_class c "
-             + " LEFT JOIN pg_catalog.pg_description d ON (c.oid = d.objoid AND d.objsubid = 0  and d.classoid = 'pg_class'::regclass) "
-             + " WHERE c.relnamespace = n.oid ";
-
-    if (schemaPattern != null && !schemaPattern.isEmpty()) {
-      select += " AND n.nspname LIKE " + escapeQuotes(schemaPattern);
-    }
-    if (connection.getHideUnprivilegedObjects()) {
-      select += " AND has_table_privilege(c.oid, "
-        + " 'SELECT, INSERT, UPDATE, DELETE, RULE, REFERENCES, TRIGGER')";
-    }
-    orderby = " ORDER BY TABLE_TYPE,TABLE_SCHEM,TABLE_NAME ";
-
-    if (tableNamePattern != null && !tableNamePattern.isEmpty()) {
-      select += " AND c.relname LIKE " + escapeQuotes(tableNamePattern);
-    }
-    if (types != null) {
-      select += " AND (false ";
-      StringBuilder orclause = new StringBuilder();
-      for (String type : types) {
-        Map<String, String> clauses = tableTypeClauses.get(type);
-        if (clauses != null) {
-          String clause = clauses.get(useSchemas);
-          orclause.append(" OR ( ").append(clause).append(" ) ");
+        if (connection.haveMinimumServerVersion(ServerVersion.v12)) {
+            sql += "nullif(a.attgenerated, '') as attgenerated,";
+        } else {
+            sql += "null as attgenerated,";
         }
-      }
-      select += orclause.toString() + ") ";
-    }
-    String sql = select + orderby;
 
-    return ((PgResultSet) createMetaDataStatement().executeQuery(sql)).upperCaseFieldLabels();
-  }
+        sql += "pg_catalog.pg_get_expr(def.adbin, def.adrelid) AS adsrc,dsc.description,t.typbasetype,t.typtype "
+                + " FROM pg_catalog.pg_namespace n "
+                + " JOIN pg_catalog.pg_class c ON (c.relnamespace = n.oid) "
+                + " JOIN pg_catalog.pg_attribute a ON (a.attrelid=c.oid) "
+                + " JOIN pg_catalog.pg_type t ON (a.atttypid = t.oid) "
+                + " LEFT JOIN pg_catalog.pg_attrdef def ON (a.attrelid=def.adrelid AND a.attnum = def.adnum) "
+                + " LEFT JOIN pg_catalog.pg_description dsc ON (c.oid=dsc.objoid AND a.attnum = dsc.objsubid) "
+                + " LEFT JOIN pg_catalog.pg_class dc ON (dc.oid=dsc.classoid AND dc.relname='pg_class') "
+                + " LEFT JOIN pg_catalog.pg_namespace dn ON (dc.relnamespace=dn.oid AND dn.nspname='pg_catalog') "
+                + " WHERE c.relkind in ('r','p','v','f','m') and a.attnum > 0 AND NOT a.attisdropped ";
 
-  private static final Map<String, Map<String, String>> tableTypeClauses;
-
-  static {
-    tableTypeClauses = new HashMap<>();
-    Map<String, String> ht = new HashMap<>();
-    tableTypeClauses.put("TABLE", ht);
-    ht.put("SCHEMAS", "c.relkind = 'r' AND n.nspname !~ '^pg_' AND n.nspname <> 'information_schema'");
-    ht.put("NOSCHEMAS", "c.relkind = 'r' AND c.relname !~ '^pg_'");
-    ht = new HashMap<>();
-    tableTypeClauses.put("PARTITIONED TABLE", ht);
-    ht.put("SCHEMAS", "c.relkind = 'p' AND n.nspname !~ '^pg_' AND n.nspname <> 'information_schema'");
-    ht.put("NOSCHEMAS", "c.relkind = 'p' AND c.relname !~ '^pg_'");
-    ht = new HashMap<>();
-    tableTypeClauses.put("VIEW", ht);
-    ht.put("SCHEMAS",
-        "c.relkind = 'v' AND n.nspname <> 'pg_catalog' AND n.nspname <> 'information_schema'");
-    ht.put("NOSCHEMAS", "c.relkind = 'v' AND c.relname !~ '^pg_'");
-    ht = new HashMap<>();
-    tableTypeClauses.put("INDEX", ht);
-    ht.put("SCHEMAS",
-        "c.relkind = 'i' AND n.nspname !~ '^pg_' AND n.nspname <> 'information_schema'");
-    ht.put("NOSCHEMAS", "c.relkind = 'i' AND c.relname !~ '^pg_'");
-    ht = new HashMap<>();
-    tableTypeClauses.put("PARTITIONED INDEX", ht);
-    ht.put("SCHEMAS", "c.relkind = 'I' AND n.nspname !~ '^pg_' AND n.nspname <> 'information_schema'");
-    ht.put("NOSCHEMAS", "c.relkind = 'I' AND c.relname !~ '^pg_'");
-    ht = new HashMap<>();
-    tableTypeClauses.put("SEQUENCE", ht);
-    ht.put("SCHEMAS", "c.relkind = 'S'");
-    ht.put("NOSCHEMAS", "c.relkind = 'S'");
-    ht = new HashMap<>();
-    tableTypeClauses.put("TYPE", ht);
-    ht.put("SCHEMAS",
-        "c.relkind = 'c' AND n.nspname !~ '^pg_' AND n.nspname <> 'information_schema'");
-    ht.put("NOSCHEMAS", "c.relkind = 'c' AND c.relname !~ '^pg_'");
-    ht = new HashMap<>();
-    tableTypeClauses.put("SYSTEM TABLE", ht);
-    ht.put("SCHEMAS",
-        "c.relkind = 'r' AND (n.nspname = 'pg_catalog' OR n.nspname = 'information_schema')");
-    ht.put("NOSCHEMAS",
-        "c.relkind = 'r' AND c.relname ~ '^pg_' AND c.relname !~ '^pg_toast_' AND c.relname !~ '^pg_temp_'");
-    ht = new HashMap<>();
-    tableTypeClauses.put("SYSTEM TOAST TABLE", ht);
-    ht.put("SCHEMAS", "c.relkind = 'r' AND n.nspname = 'pg_toast'");
-    ht.put("NOSCHEMAS", "c.relkind = 'r' AND c.relname ~ '^pg_toast_'");
-    ht = new HashMap<>();
-    tableTypeClauses.put("SYSTEM TOAST INDEX", ht);
-    ht.put("SCHEMAS", "c.relkind = 'i' AND n.nspname = 'pg_toast'");
-    ht.put("NOSCHEMAS", "c.relkind = 'i' AND c.relname ~ '^pg_toast_'");
-    ht = new HashMap<>();
-    tableTypeClauses.put("SYSTEM VIEW", ht);
-    ht.put("SCHEMAS",
-        "c.relkind = 'v' AND (n.nspname = 'pg_catalog' OR n.nspname = 'information_schema') ");
-    ht.put("NOSCHEMAS", "c.relkind = 'v' AND c.relname ~ '^pg_'");
-    ht = new HashMap<>();
-    tableTypeClauses.put("SYSTEM INDEX", ht);
-    ht.put("SCHEMAS",
-        "c.relkind = 'i' AND (n.nspname = 'pg_catalog' OR n.nspname = 'information_schema') ");
-    ht.put("NOSCHEMAS",
-        "c.relkind = 'v' AND c.relname ~ '^pg_' AND c.relname !~ '^pg_toast_' AND c.relname !~ '^pg_temp_'");
-    ht = new HashMap<>();
-    tableTypeClauses.put("TEMPORARY TABLE", ht);
-    ht.put("SCHEMAS", "c.relkind IN ('r','p') AND n.nspname ~ '^pg_temp_' ");
-    ht.put("NOSCHEMAS", "c.relkind IN ('r','p') AND c.relname ~ '^pg_temp_' ");
-    ht = new HashMap<>();
-    tableTypeClauses.put("TEMPORARY INDEX", ht);
-    ht.put("SCHEMAS", "c.relkind = 'i' AND n.nspname ~ '^pg_temp_' ");
-    ht.put("NOSCHEMAS", "c.relkind = 'i' AND c.relname ~ '^pg_temp_' ");
-    ht = new HashMap<>();
-    tableTypeClauses.put("TEMPORARY VIEW", ht);
-    ht.put("SCHEMAS", "c.relkind = 'v' AND n.nspname ~ '^pg_temp_' ");
-    ht.put("NOSCHEMAS", "c.relkind = 'v' AND c.relname ~ '^pg_temp_' ");
-    ht = new HashMap<>();
-    tableTypeClauses.put("TEMPORARY SEQUENCE", ht);
-    ht.put("SCHEMAS", "c.relkind = 'S' AND n.nspname ~ '^pg_temp_' ");
-    ht.put("NOSCHEMAS", "c.relkind = 'S' AND c.relname ~ '^pg_temp_' ");
-    ht = new HashMap<>();
-    tableTypeClauses.put("FOREIGN TABLE", ht);
-    ht.put("SCHEMAS", "c.relkind = 'f'");
-    ht.put("NOSCHEMAS", "c.relkind = 'f'");
-    ht = new HashMap<>();
-    tableTypeClauses.put("MATERIALIZED VIEW", ht);
-    ht.put("SCHEMAS", "c.relkind = 'm'");
-    ht.put("NOSCHEMAS", "c.relkind = 'm'");
-  }
-
-  @Override
-  public ResultSet getSchemas() throws SQLException {
-    return getSchemas(null, null);
-  }
-
-  @Override
-  public ResultSet getSchemas(String catalog, String schemaPattern)
-      throws SQLException {
-    String sql;
-    sql = "SELECT nspname AS TABLE_SCHEM, NULL AS TABLE_CATALOG FROM pg_catalog.pg_namespace "
-          + " WHERE nspname <> 'pg_toast' AND (nspname !~ '^pg_temp_' "
-          + " OR nspname = (pg_catalog.current_schemas(true))[1]) AND (nspname !~ '^pg_toast_temp_' "
-          + " OR nspname = replace((pg_catalog.current_schemas(true))[1], 'pg_temp_', 'pg_toast_temp_')) ";
-    if (schemaPattern != null && !schemaPattern.isEmpty()) {
-      sql += " AND nspname LIKE " + escapeQuotes(schemaPattern);
-    }
-    if (connection.getHideUnprivilegedObjects()) {
-      sql += " AND has_schema_privilege(nspname, 'USAGE, CREATE')";
-    }
-    sql += " ORDER BY TABLE_SCHEM";
-
-    return createMetaDataStatement().executeQuery(sql);
-  }
-
-  @Override
-  public ResultSet getCatalogs() throws SQLException {
-    String sql = "SELECT datname AS TABLE_CAT FROM pg_catalog.pg_database"
-        + " WHERE datallowconn = true"
-        + " ORDER BY datname";
-    return createMetaDataStatement().executeQuery(sql);
-  }
-
-  @Override
-  public ResultSet getTableTypes() throws SQLException {
-    String[] types = tableTypeClauses.keySet().toArray(new String[0]);
-    Arrays.sort(types);
-
-    Field[] f = new Field[1];
-    List<Tuple> v = new ArrayList<>();
-    f[0] = new Field("TABLE_TYPE", Oid.VARCHAR);
-    for (String type : types) {
-      byte[] [] tuple = new byte[1][];
-      tuple[0] = connection.encodeString(type);
-      v.add(new Tuple(tuple));
-    }
-
-    return ((BaseStatement) createMetaDataStatement()).createDriverResultSet(f, v);
-  }
-
-  @Override
-  public ResultSet getColumns(String catalog, String schemaPattern,
-      String tableNamePattern,
-      String columnNamePattern) throws SQLException {
-
-    int numberOfFields = 24; // JDBC4
-    List<Tuple> v = new ArrayList<>(); // The new ResultSet tuple stuff
-    Field[] f = new Field[numberOfFields]; // The field descriptors for the new ResultSet
-
-    f[0] = new Field("TABLE_CAT", Oid.VARCHAR);
-    f[1] = new Field("TABLE_SCHEM", Oid.VARCHAR);
-    f[2] = new Field("TABLE_NAME", Oid.VARCHAR);
-    f[3] = new Field("COLUMN_NAME", Oid.VARCHAR);
-    f[4] = new Field("DATA_TYPE", Oid.INT2);
-    f[5] = new Field("TYPE_NAME", Oid.VARCHAR);
-    f[6] = new Field("COLUMN_SIZE", Oid.INT4);
-    f[7] = new Field("BUFFER_LENGTH", Oid.VARCHAR);
-    f[8] = new Field("DECIMAL_DIGITS", Oid.INT4);
-    f[9] = new Field("NUM_PREC_RADIX", Oid.INT4);
-    f[10] = new Field("NULLABLE", Oid.INT4);
-    f[11] = new Field("REMARKS", Oid.VARCHAR);
-    f[12] = new Field("COLUMN_DEF", Oid.VARCHAR);
-    f[13] = new Field("SQL_DATA_TYPE", Oid.INT4);
-    f[14] = new Field("SQL_DATETIME_SUB", Oid.INT4);
-    f[15] = new Field("CHAR_OCTET_LENGTH", Oid.VARCHAR);
-    f[16] = new Field("ORDINAL_POSITION", Oid.INT4);
-    f[17] = new Field("IS_NULLABLE", Oid.VARCHAR);
-    f[18] = new Field("SCOPE_CATALOG", Oid.VARCHAR);
-    f[19] = new Field("SCOPE_SCHEMA", Oid.VARCHAR);
-    f[20] = new Field("SCOPE_TABLE", Oid.VARCHAR);
-    f[21] = new Field("SOURCE_DATA_TYPE", Oid.INT2);
-    f[22] = new Field("IS_AUTOINCREMENT", Oid.VARCHAR);
-    f[23] = new Field( "IS_GENERATEDCOLUMN", Oid.VARCHAR);
-
-    String sql;
-    // a.attnum isn't decremented when preceding columns are dropped,
-    // so the only way to calculate the correct column number is with
-    // window functions, new in 8.4.
-    //
-    // We want to push as much predicate information below the window
-    // function as possible (schema/table names), but must leave
-    // column name outside so we correctly count the other columns.
-    //
-    if (connection.haveMinimumServerVersion(ServerVersion.v8_4)) {
-      sql = "SELECT * FROM (";
-    } else {
-      sql = "";
-    }
-
-    sql += "SELECT n.nspname,c.relname,a.attname,a.atttypid,a.attnotnull "
-           + "OR (t.typtype = 'd' AND t.typnotnull) AS attnotnull,a.atttypmod,a.attlen,t.typtypmod,";
-
-    if (connection.haveMinimumServerVersion(ServerVersion.v8_4)) {
-      sql += "row_number() OVER (PARTITION BY a.attrelid ORDER BY a.attnum) AS attnum, ";
-    } else {
-      sql += "a.attnum,";
-    }
-
-    if (connection.haveMinimumServerVersion(ServerVersion.v10)) {
-      sql += "nullif(a.attidentity, '') as attidentity,";
-    } else {
-      sql += "null as attidentity,";
-    }
-
-    if (connection.haveMinimumServerVersion(ServerVersion.v12)) {
-      sql += "nullif(a.attgenerated, '') as attgenerated,";
-    } else {
-      sql += "null as attgenerated,";
-    }
-
-    sql += "pg_catalog.pg_get_expr(def.adbin, def.adrelid) AS adsrc,dsc.description,t.typbasetype,t.typtype "
-           + " FROM pg_catalog.pg_namespace n "
-           + " JOIN pg_catalog.pg_class c ON (c.relnamespace = n.oid) "
-           + " JOIN pg_catalog.pg_attribute a ON (a.attrelid=c.oid) "
-           + " JOIN pg_catalog.pg_type t ON (a.atttypid = t.oid) "
-           + " LEFT JOIN pg_catalog.pg_attrdef def ON (a.attrelid=def.adrelid AND a.attnum = def.adnum) "
-           + " LEFT JOIN pg_catalog.pg_description dsc ON (c.oid=dsc.objoid AND a.attnum = dsc.objsubid) "
-           + " LEFT JOIN pg_catalog.pg_class dc ON (dc.oid=dsc.classoid AND dc.relname='pg_class') "
-           + " LEFT JOIN pg_catalog.pg_namespace dn ON (dc.relnamespace=dn.oid AND dn.nspname='pg_catalog') "
-           + " WHERE c.relkind in ('r','p','v','f','m') and a.attnum > 0 AND NOT a.attisdropped ";
-
-    if (schemaPattern != null && !schemaPattern.isEmpty()) {
-      sql += " AND n.nspname LIKE " + escapeQuotes(schemaPattern);
-    }
-    if (tableNamePattern != null && !tableNamePattern.isEmpty()) {
-      sql += " AND c.relname LIKE " + escapeQuotes(tableNamePattern);
-    }
-    if (connection.haveMinimumServerVersion(ServerVersion.v8_4)) {
-      sql += ") c WHERE true ";
-    }
-    if (columnNamePattern != null && !columnNamePattern.isEmpty()) {
-      sql += " AND attname LIKE " + escapeQuotes(columnNamePattern);
-    }
-    sql += " ORDER BY nspname,c.relname,attnum ";
-
-    Statement stmt = connection.createStatement();
-    ResultSet rs = stmt.executeQuery(sql);
-    while (rs.next()) {
-      byte[] [] tuple = new byte[numberOfFields][];
-      int typeOid = (int) rs.getLong("atttypid");
-      int typeMod = rs.getInt("atttypmod");
-
-      tuple[0] = null; // Catalog name, not supported
-      tuple[1] = rs.getBytes("nspname"); // Schema
-      tuple[2] = rs.getBytes("relname"); // Table name
-      tuple[3] = rs.getBytes("attname"); // Column name
-
-      String typtype = rs.getString("typtype");
-      int sqlType;
-      if ("c".equals(typtype)) {
-        sqlType = Types.STRUCT;
-      } else if ("d".equals(typtype)) {
-        sqlType = Types.DISTINCT;
-      } else if ("e".equals(typtype)) {
-        sqlType = Types.VARCHAR;
-      } else {
-        sqlType = connection.getTypeInfo().getSQLType(typeOid);
-      }
-
-      tuple[4] = connection.encodeString(Integer.toString(sqlType));
-      String pgType = connection.getTypeInfo().getPGType(typeOid);
-      tuple[5] = connection.encodeString(pgType); // Type name
-      tuple[7] = null; // Buffer length
-
-      String defval = rs.getString("adsrc");
-
-      if (defval != null && defval.contains("nextval(") ) {
-        if ("int4".equals(pgType)) {
-          tuple[5] = connection.encodeString("serial"); // Type name == serial
-        } else if ("int8".equals(pgType)) {
-          tuple[5] = connection.encodeString("bigserial"); // Type name == bigserial
-        } else if ("int2".equals(pgType) && connection.haveMinimumServerVersion(ServerVersion.v9_2)) {
-          tuple[5] = connection.encodeString("smallserial"); // Type name == smallserial
+        if (schemaPattern != null && !schemaPattern.isEmpty()) {
+            sql += " AND n.nspname LIKE " + escapeQuotes(schemaPattern);
         }
-      }
-      String identity = rs.getString("attidentity");
+        if (tableNamePattern != null && !tableNamePattern.isEmpty()) {
+            sql += " AND c.relname LIKE " + escapeQuotes(tableNamePattern);
+        }
+        if (connection.haveMinimumServerVersion(ServerVersion.v8_4)) {
+            sql += ") c WHERE true ";
+        }
+        if (columnNamePattern != null && !columnNamePattern.isEmpty()) {
+            sql += " AND attname LIKE " + escapeQuotes(columnNamePattern);
+        }
+        sql += " ORDER BY nspname,c.relname,attnum ";
 
-      String generated = rs.getString("attgenerated");
+        Statement stmt = connection.createStatement();
+        ResultSet rs = stmt.executeQuery(sql);
+        while (rs.next()) {
+            byte[][] tuple = new byte[numberOfFields][];
+            int typeOid = (int) rs.getLong("atttypid");
+            int typeMod = rs.getInt("atttypmod");
 
-      int baseTypeOid = (int) rs.getLong("typbasetype");
+            tuple[0] = null; // Catalog name, not supported
+            tuple[1] = rs.getBytes("nspname"); // Schema
+            tuple[2] = rs.getBytes("relname"); // Table name
+            tuple[3] = rs.getBytes("attname"); // Column name
 
-      int decimalDigits;
-      int columnSize;
+            String typtype = rs.getString("typtype");
+            int sqlType;
+            if ("c".equals(typtype)) {
+                sqlType = Types.STRUCT;
+            } else if ("d".equals(typtype)) {
+                sqlType = Types.DISTINCT;
+            } else if ("e".equals(typtype)) {
+                sqlType = Types.VARCHAR;
+            } else {
+                sqlType = connection.getTypeInfo().getSQLType(typeOid);
+            }
 
-      /* this is really a DOMAIN type not sure where DISTINCT came from */
-      if ( sqlType == Types.DISTINCT ) {
+            tuple[4] = connection.encodeString(Integer.toString(sqlType));
+            String pgType = connection.getTypeInfo().getPGType(typeOid);
+            tuple[5] = connection.encodeString(pgType); // Type name
+            tuple[7] = null; // Buffer length
+
+            String defval = rs.getString("adsrc");
+
+            if (defval != null && defval.contains("nextval(")) {
+                if ("int4".equals(pgType)) {
+                    tuple[5] = connection.encodeString("serial"); // Type name == serial
+                } else if ("int8".equals(pgType)) {
+                    tuple[5] = connection.encodeString("bigserial"); // Type name == bigserial
+                } else if ("int2".equals(pgType) && connection.haveMinimumServerVersion(ServerVersion.v9_2)) {
+                    tuple[5] = connection.encodeString("smallserial"); // Type name == smallserial
+                }
+            }
+            String identity = rs.getString("attidentity");
+
+            String generated = rs.getString("attgenerated");
+
+            int baseTypeOid = (int) rs.getLong("typbasetype");
+
+            int decimalDigits;
+            int columnSize;
+
+            /* This is really a DOMAIN type; it is unclear where DISTINCT came from. */
+            if (sqlType == Types.DISTINCT) {
         /*
         From the docs if typtypmod is -1
          */
-        int typtypmod = rs.getInt("typtypmod");
-        decimalDigits = connection.getTypeInfo().getScale(baseTypeOid, typeMod);
+                int typtypmod = rs.getInt("typtypmod");
+                decimalDigits = connection.getTypeInfo().getScale(baseTypeOid, typeMod);
         /*
         From the postgres docs:
         Domains use typtypmod to record the typmod to be applied to their
@@ -1779,1566 +1910,1432 @@ public class PgDatabaseMetaData implements DatabaseMetaData {
         if it is -1 then get the precision from the basetype. This doesn't help if the basetype is
         a domain, but for actual types this will return the correct value.
          */
-        if ( typtypmod == -1 ) {
-          columnSize = connection.getTypeInfo().getPrecision(baseTypeOid, typeMod);
-        } else if (baseTypeOid == Oid.NUMERIC ) {
-          decimalDigits = connection.getTypeInfo().getScale(baseTypeOid, typtypmod);
-          columnSize = connection.getTypeInfo().getPrecision(baseTypeOid, typtypmod);
-        } else {
-          columnSize = typtypmod;
-        }
-      } else {
-        decimalDigits = connection.getTypeInfo().getScale(typeOid, typeMod);
-        columnSize = connection.getTypeInfo().getPrecision(typeOid, typeMod);
-        if ( sqlType != Types.NUMERIC && columnSize == 0 ) {
-          columnSize = connection.getTypeInfo().getDisplaySize(typeOid, typeMod);
-        }
-      }
-      tuple[6] = connection.encodeString(Integer.toString(columnSize));
-      // Give null for an unset scale on Decimal and Numeric columns
-      if (((sqlType == Types.NUMERIC) || (sqlType == Types.DECIMAL)) && (typeMod == -1)) {
-        tuple[8] = null;
-      } else {
-        tuple[8] = connection.encodeString(Integer.toString(decimalDigits));
-      }
+                if (typtypmod == -1) {
+                    columnSize = connection.getTypeInfo().getPrecision(baseTypeOid, typeMod);
+                } else if (baseTypeOid == Oid.NUMERIC) {
+                    decimalDigits = connection.getTypeInfo().getScale(baseTypeOid, typtypmod);
+                    columnSize = connection.getTypeInfo().getPrecision(baseTypeOid, typtypmod);
+                } else {
+                    columnSize = typtypmod;
+                }
+            } else {
+                decimalDigits = connection.getTypeInfo().getScale(typeOid, typeMod);
+                columnSize = connection.getTypeInfo().getPrecision(typeOid, typeMod);
+                if (sqlType != Types.NUMERIC && columnSize == 0) {
+                    columnSize = connection.getTypeInfo().getDisplaySize(typeOid, typeMod);
+                }
+            }
+            tuple[6] = connection.encodeString(Integer.toString(columnSize));
+            // Give null for an unset scale on Decimal and Numeric columns
+            if (((sqlType == Types.NUMERIC) || (sqlType == Types.DECIMAL)) && (typeMod == -1)) {
+                tuple[8] = null;
+            } else {
+                tuple[8] = connection.encodeString(Integer.toString(decimalDigits));
+            }
 
-      // Everything is base 10 unless we override later.
-      tuple[9] = connection.encodeString("10");
+            // Everything is base 10 unless we override later.
+            tuple[9] = connection.encodeString("10");
 
-      if ("bit".equals(pgType) || "varbit".equals(pgType)) {
-        tuple[9] = connection.encodeString("2");
-      }
+            if ("bit".equals(pgType) || "varbit".equals(pgType)) {
+                tuple[9] = connection.encodeString("2");
+            }
 
-      tuple[10] = connection.encodeString(Integer.toString(rs.getBoolean("attnotnull")
-          ? DatabaseMetaData.columnNoNulls : DatabaseMetaData.columnNullable)); // Nullable
-      tuple[11] = rs.getBytes("description"); // Description (if any)
-      tuple[12] = rs.getBytes("adsrc"); // Column default
-      tuple[13] = null; // sql data type (unused)
-      tuple[14] = null; // sql datetime sub (unused)
-      tuple[15] = tuple[6]; // char octet length
-      tuple[16] = connection.encodeString(String.valueOf(rs.getInt("attnum"))); // ordinal position
-      // Is nullable
-      tuple[17] = connection.encodeString(rs.getBoolean("attnotnull") ? "NO" : "YES");
+            tuple[10] = connection.encodeString(Integer.toString(rs.getBoolean("attnotnull")
+                    ? DatabaseMetaData.columnNoNulls : DatabaseMetaData.columnNullable)); // Nullable
+            tuple[11] = rs.getBytes("description"); // Description (if any)
+            tuple[12] = rs.getBytes("adsrc"); // Column default
+            tuple[13] = null; // sql data type (unused)
+            tuple[14] = null; // sql datetime sub (unused)
+            tuple[15] = tuple[6]; // char octet length
+            tuple[16] = connection.encodeString(String.valueOf(rs.getInt("attnum"))); // ordinal position
+            // Is nullable
+            tuple[17] = connection.encodeString(rs.getBoolean("attnotnull") ? "NO" : "YES");
 
-      tuple[18] = null; // SCOPE_CATLOG
-      tuple[19] = null; // SCOPE_SCHEMA
-      tuple[20] = null; // SCOPE_TABLE
-      tuple[21] = baseTypeOid == 0 // SOURCE_DATA_TYPE
-                  ? null
-                  : connection.encodeString(Integer.toString(connection.getTypeInfo().getSQLType(baseTypeOid)));
+            tuple[18] = null; // SCOPE_CATALOG
+            tuple[19] = null; // SCOPE_SCHEMA
+            tuple[20] = null; // SCOPE_TABLE
+            tuple[21] = baseTypeOid == 0 // SOURCE_DATA_TYPE
+                    ? null
+                    : connection.encodeString(Integer.toString(connection.getTypeInfo().getSQLType(baseTypeOid)));
 
-      String autoinc = "NO";
-      if (defval != null && defval.contains("nextval(") || identity != null) {
-        autoinc = "YES";
-      }
-      tuple[22] = connection.encodeString(autoinc); // IS_AUTOINCREMENT
+            String autoinc = "NO";
+            if (defval != null && defval.contains("nextval(") || identity != null) {
+                autoinc = "YES";
+            }
+            tuple[22] = connection.encodeString(autoinc); // IS_AUTOINCREMENT
 
-      String generatedcolumn = "NO";
-      if (generated != null) {
-        generatedcolumn = "YES";
-      }
-      tuple[23] = connection.encodeString(generatedcolumn); // IS_GENERATEDCOLUMN
+            String generatedcolumn = "NO";
+            if (generated != null) {
+                generatedcolumn = "YES";
+            }
+            tuple[23] = connection.encodeString(generatedcolumn); // IS_GENERATEDCOLUMN
 
-      v.add(new Tuple(tuple));
-    }
-    rs.close();
-    stmt.close();
-
-    return ((BaseStatement) createMetaDataStatement()).createDriverResultSet(f, v);
-  }
-
-  @Override
-  public ResultSet getColumnPrivileges(String catalog, String schema,
-      String table, String columnNamePattern) throws SQLException {
-    Field[] f = new Field[8];
-    List<Tuple> v = new ArrayList<>();
-
-    f[0] = new Field("TABLE_CAT", Oid.VARCHAR);
-    f[1] = new Field("TABLE_SCHEM", Oid.VARCHAR);
-    f[2] = new Field("TABLE_NAME", Oid.VARCHAR);
-    f[3] = new Field("COLUMN_NAME", Oid.VARCHAR);
-    f[4] = new Field("GRANTOR", Oid.VARCHAR);
-    f[5] = new Field("GRANTEE", Oid.VARCHAR);
-    f[6] = new Field("PRIVILEGE", Oid.VARCHAR);
-    f[7] = new Field("IS_GRANTABLE", Oid.VARCHAR);
-
-    String sql;
-    sql = "SELECT n.nspname,c.relname,r.rolname,c.relacl, "
-          + (connection.haveMinimumServerVersion(ServerVersion.v8_4) ? "a.attacl, " : "")
-          + " a.attname "
-          + " FROM pg_catalog.pg_namespace n, pg_catalog.pg_class c, "
-          + " pg_catalog.pg_roles r, pg_catalog.pg_attribute a "
-          + " WHERE c.relnamespace = n.oid "
-          + " AND c.relowner = r.oid "
-          + " AND c.oid = a.attrelid "
-          + " AND c.relkind = 'r' "
-          + " AND a.attnum > 0 AND NOT a.attisdropped ";
-
-    if (schema != null && !schema.isEmpty()) {
-      sql += " AND n.nspname = " + escapeQuotes(schema);
-    }
-    if (table != null && !table.isEmpty()) {
-      sql += " AND c.relname = " + escapeQuotes(table);
-    }
-    if (columnNamePattern != null && !columnNamePattern.isEmpty()) {
-      sql += " AND a.attname LIKE " + escapeQuotes(columnNamePattern);
-    }
-    sql += " ORDER BY attname ";
-
-    Statement stmt = connection.createStatement();
-    ResultSet rs = stmt.executeQuery(sql);
-    while (rs.next()) {
-      byte[] schemaName = rs.getBytes("nspname");
-      byte[] tableName = rs.getBytes("relname");
-      byte[] column = rs.getBytes("attname");
-      String owner = rs.getString("rolname");
-      String relAcl = rs.getString("relacl");
-
-      // For instance: SELECT -> user1 -> list of [grantor, grantable]
-      Map<String, Map<String, List<String[]>>> permissions = parseACL(relAcl, owner);
-
-      if (connection.haveMinimumServerVersion(ServerVersion.v8_4)) {
-        String acl = rs.getString("attacl");
-        Map<String, Map<String, List<String[]>>> relPermissions = parseACL(acl, owner);
-        permissions.putAll(relPermissions);
-      }
-      String[] permNames = permissions.keySet().toArray(new String[0]);
-      Arrays.sort(permNames);
-      for (String permName : permNames) {
-        byte[] privilege = connection.encodeString(permName);
-        Map<String, List<String[]>> grantees = permissions.get(permName);
-        for (Map.Entry<String, List<String[]>> userToGrantable : grantees.entrySet()) {
-          List<String[]> grantor = userToGrantable.getValue();
-          String grantee = userToGrantable.getKey();
-          for (String[] grants : grantor) {
-            String grantable = owner.equals(grantee) ? "YES" : grants[1];
-            byte[] [] tuple = new byte[8][];
-            tuple[0] = null;
-            tuple[1] = schemaName;
-            tuple[2] = tableName;
-            tuple[3] = column;
-            tuple[4] = connection.encodeString(grants[0]);
-            tuple[5] = connection.encodeString(grantee);
-            tuple[6] = privilege;
-            tuple[7] = connection.encodeString(grantable);
             v.add(new Tuple(tuple));
-          }
         }
-      }
-    }
-    rs.close();
-    stmt.close();
+        rs.close();
+        stmt.close();
 
-    return ((BaseStatement) createMetaDataStatement()).createDriverResultSet(f, v);
-  }
-
-  @Override
-  public ResultSet getTablePrivileges(String catalog, String schemaPattern,
-      String tableNamePattern) throws SQLException {
-    Field[] f = new Field[7];
-    List<Tuple> v = new ArrayList<>();
-
-    f[0] = new Field("TABLE_CAT", Oid.VARCHAR);
-    f[1] = new Field("TABLE_SCHEM", Oid.VARCHAR);
-    f[2] = new Field("TABLE_NAME", Oid.VARCHAR);
-    f[3] = new Field("GRANTOR", Oid.VARCHAR);
-    f[4] = new Field("GRANTEE", Oid.VARCHAR);
-    f[5] = new Field("PRIVILEGE", Oid.VARCHAR);
-    f[6] = new Field("IS_GRANTABLE", Oid.VARCHAR);
-
-    String sql;
-    // r = ordinary table, p = partitioned table, v = view, m = materialized view, f = foreign table
-    sql = "SELECT n.nspname,c.relname,r.rolname,c.relacl "
-          + " FROM pg_catalog.pg_namespace n, pg_catalog.pg_class c, pg_catalog.pg_roles r "
-          + " WHERE c.relnamespace = n.oid "
-          + " AND c.relowner = r.oid "
-          + " AND c.relkind IN ('r','p','v','m','f') ";
-
-    if (schemaPattern != null && !schemaPattern.isEmpty()) {
-      sql += " AND n.nspname LIKE " + escapeQuotes(schemaPattern);
+        return ((BaseStatement) createMetaDataStatement()).createDriverResultSet(f, v);
     }
 
-    if (tableNamePattern != null && !tableNamePattern.isEmpty()) {
-      sql += " AND c.relname LIKE " + escapeQuotes(tableNamePattern);
-    }
-    sql += " ORDER BY nspname, relname ";
+    @Override
+    public ResultSet getColumnPrivileges(String catalog, String schema,
+                                         String table, String columnNamePattern) throws SQLException {
+        Field[] f = new Field[8];
+        List<Tuple> v = new ArrayList<>();
 
-    Statement stmt = connection.createStatement();
-    ResultSet rs = stmt.executeQuery(sql);
-    while (rs.next()) {
-      byte[] schema = rs.getBytes("nspname");
-      byte[] table = rs.getBytes("relname");
-      String owner = rs.getString("rolname");
-      String acl = rs.getString("relacl");
-      Map<String, Map<String, List<String[]>>> permissions = parseACL(acl, owner);
-      String[] permNames = permissions.keySet().toArray(new String[0]);
-      Arrays.sort(permNames);
-      for (String permName : permNames) {
-        byte[] privilege = connection.encodeString(permName);
-        Map<String, List<String[]>> grantees = permissions.get(permName);
-        for (Map.Entry<String, List<String[]>> userToGrantable : grantees.entrySet()) {
-          List<String[]> grants = userToGrantable.getValue();
-          String granteeUser = userToGrantable.getKey();
-          for (String[] grantTuple : grants) {
-            // report the owner as grantor if it's missing
-            String grantor = grantTuple[0] == null ? owner : grantTuple[0];
-            // owner always has grant privileges
-            String grantable = owner.equals(granteeUser) ? "YES" : grantTuple[1];
-            byte[] [] tuple = new byte[7][];
-            tuple[0] = null;
-            tuple[1] = schema;
-            tuple[2] = table;
-            tuple[3] = connection.encodeString(grantor);
-            tuple[4] = connection.encodeString(granteeUser);
-            tuple[5] = privilege;
-            tuple[6] = connection.encodeString(grantable);
+        f[0] = new Field("TABLE_CAT", Oid.VARCHAR);
+        f[1] = new Field("TABLE_SCHEM", Oid.VARCHAR);
+        f[2] = new Field("TABLE_NAME", Oid.VARCHAR);
+        f[3] = new Field("COLUMN_NAME", Oid.VARCHAR);
+        f[4] = new Field("GRANTOR", Oid.VARCHAR);
+        f[5] = new Field("GRANTEE", Oid.VARCHAR);
+        f[6] = new Field("PRIVILEGE", Oid.VARCHAR);
+        f[7] = new Field("IS_GRANTABLE", Oid.VARCHAR);
+
+        String sql;
+        sql = "SELECT n.nspname,c.relname,r.rolname,c.relacl, "
+                + (connection.haveMinimumServerVersion(ServerVersion.v8_4) ? "a.attacl, " : "")
+                + " a.attname "
+                + " FROM pg_catalog.pg_namespace n, pg_catalog.pg_class c, "
+                + " pg_catalog.pg_roles r, pg_catalog.pg_attribute a "
+                + " WHERE c.relnamespace = n.oid "
+                + " AND c.relowner = r.oid "
+                + " AND c.oid = a.attrelid "
+                + " AND c.relkind = 'r' "
+                + " AND a.attnum > 0 AND NOT a.attisdropped ";
+
+        if (schema != null && !schema.isEmpty()) {
+            sql += " AND n.nspname = " + escapeQuotes(schema);
+        }
+        if (table != null && !table.isEmpty()) {
+            sql += " AND c.relname = " + escapeQuotes(table);
+        }
+        if (columnNamePattern != null && !columnNamePattern.isEmpty()) {
+            sql += " AND a.attname LIKE " + escapeQuotes(columnNamePattern);
+        }
+        sql += " ORDER BY attname ";
+
+        Statement stmt = connection.createStatement();
+        ResultSet rs = stmt.executeQuery(sql);
+        while (rs.next()) {
+            byte[] schemaName = rs.getBytes("nspname");
+            byte[] tableName = rs.getBytes("relname");
+            byte[] column = rs.getBytes("attname");
+            String owner = rs.getString("rolname");
+            String relAcl = rs.getString("relacl");
+
+            // For instance: SELECT -> user1 -> list of [grantor, grantable]
+            Map<String, Map<String, List<String[]>>> permissions = parseACL(relAcl, owner);
+
+            if (connection.haveMinimumServerVersion(ServerVersion.v8_4)) {
+                String acl = rs.getString("attacl");
+                Map<String, Map<String, List<String[]>>> relPermissions = parseACL(acl, owner);
+                permissions.putAll(relPermissions);
+            }
+            String[] permNames = permissions.keySet().toArray(new String[0]);
+            Arrays.sort(permNames);
+            for (String permName : permNames) {
+                byte[] privilege = connection.encodeString(permName);
+                Map<String, List<String[]>> grantees = permissions.get(permName);
+                for (Map.Entry<String, List<String[]>> userToGrantable : grantees.entrySet()) {
+                    List<String[]> grantor = userToGrantable.getValue();
+                    String grantee = userToGrantable.getKey();
+                    for (String[] grants : grantor) {
+                        String grantable = owner.equals(grantee) ? "YES" : grants[1];
+                        byte[][] tuple = new byte[8][];
+                        tuple[0] = null;
+                        tuple[1] = schemaName;
+                        tuple[2] = tableName;
+                        tuple[3] = column;
+                        tuple[4] = connection.encodeString(grants[0]);
+                        tuple[5] = connection.encodeString(grantee);
+                        tuple[6] = privilege;
+                        tuple[7] = connection.encodeString(grantable);
+                        v.add(new Tuple(tuple));
+                    }
+                }
+            }
+        }
+        rs.close();
+        stmt.close();
+
+        return ((BaseStatement) createMetaDataStatement()).createDriverResultSet(f, v);
+    }
+
+    @Override
+    public ResultSet getTablePrivileges(String catalog, String schemaPattern,
+                                        String tableNamePattern) throws SQLException {
+        Field[] f = new Field[7];
+        List<Tuple> v = new ArrayList<>();
+
+        f[0] = new Field("TABLE_CAT", Oid.VARCHAR);
+        f[1] = new Field("TABLE_SCHEM", Oid.VARCHAR);
+        f[2] = new Field("TABLE_NAME", Oid.VARCHAR);
+        f[3] = new Field("GRANTOR", Oid.VARCHAR);
+        f[4] = new Field("GRANTEE", Oid.VARCHAR);
+        f[5] = new Field("PRIVILEGE", Oid.VARCHAR);
+        f[6] = new Field("IS_GRANTABLE", Oid.VARCHAR);
+
+        String sql;
+        // r = ordinary table, p = partitioned table, v = view, m = materialized view, f = foreign table
+        sql = "SELECT n.nspname,c.relname,r.rolname,c.relacl "
+                + " FROM pg_catalog.pg_namespace n, pg_catalog.pg_class c, pg_catalog.pg_roles r "
+                + " WHERE c.relnamespace = n.oid "
+                + " AND c.relowner = r.oid "
+                + " AND c.relkind IN ('r','p','v','m','f') ";
+
+        if (schemaPattern != null && !schemaPattern.isEmpty()) {
+            sql += " AND n.nspname LIKE " + escapeQuotes(schemaPattern);
+        }
+
+        if (tableNamePattern != null && !tableNamePattern.isEmpty()) {
+            sql += " AND c.relname LIKE " + escapeQuotes(tableNamePattern);
+        }
+        sql += " ORDER BY nspname, relname ";
+
+        Statement stmt = connection.createStatement();
+        ResultSet rs = stmt.executeQuery(sql);
+        while (rs.next()) {
+            byte[] schema = rs.getBytes("nspname");
+            byte[] table = rs.getBytes("relname");
+            String owner = rs.getString("rolname");
+            String acl = rs.getString("relacl");
+            Map<String, Map<String, List<String[]>>> permissions = parseACL(acl, owner);
+            String[] permNames = permissions.keySet().toArray(new String[0]);
+            Arrays.sort(permNames);
+            for (String permName : permNames) {
+                byte[] privilege = connection.encodeString(permName);
+                Map<String, List<String[]>> grantees = permissions.get(permName);
+                for (Map.Entry<String, List<String[]>> userToGrantable : grantees.entrySet()) {
+                    List<String[]> grants = userToGrantable.getValue();
+                    String granteeUser = userToGrantable.getKey();
+                    for (String[] grantTuple : grants) {
+                        // report the owner as grantor if it's missing
+                        String grantor = grantTuple[0] == null ? owner : grantTuple[0];
+                        // owner always has grant privileges
+                        String grantable = owner.equals(granteeUser) ? "YES" : grantTuple[1];
+                        byte[][] tuple = new byte[7][];
+                        tuple[0] = null;
+                        tuple[1] = schema;
+                        tuple[2] = table;
+                        tuple[3] = connection.encodeString(grantor);
+                        tuple[4] = connection.encodeString(granteeUser);
+                        tuple[5] = privilege;
+                        tuple[6] = connection.encodeString(grantable);
+                        v.add(new Tuple(tuple));
+                    }
+                }
+            }
+        }
+        rs.close();
+        stmt.close();
+
+        return ((BaseStatement) createMetaDataStatement()).createDriverResultSet(f, v);
+    }
+
+    /**
+     * Take a String representing an array of ACLs and return a Map mapping the SQL permission
+     * name to a List of usernames who have that permission.
+     * For instance: {@code SELECT -> user1 -> list of [grantor, grantable]}
+     *
+     * @param aclArray ACL array
+     * @param owner    owner
+     * @return a Map mapping the SQL permission name
+     */
+    public Map<String, Map<String, List<String[]>>> parseACL(String aclArray,
+                                                             String owner) {
+        if (aclArray == null) {
+            // arwdxt -- 8.2 Removed the separate RULE permission
+            // arwdDxt -- 8.4 Added a separate TRUNCATE permission
+            String perms = connection.haveMinimumServerVersion(ServerVersion.v8_4) ? "arwdDxt" : "arwdxt";
+
+            aclArray = "{" + owner + "=" + perms + "/" + owner + "}";
+        }
+
+        List<String> acls = parseACLArray(aclArray);
+        Map<String, Map<String, List<String[]>>> privileges =
+                new HashMap<>();
+        for (String acl : acls) {
+            addACLPrivileges(acl, privileges);
+        }
+        return privileges;
+    }
+
+    @Override
+    public ResultSet getBestRowIdentifier(
+            String catalog, String schema, String table,
+            int scope, boolean nullable) throws SQLException {
+        Field[] f = new Field[8];
+        List<Tuple> v = new ArrayList<>(); // The new ResultSet tuple stuff
+
+        f[0] = new Field("SCOPE", Oid.INT2);
+        f[1] = new Field("COLUMN_NAME", Oid.VARCHAR);
+        f[2] = new Field("DATA_TYPE", Oid.INT2);
+        f[3] = new Field("TYPE_NAME", Oid.VARCHAR);
+        f[4] = new Field("COLUMN_SIZE", Oid.INT4);
+        f[5] = new Field("BUFFER_LENGTH", Oid.INT4);
+        f[6] = new Field("DECIMAL_DIGITS", Oid.INT2);
+        f[7] = new Field("PSEUDO_COLUMN", Oid.INT2);
+
+        /*
+         * At the moment this simply returns a table's primary key, if there is one. I believe other
+         * unique indexes, ctid, and oid should also be considered. -KJ
+         */
+
+        String sql;
+        sql = "SELECT a.attname, a.atttypid, atttypmod "
+                + "FROM pg_catalog.pg_class ct "
+                + "  JOIN pg_catalog.pg_attribute a ON (ct.oid = a.attrelid) "
+                + "  JOIN pg_catalog.pg_namespace n ON (ct.relnamespace = n.oid) "
+                + "  JOIN (SELECT i.indexrelid, i.indrelid, i.indisprimary, "
+                + "             information_schema._pg_expandarray(i.indkey) AS keys "
+                + "        FROM pg_catalog.pg_index i) i "
+                + "    ON (a.attnum = (i.keys).x AND a.attrelid = i.indrelid) "
+                + "WHERE true ";
+
+        if (schema != null && !schema.isEmpty()) {
+            sql += " AND n.nspname = " + escapeQuotes(schema);
+        }
+
+        sql += " AND ct.relname = " + escapeQuotes(table)
+                + " AND i.indisprimary "
+                + " ORDER BY a.attnum ";
+
+        Statement stmt = connection.createStatement();
+        ResultSet rs = stmt.executeQuery(sql);
+        while (rs.next()) {
+            byte[][] tuple = new byte[8][];
+            int typeOid = (int) rs.getLong("atttypid");
+            int sqlType = connection.getTypeInfo().getSQLType(typeOid);
+            int typeMod = rs.getInt("atttypmod");
+            int decimalDigits = connection.getTypeInfo().getScale(typeOid, typeMod);
+            int columnSize = connection.getTypeInfo().getPrecision(typeOid, typeMod);
+            if (sqlType != Types.NUMERIC && columnSize == 0) {
+                columnSize = connection.getTypeInfo().getDisplaySize(typeOid, typeMod);
+            }
+            tuple[0] = connection.encodeString(Integer.toString(scope));
+            tuple[1] = rs.getBytes("attname");
+            tuple[2] =
+                    connection.encodeString(Integer.toString(sqlType));
+            tuple[3] = connection.encodeString(connection.getTypeInfo().getPGType(typeOid));
+            tuple[4] = connection.encodeString(Integer.toString(columnSize));
+            tuple[5] = null; // unused
+            tuple[6] = connection.encodeString(Integer.toString(decimalDigits));
+            tuple[7] =
+                    connection.encodeString(Integer.toString(DatabaseMetaData.bestRowNotPseudo));
             v.add(new Tuple(tuple));
-          }
         }
-      }
-    }
-    rs.close();
-    stmt.close();
+        rs.close();
+        stmt.close();
 
-    return ((BaseStatement) createMetaDataStatement()).createDriverResultSet(f, v);
-  }
-
-  /**
-   * Parse an String of ACLs into a List of ACLs.
-   */
-  private static List<String> parseACLArray(String aclString) {
-    List<String> acls = new ArrayList<>();
-    if (aclString == null || aclString.isEmpty()) {
-      return acls;
-    }
-    boolean inQuotes = false;
-    // start at 1 because of leading "{"
-    int beginIndex = 1;
-    char prevChar = ' ';
-    for (int i = beginIndex; i < aclString.length(); i++) {
-
-      char c = aclString.charAt(i);
-      if (c == '"' && prevChar != '\\') {
-        inQuotes = !inQuotes;
-      } else if (c == ',' && !inQuotes) {
-        acls.add(aclString.substring(beginIndex, i));
-        beginIndex = i + 1;
-      }
-      prevChar = c;
-    }
-    // add last element removing the trailing "}"
-    acls.add(aclString.substring(beginIndex, aclString.length() - 1));
-
-    // Strip out enclosing quotes, if any.
-    for (int i = 0; i < acls.size(); i++) {
-      String acl = acls.get(i);
-      if (acl.startsWith("\"") && acl.endsWith("\"")) {
-        acl = acl.substring(1, acl.length() - 1);
-        acls.set(i, acl);
-      }
-    }
-    return acls;
-  }
-
-  /**
-   * Add the user described by the given acl to the Lists of users with the privileges described by
-   * the acl.
-   */
-  private static void addACLPrivileges(String acl,
-      Map<String, Map<String, List<String[]>>> privileges) {
-    int equalIndex = acl.lastIndexOf("=");
-    int slashIndex = acl.lastIndexOf("/");
-    if (equalIndex == -1) {
-      return;
+        return ((BaseStatement) createMetaDataStatement()).createDriverResultSet(f, v);
     }
 
-    String user = acl.substring(0, equalIndex);
-    String grantor = null;
-    if (user.isEmpty()) {
-      user = "PUBLIC";
-    }
-    String privs;
-    if (slashIndex != -1) {
-      privs = acl.substring(equalIndex + 1, slashIndex);
-      grantor = acl.substring(slashIndex + 1, acl.length());
-    } else {
-      privs = acl.substring(equalIndex + 1, acl.length());
+    @Override
+    public ResultSet getVersionColumns(
+            String catalog, String schema, String table)
+            throws SQLException {
+        Field[] f = new Field[8];
+        List<Tuple> v = new ArrayList<>(); // The new ResultSet tuple stuff
+
+        f[0] = new Field("SCOPE", Oid.INT2);
+        f[1] = new Field("COLUMN_NAME", Oid.VARCHAR);
+        f[2] = new Field("DATA_TYPE", Oid.INT2);
+        f[3] = new Field("TYPE_NAME", Oid.VARCHAR);
+        f[4] = new Field("COLUMN_SIZE", Oid.INT4);
+        f[5] = new Field("BUFFER_LENGTH", Oid.INT4);
+        f[6] = new Field("DECIMAL_DIGITS", Oid.INT2);
+        f[7] = new Field("PSEUDO_COLUMN", Oid.INT2);
+
+        byte[][] tuple = new byte[8][];
+
+        /*
+         * PostgreSQL does not have any column types that are automatically updated like some databases'
+         * timestamp type. We can't tell what rules or triggers might be doing, so we are left with the
+         * system columns that change on an update. An update may change all of the following system
+         * columns: ctid, xmax, xmin, cmax, and cmin. Depending on if we are in a transaction and
+         * whether we roll it back or not the only guaranteed change is to ctid. -KJ
+         */
+
+        tuple[0] = null;
+        tuple[1] = connection.encodeString("ctid");
+        tuple[2] =
+                connection.encodeString(Integer.toString(connection.getTypeInfo().getSQLType("tid")));
+        tuple[3] = connection.encodeString("tid");
+        tuple[4] = null;
+        tuple[5] = null;
+        tuple[6] = null;
+        tuple[7] =
+                connection.encodeString(Integer.toString(DatabaseMetaData.versionColumnPseudo));
+        v.add(new Tuple(tuple));
+
+        /*
+         * Perhaps we should check that the given catalog.schema.table actually exists. -KJ
+         */
+        return ((BaseStatement) createMetaDataStatement()).createDriverResultSet(f, v);
     }
 
-    for (int i = 0; i < privs.length(); i++) {
-      char c = privs.charAt(i);
-      if (c != '*') {
-        String sqlpriv;
-        String grantable;
-        if (i < privs.length() - 1 && privs.charAt(i + 1) == '*') {
-          grantable = "YES";
-        } else {
-          grantable = "NO";
-        }
-        switch (c) {
-          case 'a':
-            sqlpriv = "INSERT";
-            break;
-          case 'r':
-          case 'p':
-            sqlpriv = "SELECT";
-            break;
-          case 'w':
-            sqlpriv = "UPDATE";
-            break;
-          case 'd':
-            sqlpriv = "DELETE";
-            break;
-          case 'D':
-            sqlpriv = "TRUNCATE";
-            break;
-          case 'R':
-            sqlpriv = "RULE";
-            break;
-          case 'x':
-            sqlpriv = "REFERENCES";
-            break;
-          case 't':
-            sqlpriv = "TRIGGER";
-            break;
-          // the following can't be granted to a table, but
-          // we'll keep them for completeness.
-          case 'X':
-            sqlpriv = "EXECUTE";
-            break;
-          case 'U':
-            sqlpriv = "USAGE";
-            break;
-          case 'C':
-            sqlpriv = "CREATE";
-            break;
-          case 'T':
-            sqlpriv = "CREATE TEMP";
-            break;
-          default:
-            sqlpriv = "UNKNOWN";
+    @Override
+    public ResultSet getPrimaryKeys(String catalog, String schema, String table)
+            throws SQLException {
+        String sql;
+        sql = "SELECT NULL AS TABLE_CAT, n.nspname AS TABLE_SCHEM, "
+                + "  ct.relname AS TABLE_NAME, a.attname AS COLUMN_NAME, "
+                + "  (information_schema._pg_expandarray(i.indkey)).n AS KEY_SEQ, ci.relname AS PK_NAME, "
+                + "  information_schema._pg_expandarray(i.indkey) AS KEYS, a.attnum AS A_ATTNUM "
+                + "FROM pg_catalog.pg_class ct "
+                + "  JOIN pg_catalog.pg_attribute a ON (ct.oid = a.attrelid) "
+                + "  JOIN pg_catalog.pg_namespace n ON (ct.relnamespace = n.oid) "
+                + "  JOIN pg_catalog.pg_index i ON ( a.attrelid = i.indrelid) "
+                + "  JOIN pg_catalog.pg_class ci ON (ci.oid = i.indexrelid) "
+                + "WHERE true ";
+
+        if (schema != null && !schema.isEmpty()) {
+            sql += " AND n.nspname = " + escapeQuotes(schema);
         }
 
-        Map<String, List<String[]>> usersWithPermission = privileges.get(sqlpriv);
-        if (usersWithPermission == null) {
-          usersWithPermission = new HashMap<>();
-          privileges.put(sqlpriv, usersWithPermission);
+        if (table != null && !table.isEmpty()) {
+            sql += " AND ct.relname = " + escapeQuotes(table);
         }
 
-        List<String[]> permissionByGrantor = usersWithPermission.get(user);
-        if (permissionByGrantor == null) {
-          permissionByGrantor = new ArrayList<>();
-          usersWithPermission.put(user, permissionByGrantor);
-        }
+        sql += " AND i.indisprimary ";
+        sql = "SELECT "
+                + "       result.TABLE_CAT, "
+                + "       result.TABLE_SCHEM, "
+                + "       result.TABLE_NAME, "
+                + "       result.COLUMN_NAME, "
+                + "       result.KEY_SEQ, "
+                + "       result.PK_NAME "
+                + "FROM "
+                + "     (" + sql + " ) result"
+                + " where "
+                + " result.A_ATTNUM = (result.KEYS).x ";
+        sql += " ORDER BY result.table_name, result.pk_name, result.key_seq";
 
-        String[] grant = {grantor, grantable};
-        permissionByGrantor.add(grant);
-      }
+        return createMetaDataStatement().executeQuery(sql);
     }
-  }
-
-  /**
-   * Take the a String representing an array of ACLs and return a Map mapping the SQL permission
-   * name to a List of usernames who have that permission.
-   * For instance: {@code SELECT -> user1 -> list of [grantor, grantable]}
-   *
-   * @param aclArray ACL array
-   * @param owner owner
-   * @return a Map mapping the SQL permission name
-   */
-  public Map<String, Map<String, List<String[]>>> parseACL(String aclArray,
-      String owner) {
-    if (aclArray == null) {
-      // arwdxt -- 8.2 Removed the separate RULE permission
-      // arwdDxt -- 8.4 Added a separate TRUNCATE permission
-      String perms = connection.haveMinimumServerVersion(ServerVersion.v8_4) ? "arwdDxt" : "arwdxt";
-
-      aclArray = "{" + owner + "=" + perms + "/" + owner + "}";
-    }
-
-    List<String> acls = parseACLArray(aclArray);
-    Map<String, Map<String, List<String[]>>> privileges =
-        new HashMap<>();
-    for (String acl : acls) {
-      addACLPrivileges(acl, privileges);
-    }
-    return privileges;
-  }
-
-  @Override
-  public ResultSet getBestRowIdentifier(
-      String catalog, String schema, String table,
-      int scope, boolean nullable) throws SQLException {
-    Field[] f = new Field[8];
-    List<Tuple> v = new ArrayList<>(); // The new ResultSet tuple stuff
-
-    f[0] = new Field("SCOPE", Oid.INT2);
-    f[1] = new Field("COLUMN_NAME", Oid.VARCHAR);
-    f[2] = new Field("DATA_TYPE", Oid.INT2);
-    f[3] = new Field("TYPE_NAME", Oid.VARCHAR);
-    f[4] = new Field("COLUMN_SIZE", Oid.INT4);
-    f[5] = new Field("BUFFER_LENGTH", Oid.INT4);
-    f[6] = new Field("DECIMAL_DIGITS", Oid.INT2);
-    f[7] = new Field("PSEUDO_COLUMN", Oid.INT2);
 
     /*
-     * At the moment this simply returns a table's primary key, if there is one. I believe other
-     * unique indexes, ctid, and oid should also be considered. -KJ
+    This is for internal use only to see if a resultset is updateable.
+    Unique keys can also be used so we add them to the query.
      */
+    protected ResultSet getPrimaryUniqueKeys(String catalog, String schema, String table)
+            throws SQLException {
+        String sql;
+        sql = "SELECT NULL AS TABLE_CAT, n.nspname AS TABLE_SCHEM, "
+                + "  ct.relname AS TABLE_NAME, a.attname AS COLUMN_NAME, "
+                + "  (information_schema._pg_expandarray(i.indkey)).n AS KEY_SEQ, ci.relname AS PK_NAME, "
+                + "  information_schema._pg_expandarray(i.indkey) AS KEYS, a.attnum AS A_ATTNUM, "
+                + "  a.attnotnull AS IS_NOT_NULL "
+                + "FROM pg_catalog.pg_class ct "
+                + "  JOIN pg_catalog.pg_attribute a ON (ct.oid = a.attrelid) "
+                + "  JOIN pg_catalog.pg_namespace n ON (ct.relnamespace = n.oid) "
+                + "  JOIN pg_catalog.pg_index i ON ( a.attrelid = i.indrelid) "
+                + "  JOIN pg_catalog.pg_class ci ON (ci.oid = i.indexrelid) "
+                // primary as well as unique keys can be used to uniquely identify a row to update
+                + "WHERE (i.indisprimary OR ( "
+                + "    i.indisunique "
+                + "    AND i.indisvalid "
+                // partial indexes are not allowed - indpred will not be null if this is a partial index
+                + "    AND i.indpred IS NULL "
+                // indexes with expressions are not allowed
+                + "    AND i.indexprs IS NULL "
+                + "  )) ";
 
-    String sql;
-    sql = "SELECT a.attname, a.atttypid, atttypmod "
-          + "FROM pg_catalog.pg_class ct "
-          + "  JOIN pg_catalog.pg_attribute a ON (ct.oid = a.attrelid) "
-          + "  JOIN pg_catalog.pg_namespace n ON (ct.relnamespace = n.oid) "
-          + "  JOIN (SELECT i.indexrelid, i.indrelid, i.indisprimary, "
-          + "             information_schema._pg_expandarray(i.indkey) AS keys "
-          + "        FROM pg_catalog.pg_index i) i "
-          + "    ON (a.attnum = (i.keys).x AND a.attrelid = i.indrelid) "
-          + "WHERE true ";
+        if (schema != null && !schema.isEmpty()) {
+            sql += " AND n.nspname = " + escapeQuotes(schema);
+        }
 
-    if (schema != null && !schema.isEmpty()) {
-      sql += " AND n.nspname = " + escapeQuotes(schema);
+        if (table != null && !table.isEmpty()) {
+            sql += " AND ct.relname = " + escapeQuotes(table);
+        }
+
+        sql = "SELECT "
+                + "       result.TABLE_CAT, "
+                + "       result.TABLE_SCHEM, "
+                + "       result.TABLE_NAME, "
+                + "       result.COLUMN_NAME, "
+                + "       result.KEY_SEQ, "
+                + "       result.PK_NAME, "
+                + "       result.IS_NOT_NULL "
+                + "FROM "
+                + "     (" + sql + " ) result"
+                + " where "
+                + " result.A_ATTNUM = (result.KEYS).x ";
+        sql += " ORDER BY result.table_name, result.pk_name, result.key_seq";
+
+        return createMetaDataStatement().executeQuery(sql);
     }
 
-    sql += " AND ct.relname = " + escapeQuotes(table)
-        + " AND i.indisprimary "
-        + " ORDER BY a.attnum ";
-
-    Statement stmt = connection.createStatement();
-    ResultSet rs = stmt.executeQuery(sql);
-    while (rs.next()) {
-      byte[] [] tuple = new byte[8][];
-      int typeOid = (int) rs.getLong("atttypid");
-      int sqlType = connection.getTypeInfo().getSQLType(typeOid);
-      int typeMod = rs.getInt("atttypmod");
-      int decimalDigits = connection.getTypeInfo().getScale(typeOid, typeMod);
-      int columnSize = connection.getTypeInfo().getPrecision(typeOid, typeMod);
-      if ( sqlType != Types.NUMERIC && columnSize == 0) {
-        columnSize = connection.getTypeInfo().getDisplaySize(typeOid, typeMod);
-      }
-      tuple[0] = connection.encodeString(Integer.toString(scope));
-      tuple[1] = rs.getBytes("attname");
-      tuple[2] =
-          connection.encodeString(Integer.toString(sqlType));
-      tuple[3] = connection.encodeString(connection.getTypeInfo().getPGType(typeOid));
-      tuple[4] = connection.encodeString(Integer.toString(columnSize));
-      tuple[5] = null; // unused
-      tuple[6] = connection.encodeString(Integer.toString(decimalDigits));
-      tuple[7] =
-          connection.encodeString(Integer.toString(DatabaseMetaData.bestRowNotPseudo));
-      v.add(new Tuple(tuple));
-    }
-    rs.close();
-    stmt.close();
-
-    return ((BaseStatement) createMetaDataStatement()).createDriverResultSet(f, v);
-  }
-
-  @Override
-  public ResultSet getVersionColumns(
-      String catalog, String schema, String table)
-      throws SQLException {
-    Field[] f = new Field[8];
-    List<Tuple> v = new ArrayList<>(); // The new ResultSet tuple stuff
-
-    f[0] = new Field("SCOPE", Oid.INT2);
-    f[1] = new Field("COLUMN_NAME", Oid.VARCHAR);
-    f[2] = new Field("DATA_TYPE", Oid.INT2);
-    f[3] = new Field("TYPE_NAME", Oid.VARCHAR);
-    f[4] = new Field("COLUMN_SIZE", Oid.INT4);
-    f[5] = new Field("BUFFER_LENGTH", Oid.INT4);
-    f[6] = new Field("DECIMAL_DIGITS", Oid.INT2);
-    f[7] = new Field("PSEUDO_COLUMN", Oid.INT2);
-
-    byte[] [] tuple = new byte[8][];
-
-    /*
-     * Postgresql does not have any column types that are automatically updated like some databases'
-     * timestamp type. We can't tell what rules or triggers might be doing, so we are left with the
-     * system columns that change on an update. An update may change all of the following system
-     * columns: ctid, xmax, xmin, cmax, and cmin. Depending on if we are in a transaction and
-     * whether we roll it back or not the only guaranteed change is to ctid. -KJ
+    /**
+     * @param primaryCatalog primary catalog
+     * @param primarySchema  primary schema
+     * @param primaryTable   if provided will get the keys exported by this table
+     * @param foreignCatalog foreign catalog
+     * @param foreignSchema  foreign schema
+     * @param foreignTable   if provided will get the keys imported by this table
+     * @return ResultSet
+     * @throws SQLException if something wrong happens
      */
+    protected ResultSet getImportedExportedKeys(
+            String primaryCatalog, String primarySchema, String primaryTable,
+            String foreignCatalog, String foreignSchema, String foreignTable)
+            throws SQLException {
 
-    tuple[0] = null;
-    tuple[1] = connection.encodeString("ctid");
-    tuple[2] =
-        connection.encodeString(Integer.toString(connection.getTypeInfo().getSQLType("tid")));
-    tuple[3] = connection.encodeString("tid");
-    tuple[4] = null;
-    tuple[5] = null;
-    tuple[6] = null;
-    tuple[7] =
-        connection.encodeString(Integer.toString(DatabaseMetaData.versionColumnPseudo));
-    v.add(new Tuple(tuple));
+        /*
+         * The addition of the pg_constraint in 7.3 table should have really helped us out here, but it
+         * comes up just a bit short. - The conkey, confkey columns aren't really useful without
+         * contrib/array unless we want to issues separate queries. - Unique indexes that can support
+         * foreign keys are not necessarily added to pg_constraint. Also multiple unique indexes
+         * covering the same keys can be created which make it difficult to determine the PK_NAME field.
+         */
 
-    /*
-     * Perhaps we should check that the given catalog.schema.table actually exists. -KJ
-     */
-    return ((BaseStatement) createMetaDataStatement()).createDriverResultSet(f, v);
-  }
-
-  @Override
-  public ResultSet getPrimaryKeys(String catalog, String schema, String table)
-      throws SQLException {
-    String sql;
-    sql = "SELECT NULL AS TABLE_CAT, n.nspname AS TABLE_SCHEM, "
-          + "  ct.relname AS TABLE_NAME, a.attname AS COLUMN_NAME, "
-          + "  (information_schema._pg_expandarray(i.indkey)).n AS KEY_SEQ, ci.relname AS PK_NAME, "
-          + "  information_schema._pg_expandarray(i.indkey) AS KEYS, a.attnum AS A_ATTNUM "
-          + "FROM pg_catalog.pg_class ct "
-          + "  JOIN pg_catalog.pg_attribute a ON (ct.oid = a.attrelid) "
-          + "  JOIN pg_catalog.pg_namespace n ON (ct.relnamespace = n.oid) "
-          + "  JOIN pg_catalog.pg_index i ON ( a.attrelid = i.indrelid) "
-          + "  JOIN pg_catalog.pg_class ci ON (ci.oid = i.indexrelid) "
-          + "WHERE true ";
-
-    if (schema != null && !schema.isEmpty()) {
-      sql += " AND n.nspname = " + escapeQuotes(schema);
-    }
-
-    if (table != null && !table.isEmpty()) {
-      sql += " AND ct.relname = " + escapeQuotes(table);
-    }
-
-    sql += " AND i.indisprimary ";
-    sql = "SELECT "
-            + "       result.TABLE_CAT, "
-            + "       result.TABLE_SCHEM, "
-            + "       result.TABLE_NAME, "
-            + "       result.COLUMN_NAME, "
-            + "       result.KEY_SEQ, "
-            + "       result.PK_NAME "
-            + "FROM "
-            + "     (" + sql + " ) result"
-            + " where "
-            + " result.A_ATTNUM = (result.KEYS).x ";
-    sql += " ORDER BY result.table_name, result.pk_name, result.key_seq";
-
-    return createMetaDataStatement().executeQuery(sql);
-  }
-
-  /*
-  This is for internal use only to see if a resultset is updateable.
-  Unique keys can also be used so we add them to the query.
-   */
-  protected ResultSet getPrimaryUniqueKeys(String catalog, String schema, String table)
-      throws SQLException {
-    String sql;
-    sql = "SELECT NULL AS TABLE_CAT, n.nspname AS TABLE_SCHEM, "
-        + "  ct.relname AS TABLE_NAME, a.attname AS COLUMN_NAME, "
-        + "  (information_schema._pg_expandarray(i.indkey)).n AS KEY_SEQ, ci.relname AS PK_NAME, "
-        + "  information_schema._pg_expandarray(i.indkey) AS KEYS, a.attnum AS A_ATTNUM, "
-        + "  a.attnotnull AS IS_NOT_NULL "
-        + "FROM pg_catalog.pg_class ct "
-        + "  JOIN pg_catalog.pg_attribute a ON (ct.oid = a.attrelid) "
-        + "  JOIN pg_catalog.pg_namespace n ON (ct.relnamespace = n.oid) "
-        + "  JOIN pg_catalog.pg_index i ON ( a.attrelid = i.indrelid) "
-        + "  JOIN pg_catalog.pg_class ci ON (ci.oid = i.indexrelid) "
-        // primary as well as unique keys can be used to uniquely identify a row to update
-        + "WHERE (i.indisprimary OR ( "
-        + "    i.indisunique "
-        + "    AND i.indisvalid "
-        // partial indexes are not allowed - indpred will not be null if this is a partial index
-        + "    AND i.indpred IS NULL "
-        // indexes with expressions are not allowed
-        + "    AND i.indexprs IS NULL "
-        + "  )) ";
-
-    if (schema != null && !schema.isEmpty()) {
-      sql += " AND n.nspname = " + escapeQuotes(schema);
-    }
-
-    if (table != null && !table.isEmpty()) {
-      sql += " AND ct.relname = " + escapeQuotes(table);
-    }
-
-    sql = "SELECT "
-        + "       result.TABLE_CAT, "
-        + "       result.TABLE_SCHEM, "
-        + "       result.TABLE_NAME, "
-        + "       result.COLUMN_NAME, "
-        + "       result.KEY_SEQ, "
-        + "       result.PK_NAME, "
-        + "       result.IS_NOT_NULL "
-        + "FROM "
-        + "     (" + sql + " ) result"
-        + " where "
-        + " result.A_ATTNUM = (result.KEYS).x ";
-    sql += " ORDER BY result.table_name, result.pk_name, result.key_seq";
-
-    return createMetaDataStatement().executeQuery(sql);
-  }
-
-  /**
-   * @param primaryCatalog primary catalog
-   * @param primarySchema primary schema
-   * @param primaryTable if provided will get the keys exported by this table
-   * @param foreignCatalog foreign catalog
-   * @param foreignSchema foreign schema
-   * @param foreignTable if provided will get the keys imported by this table
-   * @return ResultSet
-   * @throws SQLException if something wrong happens
-   */
-  protected ResultSet getImportedExportedKeys(
-      String primaryCatalog, String primarySchema, String primaryTable,
-      String foreignCatalog, String foreignSchema, String foreignTable)
-          throws SQLException {
-
-    /*
-     * The addition of the pg_constraint in 7.3 table should have really helped us out here, but it
-     * comes up just a bit short. - The conkey, confkey columns aren't really useful without
-     * contrib/array unless we want to issues separate queries. - Unique indexes that can support
-     * foreign keys are not necessarily added to pg_constraint. Also multiple unique indexes
-     * covering the same keys can be created which make it difficult to determine the PK_NAME field.
-     */
-
-    String sql =
-        "SELECT NULL::text AS PKTABLE_CAT, pkn.nspname AS PKTABLE_SCHEM, pkc.relname AS PKTABLE_NAME, pka.attname AS PKCOLUMN_NAME, "
-            + "NULL::text AS FKTABLE_CAT, fkn.nspname AS FKTABLE_SCHEM, fkc.relname AS FKTABLE_NAME, fka.attname AS FKCOLUMN_NAME, "
-            + "pos.n AS KEY_SEQ, "
-            + "CASE con.confupdtype "
-            + " WHEN 'c' THEN " + DatabaseMetaData.importedKeyCascade
-            + " WHEN 'n' THEN " + DatabaseMetaData.importedKeySetNull
-            + " WHEN 'd' THEN " + DatabaseMetaData.importedKeySetDefault
-            + " WHEN 'r' THEN " + DatabaseMetaData.importedKeyRestrict
-            + " WHEN 'p' THEN " + DatabaseMetaData.importedKeyRestrict
-            + " WHEN 'a' THEN " + DatabaseMetaData.importedKeyNoAction
-            + " ELSE NULL END AS UPDATE_RULE, "
-            + "CASE con.confdeltype "
-            + " WHEN 'c' THEN " + DatabaseMetaData.importedKeyCascade
-            + " WHEN 'n' THEN " + DatabaseMetaData.importedKeySetNull
-            + " WHEN 'd' THEN " + DatabaseMetaData.importedKeySetDefault
-            + " WHEN 'r' THEN " + DatabaseMetaData.importedKeyRestrict
-            + " WHEN 'p' THEN " + DatabaseMetaData.importedKeyRestrict
-            + " WHEN 'a' THEN " + DatabaseMetaData.importedKeyNoAction
-            + " ELSE NULL END AS DELETE_RULE, "
-            + "con.conname AS FK_NAME, pkic.relname AS PK_NAME, "
-            + "CASE "
-            + " WHEN con.condeferrable AND con.condeferred THEN "
-            + DatabaseMetaData.importedKeyInitiallyDeferred
-            + " WHEN con.condeferrable THEN " + DatabaseMetaData.importedKeyInitiallyImmediate
-            + " ELSE " + DatabaseMetaData.importedKeyNotDeferrable
-            + " END AS DEFERRABILITY "
-            + " FROM "
-            + " pg_catalog.pg_namespace pkn, pg_catalog.pg_class pkc, pg_catalog.pg_attribute pka, "
-            + " pg_catalog.pg_namespace fkn, pg_catalog.pg_class fkc, pg_catalog.pg_attribute fka, "
-            + " pg_catalog.pg_constraint con, "
-            + " pg_catalog.generate_series(1, " + getMaxIndexKeys() + ") pos(n), "
-            + " pg_catalog.pg_class pkic";
-    // Starting in Postgres 9.0, pg_constraint was augmented with the conindid column, which
-    // contains the oid of the index supporting the constraint. This makes it unnecessary to do a
-    // further join on pg_depend.
-    if (!connection.haveMinimumServerVersion(ServerVersion.v9_0)) {
-      sql += ", pg_catalog.pg_depend dep ";
-    }
-    sql +=
-        " WHERE pkn.oid = pkc.relnamespace AND pkc.oid = pka.attrelid AND pka.attnum = con.confkey[pos.n] AND con.confrelid = pkc.oid "
-            + " AND fkn.oid = fkc.relnamespace AND fkc.oid = fka.attrelid AND fka.attnum = con.conkey[pos.n] AND con.conrelid = fkc.oid "
-            + " AND con.contype = 'f' ";
+        String sql =
+                "SELECT NULL::text AS PKTABLE_CAT, pkn.nspname AS PKTABLE_SCHEM, pkc.relname AS PKTABLE_NAME, pka.attname AS PKCOLUMN_NAME, "
+                        + "NULL::text AS FKTABLE_CAT, fkn.nspname AS FKTABLE_SCHEM, fkc.relname AS FKTABLE_NAME, fka.attname AS FKCOLUMN_NAME, "
+                        + "pos.n AS KEY_SEQ, "
+                        + "CASE con.confupdtype "
+                        + " WHEN 'c' THEN " + DatabaseMetaData.importedKeyCascade
+                        + " WHEN 'n' THEN " + DatabaseMetaData.importedKeySetNull
+                        + " WHEN 'd' THEN " + DatabaseMetaData.importedKeySetDefault
+                        + " WHEN 'r' THEN " + DatabaseMetaData.importedKeyRestrict
+                        + " WHEN 'p' THEN " + DatabaseMetaData.importedKeyRestrict
+                        + " WHEN 'a' THEN " + DatabaseMetaData.importedKeyNoAction
+                        + " ELSE NULL END AS UPDATE_RULE, "
+                        + "CASE con.confdeltype "
+                        + " WHEN 'c' THEN " + DatabaseMetaData.importedKeyCascade
+                        + " WHEN 'n' THEN " + DatabaseMetaData.importedKeySetNull
+                        + " WHEN 'd' THEN " + DatabaseMetaData.importedKeySetDefault
+                        + " WHEN 'r' THEN " + DatabaseMetaData.importedKeyRestrict
+                        + " WHEN 'p' THEN " + DatabaseMetaData.importedKeyRestrict
+                        + " WHEN 'a' THEN " + DatabaseMetaData.importedKeyNoAction
+                        + " ELSE NULL END AS DELETE_RULE, "
+                        + "con.conname AS FK_NAME, pkic.relname AS PK_NAME, "
+                        + "CASE "
+                        + " WHEN con.condeferrable AND con.condeferred THEN "
+                        + DatabaseMetaData.importedKeyInitiallyDeferred
+                        + " WHEN con.condeferrable THEN " + DatabaseMetaData.importedKeyInitiallyImmediate
+                        + " ELSE " + DatabaseMetaData.importedKeyNotDeferrable
+                        + " END AS DEFERRABILITY "
+                        + " FROM "
+                        + " pg_catalog.pg_namespace pkn, pg_catalog.pg_class pkc, pg_catalog.pg_attribute pka, "
+                        + " pg_catalog.pg_namespace fkn, pg_catalog.pg_class fkc, pg_catalog.pg_attribute fka, "
+                        + " pg_catalog.pg_constraint con, "
+                        + " pg_catalog.generate_series(1, " + getMaxIndexKeys() + ") pos(n), "
+                        + " pg_catalog.pg_class pkic";
+        // Starting in Postgres 9.0, pg_constraint was augmented with the conindid column, which
+        // contains the oid of the index supporting the constraint. This makes it unnecessary to do a
+        // further join on pg_depend.
+        if (!connection.haveMinimumServerVersion(ServerVersion.v9_0)) {
+            sql += ", pg_catalog.pg_depend dep ";
+        }
+        sql +=
+                " WHERE pkn.oid = pkc.relnamespace AND pkc.oid = pka.attrelid AND pka.attnum = con.confkey[pos.n] AND con.confrelid = pkc.oid "
+                        + " AND fkn.oid = fkc.relnamespace AND fkc.oid = fka.attrelid AND fka.attnum = con.conkey[pos.n] AND con.conrelid = fkc.oid "
+                        + " AND con.contype = 'f' ";
     /*
     In version 11 we added Partitioned indexes indicated by relkind = 'I'
     I could have done this using lower(relkind) = 'i' but chose to be explicit
     for clarity
     */
 
-    if (!connection.haveMinimumServerVersion(ServerVersion.v11)) {
-      sql += "AND pkic.relkind = 'i' ";
-    } else {
-      sql += "AND (pkic.relkind = 'i' OR pkic.relkind = 'I')";
-    }
-
-    if (!connection.haveMinimumServerVersion(ServerVersion.v9_0)) {
-      sql += " AND con.oid = dep.objid AND pkic.oid = dep.refobjid AND dep.classid = 'pg_constraint'::regclass::oid AND dep.refclassid = 'pg_class'::regclass::oid ";
-    } else {
-      sql += " AND pkic.oid = con.conindid ";
-    }
-
-    if (primarySchema != null && !primarySchema.isEmpty()) {
-      sql += " AND pkn.nspname = " + escapeQuotes(primarySchema);
-    }
-    if (foreignSchema != null && !foreignSchema.isEmpty()) {
-      sql += " AND fkn.nspname = " + escapeQuotes(foreignSchema);
-    }
-    if (primaryTable != null && !primaryTable.isEmpty()) {
-      sql += " AND pkc.relname = " + escapeQuotes(primaryTable);
-    }
-    if (foreignTable != null && !foreignTable.isEmpty()) {
-      sql += " AND fkc.relname = " + escapeQuotes(foreignTable);
-    }
-
-    if (primaryTable != null) {
-      sql += " ORDER BY fkn.nspname,fkc.relname,con.conname,pos.n";
-    } else {
-      sql += " ORDER BY pkn.nspname,pkc.relname, con.conname,pos.n";
-    }
-
-    return createMetaDataStatement().executeQuery(sql);
-  }
-
-  @Override
-  public ResultSet getImportedKeys(String catalog, String schema, String table)
-      throws SQLException {
-    return getImportedExportedKeys(null, null, null, catalog, schema, table);
-  }
-
-  @Override
-  public ResultSet getExportedKeys(String catalog, String schema, String table)
-      throws SQLException {
-    return getImportedExportedKeys(catalog, schema, table, null, null, null);
-  }
-
-  @Override
-  public ResultSet getCrossReference(
-      String primaryCatalog, String primarySchema, String primaryTable,
-      String foreignCatalog, String foreignSchema, String foreignTable)
-      throws SQLException {
-    return getImportedExportedKeys(primaryCatalog, primarySchema, primaryTable, foreignCatalog,
-        foreignSchema, foreignTable);
-  }
-
-  @Override
-  public ResultSet getTypeInfo() throws SQLException {
-
-    Field[] f = new Field[18];
-    List<Tuple> v = new ArrayList<>(); // The new ResultSet tuple stuff
-
-    f[0] = new Field("TYPE_NAME", Oid.VARCHAR);
-    f[1] = new Field("DATA_TYPE", Oid.INT2);
-    f[2] = new Field("PRECISION", Oid.INT4);
-    f[3] = new Field("LITERAL_PREFIX", Oid.VARCHAR);
-    f[4] = new Field("LITERAL_SUFFIX", Oid.VARCHAR);
-    f[5] = new Field("CREATE_PARAMS", Oid.VARCHAR);
-    f[6] = new Field("NULLABLE", Oid.INT2);
-    f[7] = new Field("CASE_SENSITIVE", Oid.BOOL);
-    f[8] = new Field("SEARCHABLE", Oid.INT2);
-    f[9] = new Field("UNSIGNED_ATTRIBUTE", Oid.BOOL);
-    f[10] = new Field("FIXED_PREC_SCALE", Oid.BOOL);
-    f[11] = new Field("AUTO_INCREMENT", Oid.BOOL);
-    f[12] = new Field("LOCAL_TYPE_NAME", Oid.VARCHAR);
-    f[13] = new Field("MINIMUM_SCALE", Oid.INT2);
-    f[14] = new Field("MAXIMUM_SCALE", Oid.INT2);
-    f[15] = new Field("SQL_DATA_TYPE", Oid.INT4);
-    f[16] = new Field("SQL_DATETIME_SUB", Oid.INT4);
-    f[17] = new Field("NUM_PREC_RADIX", Oid.INT4);
-
-    String sql;
-    sql = "SELECT t.typname,t.oid FROM pg_catalog.pg_type t"
-          + " JOIN pg_catalog.pg_namespace n ON (t.typnamespace = n.oid) "
-          + " WHERE n.nspname  != 'pg_toast'"
-          + " AND "
-          + " (t.typrelid = 0 OR (SELECT c.relkind = 'c' FROM pg_catalog.pg_class c WHERE c.oid = t.typrelid))";
-
-    if (connection.getHideUnprivilegedObjects() && connection.haveMinimumServerVersion(ServerVersion.v9_2)) {
-      sql += " AND has_type_privilege(t.oid, 'USAGE')";
-    }
-
-    Statement stmt = connection.createStatement();
-    ResultSet rs = stmt.executeQuery(sql);
-    // cache some results, this will keep memory usage down, and speed
-    // things up a little.
-    byte[] bZero = connection.encodeString("0");
-    byte[] b10 = connection.encodeString("10");
-    byte[] bf = connection.encodeString("f");
-    byte[] bt = connection.encodeString("t");
-    byte[] bliteral = connection.encodeString("'");
-    byte[] bNullable =
-              connection.encodeString(Integer.toString(DatabaseMetaData.typeNullable));
-    byte[] bSearchable =
-              connection.encodeString(Integer.toString(DatabaseMetaData.typeSearchable));
-
-    TypeInfo ti = connection.getTypeInfo();
-    if (ti instanceof TypeInfoCache) {
-      ((TypeInfoCache) ti).cacheSQLTypes();
-    }
-
-    while (rs.next()) {
-      byte[] [] tuple = new byte[19][];
-      String typname = rs.getString(1);
-      int typeOid = (int) rs.getLong(2);
-
-      tuple[0] = connection.encodeString(typname);
-      int sqlType = connection.getTypeInfo().getSQLType(typname);
-      tuple[1] =
-          connection.encodeString(Integer.toString(sqlType));
-
-      /* this is just for sorting below, the result set never sees this */
-      tuple[18] = BigInteger.valueOf(sqlType).toByteArray();
-
-      tuple[2] = connection
-          .encodeString(Integer.toString(connection.getTypeInfo().getMaximumPrecision(typeOid)));
-
-      // Using requiresQuoting(oid) would might trigger select statements that might fail with NPE
-      // if oid in question is being dropped.
-      // requiresQuotingSqlType is not bulletproof, however, it solves the most visible NPE.
-      if (connection.getTypeInfo().requiresQuotingSqlType(sqlType)) {
-        tuple[3] = bliteral;
-        tuple[4] = bliteral;
-      }
-
-      tuple[6] = bNullable; // all types can be null
-      tuple[7] = connection.getTypeInfo().isCaseSensitive(typeOid) ? bt : bf;
-      tuple[8] = bSearchable; // any thing can be used in the WHERE clause
-      tuple[9] = connection.getTypeInfo().isSigned(typeOid) ? bf : bt;
-      tuple[10] = bf; // false for now - must handle money
-      tuple[11] = bf; // false - it isn't autoincrement
-      tuple[13] = bZero; // min scale is zero
-      // only numeric can supports a scale.
-      tuple[14] = typeOid == Oid.NUMERIC ? connection.encodeString("1000") : bZero;
-
-      // 12 - LOCAL_TYPE_NAME is null
-      // 15 & 16 are unused so we return null
-      tuple[17] = b10; // everything is base 10
-      v.add(new Tuple(tuple));
-
-      // add pseudo-type serial, bigserial, smallserial
-      if ("int4".equals(typname)) {
-        byte[] [] tuple1 = tuple.clone();
-
-        tuple1[0] = connection.encodeString("serial");
-        tuple1[11] = bt;
-        v.add(new Tuple(tuple1));
-      } else if ("int8".equals(typname)) {
-        byte[] [] tuple1 = tuple.clone();
-
-        tuple1[0] = connection.encodeString("bigserial");
-        tuple1[11] = bt;
-        v.add(new Tuple(tuple1));
-      } else if ("int2".equals(typname) && connection.haveMinimumServerVersion(ServerVersion.v9_2)) {
-        byte[] [] tuple1 = tuple.clone();
-
-        tuple1[0] = connection.encodeString("smallserial");
-        tuple1[11] = bt;
-        v.add(new Tuple(tuple1));
-      }
-
-    }
-    rs.close();
-    stmt.close();
-
-    Collections.sort(v, new Comparator<Tuple>() {
-      @Override
-      public int compare(Tuple o1, Tuple o2) {
-        int i1 = ByteConverter.bytesToInt(o1.get(18));
-        int i2 = ByteConverter.bytesToInt(o2.get(18));
-        return i1 < i2 ? -1 : (i1 == i2 ? 0 : 1);
-      }
-    });
-    return ((BaseStatement) createMetaDataStatement()).createDriverResultSet(f, v);
-  }
-
-  @Override
-  public ResultSet getIndexInfo(
-      String catalog, String schema, String tableName,
-      boolean unique, boolean approximate) throws SQLException {
-    /*
-     * This is a complicated function because we have three possible situations: <= 7.2 no schemas,
-     * single column functional index 7.3 schemas, single column functional index >= 7.4 schemas,
-     * multi-column expressional index >= 8.3 supports ASC/DESC column info >= 9.0 no longer renames
-     * index columns on a table column rename, so we must look at the table attribute names
-     *
-     * with the single column functional index we need an extra join to the table's pg_attribute
-     * data to get the column the function operates on.
-     */
-    String sql;
-    if (connection.haveMinimumServerVersion(ServerVersion.v8_3)) {
-      sql = "SELECT NULL AS TABLE_CAT, n.nspname AS TABLE_SCHEM, "
-            + "  ct.relname AS TABLE_NAME, NOT i.indisunique AS NON_UNIQUE, "
-            + "  NULL AS INDEX_QUALIFIER, ci.relname AS INDEX_NAME, "
-            + "  CASE i.indisclustered "
-            + "    WHEN true THEN " + DatabaseMetaData.tableIndexClustered
-            + "    ELSE CASE am.amname "
-            + "      WHEN 'hash' THEN " + DatabaseMetaData.tableIndexHashed
-            + "      ELSE " + DatabaseMetaData.tableIndexOther
-            + "    END "
-            + "  END AS TYPE, "
-            + "  (information_schema._pg_expandarray(i.indkey)).n AS ORDINAL_POSITION, "
-            + "  ci.reltuples AS CARDINALITY, "
-            + "  ci.relpages AS PAGES, "
-            + "  pg_catalog.pg_get_expr(i.indpred, i.indrelid) AS FILTER_CONDITION, "
-            + "  ci.oid AS CI_OID, "
-            + "  i.indoption AS I_INDOPTION, "
-            + (connection.haveMinimumServerVersion(ServerVersion.v9_6) ? "  am.amname AS AM_NAME " : "  am.amcanorder AS AM_CANORDER ")
-            + "FROM pg_catalog.pg_class ct "
-            + "  JOIN pg_catalog.pg_namespace n ON (ct.relnamespace = n.oid) "
-            + "  JOIN pg_catalog.pg_index i ON (ct.oid = i.indrelid) "
-            + "  JOIN pg_catalog.pg_class ci ON (ci.oid = i.indexrelid) "
-            + "  JOIN pg_catalog.pg_am am ON (ci.relam = am.oid) "
-            + "WHERE true ";
-
-      if (schema != null && !schema.isEmpty()) {
-        sql += " AND n.nspname = " + escapeQuotes(schema);
-      }
-
-      sql += " AND ct.relname = " + escapeQuotes(tableName);
-
-      if (unique) {
-        sql += " AND i.indisunique ";
-      }
-
-      sql = "SELECT "
-                + "    tmp.TABLE_CAT, "
-                + "    tmp.TABLE_SCHEM, "
-                + "    tmp.TABLE_NAME, "
-                + "    tmp.NON_UNIQUE, "
-                + "    tmp.INDEX_QUALIFIER, "
-                + "    tmp.INDEX_NAME, "
-                + "    tmp.TYPE, "
-                + "    tmp.ORDINAL_POSITION, "
-                + "    trim(both '\"' from pg_catalog.pg_get_indexdef(tmp.CI_OID, tmp.ORDINAL_POSITION, false)) AS COLUMN_NAME, "
-                + (connection.haveMinimumServerVersion(ServerVersion.v9_6)
-                        ? "  CASE tmp.AM_NAME "
-                        + "    WHEN 'btree' THEN CASE tmp.I_INDOPTION[tmp.ORDINAL_POSITION - 1] & 1::smallint "
-                        + "      WHEN 1 THEN 'D' "
-                        + "      ELSE 'A' "
-                        + "    END "
-                        + "    ELSE NULL "
-                        + "  END AS ASC_OR_DESC, "
-                        : "  CASE tmp.AM_CANORDER "
-                        + "    WHEN true THEN CASE tmp.I_INDOPTION[tmp.ORDINAL_POSITION - 1] & 1::smallint "
-                        + "      WHEN 1 THEN 'D' "
-                        + "      ELSE 'A' "
-                        + "    END "
-                        + "    ELSE NULL "
-                        + "  END AS ASC_OR_DESC, ")
-                + "    tmp.CARDINALITY, "
-                + "    tmp.PAGES, "
-                + "    tmp.FILTER_CONDITION "
-                + "FROM ("
-                + sql
-                + ") AS tmp";
-    } else {
-      String select;
-      String from;
-      String where;
-
-      select = "SELECT NULL AS TABLE_CAT, n.nspname AS TABLE_SCHEM, ";
-      from = " FROM pg_catalog.pg_namespace n, pg_catalog.pg_class ct, pg_catalog.pg_class ci, "
-             + " pg_catalog.pg_attribute a, pg_catalog.pg_am am ";
-      where = " AND n.oid = ct.relnamespace ";
-      from += ", pg_catalog.pg_index i ";
-
-      if (schema != null && !schema.isEmpty()) {
-        where += " AND n.nspname = " + escapeQuotes(schema);
-      }
-
-      sql = select
-            + " ct.relname AS TABLE_NAME, NOT i.indisunique AS NON_UNIQUE, NULL AS INDEX_QUALIFIER, ci.relname AS INDEX_NAME, "
-            + " CASE i.indisclustered "
-            + " WHEN true THEN " + DatabaseMetaData.tableIndexClustered
-            + " ELSE CASE am.amname "
-            + " WHEN 'hash' THEN " + DatabaseMetaData.tableIndexHashed
-            + " ELSE " + DatabaseMetaData.tableIndexOther
-            + " END "
-            + " END AS TYPE, "
-            + " a.attnum AS ORDINAL_POSITION, "
-            + " CASE WHEN i.indexprs IS NULL THEN a.attname "
-            + " ELSE pg_catalog.pg_get_indexdef(ci.oid,a.attnum,false) END AS COLUMN_NAME, "
-            + " NULL AS ASC_OR_DESC, "
-            + " ci.reltuples AS CARDINALITY, "
-            + " ci.relpages AS PAGES, "
-            + " pg_catalog.pg_get_expr(i.indpred, i.indrelid) AS FILTER_CONDITION "
-            + from
-            + " WHERE ct.oid=i.indrelid AND ci.oid=i.indexrelid AND a.attrelid=ci.oid AND ci.relam=am.oid "
-            + where;
-
-      sql += " AND ct.relname = " + escapeQuotes(tableName);
-
-      if (unique) {
-        sql += " AND i.indisunique ";
-      }
-    }
-
-    sql += " ORDER BY NON_UNIQUE, TYPE, INDEX_NAME, ORDINAL_POSITION ";
-
-    return ((PgResultSet) createMetaDataStatement().executeQuery(sql)).upperCaseFieldLabels();
-  }
-
-  // ** JDBC 2 Extensions **
-
-  @Override
-  public boolean supportsResultSetType(int type) throws SQLException {
-    // The only type we don't support
-    return type != ResultSet.TYPE_SCROLL_SENSITIVE;
-  }
-
-  @Override
-  public boolean supportsResultSetConcurrency(int type, int concurrency) throws SQLException {
-    // These combinations are not supported!
-    if (type == ResultSet.TYPE_SCROLL_SENSITIVE) {
-      return false;
-    }
-
-    // We do support Updateable ResultSets
-    if (concurrency == ResultSet.CONCUR_UPDATABLE) {
-      return true;
-    }
-
-    // Everything else we do
-    return true;
-  }
-
-  /* lots of unsupported stuff... */
-  @Override
-  public boolean ownUpdatesAreVisible(int type) throws SQLException {
-    return true;
-  }
-
-  @Override
-  public boolean ownDeletesAreVisible(int type) throws SQLException {
-    return true;
-  }
-
-  @Override
-  public boolean ownInsertsAreVisible(int type) throws SQLException {
-    // indicates that
-    return true;
-  }
-
-  @Override
-  public boolean othersUpdatesAreVisible(int type) throws SQLException {
-    return false;
-  }
-
-  @Override
-  public boolean othersDeletesAreVisible(int i) throws SQLException {
-    return false;
-  }
-
-  @Override
-  public boolean othersInsertsAreVisible(int type) throws SQLException {
-    return false;
-  }
-
-  @Override
-  public boolean updatesAreDetected(int type) throws SQLException {
-    return false;
-  }
-
-  @Override
-  public boolean deletesAreDetected(int i) throws SQLException {
-    return false;
-  }
-
-  @Override
-  public boolean insertsAreDetected(int type) throws SQLException {
-    return false;
-  }
-
-  @Override
-  public boolean supportsBatchUpdates() throws SQLException {
-    return true;
-  }
-
-  @Override
-  public ResultSet getUDTs(String catalog, String schemaPattern,
-      String typeNamePattern, int [] types) throws SQLException {
-    String sql = "select "
-        + "null as type_cat, n.nspname as type_schem, t.typname as type_name,  null as class_name, "
-        + "CASE WHEN t.typtype='c' then " + Types.STRUCT + " else "
-        + Types.DISTINCT
-        + " end as data_type, pg_catalog.obj_description(t.oid, 'pg_type')  "
-        + "as remarks, CASE WHEN t.typtype = 'd' then  (select CASE";
-    TypeInfo typeInfo = connection.getTypeInfo();
-
-    StringBuilder sqlwhen = new StringBuilder();
-    for (Iterator<Integer> i = typeInfo.getPGTypeOidsWithSQLTypes(); i.hasNext(); ) {
-      Integer typOid = i.next();
-      // NB: Java Integers are signed 32-bit integers, but oids are unsigned 32-bit integers.
-      // We must therefore map it to a positive long value before writing it into the query,
-      // or we'll be unable to correctly handle ~ half of the oid space.
-      long longTypOid = typeInfo.intOidToLong(typOid);
-      int sqlType = typeInfo.getSQLType(typOid);
-
-      sqlwhen.append(" when base_type.oid = ").append(longTypOid).append(" then ").append(sqlType);
-    }
-    sql += sqlwhen.toString();
-
-    sql += " else " + Types.OTHER + " end from pg_type base_type where base_type.oid=t.typbasetype) "
-        + "else null end as base_type "
-        + "from pg_catalog.pg_type t, pg_catalog.pg_namespace n where t.typnamespace = n.oid and n.nspname != 'pg_catalog' and n.nspname != 'pg_toast'";
-
-    StringBuilder toAdd = new StringBuilder();
-    if (types != null) {
-      toAdd.append(" and (false ");
-      for (int type : types) {
-        if (type == Types.STRUCT) {
-          toAdd.append(" or t.typtype = 'c'");
-        } else if (type == Types.DISTINCT) {
-          toAdd.append(" or t.typtype = 'd'");
-        }
-      }
-      toAdd.append(" ) ");
-    } else {
-      toAdd.append(" and t.typtype IN ('c','d') ");
-    }
-    // spec says that if typeNamePattern is a fully qualified name
-    // then the schema and catalog are ignored
-
-    if (typeNamePattern != null) {
-      // search for qualifier
-      int firstQualifier = typeNamePattern.indexOf('.');
-      int secondQualifier = typeNamePattern.lastIndexOf('.');
-
-      if (firstQualifier != -1) {
-        // if one of them is -1 they both will be
-        if (firstQualifier != secondQualifier) {
-          // we have a catalog.schema.typename, ignore catalog
-          schemaPattern = typeNamePattern.substring(firstQualifier + 1, secondQualifier);
+        if (!connection.haveMinimumServerVersion(ServerVersion.v11)) {
+            sql += "AND pkic.relkind = 'i' ";
         } else {
-          // we just have a schema.typename
-          schemaPattern = typeNamePattern.substring(0, firstQualifier);
+            sql += "AND (pkic.relkind = 'i' OR pkic.relkind = 'I')";
         }
-        // strip out just the typeName
-        typeNamePattern = typeNamePattern.substring(secondQualifier + 1);
-      }
-      toAdd.append(" and t.typname like ").append(escapeQuotes(typeNamePattern));
+
+        if (!connection.haveMinimumServerVersion(ServerVersion.v9_0)) {
+            sql += " AND con.oid = dep.objid AND pkic.oid = dep.refobjid AND dep.classid = 'pg_constraint'::regclass::oid AND dep.refclassid = 'pg_class'::regclass::oid ";
+        } else {
+            sql += " AND pkic.oid = con.conindid ";
+        }
+
+        if (primarySchema != null && !primarySchema.isEmpty()) {
+            sql += " AND pkn.nspname = " + escapeQuotes(primarySchema);
+        }
+        if (foreignSchema != null && !foreignSchema.isEmpty()) {
+            sql += " AND fkn.nspname = " + escapeQuotes(foreignSchema);
+        }
+        if (primaryTable != null && !primaryTable.isEmpty()) {
+            sql += " AND pkc.relname = " + escapeQuotes(primaryTable);
+        }
+        if (foreignTable != null && !foreignTable.isEmpty()) {
+            sql += " AND fkc.relname = " + escapeQuotes(foreignTable);
+        }
+
+        if (primaryTable != null) {
+            sql += " ORDER BY fkn.nspname,fkc.relname,con.conname,pos.n";
+        } else {
+            sql += " ORDER BY pkn.nspname,pkc.relname, con.conname,pos.n";
+        }
+
+        return createMetaDataStatement().executeQuery(sql);
     }
 
-    // schemaPattern may have been modified above
-    if (schemaPattern != null) {
-      toAdd.append(" and n.nspname like ").append(escapeQuotes(schemaPattern));
-    }
-    sql += toAdd.toString();
-
-    if (connection.getHideUnprivilegedObjects()
-        && connection.haveMinimumServerVersion(ServerVersion.v9_2)) {
-      sql += " AND has_type_privilege(t.oid, 'USAGE')";
+    @Override
+    public ResultSet getImportedKeys(String catalog, String schema, String table)
+            throws SQLException {
+        return getImportedExportedKeys(null, null, null, catalog, schema, table);
     }
 
-    sql += " order by data_type, type_schem, type_name";
-    return createMetaDataStatement().executeQuery(sql);
-  }
-
-  @Override
-  public Connection getConnection() throws SQLException {
-    return connection;
-  }
-
-  protected Statement createMetaDataStatement() throws SQLException {
-    return connection.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE,
-        ResultSet.CONCUR_READ_ONLY);
-  }
-
-  @Override
-  public long getMaxLogicalLobSize() throws SQLException {
-    return 0;
-  }
-
-  @Override
-  public boolean supportsRefCursors() throws SQLException {
-    return true;
-  }
-
-  @Override
-  public RowIdLifetime getRowIdLifetime() throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "getRowIdLifetime()");
-  }
-
-  @Override
-  public boolean supportsStoredFunctionsUsingCallSyntax() throws SQLException {
-    return true;
-  }
-
-  @Override
-  public boolean autoCommitFailureClosesAllResultSets() throws SQLException {
-    return false;
-  }
-
-  @Override
-  public ResultSet getClientInfoProperties() throws SQLException {
-    Field[] f = new Field[4];
-    f[0] = new Field("NAME", Oid.VARCHAR);
-    f[1] = new Field("MAX_LEN", Oid.INT4);
-    f[2] = new Field("DEFAULT_VALUE", Oid.VARCHAR);
-    f[3] = new Field("DESCRIPTION", Oid.VARCHAR);
-
-    List<Tuple> v = new ArrayList<>();
-
-    if (connection.haveMinimumServerVersion(ServerVersion.v9_0)) {
-      byte[] [] tuple = new byte[4][];
-      tuple[0] = connection.encodeString("ApplicationName");
-      tuple[1] = connection.encodeString(Integer.toString(getMaxNameLength()));
-      tuple[2] = connection.encodeString("");
-      tuple[3] = connection
-          .encodeString("The name of the application currently utilizing the connection.");
-      v.add(new Tuple(tuple));
+    @Override
+    public ResultSet getExportedKeys(String catalog, String schema, String table)
+            throws SQLException {
+        return getImportedExportedKeys(catalog, schema, table, null, null, null);
     }
 
-    return ((BaseStatement) createMetaDataStatement()).createDriverResultSet(f, v);
-  }
-
-  @Override
-  public boolean isWrapperFor(Class<?> iface) throws SQLException {
-    return iface.isAssignableFrom(getClass());
-  }
-
-  @Override
-  public <T> T unwrap(Class<T> iface) throws SQLException {
-    if (iface.isAssignableFrom(getClass())) {
-      return iface.cast(this);
-    }
-    throw new SQLException("Cannot unwrap to " + iface.getName());
-  }
-
-  @Override
-  public ResultSet getFunctions(String catalog, String schemaPattern,
-      String functionNamePattern)
-      throws SQLException {
-
-    // The pg_get_function_result only exists 8.4 or later
-    boolean pgFuncResultExists = connection.haveMinimumServerVersion(ServerVersion.v8_4);
-
-    // Use query that support pg_get_function_result to get function result, else unknown is defaulted
-    String funcTypeSql = DatabaseMetaData.functionResultUnknown + " ";
-    if (pgFuncResultExists) {
-      funcTypeSql = " CASE "
-              + "   WHEN (format_type(p.prorettype, null) = 'unknown') THEN " + DatabaseMetaData.functionResultUnknown
-              + "   WHEN "
-              + "     (substring(pg_get_function_result(p.oid) from 0 for 6) = 'TABLE') OR "
-              + "     (substring(pg_get_function_result(p.oid) from 0 for 6) = 'SETOF') THEN " + DatabaseMetaData.functionReturnsTable
-              + "   ELSE " + DatabaseMetaData.functionNoTable
-              + " END ";
+    @Override
+    public ResultSet getCrossReference(
+            String primaryCatalog, String primarySchema, String primaryTable,
+            String foreignCatalog, String foreignSchema, String foreignTable)
+            throws SQLException {
+        return getImportedExportedKeys(primaryCatalog, primarySchema, primaryTable, foreignCatalog,
+                foreignSchema, foreignTable);
     }
 
-    // Build query and result
-    String sql;
-    sql = "SELECT current_database() AS FUNCTION_CAT, n.nspname AS FUNCTION_SCHEM, p.proname AS FUNCTION_NAME, "
-        + " d.description AS REMARKS, "
-        + funcTypeSql + " AS FUNCTION_TYPE, "
-        + " p.proname || '_' || p.oid AS SPECIFIC_NAME "
-        + "FROM pg_catalog.pg_proc p "
-        + "INNER JOIN pg_catalog.pg_namespace n ON p.pronamespace=n.oid "
-        + "LEFT JOIN pg_catalog.pg_description d ON p.oid=d.objoid "
-        + "WHERE true  ";
+    /**
+     * Returns a description of all data types supported by this database, in the
+     * 18-column layout mandated by {@link java.sql.DatabaseMetaData#getTypeInfo()}.
+     *
+     * <p>Rows are built from {@code pg_catalog.pg_type} (excluding the {@code pg_toast}
+     * schema and non-composite relation row types), then augmented with pseudo-types
+     * {@code serial}, {@code bigserial} and (on 9.2+) {@code smallserial}, and finally
+     * sorted by JDBC {@code DATA_TYPE} as the spec requires.
+     */
+    @Override
+    public ResultSet getTypeInfo() throws SQLException {
+
+        Field[] f = new Field[18];
+        List<Tuple> v = new ArrayList<>(); // The new ResultSet tuple stuff
+
+        f[0] = new Field("TYPE_NAME", Oid.VARCHAR);
+        f[1] = new Field("DATA_TYPE", Oid.INT2);
+        f[2] = new Field("PRECISION", Oid.INT4);
+        f[3] = new Field("LITERAL_PREFIX", Oid.VARCHAR);
+        f[4] = new Field("LITERAL_SUFFIX", Oid.VARCHAR);
+        f[5] = new Field("CREATE_PARAMS", Oid.VARCHAR);
+        f[6] = new Field("NULLABLE", Oid.INT2);
+        f[7] = new Field("CASE_SENSITIVE", Oid.BOOL);
+        f[8] = new Field("SEARCHABLE", Oid.INT2);
+        f[9] = new Field("UNSIGNED_ATTRIBUTE", Oid.BOOL);
+        f[10] = new Field("FIXED_PREC_SCALE", Oid.BOOL);
+        f[11] = new Field("AUTO_INCREMENT", Oid.BOOL);
+        f[12] = new Field("LOCAL_TYPE_NAME", Oid.VARCHAR);
+        f[13] = new Field("MINIMUM_SCALE", Oid.INT2);
+        f[14] = new Field("MAXIMUM_SCALE", Oid.INT2);
+        f[15] = new Field("SQL_DATA_TYPE", Oid.INT4);
+        f[16] = new Field("SQL_DATETIME_SUB", Oid.INT4);
+        f[17] = new Field("NUM_PREC_RADIX", Oid.INT4);
+
+        // t.typrelid = 0 keeps plain (non-relation) types; the sub-select additionally
+        // admits composite types (relkind 'c') while dropping table/view row types.
+        String sql;
+        sql = "SELECT t.typname,t.oid FROM pg_catalog.pg_type t"
+                + " JOIN pg_catalog.pg_namespace n ON (t.typnamespace = n.oid) "
+                + " WHERE n.nspname  != 'pg_toast'"
+                + " AND "
+                + " (t.typrelid = 0 OR (SELECT c.relkind = 'c' FROM pg_catalog.pg_class c WHERE c.oid = t.typrelid))";
+
+        if (connection.getHideUnprivilegedObjects() && connection.haveMinimumServerVersion(ServerVersion.v9_2)) {
+            sql += " AND has_type_privilege(t.oid, 'USAGE')";
+        }
+
+        Statement stmt = connection.createStatement();
+        ResultSet rs = stmt.executeQuery(sql);
+        // cache some results, this will keep memory usage down, and speed
+        // things up a little.
+        byte[] bZero = connection.encodeString("0");
+        byte[] b10 = connection.encodeString("10");
+        byte[] bf = connection.encodeString("f");
+        byte[] bt = connection.encodeString("t");
+        byte[] bliteral = connection.encodeString("'");
+        byte[] bNullable =
+                connection.encodeString(Integer.toString(DatabaseMetaData.typeNullable));
+        byte[] bSearchable =
+                connection.encodeString(Integer.toString(DatabaseMetaData.typeSearchable));
+
+        TypeInfo ti = connection.getTypeInfo();
+        if (ti instanceof TypeInfoCache) {
+            ((TypeInfoCache) ti).cacheSQLTypes();
+        }
+
+        while (rs.next()) {
+            // 19 slots, not 18: slot 18 is a sort key only and is never exposed,
+            // because the Field[] handed to the driver result set has 18 columns.
+            byte[][] tuple = new byte[19][];
+            String typname = rs.getString(1);
+            int typeOid = (int) rs.getLong(2);
+
+            tuple[0] = connection.encodeString(typname);
+            int sqlType = connection.getTypeInfo().getSQLType(typname);
+            tuple[1] =
+                    connection.encodeString(Integer.toString(sqlType));
+
+            /* this is just for sorting below, the result set never sees this */
+            tuple[18] = BigInteger.valueOf(sqlType).toByteArray();
+
+            tuple[2] = connection
+                    .encodeString(Integer.toString(connection.getTypeInfo().getMaximumPrecision(typeOid)));
+
+            // Using requiresQuoting(oid) would might trigger select statements that might fail with NPE
+            // if oid in question is being dropped.
+            // requiresQuotingSqlType is not bulletproof, however, it solves the most visible NPE.
+            if (connection.getTypeInfo().requiresQuotingSqlType(sqlType)) {
+                tuple[3] = bliteral;
+                tuple[4] = bliteral;
+            }
+
+            tuple[6] = bNullable; // all types can be null
+            tuple[7] = connection.getTypeInfo().isCaseSensitive(typeOid) ? bt : bf;
+            tuple[8] = bSearchable; // any thing can be used in the WHERE clause
+            tuple[9] = connection.getTypeInfo().isSigned(typeOid) ? bf : bt;
+            tuple[10] = bf; // false for now - must handle money
+            tuple[11] = bf; // false - it isn't autoincrement
+            tuple[13] = bZero; // min scale is zero
+            // only numeric can supports a scale.
+            tuple[14] = typeOid == Oid.NUMERIC ? connection.encodeString("1000") : bZero;
+
+            // 12 - LOCAL_TYPE_NAME is null
+            // 15 & 16 are unused so we return null
+            tuple[17] = b10; // everything is base 10
+            v.add(new Tuple(tuple));
+
+            // add pseudo-type serial, bigserial, smallserial
+            // (clones of int4/int8/int2 with AUTO_INCREMENT flipped to true)
+            if ("int4".equals(typname)) {
+                byte[][] tuple1 = tuple.clone();
+
+                tuple1[0] = connection.encodeString("serial");
+                tuple1[11] = bt;
+                v.add(new Tuple(tuple1));
+            } else if ("int8".equals(typname)) {
+                byte[][] tuple1 = tuple.clone();
+
+                tuple1[0] = connection.encodeString("bigserial");
+                tuple1[11] = bt;
+                v.add(new Tuple(tuple1));
+            } else if ("int2".equals(typname) && connection.haveMinimumServerVersion(ServerVersion.v9_2)) {
+                byte[][] tuple1 = tuple.clone();
+
+                tuple1[0] = connection.encodeString("smallserial");
+                tuple1[11] = bt;
+                v.add(new Tuple(tuple1));
+            }
+
+        }
+        rs.close();
+        stmt.close();
+
+        // Sort by DATA_TYPE using the hidden column 18 (variable-length big-endian
+        // bytes from BigInteger.toByteArray; presumably ByteConverter.bytesToInt
+        // accepts short arrays — TODO confirm against ByteConverter's contract).
+        Collections.sort(v, new Comparator<Tuple>() {
+            @Override
+            public int compare(Tuple o1, Tuple o2) {
+                int i1 = ByteConverter.bytesToInt(o1.get(18));
+                int i2 = ByteConverter.bytesToInt(o2.get(18));
+                return i1 < i2 ? -1 : (i1 == i2 ? 0 : 1);
+            }
+        });
+        return ((BaseStatement) createMetaDataStatement()).createDriverResultSet(f, v);
     }
+
+    /**
+     * Returns index and statistics information for the given table, per
+     * {@link java.sql.DatabaseMetaData#getIndexInfo}.
+     *
+     * <p>Two code paths: servers >= 8.3 use {@code information_schema._pg_expandarray}
+     * plus {@code pg_get_indexdef} (wrapped in an outer query so column names and
+     * ASC/DESC can be derived per ordinal position); older servers fall back to a
+     * join through {@code pg_attribute} with no ordering information.
+     *
+     * @param catalog ignored by this implementation (no catalog filter is applied)
+     * @param schema exact schema name filter, or null/empty for all schemas
+     * @param tableName exact table name (required)
+     * @param unique when true, only unique indexes are returned
+     * @param approximate accepted but not used to alter the query
+     */
+    @Override
+    public ResultSet getIndexInfo(
+            String catalog, String schema, String tableName,
+            boolean unique, boolean approximate) throws SQLException {
+        /*
+         * This is a complicated function because we have three possible situations: <= 7.2 no schemas,
+         * single column functional index 7.3 schemas, single column functional index >= 7.4 schemas,
+         * multi-column expressional index >= 8.3 supports ASC/DESC column info >= 9.0 no longer renames
+         * index columns on a table column rename, so we must look at the table attribute names
+         *
+         * with the single column functional index we need an extra join to the table's pg_attribute
+         * data to get the column the function operates on.
+         */
+        String sql;
+        if (connection.haveMinimumServerVersion(ServerVersion.v8_3)) {
+            sql = "SELECT NULL AS TABLE_CAT, n.nspname AS TABLE_SCHEM, "
+                    + "  ct.relname AS TABLE_NAME, NOT i.indisunique AS NON_UNIQUE, "
+                    + "  NULL AS INDEX_QUALIFIER, ci.relname AS INDEX_NAME, "
+                    + "  CASE i.indisclustered "
+                    + "    WHEN true THEN " + DatabaseMetaData.tableIndexClustered
+                    + "    ELSE CASE am.amname "
+                    + "      WHEN 'hash' THEN " + DatabaseMetaData.tableIndexHashed
+                    + "      ELSE " + DatabaseMetaData.tableIndexOther
+                    + "    END "
+                    + "  END AS TYPE, "
+                    + "  (information_schema._pg_expandarray(i.indkey)).n AS ORDINAL_POSITION, "
+                    + "  ci.reltuples AS CARDINALITY, "
+                    + "  ci.relpages AS PAGES, "
+                    + "  pg_catalog.pg_get_expr(i.indpred, i.indrelid) AS FILTER_CONDITION, "
+                    + "  ci.oid AS CI_OID, "
+                    + "  i.indoption AS I_INDOPTION, "
+                    + (connection.haveMinimumServerVersion(ServerVersion.v9_6) ? "  am.amname AS AM_NAME " : "  am.amcanorder AS AM_CANORDER ")
+                    + "FROM pg_catalog.pg_class ct "
+                    + "  JOIN pg_catalog.pg_namespace n ON (ct.relnamespace = n.oid) "
+                    + "  JOIN pg_catalog.pg_index i ON (ct.oid = i.indrelid) "
+                    + "  JOIN pg_catalog.pg_class ci ON (ci.oid = i.indexrelid) "
+                    + "  JOIN pg_catalog.pg_am am ON (ci.relam = am.oid) "
+                    + "WHERE true ";
+
+            if (schema != null && !schema.isEmpty()) {
+                sql += " AND n.nspname = " + escapeQuotes(schema);
+            }
+
+            sql += " AND ct.relname = " + escapeQuotes(tableName);
+
+            if (unique) {
+                sql += " AND i.indisunique ";
+            }
+
+            // Deliberate reassignment: the query built above becomes the inner
+            // sub-select ("tmp") of this outer query, which resolves COLUMN_NAME
+            // and ASC_OR_DESC per ordinal position from the inner row's CI_OID,
+            // I_INDOPTION and AM_NAME/AM_CANORDER columns.
+            sql = "SELECT "
+                    + "    tmp.TABLE_CAT, "
+                    + "    tmp.TABLE_SCHEM, "
+                    + "    tmp.TABLE_NAME, "
+                    + "    tmp.NON_UNIQUE, "
+                    + "    tmp.INDEX_QUALIFIER, "
+                    + "    tmp.INDEX_NAME, "
+                    + "    tmp.TYPE, "
+                    + "    tmp.ORDINAL_POSITION, "
+                    + "    trim(both '\"' from pg_catalog.pg_get_indexdef(tmp.CI_OID, tmp.ORDINAL_POSITION, false)) AS COLUMN_NAME, "
+                    + (connection.haveMinimumServerVersion(ServerVersion.v9_6)
+                    ? "  CASE tmp.AM_NAME "
+                    + "    WHEN 'btree' THEN CASE tmp.I_INDOPTION[tmp.ORDINAL_POSITION - 1] & 1::smallint "
+                    + "      WHEN 1 THEN 'D' "
+                    + "      ELSE 'A' "
+                    + "    END "
+                    + "    ELSE NULL "
+                    + "  END AS ASC_OR_DESC, "
+                    : "  CASE tmp.AM_CANORDER "
+                    + "    WHEN true THEN CASE tmp.I_INDOPTION[tmp.ORDINAL_POSITION - 1] & 1::smallint "
+                    + "      WHEN 1 THEN 'D' "
+                    + "      ELSE 'A' "
+                    + "    END "
+                    + "    ELSE NULL "
+                    + "  END AS ASC_OR_DESC, ")
+                    + "    tmp.CARDINALITY, "
+                    + "    tmp.PAGES, "
+                    + "    tmp.FILTER_CONDITION "
+                    + "FROM ("
+                    + sql
+                    + ") AS tmp";
+        } else {
+            String select;
+            String from;
+            String where;
+
+            select = "SELECT NULL AS TABLE_CAT, n.nspname AS TABLE_SCHEM, ";
+            from = " FROM pg_catalog.pg_namespace n, pg_catalog.pg_class ct, pg_catalog.pg_class ci, "
+                    + " pg_catalog.pg_attribute a, pg_catalog.pg_am am ";
+            where = " AND n.oid = ct.relnamespace ";
+            from += ", pg_catalog.pg_index i ";
+
+            if (schema != null && !schema.isEmpty()) {
+                where += " AND n.nspname = " + escapeQuotes(schema);
+            }
+
+            sql = select
+                    + " ct.relname AS TABLE_NAME, NOT i.indisunique AS NON_UNIQUE, NULL AS INDEX_QUALIFIER, ci.relname AS INDEX_NAME, "
+                    + " CASE i.indisclustered "
+                    + " WHEN true THEN " + DatabaseMetaData.tableIndexClustered
+                    + " ELSE CASE am.amname "
+                    + " WHEN 'hash' THEN " + DatabaseMetaData.tableIndexHashed
+                    + " ELSE " + DatabaseMetaData.tableIndexOther
+                    + " END "
+                    + " END AS TYPE, "
+                    + " a.attnum AS ORDINAL_POSITION, "
+                    + " CASE WHEN i.indexprs IS NULL THEN a.attname "
+                    + " ELSE pg_catalog.pg_get_indexdef(ci.oid,a.attnum,false) END AS COLUMN_NAME, "
+                    + " NULL AS ASC_OR_DESC, "
+                    + " ci.reltuples AS CARDINALITY, "
+                    + " ci.relpages AS PAGES, "
+                    + " pg_catalog.pg_get_expr(i.indpred, i.indrelid) AS FILTER_CONDITION "
+                    + from
+                    + " WHERE ct.oid=i.indrelid AND ci.oid=i.indexrelid AND a.attrelid=ci.oid AND ci.relam=am.oid "
+                    + where;
+
+            sql += " AND ct.relname = " + escapeQuotes(tableName);
+
+            if (unique) {
+                sql += " AND i.indisunique ";
+            }
+        }
+
+        // Ordering mandated by the JDBC getIndexInfo contract.
+        sql += " ORDER BY NON_UNIQUE, TYPE, INDEX_NAME, ORDINAL_POSITION ";
+
+        return ((PgResultSet) createMetaDataStatement().executeQuery(sql)).upperCaseFieldLabels();
+    }
+
+    // ** JDBC 2 Extensions **
+
+    /**
+     * Reports whether a ResultSet type is supported; everything except
+     * TYPE_SCROLL_SENSITIVE is.
+     */
+    @Override
+    public boolean supportsResultSetType(int type) throws SQLException {
+        // The only type we don't support
+        return type != ResultSet.TYPE_SCROLL_SENSITIVE;
+    }
+
+    /**
+     * Reports whether the type/concurrency combination is supported: every
+     * combination is, except any involving TYPE_SCROLL_SENSITIVE.
+     */
+    @Override
+    public boolean supportsResultSetConcurrency(int type, int concurrency) throws SQLException {
+        // These combinations are not supported!
+        if (type == ResultSet.TYPE_SCROLL_SENSITIVE) {
+            return false;
+        }
+
+        // We do support Updateable ResultSets
+        // (this branch is redundant with the fall-through below, but kept for clarity)
+        if (concurrency == ResultSet.CONCUR_UPDATABLE) {
+            return true;
+        }
+
+        // Everything else we do
+        return true;
+    }
+
+    /* lots of unsupported stuff... */
+
+    // A result set sees its own updates/deletes/inserts...
+    @Override
+    public boolean ownUpdatesAreVisible(int type) throws SQLException {
+        return true;
+    }
+
+    @Override
+    public boolean ownDeletesAreVisible(int type) throws SQLException {
+        return true;
+    }
+
+    @Override
+    public boolean ownInsertsAreVisible(int type) throws SQLException {
+        // a result set can see rows it inserted itself
+        return true;
+    }
+
+    // ...but never changes made by others while it is open.
+    @Override
+    public boolean othersUpdatesAreVisible(int type) throws SQLException {
+        return false;
+    }
+
+    @Override
+    public boolean othersDeletesAreVisible(int i) throws SQLException {
+        return false;
+    }
+
+    @Override
+    public boolean othersInsertsAreVisible(int type) throws SQLException {
+        return false;
+    }
+
+    // Changes are not detectable via the ResultSet (no rowUpdated/rowDeleted/rowInserted hints).
+    @Override
+    public boolean updatesAreDetected(int type) throws SQLException {
+        return false;
+    }
+
+    @Override
+    public boolean deletesAreDetected(int i) throws SQLException {
+        return false;
+    }
+
+    @Override
+    public boolean insertsAreDetected(int type) throws SQLException {
+        return false;
+    }
+
+    /** Batch updates via Statement.addBatch/executeBatch are supported. */
+    @Override
+    public boolean supportsBatchUpdates() throws SQLException {
+        return true;
+    }
+
+    /**
+     * Returns user-defined types (composite types as {@link Types#STRUCT}, domains as
+     * {@link Types#DISTINCT}) from {@code pg_catalog.pg_type}, excluding the
+     * {@code pg_catalog} and {@code pg_toast} schemas.
+     *
+     * <p>For domains, BASE_TYPE is resolved through an inlined CASE over every known
+     * oid-to-SQL-type mapping. Per the JDBC spec, a fully qualified
+     * {@code typeNamePattern} ("schema.type" or "catalog.schema.type") overrides
+     * {@code schemaPattern}; the catalog component is always ignored.
+     *
+     * @param catalog ignored by this implementation
+     * @param types restrict to these JDBC types (STRUCT/DISTINCT); null means both
+     */
+    @Override
+    public ResultSet getUDTs(String catalog, String schemaPattern,
+                             String typeNamePattern, int[] types) throws SQLException {
+        String sql = "select "
+                + "null as type_cat, n.nspname as type_schem, t.typname as type_name,  null as class_name, "
+                + "CASE WHEN t.typtype='c' then " + Types.STRUCT + " else "
+                + Types.DISTINCT
+                + " end as data_type, pg_catalog.obj_description(t.oid, 'pg_type')  "
+                + "as remarks, CASE WHEN t.typtype = 'd' then  (select CASE";
+        TypeInfo typeInfo = connection.getTypeInfo();
+
+        StringBuilder sqlwhen = new StringBuilder();
+        for (Iterator<Integer> i = typeInfo.getPGTypeOidsWithSQLTypes(); i.hasNext(); ) {
+            Integer typOid = i.next();
+            // NB: Java Integers are signed 32-bit integers, but oids are unsigned 32-bit integers.
+            // We must therefore map it to a positive long value before writing it into the query,
+            // or we'll be unable to correctly handle ~ half of the oid space.
+            long longTypOid = typeInfo.intOidToLong(typOid);
+            int sqlType = typeInfo.getSQLType(typOid);
+
+            sqlwhen.append(" when base_type.oid = ").append(longTypOid).append(" then ").append(sqlType);
+        }
+        sql += sqlwhen.toString();
+
+        // Unmapped base types fall back to Types.OTHER.
+        sql += " else " + Types.OTHER + " end from pg_type base_type where base_type.oid=t.typbasetype) "
+                + "else null end as base_type "
+                + "from pg_catalog.pg_type t, pg_catalog.pg_namespace n where t.typnamespace = n.oid and n.nspname != 'pg_catalog' and n.nspname != 'pg_toast'";
+
+        StringBuilder toAdd = new StringBuilder();
+        if (types != null) {
+            // "false OR ..." lets us append each requested typtype uniformly.
+            toAdd.append(" and (false ");
+            for (int type : types) {
+                if (type == Types.STRUCT) {
+                    toAdd.append(" or t.typtype = 'c'");
+                } else if (type == Types.DISTINCT) {
+                    toAdd.append(" or t.typtype = 'd'");
+                }
+            }
+            toAdd.append(" ) ");
+        } else {
+            toAdd.append(" and t.typtype IN ('c','d') ");
+        }
+        // spec says that if typeNamePattern is a fully qualified name
+        // then the schema and catalog are ignored
+
+        if (typeNamePattern != null) {
+            // search for qualifier
+            int firstQualifier = typeNamePattern.indexOf('.');
+            int secondQualifier = typeNamePattern.lastIndexOf('.');
+
+            if (firstQualifier != -1) {
+                // if one of them is -1 they both will be
+                if (firstQualifier != secondQualifier) {
+                    // we have a catalog.schema.typename, ignore catalog
+                    schemaPattern = typeNamePattern.substring(firstQualifier + 1, secondQualifier);
+                } else {
+                    // we just have a schema.typename
+                    schemaPattern = typeNamePattern.substring(0, firstQualifier);
+                }
+                // strip out just the typeName
+                typeNamePattern = typeNamePattern.substring(secondQualifier + 1);
+            }
+            toAdd.append(" and t.typname like ").append(escapeQuotes(typeNamePattern));
+        }
+
+        // schemaPattern may have been modified above
+        if (schemaPattern != null) {
+            toAdd.append(" and n.nspname like ").append(escapeQuotes(schemaPattern));
+        }
+        sql += toAdd.toString();
+
+        if (connection.getHideUnprivilegedObjects()
+                && connection.haveMinimumServerVersion(ServerVersion.v9_2)) {
+            sql += " AND has_type_privilege(t.oid, 'USAGE')";
+        }
+
+        sql += " order by data_type, type_schem, type_name";
+        return createMetaDataStatement().executeQuery(sql);
+    }
+
+    /** Returns the connection that produced this metadata object. */
+    @Override
+    public Connection getConnection() throws SQLException {
+        return connection;
+    }
+
+    /**
+     * Creates the scrollable read-only statement used to run all metadata queries,
+     * so callers can position freely within the returned result sets.
+     */
+    protected Statement createMetaDataStatement() throws SQLException {
+        return connection.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE,
+                ResultSet.CONCUR_READ_ONLY);
+    }
+
+    /** Per the JDBC contract, 0 means there is no limit or the limit is unknown. */
+    @Override
+    public long getMaxLogicalLobSize() throws SQLException {
+        return 0;
+    }
+
+    @Override
+    public boolean supportsRefCursors() throws SQLException {
+        return true;
+    }
+
+    /** Not implemented: PostgreSQL does not expose a JDBC ROWID type. */
+    @Override
+    public RowIdLifetime getRowIdLifetime() throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "getRowIdLifetime()");
+    }
+
+    @Override
+    public boolean supportsStoredFunctionsUsingCallSyntax() throws SQLException {
+        return true;
+    }
+
+    @Override
+    public boolean autoCommitFailureClosesAllResultSets() throws SQLException {
+        return false;
+    }
+
+    /**
+     * Lists the client info properties the driver supports. On 9.0+ servers the
+     * only property is ApplicationName (empty default); older servers get an empty
+     * result set with the same four-column shape.
+     */
+    @Override
+    public ResultSet getClientInfoProperties() throws SQLException {
+        Field[] f = new Field[4];
+        f[0] = new Field("NAME", Oid.VARCHAR);
+        f[1] = new Field("MAX_LEN", Oid.INT4);
+        f[2] = new Field("DEFAULT_VALUE", Oid.VARCHAR);
+        f[3] = new Field("DESCRIPTION", Oid.VARCHAR);
+
+        List<Tuple> v = new ArrayList<>();
+
+        if (connection.haveMinimumServerVersion(ServerVersion.v9_0)) {
+            byte[][] tuple = new byte[4][];
+            tuple[0] = connection.encodeString("ApplicationName");
+            // MAX_LEN mirrors the server identifier length limit.
+            tuple[1] = connection.encodeString(Integer.toString(getMaxNameLength()));
+            tuple[2] = connection.encodeString("");
+            tuple[3] = connection
+                    .encodeString("The name of the application currently utilizing the connection.");
+            v.add(new Tuple(tuple));
+        }
+
+        return ((BaseStatement) createMetaDataStatement()).createDriverResultSet(f, v);
+    }
+
+    /** java.sql.Wrapper support: this object wraps nothing, so only self-unwrapping works. */
+    @Override
+    public boolean isWrapperFor(Class<?> iface) throws SQLException {
+        return iface.isAssignableFrom(getClass());
+    }
+
+    /**
+     * Casts this instance to {@code iface} when compatible.
+     *
+     * @throws SQLException if this object is not an instance of the requested interface
+     */
+    @Override
+    public <T> T unwrap(Class<T> iface) throws SQLException {
+        if (iface.isAssignableFrom(getClass())) {
+            return iface.cast(this);
+        }
+        throw new SQLException("Cannot unwrap to " + iface.getName());
+    }
+
+    @Override
+    public ResultSet getFunctions(String catalog, String schemaPattern,
+                                  String functionNamePattern)
+            throws SQLException {
+
+        // The pg_get_function_result only exists 8.4 or later
+        boolean pgFuncResultExists = connection.haveMinimumServerVersion(ServerVersion.v8_4);
+
+        // Use query that support pg_get_function_result to get function result, else unknown is defaulted
+        String funcTypeSql = DatabaseMetaData.functionResultUnknown + " ";
+        if (pgFuncResultExists) {
+            funcTypeSql = " CASE "
+                    + "   WHEN (format_type(p.prorettype, null) = 'unknown') THEN " + DatabaseMetaData.functionResultUnknown
+                    + "   WHEN "
+                    + "     (substring(pg_get_function_result(p.oid) from 0 for 6) = 'TABLE') OR "
+                    + "     (substring(pg_get_function_result(p.oid) from 0 for 6) = 'SETOF') THEN " + DatabaseMetaData.functionReturnsTable
+                    + "   ELSE " + DatabaseMetaData.functionNoTable
+                    + " END ";
+        }
+
+        // Build query and result
+        String sql;
+        sql = "SELECT current_database() AS FUNCTION_CAT, n.nspname AS FUNCTION_SCHEM, p.proname AS FUNCTION_NAME, "
+                + " d.description AS REMARKS, "
+                + funcTypeSql + " AS FUNCTION_TYPE, "
+                + " p.proname || '_' || p.oid AS SPECIFIC_NAME "
+                + "FROM pg_catalog.pg_proc p "
+                + "INNER JOIN pg_catalog.pg_namespace n ON p.pronamespace=n.oid "
+                + "LEFT JOIN pg_catalog.pg_description d ON p.oid=d.objoid "
+                + "WHERE true  ";
+
+        if (connection.haveMinimumServerVersion(ServerVersion.v11)) {
+            sql += " AND p.prokind='f'";
+        }
     /*
     if the user provides a schema then search inside the schema for it
      */
-    if (schemaPattern != null && !schemaPattern.isEmpty()) {
-      sql += " AND n.nspname LIKE " + escapeQuotes(schemaPattern);
-    }
-    if (functionNamePattern != null && !functionNamePattern.isEmpty()) {
-      sql += " AND p.proname LIKE " + escapeQuotes(functionNamePattern);
-    }
-    if (connection.getHideUnprivilegedObjects()) {
-      sql += " AND has_function_privilege(p.oid,'EXECUTE')";
-    }
-    sql += " ORDER BY FUNCTION_SCHEM, FUNCTION_NAME, p.oid::text ";
-
-    return createMetaDataStatement().executeQuery(sql);
-  }
-
-  @Override
-  public ResultSet getFunctionColumns(String catalog, String schemaPattern,
-      String functionNamePattern, String columnNamePattern)
-      throws SQLException {
-    int columns = 17;
-
-    Field[] f = new Field[columns];
-    List<Tuple> v = new ArrayList<>();
-
-    f[0] = new Field("FUNCTION_CAT", Oid.VARCHAR);
-    f[1] = new Field("FUNCTION_SCHEM", Oid.VARCHAR);
-    f[2] = new Field("FUNCTION_NAME", Oid.VARCHAR);
-    f[3] = new Field("COLUMN_NAME", Oid.VARCHAR);
-    f[4] = new Field("COLUMN_TYPE", Oid.INT2);
-    f[5] = new Field("DATA_TYPE", Oid.INT2);
-    f[6] = new Field("TYPE_NAME", Oid.VARCHAR);
-    f[7] = new Field("PRECISION", Oid.INT2);
-    f[8] = new Field("LENGTH", Oid.INT4);
-    f[9] = new Field("SCALE", Oid.INT2);
-    f[10] = new Field("RADIX", Oid.INT2);
-    f[11] = new Field("NULLABLE", Oid.INT2);
-    f[12] = new Field("REMARKS", Oid.VARCHAR);
-    f[13] = new Field("CHAR_OCTET_LENGTH", Oid.INT4);
-    f[14] = new Field("ORDINAL_POSITION", Oid.INT4);
-    f[15] = new Field("IS_NULLABLE", Oid.VARCHAR);
-    f[16] = new Field("SPECIFIC_NAME", Oid.VARCHAR);
-
-    String sql;
-    sql = "SELECT n.nspname,p.proname,p.prorettype,p.proargtypes, t.typtype,t.typrelid, "
-        + " p.proargnames, p.proargmodes, p.proallargtypes, p.oid "
-        + " FROM pg_catalog.pg_proc p, pg_catalog.pg_namespace n, pg_catalog.pg_type t "
-        + " WHERE p.pronamespace=n.oid AND p.prorettype=t.oid ";
-    if (schemaPattern != null && !schemaPattern.isEmpty()) {
-      sql += " AND n.nspname LIKE " + escapeQuotes(schemaPattern);
-    }
-    if (functionNamePattern != null && !functionNamePattern.isEmpty()) {
-      sql += " AND p.proname LIKE " + escapeQuotes(functionNamePattern);
-    }
-    sql += " ORDER BY n.nspname, p.proname, p.oid::text ";
-
-    byte[] isnullableUnknown = new byte[0];
-
-    Statement stmt = connection.createStatement();
-    ResultSet rs = stmt.executeQuery(sql);
-    while (rs.next()) {
-      byte[] schema = rs.getBytes("nspname");
-      byte[] functionName = rs.getBytes("proname");
-      byte[] specificName =
-          connection.encodeString(rs.getString("proname") + "_" + rs.getString("oid"));
-      int returnType = (int) rs.getLong("prorettype");
-      String returnTypeType = rs.getString("typtype");
-      int returnTypeRelid = (int) rs.getLong("typrelid");
-
-      String strArgTypes = rs.getString("proargtypes");
-      StringTokenizer st = new StringTokenizer(strArgTypes);
-      List<Long> argTypes = new ArrayList<>();
-      while (st.hasMoreTokens()) {
-        argTypes.add(Long.valueOf(st.nextToken()));
-      }
-
-      String[] argNames = null;
-      Array argNamesArray = rs.getArray("proargnames");
-      if (argNamesArray != null) {
-        argNames = (String[]) argNamesArray.getArray();
-      }
-
-      String[] argModes = null;
-      Array argModesArray = rs.getArray("proargmodes");
-      if (argModesArray != null) {
-        argModes = (String[]) argModesArray.getArray();
-      }
-
-      int numArgs = argTypes.size();
-
-      Long[] allArgTypes = null;
-      Array allArgTypesArray = rs.getArray("proallargtypes");
-      if (allArgTypesArray != null) {
-        allArgTypes = (Long[]) allArgTypesArray.getArray();
-        numArgs = allArgTypes.length;
-      }
-
-      // decide if we are returning a single column result.
-      if ("b".equals(returnTypeType) || "d".equals(returnTypeType) || "e".equals(returnTypeType)
-          || ("p".equals(returnTypeType) && argModesArray == null)) {
-        byte[] [] tuple = new byte[columns][];
-        tuple[0] = null;
-        tuple[1] = schema;
-        tuple[2] = functionName;
-        tuple[3] = connection.encodeString("returnValue");
-        tuple[4] = connection
-            .encodeString(Integer.toString(DatabaseMetaData.functionReturn));
-        tuple[5] = connection
-            .encodeString(Integer.toString(connection.getTypeInfo().getSQLType(returnType)));
-        tuple[6] = connection.encodeString(connection.getTypeInfo().getPGType(returnType));
-        tuple[7] = null;
-        tuple[8] = null;
-        tuple[9] = null;
-        tuple[10] = null;
-        tuple[11] = connection
-            .encodeString(Integer.toString(DatabaseMetaData.functionNullableUnknown));
-        tuple[12] = null;
-        tuple[14] = connection.encodeString(Integer.toString(0));
-        tuple[15] = isnullableUnknown;
-        tuple[16] = specificName;
-
-        v.add(new Tuple(tuple));
-      }
-
-      // Add a row for each argument.
-      for (int i = 0; i < numArgs; i++) {
-        byte[] [] tuple = new byte[columns][];
-        tuple[0] = null;
-        tuple[1] = schema;
-        tuple[2] = functionName;
-
-        if (argNames != null) {
-          tuple[3] = connection.encodeString(argNames[i]);
-        } else {
-          tuple[3] = connection.encodeString("$" + (i + 1));
+        if (schemaPattern != null && !schemaPattern.isEmpty()) {
+            sql += " AND n.nspname LIKE " + escapeQuotes(schemaPattern);
         }
-
-        int columnMode = DatabaseMetaData.functionColumnIn;
-        if (argModes != null && argModes[i] != null) {
-          if ("o".equals(argModes[i])) {
-            columnMode = DatabaseMetaData.functionColumnOut;
-          } else if ("b".equals(argModes[i])) {
-            columnMode = DatabaseMetaData.functionColumnInOut;
-          } else if ("t".equals(argModes[i])) {
-            columnMode = DatabaseMetaData.functionReturn;
-          }
+        if (functionNamePattern != null && !functionNamePattern.isEmpty()) {
+            sql += " AND p.proname LIKE " + escapeQuotes(functionNamePattern);
         }
-
-        tuple[4] = connection.encodeString(Integer.toString(columnMode));
-
-        int argOid;
-        if (allArgTypes != null) {
-          argOid = allArgTypes[i].intValue();
-        } else {
-          argOid = argTypes.get(i).intValue();
+        if (connection.getHideUnprivilegedObjects()) {
+            sql += " AND has_function_privilege(p.oid,'EXECUTE')";
         }
+        sql += " ORDER BY FUNCTION_SCHEM, FUNCTION_NAME, p.oid::text ";
 
-        tuple[5] =
-            connection.encodeString(Integer.toString(connection.getTypeInfo().getSQLType(argOid)));
-        tuple[6] = connection.encodeString(connection.getTypeInfo().getPGType(argOid));
-        tuple[7] = null;
-        tuple[8] = null;
-        tuple[9] = null;
-        tuple[10] = null;
-        tuple[11] =
-            connection.encodeString(Integer.toString(DatabaseMetaData.functionNullableUnknown));
-        tuple[12] = null;
-        tuple[14] = connection.encodeString(Integer.toString(i + 1));
-        tuple[15] = isnullableUnknown;
-        tuple[16] = specificName;
-
-        v.add(new Tuple(tuple));
-      }
-
-      // if we are returning a multi-column result.
-      if ("c".equals(returnTypeType) || ("p".equals(returnTypeType) && argModesArray != null)) {
-        String columnsql = "SELECT a.attname,a.atttypid FROM pg_catalog.pg_attribute a "
-            + " WHERE a.attrelid = " + returnTypeRelid
-            + " AND NOT a.attisdropped AND a.attnum > 0 ORDER BY a.attnum ";
-        Statement columnstmt = connection.createStatement();
-        ResultSet columnrs = columnstmt.executeQuery(columnsql);
-        while (columnrs.next()) {
-          int columnTypeOid = (int) columnrs.getLong("atttypid");
-          byte[] [] tuple = new byte[columns][];
-          tuple[0] = null;
-          tuple[1] = schema;
-          tuple[2] = functionName;
-          tuple[3] = columnrs.getBytes("attname");
-          tuple[4] = connection
-              .encodeString(Integer.toString(DatabaseMetaData.functionColumnResult));
-          tuple[5] = connection
-              .encodeString(Integer.toString(connection.getTypeInfo().getSQLType(columnTypeOid)));
-          tuple[6] = connection.encodeString(connection.getTypeInfo().getPGType(columnTypeOid));
-          tuple[7] = null;
-          tuple[8] = null;
-          tuple[9] = null;
-          tuple[10] = null;
-          tuple[11] = connection
-              .encodeString(Integer.toString(DatabaseMetaData.functionNullableUnknown));
-          tuple[12] = null;
-          tuple[14] = connection.encodeString(Integer.toString(0));
-          tuple[15] = isnullableUnknown;
-          tuple[16] = specificName;
-
-          v.add(new Tuple(tuple));
-        }
-        columnrs.close();
-        columnstmt.close();
-      }
+        return createMetaDataStatement().executeQuery(sql);
     }
-    rs.close();
-    stmt.close();
 
-    return ((BaseStatement) createMetaDataStatement()).createDriverResultSet(f, v);
-  }
+    @Override
+    public ResultSet getFunctionColumns(String catalog, String schemaPattern,
+                                        String functionNamePattern, String columnNamePattern)
+            throws SQLException {
+        int columns = 17;
 
-  @Override
-  public ResultSet getPseudoColumns(String catalog, String schemaPattern,
-      String tableNamePattern, String columnNamePattern)
-      throws SQLException {
-    throw Driver.notImplemented(this.getClass(),
-        "getPseudoColumns(String, String, String, String)");
-  }
+        Field[] f = new Field[columns];
+        List<Tuple> v = new ArrayList<>();
 
-  @Override
-  public boolean generatedKeyAlwaysReturned() throws SQLException {
-    return true;
-  }
+        f[0] = new Field("FUNCTION_CAT", Oid.VARCHAR);
+        f[1] = new Field("FUNCTION_SCHEM", Oid.VARCHAR);
+        f[2] = new Field("FUNCTION_NAME", Oid.VARCHAR);
+        f[3] = new Field("COLUMN_NAME", Oid.VARCHAR);
+        f[4] = new Field("COLUMN_TYPE", Oid.INT2);
+        f[5] = new Field("DATA_TYPE", Oid.INT2);
+        f[6] = new Field("TYPE_NAME", Oid.VARCHAR);
+        f[7] = new Field("PRECISION", Oid.INT2);
+        f[8] = new Field("LENGTH", Oid.INT4);
+        f[9] = new Field("SCALE", Oid.INT2);
+        f[10] = new Field("RADIX", Oid.INT2);
+        f[11] = new Field("NULLABLE", Oid.INT2);
+        f[12] = new Field("REMARKS", Oid.VARCHAR);
+        f[13] = new Field("CHAR_OCTET_LENGTH", Oid.INT4);
+        f[14] = new Field("ORDINAL_POSITION", Oid.INT4);
+        f[15] = new Field("IS_NULLABLE", Oid.VARCHAR);
+        f[16] = new Field("SPECIFIC_NAME", Oid.VARCHAR);
 
-  @Override
-  public boolean supportsSavepoints() throws SQLException {
-    return true;
-  }
+        String sql;
+        sql = "SELECT n.nspname,p.proname,p.prorettype,p.proargtypes, t.typtype,t.typrelid, "
+                + " p.proargnames, p.proargmodes, p.proallargtypes, p.oid "
+                + " FROM pg_catalog.pg_proc p, pg_catalog.pg_namespace n, pg_catalog.pg_type t "
+                + " WHERE p.pronamespace=n.oid AND p.prorettype=t.oid ";
+        if (schemaPattern != null && !schemaPattern.isEmpty()) {
+            sql += " AND n.nspname LIKE " + escapeQuotes(schemaPattern);
+        }
+        if (functionNamePattern != null && !functionNamePattern.isEmpty()) {
+            sql += " AND p.proname LIKE " + escapeQuotes(functionNamePattern);
+        }
+        sql += " ORDER BY n.nspname, p.proname, p.oid::text ";
 
-  @Override
-  public boolean supportsNamedParameters() throws SQLException {
-    return false;
-  }
+        byte[] isnullableUnknown = new byte[0];
 
-  @Override
-  public boolean supportsMultipleOpenResults() throws SQLException {
-    return false;
-  }
+        Statement stmt = connection.createStatement();
+        ResultSet rs = stmt.executeQuery(sql);
+        while (rs.next()) {
+            byte[] schema = rs.getBytes("nspname");
+            byte[] functionName = rs.getBytes("proname");
+            byte[] specificName =
+                    connection.encodeString(rs.getString("proname") + "_" + rs.getString("oid"));
+            int returnType = (int) rs.getLong("prorettype");
+            String returnTypeType = rs.getString("typtype");
+            int returnTypeRelid = (int) rs.getLong("typrelid");
 
-  @Override
-  public boolean supportsGetGeneratedKeys() throws SQLException {
-    // We don't support returning generated keys by column index,
-    // but that should be a rarer case than the ones we do support.
-    //
-    return true;
-  }
+            String strArgTypes = rs.getString("proargtypes");
+            StringTokenizer st = new StringTokenizer(strArgTypes);
+            List<Long> argTypes = new ArrayList<>();
+            while (st.hasMoreTokens()) {
+                argTypes.add(Long.valueOf(st.nextToken()));
+            }
 
-  @Override
-  public ResultSet getSuperTypes(String catalog, String schemaPattern,
-      String typeNamePattern)
-      throws SQLException {
-    throw Driver.notImplemented(this.getClass(),
-        "getSuperTypes(String,String,String)");
-  }
+            String[] argNames = null;
+            Array argNamesArray = rs.getArray("proargnames");
+            if (argNamesArray != null) {
+                argNames = (String[]) argNamesArray.getArray();
+            }
 
-  @Override
-  public ResultSet getSuperTables(String catalog, String schemaPattern,
-      String tableNamePattern)
-      throws SQLException {
-    throw Driver.notImplemented(this.getClass(),
-        "getSuperTables(String,String,String,String)");
-  }
+            String[] argModes = null;
+            Array argModesArray = rs.getArray("proargmodes");
+            if (argModesArray != null) {
+                argModes = (String[]) argModesArray.getArray();
+            }
 
-  @Override
-  public ResultSet getAttributes(String catalog, String schemaPattern,
-      String typeNamePattern, String attributeNamePattern) throws SQLException {
-    throw Driver.notImplemented(this.getClass(),
-        "getAttributes(String,String,String,String)");
-  }
+            int numArgs = argTypes.size();
 
-  @Override
-  public boolean supportsResultSetHoldability(int holdability) throws SQLException {
-    return true;
-  }
+            Long[] allArgTypes = null;
+            Array allArgTypesArray = rs.getArray("proallargtypes");
+            if (allArgTypesArray != null) {
+                allArgTypes = (Long[]) allArgTypesArray.getArray();
+                numArgs = allArgTypes.length;
+            }
 
-  @Override
-  public int getResultSetHoldability() throws SQLException {
-    return ResultSet.HOLD_CURSORS_OVER_COMMIT;
-  }
+            // decide if we are returning a single column result.
+            if ("b".equals(returnTypeType) || "d".equals(returnTypeType) || "e".equals(returnTypeType)
+                    || ("p".equals(returnTypeType) && argModesArray == null)) {
+                byte[][] tuple = new byte[columns][];
+                tuple[0] = null;
+                tuple[1] = schema;
+                tuple[2] = functionName;
+                tuple[3] = connection.encodeString("returnValue");
+                tuple[4] = connection
+                        .encodeString(Integer.toString(DatabaseMetaData.functionReturn));
+                tuple[5] = connection
+                        .encodeString(Integer.toString(connection.getTypeInfo().getSQLType(returnType)));
+                tuple[6] = connection.encodeString(connection.getTypeInfo().getPGType(returnType));
+                tuple[7] = null;
+                tuple[8] = null;
+                tuple[9] = null;
+                tuple[10] = null;
+                tuple[11] = connection
+                        .encodeString(Integer.toString(DatabaseMetaData.functionNullableUnknown));
+                tuple[12] = null;
+                tuple[14] = connection.encodeString(Integer.toString(0));
+                tuple[15] = isnullableUnknown;
+                tuple[16] = specificName;
 
-  @Override
-  public int getDatabaseMajorVersion() throws SQLException {
-    return connection.getServerMajorVersion();
-  }
+                v.add(new Tuple(tuple));
+            }
 
-  @Override
-  public int getDatabaseMinorVersion() throws SQLException {
-    return connection.getServerMinorVersion();
-  }
+            // Add a row for each argument.
+            for (int i = 0; i < numArgs; i++) {
+                byte[][] tuple = new byte[columns][];
+                tuple[0] = null;
+                tuple[1] = schema;
+                tuple[2] = functionName;
 
-  @Override
-  public int getJDBCMajorVersion() {
-    return DriverInfo.JDBC_MAJOR_VERSION;
-  }
+                if (argNames != null) {
+                    tuple[3] = connection.encodeString(argNames[i]);
+                } else {
+                    tuple[3] = connection.encodeString("$" + (i + 1));
+                }
 
-  @Override
-  public int getJDBCMinorVersion() {
-    return DriverInfo.JDBC_MINOR_VERSION;
-  }
+                int columnMode = DatabaseMetaData.functionColumnIn;
+                if (argModes != null && argModes[i] != null) {
+                    if ("o".equals(argModes[i])) {
+                        columnMode = DatabaseMetaData.functionColumnOut;
+                    } else if ("b".equals(argModes[i])) {
+                        columnMode = DatabaseMetaData.functionColumnInOut;
+                    } else if ("t".equals(argModes[i])) {
+                        columnMode = DatabaseMetaData.functionReturn;
+                    }
+                }
 
-  @Override
-  public int getSQLStateType() throws SQLException {
-    return sqlStateSQL;
-  }
+                tuple[4] = connection.encodeString(Integer.toString(columnMode));
 
-  @Override
-  public boolean locatorsUpdateCopy() throws SQLException {
-    /*
-     * Currently LOB's aren't updateable at all, so it doesn't matter what we return. We don't throw
-     * the notImplemented Exception because the 1.5 JDK's CachedRowSet calls this method regardless
-     * of whether large objects are used.
-     */
-    return true;
-  }
+                int argOid;
+                if (allArgTypes != null) {
+                    argOid = allArgTypes[i].intValue();
+                } else {
+                    argOid = argTypes.get(i).intValue();
+                }
 
-  @Override
-  public boolean supportsStatementPooling() throws SQLException {
-    return false;
-  }
+                tuple[5] =
+                        connection.encodeString(Integer.toString(connection.getTypeInfo().getSQLType(argOid)));
+                tuple[6] = connection.encodeString(connection.getTypeInfo().getPGType(argOid));
+                tuple[7] = null;
+                tuple[8] = null;
+                tuple[9] = null;
+                tuple[10] = null;
+                tuple[11] =
+                        connection.encodeString(Integer.toString(DatabaseMetaData.functionNullableUnknown));
+                tuple[12] = null;
+                tuple[14] = connection.encodeString(Integer.toString(i + 1));
+                tuple[15] = isnullableUnknown;
+                tuple[16] = specificName;
+
+                v.add(new Tuple(tuple));
+            }
+
+            // if we are returning a multi-column result.
+            if ("c".equals(returnTypeType) || ("p".equals(returnTypeType) && argModesArray != null)) {
+                String columnsql = "SELECT a.attname,a.atttypid FROM pg_catalog.pg_attribute a "
+                        + " WHERE a.attrelid = " + returnTypeRelid
+                        + " AND NOT a.attisdropped AND a.attnum > 0 ORDER BY a.attnum ";
+                Statement columnstmt = connection.createStatement();
+                ResultSet columnrs = columnstmt.executeQuery(columnsql);
+                while (columnrs.next()) {
+                    int columnTypeOid = (int) columnrs.getLong("atttypid");
+                    byte[][] tuple = new byte[columns][];
+                    tuple[0] = null;
+                    tuple[1] = schema;
+                    tuple[2] = functionName;
+                    tuple[3] = columnrs.getBytes("attname");
+                    tuple[4] = connection
+                            .encodeString(Integer.toString(DatabaseMetaData.functionColumnResult));
+                    tuple[5] = connection
+                            .encodeString(Integer.toString(connection.getTypeInfo().getSQLType(columnTypeOid)));
+                    tuple[6] = connection.encodeString(connection.getTypeInfo().getPGType(columnTypeOid));
+                    tuple[7] = null;
+                    tuple[8] = null;
+                    tuple[9] = null;
+                    tuple[10] = null;
+                    tuple[11] = connection
+                            .encodeString(Integer.toString(DatabaseMetaData.functionNullableUnknown));
+                    tuple[12] = null;
+                    tuple[14] = connection.encodeString(Integer.toString(0));
+                    tuple[15] = isnullableUnknown;
+                    tuple[16] = specificName;
+
+                    v.add(new Tuple(tuple));
+                }
+                columnrs.close();
+                columnstmt.close();
+            }
+        }
+        rs.close();
+        stmt.close();
+
+        return ((BaseStatement) createMetaDataStatement()).createDriverResultSet(f, v);
+    }
+
+    @Override
+    public ResultSet getPseudoColumns(String catalog, String schemaPattern,
+                                      String tableNamePattern, String columnNamePattern)
+            throws SQLException {
+        throw Driver.notImplemented(this.getClass(),
+                "getPseudoColumns(String, String, String, String)");
+    }
+
+    @Override
+    public boolean generatedKeyAlwaysReturned() throws SQLException {
+        return true;
+    }
+
+    @Override
+    public boolean supportsSavepoints() throws SQLException {
+        return true;
+    }
+
+    @Override
+    public boolean supportsNamedParameters() throws SQLException {
+        return false;
+    }
+
+    @Override
+    public boolean supportsMultipleOpenResults() throws SQLException {
+        return false;
+    }
+
+    @Override
+    public boolean supportsGetGeneratedKeys() throws SQLException {
+        // We don't support returning generated keys by column index,
+        // but that should be a rarer case than the ones we do support.
+        //
+        return true;
+    }
+
+    @Override
+    public ResultSet getSuperTypes(String catalog, String schemaPattern,
+                                   String typeNamePattern)
+            throws SQLException {
+        throw Driver.notImplemented(this.getClass(),
+                "getSuperTypes(String,String,String)");
+    }
+
+    @Override
+    public ResultSet getSuperTables(String catalog, String schemaPattern,
+                                    String tableNamePattern)
+            throws SQLException {
+        throw Driver.notImplemented(this.getClass(),
+                "getSuperTables(String,String,String,String)");
+    }
+
+    @Override
+    public ResultSet getAttributes(String catalog, String schemaPattern,
+                                   String typeNamePattern, String attributeNamePattern) throws SQLException {
+        throw Driver.notImplemented(this.getClass(),
+                "getAttributes(String,String,String,String)");
+    }
+
+    @Override
+    public boolean supportsResultSetHoldability(int holdability) throws SQLException {
+        return true;
+    }
+
+    @Override
+    public int getResultSetHoldability() throws SQLException {
+        return ResultSet.HOLD_CURSORS_OVER_COMMIT;
+    }
+
+    @Override
+    public int getDatabaseMajorVersion() throws SQLException {
+        return connection.getServerMajorVersion();
+    }
+
+    @Override
+    public int getDatabaseMinorVersion() throws SQLException {
+        return connection.getServerMinorVersion();
+    }
+
+    @Override
+    public int getJDBCMajorVersion() {
+        return DriverInfo.JDBC_MAJOR_VERSION;
+    }
+
+    @Override
+    public int getJDBCMinorVersion() {
+        return DriverInfo.JDBC_MINOR_VERSION;
+    }
+
+    @Override
+    public int getSQLStateType() throws SQLException {
+        return sqlStateSQL;
+    }
+
+    @Override
+    public boolean locatorsUpdateCopy() throws SQLException {
+        /*
+         * Currently LOB's aren't updateable at all, so it doesn't matter what we return. We don't throw
+         * the notImplemented Exception because the 1.5 JDK's CachedRowSet calls this method regardless
+         * of whether large objects are used.
+         */
+        return true;
+    }
+
+    @Override
+    public boolean supportsStatementPooling() throws SQLException {
+        return false;
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/PgParameterMetaData.java b/pgjdbc/src/main/java/org/postgresql/jdbc/PgParameterMetaData.java
index 1b15b77..48f3179 100644
--- a/pgjdbc/src/main/java/org/postgresql/jdbc/PgParameterMetaData.java
+++ b/pgjdbc/src/main/java/org/postgresql/jdbc/PgParameterMetaData.java
@@ -15,96 +15,96 @@ import java.sql.SQLException;
 
 public class PgParameterMetaData implements ParameterMetaData {
 
-  private final BaseConnection connection;
-  private final int[] oids;
+    private final BaseConnection connection;
+    private final int[] oids;
 
-  public PgParameterMetaData(BaseConnection connection, int[] oids) {
-    this.connection = connection;
-    this.oids = oids;
-  }
-
-  @Override
-  public String getParameterClassName(int param) throws SQLException {
-    checkParamIndex(param);
-    return connection.getTypeInfo().getJavaClass(oids[param - 1]);
-  }
-
-  @Override
-  public int getParameterCount() {
-    return oids.length;
-  }
-
-  /**
-   * {@inheritDoc} For now report all parameters as inputs. CallableStatements may have one output,
-   * but ignore that for now.
-   */
-  @Override
-  public int getParameterMode(int param) throws SQLException {
-    checkParamIndex(param);
-    return ParameterMetaData.parameterModeIn;
-  }
-
-  @Override
-  public int getParameterType(int param) throws SQLException {
-    checkParamIndex(param);
-    return connection.getTypeInfo().getSQLType(oids[param - 1]);
-  }
-
-  @Override
-  public String getParameterTypeName(int param) throws SQLException {
-    checkParamIndex(param);
-    return connection.getTypeInfo().getPGType(oids[param - 1]);
-  }
-
-  // we don't know this
-  @Override
-  public int getPrecision(int param) throws SQLException {
-    checkParamIndex(param);
-    return 0;
-  }
-
-  // we don't know this
-  @Override
-  public int getScale(int param) throws SQLException {
-    checkParamIndex(param);
-    return 0;
-  }
-
-  // we can't tell anything about nullability
-  @Override
-  public int isNullable(int param) throws SQLException {
-    checkParamIndex(param);
-    return ParameterMetaData.parameterNullableUnknown;
-  }
-
-  /**
-   * {@inheritDoc} PostgreSQL doesn't have unsigned numbers
-   */
-  @Override
-  public boolean isSigned(int param) throws SQLException {
-    checkParamIndex(param);
-    return connection.getTypeInfo().isSigned(oids[param - 1]);
-  }
-
-  private void checkParamIndex(int param) throws PSQLException {
-    if (param < 1 || param > oids.length) {
-      throw new PSQLException(
-          GT.tr("The parameter index is out of range: {0}, number of parameters: {1}.",
-              param, oids.length),
-          PSQLState.INVALID_PARAMETER_VALUE);
+    public PgParameterMetaData(BaseConnection connection, int[] oids) {
+        this.connection = connection;
+        this.oids = oids;
     }
-  }
 
-  @Override
-  public boolean isWrapperFor(Class<?> iface) throws SQLException {
-    return iface.isAssignableFrom(getClass());
-  }
-
-  @Override
-  public <T> T unwrap(Class<T> iface) throws SQLException {
-    if (iface.isAssignableFrom(getClass())) {
-      return iface.cast(this);
+    @Override
+    public String getParameterClassName(int param) throws SQLException {
+        checkParamIndex(param);
+        return connection.getTypeInfo().getJavaClass(oids[param - 1]);
+    }
+
+    @Override
+    public int getParameterCount() {
+        return oids.length;
+    }
+
+    /**
+     * {@inheritDoc} For now report all parameters as inputs. CallableStatements may have one output,
+     * but ignore that for now.
+     */
+    @Override
+    public int getParameterMode(int param) throws SQLException {
+        checkParamIndex(param);
+        return ParameterMetaData.parameterModeIn;
+    }
+
+    @Override
+    public int getParameterType(int param) throws SQLException {
+        checkParamIndex(param);
+        return connection.getTypeInfo().getSQLType(oids[param - 1]);
+    }
+
+    @Override
+    public String getParameterTypeName(int param) throws SQLException {
+        checkParamIndex(param);
+        return connection.getTypeInfo().getPGType(oids[param - 1]);
+    }
+
+    // we don't know this
+    @Override
+    public int getPrecision(int param) throws SQLException {
+        checkParamIndex(param);
+        return 0;
+    }
+
+    // we don't know this
+    @Override
+    public int getScale(int param) throws SQLException {
+        checkParamIndex(param);
+        return 0;
+    }
+
+    // we can't tell anything about nullability
+    @Override
+    public int isNullable(int param) throws SQLException {
+        checkParamIndex(param);
+        return ParameterMetaData.parameterNullableUnknown;
+    }
+
+    /**
+     * {@inheritDoc} PostgreSQL doesn't have unsigned numbers
+     */
+    @Override
+    public boolean isSigned(int param) throws SQLException {
+        checkParamIndex(param);
+        return connection.getTypeInfo().isSigned(oids[param - 1]);
+    }
+
+    private void checkParamIndex(int param) throws PSQLException {
+        if (param < 1 || param > oids.length) {
+            throw new PSQLException(
+                    GT.tr("The parameter index is out of range: {0}, number of parameters: {1}.",
+                            param, oids.length),
+                    PSQLState.INVALID_PARAMETER_VALUE);
+        }
+    }
+
+    @Override
+    public boolean isWrapperFor(Class<?> iface) throws SQLException {
+        return iface.isAssignableFrom(getClass());
+    }
+
+    @Override
+    public <T> T unwrap(Class<T> iface) throws SQLException {
+        if (iface.isAssignableFrom(getClass())) {
+            return iface.cast(this);
+        }
+        throw new SQLException("Cannot unwrap to " + iface.getName());
     }
-    throw new SQLException("Cannot unwrap to " + iface.getName());
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/PgPreparedStatement.java b/pgjdbc/src/main/java/org/postgresql/jdbc/PgPreparedStatement.java
index 6ed7b44..d6e406a 100644
--- a/pgjdbc/src/main/java/org/postgresql/jdbc/PgPreparedStatement.java
+++ b/pgjdbc/src/main/java/org/postgresql/jdbc/PgPreparedStatement.java
@@ -72,1729 +72,1730 @@ import java.util.TimeZone;
 import java.util.UUID;
 
 @SuppressWarnings("try")
-class PgPreparedStatement extends PgStatement implements PreparedStatement {
+public class PgPreparedStatement extends PgStatement implements PreparedStatement {
 
-  protected final CachedQuery preparedQuery; // Query fragments for prepared statement.
-  protected final ParameterList preparedParameters; // Parameter values for prepared statement.
+    protected final CachedQuery preparedQuery; // Query fragments for prepared statement.
+    protected final ParameterList preparedParameters; // Parameter values for prepared statement.
 
-  private TimeZone defaultTimeZone;
+    private TimeZone defaultTimeZone;
 
-  PgPreparedStatement(PgConnection connection, String sql, int rsType, int rsConcurrency,
-      int rsHoldability) throws SQLException {
-    this(connection, connection.borrowQuery(sql), rsType, rsConcurrency, rsHoldability);
-  }
-
-  PgPreparedStatement(PgConnection connection, CachedQuery query, int rsType,
-      int rsConcurrency, int rsHoldability) throws SQLException {
-    super(connection, rsType, rsConcurrency, rsHoldability);
-
-    this.preparedQuery = query;
-    this.preparedParameters = this.preparedQuery.query.createParameterList();
-    int parameterCount = preparedParameters.getParameterCount();
-    int maxSupportedParameters = maximumNumberOfParameters();
-    if (parameterCount > maxSupportedParameters) {
-      throw new PSQLException(
-          GT.tr("PreparedStatement can have at most {0} parameters. Please consider using arrays, or splitting the query in several ones, or using COPY. Given query has {1} parameters",
-              maxSupportedParameters,
-              parameterCount),
-          PSQLState.INVALID_PARAMETER_VALUE);
+    public PgPreparedStatement(PgConnection connection, String sql, int rsType, int rsConcurrency,
+                               int rsHoldability) throws SQLException {
+        this(connection, connection.borrowQuery(sql), rsType, rsConcurrency, rsHoldability);
     }
 
-    // TODO: this.wantsGeneratedKeysAlways = true;
+    @SuppressWarnings("this-escape")
+    public PgPreparedStatement(PgConnection connection, CachedQuery query, int rsType,
+                               int rsConcurrency, int rsHoldability) throws SQLException {
+        super(connection, rsType, rsConcurrency, rsHoldability);
 
-    setPoolable(true); // As per JDBC spec: prepared and callable statements are poolable by
-  }
-
-  final int maximumNumberOfParameters() {
-    return connection.getPreferQueryMode() == PreferQueryMode.SIMPLE ? Integer.MAX_VALUE : 65535;
-  }
-
-  @Override
-  public ResultSet executeQuery(String sql) throws SQLException {
-    throw new PSQLException(
-        GT.tr("Can''t use query methods that take a query string on a PreparedStatement."),
-        PSQLState.WRONG_OBJECT_TYPE);
-  }
-
-  /*
-   * A Prepared SQL query is executed and its ResultSet is returned
-   *
-   * @return a ResultSet that contains the data produced by the * query - never null
-   *
-   * @exception SQLException if a database access error occurs
-   */
-  @Override
-  public ResultSet executeQuery() throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      if (!executeWithFlags(0)) {
-        throw new PSQLException(GT.tr("No results were returned by the query."), PSQLState.NO_DATA);
-      }
-
-      return getSingleResultSet();
-    }
-  }
-
-  @Override
-  public int executeUpdate(String sql) throws SQLException {
-    throw new PSQLException(
-        GT.tr("Can''t use query methods that take a query string on a PreparedStatement."),
-        PSQLState.WRONG_OBJECT_TYPE);
-  }
-
-  @Override
-  public int executeUpdate() throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      executeWithFlags(QueryExecutor.QUERY_NO_RESULTS);
-      checkNoResultUpdate();
-      return getUpdateCount();
-    }
-  }
-
-  @Override
-  public long executeLargeUpdate() throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      executeWithFlags(QueryExecutor.QUERY_NO_RESULTS);
-      checkNoResultUpdate();
-      return getLargeUpdateCount();
-    }
-  }
-
-  @Override
-  public boolean execute(String sql) throws SQLException {
-    throw new PSQLException(
-        GT.tr("Can''t use query methods that take a query string on a PreparedStatement."),
-        PSQLState.WRONG_OBJECT_TYPE);
-  }
-
-  @Override
-  public boolean execute() throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      return executeWithFlags(0);
-    }
-  }
-
-  @Override
-  public boolean executeWithFlags(int flags) throws SQLException {
-    try {
-      try (ResourceLock ignore = lock.obtain()) {
-        checkClosed();
-
-        if (connection.getPreferQueryMode() == PreferQueryMode.SIMPLE) {
-          flags |= QueryExecutor.QUERY_EXECUTE_AS_SIMPLE;
-        }
-
-        execute(preparedQuery, preparedParameters, flags);
-
-        checkClosed();
-        return result != null && result.getResultSet() != null;
-      }
-    } finally {
-      defaultTimeZone = null;
-    }
-  }
-
-  @Override
-  protected boolean isOneShotQuery(CachedQuery cachedQuery) {
-    if (cachedQuery == null) {
-      cachedQuery = preparedQuery;
-    }
-    return super.isOneShotQuery(cachedQuery);
-  }
-
-  @Override
-  public void closeImpl() throws SQLException {
-    if (preparedQuery != null) {
-      connection.releaseQuery(preparedQuery);
-    }
-  }
-
-  @Override
-  public void setNull(int parameterIndex, int sqlType) throws SQLException {
-    checkClosed();
-
-    if (parameterIndex < 1 || parameterIndex > preparedParameters.getParameterCount()) {
-      throw new PSQLException(
-        GT.tr("The column index is out of range: {0}, number of columns: {1}.",
-          parameterIndex, preparedParameters.getParameterCount()),
-        PSQLState.INVALID_PARAMETER_VALUE);
-    }
-
-    int oid;
-    switch (sqlType) {
-      case Types.SQLXML:
-        oid = Oid.XML;
-        break;
-      case Types.INTEGER:
-        oid = Oid.INT4;
-        break;
-      case Types.TINYINT:
-      case Types.SMALLINT:
-        oid = Oid.INT2;
-        break;
-      case Types.BIGINT:
-        oid = Oid.INT8;
-        break;
-      case Types.REAL:
-        oid = Oid.FLOAT4;
-        break;
-      case Types.DOUBLE:
-      case Types.FLOAT:
-        oid = Oid.FLOAT8;
-        break;
-      case Types.DECIMAL:
-      case Types.NUMERIC:
-        oid = Oid.NUMERIC;
-        break;
-      case Types.CHAR:
-        oid = Oid.BPCHAR;
-        break;
-      case Types.VARCHAR:
-      case Types.LONGVARCHAR:
-        oid = connection.getStringVarcharFlag() ? Oid.VARCHAR : Oid.UNSPECIFIED;
-        break;
-      case Types.DATE:
-        oid = Oid.DATE;
-        break;
-      case Types.TIME:
-      case Types.TIME_WITH_TIMEZONE:
-      case Types.TIMESTAMP_WITH_TIMEZONE:
-      case Types.TIMESTAMP:
-        oid = Oid.UNSPECIFIED;
-        break;
-      case Types.BOOLEAN:
-      case Types.BIT:
-        oid = Oid.BOOL;
-        break;
-      case Types.BINARY:
-      case Types.VARBINARY:
-      case Types.LONGVARBINARY:
-        oid = Oid.BYTEA;
-        break;
-      case Types.BLOB:
-      case Types.CLOB:
-        oid = Oid.OID;
-        break;
-      case Types.REF_CURSOR:
-        oid = Oid.REF_CURSOR;
-        break;
-      case Types.ARRAY:
-      case Types.DISTINCT:
-      case Types.STRUCT:
-      case Types.NULL:
-      case Types.OTHER:
-        oid = Oid.UNSPECIFIED;
-        break;
-      default:
-        // Bad Types value.
-        throw new PSQLException(GT.tr("Unknown Types value."), PSQLState.INVALID_PARAMETER_TYPE);
-    }
-    preparedParameters.setNull(parameterIndex, oid);
-  }
-
-  @Override
-  public void setBoolean(int parameterIndex, boolean x) throws SQLException {
-    checkClosed();
-    // The key words TRUE and FALSE are the preferred (SQL-compliant) usage.
-    bindLiteral(parameterIndex, x ? "TRUE" : "FALSE", Oid.BOOL);
-  }
-
-  @Override
-  public void setByte(int parameterIndex, byte x) throws SQLException {
-    setShort(parameterIndex, x);
-  }
-
-  @Override
-  public void setShort(int parameterIndex, short x) throws SQLException {
-    checkClosed();
-    if (connection.binaryTransferSend(Oid.INT2)) {
-      byte[] val = new byte[2];
-      ByteConverter.int2(val, 0, x);
-      bindBytes(parameterIndex, val, Oid.INT2);
-      return;
-    }
-    bindLiteral(parameterIndex, Integer.toString(x), Oid.INT2);
-  }
-
-  @Override
-  public void setInt(int parameterIndex, int x) throws SQLException {
-    checkClosed();
-    if (connection.binaryTransferSend(Oid.INT4)) {
-      byte[] val = new byte[4];
-      ByteConverter.int4(val, 0, x);
-      bindBytes(parameterIndex, val, Oid.INT4);
-      return;
-    }
-    bindLiteral(parameterIndex, Integer.toString(x), Oid.INT4);
-  }
-
-  @Override
-  public void setLong(int parameterIndex, long x) throws SQLException {
-    checkClosed();
-    if (connection.binaryTransferSend(Oid.INT8)) {
-      byte[] val = new byte[8];
-      ByteConverter.int8(val, 0, x);
-      bindBytes(parameterIndex, val, Oid.INT8);
-      return;
-    }
-    bindLiteral(parameterIndex, Long.toString(x), Oid.INT8);
-  }
-
-  @Override
-  public void setFloat(int parameterIndex, float x) throws SQLException {
-    checkClosed();
-    if (connection.binaryTransferSend(Oid.FLOAT4)) {
-      byte[] val = new byte[4];
-      ByteConverter.float4(val, 0, x);
-      bindBytes(parameterIndex, val, Oid.FLOAT4);
-      return;
-    }
-    bindLiteral(parameterIndex, Float.toString(x), Oid.FLOAT8);
-  }
-
-  @Override
-  public void setDouble(int parameterIndex, double x) throws SQLException {
-    checkClosed();
-    if (connection.binaryTransferSend(Oid.FLOAT8)) {
-      byte[] val = new byte[8];
-      ByteConverter.float8(val, 0, x);
-      bindBytes(parameterIndex, val, Oid.FLOAT8);
-      return;
-    }
-    bindLiteral(parameterIndex, Double.toString(x), Oid.FLOAT8);
-  }
-
-  @Override
-  public void setBigDecimal(int parameterIndex, BigDecimal x)
-      throws SQLException {
-    if (x != null && connection.binaryTransferSend(Oid.NUMERIC)) {
-      final byte[] bytes = ByteConverter.numeric(x);
-      bindBytes(parameterIndex, bytes, Oid.NUMERIC);
-      return;
-    }
-    setNumber(parameterIndex, x);
-  }
-
-  @Override
-  public void setString(int parameterIndex, String x) throws SQLException {
-    checkClosed();
-    setString(parameterIndex, x, getStringType());
-  }
-
-  private int getStringType() {
-    return connection.getStringVarcharFlag() ? Oid.VARCHAR : Oid.UNSPECIFIED;
-  }
-
-  protected void setString(int parameterIndex,
-      String x, int oid) throws SQLException {
-    // if the passed string is null, then set this column to null
-    checkClosed();
-    if (x == null) {
-      preparedParameters.setNull(parameterIndex, oid);
-    } else {
-      bindString(parameterIndex, x, oid);
-    }
-  }
-
-  @Override
-  public void setBytes(int parameterIndex, byte [] x) throws SQLException {
-    checkClosed();
-
-    if (null == x) {
-      setNull(parameterIndex, Types.VARBINARY);
-      return;
-    }
-
-    // Version 7.2 supports the bytea datatype for byte arrays
-    byte[] copy = new byte[x.length];
-    System.arraycopy(x, 0, copy, 0, x.length);
-    preparedParameters.setBytea(parameterIndex, copy, 0, x.length);
-  }
-
-  private void setByteStreamWriter(int parameterIndex,
-      ByteStreamWriter x) throws SQLException {
-    preparedParameters.setBytea(parameterIndex, x);
-  }
-
-  @Override
-  public void setDate(int parameterIndex,
-      Date x) throws SQLException {
-    setDate(parameterIndex, x, null);
-  }
-
-  @Override
-  public void setTime(int parameterIndex, Time x) throws SQLException {
-    setTime(parameterIndex, x, null);
-  }
-
-  @Override
-  public void setTimestamp(int parameterIndex, Timestamp x) throws SQLException {
-    setTimestamp(parameterIndex, x, null);
-  }
-
-  private void setCharacterStreamPost71(int parameterIndex,
-      InputStream x, int length,
-      String encoding) throws SQLException {
-
-    if (x == null) {
-      setNull(parameterIndex, Types.VARCHAR);
-      return;
-    }
-    if (length < 0) {
-      throw new PSQLException(GT.tr("Invalid stream length {0}.", length),
-          PSQLState.INVALID_PARAMETER_VALUE);
-    }
-
-    // Version 7.2 supports AsciiStream for all PG text types (char, varchar, text)
-    // As the spec/javadoc for this method indicate this is to be used for
-    // large String values (i.e. LONGVARCHAR) PG doesn't have a separate
-    // long varchar datatype, but with toast all text datatypes are capable of
-    // handling very large values. Thus the implementation ends up calling
-    // setString() since there is no current way to stream the value to the server
-    try {
-      InputStreamReader inStream = new InputStreamReader(x, encoding);
-      char[] chars = new char[length];
-      int charsRead = 0;
-      while (true) {
-        int n = inStream.read(chars, charsRead, length - charsRead);
-        if (n == -1) {
-          break;
-        }
-
-        charsRead += n;
-
-        if (charsRead == length) {
-          break;
-        }
-      }
-
-      setString(parameterIndex, new String(chars, 0, charsRead), Oid.VARCHAR);
-    } catch (UnsupportedEncodingException uee) {
-      throw new PSQLException(GT.tr("The JVM claims not to support the {0} encoding.", encoding),
-          PSQLState.UNEXPECTED_ERROR, uee);
-    } catch (IOException ioe) {
-      throw new PSQLException(GT.tr("Provided InputStream failed."), PSQLState.UNEXPECTED_ERROR,
-          ioe);
-    }
-  }
-
-  @Override
-  public void setAsciiStream(int parameterIndex, InputStream x,
-      int length) throws SQLException {
-    checkClosed();
-    setCharacterStreamPost71(parameterIndex, x, length, "ASCII");
-  }
-
-  @Override
-  @SuppressWarnings("deprecation")
-  public void setUnicodeStream(int parameterIndex, InputStream x,
-      int length) throws SQLException {
-    checkClosed();
-
-    setCharacterStreamPost71(parameterIndex, x, length, "UTF-8");
-  }
-
-  @Override
-  public void setBinaryStream(int parameterIndex, InputStream x,
-      int length) throws SQLException {
-    checkClosed();
-
-    if (x == null) {
-      setNull(parameterIndex, Types.VARBINARY);
-      return;
-    }
-
-    if (length < 0) {
-      throw new PSQLException(GT.tr("Invalid stream length {0}.", length),
-          PSQLState.INVALID_PARAMETER_VALUE);
-    }
-
-    // Version 7.2 supports BinaryStream for the PG bytea type
-    // As the spec/javadoc for this method indicate this is to be used for
-    // large binary values (i.e. LONGVARBINARY) PG doesn't have a separate
-    // long binary datatype, but with toast the bytea datatype is capable of
-    // handling very large values.
-    preparedParameters.setBytea(parameterIndex, x, length);
-  }
-
-  @Override
-  public void clearParameters() throws SQLException {
-    preparedParameters.clear();
-  }
-
-  // Helper method for setting parameters to PGobject subclasses.
-  private void setPGobject(int parameterIndex, PGobject x) throws SQLException {
-    String typename = x.getType();
-    int oid = connection.getTypeInfo().getPGType(typename);
-    if (oid == Oid.UNSPECIFIED) {
-      throw new PSQLException(GT.tr("Unknown type {0}.", typename),
-          PSQLState.INVALID_PARAMETER_TYPE);
-    }
-
-    if ((x instanceof PGBinaryObject) && connection.binaryTransferSend(oid)) {
-      PGBinaryObject binObj = (PGBinaryObject) x;
-      int length = binObj.lengthInBytes();
-      if (length == 0) {
-        preparedParameters.setNull(parameterIndex, oid);
-        return;
-      }
-      byte[] data = new byte[length];
-      binObj.toBytes(data, 0);
-      bindBytes(parameterIndex, data, oid);
-    } else {
-      setString(parameterIndex, x.getValue(), oid);
-    }
-  }
-
-  private void setMap(int parameterIndex, Map<?, ?> x) throws SQLException {
-    int oid = connection.getTypeInfo().getPGType("hstore");
-    if (oid == Oid.UNSPECIFIED) {
-      throw new PSQLException(GT.tr("No hstore extension installed."),
-          PSQLState.INVALID_PARAMETER_TYPE);
-    }
-    if (connection.binaryTransferSend(oid)) {
-      byte[] data = HStoreConverter.toBytes(x, connection.getEncoding());
-      bindBytes(parameterIndex, data, oid);
-    } else {
-      setString(parameterIndex, HStoreConverter.toString(x), oid);
-    }
-  }
-
-  private void setNumber(int parameterIndex, Number x) throws SQLException {
-    checkClosed();
-    if (x == null) {
-      setNull(parameterIndex, Types.DECIMAL);
-    } else {
-      bindLiteral(parameterIndex, x.toString(), Oid.NUMERIC);
-    }
-  }
-
-  @Override
-  public void setObject(int parameterIndex, Object in,
-      int targetSqlType, int scale)
-      throws SQLException {
-    checkClosed();
-
-    if (in == null) {
-      setNull(parameterIndex, targetSqlType);
-      return;
-    }
-
-    if (targetSqlType == Types.OTHER && in instanceof UUID
-        && connection.haveMinimumServerVersion(ServerVersion.v8_3)) {
-      setUuid(parameterIndex, (UUID) in);
-      return;
-    }
-
-    switch (targetSqlType) {
-      case Types.SQLXML:
-        if (in instanceof SQLXML) {
-          setSQLXML(parameterIndex, (SQLXML) in);
-        } else {
-          setSQLXML(parameterIndex, new PgSQLXML(connection, in.toString()));
-        }
-        break;
-      case Types.INTEGER:
-        setInt(parameterIndex, castToInt(in));
-        break;
-      case Types.TINYINT:
-      case Types.SMALLINT:
-        setShort(parameterIndex, castToShort(in));
-        break;
-      case Types.BIGINT:
-        setLong(parameterIndex, castToLong(in));
-        break;
-      case Types.REAL:
-        setFloat(parameterIndex, castToFloat(in));
-        break;
-      case Types.DOUBLE:
-      case Types.FLOAT:
-        setDouble(parameterIndex, castToDouble(in));
-        break;
-      case Types.DECIMAL:
-      case Types.NUMERIC:
-        setBigDecimal(parameterIndex, castToBigDecimal(in, scale));
-        break;
-      case Types.CHAR:
-        setString(parameterIndex, castToString(in), Oid.BPCHAR);
-        break;
-      case Types.VARCHAR:
-        setString(parameterIndex, castToString(in), getStringType());
-        break;
-      case Types.LONGVARCHAR:
-        if (in instanceof InputStream) {
-          preparedParameters.setText(parameterIndex, (InputStream) in);
-        } else {
-          setString(parameterIndex, castToString(in), getStringType());
-        }
-        break;
-      case Types.DATE:
-        if (in instanceof Date) {
-          setDate(parameterIndex, (Date) in);
-        } else {
-          Date tmpd;
-          if (in instanceof java.util.Date) {
-            tmpd = new Date(((java.util.Date) in).getTime());
-          } else if (in instanceof LocalDate) {
-            setDate(parameterIndex, (LocalDate) in);
-            break;
-          } else {
-            tmpd = getTimestampUtils().toDate(getDefaultCalendar(), in.toString());
-          }
-          setDate(parameterIndex, tmpd);
-        }
-        break;
-      case Types.TIME:
-        if (in instanceof Time) {
-          setTime(parameterIndex, (Time) in);
-        } else {
-          Time tmpt;
-          if (in instanceof java.util.Date) {
-            tmpt = new Time(((java.util.Date) in).getTime());
-          } else if (in instanceof LocalTime) {
-            setTime(parameterIndex, (LocalTime) in);
-            break;
-          } else if (in instanceof OffsetTime) {
-            setTime(parameterIndex, (OffsetTime) in);
-            break;
-          } else {
-            tmpt = getTimestampUtils().toTime(getDefaultCalendar(), in.toString());
-          }
-          setTime(parameterIndex, tmpt);
-        }
-        break;
-      case Types.TIMESTAMP:
-        if (in instanceof PGTimestamp) {
-          setObject(parameterIndex, in);
-        } else if (in instanceof Timestamp) {
-          setTimestamp(parameterIndex, (Timestamp) in);
-        } else {
-          Timestamp tmpts;
-          if (in instanceof java.util.Date) {
-            tmpts = new Timestamp(((java.util.Date) in).getTime());
-          } else if (in instanceof LocalDateTime) {
-            setTimestamp(parameterIndex, (LocalDateTime) in);
-            break;
-          } else {
-            tmpts = getTimestampUtils().toTimestamp(getDefaultCalendar(), in.toString());
-          }
-          setTimestamp(parameterIndex, tmpts);
-        }
-        break;
-      case Types.TIMESTAMP_WITH_TIMEZONE:
-        if (in instanceof OffsetDateTime) {
-          setTimestamp(parameterIndex, (OffsetDateTime) in);
-        } else if (in instanceof PGTimestamp) {
-          setObject(parameterIndex, in);
-        } else {
-          throw new PSQLException(
-              GT.tr("Cannot cast an instance of {0} to type {1}",
-                  in.getClass().getName(), "Types.TIMESTAMP_WITH_TIMEZONE"),
-              PSQLState.INVALID_PARAMETER_TYPE);
-        }
-        break;
-      case Types.BOOLEAN:
-      case Types.BIT:
-        setBoolean(parameterIndex, BooleanTypeUtil.castToBoolean(in));
-        break;
-      case Types.BINARY:
-      case Types.VARBINARY:
-      case Types.LONGVARBINARY:
-        setObject(parameterIndex, in);
-        break;
-      case Types.BLOB:
-        if (in instanceof Blob) {
-          setBlob(parameterIndex, (Blob) in);
-        } else if (in instanceof InputStream) {
-          long oid = createBlob(parameterIndex, (InputStream) in, Long.MAX_VALUE);
-          setLong(parameterIndex, oid);
-        } else {
-          throw new PSQLException(
-              GT.tr("Cannot cast an instance of {0} to type {1}",
-                  in.getClass().getName(), "Types.BLOB"),
-              PSQLState.INVALID_PARAMETER_TYPE);
-        }
-        break;
-      case Types.CLOB:
-        if (in instanceof Clob) {
-          setClob(parameterIndex, (Clob) in);
-        } else {
-          throw new PSQLException(
-              GT.tr("Cannot cast an instance of {0} to type {1}",
-                  in.getClass().getName(), "Types.CLOB"),
-              PSQLState.INVALID_PARAMETER_TYPE);
-        }
-        break;
-      case Types.ARRAY:
-        if (in instanceof Array) {
-          setArray(parameterIndex, (Array) in);
-        } else {
-          try {
-            setObjectArray(parameterIndex, in);
-          } catch (Exception e) {
+        this.preparedQuery = query;
+        this.preparedParameters = this.preparedQuery.query.createParameterList();
+        int parameterCount = preparedParameters.getParameterCount();
+        int maxSupportedParameters = maximumNumberOfParameters();
+        if (parameterCount > maxSupportedParameters) {
             throw new PSQLException(
-                GT.tr("Cannot cast an instance of {0} to type {1}", in.getClass().getName(), "Types.ARRAY"),
-                PSQLState.INVALID_PARAMETER_TYPE, e);
-          }
+                    GT.tr("PreparedStatement can have at most {0} parameters. Please consider using arrays, or splitting the query in several ones, or using COPY. Given query has {1} parameters",
+                            maxSupportedParameters,
+                            parameterCount),
+                    PSQLState.INVALID_PARAMETER_VALUE);
         }
-        break;
-      case Types.DISTINCT:
-        bindString(parameterIndex, in.toString(), Oid.UNSPECIFIED);
-        break;
-      case Types.OTHER:
-        if (in instanceof PGobject) {
-          setPGobject(parameterIndex, (PGobject) in);
-        } else if (in instanceof Map) {
-          setMap(parameterIndex, (Map<?, ?>) in);
-        } else {
-          bindString(parameterIndex, in.toString(), Oid.UNSPECIFIED);
+
+        // TODO: this.wantsGeneratedKeysAlways = true;
+
+        setPoolable(true); // As per JDBC spec: prepared and callable statements are poolable by default
+    }
+
+    private static String asString(final Clob in) throws SQLException {
+        return in.getSubString(1, (int) in.length());
+    }
+
+    private static int castToInt(final Object in) throws SQLException {
+        try {
+            if (in instanceof String) {
+                return Integer.parseInt((String) in);
+            }
+            if (in instanceof Number) {
+                return ((Number) in).intValue();
+            }
+            if (in instanceof java.util.Date) {
+                return (int) ((java.util.Date) in).getTime();
+            }
+            if (in instanceof Boolean) {
+                return (Boolean) in ? 1 : 0;
+            }
+            if (in instanceof Clob) {
+                return Integer.parseInt(asString((Clob) in));
+            }
+            if (in instanceof Character) {
+                return Integer.parseInt(in.toString());
+            }
+        } catch (final Exception e) {
+            throw cannotCastException(in.getClass().getName(), "int", e);
         }
-        break;
-      default:
-        throw new PSQLException(GT.tr("Unsupported Types value: {0}", targetSqlType),
-            PSQLState.INVALID_PARAMETER_TYPE);
+        throw cannotCastException(in.getClass().getName(), "int");
     }
-  }
 
-  private Class<?> getArrayType(Class<?> type) {
-    Class<?> subType = type.getComponentType();
-    while (subType != null) {
-      type = subType;
-      subType = type.getComponentType();
-    }
-    return type;
-  }
-
-  private <A extends Object> void setObjectArray(int parameterIndex, A in) throws SQLException {
-    final ArrayEncoding.ArrayEncoder<A> arraySupport = ArrayEncoding.getArrayEncoder(in);
-
-    final TypeInfo typeInfo = connection.getTypeInfo();
-
-    int oid = arraySupport.getDefaultArrayTypeOid();
-
-    if (arraySupport.supportBinaryRepresentation(oid) && connection.getPreferQueryMode() != PreferQueryMode.SIMPLE) {
-      bindBytes(parameterIndex, arraySupport.toBinaryRepresentation(connection, in, oid), oid);
-    } else {
-      if (oid == Oid.UNSPECIFIED) {
-        Class<?> arrayType = getArrayType(in.getClass());
-        oid = typeInfo.getJavaArrayType(arrayType.getName());
-        if (oid == Oid.UNSPECIFIED) {
-          throw new SQLFeatureNotSupportedException();
+    private static short castToShort(final Object in) throws SQLException {
+        try {
+            if (in instanceof String) {
+                return Short.parseShort((String) in);
+            }
+            if (in instanceof Number) {
+                return ((Number) in).shortValue();
+            }
+            if (in instanceof java.util.Date) {
+                return (short) ((java.util.Date) in).getTime();
+            }
+            if (in instanceof Boolean) {
+                return (Boolean) in ? (short) 1 : (short) 0;
+            }
+            if (in instanceof Clob) {
+                return Short.parseShort(asString((Clob) in));
+            }
+            if (in instanceof Character) {
+                return Short.parseShort(in.toString());
+            }
+        } catch (final Exception e) {
+            throw cannotCastException(in.getClass().getName(), "short", e);
         }
-      }
-      final int baseOid = typeInfo.getPGArrayElement(oid);
-      final String baseType = typeInfo.getPGType(baseOid);
-
-      final Array array = getPGConnection().createArrayOf(baseType, in);
-      this.setArray(parameterIndex, array);
+        throw cannotCastException(in.getClass().getName(), "short");
     }
-  }
 
-  private static String asString(final Clob in) throws SQLException {
-    return in.getSubString(1, (int) in.length());
-  }
-
-  private static int castToInt(final Object in) throws SQLException {
-    try {
-      if (in instanceof String) {
-        return Integer.parseInt((String) in);
-      }
-      if (in instanceof Number) {
-        return ((Number) in).intValue();
-      }
-      if (in instanceof java.util.Date) {
-        return (int) ((java.util.Date) in).getTime();
-      }
-      if (in instanceof Boolean) {
-        return (Boolean) in ? 1 : 0;
-      }
-      if (in instanceof Clob) {
-        return Integer.parseInt(asString((Clob) in));
-      }
-      if (in instanceof Character) {
-        return Integer.parseInt(in.toString());
-      }
-    } catch (final Exception e) {
-      throw cannotCastException(in.getClass().getName(), "int", e);
-    }
-    throw cannotCastException(in.getClass().getName(), "int");
-  }
-
-  private static short castToShort(final Object in) throws SQLException {
-    try {
-      if (in instanceof String) {
-        return Short.parseShort((String) in);
-      }
-      if (in instanceof Number) {
-        return ((Number) in).shortValue();
-      }
-      if (in instanceof java.util.Date) {
-        return (short) ((java.util.Date) in).getTime();
-      }
-      if (in instanceof Boolean) {
-        return (Boolean) in ? (short) 1 : (short) 0;
-      }
-      if (in instanceof Clob) {
-        return Short.parseShort(asString((Clob) in));
-      }
-      if (in instanceof Character) {
-        return Short.parseShort(in.toString());
-      }
-    } catch (final Exception e) {
-      throw cannotCastException(in.getClass().getName(), "short", e);
-    }
-    throw cannotCastException(in.getClass().getName(), "short");
-  }
-
-  private static long castToLong(final Object in) throws SQLException {
-    try {
-      if (in instanceof String) {
-        return Long.parseLong((String) in);
-      }
-      if (in instanceof Number) {
-        return ((Number) in).longValue();
-      }
-      if (in instanceof java.util.Date) {
-        return ((java.util.Date) in).getTime();
-      }
-      if (in instanceof Boolean) {
-        return (Boolean) in ? 1L : 0L;
-      }
-      if (in instanceof Clob) {
-        return Long.parseLong(asString((Clob) in));
-      }
-      if (in instanceof Character) {
-        return Long.parseLong(in.toString());
-      }
-    } catch (final Exception e) {
-      throw cannotCastException(in.getClass().getName(), "long", e);
-    }
-    throw cannotCastException(in.getClass().getName(), "long");
-  }
-
-  private static float castToFloat(final Object in) throws SQLException {
-    try {
-      if (in instanceof String) {
-        return Float.parseFloat((String) in);
-      }
-      if (in instanceof Number) {
-        return ((Number) in).floatValue();
-      }
-      if (in instanceof java.util.Date) {
-        return ((java.util.Date) in).getTime();
-      }
-      if (in instanceof Boolean) {
-        return (Boolean) in ? 1f : 0f;
-      }
-      if (in instanceof Clob) {
-        return Float.parseFloat(asString((Clob) in));
-      }
-      if (in instanceof Character) {
-        return Float.parseFloat(in.toString());
-      }
-    } catch (final Exception e) {
-      throw cannotCastException(in.getClass().getName(), "float", e);
-    }
-    throw cannotCastException(in.getClass().getName(), "float");
-  }
-
-  private static double castToDouble(final Object in) throws SQLException {
-    try {
-      if (in instanceof String) {
-        return Double.parseDouble((String) in);
-      }
-      if (in instanceof Number) {
-        return ((Number) in).doubleValue();
-      }
-      if (in instanceof java.util.Date) {
-        return ((java.util.Date) in).getTime();
-      }
-      if (in instanceof Boolean) {
-        return (Boolean) in ? 1d : 0d;
-      }
-      if (in instanceof Clob) {
-        return Double.parseDouble(asString((Clob) in));
-      }
-      if (in instanceof Character) {
-        return Double.parseDouble(in.toString());
-      }
-    } catch (final Exception e) {
-      throw cannotCastException(in.getClass().getName(), "double", e);
-    }
-    throw cannotCastException(in.getClass().getName(), "double");
-  }
-
-  private static BigDecimal castToBigDecimal(final Object in, final int scale) throws SQLException {
-    try {
-      BigDecimal rc = null;
-      if (in instanceof String) {
-        rc = new BigDecimal((String) in);
-      } else if (in instanceof BigDecimal) {
-        rc = (BigDecimal) in;
-      } else if (in instanceof BigInteger) {
-        rc = new BigDecimal((BigInteger) in);
-      } else if (in instanceof Long || in instanceof Integer || in instanceof Short
-          || in instanceof Byte) {
-        rc = BigDecimal.valueOf(((Number) in).longValue());
-      } else if (in instanceof Double || in instanceof Float) {
-        rc = BigDecimal.valueOf(((Number) in).doubleValue());
-      } else if (in instanceof java.util.Date) {
-        rc = BigDecimal.valueOf(((java.util.Date) in).getTime());
-      } else if (in instanceof Boolean) {
-        rc = (Boolean) in ? BigDecimal.ONE : BigDecimal.ZERO;
-      } else if (in instanceof Clob) {
-        rc = new BigDecimal(asString((Clob) in));
-      } else if (in instanceof Character) {
-        rc = new BigDecimal(new char[]{(Character) in});
-      }
-      if (rc != null) {
-        if (scale >= 0) {
-          rc = rc.setScale(scale, RoundingMode.HALF_UP);
+    private static long castToLong(final Object in) throws SQLException {
+        try {
+            if (in instanceof String) {
+                return Long.parseLong((String) in);
+            }
+            if (in instanceof Number) {
+                return ((Number) in).longValue();
+            }
+            if (in instanceof java.util.Date) {
+                return ((java.util.Date) in).getTime();
+            }
+            if (in instanceof Boolean) {
+                return (Boolean) in ? 1L : 0L;
+            }
+            if (in instanceof Clob) {
+                return Long.parseLong(asString((Clob) in));
+            }
+            if (in instanceof Character) {
+                return Long.parseLong(in.toString());
+            }
+        } catch (final Exception e) {
+            throw cannotCastException(in.getClass().getName(), "long", e);
         }
-        return rc;
-      }
-    } catch (final Exception e) {
-      throw cannotCastException(in.getClass().getName(), "BigDecimal", e);
+        throw cannotCastException(in.getClass().getName(), "long");
     }
-    throw cannotCastException(in.getClass().getName(), "BigDecimal");
-  }
 
-  private static String castToString(final Object in) throws SQLException {
-    try {
-      if (in instanceof String) {
-        return (String) in;
-      }
-      if (in instanceof Clob) {
-        return asString((Clob) in);
-      }
-      // convert any unknown objects to string.
-      return in.toString();
-
-    } catch (final Exception e) {
-      throw cannotCastException(in.getClass().getName(), "String", e);
+    private static float castToFloat(final Object in) throws SQLException {
+        try {
+            if (in instanceof String) {
+                return Float.parseFloat((String) in);
+            }
+            if (in instanceof Number) {
+                return ((Number) in).floatValue();
+            }
+            if (in instanceof java.util.Date) {
+                return ((java.util.Date) in).getTime();
+            }
+            if (in instanceof Boolean) {
+                return (Boolean) in ? 1f : 0f;
+            }
+            if (in instanceof Clob) {
+                return Float.parseFloat(asString((Clob) in));
+            }
+            if (in instanceof Character) {
+                return Float.parseFloat(in.toString());
+            }
+        } catch (final Exception e) {
+            throw cannotCastException(in.getClass().getName(), "float", e);
+        }
+        throw cannotCastException(in.getClass().getName(), "float");
     }
-  }
 
-  private static PSQLException cannotCastException(final String fromType, final String toType) {
-    return cannotCastException(fromType, toType, null);
-  }
+    private static double castToDouble(final Object in) throws SQLException {
+        try {
+            if (in instanceof String) {
+                return Double.parseDouble((String) in);
+            }
+            if (in instanceof Number) {
+                return ((Number) in).doubleValue();
+            }
+            if (in instanceof java.util.Date) {
+                return ((java.util.Date) in).getTime();
+            }
+            if (in instanceof Boolean) {
+                return (Boolean) in ? 1d : 0d;
+            }
+            if (in instanceof Clob) {
+                return Double.parseDouble(asString((Clob) in));
+            }
+            if (in instanceof Character) {
+                return Double.parseDouble(in.toString());
+            }
+        } catch (final Exception e) {
+            throw cannotCastException(in.getClass().getName(), "double", e);
+        }
+        throw cannotCastException(in.getClass().getName(), "double");
+    }
 
-  private static PSQLException cannotCastException(final String fromType, final String toType,
-      final Exception cause) {
-    return new PSQLException(
-        GT.tr("Cannot convert an instance of {0} to type {1}", fromType, toType),
-        PSQLState.INVALID_PARAMETER_TYPE, cause);
-  }
+    private static BigDecimal castToBigDecimal(final Object in, final int scale) throws SQLException {
+        try {
+            BigDecimal rc = null;
+            if (in instanceof String) {
+                rc = new BigDecimal((String) in);
+            } else if (in instanceof BigDecimal) {
+                rc = (BigDecimal) in;
+            } else if (in instanceof BigInteger) {
+                rc = new BigDecimal((BigInteger) in);
+            } else if (in instanceof Long || in instanceof Integer || in instanceof Short
+                    || in instanceof Byte) {
+                rc = BigDecimal.valueOf(((Number) in).longValue());
+            } else if (in instanceof Double || in instanceof Float) {
+                rc = BigDecimal.valueOf(((Number) in).doubleValue());
+            } else if (in instanceof java.util.Date) {
+                rc = BigDecimal.valueOf(((java.util.Date) in).getTime());
+            } else if (in instanceof Boolean) {
+                rc = (Boolean) in ? BigDecimal.ONE : BigDecimal.ZERO;
+            } else if (in instanceof Clob) {
+                rc = new BigDecimal(asString((Clob) in));
+            } else if (in instanceof Character) {
+                rc = new BigDecimal(new char[]{(Character) in});
+            }
+            if (rc != null) {
+                if (scale >= 0) {
+                    rc = rc.setScale(scale, RoundingMode.HALF_UP);
+                }
+                return rc;
+            }
+        } catch (final Exception e) {
+            throw cannotCastException(in.getClass().getName(), "BigDecimal", e);
+        }
+        throw cannotCastException(in.getClass().getName(), "BigDecimal");
+    }
 
-  @Override
-  public void setObject(int parameterIndex, Object x,
-      int targetSqlType) throws SQLException {
-    setObject(parameterIndex, x, targetSqlType, -1);
-  }
+    private static String castToString(final Object in) throws SQLException {
+        try {
+            if (in instanceof String) {
+                return (String) in;
+            }
+            if (in instanceof Clob) {
+                return asString((Clob) in);
+            }
+            // convert any unknown objects to string.
+            return in.toString();
 
-  /*
-   * This stores an Object into a parameter.
-   */
-  @Override
-  public void setObject(int parameterIndex, Object x) throws SQLException {
-    checkClosed();
-    if (x == null) {
-      setNull(parameterIndex, Types.OTHER);
-    } else if (x instanceof UUID && connection.haveMinimumServerVersion(ServerVersion.v8_3)) {
-      setUuid(parameterIndex, (UUID) x);
-    } else if (x instanceof SQLXML) {
-      setSQLXML(parameterIndex, (SQLXML) x);
-    } else if (x instanceof String) {
-      setString(parameterIndex, (String) x);
-    } else if (x instanceof BigDecimal) {
-      setBigDecimal(parameterIndex, (BigDecimal) x);
-    } else if (x instanceof Short) {
-      setShort(parameterIndex, (Short) x);
-    } else if (x instanceof Integer) {
-      setInt(parameterIndex, (Integer) x);
-    } else if (x instanceof Long) {
-      setLong(parameterIndex, (Long) x);
-    } else if (x instanceof Float) {
-      setFloat(parameterIndex, (Float) x);
-    } else if (x instanceof Double) {
-      setDouble(parameterIndex, (Double) x);
-    } else if (x instanceof byte[]) {
-      setBytes(parameterIndex, (byte[]) x);
-    } else if (x instanceof ByteStreamWriter) {
-      setByteStreamWriter(parameterIndex, (ByteStreamWriter) x);
-    } else if (x instanceof Date) {
-      setDate(parameterIndex, (Date) x);
-    } else if (x instanceof Time) {
-      setTime(parameterIndex, (Time) x);
-    } else if (x instanceof Timestamp) {
-      setTimestamp(parameterIndex, (Timestamp) x);
-    } else if (x instanceof Boolean) {
-      setBoolean(parameterIndex, (Boolean) x);
-    } else if (x instanceof Byte) {
-      setByte(parameterIndex, (Byte) x);
-    } else if (x instanceof Blob) {
-      setBlob(parameterIndex, (Blob) x);
-    } else if (x instanceof Clob) {
-      setClob(parameterIndex, (Clob) x);
-    } else if (x instanceof Array) {
-      setArray(parameterIndex, (Array) x);
-    } else if (x instanceof PGobject) {
-      setPGobject(parameterIndex, (PGobject) x);
-    } else if (x instanceof Character) {
-      setString(parameterIndex, ((Character) x).toString());
-    } else if (x instanceof LocalDate) {
-      setDate(parameterIndex, (LocalDate) x);
-    } else if (x instanceof LocalTime) {
-      setTime(parameterIndex, (LocalTime) x);
-    } else if (x instanceof OffsetTime) {
-      setTime(parameterIndex, (OffsetTime) x);
-    } else if (x instanceof LocalDateTime) {
-      setTimestamp(parameterIndex, (LocalDateTime) x);
-    } else if (x instanceof OffsetDateTime) {
-      setTimestamp(parameterIndex, (OffsetDateTime) x);
-    } else if (x instanceof Map) {
-      setMap(parameterIndex, (Map<?, ?>) x);
-    } else if (x instanceof Number) {
-      setNumber(parameterIndex, (Number) x);
-    } else if (x.getClass().isArray()) {
-      try {
-        setObjectArray(parameterIndex, x);
-      } catch (Exception e) {
+        } catch (final Exception e) {
+            throw cannotCastException(in.getClass().getName(), "String", e);
+        }
+    }
+
+    private static PSQLException cannotCastException(final String fromType, final String toType) {
+        return cannotCastException(fromType, toType, null);
+    }
+
+    /**
+     * Builds a {@link PSQLException} reporting that a value of {@code fromType}
+     * cannot be converted to {@code toType}.
+     *
+     * @param fromType class name of the source value
+     * @param toType   name of the requested target type
+     * @param cause    underlying conversion failure, or {@code null} if none
+     * @return exception with state {@code INVALID_PARAMETER_TYPE}; never thrown here
+     */
+    private static PSQLException cannotCastException(final String fromType, final String toType,
+                                                     final Exception cause) {
+        return new PSQLException(
+                GT.tr("Cannot convert an instance of {0} to type {1}", fromType, toType),
+                PSQLState.INVALID_PARAMETER_TYPE, cause);
+    }
+
+    /**
+     * Maximum number of bind parameters one statement may carry. The extended
+     * query protocol limits this to 65535 (the count is sent as a 16-bit value),
+     * while simple query mode interpolates parameters and has no such cap.
+     */
+    final int maximumNumberOfParameters() {
+        return connection.getPreferQueryMode() == PreferQueryMode.SIMPLE ? Integer.MAX_VALUE : 65535;
+    }
+
+    @Override
+    public ResultSet executeQuery(String sql) throws SQLException {
         throw new PSQLException(
-            GT.tr("Cannot cast an instance of {0} to type {1}", x.getClass().getName(), "Types.ARRAY"),
-            PSQLState.INVALID_PARAMETER_TYPE, e);
-      }
-    } else {
-      // Can't infer a type.
-      throw new PSQLException(GT.tr(
-          "Can''t infer the SQL type to use for an instance of {0}. Use setObject() with an explicit Types value to specify the type to use.",
-          x.getClass().getName()), PSQLState.INVALID_PARAMETER_TYPE);
-    }
-  }
-
-  /**
-   * Returns the SQL statement with the current template values substituted.
-   *
-   * @return SQL statement with the current template values substituted
-   */
-  @Override
-  public String toString() {
-    if (preparedQuery == null) {
-      return super.toString();
+                GT.tr("Can''t use query methods that take a query string on a PreparedStatement."),
+                PSQLState.WRONG_OBJECT_TYPE);
     }
 
-    return preparedQuery.query.toString(preparedParameters);
-  }
+    /*
+     * A Prepared SQL query is executed and its ResultSet is returned
+     *
+     * @return a ResultSet that contains the data produced by the query; never null
+     *
+     * @exception SQLException if a database access error occurs
+     */
+    @Override
+    public ResultSet executeQuery() throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            if (!executeWithFlags(0)) {
+                throw new PSQLException(GT.tr("No results were returned by the query."), PSQLState.NO_DATA);
+            }
 
-  /**
-   * Note if s is a String it should be escaped by the caller to avoid SQL injection attacks. It is
-   * not done here for efficiency reasons as most calls to this method do not require escaping as
-   * the source of the string is known safe (i.e. {@code Integer.toString()})
-   *
-   * @param paramIndex parameter index
-   * @param s value (the value should already be escaped)
-   * @param oid type oid
-   * @throws SQLException if something goes wrong
-   */
-  protected void bindLiteral(int paramIndex,
-      String s, int oid) throws SQLException {
-    preparedParameters.setLiteralParameter(paramIndex, s, oid);
-  }
-
-  protected void bindBytes(int paramIndex,
-      byte[] b, int oid) throws SQLException {
-    preparedParameters.setBinaryParameter(paramIndex, b, oid);
-  }
-
-  /**
-   * This version is for values that should turn into strings e.g. setString directly calls
-   * bindString with no escaping; the per-protocol ParameterList does escaping as needed.
-   *
-   * @param paramIndex parameter index
-   * @param s value
-   * @param oid type oid
-   * @throws SQLException if something goes wrong
-   */
-  private void bindString(int paramIndex, String s, int oid) throws SQLException {
-    preparedParameters.setStringParameter(paramIndex, s, oid);
-  }
-
-  @Override
-  public boolean isUseServerPrepare() {
-    return preparedQuery != null && mPrepareThreshold != 0
-        && preparedQuery.getExecuteCount() + 1 >= mPrepareThreshold;
-  }
-
-  @Override
-  public void addBatch(String sql) throws SQLException {
-    checkClosed();
-
-    throw new PSQLException(
-        GT.tr("Can''t use query methods that take a query string on a PreparedStatement."),
-        PSQLState.WRONG_OBJECT_TYPE);
-  }
-
-  @Override
-  public void addBatch() throws SQLException {
-    checkClosed();
-    ArrayList<Query> batchStatements = this.batchStatements;
-    if (batchStatements == null) {
-      this.batchStatements = batchStatements = new ArrayList<>();
-    }
-    ArrayList<ParameterList> batchParameters = this.batchParameters;
-    if (batchParameters == null) {
-      this.batchParameters = batchParameters = new ArrayList<ParameterList>();
-    }
-    // we need to create copies of our parameters, otherwise the values can be changed
-    batchParameters.add(preparedParameters.copy());
-    Query query = preparedQuery.query;
-    if (!(query instanceof BatchedQuery) || batchStatements.isEmpty()) {
-      batchStatements.add(query);
-    }
-  }
-
-  @Override
-  public ResultSetMetaData getMetaData() throws SQLException {
-    checkClosed();
-    ResultSet rs = getResultSet();
-
-    if (rs == null || ((PgResultSet) rs).isResultSetClosed()) {
-      // OK, we haven't executed it yet, or it was closed
-      // we've got to go to the backend
-      // for more info. We send the full query, but just don't
-      // execute it.
-
-      int flags = QueryExecutor.QUERY_ONESHOT | QueryExecutor.QUERY_DESCRIBE_ONLY
-          | QueryExecutor.QUERY_SUPPRESS_BEGIN;
-      StatementResultHandler handler = new StatementResultHandler();
-      connection.getQueryExecutor().execute(preparedQuery.query, preparedParameters, handler, 0, 0,
-          flags);
-      ResultWrapper wrapper = handler.getResults();
-      if (wrapper != null) {
-        rs = wrapper.getResultSet();
-      }
-    }
-
-    if (rs != null) {
-      return rs.getMetaData();
-    }
-
-    return null;
-  }
-
-  @Override
-  public void setArray(int i, Array x) throws SQLException {
-    checkClosed();
-
-    if (null == x) {
-      setNull(i, Types.ARRAY);
-      return;
-    }
-
-    // This only works for Array implementations that return a valid array
-    // literal from Array.toString(), such as the implementation we return
-    // from ResultSet.getArray(). Eventually we need a proper implementation
-    // here that works for any Array implementation.
-    String typename = x.getBaseTypeName();
-    int oid = connection.getTypeInfo().getPGArrayType(typename);
-    if (oid == Oid.UNSPECIFIED) {
-      throw new PSQLException(GT.tr("Unknown type {0}.", typename),
-          PSQLState.INVALID_PARAMETER_TYPE);
-    }
-
-    if (x instanceof PgArray) {
-      PgArray arr = (PgArray) x;
-      byte[] bytes = arr.toBytes();
-      if (bytes != null) {
-        bindBytes(i, bytes, oid);
-        return;
-      }
-    }
-
-    setString(i, x.toString(), oid);
-  }
-
-  protected long createBlob(int i, InputStream inputStream,
-      long length) throws SQLException {
-    LargeObjectManager lom = connection.getLargeObjectAPI();
-    long oid = lom.createLO();
-    LargeObject lob = lom.open(oid);
-    try (OutputStream outputStream = lob.getOutputStream()) {
-      // The actual buffer size does not matter much, see benchmarks
-      // https://github.com/pgjdbc/pgjdbc/pull/3044#issuecomment-1838057929
-      // BlobOutputStream would gradually increase the buffer, so it will level the number of
-      // database calls.
-      // At the same time, inputStream.read might produce less rows than requested, so we can not
-      // use a plain lob.write(buf, 0, numRead) as it might not align with 2K boundaries.
-      byte[] buf = new byte[(int) Math.min(length, 8192)];
-      int numRead;
-      while (length > 0 && (
-          numRead = inputStream.read(buf, 0, (int) Math.min(buf.length, length))) >= 0) {
-        length -= numRead;
-        outputStream.write(buf, 0, numRead);
-      }
-    } catch (IOException se) {
-      throw new PSQLException(GT.tr("Unexpected error writing large object to database."),
-          PSQLState.UNEXPECTED_ERROR, se);
-    }
-    return oid;
-  }
-
-  @Override
-  public void setBlob(int i, Blob x) throws SQLException {
-    checkClosed();
-
-    if (x == null) {
-      setNull(i, Types.BLOB);
-      return;
-    }
-
-    InputStream inStream = x.getBinaryStream();
-    try {
-      long oid = createBlob(i, inStream, x.length());
-      setLong(i, oid);
-    } finally {
-      try {
-        inStream.close();
-      } catch (Exception e) {
-      }
-    }
-  }
-
-  private String readerToString(Reader value, int maxLength) throws SQLException {
-    try {
-      int bufferSize = Math.min(maxLength, 1024);
-      StringBuilder v = new StringBuilder(bufferSize);
-      char[] buf = new char[bufferSize];
-      int nRead = 0;
-      while (nRead > -1 && v.length() < maxLength) {
-        nRead = value.read(buf, 0, Math.min(bufferSize, maxLength - v.length()));
-        if (nRead > 0) {
-          v.append(buf, 0, nRead);
+            return getSingleResultSet();
         }
-      }
-      return v.toString();
-    } catch (IOException ioe) {
-      throw new PSQLException(GT.tr("Provided Reader failed."), PSQLState.UNEXPECTED_ERROR, ioe);
-    }
-  }
-
-  @Override
-  public void setCharacterStream(int i, Reader x,
-      int length) throws SQLException {
-    checkClosed();
-
-    if (x == null) {
-      setNull(i, Types.VARCHAR);
-      return;
     }
 
-    if (length < 0) {
-      throw new PSQLException(GT.tr("Invalid stream length {0}.", length),
-          PSQLState.INVALID_PARAMETER_VALUE);
+    /**
+     * Always fails: the JDBC contract forbids calling the {@code String}-taking
+     * query methods on a {@link java.sql.PreparedStatement}; use the no-argument
+     * {@code executeUpdate()} instead.
+     *
+     * @throws SQLException always, with state {@code WRONG_OBJECT_TYPE}
+     */
+    @Override
+    public int executeUpdate(String sql) throws SQLException {
+        throw new PSQLException(
+                GT.tr("Can''t use query methods that take a query string on a PreparedStatement."),
+                PSQLState.WRONG_OBJECT_TYPE);
     }
 
-    // Version 7.2 supports CharacterStream for the PG text types
-    // As the spec/javadoc for this method indicate this is to be used for
-    // large text values (i.e. LONGVARCHAR) PG doesn't have a separate
-    // long varchar datatype, but with toast all the text datatypes are capable of
-    // handling very large values. Thus the implementation ends up calling
-    // setString() since there is no current way to stream the value to the server
-    setString(i, readerToString(x, length));
-  }
-
-  @Override
-  public void setClob(int i, Clob x) throws SQLException {
-    checkClosed();
-
-    if (x == null) {
-      setNull(i, Types.CLOB);
-      return;
-    }
-
-    Reader inStream = x.getCharacterStream();
-    int length = (int) x.length();
-    LargeObjectManager lom = connection.getLargeObjectAPI();
-    long oid = lom.createLO();
-    LargeObject lob = lom.open(oid);
-    Charset connectionCharset = Charset.forName(connection.getEncoding().name());
-    OutputStream los = lob.getOutputStream();
-    Writer lw = new OutputStreamWriter(los, connectionCharset);
-    try {
-      // could be buffered, but then the OutputStream returned by LargeObject
-      // is buffered internally anyhow, so there would be no performance
-      // boost gained, if anything it would be worse!
-      int c = inStream.read();
-      int p = 0;
-      while (c > -1 && p < length) {
-        lw.write(c);
-        c = inStream.read();
-        p++;
-      }
-      lw.close();
-    } catch (IOException se) {
-      throw new PSQLException(GT.tr("Unexpected error writing large object to database."),
-          PSQLState.UNEXPECTED_ERROR, se);
-    }
-    // lob is closed by the stream so don't call lob.close()
-    setLong(i, oid);
-  }
-
-  @Override
-  public void setNull(int parameterIndex, int t,
-      String typeName) throws SQLException {
-    if (typeName == null) {
-      setNull(parameterIndex, t);
-      return;
-    }
-
-    checkClosed();
-
-    TypeInfo typeInfo = connection.getTypeInfo();
-    int oid = typeInfo.getPGType(typeName);
-
-    preparedParameters.setNull(parameterIndex, oid);
-  }
-
-  @Override
-  public void setRef(int i, Ref x) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "setRef(int,Ref)");
-  }
-
-  @Override
-  public void setDate(int i, Date d,
-      Calendar cal) throws SQLException {
-    checkClosed();
-
-    if (d == null) {
-      setNull(i, Types.DATE);
-      return;
-    }
-
-    if (connection.binaryTransferSend(Oid.DATE)) {
-      byte[] val = new byte[4];
-      TimeZone tz = cal != null ? cal.getTimeZone() : null;
-      getTimestampUtils().toBinDate(tz, val, d);
-      preparedParameters.setBinaryParameter(i, val, Oid.DATE);
-      return;
-    }
-
-    // We must use UNSPECIFIED here, or inserting a Date-with-timezone into a
-    // timestamptz field does an unexpected rotation by the server's TimeZone:
-    //
-    // We want to interpret 2005/01/01 with calendar +0100 as
-    // "local midnight in +0100", but if we go via date it interprets it
-    // as local midnight in the server's timezone:
-
-    // template1=# select '2005-01-01+0100'::timestamptz;
-    // timestamptz
-    // ------------------------
-    // 2005-01-01 02:00:00+03
-    // (1 row)
-
-    // template1=# select '2005-01-01+0100'::date::timestamptz;
-    // timestamptz
-    // ------------------------
-    // 2005-01-01 00:00:00+03
-    // (1 row)
-
-    if (cal == null) {
-      cal = getDefaultCalendar();
-    }
-    bindString(i, getTimestampUtils().toString(cal, d), Oid.UNSPECIFIED);
-  }
-
-  @Override
-  public void setTime(int i, Time t,
-      Calendar cal) throws SQLException {
-    checkClosed();
-
-    if (t == null) {
-      setNull(i, Types.TIME);
-      return;
-    }
-
-    int oid = Oid.UNSPECIFIED;
-
-    // If a PGTime is used, we can define the OID explicitly.
-    if (t instanceof PGTime) {
-      PGTime pgTime = (PGTime) t;
-      if (pgTime.getCalendar() == null) {
-        oid = Oid.TIME;
-      } else {
-        oid = Oid.TIMETZ;
-        cal = pgTime.getCalendar();
-      }
-    }
-
-    if (cal == null) {
-      cal = getDefaultCalendar();
-    }
-    bindString(i, getTimestampUtils().toString(cal, t), oid);
-  }
-
-  @Override
-  public void setTimestamp(int i, Timestamp t,
-      Calendar cal) throws SQLException {
-    checkClosed();
-
-    if (t == null) {
-      setNull(i, Types.TIMESTAMP);
-      return;
-    }
-
-    int oid = Oid.UNSPECIFIED;
-
-    // Use UNSPECIFIED as a compromise to get both TIMESTAMP and TIMESTAMPTZ working.
-    // This is because you get this in a +1300 timezone:
-    //
-    // template1=# select '2005-01-01 15:00:00 +1000'::timestamptz;
-    // timestamptz
-    // ------------------------
-    // 2005-01-01 18:00:00+13
-    // (1 row)
-
-    // template1=# select '2005-01-01 15:00:00 +1000'::timestamp;
-    // timestamp
-    // ---------------------
-    // 2005-01-01 15:00:00
-    // (1 row)
-
-    // template1=# select '2005-01-01 15:00:00 +1000'::timestamptz::timestamp;
-    // timestamp
-    // ---------------------
-    // 2005-01-01 18:00:00
-    // (1 row)
-
-    // So we want to avoid doing a timestamptz -> timestamp conversion, as that
-    // will first convert the timestamptz to an equivalent time in the server's
-    // timezone (+1300, above), then turn it into a timestamp with the "wrong"
-    // time compared to the string we originally provided. But going straight
-    // to timestamp is OK as the input parser for timestamp just throws away
-    // the timezone part entirely. Since we don't know ahead of time what type
-    // we're actually dealing with, UNSPECIFIED seems the lesser evil, even if it
-    // does give more scope for type-mismatch errors being silently hidden.
-
-    // If a PGTimestamp is used, we can define the OID explicitly.
-    if (t instanceof PGTimestamp) {
-      PGTimestamp pgTimestamp = (PGTimestamp) t;
-      if (pgTimestamp.getCalendar() == null) {
-        oid = Oid.TIMESTAMP;
-      } else {
-        oid = Oid.TIMESTAMPTZ;
-        cal = pgTimestamp.getCalendar();
-      }
-    }
-    if (cal == null) {
-      cal = getDefaultCalendar();
-    }
-    bindString(i, getTimestampUtils().toString(cal, t), oid);
-  }
-
-  private void setDate(int i, LocalDate localDate) throws SQLException {
-    int oid = Oid.DATE;
-    bindString(i, getTimestampUtils().toString(localDate), oid);
-  }
-
-  private void setTime(int i, LocalTime localTime) throws SQLException {
-    int oid = Oid.TIME;
-    bindString(i, getTimestampUtils().toString(localTime), oid);
-  }
-
-  private void setTime(int i, OffsetTime offsetTime) throws SQLException {
-    int oid = Oid.TIMETZ;
-    bindString(i, getTimestampUtils().toString(offsetTime), oid);
-  }
-
-  private void setTimestamp(int i, LocalDateTime localDateTime)
-      throws SQLException {
-    int oid = Oid.TIMESTAMP;
-    bindString(i, getTimestampUtils().toString(localDateTime), oid);
-  }
-
-  private void setTimestamp(int i, OffsetDateTime offsetDateTime)
-      throws SQLException {
-    int oid = Oid.TIMESTAMPTZ;
-    bindString(i, getTimestampUtils().toString(offsetDateTime), oid);
-  }
-
-  public ParameterMetaData createParameterMetaData(BaseConnection conn, int[] oids)
-      throws SQLException {
-    return new PgParameterMetaData(conn, oids);
-  }
-
-  @Override
-  public void setObject(int parameterIndex, Object x,
-      SQLType targetSqlType,
-      int scaleOrLength) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "setObject");
-  }
-
-  @Override
-  public void setObject(int parameterIndex, Object x,
-      SQLType targetSqlType)
-      throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "setObject");
-  }
-
-  @Override
-  public void setRowId(int parameterIndex, RowId x) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "setRowId(int, RowId)");
-  }
-
-  @Override
-  public void setNString(int parameterIndex, String value) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "setNString(int, String)");
-  }
-
-  @Override
-  public void setNCharacterStream(int parameterIndex, Reader value, long length)
-      throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "setNCharacterStream(int, Reader, long)");
-  }
-
-  @Override
-  public void setNCharacterStream(int parameterIndex,
-      Reader value) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "setNCharacterStream(int, Reader)");
-  }
-
-  @Override
-  public void setCharacterStream(int parameterIndex,
-      Reader value, long length)
-      throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "setCharacterStream(int, Reader, long)");
-  }
-
-  @Override
-  public void setCharacterStream(int parameterIndex,
-      Reader value) throws SQLException {
-    if (connection.getPreferQueryMode() == PreferQueryMode.SIMPLE) {
-      String s = value != null ? readerToString(value, Integer.MAX_VALUE) : null;
-      setString(parameterIndex, s);
-      return;
-    }
-    InputStream is = value != null ? new ReaderInputStream(value) : null;
-    setObject(parameterIndex, is, Types.LONGVARCHAR);
-  }
-
-  @Override
-  public void setBinaryStream(int parameterIndex, InputStream value, long length)
-      throws SQLException {
-    if (length > Integer.MAX_VALUE) {
-      throw new PSQLException(GT.tr("Object is too large to send over the protocol."),
-          PSQLState.NUMERIC_CONSTANT_OUT_OF_RANGE);
-    }
-    if (value == null) {
-      preparedParameters.setNull(parameterIndex, Oid.BYTEA);
-    } else {
-      preparedParameters.setBytea(parameterIndex, value, (int) length);
-    }
-  }
-
-  @Override
-  public void setBinaryStream(int parameterIndex,
-      InputStream value) throws SQLException {
-    if (value == null) {
-      preparedParameters.setNull(parameterIndex, Oid.BYTEA);
-    } else {
-      preparedParameters.setBytea(parameterIndex, value);
-    }
-  }
-
-  @Override
-  public void setAsciiStream(int parameterIndex,
-      InputStream value, long length)
-      throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "setAsciiStream(int, InputStream, long)");
-  }
-
-  @Override
-  public void setAsciiStream(int parameterIndex,
-      InputStream value) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "setAsciiStream(int, InputStream)");
-  }
-
-  @Override
-  public void setNClob(int parameterIndex,
-      NClob value) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "setNClob(int, NClob)");
-  }
-
-  @Override
-  public void setClob(int parameterIndex,
-      Reader reader, long length) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "setClob(int, Reader, long)");
-  }
-
-  @Override
-  public void setClob(int parameterIndex,
-      Reader reader) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "setClob(int, Reader)");
-  }
-
-  @Override
-  public void setBlob(int parameterIndex,
-      InputStream inputStream, long length)
-      throws SQLException {
-    checkClosed();
-
-    if (inputStream == null) {
-      setNull(parameterIndex, Types.BLOB);
-      return;
-    }
-
-    if (length < 0) {
-      throw new PSQLException(GT.tr("Invalid stream length {0}.", length),
-          PSQLState.INVALID_PARAMETER_VALUE);
-    }
-
-    long oid = createBlob(parameterIndex, inputStream, length);
-    setLong(parameterIndex, oid);
-  }
-
-  @Override
-  public void setBlob(int parameterIndex,
-      InputStream inputStream) throws SQLException {
-    checkClosed();
-
-    if (inputStream == null) {
-      setNull(parameterIndex, Types.BLOB);
-      return;
-    }
-
-    long oid = createBlob(parameterIndex, inputStream, Long.MAX_VALUE);
-    setLong(parameterIndex, oid);
-  }
-
-  @Override
-  public void setNClob(int parameterIndex,
-      Reader reader, long length) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "setNClob(int, Reader, long)");
-  }
-
-  @Override
-  public void setNClob(int parameterIndex,
-      Reader reader) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "setNClob(int, Reader)");
-  }
-
-  @Override
-  public void setSQLXML(int parameterIndex,
-      SQLXML xmlObject) throws SQLException {
-    checkClosed();
-    String stringValue = xmlObject == null ? null : xmlObject.getString();
-    if (stringValue == null) {
-      setNull(parameterIndex, Types.SQLXML);
-    } else {
-      setString(parameterIndex, stringValue, Oid.XML);
-    }
-  }
-
-  private void setUuid(int parameterIndex, UUID uuid) throws SQLException {
-    if (connection.binaryTransferSend(Oid.UUID)) {
-      byte[] val = new byte[16];
-      ByteConverter.int8(val, 0, uuid.getMostSignificantBits());
-      ByteConverter.int8(val, 8, uuid.getLeastSignificantBits());
-      bindBytes(parameterIndex, val, Oid.UUID);
-    } else {
-      bindLiteral(parameterIndex, uuid.toString(), Oid.UUID);
-    }
-  }
-
-  @Override
-  public void setURL(int parameterIndex, URL x) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "setURL(int,URL)");
-  }
-
-  @Override
-  public int[] executeBatch() throws SQLException {
-    try {
-      // Note: in batch prepared statements batchStatements == 1, and batchParameters is equal
-      // to the number of addBatch calls
-      // batchParameters might be empty in case of empty batch
-      if (batchParameters != null && batchParameters.size() > 1 && mPrepareThreshold > 0) {
-        // Use server-prepared statements when there's more than one statement in a batch
-        // Technically speaking, it might cause to create a server-prepared statement
-        // just for 2 executions even for prepareThreshold=5. That however should be
-        // acceptable since prepareThreshold is a optimization kind of parameter.
-        this.preparedQuery.increaseExecuteCount(mPrepareThreshold);
-      }
-      return super.executeBatch();
-    } finally {
-      defaultTimeZone = null;
-    }
-  }
-
-  private Calendar getDefaultCalendar() {
-    if (getTimestampUtils().hasFastDefaultTimeZone()) {
-      return getTimestampUtils().getSharedCalendar(null);
-    }
-    Calendar sharedCalendar = getTimestampUtils().getSharedCalendar(defaultTimeZone);
-    if (defaultTimeZone == null) {
-      defaultTimeZone = sharedCalendar.getTimeZone();
-    }
-    return sharedCalendar;
-  }
-
-  @Override
-  public ParameterMetaData getParameterMetaData() throws SQLException {
-    int flags = QueryExecutor.QUERY_ONESHOT | QueryExecutor.QUERY_DESCRIBE_ONLY
-        | QueryExecutor.QUERY_SUPPRESS_BEGIN;
-    StatementResultHandler handler = new StatementResultHandler();
-    connection.getQueryExecutor().execute(preparedQuery.query, preparedParameters, handler, 0, 0,
-        flags);
-
-    int[] oids = preparedParameters.getTypeOIDs();
-    return createParameterMetaData(connection, oids);
-  }
-
-  @Override
-  protected void transformQueriesAndParameters() throws SQLException {
-    ArrayList<ParameterList> batchParameters = this.batchParameters;
-    if (batchParameters == null || batchParameters.size() <= 1
-        || !(preparedQuery.query instanceof BatchedQuery)) {
-      return;
-    }
-    BatchedQuery originalQuery = (BatchedQuery) preparedQuery.query;
-    // Single query cannot have more than {@link Short#MAX_VALUE} binds, thus
-    // the number of multi-values blocks should be capped.
-    // Typically, it does not make much sense to batch more than 128 rows: performance
-    // does not improve much after updating 128 statements with 1 multi-valued one, thus
-    // we cap maximum batch size and split there.
-    final int bindCount = originalQuery.getBindCount();
-    final int highestBlockCount = 128;
-    final int maxValueBlocks = bindCount == 0 ? 1024 /* if no binds, use 1024 rows */
-        : Integer.highestOneBit( // deriveForMultiBatch supports powers of two only
-            Math.min(Math.max(1, maximumNumberOfParameters() / bindCount), highestBlockCount));
-    int unprocessedBatchCount = batchParameters.size();
-    final int fullValueBlocksCount = unprocessedBatchCount / maxValueBlocks;
-    final int partialValueBlocksCount = Integer.bitCount(unprocessedBatchCount % maxValueBlocks);
-    final int count = fullValueBlocksCount + partialValueBlocksCount;
-    ArrayList<Query> newBatchStatements = new ArrayList<>(count);
-    ArrayList<ParameterList> newBatchParameters =
-        new ArrayList<ParameterList>(count);
-    int offset = 0;
-    for (int i = 0; i < count; i++) {
-      int valueBlock;
-      if (unprocessedBatchCount >= maxValueBlocks) {
-        valueBlock = maxValueBlocks;
-      } else {
-        valueBlock = Integer.highestOneBit(unprocessedBatchCount);
-      }
-      // Find appropriate batch for block count.
-      BatchedQuery bq = originalQuery.deriveForMultiBatch(valueBlock);
-      ParameterList newPl = bq.createParameterList();
-      for (int j = 0; j < valueBlock; j++) {
-        ParameterList pl = batchParameters.get(offset++);
-        if (pl != null) {
-          newPl.appendAll(pl);
+    @Override
+    public int executeUpdate() throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            executeWithFlags(QueryExecutor.QUERY_NO_RESULTS);
+            checkNoResultUpdate();
+            return getUpdateCount();
         }
-      }
-      newBatchStatements.add(bq);
-      newBatchParameters.add(newPl);
-      unprocessedBatchCount -= valueBlock;
     }
-    this.batchStatements = newBatchStatements;
-    this.batchParameters = newBatchParameters;
-  }
+
+    /**
+     * Executes the prepared statement as an update and returns the affected row
+     * count as a {@code long}.
+     *
+     * @return number of rows affected, as reported by {@code getLargeUpdateCount()}
+     * @throws SQLException if a database access error occurs or the statement
+     *         unexpectedly produced a result set
+     */
+    @Override
+    public long executeLargeUpdate() throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            // Suppress result sets: this entry point is for row-count-only execution.
+            executeWithFlags(QueryExecutor.QUERY_NO_RESULTS);
+            // Fail if the statement produced a result set anyway.
+            checkNoResultUpdate();
+            return getLargeUpdateCount();
+        }
+    }
+
+    /**
+     * Always fails: the JDBC contract forbids calling the {@code String}-taking
+     * query methods on a {@link java.sql.PreparedStatement}; use the no-argument
+     * {@code execute()} instead.
+     *
+     * @throws SQLException always, with state {@code WRONG_OBJECT_TYPE}
+     */
+    @Override
+    public boolean execute(String sql) throws SQLException {
+        throw new PSQLException(
+                GT.tr("Can''t use query methods that take a query string on a PreparedStatement."),
+                PSQLState.WRONG_OBJECT_TYPE);
+    }
+
+    /**
+     * Executes the prepared statement with default flags.
+     *
+     * @return {@code true} if execution produced a result set
+     * @throws SQLException if a database access error occurs
+     */
+    @Override
+    public boolean execute() throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            return executeWithFlags(0);
+        }
+    }
+
+    /**
+     * Executes the prepared query with the given {@code QueryExecutor} flags.
+     *
+     * @param flags bitmask of {@code QueryExecutor.QUERY_*} flags
+     * @return {@code true} if execution produced a result set
+     * @throws SQLException if the statement is closed or a database access error occurs
+     */
+    @Override
+    public boolean executeWithFlags(int flags) throws SQLException {
+        try {
+            try (ResourceLock ignore = lock.obtain()) {
+                checkClosed();
+
+                // In simple query mode the statement must be sent as a simple query.
+                if (connection.getPreferQueryMode() == PreferQueryMode.SIMPLE) {
+                    flags |= QueryExecutor.QUERY_EXECUTE_AS_SIMPLE;
+                }
+
+                execute(preparedQuery, preparedParameters, flags);
+
+                // Re-check: execution may have closed this statement (e.g. closeOnCompletion).
+                checkClosed();
+                return result != null && result.getResultSet() != null;
+            }
+        } finally {
+            // Drop the cached time zone so later binds pick up any TimeZone change.
+            defaultTimeZone = null;
+        }
+    }
+
+    /**
+     * Decides whether the given query should be executed as a one-shot (unnamed)
+     * statement. A {@code null} argument defaults to this statement's own
+     * prepared query before delegating to the superclass.
+     */
+    @Override
+    protected boolean isOneShotQuery(CachedQuery cachedQuery) {
+        if (cachedQuery == null) {
+            cachedQuery = preparedQuery;
+        }
+        return super.isOneShotQuery(cachedQuery);
+    }
+
+    /**
+     * Statement-specific close hook: returns the cached prepared query to the
+     * connection's query cache so it can be reused or released.
+     */
+    @Override
+    public void closeImpl() throws SQLException {
+        if (preparedQuery != null) {
+            connection.releaseQuery(preparedQuery);
+        }
+    }
+
+    /**
+     * Binds SQL {@code NULL} to the given parameter, mapping the JDBC
+     * {@link Types} constant to the closest PostgreSQL type OID so the server
+     * can plan with accurate type information.
+     *
+     * @param parameterIndex 1-based parameter index
+     * @param sqlType        a {@link java.sql.Types} constant
+     * @throws SQLException if the statement is closed, the index is out of range,
+     *         or {@code sqlType} is not a recognized {@link Types} value
+     */
+    @Override
+    public void setNull(int parameterIndex, int sqlType) throws SQLException {
+        checkClosed();
+
+        // Validate the index eagerly so the error points at the bad bind, not execution.
+        if (parameterIndex < 1 || parameterIndex > preparedParameters.getParameterCount()) {
+            throw new PSQLException(
+                    GT.tr("The column index is out of range: {0}, number of columns: {1}.",
+                            parameterIndex, preparedParameters.getParameterCount()),
+                    PSQLState.INVALID_PARAMETER_VALUE);
+        }
+
+        int oid;
+        switch (sqlType) {
+            case Types.SQLXML:
+                oid = Oid.XML;
+                break;
+            case Types.INTEGER:
+                oid = Oid.INT4;
+                break;
+            case Types.TINYINT:
+            case Types.SMALLINT:
+                // PostgreSQL has no 1-byte integer; TINYINT maps to int2 as well.
+                oid = Oid.INT2;
+                break;
+            case Types.BIGINT:
+                oid = Oid.INT8;
+                break;
+            case Types.REAL:
+                oid = Oid.FLOAT4;
+                break;
+            case Types.DOUBLE:
+            case Types.FLOAT:
+                oid = Oid.FLOAT8;
+                break;
+            case Types.DECIMAL:
+            case Types.NUMERIC:
+                oid = Oid.NUMERIC;
+                break;
+            case Types.CHAR:
+                oid = Oid.BPCHAR;
+                break;
+            case Types.VARCHAR:
+            case Types.LONGVARCHAR:
+                // Honor the stringtype connection setting: varchar vs. unspecified.
+                oid = connection.getStringVarcharFlag() ? Oid.VARCHAR : Oid.UNSPECIFIED;
+                break;
+            case Types.DATE:
+                oid = Oid.DATE;
+                break;
+            case Types.TIME:
+            case Types.TIME_WITH_TIMEZONE:
+            case Types.TIMESTAMP_WITH_TIMEZONE:
+            case Types.TIMESTAMP:
+                // UNSPECIFIED lets the server infer time/timetz/timestamp/timestamptz.
+                oid = Oid.UNSPECIFIED;
+                break;
+            case Types.BOOLEAN:
+            case Types.BIT:
+                oid = Oid.BOOL;
+                break;
+            case Types.BINARY:
+            case Types.VARBINARY:
+            case Types.LONGVARBINARY:
+                oid = Oid.BYTEA;
+                break;
+            case Types.BLOB:
+            case Types.CLOB:
+                // Large objects are referenced by OID in PostgreSQL.
+                oid = Oid.OID;
+                break;
+            case Types.REF_CURSOR:
+                oid = Oid.REF_CURSOR;
+                break;
+            case Types.ARRAY:
+            case Types.DISTINCT:
+            case Types.STRUCT:
+            case Types.NULL:
+            case Types.OTHER:
+                oid = Oid.UNSPECIFIED;
+                break;
+            default:
+                // Bad Types value.
+                throw new PSQLException(GT.tr("Unknown Types value."), PSQLState.INVALID_PARAMETER_TYPE);
+        }
+        preparedParameters.setNull(parameterIndex, oid);
+    }
+
+    /**
+     * Binds a boolean parameter as the literal {@code TRUE} or {@code FALSE}
+     * with the {@code bool} OID.
+     */
+    @Override
+    public void setBoolean(int parameterIndex, boolean x) throws SQLException {
+        checkClosed();
+        // The key words TRUE and FALSE are the preferred (SQL-compliant) usage.
+        bindLiteral(parameterIndex, x ? "TRUE" : "FALSE", Oid.BOOL);
+    }
+
+    @Override
+    public void setByte(int parameterIndex, byte x) throws SQLException {
+        // A byte always fits in int2, so reuse the short binding path.
+        setShort(parameterIndex, (short) x);
+    }
+
+    @Override
+    public void setShort(int parameterIndex, short x) throws SQLException {
+        checkClosed();
+        if (!connection.binaryTransferSend(Oid.INT2)) {
+            // Text mode: send the value as a decimal literal.
+            bindLiteral(parameterIndex, Integer.toString(x), Oid.INT2);
+        } else {
+            // Binary mode: two-byte int2 wire encoding.
+            byte[] encoded = new byte[2];
+            ByteConverter.int2(encoded, 0, x);
+            bindBytes(parameterIndex, encoded, Oid.INT2);
+        }
+    }
+
+    @Override
+    public void setInt(int parameterIndex, int x) throws SQLException {
+        checkClosed();
+        if (!connection.binaryTransferSend(Oid.INT4)) {
+            // Text mode: send the value as a decimal literal.
+            bindLiteral(parameterIndex, Integer.toString(x), Oid.INT4);
+        } else {
+            // Binary mode: four-byte int4 wire encoding.
+            byte[] encoded = new byte[4];
+            ByteConverter.int4(encoded, 0, x);
+            bindBytes(parameterIndex, encoded, Oid.INT4);
+        }
+    }
+
+    @Override
+    public void setLong(int parameterIndex, long x) throws SQLException {
+        checkClosed();
+        if (!connection.binaryTransferSend(Oid.INT8)) {
+            // Text mode: send the value as a decimal literal.
+            bindLiteral(parameterIndex, Long.toString(x), Oid.INT8);
+        } else {
+            // Binary mode: eight-byte int8 wire encoding.
+            byte[] encoded = new byte[8];
+            ByteConverter.int8(encoded, 0, x);
+            bindBytes(parameterIndex, encoded, Oid.INT8);
+        }
+    }
+
+    @Override
+    public void setFloat(int parameterIndex, float x) throws SQLException {
+        checkClosed();
+        if (connection.binaryTransferSend(Oid.FLOAT4)) {
+            // Binary mode: four-byte float4 wire encoding.
+            byte[] val = new byte[4];
+            ByteConverter.float4(val, 0, x);
+            bindBytes(parameterIndex, val, Oid.FLOAT4);
+            return;
+        }
+        // NOTE(review): the text-mode fallback binds as FLOAT8, not FLOAT4, unlike the
+        // binary path above — presumably so the decimal rendering is not rounded back
+        // through float4 by the server; confirm this asymmetry is intentional.
+        bindLiteral(parameterIndex, Float.toString(x), Oid.FLOAT8);
+    }
+
+    @Override
+    public void setDouble(int parameterIndex, double x) throws SQLException {
+        checkClosed();
+        if (!connection.binaryTransferSend(Oid.FLOAT8)) {
+            // Text mode: send the value as a decimal literal.
+            bindLiteral(parameterIndex, Double.toString(x), Oid.FLOAT8);
+        } else {
+            // Binary mode: eight-byte float8 wire encoding.
+            byte[] encoded = new byte[8];
+            ByteConverter.float8(encoded, 0, x);
+            bindBytes(parameterIndex, encoded, Oid.FLOAT8);
+        }
+    }
+
+    @Override
+    public void setBigDecimal(int parameterIndex, BigDecimal x)
+            throws SQLException {
+        // Fail fast on a closed statement, like every other setter. Previously the
+        // binary-transfer branch skipped this check entirely (only the text path,
+        // via setNumber, performed it).
+        checkClosed();
+        if (x != null && connection.binaryTransferSend(Oid.NUMERIC)) {
+            // Binary mode: use the numeric wire encoding directly.
+            final byte[] bytes = ByteConverter.numeric(x);
+            bindBytes(parameterIndex, bytes, Oid.NUMERIC);
+            return;
+        }
+        // Text mode, or a null value: fall back to the generic Number handling.
+        setNumber(parameterIndex, x);
+    }
+
+    @Override
+    public void setString(int parameterIndex, String x) throws SQLException {
+        checkClosed();
+        setString(parameterIndex, x, getStringType());
+    }
+
+    // Oid used for plain String parameters: VARCHAR when the stringtype
+    // connection property asks for it, otherwise leave the type for the
+    // server to infer.
+    private int getStringType() {
+        if (connection.getStringVarcharFlag()) {
+            return Oid.VARCHAR;
+        }
+        return Oid.UNSPECIFIED;
+    }
+
+    // Binds a string parameter with an explicit type oid; a null value is bound
+    // as SQL NULL of that type.
+    protected void setString(int parameterIndex,
+                             String x, int oid) throws SQLException {
+        checkClosed();
+        if (x != null) {
+            bindString(parameterIndex, x, oid);
+        } else {
+            preparedParameters.setNull(parameterIndex, oid);
+        }
+    }
+
+    @Override
+    public void setBytes(int parameterIndex, byte[] x) throws SQLException {
+        checkClosed();
+
+        if (x == null) {
+            setNull(parameterIndex, Types.VARBINARY);
+            return;
+        }
+
+        // Defensive copy so a later caller-side mutation of the array cannot change
+        // the bound parameter; clone() replaces the manual
+        // new byte[] + System.arraycopy dance. (bytea has been supported since 7.2.)
+        byte[] copy = x.clone();
+        preparedParameters.setBytea(parameterIndex, copy, 0, copy.length);
+    }
+
+    // Binds a bytea parameter whose bytes are produced lazily by the given writer,
+    // avoiding materialising the full byte array up front.
+    private void setByteStreamWriter(int parameterIndex,
+                                     ByteStreamWriter x) throws SQLException {
+        preparedParameters.setBytea(parameterIndex, x);
+    }
+
+    @Override
+    public void setDate(int parameterIndex,
+                        Date x) throws SQLException {
+        // Delegate to the Calendar-aware overload; null means the session default.
+        setDate(parameterIndex, x, null);
+    }
+
+    @Override
+    public void setTime(int parameterIndex, Time x) throws SQLException {
+        // Delegate to the Calendar-aware overload; null means the session default.
+        setTime(parameterIndex, x, null);
+    }
+
+    @Override
+    public void setTimestamp(int parameterIndex, Timestamp x) throws SQLException {
+        // Delegate to the Calendar-aware overload; null means the session default.
+        setTimestamp(parameterIndex, x, null);
+    }
+
+    // Reads up to {@code length} characters from the stream using the given
+    // encoding and binds the result as a VARCHAR string parameter.
+    // A null stream binds SQL NULL; a negative length is rejected.
+    private void setCharacterStreamPost71(int parameterIndex,
+                                          InputStream x, int length,
+                                          String encoding) throws SQLException {
+
+        if (x == null) {
+            setNull(parameterIndex, Types.VARCHAR);
+            return;
+        }
+        if (length < 0) {
+            throw new PSQLException(GT.tr("Invalid stream length {0}.", length),
+                    PSQLState.INVALID_PARAMETER_VALUE);
+        }
+
+        // Version 7.2 supports AsciiStream for all PG text types (char, varchar, text)
+        // As the spec/javadoc for this method indicate this is to be used for
+        // large String values (i.e. LONGVARCHAR) PG doesn't have a separate
+        // long varchar datatype, but with toast all text datatypes are capable of
+        // handling very large values. Thus the implementation ends up calling
+        // setString() since there is no current way to stream the value to the server
+        try {
+            InputStreamReader inStream = new InputStreamReader(x, encoding);
+            char[] chars = new char[length];
+            int charsRead = 0;
+            // Loop because read() may return fewer chars than requested; stop on
+            // end-of-stream or once 'length' chars have been accumulated.
+            while (true) {
+                int n = inStream.read(chars, charsRead, length - charsRead);
+                if (n == -1) {
+                    break;
+                }
+
+                charsRead += n;
+
+                if (charsRead == length) {
+                    break;
+                }
+            }
+
+            // Bind only the chars actually read (the stream may be shorter than 'length').
+            setString(parameterIndex, new String(chars, 0, charsRead), Oid.VARCHAR);
+        } catch (UnsupportedEncodingException uee) {
+            throw new PSQLException(GT.tr("The JVM claims not to support the {0} encoding.", encoding),
+                    PSQLState.UNEXPECTED_ERROR, uee);
+        } catch (IOException ioe) {
+            throw new PSQLException(GT.tr("Provided InputStream failed."), PSQLState.UNEXPECTED_ERROR,
+                    ioe);
+        }
+    }
+
+    @Override
+    public void setAsciiStream(int parameterIndex, InputStream x,
+                               int length) throws SQLException {
+        checkClosed();
+        // Decode the stream as ASCII and bind as a string parameter.
+        setCharacterStreamPost71(parameterIndex, x, length, "ASCII");
+    }
+
+    @Override
+    @SuppressWarnings("deprecation")
+    public void setUnicodeStream(int parameterIndex, InputStream x,
+                                 int length) throws SQLException {
+        checkClosed();
+
+        // Deprecated in JDBC; kept for compatibility. The stream is decoded as UTF-8.
+        setCharacterStreamPost71(parameterIndex, x, length, "UTF-8");
+    }
+
+    @Override
+    public void setBinaryStream(int parameterIndex, InputStream x,
+                                int length) throws SQLException {
+        checkClosed();
+
+        // A null stream binds SQL NULL (as VARBINARY).
+        if (x == null) {
+            setNull(parameterIndex, Types.VARBINARY);
+            return;
+        }
+
+        if (length < 0) {
+            throw new PSQLException(GT.tr("Invalid stream length {0}.", length),
+                    PSQLState.INVALID_PARAMETER_VALUE);
+        }
+
+        // Version 7.2 supports BinaryStream for the PG bytea type
+        // As the spec/javadoc for this method indicate this is to be used for
+        // large binary values (i.e. LONGVARBINARY) PG doesn't have a separate
+        // long binary datatype, but with toast the bytea datatype is capable of
+        // handling very large values.
+        preparedParameters.setBytea(parameterIndex, x, length);
+    }
+
+    @Override
+    public void clearParameters() throws SQLException {
+        // Drops all currently bound parameter values from the parameter list.
+        preparedParameters.clear();
+    }
+
+    // Helper method for setting parameters to PGobject subclasses.
+    // Resolves the oid from the object's declared type name, then binds either the
+    // binary representation (for PGBinaryObject when binary transfer is enabled)
+    // or the object's string value.
+    private void setPGobject(int parameterIndex, PGobject x) throws SQLException {
+        String typename = x.getType();
+        int oid = connection.getTypeInfo().getPGType(typename);
+        if (oid == Oid.UNSPECIFIED) {
+            throw new PSQLException(GT.tr("Unknown type {0}.", typename),
+                    PSQLState.INVALID_PARAMETER_TYPE);
+        }
+
+        if ((x instanceof PGBinaryObject) && connection.binaryTransferSend(oid)) {
+            PGBinaryObject binObj = (PGBinaryObject) x;
+            int length = binObj.lengthInBytes();
+            // A zero-length binary representation is treated as SQL NULL.
+            if (length == 0) {
+                preparedParameters.setNull(parameterIndex, oid);
+                return;
+            }
+            byte[] data = new byte[length];
+            binObj.toBytes(data, 0);
+            bindBytes(parameterIndex, data, oid);
+        } else {
+            setString(parameterIndex, x.getValue(), oid);
+        }
+    }
+
+    // Binds a Map parameter as the server-side hstore type; fails if the hstore
+    // extension is not installed (its oid cannot be resolved).
+    private void setMap(int parameterIndex, Map<?, ?> x) throws SQLException {
+        int oid = connection.getTypeInfo().getPGType("hstore");
+        if (oid == Oid.UNSPECIFIED) {
+            throw new PSQLException(GT.tr("No hstore extension installed."),
+                    PSQLState.INVALID_PARAMETER_TYPE);
+        }
+        if (connection.binaryTransferSend(oid)) {
+            byte[] data = HStoreConverter.toBytes(x, connection.getEncoding());
+            bindBytes(parameterIndex, data, oid);
+        } else {
+            setString(parameterIndex, HStoreConverter.toString(x), oid);
+        }
+    }
+
+    // Binds any Number as a NUMERIC text literal; a null value is bound as
+    // SQL NULL of type DECIMAL.
+    private void setNumber(int parameterIndex, Number x) throws SQLException {
+        checkClosed();
+        if (x != null) {
+            bindLiteral(parameterIndex, x.toString(), Oid.NUMERIC);
+        } else {
+            setNull(parameterIndex, Types.DECIMAL);
+        }
+    }
+
+    /**
+     * Binds {@code in} as the given JDBC {@code targetSqlType}, converting from
+     * whatever Java type was supplied. A null value is bound as SQL NULL of the
+     * target type; unsupported target types raise a PSQLException.
+     *
+     * @param parameterIndex 1-based parameter index
+     * @param in             value to bind (may be null)
+     * @param targetSqlType  java.sql.Types constant selecting the SQL type
+     * @param scale          scale for DECIMAL/NUMERIC; negative means unspecified
+     * @throws SQLException if the statement is closed, the value cannot be cast,
+     *                      or the target type is unsupported
+     */
+    @Override
+    public void setObject(int parameterIndex, Object in,
+                          int targetSqlType, int scale)
+            throws SQLException {
+        checkClosed();
+
+        if (in == null) {
+            setNull(parameterIndex, targetSqlType);
+            return;
+        }
+
+        // UUIDs sent as Types.OTHER get the native uuid type on 8.3+ servers.
+        if (targetSqlType == Types.OTHER && in instanceof UUID
+                && connection.haveMinimumServerVersion(ServerVersion.v8_3)) {
+            setUuid(parameterIndex, (UUID) in);
+            return;
+        }
+
+        switch (targetSqlType) {
+            case Types.SQLXML:
+                if (in instanceof SQLXML) {
+                    setSQLXML(parameterIndex, (SQLXML) in);
+                } else {
+                    setSQLXML(parameterIndex, new PgSQLXML(connection, in.toString()));
+                }
+                break;
+            case Types.INTEGER:
+                setInt(parameterIndex, castToInt(in));
+                break;
+            case Types.TINYINT:
+            case Types.SMALLINT:
+                setShort(parameterIndex, castToShort(in));
+                break;
+            case Types.BIGINT:
+                setLong(parameterIndex, castToLong(in));
+                break;
+            case Types.REAL:
+                setFloat(parameterIndex, castToFloat(in));
+                break;
+            case Types.DOUBLE:
+            case Types.FLOAT:
+                setDouble(parameterIndex, castToDouble(in));
+                break;
+            case Types.DECIMAL:
+            case Types.NUMERIC:
+                setBigDecimal(parameterIndex, castToBigDecimal(in, scale));
+                break;
+            case Types.CHAR:
+                setString(parameterIndex, castToString(in), Oid.BPCHAR);
+                break;
+            case Types.VARCHAR:
+                setString(parameterIndex, castToString(in), getStringType());
+                break;
+            case Types.LONGVARCHAR:
+                // InputStreams are streamed as text; anything else is stringified.
+                if (in instanceof InputStream) {
+                    preparedParameters.setText(parameterIndex, (InputStream) in);
+                } else {
+                    setString(parameterIndex, castToString(in), getStringType());
+                }
+                break;
+            case Types.DATE:
+                // Accepts java.sql.Date, java.util.Date, LocalDate, or a parseable string.
+                if (in instanceof Date) {
+                    setDate(parameterIndex, (Date) in);
+                } else {
+                    Date tmpd;
+                    if (in instanceof java.util.Date) {
+                        tmpd = new Date(((java.util.Date) in).getTime());
+                    } else if (in instanceof LocalDate) {
+                        setDate(parameterIndex, (LocalDate) in);
+                        break;
+                    } else {
+                        tmpd = getTimestampUtils().toDate(getDefaultCalendar(), in.toString());
+                    }
+                    setDate(parameterIndex, tmpd);
+                }
+                break;
+            case Types.TIME:
+                // Accepts java.sql.Time, java.util.Date, LocalTime, OffsetTime,
+                // or a parseable string.
+                if (in instanceof Time) {
+                    setTime(parameterIndex, (Time) in);
+                } else {
+                    Time tmpt;
+                    if (in instanceof java.util.Date) {
+                        tmpt = new Time(((java.util.Date) in).getTime());
+                    } else if (in instanceof LocalTime) {
+                        setTime(parameterIndex, (LocalTime) in);
+                        break;
+                    } else if (in instanceof OffsetTime) {
+                        setTime(parameterIndex, (OffsetTime) in);
+                        break;
+                    } else {
+                        tmpt = getTimestampUtils().toTime(getDefaultCalendar(), in.toString());
+                    }
+                    setTime(parameterIndex, tmpt);
+                }
+                break;
+            case Types.TIMESTAMP:
+                // PGTimestamp is routed through the generic setObject so its
+                // custom handling applies; others are converted to Timestamp.
+                if (in instanceof PGTimestamp) {
+                    setObject(parameterIndex, in);
+                } else if (in instanceof Timestamp) {
+                    setTimestamp(parameterIndex, (Timestamp) in);
+                } else {
+                    Timestamp tmpts;
+                    if (in instanceof java.util.Date) {
+                        tmpts = new Timestamp(((java.util.Date) in).getTime());
+                    } else if (in instanceof LocalDateTime) {
+                        setTimestamp(parameterIndex, (LocalDateTime) in);
+                        break;
+                    } else {
+                        tmpts = getTimestampUtils().toTimestamp(getDefaultCalendar(), in.toString());
+                    }
+                    setTimestamp(parameterIndex, tmpts);
+                }
+                break;
+            case Types.TIMESTAMP_WITH_TIMEZONE:
+                if (in instanceof OffsetDateTime) {
+                    setTimestamp(parameterIndex, (OffsetDateTime) in);
+                } else if (in instanceof PGTimestamp) {
+                    setObject(parameterIndex, in);
+                } else {
+                    throw new PSQLException(
+                            GT.tr("Cannot cast an instance of {0} to type {1}",
+                                    in.getClass().getName(), "Types.TIMESTAMP_WITH_TIMEZONE"),
+                            PSQLState.INVALID_PARAMETER_TYPE);
+                }
+                break;
+            case Types.BOOLEAN:
+            case Types.BIT:
+                setBoolean(parameterIndex, BooleanTypeUtil.castToBoolean(in));
+                break;
+            case Types.BINARY:
+            case Types.VARBINARY:
+            case Types.LONGVARBINARY:
+                // Defer to the untyped path, which handles byte[] and stream writers.
+                setObject(parameterIndex, in);
+                break;
+            case Types.BLOB:
+                // Blob/InputStream content is copied into a new large object and
+                // the parameter is bound to its oid.
+                if (in instanceof Blob) {
+                    setBlob(parameterIndex, (Blob) in);
+                } else if (in instanceof InputStream) {
+                    long oid = createBlob(parameterIndex, (InputStream) in, Long.MAX_VALUE);
+                    setLong(parameterIndex, oid);
+                } else {
+                    throw new PSQLException(
+                            GT.tr("Cannot cast an instance of {0} to type {1}",
+                                    in.getClass().getName(), "Types.BLOB"),
+                            PSQLState.INVALID_PARAMETER_TYPE);
+                }
+                break;
+            case Types.CLOB:
+                if (in instanceof Clob) {
+                    setClob(parameterIndex, (Clob) in);
+                } else {
+                    throw new PSQLException(
+                            GT.tr("Cannot cast an instance of {0} to type {1}",
+                                    in.getClass().getName(), "Types.CLOB"),
+                            PSQLState.INVALID_PARAMETER_TYPE);
+                }
+                break;
+            case Types.ARRAY:
+                if (in instanceof Array) {
+                    setArray(parameterIndex, (Array) in);
+                } else {
+                    try {
+                        setObjectArray(parameterIndex, in);
+                    } catch (Exception e) {
+                        throw new PSQLException(
+                                GT.tr("Cannot cast an instance of {0} to type {1}", in.getClass().getName(), "Types.ARRAY"),
+                                PSQLState.INVALID_PARAMETER_TYPE, e);
+                    }
+                }
+                break;
+            case Types.DISTINCT:
+                bindString(parameterIndex, in.toString(), Oid.UNSPECIFIED);
+                break;
+            case Types.OTHER:
+                if (in instanceof PGobject) {
+                    setPGobject(parameterIndex, (PGobject) in);
+                } else if (in instanceof Map) {
+                    setMap(parameterIndex, (Map<?, ?>) in);
+                } else {
+                    // Unknown objects are stringified and left for the server to type.
+                    bindString(parameterIndex, in.toString(), Oid.UNSPECIFIED);
+                }
+                break;
+            default:
+                throw new PSQLException(GT.tr("Unsupported Types value: {0}", targetSqlType),
+                        PSQLState.INVALID_PARAMETER_TYPE);
+        }
+    }
+
+    // Strips every array dimension from a class and returns the ultimate element
+    // type (e.g. String[][] -> String); a non-array class is returned unchanged.
+    private Class<?> getArrayType(Class<?> type) {
+        Class<?> component = type.getComponentType();
+        if (component == null) {
+            return type;
+        }
+        return getArrayType(component);
+    }
+
+    // Binds a Java array (or array-like value accepted by ArrayEncoding) as a
+    // PostgreSQL array parameter. Prefers the binary array encoding when the
+    // encoder supports it and the connection is not in simple query mode;
+    // otherwise resolves the element type and delegates to setArray.
+    private <A extends Object> void setObjectArray(int parameterIndex, A in) throws SQLException {
+        final ArrayEncoding.ArrayEncoder<A> arraySupport = ArrayEncoding.getArrayEncoder(in);
+
+        final TypeInfo typeInfo = connection.getTypeInfo();
+
+        int oid = arraySupport.getDefaultArrayTypeOid();
+
+        if (arraySupport.supportBinaryRepresentation(oid) && connection.getPreferQueryMode() != PreferQueryMode.SIMPLE) {
+            bindBytes(parameterIndex, arraySupport.toBinaryRepresentation(connection, in, oid), oid);
+        } else {
+            // No default oid from the encoder: look it up from the Java element type.
+            if (oid == Oid.UNSPECIFIED) {
+                Class<?> arrayType = getArrayType(in.getClass());
+                oid = typeInfo.getJavaArrayType(arrayType.getName());
+                if (oid == Oid.UNSPECIFIED) {
+                    throw new SQLFeatureNotSupportedException();
+                }
+            }
+            final int baseOid = typeInfo.getPGArrayElement(oid);
+            final String baseType = typeInfo.getPGType(baseOid);
+
+            // Build a driver Array of the base type and bind it via setArray.
+            final Array array = getPGConnection().createArrayOf(baseType, in);
+            this.setArray(parameterIndex, array);
+        }
+    }
+
+    @Override
+    public void setObject(int parameterIndex, Object x,
+                          int targetSqlType) throws SQLException {
+        // A scale of -1 means "no explicit scale" for DECIMAL/NUMERIC conversion.
+        setObject(parameterIndex, x, targetSqlType, -1);
+    }
+
+    /*
+     * Stores an Object into a parameter, inferring the SQL type from the Java
+     * runtime type. The instanceof chain is ordered: specific types (UUID,
+     * SQLXML, String, BigDecimal, ...) are matched before the generic Number
+     * and isArray() fallbacks, so ordering here is behaviorally significant.
+     * Objects whose type cannot be inferred raise a PSQLException telling the
+     * caller to use setObject with an explicit Types value.
+     */
+    @Override
+    public void setObject(int parameterIndex, Object x) throws SQLException {
+        checkClosed();
+        if (x == null) {
+            // With no type information, bind as an untyped NULL.
+            setNull(parameterIndex, Types.OTHER);
+        } else if (x instanceof UUID && connection.haveMinimumServerVersion(ServerVersion.v8_3)) {
+            setUuid(parameterIndex, (UUID) x);
+        } else if (x instanceof SQLXML) {
+            setSQLXML(parameterIndex, (SQLXML) x);
+        } else if (x instanceof String) {
+            setString(parameterIndex, (String) x);
+        } else if (x instanceof BigDecimal) {
+            setBigDecimal(parameterIndex, (BigDecimal) x);
+        } else if (x instanceof Short) {
+            setShort(parameterIndex, (Short) x);
+        } else if (x instanceof Integer) {
+            setInt(parameterIndex, (Integer) x);
+        } else if (x instanceof Long) {
+            setLong(parameterIndex, (Long) x);
+        } else if (x instanceof Float) {
+            setFloat(parameterIndex, (Float) x);
+        } else if (x instanceof Double) {
+            setDouble(parameterIndex, (Double) x);
+        } else if (x instanceof byte[]) {
+            setBytes(parameterIndex, (byte[]) x);
+        } else if (x instanceof ByteStreamWriter) {
+            setByteStreamWriter(parameterIndex, (ByteStreamWriter) x);
+        } else if (x instanceof Date) {
+            setDate(parameterIndex, (Date) x);
+        } else if (x instanceof Time) {
+            setTime(parameterIndex, (Time) x);
+        } else if (x instanceof Timestamp) {
+            setTimestamp(parameterIndex, (Timestamp) x);
+        } else if (x instanceof Boolean) {
+            setBoolean(parameterIndex, (Boolean) x);
+        } else if (x instanceof Byte) {
+            setByte(parameterIndex, (Byte) x);
+        } else if (x instanceof Blob) {
+            setBlob(parameterIndex, (Blob) x);
+        } else if (x instanceof Clob) {
+            setClob(parameterIndex, (Clob) x);
+        } else if (x instanceof Array) {
+            setArray(parameterIndex, (Array) x);
+        } else if (x instanceof PGobject) {
+            setPGobject(parameterIndex, (PGobject) x);
+        } else if (x instanceof Character) {
+            setString(parameterIndex, ((Character) x).toString());
+        } else if (x instanceof LocalDate) {
+            setDate(parameterIndex, (LocalDate) x);
+        } else if (x instanceof LocalTime) {
+            setTime(parameterIndex, (LocalTime) x);
+        } else if (x instanceof OffsetTime) {
+            setTime(parameterIndex, (OffsetTime) x);
+        } else if (x instanceof LocalDateTime) {
+            setTimestamp(parameterIndex, (LocalDateTime) x);
+        } else if (x instanceof OffsetDateTime) {
+            setTimestamp(parameterIndex, (OffsetDateTime) x);
+        } else if (x instanceof Map) {
+            setMap(parameterIndex, (Map<?, ?>) x);
+        } else if (x instanceof Number) {
+            // Generic Number fallback — must come after the concrete numeric types above.
+            setNumber(parameterIndex, (Number) x);
+        } else if (x.getClass().isArray()) {
+            try {
+                setObjectArray(parameterIndex, x);
+            } catch (Exception e) {
+                throw new PSQLException(
+                        GT.tr("Cannot cast an instance of {0} to type {1}", x.getClass().getName(), "Types.ARRAY"),
+                        PSQLState.INVALID_PARAMETER_TYPE, e);
+            }
+        } else {
+            // Can't infer a type.
+            throw new PSQLException(GT.tr(
+                    "Can''t infer the SQL type to use for an instance of {0}. Use setObject() with an explicit Types value to specify the type to use.",
+                    x.getClass().getName()), PSQLState.INVALID_PARAMETER_TYPE);
+        }
+    }
+
+    /**
+     * Renders the SQL statement with the currently bound parameter values
+     * substituted into their placeholders.
+     *
+     * @return SQL statement text with the current parameter values substituted
+     */
+    @Override
+    public String toString() {
+        if (preparedQuery != null) {
+            return preparedQuery.query.toString(preparedParameters);
+        }
+        // No prepared query yet: fall back to the default representation.
+        return super.toString();
+    }
+
+    /**
+     * Binds a pre-rendered literal value (e.g. the output of {@code Integer.toString()}).
+     *
+     * <p>Note if s is a String it should be escaped by the caller to avoid SQL injection attacks.
+     * It is not done here for efficiency reasons as most calls to this method do not require
+     * escaping as the source of the string is known safe (i.e. {@code Integer.toString()}).
+     *
+     * @param paramIndex parameter index
+     * @param s          value (the value should already be escaped)
+     * @param oid        type oid
+     * @throws SQLException if something goes wrong
+     */
+    protected void bindLiteral(int paramIndex,
+                               String s, int oid) throws SQLException {
+        preparedParameters.setLiteralParameter(paramIndex, s, oid);
+    }
+
+    /**
+     * Binds a value already encoded in the binary wire format for the given oid.
+     *
+     * @param paramIndex parameter index
+     * @param b          binary-encoded value
+     * @param oid        type oid
+     * @throws SQLException if something goes wrong
+     */
+    protected void bindBytes(int paramIndex,
+                             byte[] b, int oid) throws SQLException {
+        preparedParameters.setBinaryParameter(paramIndex, b, oid);
+    }
+
+    /**
+     * This version is for values that should turn into strings e.g. setString directly calls
+     * bindString with no escaping; the per-protocol ParameterList does escaping as needed.
+     *
+     * @param paramIndex parameter index
+     * @param s          value
+     * @param oid        type oid
+     * @throws SQLException if something goes wrong
+     */
+    private void bindString(int paramIndex, String s, int oid) throws SQLException {
+        preparedParameters.setStringParameter(paramIndex, s, oid);
+    }
+
+    // True when the next execution of this statement will use a named
+    // server-side prepared statement (threshold reached and prepare enabled).
+    @Override
+    public boolean isUseServerPrepare() {
+        if (preparedQuery == null || mPrepareThreshold == 0) {
+            return false;
+        }
+        return preparedQuery.getExecuteCount() + 1 >= mPrepareThreshold;
+    }
+
+    @Override
+    public void addBatch(String sql) throws SQLException {
+        checkClosed();
+
+        // A PreparedStatement carries its own SQL; the string-taking batch method
+        // is always an error here, as required by the JDBC contract.
+        throw new PSQLException(
+                GT.tr("Can''t use query methods that take a query string on a PreparedStatement."),
+                PSQLState.WRONG_OBJECT_TYPE);
+    }
+
+    @Override
+    public void addBatch() throws SQLException {
+        checkClosed();
+        // Both lists are created lazily on first use.
+        ArrayList<Query> batchStatements = this.batchStatements;
+        if (batchStatements == null) {
+            this.batchStatements = batchStatements = new ArrayList<>();
+        }
+        ArrayList<ParameterList> batchParameters = this.batchParameters;
+        if (batchParameters == null) {
+            this.batchParameters = batchParameters = new ArrayList<ParameterList>();
+        }
+        // we need to create copies of our parameters, otherwise the values can be changed
+        batchParameters.add(preparedParameters.copy());
+        Query query = preparedQuery.query;
+        // A BatchedQuery is added only once; subsequent addBatch calls just
+        // accumulate parameter lists against it.
+        if (!(query instanceof BatchedQuery) || batchStatements.isEmpty()) {
+            batchStatements.add(query);
+        }
+    }
+
+    @Override
+    public ResultSetMetaData getMetaData() throws SQLException {
+        checkClosed();
+        ResultSet rs = getResultSet();
+
+        if (rs == null || ((PgResultSet) rs).isResultSetClosed()) {
+            // OK, we haven't executed it yet, or it was closed
+            // we've got to go to the backend
+            // for more info. We send the full query, but just don't
+            // execute it.
+
+            // DESCRIBE_ONLY asks the server for result shape without running the
+            // statement; SUPPRESS_BEGIN avoids starting a transaction for it.
+            int flags = QueryExecutor.QUERY_ONESHOT | QueryExecutor.QUERY_DESCRIBE_ONLY
+                    | QueryExecutor.QUERY_SUPPRESS_BEGIN;
+            StatementResultHandler handler = new StatementResultHandler();
+            connection.getQueryExecutor().execute(preparedQuery.query, preparedParameters, handler, 0, 0,
+                    flags);
+            ResultWrapper wrapper = handler.getResults();
+            if (wrapper != null) {
+                rs = wrapper.getResultSet();
+            }
+        }
+
+        if (rs != null) {
+            return rs.getMetaData();
+        }
+
+        // No result description available (e.g. the statement produces no result set).
+        return null;
+    }
+
+    @Override
+    public void setArray(int i, Array x) throws SQLException {
+        checkClosed();
+
+        if (null == x) {
+            setNull(i, Types.ARRAY);
+            return;
+        }
+
+        // This only works for Array implementations that return a valid array
+        // literal from Array.toString(), such as the implementation we return
+        // from ResultSet.getArray(). Eventually we need a proper implementation
+        // here that works for any Array implementation.
+        String typename = x.getBaseTypeName();
+        int oid = connection.getTypeInfo().getPGArrayType(typename);
+        if (oid == Oid.UNSPECIFIED) {
+            throw new PSQLException(GT.tr("Unknown type {0}.", typename),
+                    PSQLState.INVALID_PARAMETER_TYPE);
+        }
+
+        // Driver-native PgArray values may carry a ready-made binary encoding;
+        // use it when present to skip the text round-trip.
+        if (x instanceof PgArray) {
+            PgArray arr = (PgArray) x;
+            byte[] bytes = arr.toBytes();
+            if (bytes != null) {
+                bindBytes(i, bytes, oid);
+                return;
+            }
+        }
+
+        // Fallback: bind the array's string literal form.
+        setString(i, x.toString(), oid);
+    }
+
+    // Copies up to {@code length} bytes from the stream into a newly created
+    // server-side large object and returns its oid. IOException during the copy
+    // is wrapped in a PSQLException.
+    protected long createBlob(int i, InputStream inputStream,
+                              long length) throws SQLException {
+        LargeObjectManager lom = connection.getLargeObjectAPI();
+        long oid = lom.createLO();
+        LargeObject lob = lom.open(oid);
+        try (OutputStream outputStream = lob.getOutputStream()) {
+            // The actual buffer size does not matter much, see benchmarks
+            // https://github.com/pgjdbc/pgjdbc/pull/3044#issuecomment-1838057929
+            // BlobOutputStream would gradually increase the buffer, so it will level the number of
+            // database calls.
+            // At the same time, inputStream.read might produce fewer bytes than requested, so we can
+            // not use a plain lob.write(buf, 0, numRead) as it might not align with 2K boundaries.
+            byte[] buf = new byte[(int) Math.min(length, 8192)];
+            int numRead;
+            // Stop at end-of-stream or once 'length' bytes have been copied.
+            while (length > 0 && (
+                    numRead = inputStream.read(buf, 0, (int) Math.min(buf.length, length))) >= 0) {
+                length -= numRead;
+                outputStream.write(buf, 0, numRead);
+            }
+        } catch (IOException se) {
+            throw new PSQLException(GT.tr("Unexpected error writing large object to database."),
+                    PSQLState.UNEXPECTED_ERROR, se);
+        }
+        return oid;
+    }
+
+    @Override
+    public void setBlob(int i, Blob x) throws SQLException {
+        checkClosed();
+
+        if (x == null) {
+            setNull(i, Types.BLOB);
+            return;
+        }
+
+        // Copy the Blob's content into a new large object and bind its oid.
+        InputStream inStream = x.getBinaryStream();
+        try {
+            long oid = createBlob(i, inStream, x.length());
+            setLong(i, oid);
+        } finally {
+            try {
+                inStream.close();
+            } catch (Exception e) {
+                // Best-effort close of the caller's stream; a close failure must not
+                // mask the outcome of the copy above.
+            }
+        }
+    }
+
+    private String readerToString(Reader value, int maxLength) throws SQLException {
+        try {
+            int bufferSize = Math.min(maxLength, 1024);
+            StringBuilder v = new StringBuilder(bufferSize);
+            char[] buf = new char[bufferSize];
+            int nRead = 0;
+            while (nRead > -1 && v.length() < maxLength) {
+                nRead = value.read(buf, 0, Math.min(bufferSize, maxLength - v.length()));
+                if (nRead > 0) {
+                    v.append(buf, 0, nRead);
+                }
+            }
+            return v.toString();
+        } catch (IOException ioe) {
+            throw new PSQLException(GT.tr("Provided Reader failed."), PSQLState.UNEXPECTED_ERROR, ioe);
+        }
+    }
+
+    @Override
+    public void setCharacterStream(int i, Reader x,
+                                   int length) throws SQLException {
+        checkClosed();
+
+        // A null reader binds SQL NULL (as VARCHAR).
+        if (x == null) {
+            setNull(i, Types.VARCHAR);
+            return;
+        }
+
+        if (length < 0) {
+            throw new PSQLException(GT.tr("Invalid stream length {0}.", length),
+                    PSQLState.INVALID_PARAMETER_VALUE);
+        }
+
+        // Version 7.2 supports CharacterStream for the PG text types
+        // As the spec/javadoc for this method indicate this is to be used for
+        // large text values (i.e. LONGVARCHAR) PG doesn't have a separate
+        // long varchar datatype, but with toast all the text datatypes are capable of
+        // handling very large values. Thus the implementation ends up calling
+        // setString() since there is no current way to stream the value to the server
+        setString(i, readerToString(x, length));
+    }
+
+    /**
+     * Binds a {@link Clob} parameter by copying its characters into a new
+     * server-side large object (encoded with the connection's charset) and
+     * binding the large object's OID.
+     *
+     * @param i 1-based parameter index
+     * @param x the Clob to bind; {@code null} binds SQL NULL of type CLOB
+     * @throws SQLException if the statement is closed or writing to the server fails
+     */
+    @Override
+    public void setClob(int i, Clob x) throws SQLException {
+        checkClosed();
+
+        if (x == null) {
+            setNull(i, Types.CLOB);
+            return;
+        }
+
+        Reader inStream = x.getCharacterStream();
+        int length = (int) x.length();
+        LargeObjectManager lom = connection.getLargeObjectAPI();
+        long oid = lom.createLO();
+        LargeObject lob = lom.open(oid);
+        Charset connectionCharset = Charset.forName(connection.getEncoding().name());
+        OutputStream los = lob.getOutputStream();
+        Writer lw = new OutputStreamWriter(los, connectionCharset);
+        try {
+            // could be buffered, but then the OutputStream returned by LargeObject
+            // is buffered internally anyhow, so there would be no performance
+            // boost gained, if anything it would be worse!
+            int c = inStream.read();
+            int p = 0;
+            while (c > -1 && p < length) {
+                lw.write(c);
+                c = inStream.read();
+                p++;
+            }
+            lw.close();
+        } catch (IOException se) {
+            throw new PSQLException(GT.tr("Unexpected error writing large object to database."),
+                    PSQLState.UNEXPECTED_ERROR, se);
+        } finally {
+            // Close the Clob's reader as well (mirrors setBlob); failures here are
+            // non-fatal because the value has already been written or an error has
+            // already been raised.
+            try {
+                inStream.close();
+            } catch (Exception e) {
+            }
+        }
+        // lob is closed by the stream so don't call lob.close()
+        setLong(i, oid);
+    }
+
+    /**
+     * Binds SQL NULL using a backend type resolved by name. A {@code null} type
+     * name delegates to the plain {@link #setNull(int, int)} overload.
+     *
+     * @param parameterIndex 1-based parameter index
+     * @param t              java.sql.Types code (used only when typeName is null)
+     * @param typeName       PostgreSQL type name to resolve, or {@code null}
+     * @throws SQLException if the statement is closed
+     */
+    @Override
+    public void setNull(int parameterIndex, int t,
+                        String typeName) throws SQLException {
+        if (typeName == null) {
+            setNull(parameterIndex, t);
+            return;
+        }
+
+        checkClosed();
+
+        // Resolve the named type through the connection's type cache and bind by OID.
+        int resolvedOid = connection.getTypeInfo().getPGType(typeName);
+        preparedParameters.setNull(parameterIndex, resolvedOid);
+    }
+
+    /** Not supported by this driver; always throws a "not implemented" SQLException. */
+    @Override
+    public void setRef(int i, Ref x) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "setRef(int,Ref)");
+    }
+
+    /**
+     * Binds a {@link Date} parameter, using the given Calendar's time zone (or the
+     * connection's default calendar) when rendering the value.
+     *
+     * @param i   1-based parameter index
+     * @param d   date value; {@code null} binds SQL NULL of type DATE
+     * @param cal calendar supplying the time zone, or {@code null} for the default
+     * @throws SQLException if the statement is closed
+     */
+    @Override
+    public void setDate(int i, Date d,
+                        Calendar cal) throws SQLException {
+        checkClosed();
+
+        if (d == null) {
+            setNull(i, Types.DATE);
+            return;
+        }
+
+        // Fast path: send the 4-byte binary DATE representation when the
+        // connection negotiated binary transfer for this type.
+        if (connection.binaryTransferSend(Oid.DATE)) {
+            byte[] val = new byte[4];
+            TimeZone tz = cal != null ? cal.getTimeZone() : null;
+            getTimestampUtils().toBinDate(tz, val, d);
+            preparedParameters.setBinaryParameter(i, val, Oid.DATE);
+            return;
+        }
+
+        // We must use UNSPECIFIED here, or inserting a Date-with-timezone into a
+        // timestamptz field does an unexpected rotation by the server's TimeZone:
+        //
+        // We want to interpret 2005/01/01 with calendar +0100 as
+        // "local midnight in +0100", but if we go via date it interprets it
+        // as local midnight in the server's timezone:
+
+        // template1=# select '2005-01-01+0100'::timestamptz;
+        // timestamptz
+        // ------------------------
+        // 2005-01-01 02:00:00+03
+        // (1 row)
+
+        // template1=# select '2005-01-01+0100'::date::timestamptz;
+        // timestamptz
+        // ------------------------
+        // 2005-01-01 00:00:00+03
+        // (1 row)
+
+        if (cal == null) {
+            cal = getDefaultCalendar();
+        }
+        bindString(i, getTimestampUtils().toString(cal, d), Oid.UNSPECIFIED);
+    }
+
+    /**
+     * Binds a {@link Time} parameter. A {@link PGTime} carrying its own calendar is
+     * bound as TIMETZ with that calendar; a PGTime without one is bound as TIME.
+     * Any other Time is bound with an unspecified OID so the server can infer the
+     * column type.
+     *
+     * @param i   1-based parameter index
+     * @param t   time value; {@code null} binds SQL NULL of type TIME
+     * @param cal calendar supplying the time zone, or {@code null} for the default
+     * @throws SQLException if the statement is closed
+     */
+    @Override
+    public void setTime(int i, Time t,
+                        Calendar cal) throws SQLException {
+        checkClosed();
+
+        if (t == null) {
+            setNull(i, Types.TIME);
+            return;
+        }
+
+        int oid = Oid.UNSPECIFIED;
+        if (t instanceof PGTime) {
+            // A PGTime lets us pick the OID explicitly.
+            Calendar pgCalendar = ((PGTime) t).getCalendar();
+            if (pgCalendar != null) {
+                oid = Oid.TIMETZ;
+                cal = pgCalendar;
+            } else {
+                oid = Oid.TIME;
+            }
+        }
+
+        if (cal == null) {
+            cal = getDefaultCalendar();
+        }
+        bindString(i, getTimestampUtils().toString(cal, t), oid);
+    }
+
+    /**
+     * Binds a {@link Timestamp} parameter. A {@link PGTimestamp} carrying its own
+     * calendar is bound as TIMESTAMPTZ; a PGTimestamp without one as TIMESTAMP;
+     * any other Timestamp with an unspecified OID (see the rationale below).
+     *
+     * @param i   1-based parameter index
+     * @param t   timestamp value; {@code null} binds SQL NULL of type TIMESTAMP
+     * @param cal calendar supplying the time zone, or {@code null} for the default
+     * @throws SQLException if the statement is closed
+     */
+    @Override
+    public void setTimestamp(int i, Timestamp t,
+                             Calendar cal) throws SQLException {
+        checkClosed();
+
+        if (t == null) {
+            setNull(i, Types.TIMESTAMP);
+            return;
+        }
+
+        int oid = Oid.UNSPECIFIED;
+
+        // Use UNSPECIFIED as a compromise to get both TIMESTAMP and TIMESTAMPTZ working.
+        // This is because you get this in a +1300 timezone:
+        //
+        // template1=# select '2005-01-01 15:00:00 +1000'::timestamptz;
+        // timestamptz
+        // ------------------------
+        // 2005-01-01 18:00:00+13
+        // (1 row)
+
+        // template1=# select '2005-01-01 15:00:00 +1000'::timestamp;
+        // timestamp
+        // ---------------------
+        // 2005-01-01 15:00:00
+        // (1 row)
+
+        // template1=# select '2005-01-01 15:00:00 +1000'::timestamptz::timestamp;
+        // timestamp
+        // ---------------------
+        // 2005-01-01 18:00:00
+        // (1 row)
+
+        // So we want to avoid doing a timestamptz -> timestamp conversion, as that
+        // will first convert the timestamptz to an equivalent time in the server's
+        // timezone (+1300, above), then turn it into a timestamp with the "wrong"
+        // time compared to the string we originally provided. But going straight
+        // to timestamp is OK as the input parser for timestamp just throws away
+        // the timezone part entirely. Since we don't know ahead of time what type
+        // we're actually dealing with, UNSPECIFIED seems the lesser evil, even if it
+        // does give more scope for type-mismatch errors being silently hidden.
+
+        // If a PGTimestamp is used, we can define the OID explicitly.
+        if (t instanceof PGTimestamp) {
+            PGTimestamp pgTimestamp = (PGTimestamp) t;
+            if (pgTimestamp.getCalendar() == null) {
+                oid = Oid.TIMESTAMP;
+            } else {
+                oid = Oid.TIMESTAMPTZ;
+                cal = pgTimestamp.getCalendar();
+            }
+        }
+        if (cal == null) {
+            cal = getDefaultCalendar();
+        }
+        bindString(i, getTimestampUtils().toString(cal, t), oid);
+    }
+
+    /** Binds a {@link LocalDate} as a DATE parameter. */
+    private void setDate(int i, LocalDate localDate) throws SQLException {
+        bindString(i, getTimestampUtils().toString(localDate), Oid.DATE);
+    }
+
+    /** Binds a {@link LocalTime} as a TIME parameter. */
+    private void setTime(int i, LocalTime localTime) throws SQLException {
+        bindString(i, getTimestampUtils().toString(localTime), Oid.TIME);
+    }
+
+    /** Binds an {@link OffsetTime} as a TIMETZ parameter. */
+    private void setTime(int i, OffsetTime offsetTime) throws SQLException {
+        bindString(i, getTimestampUtils().toString(offsetTime), Oid.TIMETZ);
+    }
+
+    /** Binds a {@link LocalDateTime} as a TIMESTAMP parameter. */
+    private void setTimestamp(int i, LocalDateTime localDateTime)
+            throws SQLException {
+        bindString(i, getTimestampUtils().toString(localDateTime), Oid.TIMESTAMP);
+    }
+
+    /** Binds an {@link OffsetDateTime} as a TIMESTAMPTZ parameter. */
+    private void setTimestamp(int i, OffsetDateTime offsetDateTime)
+            throws SQLException {
+        bindString(i, getTimestampUtils().toString(offsetDateTime), Oid.TIMESTAMPTZ);
+    }
+
+    /**
+     * Creates the {@link ParameterMetaData} implementation for the given
+     * connection and parameter type OIDs.
+     */
+    public ParameterMetaData createParameterMetaData(BaseConnection conn, int[] oids)
+            throws SQLException {
+        return new PgParameterMetaData(conn, oids);
+    }
+
+    /** Not supported: {@code setObject} with a {@link SQLType} target is not implemented. */
+    @Override
+    public void setObject(int parameterIndex, Object x,
+                          SQLType targetSqlType,
+                          int scaleOrLength) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "setObject");
+    }
+
+    /** Not supported: {@code setObject} with a {@link SQLType} target is not implemented. */
+    @Override
+    public void setObject(int parameterIndex, Object x,
+                          SQLType targetSqlType)
+            throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "setObject");
+    }
+
+    /** Not supported by this driver. */
+    @Override
+    public void setRowId(int parameterIndex, RowId x) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "setRowId(int, RowId)");
+    }
+
+    /** Not supported by this driver. */
+    @Override
+    public void setNString(int parameterIndex, String value) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "setNString(int, String)");
+    }
+
+    /** Not supported by this driver. */
+    @Override
+    public void setNCharacterStream(int parameterIndex, Reader value, long length)
+            throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "setNCharacterStream(int, Reader, long)");
+    }
+
+    /** Not supported by this driver. */
+    @Override
+    public void setNCharacterStream(int parameterIndex,
+                                    Reader value) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "setNCharacterStream(int, Reader)");
+    }
+
+    /** Not supported by this driver. */
+    @Override
+    public void setCharacterStream(int parameterIndex,
+                                   Reader value, long length)
+            throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "setCharacterStream(int, Reader, long)");
+    }
+
+    /**
+     * Binds an unbounded character-stream parameter. In simple query mode the
+     * reader is materialized into a String up front; otherwise it is wrapped in a
+     * byte stream and bound as LONGVARCHAR.
+     *
+     * @param parameterIndex 1-based parameter index
+     * @param value          source reader, or {@code null} for SQL NULL
+     * @throws SQLException if reading from the reader fails
+     */
+    @Override
+    public void setCharacterStream(int parameterIndex,
+                                   Reader value) throws SQLException {
+        if (connection.getPreferQueryMode() == PreferQueryMode.SIMPLE) {
+            String s = value != null ? readerToString(value, Integer.MAX_VALUE) : null;
+            setString(parameterIndex, s);
+            return;
+        }
+        InputStream is = value != null ? new ReaderInputStream(value) : null;
+        setObject(parameterIndex, is, Types.LONGVARCHAR);
+    }
+
+    /**
+     * Binds a binary-stream parameter of known length as BYTEA.
+     *
+     * @param parameterIndex 1-based parameter index
+     * @param value          source stream; {@code null} binds SQL NULL of type BYTEA
+     * @param length         number of bytes to read; must be non-negative and fit in an int
+     * @throws SQLException if the length is out of range
+     */
+    @Override
+    public void setBinaryStream(int parameterIndex, InputStream value, long length)
+            throws SQLException {
+        if (length > Integer.MAX_VALUE) {
+            throw new PSQLException(GT.tr("Object is too large to send over the protocol."),
+                    PSQLState.NUMERIC_CONSTANT_OUT_OF_RANGE);
+        }
+        if (value == null) {
+            preparedParameters.setNull(parameterIndex, Oid.BYTEA);
+            return;
+        }
+        // Reject negative lengths explicitly (consistent with setBlob and
+        // setCharacterStream) instead of handing a nonsensical size to the
+        // parameter list.
+        if (length < 0) {
+            throw new PSQLException(GT.tr("Invalid stream length {0}.", length),
+                    PSQLState.INVALID_PARAMETER_VALUE);
+        }
+        preparedParameters.setBytea(parameterIndex, value, (int) length);
+    }
+
+    /**
+     * Binds an unbounded binary-stream parameter as BYTEA; the stream is consumed
+     * to its end when the statement executes.
+     *
+     * @param parameterIndex 1-based parameter index
+     * @param value          source stream; {@code null} binds SQL NULL of type BYTEA
+     * @throws SQLException if the parameter cannot be set
+     */
+    @Override
+    public void setBinaryStream(int parameterIndex,
+                                InputStream value) throws SQLException {
+        if (value != null) {
+            preparedParameters.setBytea(parameterIndex, value);
+        } else {
+            preparedParameters.setNull(parameterIndex, Oid.BYTEA);
+        }
+    }
+
+    /** Not supported by this driver. */
+    @Override
+    public void setAsciiStream(int parameterIndex,
+                               InputStream value, long length)
+            throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "setAsciiStream(int, InputStream, long)");
+    }
+
+    /** Not supported by this driver. */
+    @Override
+    public void setAsciiStream(int parameterIndex,
+                               InputStream value) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "setAsciiStream(int, InputStream)");
+    }
+
+    /** Not supported by this driver. */
+    @Override
+    public void setNClob(int parameterIndex,
+                         NClob value) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "setNClob(int, NClob)");
+    }
+
+    /** Not supported by this driver. */
+    @Override
+    public void setClob(int parameterIndex,
+                        Reader reader, long length) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "setClob(int, Reader, long)");
+    }
+
+    /** Not supported by this driver. */
+    @Override
+    public void setClob(int parameterIndex,
+                        Reader reader) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "setClob(int, Reader)");
+    }
+
+    /**
+     * Binds a length-bounded binary stream by copying it into a new server-side
+     * large object and binding the resulting OID.
+     *
+     * @param parameterIndex 1-based parameter index
+     * @param inputStream    source stream; {@code null} binds SQL NULL of type BLOB
+     * @param length         number of bytes to copy; must not be negative
+     * @throws SQLException if the statement is closed, the length is negative,
+     *                      or the copy fails
+     */
+    @Override
+    public void setBlob(int parameterIndex,
+                        InputStream inputStream, long length)
+            throws SQLException {
+        checkClosed();
+
+        if (inputStream == null) {
+            setNull(parameterIndex, Types.BLOB);
+            return;
+        }
+
+        if (length < 0) {
+            throw new PSQLException(GT.tr("Invalid stream length {0}.", length),
+                    PSQLState.INVALID_PARAMETER_VALUE);
+        }
+
+        long oid = createBlob(parameterIndex, inputStream, length);
+        setLong(parameterIndex, oid);
+    }
+
+    /**
+     * Binds an unbounded binary stream by copying it into a new server-side large
+     * object and binding the resulting OID.
+     *
+     * @param parameterIndex 1-based parameter index
+     * @param inputStream    source stream; {@code null} binds SQL NULL of type BLOB
+     * @throws SQLException if the statement is closed or the copy fails
+     */
+    @Override
+    public void setBlob(int parameterIndex,
+                        InputStream inputStream) throws SQLException {
+        checkClosed();
+
+        if (inputStream == null) {
+            setNull(parameterIndex, Types.BLOB);
+            return;
+        }
+
+        // Long.MAX_VALUE acts as "no limit": createBlob copies until end of stream.
+        long oid = createBlob(parameterIndex, inputStream, Long.MAX_VALUE);
+        setLong(parameterIndex, oid);
+    }
+
+    /** Not supported by this driver. */
+    @Override
+    public void setNClob(int parameterIndex,
+                         Reader reader, long length) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "setNClob(int, Reader, long)");
+    }
+
+    /** Not supported by this driver. */
+    @Override
+    public void setNClob(int parameterIndex,
+                         Reader reader) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "setNClob(int, Reader)");
+    }
+
+    /**
+     * Binds a {@link SQLXML} parameter as the XML type. A {@code null} object, or
+     * one whose string form is {@code null}, binds SQL NULL of type SQLXML.
+     *
+     * @param parameterIndex 1-based parameter index
+     * @param xmlObject      the XML value to bind, possibly {@code null}
+     * @throws SQLException if the statement is closed or the value cannot be read
+     */
+    @Override
+    public void setSQLXML(int parameterIndex,
+                          SQLXML xmlObject) throws SQLException {
+        checkClosed();
+        String payload = null;
+        if (xmlObject != null) {
+            payload = xmlObject.getString();
+        }
+        if (payload != null) {
+            setString(parameterIndex, payload, Oid.XML);
+        } else {
+            setNull(parameterIndex, Types.SQLXML);
+        }
+    }
+
+    /**
+     * Binds a {@link UUID} parameter, using the 16-byte binary wire format when the
+     * connection allows binary transfer for UUID, and the canonical text form
+     * otherwise.
+     */
+    private void setUuid(int parameterIndex, UUID uuid) throws SQLException {
+        if (connection.binaryTransferSend(Oid.UUID)) {
+            byte[] val = new byte[16];
+            // Most-significant 8 bytes first, then least-significant 8 bytes.
+            ByteConverter.int8(val, 0, uuid.getMostSignificantBits());
+            ByteConverter.int8(val, 8, uuid.getLeastSignificantBits());
+            bindBytes(parameterIndex, val, Oid.UUID);
+        } else {
+            bindLiteral(parameterIndex, uuid.toString(), Oid.UUID);
+        }
+    }
+
+    /** Not supported by this driver. */
+    @Override
+    public void setURL(int parameterIndex, URL x) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "setURL(int,URL)");
+    }
+
+    /**
+     * Executes the accumulated batch. When the batch holds more than one parameter
+     * set and server-side prepare is enabled, the query's execute count is bumped
+     * so the batch runs as a server-prepared statement.
+     *
+     * @return per-statement update counts, as defined by {@link java.sql.Statement#executeBatch}
+     * @throws SQLException if the batch execution fails
+     */
+    @Override
+    public int[] executeBatch() throws SQLException {
+        try {
+            // Note: in batch prepared statements batchStatements == 1, and batchParameters is equal
+            // to the number of addBatch calls
+            // batchParameters might be empty in case of empty batch
+            if (batchParameters != null && batchParameters.size() > 1 && mPrepareThreshold > 0) {
+                // Use server-prepared statements when there's more than one statement in a batch
+                // Technically speaking, it might cause to create a server-prepared statement
+                // just for 2 executions even for prepareThreshold=5. That however should be
+                // acceptable since prepareThreshold is a optimization kind of parameter.
+                this.preparedQuery.increaseExecuteCount(mPrepareThreshold);
+            }
+            return super.executeBatch();
+        } finally {
+            // Drop the cached time zone so later binds re-resolve the default
+            // calendar (see getDefaultCalendar).
+            defaultTimeZone = null;
+        }
+    }
+
+    /**
+     * Returns a shared Calendar in the connection's default time zone. When the
+     * fast path is unavailable, the resolved TimeZone is cached in
+     * {@link #defaultTimeZone} to avoid repeated lookups; the cache is cleared by
+     * {@link #executeBatch()}.
+     */
+    private Calendar getDefaultCalendar() {
+        if (getTimestampUtils().hasFastDefaultTimeZone()) {
+            return getTimestampUtils().getSharedCalendar(null);
+        }
+        Calendar sharedCalendar = getTimestampUtils().getSharedCalendar(defaultTimeZone);
+        if (defaultTimeZone == null) {
+            defaultTimeZone = sharedCalendar.getTimeZone();
+        }
+        return sharedCalendar;
+    }
+
+    /**
+     * Describes the prepared query on the server — a one-shot, describe-only
+     * execution that suppresses an implicit BEGIN — then builds parameter metadata
+     * from the parameter type OIDs the server reported.
+     *
+     * @return metadata describing the statement's parameters
+     * @throws SQLException if the describe round trip fails
+     */
+    @Override
+    public ParameterMetaData getParameterMetaData() throws SQLException {
+        int flags = QueryExecutor.QUERY_ONESHOT | QueryExecutor.QUERY_DESCRIBE_ONLY
+                | QueryExecutor.QUERY_SUPPRESS_BEGIN;
+        StatementResultHandler handler = new StatementResultHandler();
+        connection.getQueryExecutor().execute(preparedQuery.query, preparedParameters, handler, 0, 0,
+                flags);
+
+        int[] oids = preparedParameters.getTypeOIDs();
+        return createParameterMetaData(connection, oids);
+    }
+
+    /**
+     * Rewrites a multi-row batch of a {@link BatchedQuery} into a smaller number of
+     * multi-valued statements. Parameter sets are grouped into power-of-two blocks
+     * of at most 128 rows (bounded also by the maximum bind count per statement),
+     * replacing {@link #batchStatements}/{@link #batchParameters} in place. Batches
+     * of one row, or non-batched queries, are left untouched.
+     *
+     * @throws SQLException if deriving a multi-valued query fails
+     */
+    @Override
+    public void transformQueriesAndParameters() throws SQLException {
+        ArrayList<ParameterList> batchParameters = this.batchParameters;
+        if (batchParameters == null || batchParameters.size() <= 1
+                || !(preparedQuery.query instanceof BatchedQuery)) {
+            return;
+        }
+        BatchedQuery originalQuery = (BatchedQuery) preparedQuery.query;
+        // Single query cannot have more than {@link Short#MAX_VALUE} binds, thus
+        // the number of multi-values blocks should be capped.
+        // Typically, it does not make much sense to batch more than 128 rows: performance
+        // does not improve much after updating 128 statements with 1 multi-valued one, thus
+        // we cap maximum batch size and split there.
+        final int bindCount = originalQuery.getBindCount();
+        final int highestBlockCount = 128;
+        final int maxValueBlocks = bindCount == 0 ? 1024 /* if no binds, use 1024 rows */
+                : Integer.highestOneBit( // deriveForMultiBatch supports powers of two only
+                Math.min(Math.max(1, maximumNumberOfParameters() / bindCount), highestBlockCount));
+        int unprocessedBatchCount = batchParameters.size();
+        // Full blocks plus one block per set bit of the remainder (each remainder
+        // block is itself a power of two).
+        final int fullValueBlocksCount = unprocessedBatchCount / maxValueBlocks;
+        final int partialValueBlocksCount = Integer.bitCount(unprocessedBatchCount % maxValueBlocks);
+        final int count = fullValueBlocksCount + partialValueBlocksCount;
+        ArrayList<Query> newBatchStatements = new ArrayList<>(count);
+        ArrayList<ParameterList> newBatchParameters = new ArrayList<>(count);
+        int offset = 0;
+        for (int i = 0; i < count; i++) {
+            int valueBlock;
+            if (unprocessedBatchCount >= maxValueBlocks) {
+                valueBlock = maxValueBlocks;
+            } else {
+                valueBlock = Integer.highestOneBit(unprocessedBatchCount);
+            }
+            // Find appropriate batch for block count.
+            BatchedQuery bq = originalQuery.deriveForMultiBatch(valueBlock);
+            ParameterList newPl = bq.createParameterList();
+            for (int j = 0; j < valueBlock; j++) {
+                ParameterList pl = batchParameters.get(offset++);
+                if (pl != null) {
+                    newPl.appendAll(pl);
+                }
+            }
+            newBatchStatements.add(bq);
+            newBatchParameters.add(newPl);
+            unprocessedBatchCount -= valueBlock;
+        }
+        this.batchStatements = newBatchStatements;
+        this.batchParameters = newBatchParameters;
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/PgResultSet.java b/pgjdbc/src/main/java/org/postgresql/jdbc/PgResultSet.java
index 3eb3796..b67193d 100644
--- a/pgjdbc/src/main/java/org/postgresql/jdbc/PgResultSet.java
+++ b/pgjdbc/src/main/java/org/postgresql/jdbc/PgResultSet.java
@@ -87,4214 +87,4182 @@ import java.util.logging.Level;
 @SuppressWarnings({"try", "deprecation"})
 public class PgResultSet implements ResultSet, PGRefCursorResultSet {
 
-  // needed for updateable result set support
-  private boolean updateable;
-  private boolean doingUpdates;
-  private HashMap<String, Object> updateValues;
-  private boolean usingOID; // are we using the OID for the primary key?
-  private List<PrimaryKey> primaryKeys; // list of primary keys
-  private boolean singleTable;
-  private String onlyTable = "";
-  private String tableName;
-  private PreparedStatement deleteStatement;
-  private final int resultsettype;
-  private final int resultsetconcurrency;
-  private int fetchdirection = ResultSet.FETCH_UNKNOWN;
-  private TimeZone defaultTimeZone;
-  protected final BaseConnection connection; // the connection we belong to
-  protected final BaseStatement statement; // the statement we belong to
-  protected final Field[] fields; // Field metadata for this resultset.
-  protected final Query originalQuery; // Query we originated from
-  private TimestampUtils timestampUtils; // our own Object because it's not thread safe
+    // TODO: In Java 8 this constant is missing, later versions (at least 11) have LocalDate#EPOCH:
+    private static final LocalDate LOCAL_DATE_EPOCH = LocalDate.of(1970, 1, 1);
+    private static final BigInteger BYTEMAX = new BigInteger(Byte.toString(Byte.MAX_VALUE));
+    private static final BigInteger BYTEMIN = new BigInteger(Byte.toString(Byte.MIN_VALUE));
+    /**
+     * A dummy exception thrown when fast byte[] to number parsing fails and no value can be returned.
+     * The exact stack trace does not matter because the exception is always caught and is not visible
+     * to users.
+     */
+    private static final NumberFormatException FAST_NUMBER_FAILED = new NumberFormatException() {
 
-  protected final int maxRows; // Maximum rows in this resultset (might be 0).
-  protected final int maxFieldSize; // Maximum field size in this resultset (might be 0).
+        // Override fillInStackTrace to prevent memory leak via Throwable.backtrace hidden field
+        // The field is not observable via reflection, however when throwable contains stacktrace, it
+        // does
+        // hold strong references to user objects (e.g. classes -> classloaders), thus it might lead to
+        // OutOfMemory conditions.
+        @Override
+        public Throwable fillInStackTrace() {
+            return this;
+        }
+    };
+    private static final BigInteger SHORTMAX = new BigInteger(Short.toString(Short.MAX_VALUE));
+    private static final BigInteger SHORTMIN = new BigInteger(Short.toString(Short.MIN_VALUE));
+    private static final BigInteger INTMAX = new BigInteger(Integer.toString(Integer.MAX_VALUE));
+    private static final BigInteger INTMIN = new BigInteger(Integer.toString(Integer.MIN_VALUE));
+    private static final BigInteger LONGMAX = new BigInteger(Long.toString(Long.MAX_VALUE));
+    private static final BigInteger LONGMIN = new BigInteger(Long.toString(Long.MIN_VALUE));
+    private static final float LONG_MAX_FLOAT = StrictMath.nextDown(Long.MAX_VALUE);
+    private static final float LONG_MIN_FLOAT = StrictMath.nextUp(Long.MIN_VALUE);
+    private static final double LONG_MAX_DOUBLE = StrictMath.nextDown((double) Long.MAX_VALUE);
+    private static final double LONG_MIN_DOUBLE = StrictMath.nextUp((double) Long.MIN_VALUE);
+    protected final BaseConnection connection; // the connection we belong to
+    protected final BaseStatement statement; // the statement we belong to
+    protected final Field[] fields; // Field metadata for this resultset.
+    protected final Query originalQuery; // Query we originated from
+    protected final int maxRows; // Maximum rows in this resultset (might be 0).
+    protected final int maxFieldSize; // Maximum field size in this resultset (might be 0).
+    private final int resultsettype;
+    private final int resultsetconcurrency;
+    private final ResourceLock lock = new ResourceLock();
+    protected List<Tuple> rows; // Current page of results.
+    protected int currentRow = -1; // Index into 'rows' of our current row (0-based)
+    protected int rowOffset; // Offset of row 0 in the actual resultset
+    protected Tuple thisRow; // copy of the current result row
+    // are we on the insert row (for JDBC2 updatable resultsets)?
+    protected SQLWarning warnings; // The warning chain
+    /**
+     * True if the last obtained column value was SQL NULL as specified by {@link #wasNull}. The value
+     * is always updated by the {@link #getRawValue} method.
+     */
+    protected boolean wasNullFlag;
+    protected boolean onInsertRow;
+    protected int fetchSize; // Current fetch size (might be 0).
+    protected int lastUsedFetchSize; // Fetch size used during last fetch
+    protected boolean adaptiveFetch;
+    protected ResultCursor cursor; // Cursor for fetching additional data.
+    // needed for updateable result set support
+    private boolean updateable;
+    private boolean doingUpdates;
+    private HashMap<String, Object> updateValues;
+    private boolean usingOID; // are we using the OID for the primary key?
+    private List<PrimaryKey> primaryKeys; // list of primary keys
+    private boolean singleTable;
+    private String onlyTable = "";
+    private String tableName;
+    private PreparedStatement deleteStatement;
+    private int fetchdirection = ResultSet.FETCH_UNKNOWN;
+    private TimeZone defaultTimeZone;
+    private TimestampUtils timestampUtils; // our own Object because it's not thread safe
+    private Tuple rowBuffer; // updateable rowbuffer
+    // Speed up findColumn by caching lookups
+    private Map<String, Integer> columnNameIndexMap;
+    private ResultSetMetaData rsMetaData;
+    private String refCursorName;
 
-  protected List<Tuple> rows; // Current page of results.
-  protected int currentRow = -1; // Index into 'rows' of our current row (0-based)
-  protected int rowOffset; // Offset of row 0 in the actual resultset
-  protected Tuple thisRow; // copy of the current result row
-  protected SQLWarning warnings; // The warning chain
-  /**
-   * True if the last obtained column value was SQL NULL as specified by {@link #wasNull}. The value
-   * is always updated by the {@link #getRawValue} method.
-   */
-  protected boolean wasNullFlag;
-  protected boolean onInsertRow;
-  // are we on the insert row (for JDBC2 updatable resultsets)?
-
-  private Tuple rowBuffer; // updateable rowbuffer
-
-  protected int fetchSize; // Current fetch size (might be 0).
-  protected int lastUsedFetchSize; // Fetch size used during last fetch
-  protected boolean adaptiveFetch;
-  protected ResultCursor cursor; // Cursor for fetching additional data.
-
-  // Speed up findColumn by caching lookups
-  private Map<String, Integer> columnNameIndexMap;
-
-  private ResultSetMetaData rsMetaData;
-  private final ResourceLock lock = new ResourceLock();
-
-  protected ResultSetMetaData createMetaData() throws SQLException {
-    return new PgResultSetMetaData(connection, fields);
-  }
-
-  @Override
-  public ResultSetMetaData getMetaData() throws SQLException {
-    checkClosed();
-    if (rsMetaData == null) {
-      rsMetaData = createMetaData();
-    }
-    return rsMetaData;
-  }
-
-  PgResultSet(Query originalQuery, BaseStatement statement,
-      Field[] fields, List<Tuple> tuples,
-      ResultCursor cursor, int maxRows, int maxFieldSize, int rsType, int rsConcurrency,
-      int rsHoldability, boolean adaptiveFetch) throws SQLException {
-    // Fail-fast on invalid null inputs
-    if (tuples == null) {
-      throw new NullPointerException("tuples must be non-null");
-    }
-    if (fields == null) {
-      throw new NullPointerException("fields must be non-null");
-    }
-
-    this.originalQuery = originalQuery;
-    this.connection = (BaseConnection) statement.getConnection();
-    this.statement = statement;
-    this.fields = fields;
-    this.rows = tuples;
-    this.cursor = cursor;
-    this.maxRows = maxRows;
-    this.maxFieldSize = maxFieldSize;
-    this.resultsettype = rsType;
-    this.resultsetconcurrency = rsConcurrency;
-    this.adaptiveFetch = adaptiveFetch;
-
-    // Constructor doesn't have fetch size and can't be sure if fetch size was used so initial value would be the number of rows
-    this.lastUsedFetchSize = tuples.size();
-  }
-
-  @Override
-  public URL getURL(int columnIndex) throws SQLException {
-    connection.getLogger().log(Level.FINEST, "  getURL columnIndex: {0}", columnIndex);
-    checkClosed();
-    throw Driver.notImplemented(this.getClass(), "getURL(int)");
-  }
-
-  @Override
-  public URL getURL(String columnName) throws SQLException {
-    return getURL(findColumn(columnName));
-  }
-
-  protected Object internalGetObject(int columnIndex, Field field) throws SQLException {
-    switch (getSQLType(columnIndex)) {
-      case Types.BOOLEAN:
-      case Types.BIT:
-        if (field.getOID() == Oid.BOOL) {
-          return getBoolean(columnIndex);
+    PgResultSet(Query originalQuery, BaseStatement statement,
+                Field[] fields, List<Tuple> tuples,
+                ResultCursor cursor, int maxRows, int maxFieldSize, int rsType, int rsConcurrency,
+                int rsHoldability, boolean adaptiveFetch) throws SQLException {
+        // Fail-fast on invalid null inputs
+        if (tuples == null) {
+            throw new NullPointerException("tuples must be non-null");
+        }
+        if (fields == null) {
+            throw new NullPointerException("fields must be non-null");
         }
 
-        if (field.getOID() == Oid.BIT) {
-          // Let's peek at the data - I tried to use the field.getLength() but it returns 65535 and
-          // it doesn't reflect the real length of the field, which is odd.
-          // If we have 1 byte, it's a bit(1) and return a boolean to preserve the backwards
-          // compatibility. If the value is null, it doesn't really matter
-          byte[] data = getRawValue(columnIndex);
-          if (data == null || data.length == 1) {
-            return getBoolean(columnIndex);
-          }
+        this.originalQuery = originalQuery;
+        this.connection = (BaseConnection) statement.getConnection();
+        this.statement = statement;
+        this.fields = fields;
+        this.rows = tuples;
+        this.cursor = cursor;
+        this.maxRows = maxRows;
+        this.maxFieldSize = maxFieldSize;
+        this.resultsettype = rsType;
+        this.resultsetconcurrency = rsConcurrency;
+        this.adaptiveFetch = adaptiveFetch;
+
+        // Constructor doesn't have fetch size and can't be sure if fetch size was used so initial value would be the number of rows
+        this.lastUsedFetchSize = tuples.size();
+    }
+
+    /**
+     * Cracks out the table name and schema (if it exists) from a fully qualified table name.
+     *
+     * <p>Unquoted identifiers are folded to lower case; quoted identifiers keep their case and
+     * may contain dots. A doubled quote ("") inside a quoted section is collapsed to a single
+     * quote character. Test cases, shown as {@code input : table (schema)}:
+     *
+     *                 <pre>
+     *                 Table                : table      ()
+     *                 "Table"              : Table      ()
+     *                 Schema.Table         : table      (schema)
+     *                 "Schema"."Table"     : Table      (Schema)
+     *                 "Schema"."Dot.Table" : Dot.Table  (Schema)
+     *                 Schema."Dot.Table"   : Dot.Table  (schema)
+     *                 </pre>
+     *
+     * @param fullname string that we are trying to crack
+     * @return String array with element zero always being the tablename and element 1 the schema name
+     * which may be a zero length string.
+     */
+    public static String[] quotelessTableName(String fullname) {
+
+        String[] parts = new String[]{null, ""};
+        StringBuilder acc = new StringBuilder();
+        boolean betweenQuotes = false;
+        for (int i = 0; i < fullname.length(); i++) {
+            char c = fullname.charAt(i);
+            switch (c) {
+                case '"':
+                    if ((i < fullname.length() - 1) && (fullname.charAt(i + 1) == '"')) {
+                        // two consecutive quotes - keep one
+                        i++;
+                        acc.append(c); // keep the quote
+                    } else { // Discard it
+                        betweenQuotes = !betweenQuotes;
+                    }
+                    break;
+                case '.':
+                    if (betweenQuotes) { // Keep it
+                        acc.append(c);
+                    } else { // Have schema name
+                        parts[1] = acc.toString();
+                        acc = new StringBuilder();
+                    }
+                    break;
+                default:
+                    acc.append(betweenQuotes ? c : Character.toLowerCase(c));
+                    break;
+            }
         }
-        // Returning null here will lead to another value processing path for the bit field
-        // which will return a PGobject
-        return null;
-      case Types.SQLXML:
-        return getSQLXML(columnIndex);
-      case Types.TINYINT:
-      case Types.SMALLINT:
-      case Types.INTEGER:
-        return getInt(columnIndex);
-      case Types.BIGINT:
-        return getLong(columnIndex);
-      case Types.NUMERIC:
-      case Types.DECIMAL:
-        return getNumeric(columnIndex,
-            field.getMod() == -1 ? -1 : ((field.getMod() - 4) & 0xffff), true);
-      case Types.REAL:
-        return getFloat(columnIndex);
-      case Types.FLOAT:
-      case Types.DOUBLE:
-        return getDouble(columnIndex);
-      case Types.CHAR:
-      case Types.VARCHAR:
-      case Types.LONGVARCHAR:
-        return getString(columnIndex);
-      case Types.DATE:
-        return getDate(columnIndex);
-      case Types.TIME:
-        return getTime(columnIndex);
-      case Types.TIMESTAMP:
-        return getTimestamp(columnIndex, null);
-      case Types.BINARY:
-      case Types.VARBINARY:
-      case Types.LONGVARBINARY:
-        return getBytes(columnIndex);
-      case Types.ARRAY:
-        return getArray(columnIndex);
-      case Types.CLOB:
-        return getClob(columnIndex);
-      case Types.BLOB:
-        return getBlob(columnIndex);
+        // Always put table in slot 0
+        parts[0] = acc.toString();
+        return parts;
+    }
 
-      default:
-        String type = getPGType(columnIndex);
-
-        // if the backend doesn't know the type then coerce to String
-        if ("unknown".equals(type)) {
-          return getString(columnIndex);
+    /**
+     * Builds a lookup map from column label to 1-based column index.
+     * Labels are lower-cased with {@code Locale.US} unless the sanitiser is disabled,
+     * in which case they are stored verbatim.
+     *
+     * @param fields              result set fields to index
+     * @param isSanitiserDisabled when true, keep the labels' original case
+     * @return map of column label to 1-based index; for duplicate labels the
+     *     first (lowest-index) column wins, per the JDBC spec
+     */
+    public static Map<String, Integer> createColumnNameIndexMap(Field[] fields,
+                                                                boolean isSanitiserDisabled) {
+        Map<String, Integer> columnNameIndexMap = new HashMap<>(fields.length * 2);
+        // The JDBC spec says when you have duplicate columns names,
+        // the first one should be returned. So load the map in
+        // reverse order so the first ones will overwrite later ones.
+        for (int i = fields.length - 1; i >= 0; i--) {
+            String columnLabel = fields[i].getColumnLabel();
+            if (isSanitiserDisabled) {
+                columnNameIndexMap.put(columnLabel, i + 1);
+            } else {
+                columnNameIndexMap.put(columnLabel.toLowerCase(Locale.US), i + 1);
+            }
         }
+        return columnNameIndexMap;
+    }
 
-        if ("uuid".equals(type)) {
-          if (isBinary(columnIndex)) {
-            return getUUID(thisRow.get(columnIndex - 1));
-          }
-          return getUUID(getString(columnIndex));
+    /**
+     * Parses a backend-supplied string as a {@code short}. If plain parsing fails,
+     * falls back to {@link BigDecimal} parsing (handles decimal notation) and
+     * range-checks the integer part against SHORTMIN/SHORTMAX.
+     *
+     * @param s textual value, may be null (SQL NULL)
+     * @return the parsed value, or 0 when s is null
+     * @throws SQLException with NUMERIC_VALUE_OUT_OF_RANGE if s is not numeric or out of range
+     */
+    public static short toShort(String s) throws SQLException {
+        if (s != null) {
+            try {
+                s = s.trim();
+                return Short.parseShort(s);
+            } catch (NumberFormatException e) {
+                try {
+                    BigDecimal n = new BigDecimal(s);
+                    BigInteger i = n.toBigInteger();
+                    int gt = i.compareTo(SHORTMAX);
+                    int lt = i.compareTo(SHORTMIN);
+
+                    if (gt > 0 || lt < 0) {
+                        throw new PSQLException(GT.tr("Bad value for type {0} : {1}", "short", s),
+                                PSQLState.NUMERIC_VALUE_OUT_OF_RANGE);
+                    }
+                    return i.shortValue();
+
+                } catch (NumberFormatException ne) {
+                    throw new PSQLException(GT.tr("Bad value for type {0} : {1}", "short", s),
+                            PSQLState.NUMERIC_VALUE_OUT_OF_RANGE);
+                }
+            }
         }
+        return 0; // SQL NULL
+    }
 
-        // Specialized support for ref cursors is neater.
-        if ("refcursor".equals(type)) {
-          // Fetch all results.
-          String cursorName = getString(columnIndex);
+    /**
+     * Parses a backend-supplied string as an {@code int}. If plain parsing fails,
+     * falls back to {@link BigDecimal} parsing (handles decimal notation) and
+     * range-checks the integer part against INTMIN/INTMAX.
+     *
+     * @param s textual value, may be null (SQL NULL)
+     * @return the parsed value, or 0 when s is null
+     * @throws SQLException with NUMERIC_VALUE_OUT_OF_RANGE if s is not numeric or out of range
+     */
+    public static int toInt(String s) throws SQLException {
+        if (s != null) {
+            try {
+                s = s.trim();
+                return Integer.parseInt(s);
+            } catch (NumberFormatException e) {
+                try {
+                    BigDecimal n = new BigDecimal(s);
+                    BigInteger i = n.toBigInteger();
 
-          StringBuilder sb = new StringBuilder("FETCH ALL IN ");
-          Utils.escapeIdentifier(sb, cursorName);
+                    int gt = i.compareTo(INTMAX);
+                    int lt = i.compareTo(INTMIN);
 
-          // nb: no BEGIN triggered here. This is fine. If someone
-          // committed, and the cursor was not holdable (closing the
-          // cursor), we avoid starting a new xact and promptly causing
-          // it to fail. If the cursor *was* holdable, we don't want a
-          // new xact anyway since holdable cursor state isn't affected
-          // by xact boundaries. If our caller didn't commit at all, or
-          // autocommit was on, then we wouldn't issue a BEGIN anyway.
-          //
-          // We take the scrollability from the statement, but until
-          // we have updatable cursors it must be readonly.
-          ResultSet rs =
-              connection.execSQLQuery(sb.toString(), resultsettype, ResultSet.CONCUR_READ_ONLY);
-          ((PgResultSet) rs).setRefCursor(cursorName);
-          // In long-running transactions these backend cursors take up memory space
-          // we could close in rs.close(), but if the transaction is closed before the result set,
-          // then
-          // the cursor no longer exists
-          ((PgResultSet) rs).closeRefCursor();
-          return rs;
+                    if (gt > 0 || lt < 0) {
+                        throw new PSQLException(GT.tr("Bad value for type {0} : {1}", "int", s),
+                                PSQLState.NUMERIC_VALUE_OUT_OF_RANGE);
+                    }
+                    return i.intValue();
+
+                } catch (NumberFormatException ne) {
+                    throw new PSQLException(GT.tr("Bad value for type {0} : {1}", "int", s),
+                            PSQLState.NUMERIC_VALUE_OUT_OF_RANGE);
+                }
+            }
         }
-        if ("hstore".equals(type)) {
-          if (isBinary(columnIndex)) {
-            return HStoreConverter.fromBytes(thisRow.get(columnIndex - 1),
-                connection.getEncoding());
-          }
-          return HStoreConverter.fromString(getString(columnIndex));
+        return 0; // SQL NULL
+    }
+
+    /**
+     * Parses a backend-supplied string as a {@code long}. If plain parsing fails,
+     * falls back to {@link BigDecimal} parsing (handles decimal notation) and
+     * range-checks the integer part against LONGMIN/LONGMAX.
+     *
+     * @param s textual value, may be null (SQL NULL)
+     * @return the parsed value, or 0 when s is null
+     * @throws SQLException with NUMERIC_VALUE_OUT_OF_RANGE if s is not numeric or out of range
+     */
+    public static long toLong(String s) throws SQLException {
+        if (s != null) {
+            try {
+                s = s.trim();
+                return Long.parseLong(s);
+            } catch (NumberFormatException e) {
+                try {
+                    BigDecimal n = new BigDecimal(s);
+                    BigInteger i = n.toBigInteger();
+                    int gt = i.compareTo(LONGMAX);
+                    int lt = i.compareTo(LONGMIN);
+
+                    if (gt > 0 || lt < 0) {
+                        throw new PSQLException(GT.tr("Bad value for type {0} : {1}", "long", s),
+                                PSQLState.NUMERIC_VALUE_OUT_OF_RANGE);
+                    }
+                    return i.longValue();
+                } catch (NumberFormatException ne) {
+                    throw new PSQLException(GT.tr("Bad value for type {0} : {1}", "long", s),
+                            PSQLState.NUMERIC_VALUE_OUT_OF_RANGE);
+                }
+            }
         }
-
-        // Caller determines what to do (JDBC3 overrides in this case)
-        return null;
-    }
-  }
-  
-  private void checkScrollable() throws SQLException {
-    checkClosed();
-    if (resultsettype == ResultSet.TYPE_FORWARD_ONLY) {
-      throw new PSQLException(
-          GT.tr("Operation requires a scrollable ResultSet, but this ResultSet is FORWARD_ONLY."),
-          PSQLState.INVALID_CURSOR_STATE);
-    }
-  }
-
-  @Override
-  public boolean absolute(int index) throws SQLException {
-    checkScrollable();
-
-    // index is 1-based, but internally we use 0-based indices
-    int internalIndex;
-
-    if (index == 0) {
-      beforeFirst();
-      return false;
+        return 0; // SQL NULL
     }
 
-    final int rowsSize = rows.size();
-
-    // if index<0, count from the end of the result set, but check
-    // to be sure that it is not beyond the first index
-    if (index < 0) {
-      if (index >= -rowsSize) {
-        internalIndex = rowsSize + index;
-      } else {
-        beforeFirst();
-        return false;
-      }
-    } else {
-      // must be the case that index>0,
-      // find the correct place, assuming that
-      // the index is not too large
-      if (index <= rowsSize) {
-        internalIndex = index - 1;
-      } else {
-        afterLast();
-        return false;
-      }
-    }
-
-    currentRow = internalIndex;
-    initRowBuffer();
-    onInsertRow = false;
-
-    return true;
-  }
-
-  @Override
-  public void afterLast() throws SQLException {
-    checkScrollable();
-
-    final int rowsSize = rows.size();
-    if (rowsSize > 0) {
-      currentRow = rowsSize;
-    }
-
-    onInsertRow = false;
-    thisRow = null;
-    rowBuffer = null;
-  }
-
-  @Override
-  public void beforeFirst() throws SQLException {
-    checkScrollable();
-
-    if (!rows.isEmpty()) {
-      currentRow = -1;
-    }
-
-    onInsertRow = false;
-    thisRow = null;
-    rowBuffer = null;
-  }
-
-  @Override
-  public boolean first() throws SQLException {
-    checkScrollable();
-
-    if (rows.size() <= 0) {
-      return false;
-    }
-
-    currentRow = 0;
-    initRowBuffer();
-    onInsertRow = false;
-
-    return true;
-  }
-
-  @Override
-  public Array getArray(String colName) throws SQLException {
-    return getArray(findColumn(colName));
-  }
-
-  protected Array makeArray(int oid, byte[] value) throws SQLException {
-    return new PgArray(connection, oid, value);
-  }
-
-  protected Array makeArray(int oid, String value) throws SQLException {
-    return new PgArray(connection, oid, value);
-  }
-
-  
-  @Override
-  public Array getArray(int i) throws SQLException {
-    byte[] value = getRawValue(i);
-    if (value == null) {
-      return null;
-    }
-
-    int oid = fields[i - 1].getOID();
-    if (isBinary(i)) {
-      return makeArray(oid, value);
-    }
-    return makeArray(oid, getFixedString(i));
-  }
-
-  @Override
-  public BigDecimal getBigDecimal(int columnIndex) throws SQLException {
-    return getBigDecimal(columnIndex, -1);
-  }
-
-  @Override
-  public BigDecimal getBigDecimal(String columnName) throws SQLException {
-    return getBigDecimal(findColumn(columnName));
-  }
-
-  @Override
-  public Blob getBlob(String columnName) throws SQLException {
-    return getBlob(findColumn(columnName));
-  }
-
-  protected Blob makeBlob(long oid) throws SQLException {
-    return new PgBlob(connection, oid);
-  }
-
-  @Override
-  
-  public Blob getBlob(int i) throws SQLException {
-    byte[] value = getRawValue(i);
-    if (value == null) {
-      return null;
-    }
-
-    return makeBlob(getLong(i));
-  }
-
-  @Override
-  public Reader getCharacterStream(String columnName) throws SQLException {
-    return getCharacterStream(findColumn(columnName));
-  }
-
-  @Override
-  public Reader getCharacterStream(int i) throws SQLException {
-    String value = getString(i);
-    if (value == null) {
-      return null;
-    }
-
-    // Version 7.2 supports AsciiStream for all the PG text types
-    // As the spec/javadoc for this method indicate this is to be used for
-    // large text values (i.e. LONGVARCHAR) PG doesn't have a separate
-    // long string datatype, but with toast the text datatype is capable of
-    // handling very large values. Thus the implementation ends up calling
-    // getString() since there is no current way to stream the value from the server
-    return new CharArrayReader(value.toCharArray());
-  }
-
-  @Override
-  public Clob getClob(String columnName) throws SQLException {
-    return getClob(findColumn(columnName));
-  }
-
-  protected Clob makeClob(long oid) throws SQLException {
-    return new PgClob(connection, oid);
-  }
-
-  @Override
-  
-  public Clob getClob(int i) throws SQLException {
-    byte[] value = getRawValue(i);
-    if (value == null) {
-      return null;
-    }
-
-    return makeClob(getLong(i));
-  }
-
-  @Override
-  public int getConcurrency() throws SQLException {
-    checkClosed();
-    return resultsetconcurrency;
-  }
-
-  @Override
-  public Date getDate(
-      int i, Calendar cal) throws SQLException {
-    byte[] value = getRawValue(i);
-    if (value == null) {
-      return null;
-    }
-
-    if (cal == null) {
-      cal = getDefaultCalendar();
-    }
-    if (isBinary(i)) {
-      int col = i - 1;
-      int oid = fields[col].getOID();
-      TimeZone tz = cal.getTimeZone();
-      if (oid == Oid.DATE) {
-        return getTimestampUtils().toDateBin(tz, value);
-      } else if (oid == Oid.TIMESTAMP || oid == Oid.TIMESTAMPTZ) {
-        // If backend provides just TIMESTAMP, we use "cal" timezone
-        // If backend provides TIMESTAMPTZ, we ignore "cal" as we know true instant value
-        Timestamp timestamp = getTimestamp(i, cal);
-        // Here we just truncate date to 00:00 in a given time zone
-        return getTimestampUtils().convertToDate(timestamp.getTime(), tz);
-      } else {
-        throw new PSQLException(
-            GT.tr("Cannot convert the column of type {0} to requested type {1}.",
-                Oid.toString(oid), "date"),
-            PSQLState.DATA_TYPE_MISMATCH);
-      }
-    }
-
-    return getTimestampUtils().toDate(cal, getString(i));
-  }
-
-  @Override
-  public Time getTime(
-      int i, Calendar cal) throws SQLException {
-    byte[] value = getRawValue(i);
-    if (value == null) {
-      return null;
-    }
-
-    if (cal == null) {
-      cal = getDefaultCalendar();
-    }
-    if (isBinary(i)) {
-      int col = i - 1;
-      int oid = fields[col].getOID();
-      TimeZone tz = cal.getTimeZone();
-      if (oid == Oid.TIME || oid == Oid.TIMETZ) {
-        return getTimestampUtils().toTimeBin(tz, value);
-      } else if (oid == Oid.TIMESTAMP || oid == Oid.TIMESTAMPTZ) {
-        // If backend provides just TIMESTAMP, we use "cal" timezone
-        // If backend provides TIMESTAMPTZ, we ignore "cal" as we know true instant value
-        Timestamp timestamp = getTimestamp(i, cal);
-        if (timestamp == null) {
-          return null;
+    /**
+     * Parses a backend-supplied string as a {@link BigDecimal}.
+     *
+     * @param s textual value, may be null (SQL NULL)
+     * @return the parsed value, or null when s is null
+     * @throws SQLException with NUMERIC_VALUE_OUT_OF_RANGE if s is not a valid number
+     */
+    public static BigDecimal toBigDecimal(String s) throws SQLException {
+        if (s == null) {
+            return null;
         }
-        long timeMillis = timestamp.getTime();
-        if (oid == Oid.TIMESTAMPTZ) {
-          // time zone == UTC since BINARY "timestamp with time zone" is always sent in UTC
-          // So we truncate days
-          return new Time(timeMillis % TimeUnit.DAYS.toMillis(1));
+        try {
+            s = s.trim();
+            return new BigDecimal(s);
+        } catch (NumberFormatException e) {
+            throw new PSQLException(GT.tr("Bad value for type {0} : {1}", "BigDecimal", s),
+                    PSQLState.NUMERIC_VALUE_OUT_OF_RANGE);
         }
-        // Here we just truncate date part
-        return getTimestampUtils().convertToTime(timeMillis, tz);
-      } else {
-        throw new PSQLException(
-            GT.tr("Cannot convert the column of type {0} to requested type {1}.",
-                Oid.toString(oid), "time"),
-            PSQLState.DATA_TYPE_MISMATCH);
-      }
     }
 
-    String string = getString(i);
-    return getTimestampUtils().toTime(cal, string);
-  }
-
-  
-  @Override
-  public Timestamp getTimestamp(
-      int i, Calendar cal) throws SQLException {
-
-    byte[] value = getRawValue(i);
-    if (value == null) {
-      return null;
-    }
-
-    if (cal == null) {
-      cal = getDefaultCalendar();
-    }
-    int col = i - 1;
-    int oid = fields[col].getOID();
-
-    if (isBinary(i)) {
-      byte [] row = thisRow.get(col);
-      if (oid == Oid.TIMESTAMPTZ || oid == Oid.TIMESTAMP) {
-        boolean hasTimeZone = oid == Oid.TIMESTAMPTZ;
-        TimeZone tz = cal.getTimeZone();
-        return getTimestampUtils().toTimestampBin(tz, row, hasTimeZone);
-      } else if (oid == Oid.TIME) {
-        // JDBC spec says getTimestamp of Time and Date must be supported
-        Timestamp tsWithMicros = getTimestampUtils().toTimestampBin(cal.getTimeZone(), row, false);
-        // If server sends us a TIME, we ensure java counterpart has date of 1970-01-01
-        Timestamp tsUnixEpochDate = new Timestamp(getTime(i, cal).getTime());
-        tsUnixEpochDate.setNanos(tsWithMicros.getNanos());
-        return tsUnixEpochDate;
-      } else if (oid == Oid.TIMETZ) {
-        TimeZone tz = cal.getTimeZone();
-        byte[] timeBytesWithoutTimeZone = Arrays.copyOfRange(row, 0, 8);
-        Timestamp tsWithMicros = getTimestampUtils().toTimestampBin(tz, timeBytesWithoutTimeZone, false);
-        // If server sends us a TIMETZ, we ensure java counterpart has date of 1970-01-01
-        Timestamp tsUnixEpochDate = new Timestamp(getTime(i, cal).getTime());
-        tsUnixEpochDate.setNanos(tsWithMicros.getNanos());
-        return tsUnixEpochDate;
-      } else if (oid == Oid.DATE) {
-        new Timestamp(getDate(i, cal).getTime());
-      } else {
-        throw new PSQLException(
-            GT.tr("Cannot convert the column of type {0} to requested type {1}.",
-                Oid.toString(oid), "timestamp"),
-            PSQLState.DATA_TYPE_MISMATCH);
-      }
-    }
-
-    // If this is actually a timestamptz, the server-provided timezone will override
-    // the one we pass in, which is the desired behaviour. Otherwise, we'll
-    // interpret the timezone-less value in the provided timezone.
-    String string = getString(i);
-    if (oid == Oid.TIME || oid == Oid.TIMETZ) {
-      // If server sends us a TIME, we ensure java counterpart has date of 1970-01-01
-      Timestamp tsWithMicros = getTimestampUtils().toTimestamp(cal, string);
-      Timestamp tsUnixEpochDate = new Timestamp(getTimestampUtils().toTime(cal, string).getTime());
-      tsUnixEpochDate.setNanos(tsWithMicros.getNanos());
-      return tsUnixEpochDate;
-    }
-
-    return getTimestampUtils().toTimestamp(cal, string);
-
-  }
-
-  // TODO: In Java 8 this constant is missing, later versions (at least 11) have LocalDate#EPOCH:
-  private static final LocalDate LOCAL_DATE_EPOCH = LocalDate.of(1970, 1, 1);
-
-  private OffsetDateTime getOffsetDateTime(int i) throws SQLException {
-    byte[] value = getRawValue(i);
-    if (value == null) {
-      return null;
-    }
-
-    int col = i - 1;
-    int oid = fields[col].getOID();
-
-    // TODO: Disallow getting OffsetDateTime from a non-TZ field
-    if (isBinary(i)) {
-      if (oid == Oid.TIMESTAMPTZ || oid == Oid.TIMESTAMP) {
-        return getTimestampUtils().toOffsetDateTimeBin(value);
-      } else if (oid == Oid.TIMETZ) {
-        // JDBC spec says timetz must be supported
-        return getTimestampUtils().toOffsetTimeBin(value).atDate(LOCAL_DATE_EPOCH);
-      }
-    } else {
-      // string
-
-      if (oid == Oid.TIMESTAMPTZ || oid == Oid.TIMESTAMP )  {
-
-        OffsetDateTime offsetDateTime = getTimestampUtils().toOffsetDateTime(getString(i));
-        if ( offsetDateTime != OffsetDateTime.MAX && offsetDateTime != OffsetDateTime.MIN ) {
-          return offsetDateTime.withOffsetSameInstant(ZoneOffset.UTC);
-        } else {
-          return offsetDateTime;
+    /**
+     * Parses a backend-supplied string as a {@code float}.
+     *
+     * @param s textual value, may be null (SQL NULL)
+     * @return the parsed value, or 0 when s is null
+     * @throws SQLException with NUMERIC_VALUE_OUT_OF_RANGE if s is not a valid number
+     */
+    public static float toFloat(String s) throws SQLException {
+        if (s != null) {
+            try {
+                s = s.trim();
+                return Float.parseFloat(s);
+            } catch (NumberFormatException e) {
+                throw new PSQLException(GT.tr("Bad value for type {0} : {1}", "float", s),
+                        PSQLState.NUMERIC_VALUE_OUT_OF_RANGE);
+            }
         }
-
-      }
-      if ( oid == Oid.TIMETZ ) {
-        return getTimestampUtils().toOffsetDateTime(getString(i));
-      }
+        return 0; // SQL NULL
     }
 
-    throw new PSQLException(
-        GT.tr("Cannot convert the column of type {0} to requested type {1}.",
-            Oid.toString(oid), "java.time.OffsetDateTime"),
-        PSQLState.DATA_TYPE_MISMATCH);
-  }
-
-  private OffsetTime getOffsetTime(int i) throws SQLException {
-    byte[] value = getRawValue(i);
-    if (value == null) {
-      return null;
+    /**
+     * Parses a backend-supplied string as a {@code double}.
+     *
+     * @param s textual value, may be null (SQL NULL)
+     * @return the parsed value, or 0 when s is null
+     * @throws SQLException with NUMERIC_VALUE_OUT_OF_RANGE if s is not a valid number
+     */
+    public static double toDouble(String s) throws SQLException {
+        if (s != null) {
+            try {
+                s = s.trim();
+                return Double.parseDouble(s);
+            } catch (NumberFormatException e) {
+                throw new PSQLException(GT.tr("Bad value for type {0} : {1}", "double", s),
+                        PSQLState.NUMERIC_VALUE_OUT_OF_RANGE);
+            }
+        }
+        return 0; // SQL NULL
     }
 
-    int col = i - 1;
-    int oid = fields[col].getOID();
-
-    if (oid == Oid.TIMETZ) {
-      if (isBinary(i)) {
-        return getTimestampUtils().toOffsetTimeBin(value);
-      } else {
-        return getTimestampUtils().toOffsetTime(getString(i));
-      }
+    /**
+     * Creates the metadata object for this result set; overridable by subclasses.
+     *
+     * @return a new PgResultSetMetaData over this result set's connection and fields
+     * @throws SQLException if metadata construction fails
+     */
+    protected ResultSetMetaData createMetaData() throws SQLException {
+        return new PgResultSetMetaData(connection, fields);
     }
 
-    throw new PSQLException(
-        GT.tr("Cannot convert the column of type {0} to requested type {1}.",
-            Oid.toString(oid), "java.time.OffsetTime"),
-        PSQLState.DATA_TYPE_MISMATCH);
-  }
-
-  private LocalDateTime getLocalDateTime(int i) throws SQLException {
-    byte[] value = getRawValue(i);
-    if (value == null) {
-      return null;
+    /**
+     * Returns this result set's metadata, lazily built via createMetaData()
+     * and cached in {@code rsMetaData}.
+     *
+     * @throws SQLException if the result set is closed
+     */
+    @Override
+    public ResultSetMetaData getMetaData() throws SQLException {
+        checkClosed();
+        if (rsMetaData == null) {
+            rsMetaData = createMetaData();
         }
+        return rsMetaData;
     }
 
-    int col = i - 1;
-    int oid = fields[col].getOID();
-
-    if (oid == Oid.TIMESTAMP) {
-      if (isBinary(i)) {
-        return getTimestampUtils().toLocalDateTimeBin(value);
-      } else {
-        return getTimestampUtils().toLocalDateTime(getString(i));
-      }
+    /**
+     * Not implemented by this driver; logs the request and always throws.
+     *
+     * @throws SQLException always, via Driver.notImplemented
+     */
+    @Override
+    public URL getURL(int columnIndex) throws SQLException {
+        connection.getLogger().log(Level.FINEST, "  getURL columnIndex: {0}", columnIndex);
+        checkClosed();
+        throw Driver.notImplemented(this.getClass(), "getURL(int)");
     }
 
-    throw new PSQLException(
-        GT.tr("Cannot convert the column of type {0} to requested type {1}.",
-            Oid.toString(oid), "java.time.LocalDateTime"),
-        PSQLState.DATA_TYPE_MISMATCH);
-  }
-
-  private LocalDate getLocalDate(int i) throws SQLException {
-    byte[] value = getRawValue(i);
-    if (value == null) {
-      return null;
+    /**
+     * Name-based variant; delegates to getURL(int) after resolving the column index.
+     */
+    @Override
+    public URL getURL(String columnName) throws SQLException {
+        return getURL(findColumn(columnName));
     }
 
-    int col = i - 1;
-    int oid = fields[col].getOID();
+    /**
+     * Maps the column's JDBC SQL type to a Java object via the matching typed getter
+     * (getInt, getString, getTimestamp, ...). For cases this switch does not resolve
+     * (multi-bit BIT values, and PG types other than "unknown", "uuid", "refcursor"
+     * and "hstore" in the default branch) it returns null so the caller can decide
+     * how to materialize the value.
+     *
+     * @param columnIndex first column is 1
+     * @param field       field descriptor supplying the OID and type modifier
+     * @return the converted value, or null when the caller must handle the type
+     * @throws SQLException if conversion fails or the refcursor fetch query fails
+     */
+    protected Object internalGetObject(int columnIndex, Field field) throws SQLException {
+        switch (getSQLType(columnIndex)) {
+            case Types.BOOLEAN:
+            case Types.BIT:
+                if (field.getOID() == Oid.BOOL) {
+                    return getBoolean(columnIndex);
+                }
 
-    if (isBinary(i)) {
-      if (oid == Oid.DATE) {
-        return getTimestampUtils().toLocalDateBin(value);
-      } else if (oid == Oid.TIMESTAMP) {
-        return getTimestampUtils().toLocalDateTimeBin(value).toLocalDate();
-      }
-    } else {
-      // string
-      if (oid == Oid.DATE || oid == Oid.TIMESTAMP) {
-        return getTimestampUtils().toLocalDateTime(getString(i)).toLocalDate();
-      }
+                if (field.getOID() == Oid.BIT) {
+                    // Let's peek at the data - I tried to use the field.getLength() but it returns 65535 and
+                    // it doesn't reflect the real length of the field, which is odd.
+                    // If we have 1 byte, it's a bit(1) and return a boolean to preserve the backwards
+                    // compatibility. If the value is null, it doesn't really matter
+                    byte[] data = getRawValue(columnIndex);
+                    if (data == null || data.length == 1) {
+                        return getBoolean(columnIndex);
+                    }
+                }
+                // Returning null here will lead to another value processing path for the bit field
+                // which will return a PGobject
+                return null;
+            case Types.SQLXML:
+                return getSQLXML(columnIndex);
+            case Types.TINYINT:
+            case Types.SMALLINT:
+            case Types.INTEGER:
+                return getInt(columnIndex);
+            case Types.BIGINT:
+                return getLong(columnIndex);
+            case Types.NUMERIC:
+            case Types.DECIMAL:
+                return getNumeric(columnIndex,
+                        field.getMod() == -1 ? -1 : ((field.getMod() - 4) & 0xffff), true);
+            case Types.REAL:
+                return getFloat(columnIndex);
+            case Types.FLOAT:
+            case Types.DOUBLE:
+                return getDouble(columnIndex);
+            case Types.CHAR:
+            case Types.VARCHAR:
+            case Types.LONGVARCHAR:
+                return getString(columnIndex);
+            case Types.DATE:
+                return getDate(columnIndex);
+            case Types.TIME:
+                return getTime(columnIndex);
+            case Types.TIMESTAMP:
+                return getTimestamp(columnIndex, null);
+            case Types.BINARY:
+            case Types.VARBINARY:
+            case Types.LONGVARBINARY:
+                return getBytes(columnIndex);
+            case Types.ARRAY:
+                return getArray(columnIndex);
+            case Types.CLOB:
+                return getClob(columnIndex);
+            case Types.BLOB:
+                return getBlob(columnIndex);
+
+            default:
+                String type = getPGType(columnIndex);
+
+                // if the backend doesn't know the type then coerce to String
+                if ("unknown".equals(type)) {
+                    return getString(columnIndex);
+                }
+
+                if ("uuid".equals(type)) {
+                    if (isBinary(columnIndex)) {
+                        return getUUID(thisRow.get(columnIndex - 1));
+                    }
+                    return getUUID(getString(columnIndex));
+                }
+
+                // Specialized support for ref cursors is neater.
+                if ("refcursor".equals(type)) {
+                    // Fetch all results.
+                    String cursorName = getString(columnIndex);
+
+                    StringBuilder sb = new StringBuilder("FETCH ALL IN ");
+                    Utils.escapeIdentifier(sb, cursorName);
+
+                    // nb: no BEGIN triggered here. This is fine. If someone
+                    // committed, and the cursor was not holdable (closing the
+                    // cursor), we avoid starting a new xact and promptly causing
+                    // it to fail. If the cursor *was* holdable, we don't want a
+                    // new xact anyway since holdable cursor state isn't affected
+                    // by xact boundaries. If our caller didn't commit at all, or
+                    // autocommit was on, then we wouldn't issue a BEGIN anyway.
+                    //
+                    // We take the scrollability from the statement, but until
+                    // we have updatable cursors it must be readonly.
+                    ResultSet rs =
+                            connection.execSQLQuery(sb.toString(), resultsettype, ResultSet.CONCUR_READ_ONLY);
+                    ((PgResultSet) rs).setRefCursor(cursorName);
+                    // In long-running transactions these backend cursors take up memory space
+                    // we could close in rs.close(), but if the transaction is closed before the result set,
+                    // then
+                    // the cursor no longer exists
+                    ((PgResultSet) rs).closeRefCursor();
+                    return rs;
+                }
+                if ("hstore".equals(type)) {
+                    if (isBinary(columnIndex)) {
+                        return HStoreConverter.fromBytes(thisRow.get(columnIndex - 1),
+                                connection.getEncoding());
+                    }
+                    return HStoreConverter.fromString(getString(columnIndex));
+                }
+
+                // Caller determines what to do (JDBC3 overrides in this case)
+                return null;
+        }
     }
 
-    throw new PSQLException(
-        GT.tr("Cannot convert the column of type {0} to requested type {1}.",
-            Oid.toString(oid), "java.time.LocalDate"),
-        PSQLState.DATA_TYPE_MISMATCH);
-  }
-
-  private LocalTime getLocalTime(int i) throws SQLException {
-    byte[] value = getRawValue(i);
-    if (value == null) {
-      return null;
+    /**
+     * Ensures the result set is open (via checkClosed) and scrollable.
+     *
+     * @throws SQLException with INVALID_CURSOR_STATE if the result set type is
+     *     TYPE_FORWARD_ONLY, or whatever checkClosed throws when already closed
+     */
+    private void checkScrollable() throws SQLException {
+        checkClosed();
+        if (resultsettype == ResultSet.TYPE_FORWARD_ONLY) {
+            throw new PSQLException(
+                    GT.tr("Operation requires a scrollable ResultSet, but this ResultSet is FORWARD_ONLY."),
+                    PSQLState.INVALID_CURSOR_STATE);
         }
     }
 
-    int col = i - 1;
-    int oid = fields[col].getOID();
-
-    if (oid == Oid.TIME) {
-      if (isBinary(i)) {
-        return getTimestampUtils().toLocalTimeBin(value);
-      } else {
-        return getTimestampUtils().toLocalTime(getString(i));
-      }
-    }
-
-    throw new PSQLException(
-        GT.tr("Cannot convert the column of type {0} to requested type {1}.",
-            Oid.toString(oid), "java.time.LocalTime"),
-        PSQLState.DATA_TYPE_MISMATCH);
-  }
-
-  @Override
-  public Date getDate(
-      String c, Calendar cal) throws SQLException {
-    return getDate(findColumn(c), cal);
-  }
-
-  @Override
-  public Time getTime(
-      String c, Calendar cal) throws SQLException {
-    return getTime(findColumn(c), cal);
-  }
-
-  @Override
-  public Timestamp getTimestamp(
-      String c, Calendar cal) throws SQLException {
-    return getTimestamp(findColumn(c), cal);
-  }
-
-  @Override
-  public int getFetchDirection() throws SQLException {
-    checkClosed();
-    return fetchdirection;
-  }
-
-  public Object getObjectImpl(
-      String columnName, Map<String, Class<?>> map) throws SQLException {
-    return getObjectImpl(findColumn(columnName), map);
-  }
-
-  /*
-   * This checks against map for the type of column i, and if found returns an object based on that
-   * mapping. The class must implement the SQLData interface.
-   */
-  public Object getObjectImpl(
-      int i, Map<String, Class<?>> map) throws SQLException {
-    checkClosed();
-    if (map == null || map.isEmpty()) {
-      return getObject(i);
-    }
-    throw Driver.notImplemented(this.getClass(), "getObjectImpl(int,Map)");
-  }
-
-  @Override
-  public Ref getRef(String columnName) throws SQLException {
-    return getRef(findColumn(columnName));
-  }
-
-  @Override
-  public Ref getRef(int i) throws SQLException {
-    checkClosed();
-    // The backend doesn't yet have SQL3 REF types
-    throw Driver.notImplemented(this.getClass(), "getRef(int)");
-  }
-
-  @Override
-  public int getRow() throws SQLException {
-    checkClosed();
-
-    if (onInsertRow) {
-      return 0;
-    }
-
-    final int rowsSize = rows.size();
-
-    if (currentRow < 0 || currentRow >= rowsSize) {
-      return 0;
-    }
-
-    return rowOffset + currentRow + 1;
-  }
-
-  // This one needs some thought, as not all ResultSets come from a statement
-  @Override
-  public Statement getStatement() throws SQLException {
-    checkClosed();
-    return statement;
-  }
-
-  @Override
-  public int getType() throws SQLException {
-    checkClosed();
-    return resultsettype;
-  }
-
-  
-  @Override
-  public boolean isAfterLast() throws SQLException {
-    checkClosed();
-    if (onInsertRow) {
-      return false;
-    }
-
-    final int rowsSize = rows.size();
-    if (rowOffset + rowsSize == 0) {
-      return false;
-    }
-    return currentRow >= rowsSize;
-  }
-
-  
-  @Override
-  public boolean isBeforeFirst() throws SQLException {
-    checkClosed();
-    if (onInsertRow) {
-      return false;
-    }
-
-    return (rowOffset + currentRow) < 0 && !rows.isEmpty();
-  }
-
-  @Override
-  public boolean isFirst() throws SQLException {
-    checkClosed();
-    if (onInsertRow) {
-      return false;
-    }
-
-    final int rowsSize = rows.size();
-    if (rowOffset + rowsSize == 0) {
-      return false;
-    }
-
-    return (rowOffset + currentRow) == 0;
-  }
-
-  @Override
-  public boolean isLast() throws SQLException {
-    checkClosed();
-    if (onInsertRow) {
-      return false;
-    }
-
-    List<Tuple> rows = this.rows;
-    final int rowsSize = rows.size();
-
-    if (rowsSize == 0) {
-      return false; // No rows.
-    }
-
-    if (currentRow != (rowsSize - 1)) {
-      return false; // Not on the last row of this block.
-    }
-
-    // We are on the last row of the current block.
-
-    ResultCursor cursor = this.cursor;
-    if (cursor == null) {
-      // This is the last block and therefore the last row.
-      return true;
-    }
-
-    if (maxRows > 0 && rowOffset + currentRow == maxRows) {
-      // We are implicitly limited by maxRows.
-      return true;
-    }
-
-    // Now the more painful case begins.
-    // We are on the last row of the current block, but we don't know if the
-    // current block is the last block; we must try to fetch some more data to
-    // find out.
-
-    // We do a fetch of the next block, then prepend the current row to that
-    // block (so currentRow == 0). This works as the current row
-    // must be the last row of the current block if we got this far.
-
-    rowOffset += rowsSize - 1; // Discarding all but one row.
-
-    // Work out how many rows maxRows will let us fetch.
-    int fetchRows = fetchSize;
-    int adaptiveFetchRows = connection.getQueryExecutor()
-        .getAdaptiveFetchSize(adaptiveFetch, cursor);
-
-    if (adaptiveFetchRows != -1) {
-      fetchRows = adaptiveFetchRows;
-    }
-
-    if (maxRows != 0) {
-      if (fetchRows == 0 || rowOffset + fetchRows > maxRows) {
-        // Fetch would exceed maxRows, limit it.
-        fetchRows = maxRows - rowOffset;
-      }
-    }
-
-    // Do the actual fetch.
-    connection.getQueryExecutor()
-        .fetch(cursor, new CursorResultHandler(), fetchRows, adaptiveFetch);
-
-    // After fetch, update last used fetch size (could be useful during adaptive fetch).
-    lastUsedFetchSize = fetchRows;
-
-    rows = this.rows;
-    // Now prepend our one saved row and move to it.
-    rows.add(0, thisRow);
-    currentRow = 0;
-
-    // Finally, now we can tell if we're the last row or not.
-    return rows.size() == 1;
-  }
-
-  @Override
-  public boolean last() throws SQLException {
-    checkScrollable();
-    List<Tuple> rows = this.rows;
-    final int rowsSize = rows.size();
-    if (rowsSize <= 0) {
-      return false;
-    }
-
-    currentRow = rowsSize - 1;
-    initRowBuffer();
-    onInsertRow = false;
-
-    return true;
-  }
-
-  @Override
-  public boolean previous() throws SQLException {
-    checkScrollable();
-
-    if (onInsertRow) {
-      throw new PSQLException(GT.tr("Can''t use relative move methods while on the insert row."),
-          PSQLState.INVALID_CURSOR_STATE);
-    }
-
-    if (currentRow - 1 < 0) {
-      currentRow = -1;
-      thisRow = null;
-      rowBuffer = null;
-      return false;
-    } else {
-      currentRow--;
-    }
-    initRowBuffer();
-    return true;
-  }
-
-  @Override
-  public boolean relative(int rows) throws SQLException {
-    checkScrollable();
-
-    if (onInsertRow) {
-      throw new PSQLException(GT.tr("Can''t use relative move methods while on the insert row."),
-          PSQLState.INVALID_CURSOR_STATE);
-    }
-
-    // have to add 1 since absolute expects a 1-based index
-    int index = currentRow + 1 + rows;
-    if (index < 0) {
-      beforeFirst();
-      return false;
-    }
-    return absolute(index);
-  }
-
-  @Override
-  public void setFetchDirection(int direction) throws SQLException {
-    checkClosed();
-    switch (direction) {
-      case ResultSet.FETCH_FORWARD:
-        break;
-      case ResultSet.FETCH_REVERSE:
-      case ResultSet.FETCH_UNKNOWN:
+    @Override
+    public boolean absolute(int index) throws SQLException {
         checkScrollable();
-        break;
-      default:
-        throw new PSQLException(GT.tr("Invalid fetch direction constant: {0}.", direction),
-            PSQLState.INVALID_PARAMETER_VALUE);
-    }
 
-    this.fetchdirection = direction;
-  }
+        // index is 1-based, but internally we use 0-based indices
+        int internalIndex;
 
-  @Override
-  public void cancelRowUpdates() throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      checkClosed();
-      if (onInsertRow) {
-        throw new PSQLException(GT.tr("Cannot call cancelRowUpdates() when on the insert row."),
-            PSQLState.INVALID_CURSOR_STATE);
-      }
-
-      if (doingUpdates) {
-        doingUpdates = false;
-
-        clearRowBuffer(true);
-      }
-    }
-  }
-
-  @Override
-  public void deleteRow() throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      checkUpdateable();
-
-      if (onInsertRow) {
-        throw new PSQLException(GT.tr("Cannot call deleteRow() when on the insert row."),
-            PSQLState.INVALID_CURSOR_STATE);
-      }
-
-      if (isBeforeFirst()) {
-        throw new PSQLException(
-            GT.tr(
-                "Currently positioned before the start of the ResultSet.  You cannot call deleteRow() here."),
-            PSQLState.INVALID_CURSOR_STATE);
-      }
-      if (isAfterLast()) {
-        throw new PSQLException(
-            GT.tr(
-                "Currently positioned after the end of the ResultSet.  You cannot call deleteRow() here."),
-            PSQLState.INVALID_CURSOR_STATE);
-      }
-      List<Tuple> rows = this.rows;
-      if (rows.isEmpty()) {
-        throw new PSQLException(GT.tr("There are no rows in this ResultSet."),
-            PSQLState.INVALID_CURSOR_STATE);
-      }
-
-      List<PrimaryKey> primaryKeys = this.primaryKeys;
-      int numKeys = primaryKeys.size();
-      PreparedStatement deleteStatement = this.deleteStatement;
-      if (deleteStatement == null) {
-        StringBuilder deleteSQL =
-            new StringBuilder("DELETE FROM ").append(onlyTable).append(tableName).append(" where ");
-
-        for (int i = 0; i < numKeys; i++) {
-          Utils.escapeIdentifier(deleteSQL, primaryKeys.get(i).name);
-          deleteSQL.append(" = ?");
-          if (i < numKeys - 1) {
-            deleteSQL.append(" and ");
-          }
+        if (index == 0) {
+            beforeFirst();
+            return false;
         }
 
-        this.deleteStatement = deleteStatement = connection.prepareStatement(deleteSQL.toString());
-      }
-      deleteStatement.clearParameters();
+        final int rowsSize = rows.size();
 
-      for (int i = 0; i < numKeys; i++) {
-        deleteStatement.setObject(i + 1, primaryKeys.get(i).getValue());
-      }
-
-      deleteStatement.executeUpdate();
-
-      rows.remove(currentRow);
-      currentRow--;
-      moveToCurrentRow();
-    }
-  }
-
-  @Override
-  public void insertRow() throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      checkUpdateable();
-      if (!onInsertRow) {
-        throw new PSQLException(GT.tr("Not on the insert row."), PSQLState.INVALID_CURSOR_STATE);
-      }
-      HashMap<String, Object> updateValues = this.updateValues;
-      if (updateValues == null || updateValues.isEmpty()) {
-        throw new PSQLException(GT.tr("You must specify at least one column value to insert a row."),
-            PSQLState.INVALID_PARAMETER_VALUE);
-      }
-
-      // loop through the keys in the insertTable and create the sql statement
-      // we have to create the sql every time since the user could insert different
-      // columns each time
-
-      StringBuilder insertSQL = new StringBuilder("INSERT INTO ").append(tableName).append(" (");
-      StringBuilder paramSQL = new StringBuilder(") values (");
-
-      Iterator<String> columnNames = updateValues.keySet().iterator();
-      int numColumns = updateValues.size();
-
-      for (int i = 0; columnNames.hasNext(); i++) {
-        String columnName = columnNames.next();
-
-        Utils.escapeIdentifier(insertSQL, columnName);
-        if (i < numColumns - 1) {
-          insertSQL.append(", ");
-          paramSQL.append("?,");
+        // if index<0, count from the end of the result set, but check
+        // to be sure that it is not beyond the first index
+        if (index < 0) {
+            if (index >= -rowsSize) {
+                internalIndex = rowsSize + index;
+            } else {
+                beforeFirst();
+                return false;
+            }
         } else {
-          paramSQL.append("?)");
+            // must be the case that index>0,
+            // find the correct place, assuming that
+            // the index is not too large
+            if (index <= rowsSize) {
+                internalIndex = index - 1;
+            } else {
+                afterLast();
+                return false;
+            }
         }
 
-      }
+        currentRow = internalIndex;
+        initRowBuffer();
+        onInsertRow = false;
 
-      insertSQL.append(paramSQL.toString());
-      PreparedStatement insertStatement = null;
-
-      Tuple rowBuffer = this.rowBuffer;
-      try {
-        insertStatement = connection.prepareStatement(insertSQL.toString(), Statement.RETURN_GENERATED_KEYS);
-
-        Iterator<Object> values = updateValues.values().iterator();
-
-        for (int i = 1; values.hasNext(); i++) {
-          insertStatement.setObject(i, values.next());
-        }
-
-        insertStatement.executeUpdate();
-
-        if (usingOID) {
-          // we have to get the last inserted OID and put it in the resultset
-
-          long insertedOID = ((PgStatement) insertStatement).getLastOID();
-
-          updateValues.put("oid", insertedOID);
-
-        }
-
-        // update the underlying row to the new inserted data
-        updateRowBuffer(insertStatement, rowBuffer, updateValues);
-      } finally {
-        JdbcBlackHole.close(insertStatement);
-      }
-
-      rows.add(rowBuffer);
-
-      // we should now reflect the current data in thisRow
-      // that way getXXX will get the newly inserted data
-      thisRow = rowBuffer;
-
-      // need to clear this in case of another insert
-      clearRowBuffer(false);
+        return true;
     }
-  }
 
-  @Override
-  public void moveToCurrentRow() throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      checkUpdateable();
+    /**
+     * Moves the cursor to after the last row. Positioning is skipped for an empty
+     * ResultSet; insert-row mode is always left and the row buffers are cleared.
+     */
+    @Override
+    public void afterLast() throws SQLException {
+        checkScrollable();
 
-      if (currentRow < 0 || currentRow >= rows.size()) {
+        final int rowsSize = rows.size();
+        if (rowsSize > 0) {
+            currentRow = rowsSize;
+        }
+
+        onInsertRow = false;
         thisRow = null;
         rowBuffer = null;
-      } else {
+    }
+
+    /**
+     * Moves the cursor to before the first row. Positioning is skipped for an
+     * empty ResultSet; insert-row mode is left and the row buffers are cleared.
+     */
+    @Override
+    public void beforeFirst() throws SQLException {
+        checkScrollable();
+
+        if (!rows.isEmpty()) {
+            currentRow = -1;
+        }
+
+        onInsertRow = false;
+        thisRow = null;
+        rowBuffer = null;
+    }
+
+    /**
+     * Moves the cursor to the first row of this ResultSet.
+     *
+     * @return true if now positioned on a row, false when the ResultSet is empty
+     */
+    @Override
+    public boolean first() throws SQLException {
+        checkScrollable();
+
+        if (rows.size() <= 0) {
+            return false;
+        }
+
+        currentRow = 0;
         initRowBuffer();
-      }
+        onInsertRow = false;
 
-      onInsertRow = false;
-      doingUpdates = false;
+        return true;
     }
-  }
 
-  @Override
-  public void moveToInsertRow() throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      checkUpdateable();
-
-      // make sure the underlying data is null
-      clearRowBuffer(false);
-
-      onInsertRow = true;
-      doingUpdates = false;
+    /** Retrieves an SQL ARRAY value by column name; delegates to {@link #getArray(int)}. */
+    @Override
+    public Array getArray(String colName) throws SQLException {
+        return getArray(findColumn(colName));
     }
-  }
 
-  // rowBuffer is the temporary storage for the row
-  private void clearRowBuffer(boolean copyCurrentRow) throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      // inserts want an empty array while updates want a copy of the current row
-      if (copyCurrentRow) {
-        rowBuffer = thisRow.updateableCopy();
-      } else {
-        rowBuffer = new Tuple(fields.length);
-      }
-
-      // clear the updateValues hash map for the next set of updates
-      HashMap<String, Object> updateValues = this.updateValues;
-      if (updateValues != null) {
-        updateValues.clear();
-      }
+    /** Factory hook for arrays received in binary format; subclasses may override. */
+    protected Array makeArray(int oid, byte[] value) throws SQLException {
+        return new PgArray(connection, oid, value);
     }
-  }
 
-  @Override
-  public boolean rowDeleted() throws SQLException {
-    checkClosed();
-    return false;
-  }
+    /** Factory hook for arrays received in text format; subclasses may override. */
+    protected Array makeArray(int oid, String value) throws SQLException {
+        return new PgArray(connection, oid, value);
+    }
 
-  @Override
-  public boolean rowInserted() throws SQLException {
-    checkClosed();
-    return false;
-  }
-
-  @Override
-  public boolean rowUpdated() throws SQLException {
-    checkClosed();
-    return false;
-  }
-
-  @Override
-  public void updateAsciiStream(int columnIndex,
-      InputStream x, int length)
-      throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      if (x == null) {
-        updateNull(columnIndex);
-        return;
-      }
-
-      try {
-        InputStreamReader reader = new InputStreamReader(x, StandardCharsets.US_ASCII);
-        char[] data = new char[length];
-        int numRead = 0;
-        while (true) {
-          int n = reader.read(data, numRead, length - numRead);
-          if (n == -1) {
-            break;
-          }
-
-          numRead += n;
-
-          if (numRead == length) {
-            break;
-          }
+    @Override
+    public Array getArray(int i) throws SQLException {
+        byte[] value = getRawValue(i);
+        if (value == null) {
+            return null;
         }
-        updateString(columnIndex, new String(data, 0, numRead));
-      } catch (IOException ie) {
-        throw new PSQLException(GT.tr("Provided InputStream failed."), null, ie);
-      }
-    }
-  }
 
-  @Override
-  public void updateBigDecimal(int columnIndex, BigDecimal x)
-      throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      updateValue(columnIndex, x);
-    }
-  }
-
-  @Override
-  public void updateBinaryStream(int columnIndex,
-      InputStream x, int length)
-      throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      if (x == null) {
-        updateNull(columnIndex);
-        return;
-      }
-
-      byte[] data = new byte[length];
-      int numRead = 0;
-      try {
-        while (true) {
-          int n = x.read(data, numRead, length - numRead);
-          if (n == -1) {
-            break;
-          }
-
-          numRead += n;
-
-          if (numRead == length) {
-            break;
-          }
+        int oid = fields[i - 1].getOID();
+        if (isBinary(i)) {
+            return makeArray(oid, value);
         }
-      } catch (IOException ie) {
-        throw new PSQLException(GT.tr("Provided InputStream failed."), null, ie);
-      }
-
-      if (numRead == length) {
-        updateBytes(columnIndex, data);
-      } else {
-        // the stream contained less data than they said
-        // perhaps this is an error?
-        byte[] data2 = new byte[numRead];
-        System.arraycopy(data, 0, data2, 0, numRead);
-        updateBytes(columnIndex, data2);
-      }
+        return makeArray(oid, getFixedString(i));
     }
-  }
 
-  @Override
-  public void updateBoolean(int columnIndex, boolean x) throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      updateValue(columnIndex, x);
+    /** Returns the column as a BigDecimal; scale -1 means "keep the value's own scale". */
+    @Override
+    public BigDecimal getBigDecimal(int columnIndex) throws SQLException {
+        return getBigDecimal(columnIndex, -1);
     }
-  }
 
-  @Override
-  public void updateByte(int columnIndex, byte x) throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      updateValue(columnIndex, String.valueOf(x));
+    /** Returns the named column as a BigDecimal; delegates to the index-based overload. */
+    @Override
+    public BigDecimal getBigDecimal(String columnName) throws SQLException {
+        return getBigDecimal(findColumn(columnName));
     }
-  }
 
-  @Override
-  public void updateBytes(int columnIndex, byte [] x) throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      updateValue(columnIndex, x);
+    /** Retrieves a Blob by column name; delegates to {@link #getBlob(int)}. */
+    @Override
+    public Blob getBlob(String columnName) throws SQLException {
+        return getBlob(findColumn(columnName));
     }
-  }
 
-  @Override
-  public void updateCharacterStream(int columnIndex,
-      Reader x, int length)
-      throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      if (x == null) {
-        updateNull(columnIndex);
-        return;
-      }
+    /** Factory hook creating a Blob backed by the given large-object OID; subclasses may override. */
+    protected Blob makeBlob(long oid) throws SQLException {
+        return new PgBlob(connection, oid);
+    }
 
-      try {
-        char[] data = new char[length];
-        int numRead = 0;
-        while (true) {
-          int n = x.read(data, numRead, length - numRead);
-          if (n == -1) {
-            break;
-          }
+    @Override
 
-          numRead += n;
-
-          if (numRead == length) {
-            break;
-          }
+    public Blob getBlob(int i) throws SQLException {
+        byte[] value = getRawValue(i);
+        if (value == null) {
+            return null;
         }
-        updateString(columnIndex, new String(data, 0, numRead));
-      } catch (IOException ie) {
-        throw new PSQLException(GT.tr("Provided Reader failed."), null, ie);
-      }
-    }
-  }
 
-  @Override
-  public void updateDate(int columnIndex,
-      Date x) throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      updateValue(columnIndex, x);
-    }
-  }
-
-  @Override
-  public void updateDouble(int columnIndex, double x) throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      updateValue(columnIndex, x);
-    }
-  }
-
-  @Override
-  public void updateFloat(int columnIndex, float x) throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      updateValue(columnIndex, x);
-    }
-  }
-
-  @Override
-  public void updateInt(int columnIndex, int x) throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      updateValue(columnIndex, x);
-    }
-  }
-
-  @Override
-  public void updateLong(int columnIndex, long x) throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      updateValue(columnIndex, x);
-    }
-  }
-
-  @Override
-  public void updateNull(int columnIndex) throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      checkColumnIndex(columnIndex);
-      String columnTypeName = getPGType(columnIndex);
-      updateValue(columnIndex, new NullObject(columnTypeName));
-    }
-  }
-
-  @Override
-  public void updateObject(
-      int columnIndex, Object x) throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      updateValue(columnIndex, x);
-    }
-  }
-
-  @Override
-  public void updateObject(
-      int columnIndex, Object x, int scale) throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      this.updateObject(columnIndex, x);
-    }
-  }
-
-  @Override
-  public void refreshRow() throws SQLException {
-    checkUpdateable();
-    if (onInsertRow) {
-      throw new PSQLException(GT.tr("Can''t refresh the insert row."),
-          PSQLState.INVALID_CURSOR_STATE);
+        return makeBlob(getLong(i));
     }
 
-    if (isBeforeFirst() || isAfterLast() || rows.isEmpty()) {
-      return;
+    /** Retrieves a character stream by column name; delegates to {@link #getCharacterStream(int)}. */
+    @Override
+    public Reader getCharacterStream(String columnName) throws SQLException {
+        return getCharacterStream(findColumn(columnName));
     }
 
-    StringBuilder selectSQL = new StringBuilder("select ");
+    @Override
+    public Reader getCharacterStream(int i) throws SQLException {
+        String value = getString(i);
+        if (value == null) {
+            return null;
+        }
 
-    ResultSetMetaData rsmd = getMetaData();
-    PGResultSetMetaData pgmd = (PGResultSetMetaData) rsmd;
-    for (int i = 1; i <= rsmd.getColumnCount(); i++) {
-      if (i > 1) {
-        selectSQL.append(", ");
-      }
-      Utils.escapeIdentifier(selectSQL, pgmd.getBaseColumnName(i));
+        // Version 7.2 supports AsciiStream for all the PG text types
+        // As the spec/javadoc for this method indicate this is to be used for
+        // large text values (i.e. LONGVARCHAR) PG doesn't have a separate
+        // long string datatype, but with toast the text datatype is capable of
+        // handling very large values. Thus the implementation ends up calling
+        // getString() since there is no current way to stream the value from the server
+        return new CharArrayReader(value.toCharArray());
     }
-    selectSQL.append(" from ").append(onlyTable).append(tableName).append(" where ");
 
-    List<PrimaryKey> primaryKeys = this.primaryKeys;
-    int numKeys = primaryKeys.size();
-
-    for (int i = 0; i < numKeys; i++) {
-
-      PrimaryKey primaryKey = primaryKeys.get(i);
-      Utils.escapeIdentifier(selectSQL, primaryKey.name);
-      selectSQL.append(" = ?");
-
-      if (i < numKeys - 1) {
-        selectSQL.append(" and ");
-      }
+    /** Retrieves a Clob by column name; delegates to {@link #getClob(int)}. */
+    @Override
+    public Clob getClob(String columnName) throws SQLException {
+        return getClob(findColumn(columnName));
     }
-    String sqlText = selectSQL.toString();
-    if (connection.getLogger().isLoggable(Level.FINE)) {
-      connection.getLogger().log(Level.FINE, "selecting {0}", sqlText);
+
+    /** Factory hook creating a Clob backed by the given large-object OID; subclasses may override. */
+    protected Clob makeClob(long oid) throws SQLException {
+        return new PgClob(connection, oid);
+    }
-    // because updateable result sets do not yet support binary transfers we must request refresh
-    // with updateable result set to get field data in correct format
-    PreparedStatement selectStatement = null;
-    try {
-      selectStatement = connection.prepareStatement(sqlText,
-          ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE);
 
-      for (int i = 0; i < numKeys; i++) {
-        selectStatement.setObject(i + 1, primaryKeys.get(i).getValue());
-      }
+    @Override
 
-      PgResultSet rs = (PgResultSet) selectStatement.executeQuery();
+    public Clob getClob(int i) throws SQLException {
+        byte[] value = getRawValue(i);
+        if (value == null) {
+            return null;
+        }
 
-      if (rs.next()) {
-        // we know that the row is updatable as it was tested above.
-        if ( rs.thisRow == null ) {
-          rowBuffer = null;
+        return makeClob(getLong(i));
+    }
+
+    /** Returns the concurrency mode (CONCUR_READ_ONLY / CONCUR_UPDATABLE) this ResultSet was created with. */
+    @Override
+    public int getConcurrency() throws SQLException {
+        checkClosed();
+        return resultsetconcurrency;
+    }
+
+    /**
+     * Returns the column as a {@link Date}, using {@code cal} (or the connection's
+     * default calendar when null) to interpret timezone-less values.
+     *
+     * @param i 1-based column index
+     * @param cal calendar supplying the timezone; may be null
+     * @return the date value, or null when the column is SQL NULL
+     * @throws SQLException if the column type cannot be converted to a date
+     */
+    @Override
+    public Date getDate(
+            int i, Calendar cal) throws SQLException {
+        byte[] value = getRawValue(i);
+        if (value == null) {
+            return null;
+        }
+
+        if (cal == null) {
+            cal = getDefaultCalendar();
+        }
+        if (isBinary(i)) {
+            int col = i - 1;
+            int oid = fields[col].getOID();
+            TimeZone tz = cal.getTimeZone();
+            if (oid == Oid.DATE) {
+                return getTimestampUtils().toDateBin(tz, value);
+            } else if (oid == Oid.TIMESTAMP || oid == Oid.TIMESTAMPTZ) {
+                // If backend provides just TIMESTAMP, we use "cal" timezone
+                // If backend provides TIMESTAMPTZ, we ignore "cal" as we know true instant value
+                Timestamp timestamp = getTimestamp(i, cal);
+                // Here we just truncate date to 00:00 in a given time zone
+                return getTimestampUtils().convertToDate(timestamp.getTime(), tz);
+            } else {
+                throw new PSQLException(
+                        GT.tr("Cannot convert the column of type {0} to requested type {1}.",
+                                Oid.toString(oid), "date"),
+                        PSQLState.DATA_TYPE_MISMATCH);
+            }
+        }
+
+        // Text format: parse the server-sent string representation
+        return getTimestampUtils().toDate(cal, getString(i));
+    }
+
+    /**
+     * Returns the column as a {@link Time}, using {@code cal} (or the connection's
+     * default calendar when null) to interpret timezone-less values. TIMESTAMP and
+     * TIMESTAMPTZ columns are also accepted; their date part is truncated.
+     *
+     * @param i 1-based column index
+     * @param cal calendar supplying the timezone; may be null
+     * @return the time value, or null when the column is SQL NULL
+     * @throws SQLException if the column type cannot be converted to a time
+     */
+    @Override
+    public Time getTime(
+            int i, Calendar cal) throws SQLException {
+        byte[] value = getRawValue(i);
+        if (value == null) {
+            return null;
+        }
+
+        if (cal == null) {
+            cal = getDefaultCalendar();
+        }
+        if (isBinary(i)) {
+            int col = i - 1;
+            int oid = fields[col].getOID();
+            TimeZone tz = cal.getTimeZone();
+            if (oid == Oid.TIME || oid == Oid.TIMETZ) {
+                return getTimestampUtils().toTimeBin(tz, value);
+            } else if (oid == Oid.TIMESTAMP || oid == Oid.TIMESTAMPTZ) {
+                // If backend provides just TIMESTAMP, we use "cal" timezone
+                // If backend provides TIMESTAMPTZ, we ignore "cal" as we know true instant value
+                Timestamp timestamp = getTimestamp(i, cal);
+                if (timestamp == null) {
+                    return null;
+                }
+                long timeMillis = timestamp.getTime();
+                if (oid == Oid.TIMESTAMPTZ) {
+                    // time zone == UTC since BINARY "timestamp with time zone" is always sent in UTC
+                    // So we truncate days
+                    return new Time(timeMillis % TimeUnit.DAYS.toMillis(1));
+                }
+                // Here we just truncate date part
+                return getTimestampUtils().convertToTime(timeMillis, tz);
+            } else {
+                throw new PSQLException(
+                        GT.tr("Cannot convert the column of type {0} to requested type {1}.",
+                                Oid.toString(oid), "time"),
+                        PSQLState.DATA_TYPE_MISMATCH);
+            }
+        }
+
+        // Text format: parse the server-sent string representation
+        String string = getString(i);
+        return getTimestampUtils().toTime(cal, string);
+    }
+
+    /**
+     * Returns the column as a {@link Timestamp}, using {@code cal} (or the
+     * connection's default calendar when null) to interpret timezone-less values.
+     *
+     * <p>Per the JDBC spec, TIME/TIMETZ/DATE columns are also convertible to a
+     * Timestamp; TIME values are mapped onto the epoch date 1970-01-01.</p>
+     *
+     * @param i 1-based column index
+     * @param cal calendar supplying the timezone for timezone-less values; may be null
+     * @return the timestamp value, or null when the column is SQL NULL
+     * @throws SQLException if the column type cannot be converted to a timestamp
+     */
+    @Override
+    public Timestamp getTimestamp(
+            int i, Calendar cal) throws SQLException {
+
+        byte[] value = getRawValue(i);
+        if (value == null) {
+            return null;
+        }
+
+        if (cal == null) {
+            cal = getDefaultCalendar();
+        }
+        int col = i - 1;
+        int oid = fields[col].getOID();
+
+        if (isBinary(i)) {
+            byte[] row = thisRow.get(col);
+            if (oid == Oid.TIMESTAMPTZ || oid == Oid.TIMESTAMP) {
+                boolean hasTimeZone = oid == Oid.TIMESTAMPTZ;
+                TimeZone tz = cal.getTimeZone();
+                return getTimestampUtils().toTimestampBin(tz, row, hasTimeZone);
+            } else if (oid == Oid.TIME) {
+                // JDBC spec says getTimestamp of Time and Date must be supported
+                Timestamp tsWithMicros = getTimestampUtils().toTimestampBin(cal.getTimeZone(), row, false);
+                // If server sends us a TIME, we ensure java counterpart has date of 1970-01-01
+                Timestamp tsUnixEpochDate = new Timestamp(getTime(i, cal).getTime());
+                tsUnixEpochDate.setNanos(tsWithMicros.getNanos());
+                return tsUnixEpochDate;
+            } else if (oid == Oid.TIMETZ) {
+                TimeZone tz = cal.getTimeZone();
+                byte[] timeBytesWithoutTimeZone = Arrays.copyOfRange(row, 0, 8);
+                Timestamp tsWithMicros = getTimestampUtils().toTimestampBin(tz, timeBytesWithoutTimeZone, false);
+                // If server sends us a TIMETZ, we ensure java counterpart has date of 1970-01-01
+                Timestamp tsUnixEpochDate = new Timestamp(getTime(i, cal).getTime());
+                tsUnixEpochDate.setNanos(tsWithMicros.getNanos());
+                return tsUnixEpochDate;
+            } else if (oid == Oid.DATE) {
+                // BUG FIX: the computed Timestamp was previously discarded (missing
+                // "return"), so binary DATE values fell through to the text-parsing
+                // path below and could not be parsed correctly.
+                return new Timestamp(getDate(i, cal).getTime());
+            } else {
+                throw new PSQLException(
+                        GT.tr("Cannot convert the column of type {0} to requested type {1}.",
+                                Oid.toString(oid), "timestamp"),
+                        PSQLState.DATA_TYPE_MISMATCH);
+            }
+        }
+
+        // If this is actually a timestamptz, the server-provided timezone will override
+        // the one we pass in, which is the desired behaviour. Otherwise, we'll
+        // interpret the timezone-less value in the provided timezone.
+        String string = getString(i);
+        if (oid == Oid.TIME || oid == Oid.TIMETZ) {
+            // If server sends us a TIME, we ensure java counterpart has date of 1970-01-01
+            Timestamp tsWithMicros = getTimestampUtils().toTimestamp(cal, string);
+            Timestamp tsUnixEpochDate = new Timestamp(getTimestampUtils().toTime(cal, string).getTime());
+            tsUnixEpochDate.setNanos(tsWithMicros.getNanos());
+            return tsUnixEpochDate;
+        }
+
+        return getTimestampUtils().toTimestamp(cal, string);
+
+    }
+
+    private OffsetDateTime getOffsetDateTime(int i) throws SQLException {
+        byte[] value = getRawValue(i);
+        if (value == null) {
+            return null;
+        }
+
+        int col = i - 1;
+        int oid = fields[col].getOID();
+
+        // TODO: Disallow getting OffsetDateTime from a non-TZ field
+        if (isBinary(i)) {
+            if (oid == Oid.TIMESTAMPTZ || oid == Oid.TIMESTAMP) {
+                return getTimestampUtils().toOffsetDateTimeBin(value);
+            } else if (oid == Oid.TIMETZ) {
+                // JDBC spec says timetz must be supported
+                return getTimestampUtils().toOffsetTimeBin(value).atDate(LOCAL_DATE_EPOCH);
+            }
         } else {
-          rowBuffer = rs.thisRow.updateableCopy();
+            // string
+
+            if (oid == Oid.TIMESTAMPTZ || oid == Oid.TIMESTAMP) {
+
+                OffsetDateTime offsetDateTime = getTimestampUtils().toOffsetDateTime(getString(i));
+                if (offsetDateTime != OffsetDateTime.MAX && offsetDateTime != OffsetDateTime.MIN) {
+                    return offsetDateTime.withOffsetSameInstant(ZoneOffset.UTC);
+                } else {
+                    return offsetDateTime;
+                }
+
+            }
+            if (oid == Oid.TIMETZ) {
+                return getTimestampUtils().toOffsetDateTime(getString(i));
+            }
         }
-      }
 
-      rows.set(currentRow, rowBuffer);
-      thisRow = rowBuffer;
-
-      connection.getLogger().log(Level.FINE, "done updates");
-
-      rs.close();
-    } finally {
-      JdbcBlackHole.close(selectStatement);
-    }
-  }
-
-  @Override
-  public void updateRow() throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      checkUpdateable();
-
-      if (onInsertRow) {
-        throw new PSQLException(GT.tr("Cannot call updateRow() when on the insert row."),
-            PSQLState.INVALID_CURSOR_STATE);
-      }
-
-      List<Tuple> rows = this.rows;
-      if (isBeforeFirst() || isAfterLast() || rows.isEmpty()) {
         throw new PSQLException(
-            GT.tr(
-                "Cannot update the ResultSet because it is either before the start or after the end of the results."),
-            PSQLState.INVALID_CURSOR_STATE);
-      }
+                GT.tr("Cannot convert the column of type {0} to requested type {1}.",
+                        Oid.toString(oid), "java.time.OffsetDateTime"),
+                PSQLState.DATA_TYPE_MISMATCH);
+    }
 
-      if (!doingUpdates) {
-        return; // No work pending.
-      }
-
-      StringBuilder updateSQL = new StringBuilder("UPDATE " + onlyTable + tableName + " SET  ");
-
-      HashMap<String, Object> updateValues = this.updateValues;
-      int numColumns = updateValues.size();
-      Iterator<String> columns = updateValues.keySet().iterator();
-
-      for (int i = 0; columns.hasNext(); i++) {
-        String column = columns.next();
-        Utils.escapeIdentifier(updateSQL, column);
-        updateSQL.append(" = ?");
-
-        if (i < numColumns - 1) {
-          updateSQL.append(", ");
+    private OffsetTime getOffsetTime(int i) throws SQLException {
+        byte[] value = getRawValue(i);
+        if (value == null) {
+            return null;
         }
-      }
 
-      updateSQL.append(" WHERE ");
+        int col = i - 1;
+        int oid = fields[col].getOID();
 
-      List<PrimaryKey> primaryKeys = this.primaryKeys;
-      int numKeys = primaryKeys.size();
-
-      for (int i = 0; i < numKeys; i++) {
-        PrimaryKey primaryKey = primaryKeys.get(i);
-        Utils.escapeIdentifier(updateSQL, primaryKey.name);
-        updateSQL.append(" = ?");
-
-        if (i < numKeys - 1) {
-          updateSQL.append(" and ");
+        if (oid == Oid.TIMETZ) {
+            if (isBinary(i)) {
+                return getTimestampUtils().toOffsetTimeBin(value);
+            } else {
+                return getTimestampUtils().toOffsetTime(getString(i));
+            }
         }
-      }
 
-      String sqlText = updateSQL.toString();
-      if (connection.getLogger().isLoggable(Level.FINE)) {
-        connection.getLogger().log(Level.FINE, "updating {0}", sqlText);
-      }
-      PreparedStatement updateStatement = null;
-      try {
-        updateStatement = connection.prepareStatement(sqlText);
+        throw new PSQLException(
+                GT.tr("Cannot convert the column of type {0} to requested type {1}.",
+                        Oid.toString(oid), "java.time.OffsetTime"),
+                PSQLState.DATA_TYPE_MISMATCH);
+    }
+
+    private LocalDateTime getLocalDateTime(int i) throws SQLException {
+        byte[] value = getRawValue(i);
+        if (value == null) {
+            return null;
+        }
+
+        int col = i - 1;
+        int oid = fields[col].getOID();
+
+        if (oid == Oid.TIMESTAMP) {
+            if (isBinary(i)) {
+                return getTimestampUtils().toLocalDateTimeBin(value);
+            } else {
+                return getTimestampUtils().toLocalDateTime(getString(i));
+            }
+        }
+
+        throw new PSQLException(
+                GT.tr("Cannot convert the column of type {0} to requested type {1}.",
+                        Oid.toString(oid), "java.time.LocalDateTime"),
+                PSQLState.DATA_TYPE_MISMATCH);
+    }
+
+    private LocalDate getLocalDate(int i) throws SQLException {
+        byte[] value = getRawValue(i);
+        if (value == null) {
+            return null;
+        }
+
+        int col = i - 1;
+        int oid = fields[col].getOID();
+
+        if (isBinary(i)) {
+            if (oid == Oid.DATE) {
+                return getTimestampUtils().toLocalDateBin(value);
+            } else if (oid == Oid.TIMESTAMP) {
+                return getTimestampUtils().toLocalDateTimeBin(value).toLocalDate();
+            }
+        } else {
+            // string
+            if (oid == Oid.DATE || oid == Oid.TIMESTAMP) {
+                return getTimestampUtils().toLocalDateTime(getString(i)).toLocalDate();
+            }
+        }
+
+        throw new PSQLException(
+                GT.tr("Cannot convert the column of type {0} to requested type {1}.",
+                        Oid.toString(oid), "java.time.LocalDate"),
+                PSQLState.DATA_TYPE_MISMATCH);
+    }
+
+    private LocalTime getLocalTime(int i) throws SQLException {
+        byte[] value = getRawValue(i);
+        if (value == null) {
+            return null;
+        }
+
+        int col = i - 1;
+        int oid = fields[col].getOID();
+
+        if (oid == Oid.TIME) {
+            if (isBinary(i)) {
+                return getTimestampUtils().toLocalTimeBin(value);
+            } else {
+                return getTimestampUtils().toLocalTime(getString(i));
+            }
+        }
+
+        throw new PSQLException(
+                GT.tr("Cannot convert the column of type {0} to requested type {1}.",
+                        Oid.toString(oid), "java.time.LocalTime"),
+                PSQLState.DATA_TYPE_MISMATCH);
+    }
+
    /** Name-based variant of {@link #getDate(int, Calendar)}; resolves the label via findColumn. */
    @Override
    public Date getDate(
            String c, Calendar cal) throws SQLException {
        return getDate(findColumn(c), cal);
    }
+
    /** Name-based variant of {@link #getTime(int, Calendar)}; resolves the label via findColumn. */
    @Override
    public Time getTime(
            String c, Calendar cal) throws SQLException {
        return getTime(findColumn(c), cal);
    }
+
    /** Name-based variant of {@link #getTimestamp(int, Calendar)}; resolves the label via findColumn. */
    @Override
    public Timestamp getTimestamp(
            String c, Calendar cal) throws SQLException {
        return getTimestamp(findColumn(c), cal);
    }
+
    /** Returns the fetch direction previously set by {@link #setFetchDirection(int)}. */
    @Override
    public int getFetchDirection() throws SQLException {
        checkClosed();
        return fetchdirection;
    }
+
+    @Override
+    public void setFetchDirection(int direction) throws SQLException {
+        checkClosed();
+        switch (direction) {
+            case ResultSet.FETCH_FORWARD:
+                break;
+            case ResultSet.FETCH_REVERSE:
+            case ResultSet.FETCH_UNKNOWN:
+                checkScrollable();
+                break;
+            default:
+                throw new PSQLException(GT.tr("Invalid fetch direction constant: {0}.", direction),
+                        PSQLState.INVALID_PARAMETER_VALUE);
+        }
+
+        this.fetchdirection = direction;
+    }
+
    /** Name-based variant of {@link #getObjectImpl(int, Map)}; resolves the label via findColumn. */
    public Object getObjectImpl(
            String columnName, Map<String, Class<?>> map) throws SQLException {
        return getObjectImpl(findColumn(columnName), map);
    }
+
+    /*
+     * This checks against map for the type of column i, and if found returns an object based on that
+     * mapping. The class must implement the SQLData interface.
+     */
+    public Object getObjectImpl(
+            int i, Map<String, Class<?>> map) throws SQLException {
+        checkClosed();
+        if (map == null || map.isEmpty()) {
+            return getObject(i);
+        }
+        throw Driver.notImplemented(this.getClass(), "getObjectImpl(int,Map)");
+    }
+
    /** Name-based variant of {@link #getRef(int)}; resolves the label via findColumn. */
    @Override
    public Ref getRef(String columnName) throws SQLException {
        return getRef(findColumn(columnName));
    }
+
    /** Always throws: SQL3 REF types are not supported by the backend. */
    @Override
    public Ref getRef(int i) throws SQLException {
        checkClosed();
        // The backend doesn't yet have SQL3 REF types
        throw Driver.notImplemented(this.getClass(), "getRef(int)");
    }
+
+    @Override
+    public int getRow() throws SQLException {
+        checkClosed();
+
+        if (onInsertRow) {
+            return 0;
+        }
+
+        final int rowsSize = rows.size();
+
+        if (currentRow < 0 || currentRow >= rowsSize) {
+            return 0;
+        }
+
+        return rowOffset + currentRow + 1;
+    }
+
    // This one needs some thought, as not all ResultSets come from a statement
    /** Returns the statement that produced this result set (may be null, e.g. for metadata results). */
    @Override
    public Statement getStatement() throws SQLException {
        checkClosed();
        return statement;
    }
+
    /** Returns the ResultSet type constant (TYPE_FORWARD_ONLY / TYPE_SCROLL_*). */
    @Override
    public int getType() throws SQLException {
        checkClosed();
        return resultsettype;
    }
+
+    @Override
+    public boolean isAfterLast() throws SQLException {
+        checkClosed();
+        if (onInsertRow) {
+            return false;
+        }
+
+        final int rowsSize = rows.size();
+        if (rowOffset + rowsSize == 0) {
+            return false;
+        }
+        return currentRow >= rowsSize;
+    }
+
+    @Override
+    public boolean isBeforeFirst() throws SQLException {
+        checkClosed();
+        if (onInsertRow) {
+            return false;
+        }
+
+        return (rowOffset + currentRow) < 0 && !rows.isEmpty();
+    }
+
+    @Override
+    public boolean isFirst() throws SQLException {
+        checkClosed();
+        if (onInsertRow) {
+            return false;
+        }
+
+        final int rowsSize = rows.size();
+        if (rowOffset + rowsSize == 0) {
+            return false;
+        }
+
+        return (rowOffset + currentRow) == 0;
+    }
+
    /**
     * Reports whether the cursor is on the last row of the whole result.
     *
     * <p>When the result is streamed via a backend cursor this may have to fetch the next
     * block to find out; in that case the current row is re-prepended to the freshly
     * fetched block so the cursor position is preserved (currentRow becomes 0).</p>
     */
    @Override
    public boolean isLast() throws SQLException {
        checkClosed();
        if (onInsertRow) {
            return false;
        }

        List<Tuple> rows = this.rows;
        final int rowsSize = rows.size();

        if (rowsSize == 0) {
            return false; // No rows.
        }

        if (currentRow != (rowsSize - 1)) {
            return false; // Not on the last row of this block.
        }

        // We are on the last row of the current block.

        ResultCursor cursor = this.cursor;
        if (cursor == null) {
            // This is the last block and therefore the last row.
            return true;
        }

        if (maxRows > 0 && rowOffset + currentRow == maxRows) {
            // We are implicitly limited by maxRows.
            return true;
        }

        // Now the more painful case begins.
        // We are on the last row of the current block, but we don't know if the
        // current block is the last block; we must try to fetch some more data to
        // find out.

        // We do a fetch of the next block, then prepend the current row to that
        // block (so currentRow == 0). This works as the current row
        // must be the last row of the current block if we got this far.

        rowOffset += rowsSize - 1; // Discarding all but one row.

        // Work out how many rows maxRows will let us fetch.
        int fetchRows = fetchSize;
        int adaptiveFetchRows = connection.getQueryExecutor()
                .getAdaptiveFetchSize(adaptiveFetch, cursor);

        if (adaptiveFetchRows != -1) {
            // Adaptive fetch sizing is enabled; prefer its suggestion over fetchSize.
            fetchRows = adaptiveFetchRows;
        }

        if (maxRows != 0) {
            if (fetchRows == 0 || rowOffset + fetchRows > maxRows) {
                // Fetch would exceed maxRows, limit it.
                fetchRows = maxRows - rowOffset;
            }
        }

        // Do the actual fetch.
        connection.getQueryExecutor()
                .fetch(cursor, new CursorResultHandler(), fetchRows, adaptiveFetch);

        // After fetch, update last used fetch size (could be useful during adaptive fetch).
        lastUsedFetchSize = fetchRows;

        rows = this.rows;
        // Now prepend our one saved row and move to it.
        rows.add(0, thisRow);
        currentRow = 0;

        // Finally, now we can tell if we're the last row or not.
        return rows.size() == 1;
    }
+
+    @Override
+    public boolean last() throws SQLException {
+        checkScrollable();
+        List<Tuple> rows = this.rows;
+        final int rowsSize = rows.size();
+        if (rowsSize <= 0) {
+            return false;
+        }
+
+        currentRow = rowsSize - 1;
+        initRowBuffer();
+        onInsertRow = false;
+
+        return true;
+    }
+
+    @Override
+    public boolean previous() throws SQLException {
+        checkScrollable();
+
+        if (onInsertRow) {
+            throw new PSQLException(GT.tr("Can''t use relative move methods while on the insert row."),
+                    PSQLState.INVALID_CURSOR_STATE);
+        }
+
+        if (currentRow - 1 < 0) {
+            currentRow = -1;
+            thisRow = null;
+            rowBuffer = null;
+            return false;
+        } else {
+            currentRow--;
+        }
+        initRowBuffer();
+        return true;
+    }
+
+    @Override
+    public boolean relative(int rows) throws SQLException {
+        checkScrollable();
+
+        if (onInsertRow) {
+            throw new PSQLException(GT.tr("Can''t use relative move methods while on the insert row."),
+                    PSQLState.INVALID_CURSOR_STATE);
+        }
+
+        // have to add 1 since absolute expects a 1-based index
+        int index = currentRow + 1 + rows;
+        if (index < 0) {
+            beforeFirst();
+            return false;
+        }
+        return absolute(index);
+    }
+
+    @Override
+    public void cancelRowUpdates() throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            checkClosed();
+            if (onInsertRow) {
+                throw new PSQLException(GT.tr("Cannot call cancelRowUpdates() when on the insert row."),
+                        PSQLState.INVALID_CURSOR_STATE);
+            }
+
+            if (doingUpdates) {
+                doingUpdates = false;
+
+                clearRowBuffer(true);
+            }
+        }
+    }
+
    /**
     * Deletes the current row from the underlying table, keyed on the discovered primary
     * key columns, and removes it from the in-memory row list.
     *
     * <p>The prepared DELETE statement is built once and cached in {@code deleteStatement}
     * for reuse across calls.</p>
     *
     * @throws SQLException if the result set is not updateable, the cursor is on the insert
     *     row, or is positioned before the first / after the last row
     */
    @Override
    public void deleteRow() throws SQLException {
        try (ResourceLock ignore = lock.obtain()) {
            checkUpdateable();

            if (onInsertRow) {
                throw new PSQLException(GT.tr("Cannot call deleteRow() when on the insert row."),
                        PSQLState.INVALID_CURSOR_STATE);
            }

            if (isBeforeFirst()) {
                throw new PSQLException(
                        GT.tr(
                                "Currently positioned before the start of the ResultSet.  You cannot call deleteRow() here."),
                        PSQLState.INVALID_CURSOR_STATE);
            }
            if (isAfterLast()) {
                throw new PSQLException(
                        GT.tr(
                                "Currently positioned after the end of the ResultSet.  You cannot call deleteRow() here."),
                        PSQLState.INVALID_CURSOR_STATE);
            }
            List<Tuple> rows = this.rows;
            if (rows.isEmpty()) {
                throw new PSQLException(GT.tr("There are no rows in this ResultSet."),
                        PSQLState.INVALID_CURSOR_STATE);
            }

            List<PrimaryKey> primaryKeys = this.primaryKeys;
            int numKeys = primaryKeys.size();
            PreparedStatement deleteStatement = this.deleteStatement;
            if (deleteStatement == null) {
                // Lazily build "DELETE FROM <table> where pk1 = ? and pk2 = ? ..." once.
                StringBuilder deleteSQL =
                        new StringBuilder("DELETE FROM ").append(onlyTable).append(tableName).append(" where ");

                for (int i = 0; i < numKeys; i++) {
                    Utils.escapeIdentifier(deleteSQL, primaryKeys.get(i).name);
                    deleteSQL.append(" = ?");
                    if (i < numKeys - 1) {
                        deleteSQL.append(" and ");
                    }
                }

                this.deleteStatement = deleteStatement = connection.prepareStatement(deleteSQL.toString());
            }
            deleteStatement.clearParameters();

            // Bind the current row's primary key values.
            for (int i = 0; i < numKeys; i++) {
                deleteStatement.setObject(i + 1, primaryKeys.get(i).getValue());
            }

            deleteStatement.executeUpdate();

            // Drop the row locally and reposition onto the previous row.
            rows.remove(currentRow);
            currentRow--;
            moveToCurrentRow();
        }
    }
+
    /**
     * Inserts the row assembled on the insert row (via the updateXXX methods) into the
     * underlying table, then appends it to the in-memory row list and makes it the current
     * row's backing data.
     *
     * <p>The INSERT statement is rebuilt on every call because the caller may set a
     * different subset of columns each time.</p>
     *
     * @throws SQLException if not on the insert row, no column values were set, or the
     *     result set is not updateable
     */
    @Override
    public void insertRow() throws SQLException {
        try (ResourceLock ignore = lock.obtain()) {
            checkUpdateable();
            if (!onInsertRow) {
                throw new PSQLException(GT.tr("Not on the insert row."), PSQLState.INVALID_CURSOR_STATE);
            }
            HashMap<String, Object> updateValues = this.updateValues;
            if (updateValues == null || updateValues.isEmpty()) {
                throw new PSQLException(GT.tr("You must specify at least one column value to insert a row."),
                        PSQLState.INVALID_PARAMETER_VALUE);
            }

            // loop through the keys in the insertTable and create the sql statement
            // we have to create the sql every time since the user could insert different
            // columns each time

            StringBuilder insertSQL = new StringBuilder("INSERT INTO ").append(tableName).append(" (");
            StringBuilder paramSQL = new StringBuilder(") values (");

            Iterator<String> columnNames = updateValues.keySet().iterator();
            int numColumns = updateValues.size();

            // Build the column list and the matching placeholder list in lock-step.
            for (int i = 0; columnNames.hasNext(); i++) {
                String columnName = columnNames.next();

                Utils.escapeIdentifier(insertSQL, columnName);
                if (i < numColumns - 1) {
                    insertSQL.append(", ");
                    paramSQL.append("?,");
                } else {
                    paramSQL.append("?)");
                }

            }

            insertSQL.append(paramSQL.toString());
            PreparedStatement insertStatement = null;

            Tuple rowBuffer = this.rowBuffer;
            try {
                insertStatement = connection.prepareStatement(insertSQL.toString(), Statement.RETURN_GENERATED_KEYS);

                // Bind values in the same iteration order used to build the SQL above.
                Iterator<Object> values = updateValues.values().iterator();

                for (int i = 1; values.hasNext(); i++) {
                    insertStatement.setObject(i, values.next());
                }

                insertStatement.executeUpdate();

                if (usingOID) {
                    // we have to get the last inserted OID and put it in the resultset

                    long insertedOID = ((PgStatement) insertStatement).getLastOID();

                    updateValues.put("oid", insertedOID);

                }

                // update the underlying row to the new inserted data
                updateRowBuffer(insertStatement, rowBuffer, updateValues);
            } finally {
                JdbcBlackHole.close(insertStatement);
            }

            rows.add(rowBuffer);

            // we should now reflect the current data in thisRow
            // that way getXXX will get the newly inserted data
            thisRow = rowBuffer;

            // need to clear this in case of another insert
            clearRowBuffer(false);
        }
    }
+
+    @Override
+    public void moveToCurrentRow() throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            checkUpdateable();
+
+            if (currentRow < 0 || currentRow >= rows.size()) {
+                thisRow = null;
+                rowBuffer = null;
+            } else {
+                initRowBuffer();
+            }
+
+            onInsertRow = false;
+            doingUpdates = false;
+        }
+    }
+
+    @Override
+    public void moveToInsertRow() throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            checkUpdateable();
+
+            // make sure the underlying data is null
+            clearRowBuffer(false);
+
+            onInsertRow = true;
+            doingUpdates = false;
+        }
+    }
+
+    // rowBuffer is the temporary storage for the row
+    private void clearRowBuffer(boolean copyCurrentRow) throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            // inserts want an empty array while updates want a copy of the current row
+            if (copyCurrentRow) {
+                rowBuffer = thisRow.updateableCopy();
+            } else {
+                rowBuffer = new Tuple(fields.length);
+            }
+
+            // clear the updateValues hash map for the next set of updates
+            HashMap<String, Object> updateValues = this.updateValues;
+            if (updateValues != null) {
+                updateValues.clear();
+            }
+        }
+    }
+
    /** Deletions are not detectable by this driver; always returns {@code false}. */
    @Override
    public boolean rowDeleted() throws SQLException {
        checkClosed();
        return false;
    }
+
    /** Insertions are not detectable by this driver; always returns {@code false}. */
    @Override
    public boolean rowInserted() throws SQLException {
        checkClosed();
        return false;
    }
+
    /** Updates are not detectable by this driver; always returns {@code false}. */
    @Override
    public boolean rowUpdated() throws SQLException {
        checkClosed();
        return false;
    }
+
+    @Override
+    public void updateAsciiStream(int columnIndex,
+                                  InputStream x, int length)
+            throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            if (x == null) {
+                updateNull(columnIndex);
+                return;
+            }
+
+            try {
+                InputStreamReader reader = new InputStreamReader(x, StandardCharsets.US_ASCII);
+                char[] data = new char[length];
+                int numRead = 0;
+                while (true) {
+                    int n = reader.read(data, numRead, length - numRead);
+                    if (n == -1) {
+                        break;
+                    }
+
+                    numRead += n;
+
+                    if (numRead == length) {
+                        break;
+                    }
+                }
+                updateString(columnIndex, new String(data, 0, numRead));
+            } catch (IOException ie) {
+                throw new PSQLException(GT.tr("Provided InputStream failed."), null, ie);
+            }
+        }
+    }
+
    /** Stages a BigDecimal value as a pending update for the column. */
    @Override
    public void updateBigDecimal(int columnIndex, BigDecimal x)
            throws SQLException {
        try (ResourceLock ignore = lock.obtain()) {
            updateValue(columnIndex, x);
        }
    }
+
+    @Override
+    public void updateBinaryStream(int columnIndex,
+                                   InputStream x, int length)
+            throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            if (x == null) {
+                updateNull(columnIndex);
+                return;
+            }
+
+            byte[] data = new byte[length];
+            int numRead = 0;
+            try {
+                while (true) {
+                    int n = x.read(data, numRead, length - numRead);
+                    if (n == -1) {
+                        break;
+                    }
+
+                    numRead += n;
+
+                    if (numRead == length) {
+                        break;
+                    }
+                }
+            } catch (IOException ie) {
+                throw new PSQLException(GT.tr("Provided InputStream failed."), null, ie);
+            }
+
+            if (numRead == length) {
+                updateBytes(columnIndex, data);
+            } else {
+                // the stream contained less data than they said
+                // perhaps this is an error?
+                byte[] data2 = new byte[numRead];
+                System.arraycopy(data, 0, data2, 0, numRead);
+                updateBytes(columnIndex, data2);
+            }
+        }
+    }
+
    /** Stages a boolean value as a pending update for the column. */
    @Override
    public void updateBoolean(int columnIndex, boolean x) throws SQLException {
        try (ResourceLock ignore = lock.obtain()) {
            updateValue(columnIndex, x);
        }
    }
+
    /**
     * Stages a byte value as a pending update for the column.
     * NOTE(review): unlike the other numeric overloads this stores the value as a String —
     * presumably deliberate for the backend's text representation; confirm before changing.
     */
    @Override
    public void updateByte(int columnIndex, byte x) throws SQLException {
        try (ResourceLock ignore = lock.obtain()) {
            updateValue(columnIndex, String.valueOf(x));
        }
    }
+
    /** Stages a byte-array value as a pending update for the column. */
    @Override
    public void updateBytes(int columnIndex, byte[] x) throws SQLException {
        try (ResourceLock ignore = lock.obtain()) {
            updateValue(columnIndex, x);
        }
    }
+
+    @Override
+    public void updateCharacterStream(int columnIndex,
+                                      Reader x, int length)
+            throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            if (x == null) {
+                updateNull(columnIndex);
+                return;
+            }
+
+            try {
+                char[] data = new char[length];
+                int numRead = 0;
+                while (true) {
+                    int n = x.read(data, numRead, length - numRead);
+                    if (n == -1) {
+                        break;
+                    }
+
+                    numRead += n;
+
+                    if (numRead == length) {
+                        break;
+                    }
+                }
+                updateString(columnIndex, new String(data, 0, numRead));
+            } catch (IOException ie) {
+                throw new PSQLException(GT.tr("Provided Reader failed."), null, ie);
+            }
+        }
+    }
+
    /** Stages a java.sql.Date value as a pending update for the column. */
    @Override
    public void updateDate(int columnIndex,
                           Date x) throws SQLException {
        try (ResourceLock ignore = lock.obtain()) {
            updateValue(columnIndex, x);
        }
    }
+
    /** Stages a double value as a pending update for the column. */
    @Override
    public void updateDouble(int columnIndex, double x) throws SQLException {
        try (ResourceLock ignore = lock.obtain()) {
            updateValue(columnIndex, x);
        }
    }
+
    /** Stages a float value as a pending update for the column. */
    @Override
    public void updateFloat(int columnIndex, float x) throws SQLException {
        try (ResourceLock ignore = lock.obtain()) {
            updateValue(columnIndex, x);
        }
    }
+
    /** Stages an int value as a pending update for the column. */
    @Override
    public void updateInt(int columnIndex, int x) throws SQLException {
        try (ResourceLock ignore = lock.obtain()) {
            updateValue(columnIndex, x);
        }
    }
+
    /** Stages a long value as a pending update for the column. */
    @Override
    public void updateLong(int columnIndex, long x) throws SQLException {
        try (ResourceLock ignore = lock.obtain()) {
            updateValue(columnIndex, x);
        }
    }
+
    /**
     * Stages SQL NULL as a pending update for the column. The column's PostgreSQL type name
     * is captured in a NullObject so the eventual UPDATE/INSERT can bind a typed null.
     */
    @Override
    public void updateNull(int columnIndex) throws SQLException {
        try (ResourceLock ignore = lock.obtain()) {
            checkColumnIndex(columnIndex);
            String columnTypeName = getPGType(columnIndex);
            updateValue(columnIndex, new NullObject(columnTypeName));
        }
    }
+
    /** Stages an arbitrary object value as a pending update for the column. */
    @Override
    public void updateObject(
            int columnIndex, Object x) throws SQLException {
        try (ResourceLock ignore = lock.obtain()) {
            updateValue(columnIndex, x);
        }
    }
+
    /**
     * Stages an arbitrary object value as a pending update for the column.
     * The {@code scale} argument is ignored — the two-argument overload is used as-is.
     */
    @Override
    public void updateObject(
            int columnIndex, Object x, int scale) throws SQLException {
        try (ResourceLock ignore = lock.obtain()) {
            this.updateObject(columnIndex, x);
        }
    }
+
    /**
     * Re-reads the current row from the database, selecting every base column by primary
     * key, and replaces both the in-memory row and the row buffer with the fresh data.
     *
     * <p>A no-op when the cursor is before-first, after-last, or the result has no rows.
     * The refresh query is issued as an updateable (text-format) result set because
     * updateable result sets do not yet support binary transfers.</p>
     *
     * @throws SQLException if the result set is not updateable or on the insert row
     */
    @Override
    public void refreshRow() throws SQLException {
        checkUpdateable();
        if (onInsertRow) {
            throw new PSQLException(GT.tr("Can''t refresh the insert row."),
                    PSQLState.INVALID_CURSOR_STATE);
        }

        if (isBeforeFirst() || isAfterLast() || rows.isEmpty()) {
            return;
        }

        StringBuilder selectSQL = new StringBuilder("select ");

        // Select the base column names (not labels) so the row maps back onto the table.
        ResultSetMetaData rsmd = getMetaData();
        PGResultSetMetaData pgmd = (PGResultSetMetaData) rsmd;
        for (int i = 1; i <= rsmd.getColumnCount(); i++) {
            if (i > 1) {
                selectSQL.append(", ");
            }
            Utils.escapeIdentifier(selectSQL, pgmd.getBaseColumnName(i));
        }
        selectSQL.append(" from ").append(onlyTable).append(tableName).append(" where ");

        List<PrimaryKey> primaryKeys = this.primaryKeys;
        int numKeys = primaryKeys.size();

        for (int i = 0; i < numKeys; i++) {

            PrimaryKey primaryKey = primaryKeys.get(i);
            Utils.escapeIdentifier(selectSQL, primaryKey.name);
            selectSQL.append(" = ?");

            if (i < numKeys - 1) {
                selectSQL.append(" and ");
            }
        }
        String sqlText = selectSQL.toString();
        if (connection.getLogger().isLoggable(Level.FINE)) {
            connection.getLogger().log(Level.FINE, "selecting {0}", sqlText);
        }
        // because updateable result sets do not yet support binary transfers we must request refresh
        // with updateable result set to get field data in correct format
        PreparedStatement selectStatement = null;
        try {
            selectStatement = connection.prepareStatement(sqlText,
                    ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE);

            // Bind the current row's primary key values.
            for (int i = 0; i < numKeys; i++) {
                selectStatement.setObject(i + 1, primaryKeys.get(i).getValue());
            }

            PgResultSet rs = (PgResultSet) selectStatement.executeQuery();

            if (rs.next()) {
                // we know that the row is updatable as it was tested above.
                if (rs.thisRow == null) {
                    rowBuffer = null;
                } else {
                    rowBuffer = rs.thisRow.updateableCopy();
                }
            }

            rows.set(currentRow, rowBuffer);
            thisRow = rowBuffer;

            connection.getLogger().log(Level.FINE, "done updates");

            rs.close();
        } finally {
            JdbcBlackHole.close(selectStatement);
        }
    }
+
+    /**
+     * Flushes the pending column updates (collected by the updateXXX setters while
+     * {@code doingUpdates} is true) to the database using a generated UPDATE statement
+     * keyed on the table's primary key, then mirrors the new values into the cached row.
+     *
+     * @throws SQLException if the ResultSet is not updateable, is positioned on the
+     *         insert row, or is not positioned on a valid row
+     */
+    @Override
+    public void updateRow() throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            checkUpdateable();
+
+            if (onInsertRow) {
+                throw new PSQLException(GT.tr("Cannot call updateRow() when on the insert row."),
+                        PSQLState.INVALID_CURSOR_STATE);
+            }
+
+            List<Tuple> rows = this.rows;
+            if (isBeforeFirst() || isAfterLast() || rows.isEmpty()) {
+                throw new PSQLException(
+                        GT.tr(
+                                "Cannot update the ResultSet because it is either before the start or after the end of the results."),
+                        PSQLState.INVALID_CURSOR_STATE);
+            }
+
+            // No column was modified through an updateXXX setter: nothing to do.
+            if (!doingUpdates) {
+                return; // No work pending.
+            }
+
+            // Build "UPDATE <table> SET col1 = ?, ... WHERE key1 = ? and key2 = ?".
+            StringBuilder updateSQL = new StringBuilder("UPDATE " + onlyTable + tableName + " SET  ");
+
+            HashMap<String, Object> updateValues = this.updateValues;
+            int numColumns = updateValues.size();
+            Iterator<String> columns = updateValues.keySet().iterator();
+
+            for (int i = 0; columns.hasNext(); i++) {
+                String column = columns.next();
+                Utils.escapeIdentifier(updateSQL, column);
+                updateSQL.append(" = ?");
+
+                if (i < numColumns - 1) {
+                    updateSQL.append(", ");
+                }
+            }
+
+            updateSQL.append(" WHERE ");
+
+            List<PrimaryKey> primaryKeys = this.primaryKeys;
+            int numKeys = primaryKeys.size();
+
+            for (int i = 0; i < numKeys; i++) {
+                PrimaryKey primaryKey = primaryKeys.get(i);
+                Utils.escapeIdentifier(updateSQL, primaryKey.name);
+                updateSQL.append(" = ?");
+
+                if (i < numKeys - 1) {
+                    updateSQL.append(" and ");
+                }
+            }
+
+            String sqlText = updateSQL.toString();
+            if (connection.getLogger().isLoggable(Level.FINE)) {
+                connection.getLogger().log(Level.FINE, "updating {0}", sqlText);
+            }
+            PreparedStatement updateStatement = null;
+            try {
+                updateStatement = connection.prepareStatement(sqlText);
+
+                // Bind the SET values first; i keeps counting so the WHERE key values
+                // continue at the next JDBC parameter position.
+                int i = 0;
+                Iterator<Object> iterator = updateValues.values().iterator();
+                for (; iterator.hasNext(); i++) {
+                    Object o = iterator.next();
+                    updateStatement.setObject(i + 1, o);
+                }
+
+                for (int j = 0; j < numKeys; j++, i++) {
+                    updateStatement.setObject(i + 1, primaryKeys.get(j).getValue());
+                }
+
+                updateStatement.executeUpdate();
+            } finally {
+                JdbcBlackHole.close(updateStatement);
+            }
+
+            // Reflect the committed values in the locally cached row as well.
+            // NOTE(review): rowBuffer is dereferenced below without a null check; the
+            // position checks above presumably guarantee a current row -- confirm.
+            Tuple rowBuffer = this.rowBuffer;
+            updateRowBuffer(null, rowBuffer, updateValues);
+
+            connection.getLogger().log(Level.FINE, "copying data");
+            thisRow = rowBuffer.readOnlyCopy();
+            rows.set(currentRow, rowBuffer);
+
+            connection.getLogger().log(Level.FINE, "done updates");
+            updateValues.clear();
+            doingUpdates = false;
+        }
+    }
+
+    /**
+     * Records a pending {@code short} value for the given column via {@code updateValue};
+     * nothing is sent to the database until {@link #updateRow()} is called.
+     */
+    @Override
+    public void updateShort(int columnIndex, short x) throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            updateValue(columnIndex, x);
+        }
+    }
+
+    /**
+     * Records a pending {@code String} value for the given column via {@code updateValue}.
+     */
+    @SuppressWarnings("try")
+    @Override
+    public void updateString(int columnIndex, String x) throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            updateValue(columnIndex, x);
+        }
+    }
+
+    /**
+     * Records a pending {@code java.sql.Time} value for the given column via {@code updateValue}.
+     */
+    @Override
+    public void updateTime(int columnIndex, Time x) throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            updateValue(columnIndex, x);
+        }
+    }
+
+    /**
+     * Records a pending {@code java.sql.Timestamp} value for the given column via {@code updateValue}.
+     */
+    @Override
+    public void updateTimestamp(
+            int columnIndex, Timestamp x) throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            updateValue(columnIndex, x);
+        }
+    }
+
+    @Override
+    public void updateNull(String columnName) throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            updateNull(findColumn(columnName));
+        }
+    }
+
+    @Override
+    public void updateBoolean(String columnName, boolean x) throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            updateBoolean(findColumn(columnName), x);
+        }
+    }
+
+    @Override
+    public void updateByte(String columnName, byte x) throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            updateByte(findColumn(columnName), x);
+        }
+    }
+
+    @Override
+    public void updateShort(String columnName, short x) throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            updateShort(findColumn(columnName), x);
+        }
+    }
+
+    @Override
+    public void updateInt(String columnName, int x) throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            updateInt(findColumn(columnName), x);
+        }
+    }
+
+    @Override
+    public void updateLong(String columnName, long x) throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            updateLong(findColumn(columnName), x);
+        }
+    }
+
+    @Override
+    public void updateFloat(String columnName, float x) throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            updateFloat(findColumn(columnName), x);
+        }
+    }
+
+    //
+    // Name-based updateXXX(String, ...) variants: each resolves the column label
+    // with findColumn and delegates to its column-index counterpart.
+    //
+
+    @Override
+    public void updateDouble(String columnName, double x) throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            updateDouble(findColumn(columnName), x);
+        }
+    }
+
+    @Override
+    public void updateBigDecimal(
+            String columnName, BigDecimal x) throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            updateBigDecimal(findColumn(columnName), x);
+        }
+    }
+
+    @Override
+    public void updateString(
+            String columnName, String x) throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            updateString(findColumn(columnName), x);
+        }
+    }
+
+    /**
+     * Name-based variant of {@link #updateBytes(int, byte[])}: resolves the column
+     * label to its index and delegates.
+     */
+    @Override
+    public void updateBytes(
+            String columnName, byte[] x) throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            int columnIndex = findColumn(columnName);
+            updateBytes(columnIndex, x);
+        }
+    }
+
+    @Override
+    public void updateDate(
+            String columnName, Date x) throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            updateDate(findColumn(columnName), x);
+        }
+    }
+
+    @Override
+    public void updateTime(
+            String columnName, Time x) throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            updateTime(findColumn(columnName), x);
+        }
+    }
+
+    @Override
+    public void updateTimestamp(
+            String columnName, Timestamp x)
+            throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            updateTimestamp(findColumn(columnName), x);
+        }
+    }
+
+    /**
+     * Name-based variant of {@link #updateAsciiStream(int, InputStream, int)}: resolves
+     * the column label to its index and delegates.
+     */
+    @Override
+    public void updateAsciiStream(
+            String columnName, InputStream x, int length)
+            throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            int columnIndex = findColumn(columnName);
+            updateAsciiStream(columnIndex, x, length);
+        }
+    }
+
+    /**
+     * Name-based variant of {@link #updateBinaryStream(int, InputStream, int)}: resolves
+     * the column label to its index and delegates.
+     */
+    @Override
+    public void updateBinaryStream(
+            String columnName, InputStream x, int length)
+            throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            int columnIndex = findColumn(columnName);
+            updateBinaryStream(columnIndex, x, length);
+        }
+    }
+
+    /**
+     * Name-based variant of {@link #updateCharacterStream(int, Reader, int)}: resolves
+     * the column label to its index and delegates.
+     */
+    @Override
+    public void updateCharacterStream(
+            String columnName, Reader reader,
+            int length) throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            int columnIndex = findColumn(columnName);
+            updateCharacterStream(columnIndex, reader, length);
+        }
+    }
+
+    /**
+     * Name-based variant of {@link #updateObject(int, Object)}. Note that the
+     * {@code scale} argument is not used by this implementation: the call is
+     * forwarded without it.
+     */
+    @Override
+    public void updateObject(
+            String columnName, Object x, int scale)
+            throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            updateObject(findColumn(columnName), x);
+        }
+    }
+
+    @Override
+    public void updateObject(
+            String columnName, Object x) throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            updateObject(findColumn(columnName), x);
+        }
+    }
+
+    /**
+     * Is this ResultSet updateable?
+     */
+
+    boolean isUpdateable() throws SQLException {
+        checkClosed();
+
+        if (resultsetconcurrency == ResultSet.CONCUR_READ_ONLY) {
+            throw new PSQLException(
+                    GT.tr("ResultSets with concurrency CONCUR_READ_ONLY cannot be updated."),
+                    PSQLState.INVALID_CURSOR_STATE);
+        }
+
+        if (updateable) {
+            return true;
+        }
+
+        connection.getLogger().log(Level.FINE, "checking if rs is updateable");
+
+        parseQuery();
+
+        if (tableName == null) {
+            connection.getLogger().log(Level.FINE, "tableName is not found");
+            return false;
+        }
+
+        if (!singleTable) {
+            connection.getLogger().log(Level.FINE, "not a single table");
+            return false;
+        }
+
+        usingOID = false;
+
+        connection.getLogger().log(Level.FINE, "getting primary keys");
+
+        //
+        // Contains the primary key?
+        //
+
+        List<PrimaryKey> primaryKeys = new ArrayList<>();
+        this.primaryKeys = primaryKeys;
 
         int i = 0;
-        Iterator<Object> iterator = updateValues.values().iterator();
-        for (; iterator.hasNext(); i++) {
-          Object o = iterator.next();
-          updateStatement.setObject(i + 1, o);
+        int numPKcolumns = 0;
+
+        // otherwise go and get the primary keys and create a list of keys
+        String[] s = quotelessTableName(tableName);
+        String quotelessTableName = s[0];
+        String quotelessSchemaName = s[1];
+        ResultSet rs = ((PgDatabaseMetaData) connection.getMetaData()).getPrimaryUniqueKeys("",
+                quotelessSchemaName, quotelessTableName);
+
+        String lastConstraintName = null;
+
+        while (rs.next()) {
+            String constraintName = rs.getString(6); // get the constraintName
+            if (lastConstraintName == null || !lastConstraintName.equals(constraintName)) {
+                if (lastConstraintName != null) {
+                    if (i == numPKcolumns && numPKcolumns > 0) {
+                        break;
+                    }
+                    connection.getLogger().log(Level.FINE, "no of keys={0} from constraint {1}", new Object[]{i, lastConstraintName});
+                }
+                i = 0;
+                numPKcolumns = 0;
+
+                primaryKeys.clear();
+                lastConstraintName = constraintName;
+            }
+            numPKcolumns++;
+
+            boolean isNotNull = rs.getBoolean("IS_NOT_NULL");
+
+            /* make sure that only unique keys with all non-null attributes are handled */
+            if (isNotNull) {
+                String columnName = rs.getString(4); // get the columnName
+                int index = findColumnIndex(columnName);
+
+                /* make sure that the user has included the primary key in the resultset */
+                if (index > 0) {
+                    i++;
+                    primaryKeys.add(new PrimaryKey(index, columnName)); // get the primary key information
+                }
+            }
         }
 
-        for (int j = 0; j < numKeys; j++, i++) {
-          updateStatement.setObject(i + 1, primaryKeys.get(j).getValue());
-        }
-
-        updateStatement.executeUpdate();
-      } finally {
-        JdbcBlackHole.close(updateStatement);
-      }
-
-      Tuple rowBuffer = this.rowBuffer;
-      updateRowBuffer(null, rowBuffer, updateValues);
-
-      connection.getLogger().log(Level.FINE, "copying data");
-      thisRow = rowBuffer.readOnlyCopy();
-      rows.set(currentRow, rowBuffer);
-
-      connection.getLogger().log(Level.FINE, "done updates");
-      updateValues.clear();
-      doingUpdates = false;
-    }
-  }
-
-  @Override
-  public void updateShort(int columnIndex, short x) throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      updateValue(columnIndex, x);
-    }
-  }
-
-  @SuppressWarnings("try")
-  @Override
-  public void updateString(int columnIndex, String x) throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      updateValue(columnIndex, x);
-    }
-  }
-
-  @Override
-  public void updateTime(int columnIndex, Time x) throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      updateValue(columnIndex, x);
-    }
-  }
-
-  @Override
-  public void updateTimestamp(
-      int columnIndex, Timestamp x) throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      updateValue(columnIndex, x);
-    }
-  }
-
-  @Override
-  public void updateNull(String columnName) throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      updateNull(findColumn(columnName));
-    }
-  }
-
-  @Override
-  public void updateBoolean(String columnName, boolean x) throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      updateBoolean(findColumn(columnName), x);
-    }
-  }
-
-  @Override
-  public void updateByte(String columnName, byte x) throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      updateByte(findColumn(columnName), x);
-    }
-  }
-
-  @Override
-  public void updateShort(String columnName, short x) throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      updateShort(findColumn(columnName), x);
-    }
-  }
-
-  @Override
-  public void updateInt(String columnName, int x) throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      updateInt(findColumn(columnName), x);
-    }
-  }
-
-  @Override
-  public void updateLong(String columnName, long x) throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      updateLong(findColumn(columnName), x);
-    }
-  }
-
-  @Override
-  public void updateFloat(String columnName, float x) throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      updateFloat(findColumn(columnName), x);
-    }
-  }
-
-  @Override
-  public void updateDouble(String columnName, double x) throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      updateDouble(findColumn(columnName), x);
-    }
-  }
-
-  @Override
-  public void updateBigDecimal(
-      String columnName, BigDecimal x) throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      updateBigDecimal(findColumn(columnName), x);
-    }
-  }
-
-  @Override
-  public void updateString(
-      String columnName, String x) throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      updateString(findColumn(columnName), x);
-    }
-  }
-
-  @Override
-  public void updateBytes(
-      String columnName, byte [] x) throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      updateBytes(findColumn(columnName), x);
-    }
-  }
-
-  @Override
-  public void updateDate(
-      String columnName, Date x) throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      updateDate(findColumn(columnName), x);
-    }
-  }
-
-  @Override
-  public void updateTime(
-      String columnName, Time x) throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      updateTime(findColumn(columnName), x);
-    }
-  }
-
-  @Override
-  public void updateTimestamp(
-      String columnName, Timestamp x)
-      throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      updateTimestamp(findColumn(columnName), x);
-    }
-  }
-
-  @Override
-  public void updateAsciiStream(
-      String columnName, InputStream x, int length)
-      throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      updateAsciiStream(findColumn(columnName), x, length);
-    }
-  }
-
-  @Override
-  public void updateBinaryStream(
-      String columnName, InputStream x, int length)
-      throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      updateBinaryStream(findColumn(columnName), x, length);
-    }
-  }
-
-  @Override
-  public void updateCharacterStream(
-      String columnName, Reader reader,
-      int length) throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      updateCharacterStream(findColumn(columnName), reader, length);
-    }
-  }
-
-  @Override
-  public void updateObject(
-      String columnName, Object x, int scale)
-      throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      updateObject(findColumn(columnName), x);
-    }
-  }
-
-  @Override
-  public void updateObject(
-      String columnName, Object x) throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      updateObject(findColumn(columnName), x);
-    }
-  }
-
-  /**
-   * Is this ResultSet updateable?
-   */
-
-  boolean isUpdateable() throws SQLException {
-    checkClosed();
-
-    if (resultsetconcurrency == ResultSet.CONCUR_READ_ONLY) {
-      throw new PSQLException(
-          GT.tr("ResultSets with concurrency CONCUR_READ_ONLY cannot be updated."),
-          PSQLState.INVALID_CURSOR_STATE);
-    }
-
-    if (updateable) {
-      return true;
-    }
-
-    connection.getLogger().log(Level.FINE, "checking if rs is updateable");
-
-    parseQuery();
-
-    if (tableName == null) {
-      connection.getLogger().log(Level.FINE, "tableName is not found");
-      return false;
-    }
-
-    if (!singleTable) {
-      connection.getLogger().log(Level.FINE, "not a single table");
-      return false;
-    }
-
-    usingOID = false;
-
-    connection.getLogger().log(Level.FINE, "getting primary keys");
-
-    //
-    // Contains the primary key?
-    //
-
-    List<PrimaryKey> primaryKeys = new ArrayList<>();
-    this.primaryKeys = primaryKeys;
-
-    int i = 0;
-    int numPKcolumns = 0;
-
-    // otherwise go and get the primary keys and create a list of keys
-    String[] s = quotelessTableName(tableName);
-    String quotelessTableName = s[0];
-    String quotelessSchemaName = s[1];
-    ResultSet rs = ((PgDatabaseMetaData) connection.getMetaData()).getPrimaryUniqueKeys("",
-        quotelessSchemaName, quotelessTableName);
-
-    String lastConstraintName = null;
-
-    while (rs.next()) {
-      String constraintName = rs.getString(6); // get the constraintName
-      if (lastConstraintName == null || !lastConstraintName.equals(constraintName)) {
-        if (lastConstraintName != null) {
-          if (i == numPKcolumns && numPKcolumns > 0) {
-            break;
-          }
-          connection.getLogger().log(Level.FINE, "no of keys={0} from constraint {1}", new Object[]{i, lastConstraintName});
-        }
-        i = 0;
-        numPKcolumns = 0;
-
-        primaryKeys.clear();
-        lastConstraintName = constraintName;
-      }
-      numPKcolumns++;
-
-      boolean isNotNull = rs.getBoolean("IS_NOT_NULL");
-
-      /* make sure that only unique keys with all non-null attributes are handled */
-      if (isNotNull) {
-        String columnName = rs.getString(4); // get the columnName
-        int index = findColumnIndex(columnName);
-
-        /* make sure that the user has included the primary key in the resultset */
-        if (index > 0) {
-          i++;
-          primaryKeys.add(new PrimaryKey(index, columnName)); // get the primary key information
-        }
-      }
-    }
-
-    rs.close();
-    connection.getLogger().log(Level.FINE, "no of keys={0} from constraint {1}", new Object[]{i, lastConstraintName});
+        rs.close();
+        connection.getLogger().log(Level.FINE, "no of keys={0} from constraint {1}", new Object[]{i, lastConstraintName});
 
     /*
     it is only updatable if the primary keys are available in the resultset
      */
-    updateable = (i == numPKcolumns) && (numPKcolumns > 0);
+        updateable = (i == numPKcolumns) && (numPKcolumns > 0);
 
-    connection.getLogger().log(Level.FINE, "checking primary key {0}", updateable);
+        connection.getLogger().log(Level.FINE, "checking primary key {0}", updateable);
 
     /*
       if we haven't found a primary key we can check to see if the query includes the oid
       This is now a questionable check as oid's have been deprecated. Might still be useful for
       catalog tables, but again the query would have to include the oid.
      */
-    if (!updateable) {
-      int oidIndex = findColumnIndex("oid"); // 0 if not present
+        if (!updateable) {
+            int oidIndex = findColumnIndex("oid"); // 0 if not present
 
-      // oidIndex will be >0 if the oid was in the select list
-      if (oidIndex > 0) {
-        primaryKeys.add(new PrimaryKey(oidIndex, "oid"));
-        usingOID = true;
-        updateable = true;
-      }
-    }
-
-    if (!updateable) {
-      throw new PSQLException(GT.tr("No eligible primary or unique key found for table {0}.", tableName),
-          PSQLState.INVALID_CURSOR_STATE);
-    }
-
-    return updateable;
-  }
-
-  /**
-   * Turn on/off adaptive fetch for ResultSet.
-   *
-   * @param adaptiveFetch desired state of adaptive fetch.
-   * @throws SQLException exception returned if ResultSet is closed
-   */
-  public void setAdaptiveFetch(boolean adaptiveFetch) throws SQLException {
-    checkClosed();
-    updateQueryInsideAdaptiveFetchCache(adaptiveFetch);
-    this.adaptiveFetch = adaptiveFetch;
-  }
-
-  /**
-   * Update adaptive fetch cache during changing state of adaptive fetch inside
-   * ResultSet. Update inside AdaptiveFetchCache is required to collect data about max result
-   * row length for that query to compute adaptive fetch size.
-   *
-   * @param newAdaptiveFetch new state of adaptive fetch
-   */
-  private void updateQueryInsideAdaptiveFetchCache(boolean newAdaptiveFetch) {
-    if (Objects.nonNull(cursor)) {
-      ResultCursor resultCursor = cursor;
-      if (!this.adaptiveFetch && newAdaptiveFetch) {
-        // If we are here, that means we want to be added to adaptive fetch.
-        connection.getQueryExecutor().addQueryToAdaptiveFetchCache(true, resultCursor);
-      }
-
-      if (this.adaptiveFetch && !newAdaptiveFetch && Objects.nonNull(cursor)) {
-        // If we are here, that means we want to be removed from adaptive fetch.
-        connection.getQueryExecutor().removeQueryFromAdaptiveFetchCache(true, resultCursor);
-      }
-    }
-  }
-
-  /**
-   * Get state of adaptive fetch for resultSet.
-   *
-   * @return state of adaptive fetch (turned on or off)
-   * @throws SQLException exception returned if ResultSet is closed
-   */
-  public boolean getAdaptiveFetch() throws SQLException {
-    checkClosed();
-    return adaptiveFetch;
-  }
-
-  /**
-   * Cracks out the table name and schema (if it exists) from a fully qualified table name.
-   *
-   * @param fullname string that we are trying to crack. Test cases:
-   *
-   *        <pre>
-   *
-   *                 Table: table
-   *                                 ()
-   *
-   *                 "Table": Table
-   *                                 ()
-   *
-   *                 Schema.Table:
-   *                                 table (schema)
-   *
-   *                                 "Schema"."Table": Table
-   *                                                 (Schema)
-   *
-   *                                 "Schema"."Dot.Table": Dot.Table
-   *                                                 (Schema)
-   *
-   *                                 Schema."Dot.Table": Dot.Table
-   *                                                 (schema)
-   *
-   *        </pre>
-   *
-   * @return String array with element zero always being the tablename and element 1 the schema name
-   *         which may be a zero length string.
-   */
-  public static String[] quotelessTableName(String fullname) {
-
-    String[] parts = new String[]{null, ""};
-    StringBuilder acc = new StringBuilder();
-    boolean betweenQuotes = false;
-    for (int i = 0; i < fullname.length(); i++) {
-      char c = fullname.charAt(i);
-      switch (c) {
-        case '"':
-          if ((i < fullname.length() - 1) && (fullname.charAt(i + 1) == '"')) {
-            // two consecutive quotes - keep one
-            i++;
-            acc.append(c); // keep the quote
-          } else { // Discard it
-            betweenQuotes = !betweenQuotes;
-          }
-          break;
-        case '.':
-          if (betweenQuotes) { // Keep it
-            acc.append(c);
-          } else { // Have schema name
-            parts[1] = acc.toString();
-            acc = new StringBuilder();
-          }
-          break;
-        default:
-          acc.append(betweenQuotes ? c : Character.toLowerCase(c));
-          break;
-      }
-    }
-    // Always put table in slot 0
-    parts[0] = acc.toString();
-    return parts;
-  }
-
-  private void parseQuery() {
-    Query originalQuery = this.originalQuery;
-    if (originalQuery == null) {
-      return;
-    }
-    String sql = originalQuery.toString(null);
-    StringTokenizer st = new StringTokenizer(sql, " \r\t\n");
-    boolean tableFound = false;
-    boolean tablesChecked = false;
-    String name = "";
-
-    singleTable = true;
-
-    while (!tableFound && !tablesChecked && st.hasMoreTokens()) {
-      name = st.nextToken();
-      if ("from".equalsIgnoreCase(name)) {
-        tableName = st.nextToken();
-        if ("only".equalsIgnoreCase(tableName)) {
-          tableName = st.nextToken();
-          onlyTable = "ONLY ";
-        }
-        tableFound = true;
-      }
-    }
-  }
-
-  private void setRowBufferColumn(Tuple rowBuffer,
-      int columnIndex, Object valueObject) throws SQLException {
-    if (valueObject instanceof PGobject) {
-      String value = ((PGobject) valueObject).getValue();
-      rowBuffer.set(columnIndex, value == null ? null : connection.encodeString(value));
-    } else {
-      if (valueObject == null) {
-        rowBuffer.set(columnIndex, null);
-        return;
-      }
-      switch (getSQLType(columnIndex + 1)) {
-
-        // boolean needs to be formatted as t or f instead of true or false
-        case Types.BIT:
-        case Types.BOOLEAN:
-          rowBuffer.set(columnIndex, connection
-              .encodeString((Boolean) valueObject ? "t" : "f"));
-          break;
-        //
-        // toString() isn't enough for date and time types; we must format it correctly
-        // or we won't be able to re-parse it.
-        //
-        case Types.DATE:
-          rowBuffer.set(columnIndex, connection
-              .encodeString(
-                  getTimestampUtils().toString(
-                      getDefaultCalendar(), (Date) valueObject)));
-          break;
-
-        case Types.TIME:
-          rowBuffer.set(columnIndex, connection
-              .encodeString(
-                  getTimestampUtils().toString(
-                      getDefaultCalendar(), (Time) valueObject)));
-          break;
-
-        case Types.TIMESTAMP:
-          rowBuffer.set(columnIndex, connection.encodeString(
-              getTimestampUtils().toString(
-                  getDefaultCalendar(), (Timestamp) valueObject)));
-          break;
-
-        case Types.NULL:
-          // Should never happen?
-          break;
-
-        case Types.BINARY:
-        case Types.LONGVARBINARY:
-        case Types.VARBINARY:
-          if (isBinary(columnIndex + 1)) {
-            rowBuffer.set(columnIndex, (byte[]) valueObject);
-          } else {
-            try {
-              rowBuffer.set(columnIndex,
-                  PGbytea.toPGString((byte[]) valueObject).getBytes(connection.getEncoding().name()));
-            } catch (UnsupportedEncodingException e) {
-              throw new PSQLException(
-                  GT.tr("The JVM claims not to support the encoding: {0}", connection.getEncoding().name()),
-                  PSQLState.UNEXPECTED_ERROR, e);
+            // oidIndex will be >0 if the oid was in the select list
+            if (oidIndex > 0) {
+                primaryKeys.add(new PrimaryKey(oidIndex, "oid"));
+                usingOID = true;
+                updateable = true;
             }
-          }
-          break;
-
-        default:
-          rowBuffer.set(columnIndex, connection.encodeString(String.valueOf(valueObject)));
-          break;
-      }
-
-    }
-  }
-
-  private void updateRowBuffer(PreparedStatement insertStatement,
-      Tuple rowBuffer, HashMap<String, Object> updateValues) throws SQLException {
-    for (Map.Entry<String, Object> entry : updateValues.entrySet()) {
-      int columnIndex = findColumn(entry.getKey()) - 1;
-      Object valueObject = entry.getValue();
-      setRowBufferColumn(rowBuffer, columnIndex, valueObject);
-    }
-
-    if (insertStatement == null) {
-      return;
-    }
-    final ResultSet generatedKeys = insertStatement.getGeneratedKeys();
-    try {
-      generatedKeys.next();
-
-      List<PrimaryKey> primaryKeys = this.primaryKeys;
-      int numKeys = primaryKeys.size();
-
-      for (int i = 0; i < numKeys; i++) {
-        final PrimaryKey key = primaryKeys.get(i);
-        int columnIndex = key.index - 1;
-        Object valueObject = generatedKeys.getObject(key.name);
-        setRowBufferColumn(rowBuffer, columnIndex, valueObject);
-      }
-    } finally {
-      generatedKeys.close();
-    }
-  }
-
-  public class CursorResultHandler extends ResultHandlerBase {
-
-    public CursorResultHandler() {
-    }
-
-    @Override
-    public void handleResultRows(Query fromQuery, Field[] fields, List<Tuple> tuples,
-        ResultCursor cursor) {
-      PgResultSet.this.rows = tuples;
-      PgResultSet.this.cursor = cursor;
-    }
-
-    @Override
-    public void handleCommandStatus(String status, long updateCount, long insertOID) {
-      handleError(new PSQLException(GT.tr("Unexpected command status: {0}.", status),
-          PSQLState.PROTOCOL_VIOLATION));
-    }
-
-    @Override
-    public void handleCompletion() throws SQLException {
-      SQLWarning warning = getWarning();
-      if (warning != null) {
-        PgResultSet.this.addWarning(warning);
-      }
-      super.handleCompletion();
-    }
-  }
-
-  public BaseStatement getPGStatement() {
-    return statement;
-  }
-
-  //
-  // Backwards compatibility with PGRefCursorResultSet
-  //
-
-  private String refCursorName;
-
-  @Override
-  @SuppressWarnings("deprecation")
-  public String getRefCursor() {
-    // Can't check this because the PGRefCursorResultSet
-    // interface doesn't allow throwing a SQLException
-    //
-    // checkClosed();
-    return refCursorName;
-  }
-
-  private void setRefCursor(String refCursorName) {
-    this.refCursorName = refCursorName;
-  }
-
-  @Override
-  public void setFetchSize(int rows) throws SQLException {
-    checkClosed();
-    if (rows < 0) {
-      throw new PSQLException(GT.tr("Fetch size must be a value greater to or equal to 0."),
-          PSQLState.INVALID_PARAMETER_VALUE);
-    }
-    fetchSize = rows;
-  }
-
-  @Override
-  public int getFetchSize() throws SQLException {
-    checkClosed();
-    if (adaptiveFetch) {
-      return lastUsedFetchSize;
-    } else {
-      return fetchSize;
-    }
-  }
-
-  /**
-   * Get fetch size used during last fetch. Returned value can be useful if using adaptive
-   * fetch.
-   *
-   * @return fetch size used during last fetch.
-   * @throws SQLException exception returned if ResultSet is closed
-   */
-  public int getLastUsedFetchSize() throws SQLException {
-    checkClosed();
-    return lastUsedFetchSize;
-  }
-
-  @Override
-  public boolean next() throws SQLException {
-    checkClosed();
-
-    if (onInsertRow) {
-      throw new PSQLException(GT.tr("Can''t use relative move methods while on the insert row."),
-          PSQLState.INVALID_CURSOR_STATE);
-    }
-
-    if (currentRow + 1 >= rows.size()) {
-      ResultCursor cursor = this.cursor;
-      if (cursor == null || (maxRows > 0 && rowOffset + rows.size() >= maxRows)) {
-        currentRow = rows.size();
-        thisRow = null;
-        rowBuffer = null;
-        return false; // End of the resultset.
-      }
-
-      // Ask for some more data.
-      rowOffset += rows.size(); // We are discarding some data.
-
-      int fetchRows = fetchSize;
-      int adaptiveFetchRows = connection.getQueryExecutor()
-          .getAdaptiveFetchSize(adaptiveFetch, cursor);
-
-      if (adaptiveFetchRows != -1) {
-        fetchRows = adaptiveFetchRows;
-      }
-
-      if (maxRows != 0) {
-        if (fetchRows == 0 || rowOffset + fetchRows > maxRows) {
-          // Fetch would exceed maxRows, limit it.
-          fetchRows = maxRows - rowOffset;
         }
-      }
 
-      // Execute the fetch and update this resultset.
-      connection.getQueryExecutor()
-          .fetch(cursor, new CursorResultHandler(), fetchRows, adaptiveFetch);
+        if (!updateable) {
+            throw new PSQLException(GT.tr("No eligible primary or unique key found for table {0}.", tableName),
+                    PSQLState.INVALID_CURSOR_STATE);
+        }
 
-      // .fetch(...) could update this.cursor, and cursor==null means
-      // there are no more rows to fetch
-      closeRefCursor();
-
-      // After fetch, update last used fetch size (could be useful for adaptive fetch).
-      lastUsedFetchSize = fetchRows;
-
-      currentRow = 0;
-
-      // Test the new rows array.
-      if (rows == null || rows.isEmpty()) {
-        thisRow = null;
-        rowBuffer = null;
-        return false;
-      }
-    } else {
-      currentRow++;
+        return updateable;
     }
 
-    initRowBuffer();
-    return true;
-  }
+    /**
+     * Update the adaptive fetch cache when the adaptive fetch state of this
+     * ResultSet changes. The AdaptiveFetchCache must be updated so it can collect the maximum
+     * result row length for this query, which is used to compute the adaptive fetch size.
+     *
+     * @param newAdaptiveFetch new state of adaptive fetch
+     */
+    private void updateQueryInsideAdaptiveFetchCache(boolean newAdaptiveFetch) {
+        if (Objects.nonNull(cursor)) {
+            ResultCursor resultCursor = cursor;
+            if (!this.adaptiveFetch && newAdaptiveFetch) {
+                // If we are here, that means we want to be added to adaptive fetch.
+                connection.getQueryExecutor().addQueryToAdaptiveFetchCache(true, resultCursor);
+            }
 
-  @Override
-  public void close() throws SQLException {
-    try {
-      closeInternally();
-    } finally {
-      ((PgStatement) statement).checkCompletion();
+            if (this.adaptiveFetch && !newAdaptiveFetch && Objects.nonNull(cursor)) {
+                // If we are here, that means we want to be removed from adaptive fetch.
+                connection.getQueryExecutor().removeQueryFromAdaptiveFetchCache(true, resultCursor);
+            }
+        }
     }
-  }
 
-  /*
+    /**
+     * Get state of adaptive fetch for resultSet.
+     *
+     * @return state of adaptive fetch (turned on or off)
+     * @throws SQLException exception returned if ResultSet is closed
+     */
+    public boolean getAdaptiveFetch() throws SQLException {
+        checkClosed();
+        return adaptiveFetch;
+    }
+
+    /**
+     * Turn on/off adaptive fetch for ResultSet.
+     *
+     * @param adaptiveFetch desired state of adaptive fetch.
+     * @throws SQLException exception returned if ResultSet is closed
+     */
+    public void setAdaptiveFetch(boolean adaptiveFetch) throws SQLException {
+        checkClosed();
+        updateQueryInsideAdaptiveFetchCache(adaptiveFetch);
+        this.adaptiveFetch = adaptiveFetch;
+    }
+
+    private void parseQuery() {
+        Query originalQuery = this.originalQuery;
+        if (originalQuery == null) {
+            return;
+        }
+        String sql = originalQuery.toString(null);
+        StringTokenizer st = new StringTokenizer(sql, " \r\t\n");
+        boolean tableFound = false;
+        boolean tablesChecked = false;
+        String name = "";
+
+        singleTable = true;
+
+        while (!tableFound && !tablesChecked && st.hasMoreTokens()) {
+            name = st.nextToken();
+            if ("from".equalsIgnoreCase(name)) {
+                tableName = st.nextToken();
+                if ("only".equalsIgnoreCase(tableName)) {
+                    tableName = st.nextToken();
+                    onlyTable = "ONLY ";
+                }
+                tableFound = true;
+            }
+        }
+    }
+
+    private void setRowBufferColumn(Tuple rowBuffer,
+                                    int columnIndex, Object valueObject) throws SQLException {
+        if (valueObject instanceof PGobject) {
+            String value = ((PGobject) valueObject).getValue();
+            rowBuffer.set(columnIndex, value == null ? null : connection.encodeString(value));
+        } else {
+            if (valueObject == null) {
+                rowBuffer.set(columnIndex, null);
+                return;
+            }
+            switch (getSQLType(columnIndex + 1)) {
+
+                // boolean needs to be formatted as t or f instead of true or false
+                case Types.BIT:
+                case Types.BOOLEAN:
+                    rowBuffer.set(columnIndex, connection
+                            .encodeString((Boolean) valueObject ? "t" : "f"));
+                    break;
+                //
+                // toString() isn't enough for date and time types; we must format it correctly
+                // or we won't be able to re-parse it.
+                //
+                case Types.DATE:
+                    rowBuffer.set(columnIndex, connection
+                            .encodeString(
+                                    getTimestampUtils().toString(
+                                            getDefaultCalendar(), (Date) valueObject)));
+                    break;
+
+                case Types.TIME:
+                    rowBuffer.set(columnIndex, connection
+                            .encodeString(
+                                    getTimestampUtils().toString(
+                                            getDefaultCalendar(), (Time) valueObject)));
+                    break;
+
+                case Types.TIMESTAMP:
+                    rowBuffer.set(columnIndex, connection.encodeString(
+                            getTimestampUtils().toString(
+                                    getDefaultCalendar(), (Timestamp) valueObject)));
+                    break;
+
+                case Types.NULL:
+                    // Should never happen?
+                    break;
+
+                case Types.BINARY:
+                case Types.LONGVARBINARY:
+                case Types.VARBINARY:
+                    if (isBinary(columnIndex + 1)) {
+                        rowBuffer.set(columnIndex, (byte[]) valueObject);
+                    } else {
+                        try {
+                            rowBuffer.set(columnIndex,
+                                    PGbytea.toPGString((byte[]) valueObject).getBytes(connection.getEncoding().name()));
+                        } catch (UnsupportedEncodingException e) {
+                            throw new PSQLException(
+                                    GT.tr("The JVM claims not to support the encoding: {0}", connection.getEncoding().name()),
+                                    PSQLState.UNEXPECTED_ERROR, e);
+                        }
+                    }
+                    break;
+
+                default:
+                    rowBuffer.set(columnIndex, connection.encodeString(String.valueOf(valueObject)));
+                    break;
+            }
+
+        }
+    }
+
+    private void updateRowBuffer(PreparedStatement insertStatement,
+                                 Tuple rowBuffer, HashMap<String, Object> updateValues) throws SQLException {
+        for (Map.Entry<String, Object> entry : updateValues.entrySet()) {
+            int columnIndex = findColumn(entry.getKey()) - 1;
+            Object valueObject = entry.getValue();
+            setRowBufferColumn(rowBuffer, columnIndex, valueObject);
+        }
+
+        if (insertStatement == null) {
+            return;
+        }
+        final ResultSet generatedKeys = insertStatement.getGeneratedKeys();
+        try {
+            generatedKeys.next();
+
+            List<PrimaryKey> primaryKeys = this.primaryKeys;
+            int numKeys = primaryKeys.size();
+
+            for (int i = 0; i < numKeys; i++) {
+                final PrimaryKey key = primaryKeys.get(i);
+                int columnIndex = key.index - 1;
+                Object valueObject = generatedKeys.getObject(key.name);
+                setRowBufferColumn(rowBuffer, columnIndex, valueObject);
+            }
+        } finally {
+            generatedKeys.close();
+        }
+    }
+
+    public BaseStatement getPGStatement() {
+        return statement;
+    }
+
+    @Override
+    @SuppressWarnings("deprecation")
+    public String getRefCursor() {
+        // Can't check this because the PGRefCursorResultSet
+        // interface doesn't allow throwing a SQLException
+        //
+        // checkClosed();
+        return refCursorName;
+    }
+
+    private void setRefCursor(String refCursorName) {
+        this.refCursorName = refCursorName;
+    }
+
+    @Override
+    public int getFetchSize() throws SQLException {
+        checkClosed();
+        if (adaptiveFetch) {
+            return lastUsedFetchSize;
+        } else {
+            return fetchSize;
+        }
+    }
+
+    @Override
+    public void setFetchSize(int rows) throws SQLException {
+        checkClosed();
+        if (rows < 0) {
+            throw new PSQLException(GT.tr("Fetch size must be a value greater to or equal to 0."),
+                    PSQLState.INVALID_PARAMETER_VALUE);
+        }
+        fetchSize = rows;
+    }
+
+    /**
+     * Get the fetch size used during the most recent fetch. The returned value can be
+     * useful when adaptive fetch is enabled.
+     *
+     * @return fetch size used during last fetch.
+     * @throws SQLException exception returned if ResultSet is closed
+     */
+    public int getLastUsedFetchSize() throws SQLException {
+        checkClosed();
+        return lastUsedFetchSize;
+    }
+
+    @Override
+    public boolean next() throws SQLException {
+        checkClosed();
+
+        if (onInsertRow) {
+            throw new PSQLException(GT.tr("Can''t use relative move methods while on the insert row."),
+                    PSQLState.INVALID_CURSOR_STATE);
+        }
+
+        if (currentRow + 1 >= rows.size()) {
+            ResultCursor cursor = this.cursor;
+            if (cursor == null || (maxRows > 0 && rowOffset + rows.size() >= maxRows)) {
+                currentRow = rows.size();
+                thisRow = null;
+                rowBuffer = null;
+                return false; // End of the resultset.
+            }
+
+            // Ask for some more data.
+            rowOffset += rows.size(); // We are discarding some data.
+
+            int fetchRows = fetchSize;
+            int adaptiveFetchRows = connection.getQueryExecutor()
+                    .getAdaptiveFetchSize(adaptiveFetch, cursor);
+
+            if (adaptiveFetchRows != -1) {
+                fetchRows = adaptiveFetchRows;
+            }
+
+            if (maxRows != 0) {
+                if (fetchRows == 0 || rowOffset + fetchRows > maxRows) {
+                    // Fetch would exceed maxRows, limit it.
+                    fetchRows = maxRows - rowOffset;
+                }
+            }
+
+            // Execute the fetch and update this resultset.
+            connection.getQueryExecutor()
+                    .fetch(cursor, new CursorResultHandler(), fetchRows, adaptiveFetch);
+
+            // .fetch(...) could update this.cursor, and cursor==null means
+            // there are no more rows to fetch
+            closeRefCursor();
+
+            // After fetch, update last used fetch size (could be useful for adaptive fetch).
+            lastUsedFetchSize = fetchRows;
+
+            currentRow = 0;
+
+            // Test the new rows array.
+            if (rows == null || rows.isEmpty()) {
+                thisRow = null;
+                rowBuffer = null;
+                return false;
+            }
+        } else {
+            currentRow++;
+        }
+
+        initRowBuffer();
+        return true;
+    }
+
+    @Override
+    public void close() throws SQLException {
+        try {
+            closeInternally();
+        } finally {
+            ((PgStatement) statement).checkCompletion();
+        }
+    }
+
+    /*
   used by PgStatement.closeForNextExecution to avoid
   closing the firstUnclosedResult twice.
   checkCompletion above modifies firstUnclosedResult
   fixes issue #684
    */
-  protected void closeInternally() throws SQLException {
-    // release resources held (memory for tuples)
-    rows = null;
-    JdbcBlackHole.close(deleteStatement);
-    deleteStatement = null;
-    if (cursor != null) {
-      cursor.close();
-      cursor = null;
-    }
-    closeRefCursor();
-  }
-
-  /**
-   * Closes {@code <unnamed portal 1>} if no more fetch calls expected ({@code cursor==null})
-   * @throws SQLException if portal close fails
-   */
-  private void closeRefCursor() throws SQLException {
-    String refCursorName = this.refCursorName;
-    if (refCursorName == null || cursor != null) {
-      return;
-    }
-    try {
-      if (connection.getTransactionState() == TransactionState.OPEN) {
-        StringBuilder sb = new StringBuilder("CLOSE ");
-        Utils.escapeIdentifier(sb, refCursorName);
-        connection.execSQLUpdate(sb.toString());
-      }
-    } finally {
-      this.refCursorName = null;
-    }
-  }
-
-  @Override
-  public boolean wasNull() throws SQLException {
-    checkClosed();
-    return wasNullFlag;
-  }
-
-  
-  @Override
-  public String getString(int columnIndex) throws SQLException {
-    connection.getLogger().log(Level.FINEST, "  getString columnIndex: {0}", columnIndex);
-    byte[] value = getRawValue(columnIndex);
-    if (value == null) {
-      return null;
-    }
-
-    // varchar in binary is same as text, other binary fields are converted to their text format
-    if (isBinary(columnIndex) && getSQLType(columnIndex) != Types.VARCHAR) {
-      Field field = fields[columnIndex - 1];
-      TimestampUtils ts = getTimestampUtils();
-      // internalGetObject is used in getObject(int), so we can't easily alter the returned type
-      // Currently, internalGetObject delegates to getTime(), getTimestamp(), so it has issues
-      // with timezone conversions.
-      // However, as we know the explicit oids, we can do a better job here
-      switch (field.getOID()) {
-        case Oid.TIME:
-          return ts.toString(ts.toLocalTimeBin(value));
-        case Oid.TIMETZ:
-          return ts.toStringOffsetTimeBin(value);
-        case Oid.DATE:
-          return ts.toString(ts.toLocalDateBin(value));
-        case Oid.TIMESTAMP:
-          return ts.toString(ts.toLocalDateTimeBin(value));
-        case Oid.TIMESTAMPTZ:
-          return ts.toStringOffsetDateTime(value);
-      }
-      Object obj = internalGetObject(columnIndex, field);
-      if (obj == null) {
-        // internalGetObject() knows jdbc-types and some extra like hstore. It does not know of
-        // PGobject based types like geometric types but getObject does
-        obj = getObject(columnIndex);
-        if (obj == null) {
-          return null;
+    protected void closeInternally() throws SQLException {
+        // release resources held (memory for tuples)
+        rows = null;
+        JdbcBlackHole.close(deleteStatement);
+        deleteStatement = null;
+        if (cursor != null) {
+            cursor.close();
+            cursor = null;
         }
-        return obj.toString();
-      }
-      if ("hstore".equals(getPGType(columnIndex))) {
-        return HStoreConverter.toString((Map<?, ?>) obj);
-      }
-      return trimString(columnIndex, obj.toString());
+        closeRefCursor();
     }
 
-    Encoding encoding = connection.getEncoding();
-    try {
-      return trimString(columnIndex, encoding.decode(value));
-    } catch (IOException ioe) {
-      throw new PSQLException(
-          GT.tr(
-              "Invalid character data was found.  This is most likely caused by stored data containing characters that are invalid for the character set the database was created in.  The most common example of this is storing 8bit data in a SQL_ASCII database."),
-          PSQLState.DATA_ERROR, ioe);
-    }
-  }
-
-  /**
-   * <p>Retrieves the value of the designated column in the current row of this <code>ResultSet</code>
-   * object as a <code>boolean</code> in the Java programming language.</p>
-   *
-   * <p>If the designated column has a Character datatype and is one of the following values: "1",
-   * "true", "t", "yes", "y" or "on", a value of <code>true</code> is returned. If the designated
-   * column has a Character datatype and is one of the following values: "0", "false", "f", "no",
-   * "n" or "off", a value of <code>false</code> is returned. Leading or trailing whitespace is
-   * ignored, and case does not matter.</p>
-   *
-   * <p>If the designated column has a Numeric datatype and is a 1, a value of <code>true</code> is
-   * returned. If the designated column has a Numeric datatype and is a 0, a value of
-   * <code>false</code> is returned.</p>
-   *
-   * @param columnIndex the first column is 1, the second is 2, ...
-   * @return the column value; if the value is SQL <code>NULL</code>, the value returned is
-   *         <code>false</code>
-   * @exception SQLException if the columnIndex is not valid; if a database access error occurs; if
-   *            this method is called on a closed result set or is an invalid cast to boolean type.
-   * @see <a href="https://www.postgresql.org/docs/current/static/datatype-boolean.html">PostgreSQL
-   *      Boolean Type</a>
-   */
-  
-  @Override
-  public boolean getBoolean(int columnIndex) throws SQLException {
-    connection.getLogger().log(Level.FINEST, "  getBoolean columnIndex: {0}", columnIndex);
-    byte[] value = getRawValue(columnIndex);
-    if (value == null) {
-      return false;
-    }
-
-    int col = columnIndex - 1;
-    if (Oid.BOOL == fields[col].getOID()) {
-      final byte[] v = value;
-      return (1 == v.length) && ((116 == v[0] && !isBinary(columnIndex)) || (1 == v[0] && isBinary(columnIndex))); // 116 = 't'
-    }
-
-    if (isBinary(columnIndex)) {
-      return BooleanTypeUtil.castToBoolean(readDoubleValue(value, fields[col].getOID(), "boolean"));
-    }
-
-    String stringValue = getString(columnIndex);
-    return BooleanTypeUtil.castToBoolean(stringValue);
-  }
-
-  private static final BigInteger BYTEMAX = new BigInteger(Byte.toString(Byte.MAX_VALUE));
-  private static final BigInteger BYTEMIN = new BigInteger(Byte.toString(Byte.MIN_VALUE));
-
-  @Override
-  public byte getByte(int columnIndex) throws SQLException {
-    connection.getLogger().log(Level.FINEST, "  getByte columnIndex: {0}", columnIndex);
-    byte[] value = getRawValue(columnIndex);
-    if (value == null) {
-      return 0; // SQL NULL
-    }
-
-    if (isBinary(columnIndex)) {
-      int col = columnIndex - 1;
-      // there is no Oid for byte so must always do conversion from
-      // some other numeric type
-      return (byte) readLongValue(value, fields[col].getOID(), Byte.MIN_VALUE,
-          Byte.MAX_VALUE, "byte");
-    }
-
-    Encoding encoding = connection.getEncoding();
-    if (encoding.hasAsciiNumbers()) {
-      try {
-        return (byte) NumberParser.getFastLong(value, Byte.MIN_VALUE, Byte.MAX_VALUE);
-      } catch (NumberFormatException ignored) {
-      }
-    }
-
-    String s = getString(columnIndex);
-
-    if (s != null) {
-      s = s.trim();
-      if (s.isEmpty()) {
-        return 0;
-      }
-      try {
-        // try the optimal parse
-        return Byte.parseByte(s);
-      } catch (NumberFormatException e) {
-        // didn't work, assume the column is not a byte
+    /**
+     * Closes {@code <unnamed portal 1>} if no more fetch calls expected ({@code cursor==null})
+     *
+     * @throws SQLException if portal close fails
+     */
+    private void closeRefCursor() throws SQLException {
+        String refCursorName = this.refCursorName;
+        if (refCursorName == null || cursor != null) {
+            return;
+        }
         try {
-          BigDecimal n = new BigDecimal(s);
-          BigInteger i = n.toBigInteger();
-
-          int gt = i.compareTo(BYTEMAX);
-          int lt = i.compareTo(BYTEMIN);
-
-          if (gt > 0 || lt < 0) {
-            throw new PSQLException(GT.tr("Bad value for type {0} : {1}", "byte", s),
-                PSQLState.NUMERIC_VALUE_OUT_OF_RANGE);
-          }
-          return i.byteValue();
-        } catch (NumberFormatException ex) {
-          throw new PSQLException(GT.tr("Bad value for type {0} : {1}", "byte", s),
-              PSQLState.NUMERIC_VALUE_OUT_OF_RANGE);
+            if (connection.getTransactionState() == TransactionState.OPEN) {
+                StringBuilder sb = new StringBuilder("CLOSE ");
+                Utils.escapeIdentifier(sb, refCursorName);
+                connection.execSQLUpdate(sb.toString());
+            }
+        } finally {
+            this.refCursorName = null;
         }
-      }
-    }
-    return 0; // SQL NULL
-  }
-
-  @Override
-  public short getShort(int columnIndex) throws SQLException {
-    connection.getLogger().log(Level.FINEST, "  getShort columnIndex: {0}", columnIndex);
-    byte[] value = getRawValue(columnIndex);
-    if (value == null) {
-      return 0; // SQL NULL
-    }
-
-    if (isBinary(columnIndex)) {
-      int col = columnIndex - 1;
-      int oid = fields[col].getOID();
-      if (oid == Oid.INT2) {
-        return ByteConverter.int2(value, 0);
-      }
-      return (short) readLongValue(value, oid, Short.MIN_VALUE, Short.MAX_VALUE, "short");
-    }
-    Encoding encoding = connection.getEncoding();
-    if (encoding.hasAsciiNumbers()) {
-      try {
-        return (short) NumberParser.getFastLong(value, Short.MIN_VALUE, Short.MAX_VALUE);
-      } catch (NumberFormatException ignored) {
-      }
-    }
-    return toShort(getFixedString(columnIndex));
-  }
-
-  
-  @Override
-  public int getInt(int columnIndex) throws SQLException {
-    connection.getLogger().log(Level.FINEST, "  getInt columnIndex: {0}", columnIndex);
-    byte[] value = getRawValue(columnIndex);
-    if (value == null) {
-      return 0; // SQL NULL
-    }
-
-    if (isBinary(columnIndex)) {
-      int col = columnIndex - 1;
-      int oid = fields[col].getOID();
-      if (oid == Oid.INT4) {
-        return ByteConverter.int4(value, 0);
-      }
-      return (int) readLongValue(value, oid, Integer.MIN_VALUE, Integer.MAX_VALUE, "int");
-    }
-
-    Encoding encoding = connection.getEncoding();
-    if (encoding.hasAsciiNumbers()) {
-      try {
-        return (int) NumberParser.getFastLong(value, Integer.MIN_VALUE, Integer.MAX_VALUE);
-      } catch (NumberFormatException ignored) {
-      }
-    }
-    return toInt(getFixedString(columnIndex));
-  }
-
-  
-  @Override
-  public long getLong(int columnIndex) throws SQLException {
-    connection.getLogger().log(Level.FINEST, "  getLong columnIndex: {0}", columnIndex);
-    byte[] value = getRawValue(columnIndex);
-    if (value == null) {
-      return 0; // SQL NULL
-    }
-
-    if (isBinary(columnIndex)) {
-      int col = columnIndex - 1;
-      int oid = fields[col].getOID();
-      if (oid == Oid.INT8) {
-        return ByteConverter.int8(value, 0);
-      }
-      return readLongValue(value, oid, Long.MIN_VALUE, Long.MAX_VALUE, "long");
-    }
-
-    Encoding encoding = connection.getEncoding();
-    if (encoding.hasAsciiNumbers()) {
-      try {
-        return NumberParser.getFastLong(value, Long.MIN_VALUE, Long.MAX_VALUE);
-      } catch (NumberFormatException ignored) {
-      }
-    }
-    return toLong(getFixedString(columnIndex));
-  }
-
-  /**
-   * A dummy exception thrown when fast byte[] to number parsing fails and no value can be returned.
-   * The exact stack trace does not matter because the exception is always caught and is not visible
-   * to users.
-   */
-  private static final NumberFormatException FAST_NUMBER_FAILED = new NumberFormatException() {
-
-    // Override fillInStackTrace to prevent memory leak via Throwable.backtrace hidden field
-    // The field is not observable via reflection, however when throwable contains stacktrace, it
-    // does
-    // hold strong references to user objects (e.g. classes -> classloaders), thus it might lead to
-    // OutOfMemory conditions.
-    @Override
-    public Throwable fillInStackTrace() {
-      return this;
-    }
-  };
-
-  /**
-   * Optimised byte[] to number parser. This code does not handle null values, so the caller must do
-   * checkResultSet and handle null values prior to calling this function.
-   *
-   * @param bytes integer represented as a sequence of ASCII bytes
-   * @return The parsed number.
-   * @throws NumberFormatException If the number is invalid or the out of range for fast parsing.
-   *         The value must then be parsed by {@link #toBigDecimal(String, int)}.
-   */
-  private BigDecimal getFastBigDecimal(byte[] bytes) throws NumberFormatException {
-    if (bytes.length == 0) {
-      throw FAST_NUMBER_FAILED;
-    }
-
-    int scale = 0;
-    long val = 0;
-    int start;
-    boolean neg;
-    if (bytes[0] == '-') {
-      neg = true;
-      start = 1;
-      if (bytes.length == 1 || bytes.length > 19) {
-        throw FAST_NUMBER_FAILED;
-      }
-    } else {
-      start = 0;
-      neg = false;
-      if (bytes.length > 18) {
-        throw FAST_NUMBER_FAILED;
-      }
-    }
-
-    int periodsSeen = 0;
-    while (start < bytes.length) {
-      byte b = bytes[start++];
-      if (b < '0' || b > '9') {
-        if (b == '.' && periodsSeen == 0) {
-          scale = bytes.length - start;
-          periodsSeen++;
-          continue;
-        } else {
-          throw FAST_NUMBER_FAILED;
-        }
-      }
-      val *= 10;
-      val += b - '0';
-    }
-
-    int numNonSignChars = neg ? bytes.length - 1 : bytes.length;
-    if (periodsSeen > 1 || periodsSeen == numNonSignChars) {
-      throw FAST_NUMBER_FAILED;
-    }
-
-    if (neg) {
-      val = -val;
-    }
-
-    return BigDecimal.valueOf(val, scale);
-  }
-
-  
-  @Override
-  public float getFloat(int columnIndex) throws SQLException {
-    connection.getLogger().log(Level.FINEST, "  getFloat columnIndex: {0}", columnIndex);
-    byte[] value = getRawValue(columnIndex);
-    if (value == null) {
-      return 0; // SQL NULL
-    }
-
-    if (isBinary(columnIndex)) {
-      int col = columnIndex - 1;
-      int oid = fields[col].getOID();
-      if (oid == Oid.FLOAT4) {
-        return ByteConverter.float4(value, 0);
-      }
-      return (float) readDoubleValue(value, oid, "float");
-    }
-
-    return toFloat(getFixedString(columnIndex));
-  }
-
-  
-  @Override
-  public double getDouble(int columnIndex) throws SQLException {
-    connection.getLogger().log(Level.FINEST, "  getDouble columnIndex: {0}", columnIndex);
-    byte[] value = getRawValue(columnIndex);
-    if (value == null) {
-      return 0; // SQL NULL
-    }
-
-    if (isBinary(columnIndex)) {
-      int col = columnIndex - 1;
-      int oid = fields[col].getOID();
-      if (oid == Oid.FLOAT8) {
-        return ByteConverter.float8(value, 0);
-      }
-      return readDoubleValue(value, oid, "double");
-    }
-
-    return toDouble(getFixedString(columnIndex));
-  }
-
-  @Override
-  @SuppressWarnings("deprecation")
-  public BigDecimal getBigDecimal(
-      int columnIndex, int scale) throws SQLException {
-    connection.getLogger().log(Level.FINEST, "  getBigDecimal columnIndex: {0}", columnIndex);
-    return (BigDecimal) getNumeric(columnIndex, scale, false);
-  }
-
-  
-  private Number getNumeric(
-      int columnIndex, int scale, boolean allowNaN) throws SQLException {
-    byte[] value = getRawValue(columnIndex);
-    if (value == null) {
-      return null;
-    }
-
-    if (isBinary(columnIndex)) {
-      int sqlType = getSQLType(columnIndex);
-      if (sqlType != Types.NUMERIC && sqlType != Types.DECIMAL) {
-        Object obj = internalGetObject(columnIndex, fields[columnIndex - 1]);
-        if (obj == null) {
-          return null;
-        }
-        if (obj instanceof Long || obj instanceof Integer || obj instanceof Byte) {
-          BigDecimal res = BigDecimal.valueOf(((Number) obj).longValue());
-          res = scaleBigDecimal(res, scale);
-          return res;
-        }
-        return toBigDecimal(trimMoney(String.valueOf(obj)), scale);
-      } else {
-        Number num = ByteConverter.numeric(value);
-        if (allowNaN && Double.isNaN(num.doubleValue())) {
-          return Double.NaN;
-        }
-
-        return num;
-      }
-    }
-
-    Encoding encoding = connection.getEncoding();
-    if (encoding.hasAsciiNumbers()) {
-      try {
-        BigDecimal res = getFastBigDecimal(value);
-        res = scaleBigDecimal(res, scale);
-        return res;
-      } catch (NumberFormatException ignore) {
-      }
-    }
-
-    String stringValue = getFixedString(columnIndex);
-    if (allowNaN && "NaN".equalsIgnoreCase(stringValue)) {
-      return Double.NaN;
-    }
-    return toBigDecimal(stringValue, scale);
-  }
-
-  /**
-   * {@inheritDoc}
-   *
-   * <p>In normal use, the bytes represent the raw values returned by the backend. However, if the
-   * column is an OID, then it is assumed to refer to a Large Object, and that object is returned as
-   * a byte array.</p>
-   *
-   * <p><b>Be warned</b> If the large object is huge, then you may run out of memory.</p>
-   */
-  
-  @Override
-  public byte [] getBytes(int columnIndex) throws SQLException {
-    connection.getLogger().log(Level.FINEST, "  getBytes columnIndex: {0}", columnIndex);
-    byte[] value = getRawValue(columnIndex);
-    if (value == null) {
-      return null;
-    }
-
-    if (isBinary(columnIndex)) {
-      // If the data is already binary then just return it
-      return value;
-    }
-    if (fields[columnIndex - 1].getOID() == Oid.BYTEA) {
-      return trimBytes(columnIndex, PGbytea.toBytes(value));
-    } else {
-      return trimBytes(columnIndex, value);
-    }
-  }
-
-  @Override
-  
-  public Date getDate(int columnIndex) throws SQLException {
-    connection.getLogger().log(Level.FINEST, "  getDate columnIndex: {0}", columnIndex);
-    return getDate(columnIndex, null);
-  }
-
-  @Override
-  
-  public Time getTime(int columnIndex) throws SQLException {
-    connection.getLogger().log(Level.FINEST, "  getTime columnIndex: {0}", columnIndex);
-    return getTime(columnIndex, null);
-  }
-
-  @Override
-  
-  public Timestamp getTimestamp(int columnIndex) throws SQLException {
-    connection.getLogger().log(Level.FINEST, "  getTimestamp columnIndex: {0}", columnIndex);
-    return getTimestamp(columnIndex, null);
-  }
-
-  @Override
-  
-  public InputStream getAsciiStream(int columnIndex) throws SQLException {
-    connection.getLogger().log(Level.FINEST, "  getAsciiStream columnIndex: {0}", columnIndex);
-    byte[] value = getRawValue(columnIndex);
-    if (value == null) {
-      return null;
-    }
-
-    // Version 7.2 supports AsciiStream for all the PG text types
-    // As the spec/javadoc for this method indicate this is to be used for
-    // large text values (i.e. LONGVARCHAR) PG doesn't have a separate
-    // long string datatype, but with toast the text datatype is capable of
-    // handling very large values. Thus the implementation ends up calling
-    // getString() since there is no current way to stream the value from the server
-    String stringValue = getString(columnIndex);
-    return new ByteArrayInputStream(stringValue.getBytes(StandardCharsets.US_ASCII));
-  }
-
-  @Override
-  
-  @SuppressWarnings("deprecation")
-  public InputStream getUnicodeStream(int columnIndex) throws SQLException {
-    connection.getLogger().log(Level.FINEST, "  getUnicodeStream columnIndex: {0}", columnIndex);
-    byte[] value = getRawValue(columnIndex);
-    if (value == null) {
-      return null;
-    }
-
-    // Version 7.2 supports AsciiStream for all the PG text types
-    // As the spec/javadoc for this method indicate this is to be used for
-    // large text values (i.e. LONGVARCHAR) PG doesn't have a separate
-    // long string datatype, but with toast the text datatype is capable of
-    // handling very large values. Thus the implementation ends up calling
-    // getString() since there is no current way to stream the value from the server
-    String stringValue = getString(columnIndex);
-    return new ByteArrayInputStream(stringValue.getBytes(StandardCharsets.UTF_8));
-  }
-
-  @Override
-  
-  public InputStream getBinaryStream(int columnIndex) throws SQLException {
-    connection.getLogger().log(Level.FINEST, "  getBinaryStream columnIndex: {0}", columnIndex);
-    byte[] value = getRawValue(columnIndex);
-    if (value == null) {
-      return null;
-    }
-
-    // Version 7.2 supports BinaryStream for all PG bytea type
-    // As the spec/javadoc for this method indicate this is to be used for
-    // large binary values (i.e. LONGVARBINARY) PG doesn't have a separate
-    // long binary datatype, but with toast the bytea datatype is capable of
-    // handling very large values. Thus the implementation ends up calling
-    // getBytes() since there is no current way to stream the value from the server
-    byte[] b = getBytes(columnIndex);
-    if (b != null) {
-      return new ByteArrayInputStream(b);
-    }
-    return null;
-  }
-
-  @Override
-  
-  public String getString(String columnName) throws SQLException {
-    return getString(findColumn(columnName));
-  }
-
-  
-  @Override
-  public boolean getBoolean(String columnName) throws SQLException {
-    return getBoolean(findColumn(columnName));
-  }
-
-  @Override
-  
-  public byte getByte(String columnName) throws SQLException {
-    return getByte(findColumn(columnName));
-  }
-
-  @Override
-  
-  public short getShort(String columnName) throws SQLException {
-    return getShort(findColumn(columnName));
-  }
-
-  @Override
-  
-  public int getInt(String columnName) throws SQLException {
-    return getInt(findColumn(columnName));
-  }
-
-  @Override
-  
-  public long getLong(String columnName) throws SQLException {
-    return getLong(findColumn(columnName));
-  }
-
-  @Override
-  
-  public float getFloat(String columnName) throws SQLException {
-    return getFloat(findColumn(columnName));
-  }
-
-  @Override
-  
-  public double getDouble(String columnName) throws SQLException {
-    return getDouble(findColumn(columnName));
-  }
-
-  @Override
-  
-  @SuppressWarnings("deprecation")
-  public BigDecimal getBigDecimal(String columnName, int scale) throws SQLException {
-    return getBigDecimal(findColumn(columnName), scale);
-  }
-
-  @Override
-  
-  public byte [] getBytes(String columnName) throws SQLException {
-    return getBytes(findColumn(columnName));
-  }
-
-  @Override
-  
-  public Date getDate(String columnName) throws SQLException {
-    return getDate(findColumn(columnName), null);
-  }
-
-  @Override
-  
-  public Time getTime(String columnName) throws SQLException {
-    return getTime(findColumn(columnName), null);
-  }
-
-  @Override
-  
-  public Timestamp getTimestamp(String columnName) throws SQLException {
-    return getTimestamp(findColumn(columnName), null);
-  }
-
-  @Override
-  
-  public InputStream getAsciiStream(String columnName) throws SQLException {
-    return getAsciiStream(findColumn(columnName));
-  }
-
-  @Override
-  
-  @SuppressWarnings("deprecation")
-  public InputStream getUnicodeStream(String columnName) throws SQLException {
-    return getUnicodeStream(findColumn(columnName));
-  }
-
-  @Override
-  
-  public InputStream getBinaryStream(String columnName) throws SQLException {
-    return getBinaryStream(findColumn(columnName));
-  }
-
-  @Override
-  
-  public SQLWarning getWarnings() throws SQLException {
-    checkClosed();
-    return warnings;
-  }
-
-  @Override
-  public void clearWarnings() throws SQLException {
-    checkClosed();
-    warnings = null;
-  }
-
-  protected void addWarning(SQLWarning warnings) {
-    if (this.warnings != null) {
-      this.warnings.setNextWarning(warnings);
-    } else {
-      this.warnings = warnings;
-    }
-  }
-
-  @Override
-  public String getCursorName() throws SQLException {
-    checkClosed();
-    return null;
-  }
-
-  @Override
-  public Object getObject(int columnIndex) throws SQLException {
-    connection.getLogger().log(Level.FINEST, "  getObject columnIndex: {0}", columnIndex);
-    Field field;
-
-    byte[] value = getRawValue(columnIndex);
-    if (value == null) {
-      return null;
-    }
-
-    field = fields[columnIndex - 1];
-
-    // some fields can be null, mainly from those returned by MetaData methods
-    if (field == null) {
-      wasNullFlag = true;
-      return null;
-    }
-
-    Object result = internalGetObject(columnIndex, field);
-    if (result != null) {
-      return result;
-    }
-
-    if (isBinary(columnIndex)) {
-      return connection.getObject(getPGType(columnIndex), null, value);
-    }
-    String stringValue = getString(columnIndex);
-    return connection.getObject(getPGType(columnIndex), stringValue, null);
-  }
-
-  @Override
-  public Object getObject(String columnName) throws SQLException {
-    return getObject(findColumn(columnName));
-  }
-
-  @Override
-  public int findColumn(String columnName) throws SQLException {
-    checkClosed();
-
-    int col = findColumnIndex(columnName);
-    if (col == 0) {
-      throw new PSQLException(
-          GT.tr("The column name {0} was not found in this ResultSet.", columnName),
-          PSQLState.UNDEFINED_COLUMN);
-    }
-    return col;
-  }
-
-  public static Map<String, Integer> createColumnNameIndexMap(Field[] fields,
-      boolean isSanitiserDisabled) {
-    Map<String, Integer> columnNameIndexMap = new HashMap<>(fields.length * 2);
-    // The JDBC spec says when you have duplicate columns names,
-    // the first one should be returned. So load the map in
-    // reverse order so the first ones will overwrite later ones.
-    for (int i = fields.length - 1; i >= 0; i--) {
-      String columnLabel = fields[i].getColumnLabel();
-      if (isSanitiserDisabled) {
-        columnNameIndexMap.put(columnLabel, i + 1);
-      } else {
-        columnNameIndexMap.put(columnLabel.toLowerCase(Locale.US), i + 1);
-      }
-    }
-    return columnNameIndexMap;
-  }
-
-  private int findColumnIndex(String columnName) {
-    if (columnNameIndexMap == null) {
-      if (originalQuery != null) {
-        columnNameIndexMap = originalQuery.getResultSetColumnNameIndexMap();
-      }
-      if (columnNameIndexMap == null) {
-        columnNameIndexMap = createColumnNameIndexMap(fields, connection.isColumnSanitiserDisabled());
-      }
-    }
-
-    Integer index = columnNameIndexMap.get(columnName);
-    if (index != null) {
-      return index;
-    }
-
-    index = columnNameIndexMap.get(columnName.toLowerCase(Locale.US));
-    if (index != null) {
-      columnNameIndexMap.put(columnName, index);
-      return index;
-    }
-
-    index = columnNameIndexMap.get(columnName.toUpperCase(Locale.US));
-    if (index != null) {
-      columnNameIndexMap.put(columnName, index);
-      return index;
-    }
-
-    return 0;
-  }
-
-  /**
-   * Returns the OID of a field. It is used internally by the driver.
-   *
-   * @param field field index
-   * @return OID of a field
-   */
-  public int getColumnOID(int field) {
-    return fields[field - 1].getOID();
-  }
-
-  /**
-   * <p>This is used to fix get*() methods on Money fields. It should only be used by those methods!</p>
-   *
-   * <p>It converts ($##.##) to -##.## and $##.## to ##.##</p>
-   *
-   * @param col column position (1-based)
-   * @return numeric-parsable representation of money string literal
-   * @throws SQLException if something wrong happens
-   */
-  public String getFixedString(int col) throws SQLException {
-    String stringValue = getString(col);
-    return trimMoney(stringValue);
-  }
-
-  private String trimMoney(String s) {
-    if (s == null) {
-      return null;
-    }
-
-    // if we don't have at least 2 characters it can't be money.
-    if (s.length() < 2) {
-      return s;
-    }
-
-    // Handle Money
-    char ch = s.charAt(0);
-
-    // optimise for non-money type: return immediately with one check
-    // if the first char cannot be '(', '$' or '-'
-    if (ch > '-') {
-      return s;
-    }
-
-    if (ch == '(') {
-      s = "-" + PGtokenizer.removePara(s).substring(1);
-    } else if (ch == '$') {
-      s = s.substring(1);
-    } else if (ch == '-' && s.charAt(1) == '$') {
-      s = "-" + s.substring(2);
-    }
-
-    return s;
-  }
-
-  
-  protected String getPGType(int column) throws SQLException {
-    Field field = fields[column - 1];
-    initSqlType(field);
-    return field.getPGType();
-  }
-
-  
-  protected int getSQLType(int column) throws SQLException {
-    Field field = fields[column - 1];
-    initSqlType(field);
-    return field.getSQLType();
-  }
-
-  
-  private void initSqlType(Field field) throws SQLException {
-    if (field.isTypeInitialized()) {
-      return;
-    }
-    TypeInfo typeInfo = connection.getTypeInfo();
-    int oid = field.getOID();
-    String pgType = typeInfo.getPGType(oid);
-    int sqlType = typeInfo.getSQLType(pgType);
-    field.setSQLType(sqlType);
-    field.setPGType(pgType);
-  }
-
-  private void checkUpdateable() throws SQLException {
-    checkClosed();
-
-    if (!isUpdateable()) {
-      throw new PSQLException(
-          GT.tr(
-              "ResultSet is not updateable.  The query that generated this result set must select only one table, and must select all primary keys from that table. See the JDBC 2.1 API Specification, section 5.6 for more details."),
-          PSQLState.INVALID_CURSOR_STATE);
-    }
-
-    if (updateValues == null) {
-      // allow every column to be updated without a rehash.
-      updateValues = new HashMap<>((int) (fields.length / 0.75), 0.75f);
-    }
-  }
-
-  
-  
-  protected void checkClosed() throws SQLException {
-    if (rows == null) {
-      throw new PSQLException(GT.tr("This ResultSet is closed."), PSQLState.OBJECT_NOT_IN_STATE);
-    }
-  }
-
-  /*
-   * for jdbc3 to call internally
-   */
-  protected boolean isResultSetClosed() {
-    return rows == null;
-  }
-
-  
-  protected void checkColumnIndex(int column) throws SQLException {
-    if (column < 1 || column > fields.length) {
-      throw new PSQLException(
-          GT.tr("The column index is out of range: {0}, number of columns: {1}.",
-              column, fields.length),
-          PSQLState.INVALID_PARAMETER_VALUE);
-    }
-  }
-
-  /**
-   * Checks that the result set is not closed, it's positioned on a valid row and that the given
-   * column number is valid. Also updates the {@link #wasNullFlag} to correct value.
-   *
-   * @param column The column number to check. Range starts from 1.
-   * @return raw value or null
-   * @throws SQLException If state or column is invalid.
-   */
-  protected byte [] getRawValue(int column) throws SQLException {
-    checkClosed();
-    if (thisRow == null) {
-      throw new PSQLException(
-          GT.tr("ResultSet not positioned properly, perhaps you need to call next."),
-          PSQLState.INVALID_CURSOR_STATE);
-    }
-    checkColumnIndex(column);
-    byte[] bytes = thisRow.get(column - 1);
-    wasNullFlag = bytes == null;
-    return bytes;
-  }
-
-  /**
-   * Returns true if the value of the given column is in binary format.
-   *
-   * @param column The column to check. Range starts from 1.
-   * @return True if the column is in binary format.
-   */
-  
-  protected boolean isBinary(int column) {
-    return fields[column - 1].getFormat() == Field.BINARY_FORMAT;
-  }
-
-  // ----------------- Formatting Methods -------------------
-
-  private static final BigInteger SHORTMAX = new BigInteger(Short.toString(Short.MAX_VALUE));
-  private static final BigInteger SHORTMIN = new BigInteger(Short.toString(Short.MIN_VALUE));
-
-  public static short toShort(String s) throws SQLException {
-    if (s != null) {
-      try {
-        s = s.trim();
-        return Short.parseShort(s);
-      } catch (NumberFormatException e) {
-        try {
-          BigDecimal n = new BigDecimal(s);
-          BigInteger i = n.toBigInteger();
-          int gt = i.compareTo(SHORTMAX);
-          int lt = i.compareTo(SHORTMIN);
-
-          if (gt > 0 || lt < 0) {
-            throw new PSQLException(GT.tr("Bad value for type {0} : {1}", "short", s),
-                PSQLState.NUMERIC_VALUE_OUT_OF_RANGE);
-          }
-          return i.shortValue();
-
-        } catch (NumberFormatException ne) {
-          throw new PSQLException(GT.tr("Bad value for type {0} : {1}", "short", s),
-              PSQLState.NUMERIC_VALUE_OUT_OF_RANGE);
-        }
-      }
-    }
-    return 0; // SQL NULL
-  }
-
-  private static final BigInteger INTMAX = new BigInteger(Integer.toString(Integer.MAX_VALUE));
-  private static final BigInteger INTMIN = new BigInteger(Integer.toString(Integer.MIN_VALUE));
-
-  public static int toInt(String s) throws SQLException {
-    if (s != null) {
-      try {
-        s = s.trim();
-        return Integer.parseInt(s);
-      } catch (NumberFormatException e) {
-        try {
-          BigDecimal n = new BigDecimal(s);
-          BigInteger i = n.toBigInteger();
-
-          int gt = i.compareTo(INTMAX);
-          int lt = i.compareTo(INTMIN);
-
-          if (gt > 0 || lt < 0) {
-            throw new PSQLException(GT.tr("Bad value for type {0} : {1}", "int", s),
-                PSQLState.NUMERIC_VALUE_OUT_OF_RANGE);
-          }
-          return i.intValue();
-
-        } catch (NumberFormatException ne) {
-          throw new PSQLException(GT.tr("Bad value for type {0} : {1}", "int", s),
-              PSQLState.NUMERIC_VALUE_OUT_OF_RANGE);
-        }
-      }
-    }
-    return 0; // SQL NULL
-  }
-
-  private static final BigInteger LONGMAX = new BigInteger(Long.toString(Long.MAX_VALUE));
-  private static final BigInteger LONGMIN = new BigInteger(Long.toString(Long.MIN_VALUE));
-
-  public static long toLong(String s) throws SQLException {
-    if (s != null) {
-      try {
-        s = s.trim();
-        return Long.parseLong(s);
-      } catch (NumberFormatException e) {
-        try {
-          BigDecimal n = new BigDecimal(s);
-          BigInteger i = n.toBigInteger();
-          int gt = i.compareTo(LONGMAX);
-          int lt = i.compareTo(LONGMIN);
-
-          if (gt > 0 || lt < 0) {
-            throw new PSQLException(GT.tr("Bad value for type {0} : {1}", "long", s),
-                PSQLState.NUMERIC_VALUE_OUT_OF_RANGE);
-          }
-          return i.longValue();
-        } catch (NumberFormatException ne) {
-          throw new PSQLException(GT.tr("Bad value for type {0} : {1}", "long", s),
-              PSQLState.NUMERIC_VALUE_OUT_OF_RANGE);
-        }
-      }
-    }
-    return 0; // SQL NULL
-  }
-
-  public static BigDecimal toBigDecimal(String s) throws SQLException {
-    if (s == null) {
-      return null;
-    }
-    try {
-      s = s.trim();
-      return new BigDecimal(s);
-    } catch (NumberFormatException e) {
-      throw new PSQLException(GT.tr("Bad value for type {0} : {1}", "BigDecimal", s),
-          PSQLState.NUMERIC_VALUE_OUT_OF_RANGE);
-    }
-  }
-
-  public BigDecimal toBigDecimal(String s, int scale) throws SQLException {
-    if (s == null) {
-      return null;
-    }
-    BigDecimal val = toBigDecimal(s);
-    return scaleBigDecimal(val, scale);
-  }
-
-  private BigDecimal scaleBigDecimal(BigDecimal val, int scale) throws PSQLException {
-    if (scale == -1) {
-      return val;
-    }
-    try {
-      return val.setScale(scale);
-    } catch (ArithmeticException e) {
-      throw new PSQLException(
-          GT.tr("Bad value for type {0} : {1}", "BigDecimal", val),
-          PSQLState.NUMERIC_VALUE_OUT_OF_RANGE);
-    }
-  }
-
-  public static float toFloat(String s) throws SQLException {
-    if (s != null) {
-      try {
-        s = s.trim();
-        return Float.parseFloat(s);
-      } catch (NumberFormatException e) {
-        throw new PSQLException(GT.tr("Bad value for type {0} : {1}", "float", s),
-            PSQLState.NUMERIC_VALUE_OUT_OF_RANGE);
-      }
-    }
-    return 0; // SQL NULL
-  }
-
-  public static double toDouble(String s) throws SQLException {
-    if (s != null) {
-      try {
-        s = s.trim();
-        return Double.parseDouble(s);
-      } catch (NumberFormatException e) {
-        throw new PSQLException(GT.tr("Bad value for type {0} : {1}", "double", s),
-            PSQLState.NUMERIC_VALUE_OUT_OF_RANGE);
-      }
-    }
-    return 0; // SQL NULL
-  }
-
-  private void initRowBuffer() {
-    thisRow = rows.get(currentRow);
-    // We only need a copy of the current row if we're going to
-    // modify it via an updatable resultset.
-    if (resultsetconcurrency == ResultSet.CONCUR_UPDATABLE) {
-      rowBuffer = thisRow.updateableCopy();
-    } else {
-      rowBuffer = null;
-    }
-  }
-
-  private boolean isColumnTrimmable(int columnIndex) throws SQLException {
-    switch (getSQLType(columnIndex)) {
-      case Types.CHAR:
-      case Types.VARCHAR:
-      case Types.LONGVARCHAR:
-      case Types.BINARY:
-      case Types.VARBINARY:
-      case Types.LONGVARBINARY:
-        return true;
-    }
-    return false;
-  }
-
-  private byte[] trimBytes(int columnIndex, byte[] bytes) throws SQLException {
-    // we need to trim if maxsize is set and the length is greater than maxsize and the
-    // type of this column is a candidate for trimming
-    if (maxFieldSize > 0 && bytes.length > maxFieldSize && isColumnTrimmable(columnIndex)) {
-      byte[] newBytes = new byte[maxFieldSize];
-      System.arraycopy(bytes, 0, newBytes, 0, maxFieldSize);
-      return newBytes;
-    } else {
-      return bytes;
-    }
-  }
-
-  private String trimString(int columnIndex, String string) throws SQLException {
-    // we need to trim if maxsize is set and the length is greater than maxsize and the
-    // type of this column is a candidate for trimming
-    if (maxFieldSize > 0 && string.length() > maxFieldSize && isColumnTrimmable(columnIndex)) {
-      return string.substring(0, maxFieldSize);
-    } else {
-      return string;
-    }
-  }
-
-  /**
-   * Converts any numeric binary field to double value. This method does no overflow checking.
-   *
-   * @param bytes The bytes of the numeric field.
-   * @param oid The oid of the field.
-   * @param targetType The target type. Used for error reporting.
-   * @return The value as double.
-   * @throws PSQLException If the field type is not supported numeric type.
-   */
-  private double readDoubleValue(byte[] bytes, int oid, String targetType) throws PSQLException {
-    // currently implemented binary encoded fields
-    switch (oid) {
-      case Oid.INT2:
-        return ByteConverter.int2(bytes, 0);
-      case Oid.INT4:
-        return ByteConverter.int4(bytes, 0);
-      case Oid.INT8:
-        // might not fit but there still should be no overflow checking
-        return ByteConverter.int8(bytes, 0);
-      case Oid.FLOAT4:
-        return ByteConverter.float4(bytes, 0);
-      case Oid.FLOAT8:
-        return ByteConverter.float8(bytes, 0);
-      case Oid.NUMERIC:
-        return ByteConverter.numeric(bytes).doubleValue();
-    }
-    throw new PSQLException(GT.tr("Cannot convert the column of type {0} to requested type {1}.",
-        Oid.toString(oid), targetType), PSQLState.DATA_TYPE_MISMATCH);
-  }
-
-  private static final float LONG_MAX_FLOAT = StrictMath.nextDown(Long.MAX_VALUE);
-  private static final float LONG_MIN_FLOAT = StrictMath.nextUp(Long.MIN_VALUE);
-  private static final double LONG_MAX_DOUBLE = StrictMath.nextDown((double) Long.MAX_VALUE);
-  private static final double LONG_MIN_DOUBLE = StrictMath.nextUp((double) Long.MIN_VALUE);
-
-  /**
-   * <p>Converts any numeric binary field to long value.</p>
-   *
-   * <p>This method is used by getByte,getShort,getInt and getLong. It must support a subset of the
-   * following java types that use Binary encoding. (fields that use text encoding use a different
-   * code path).
-   *
-   * <code>byte,short,int,long,float,double,BigDecimal,boolean,string</code>.
-   * </p>
-   *
-   * @param bytes The bytes of the numeric field.
-   * @param oid The oid of the field.
-   * @param minVal the minimum value allowed.
-   * @param maxVal the maximum value allowed.
-   * @param targetType The target type. Used for error reporting.
-   * @return The value as long.
-   * @throws PSQLException If the field type is not supported numeric type or if the value is out of
-   *         range.
-   */
-  
-  private long readLongValue(byte[] bytes, int oid, long minVal, long maxVal, String targetType)
-      throws PSQLException {
-    long val;
-    // currently implemented binary encoded fields
-    switch (oid) {
-      case Oid.INT2:
-        val = ByteConverter.int2(bytes, 0);
-        break;
-      case Oid.INT4:
-        val = ByteConverter.int4(bytes, 0);
-        break;
-      case Oid.INT8:
-        val = ByteConverter.int8(bytes, 0);
-        break;
-      case Oid.FLOAT4:
-        float f = ByteConverter.float4(bytes, 0);
-        // for float values we know to be within values of long, just cast directly to long
-        if (f <= LONG_MAX_FLOAT && f >= LONG_MIN_FLOAT) {
-          val = (long) f;
-        } else {
-          throw new PSQLException(GT.tr("Bad value for type {0} : {1}", targetType, f),
-              PSQLState.NUMERIC_VALUE_OUT_OF_RANGE);
-        }
-        break;
-      case Oid.FLOAT8:
-        double d = ByteConverter.float8(bytes, 0);
-        // for double values within the values of a long, just directly cast to long
-        if (d <= LONG_MAX_DOUBLE && d >= LONG_MIN_DOUBLE) {
-          val = (long) d;
-        } else {
-          throw new PSQLException(GT.tr("Bad value for type {0} : {1}", targetType, d),
-              PSQLState.NUMERIC_VALUE_OUT_OF_RANGE);
-        }
-        break;
-      case Oid.NUMERIC:
-        Number num = ByteConverter.numeric(bytes);
-        BigInteger i = ((BigDecimal) num).toBigInteger();
-        int gt = i.compareTo(LONGMAX);
-        int lt = i.compareTo(LONGMIN);
-
-        if (gt > 0 || lt < 0) {
-          throw new PSQLException(GT.tr("Bad value for type {0} : {1}", "long", num),
-              PSQLState.NUMERIC_VALUE_OUT_OF_RANGE);
-        } else {
-          val = num.longValue();
-        }
-        break;
-      default:
-        throw new PSQLException(
-            GT.tr("Cannot convert the column of type {0} to requested type {1}.",
-                Oid.toString(oid), targetType),
-            PSQLState.DATA_TYPE_MISMATCH);
-    }
-    if (val < minVal || val > maxVal) {
-      throw new PSQLException(GT.tr("Bad value for type {0} : {1}", targetType, val),
-          PSQLState.NUMERIC_VALUE_OUT_OF_RANGE);
-    }
-    return val;
-  }
-
-  protected void updateValue(int columnIndex, Object value) throws SQLException {
-    checkUpdateable();
-
-    if (!onInsertRow && (isBeforeFirst() || isAfterLast() || rows.isEmpty())) {
-      throw new PSQLException(
-          GT.tr(
-              "Cannot update the ResultSet because it is either before the start or after the end of the results."),
-          PSQLState.INVALID_CURSOR_STATE);
-    }
-
-    checkColumnIndex(columnIndex);
-
-    doingUpdates = !onInsertRow;
-    if (value == null) {
-      updateNull(columnIndex);
-    } else {
-      PGResultSetMetaData md = (PGResultSetMetaData) getMetaData();
-      updateValues.put(md.getBaseColumnName(columnIndex), value);
-    }
-  }
-
-  
-  protected Object getUUID(String data) throws SQLException {
-    UUID uuid;
-    try {
-      uuid = UUID.fromString(data);
-    } catch (IllegalArgumentException iae) {
-      throw new PSQLException(GT.tr("Invalid UUID data."), PSQLState.INVALID_PARAMETER_VALUE, iae);
-    }
-
-    return uuid;
-  }
-
-  
-  protected Object getUUID(byte[] data) throws SQLException {
-    return new UUID(ByteConverter.int8(data, 0), ByteConverter.int8(data, 8));
-  }
-
-  private class PrimaryKey {
-    int index; // where in the result set is this primaryKey
-    String name; // what is the columnName of this primary Key
-
-    PrimaryKey(int index, String name) {
-      this.index = index;
-      this.name = name;
-    }
-
-    Object getValue() throws SQLException {
-      return getObject(index);
-    }
-  }
-
-  //
-  // We need to specify the type of NULL when updating a column to NULL, so
-  // NullObject is a simple extension of PGobject that always returns null
-  // values but retains column type info.
-  //
-
-  @SuppressWarnings("serial")
-  static class NullObject extends PGobject {
-    NullObject(String type) {
-      this.type = type;
     }
 
     @Override
-    public String getValue() {
-      return null;
+    public boolean wasNull() throws SQLException {
+        checkClosed();
+        return wasNullFlag;
     }
-  }
 
-  /**
-   * Used to add rows to an already existing ResultSet that exactly match the existing rows.
-   * Currently only used for assembling generated keys from batch statement execution.
-   */
-  void addRows(List<Tuple> tuples) {
-    rows.addAll(tuples);
-  }
+    @Override
+    public String getString(int columnIndex) throws SQLException {
+        connection.getLogger().log(Level.FINEST, "  getString columnIndex: {0}", columnIndex);
+        byte[] value = getRawValue(columnIndex);
+        if (value == null) {
+            return null;
+        }
 
-  @Override
-  public void updateRef(int columnIndex, Ref x) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "updateRef(int,Ref)");
-  }
+        // varchar in binary is same as text, other binary fields are converted to their text format
+        if (isBinary(columnIndex) && getSQLType(columnIndex) != Types.VARCHAR) {
+            Field field = fields[columnIndex - 1];
+            TimestampUtils ts = getTimestampUtils();
+            // internalGetObject is used in getObject(int), so we can't easily alter the returned type
+            // Currently, internalGetObject delegates to getTime(), getTimestamp(), so it has issues
+            // with timezone conversions.
+            // However, as we know the explicit oids, we can do a better job here
+            switch (field.getOID()) {
+                case Oid.TIME:
+                    return ts.toString(ts.toLocalTimeBin(value));
+                case Oid.TIMETZ:
+                    return ts.toStringOffsetTimeBin(value);
+                case Oid.DATE:
+                    return ts.toString(ts.toLocalDateBin(value));
+                case Oid.TIMESTAMP:
+                    return ts.toString(ts.toLocalDateTimeBin(value));
+                case Oid.TIMESTAMPTZ:
+                    return ts.toStringOffsetDateTime(value);
+            }
+            Object obj = internalGetObject(columnIndex, field);
+            if (obj == null) {
+                // internalGetObject() knows jdbc-types and some extra like hstore. It does not know of
+                // PGobject based types like geometric types but getObject does
+                obj = getObject(columnIndex);
+                if (obj == null) {
+                    return null;
+                }
+                return obj.toString();
+            }
+            if ("hstore".equals(getPGType(columnIndex))) {
+                return HStoreConverter.toString((Map<?, ?>) obj);
+            }
+            return trimString(columnIndex, obj.toString());
+        }
 
-  @Override
-  public void updateRef(String columnName, Ref x) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "updateRef(String,Ref)");
-  }
-
-  @Override
-  public void updateBlob(int columnIndex, Blob x) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "updateBlob(int,Blob)");
-  }
-
-  @Override
-  public void updateBlob(String columnName, Blob x) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "updateBlob(String,Blob)");
-  }
-
-  @Override
-  public void updateClob(int columnIndex, Clob x) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "updateClob(int,Clob)");
-  }
-
-  @Override
-  public void updateClob(String columnName, Clob x) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "updateClob(String,Clob)");
-  }
-
-  @Override
-  public void updateArray(int columnIndex, Array x) throws SQLException {
-    updateObject(columnIndex, x);
-  }
-
-  @Override
-  public void updateArray(String columnName, Array x) throws SQLException {
-    updateArray(findColumn(columnName), x);
-  }
-
-  @Override
-  public <T> T getObject(int columnIndex, Class<T> type) throws SQLException {
-    if (type == null) {
-      throw new SQLException("type is null");
+        Encoding encoding = connection.getEncoding();
+        try {
+            return trimString(columnIndex, encoding.decode(value));
+        } catch (IOException ioe) {
+            throw new PSQLException(
+                    GT.tr(
+                            "Invalid character data was found.  This is most likely caused by stored data containing characters that are invalid for the character set the database was created in.  The most common example of this is storing 8bit data in a SQL_ASCII database."),
+                    PSQLState.DATA_ERROR, ioe);
+        }
     }
-    int sqlType = getSQLType(columnIndex);
-    if (type == BigDecimal.class) {
-      if (sqlType == Types.NUMERIC || sqlType == Types.DECIMAL) {
-        return type.cast(getBigDecimal(columnIndex));
-      } else {
-        throw new PSQLException(GT.tr("conversion to {0} from {1} not supported", type, getPGType(columnIndex)),
-                PSQLState.INVALID_PARAMETER_VALUE);
-      }
-    } else if (type == String.class) {
-      if (sqlType == Types.CHAR || sqlType == Types.VARCHAR) {
-        return type.cast(getString(columnIndex));
-      } else {
-        throw new PSQLException(GT.tr("conversion to {0} from {1} not supported", type, getPGType(columnIndex)),
-                PSQLState.INVALID_PARAMETER_VALUE);
-      }
-    } else if (type == Boolean.class) {
-      if (sqlType == Types.BOOLEAN || sqlType == Types.BIT) {
-        boolean booleanValue = getBoolean(columnIndex);
-        if (wasNull()) {
-          return null;
+
+    /**
+     * <p>Retrieves the value of the designated column in the current row of this <code>ResultSet</code>
+     * object as a <code>boolean</code> in the Java programming language.</p>
+     *
+     * <p>If the designated column has a Character datatype and is one of the following values: "1",
+     * "true", "t", "yes", "y" or "on", a value of <code>true</code> is returned. If the designated
+     * column has a Character datatype and is one of the following values: "0", "false", "f", "no",
+     * "n" or "off", a value of <code>false</code> is returned. Leading or trailing whitespace is
+     * ignored, and case does not matter.</p>
+     *
+     * <p>If the designated column has a Numeric datatype and is a 1, a value of <code>true</code> is
+     * returned. If the designated column has a Numeric datatype and is a 0, a value of
+     * <code>false</code> is returned.</p>
+     *
+     * @param columnIndex the first column is 1, the second is 2, ...
+     * @return the column value; if the value is SQL <code>NULL</code>, the value returned is
+     * <code>false</code>
+     * @throws SQLException if the columnIndex is not valid; if a database access error occurs; if
+     *                      this method is called on a closed result set or is an invalid cast to boolean type.
+     * @see <a href="https://www.postgresql.org/docs/current/static/datatype-boolean.html">PostgreSQL
+     * Boolean Type</a>
+     */
+
+    @Override
+    public boolean getBoolean(int columnIndex) throws SQLException {
+        connection.getLogger().log(Level.FINEST, "  getBoolean columnIndex: {0}", columnIndex);
+        byte[] value = getRawValue(columnIndex);
+        if (value == null) {
+            return false;
        }
-        return type.cast(booleanValue);
-      } else {
-        throw new PSQLException(GT.tr("conversion to {0} from {1} not supported", type, getPGType(columnIndex)),
-                PSQLState.INVALID_PARAMETER_VALUE);
-      }
-    } else if (type == Short.class) {
-      if (sqlType == Types.SMALLINT) {
-        short shortValue = getShort(columnIndex);
-        if (wasNull()) {
-          return null;
+
+        // Fast path for native bool columns: text format is 't'/'f', binary is a single 1/0 byte.
+        int col = columnIndex - 1;
+        if (Oid.BOOL == fields[col].getOID()) {
+            final byte[] v = value;
+            return (1 == v.length) && ((116 == v[0] && !isBinary(columnIndex)) || (1 == v[0] && isBinary(columnIndex))); // 116 = 't'
        }
-        return type.cast(shortValue);
-      } else {
-        throw new PSQLException(GT.tr("conversion to {0} from {1} not supported", type, getPGType(columnIndex)),
-                PSQLState.INVALID_PARAMETER_VALUE);
-      }
-    } else if (type == Integer.class) {
-      if (sqlType == Types.INTEGER || sqlType == Types.SMALLINT) {
-        int intValue = getInt(columnIndex);
-        if (wasNull()) {
-          return null;
+
+        // Binary-format non-bool column: convert the numeric value per the rules documented above.
+        if (isBinary(columnIndex)) {
+            return BooleanTypeUtil.castToBoolean(readDoubleValue(value, fields[col].getOID(), "boolean"));
        }
-        return type.cast(intValue);
-      } else {
-        throw new PSQLException(GT.tr("conversion to {0} from {1} not supported", type, getPGType(columnIndex)),
-                PSQLState.INVALID_PARAMETER_VALUE);
-      }
-    } else if (type == Long.class) {
-      if (sqlType == Types.BIGINT) {
-        long longValue = getLong(columnIndex);
-        if (wasNull()) {
-          return null;
+
+        // Text fallback: parse the character forms ("1"/"true"/"t"/"yes"/"y"/"on", etc.).
+        String stringValue = getString(columnIndex);
+        return BooleanTypeUtil.castToBoolean(stringValue);
+    }
+
+    @Override
+    public byte getByte(int columnIndex) throws SQLException {
+        connection.getLogger().log(Level.FINEST, "  getByte columnIndex: {0}", columnIndex);
+        byte[] value = getRawValue(columnIndex);
+        if (value == null) {
+            return 0; // SQL NULL
         }
-        return type.cast(longValue);
-      } else {
-        throw new PSQLException(GT.tr("conversion to {0} from {1} not supported", type, getPGType(columnIndex)),
-                PSQLState.INVALID_PARAMETER_VALUE);
-      }
-    } else if (type == BigInteger.class) {
-      if (sqlType == Types.BIGINT) {
-        long longValue = getLong(columnIndex);
-        if (wasNull()) {
-          return null;
+
+        if (isBinary(columnIndex)) {
+            int col = columnIndex - 1;
+            // there is no Oid for byte so must always do conversion from
+            // some other numeric type
+            return (byte) readLongValue(value, fields[col].getOID(), Byte.MIN_VALUE,
+                    Byte.MAX_VALUE, "byte");
         }
-        return type.cast(BigInteger.valueOf(longValue));
-      } else {
-        throw new PSQLException(GT.tr("conversion to {0} from {1} not supported", type, getPGType(columnIndex)),
-                PSQLState.INVALID_PARAMETER_VALUE);
-      }
-    } else if (type == Float.class) {
-      if (sqlType == Types.REAL) {
-        float floatValue = getFloat(columnIndex);
-        if (wasNull()) {
-          return null;
+
+        Encoding encoding = connection.getEncoding();
+        if (encoding.hasAsciiNumbers()) {
+            try {
+                return (byte) NumberParser.getFastLong(value, Byte.MIN_VALUE, Byte.MAX_VALUE);
+            } catch (NumberFormatException ignored) {
+            }
         }
-        return type.cast(floatValue);
-      } else {
-        throw new PSQLException(GT.tr("conversion to {0} from {1} not supported", type, getPGType(columnIndex)),
-                PSQLState.INVALID_PARAMETER_VALUE);
-      }
-    } else if (type == Double.class) {
-      if (sqlType == Types.FLOAT || sqlType == Types.DOUBLE) {
-        double doubleValue = getDouble(columnIndex);
-        if (wasNull()) {
-          return null;
+
+        String s = getString(columnIndex);
+
+        if (s != null) {
+            s = s.trim();
+            if (s.isEmpty()) {
+                return 0;
+            }
+            try {
+                // try the optimal parse
+                return Byte.parseByte(s);
+            } catch (NumberFormatException e) {
+                // didn't work, assume the column is not a byte
+                try {
+                    BigDecimal n = new BigDecimal(s);
+                    BigInteger i = n.toBigInteger();
+
+                    int gt = i.compareTo(BYTEMAX);
+                    int lt = i.compareTo(BYTEMIN);
+
+                    if (gt > 0 || lt < 0) {
+                        throw new PSQLException(GT.tr("Bad value for type {0} : {1}", "byte", s),
+                                PSQLState.NUMERIC_VALUE_OUT_OF_RANGE);
+                    }
+                    return i.byteValue();
+                } catch (NumberFormatException ex) {
+                    throw new PSQLException(GT.tr("Bad value for type {0} : {1}", "byte", s),
+                            PSQLState.NUMERIC_VALUE_OUT_OF_RANGE);
+                }
+            }
         }
-        return type.cast(doubleValue);
-      } else {
-        throw new PSQLException(GT.tr("conversion to {0} from {1} not supported", type, getPGType(columnIndex)),
-                PSQLState.INVALID_PARAMETER_VALUE);
-      }
-    } else if (type == Date.class) {
-      if (sqlType == Types.DATE) {
-        return type.cast(getDate(columnIndex));
-      } else {
-        throw new PSQLException(GT.tr("conversion to {0} from {1} not supported", type, getPGType(columnIndex)),
-                PSQLState.INVALID_PARAMETER_VALUE);
-      }
-    } else if (type == Time.class) {
-      if (sqlType == Types.TIME) {
-        return type.cast(getTime(columnIndex));
-      } else {
-        throw new PSQLException(GT.tr("conversion to {0} from {1} not supported", type, getPGType(columnIndex)),
-                PSQLState.INVALID_PARAMETER_VALUE);
-      }
-    } else if (type == Timestamp.class) {
-      if (sqlType == Types.TIMESTAMP
-              || sqlType == Types.TIMESTAMP_WITH_TIMEZONE
-      ) {
-        return type.cast(getTimestamp(columnIndex));
-      } else {
-        throw new PSQLException(GT.tr("conversion to {0} from {1} not supported", type, getPGType(columnIndex)),
-                PSQLState.INVALID_PARAMETER_VALUE);
-      }
-    } else if (type == Calendar.class) {
-      if (sqlType == Types.TIMESTAMP
-              || sqlType == Types.TIMESTAMP_WITH_TIMEZONE
-      ) {
-        Timestamp timestampValue = getTimestamp(columnIndex);
-        if (timestampValue == null) {
-          return null;
+        return 0; // SQL NULL
+    }
+
+    @Override
+    public short getShort(int columnIndex) throws SQLException {
+        connection.getLogger().log(Level.FINEST, "  getShort columnIndex: {0}", columnIndex);
+        byte[] value = getRawValue(columnIndex);
+        if (value == null) {
+            return 0; // SQL NULL
         }
-        Calendar calendar = Calendar.getInstance(getDefaultCalendar().getTimeZone());
-        calendar.setTimeInMillis(timestampValue.getTime());
-        return type.cast(calendar);
-      } else {
-        throw new PSQLException(GT.tr("conversion to {0} from {1} not supported", type, getPGType(columnIndex)),
-                PSQLState.INVALID_PARAMETER_VALUE);
-      }
-    } else if (type == Blob.class) {
-      if (sqlType == Types.BLOB || sqlType == Types.BINARY || sqlType == Types.BIGINT) {
-        return type.cast(getBlob(columnIndex));
-      } else {
-        throw new PSQLException(GT.tr("conversion to {0} from {1} not supported", type, getPGType(columnIndex)),
-                PSQLState.INVALID_PARAMETER_VALUE);
-      }
-    } else if (type == Clob.class) {
-      if (sqlType == Types.CLOB || sqlType == Types.BIGINT) {
-        return type.cast(getClob(columnIndex));
-      } else {
-        throw new PSQLException(GT.tr("conversion to {0} from {1} not supported", type, getPGType(columnIndex)),
-                PSQLState.INVALID_PARAMETER_VALUE);
-      }
-    } else if (type == java.util.Date.class) {
-      if (sqlType == Types.TIMESTAMP) {
-        Timestamp timestamp = getTimestamp(columnIndex);
-        if (timestamp == null) {
-          return null;
+
+        if (isBinary(columnIndex)) {
+            int col = columnIndex - 1;
+            int oid = fields[col].getOID();
+            if (oid == Oid.INT2) {
+                return ByteConverter.int2(value, 0);
+            }
+            return (short) readLongValue(value, oid, Short.MIN_VALUE, Short.MAX_VALUE, "short");
+        }
+        Encoding encoding = connection.getEncoding();
+        if (encoding.hasAsciiNumbers()) {
+            try {
+                return (short) NumberParser.getFastLong(value, Short.MIN_VALUE, Short.MAX_VALUE);
+            } catch (NumberFormatException ignored) {
+            }
+        }
+        return toShort(getFixedString(columnIndex));
+    }
+
+    /**
+     * Retrieves the value of the designated column as an {@code int}.
+     *
+     * @param columnIndex the first column is 1, the second is 2, ...
+     * @return the column value; 0 if the value is SQL {@code NULL}
+     * @throws SQLException if the column cannot be read or the value does not fit in an int
+     */
+    @Override
+    public int getInt(int columnIndex) throws SQLException {
+        connection.getLogger().log(Level.FINEST, "  getInt columnIndex: {0}", columnIndex);
+        byte[] value = getRawValue(columnIndex);
+        if (value == null) {
+            return 0; // SQL NULL
+        }
+
+        if (isBinary(columnIndex)) {
+            int col = columnIndex - 1;
+            int oid = fields[col].getOID();
+            if (oid == Oid.INT4) {
+                // Exact wire type: read the 4-byte value directly.
+                return ByteConverter.int4(value, 0);
+            }
+            // Other binary numeric types: range-checked conversion to int.
+            return (int) readLongValue(value, oid, Integer.MIN_VALUE, Integer.MAX_VALUE, "int");
+        }
+
+        Encoding encoding = connection.getEncoding();
+        if (encoding.hasAsciiNumbers()) {
+            try {
+                // Fast ASCII digit parse; fall through to the generic path on failure.
+                return (int) NumberParser.getFastLong(value, Integer.MIN_VALUE, Integer.MAX_VALUE);
+            } catch (NumberFormatException ignored) {
+            }
+        }
+        return toInt(getFixedString(columnIndex));
+    }
+
+    /**
+     * Retrieves the value of the designated column as a {@code long}.
+     *
+     * @param columnIndex the first column is 1, the second is 2, ...
+     * @return the column value; 0 if the value is SQL {@code NULL}
+     * @throws SQLException if the column cannot be read or the value does not fit in a long
+     */
+    @Override
+    public long getLong(int columnIndex) throws SQLException {
+        connection.getLogger().log(Level.FINEST, "  getLong columnIndex: {0}", columnIndex);
+        byte[] value = getRawValue(columnIndex);
+        if (value == null) {
+            return 0; // SQL NULL
+        }
+
+        if (isBinary(columnIndex)) {
+            int col = columnIndex - 1;
+            int oid = fields[col].getOID();
+            if (oid == Oid.INT8) {
+                // Exact wire type: read the 8-byte value directly.
+                return ByteConverter.int8(value, 0);
+            }
+            // Other binary numeric types: range-checked conversion to long.
+            return readLongValue(value, oid, Long.MIN_VALUE, Long.MAX_VALUE, "long");
+        }
+
+        Encoding encoding = connection.getEncoding();
+        if (encoding.hasAsciiNumbers()) {
+            try {
+                // Fast ASCII digit parse; fall through to the generic path on failure.
+                return NumberParser.getFastLong(value, Long.MIN_VALUE, Long.MAX_VALUE);
+            } catch (NumberFormatException ignored) {
+            }
+        }
+        return toLong(getFixedString(columnIndex));
+    }
+
+    /**
+     * Optimised byte[] to number parser. This code does not handle null values, so the caller must do
+     * checkResultSet and handle null values prior to calling this function.
+     *
+     * @param bytes integer represented as a sequence of ASCII bytes
+     * @return The parsed number.
+     * @throws NumberFormatException If the number is invalid or the out of range for fast parsing.
+     *                               The value must then be parsed by {@link #toBigDecimal(String, int)}.
+     */
+    private BigDecimal getFastBigDecimal(byte[] bytes) throws NumberFormatException {
+        if (bytes.length == 0) {
+            throw FAST_NUMBER_FAILED;
+        }
+
+        int scale = 0;
+        long val = 0;
+        int start;
+        boolean neg;
+        if (bytes[0] == '-') {
+            neg = true;
+            start = 1;
+            // Length caps keep the accumulated digits within a long, avoiding overflow checks
+            // inside the hot loop; longer inputs fall back to the slow BigDecimal parse.
+            if (bytes.length == 1 || bytes.length > 19) {
+                throw FAST_NUMBER_FAILED;
+            }
+        } else {
+            start = 0;
+            neg = false;
+            if (bytes.length > 18) {
+                throw FAST_NUMBER_FAILED;
+            }
+        }
+
+        int periodsSeen = 0;
+        while (start < bytes.length) {
+            byte b = bytes[start++];
+            if (b < '0' || b > '9') {
+                if (b == '.' && periodsSeen == 0) {
+                    // Scale is the number of characters remaining after the decimal point.
+                    scale = bytes.length - start;
+                    periodsSeen++;
+                    continue;
+                } else {
+                    // Any other non-digit (embedded sign, exponent, second '.') defeats the fast path.
+                    throw FAST_NUMBER_FAILED;
+                }
+            }
+            val *= 10;
+            val += b - '0';
+        }
+
+        int numNonSignChars = neg ? bytes.length - 1 : bytes.length;
+        // Reject inputs that contained no digits at all (a lone '.' or sign + '.').
+        if (periodsSeen > 1 || periodsSeen == numNonSignChars) {
+            throw FAST_NUMBER_FAILED;
+        }
+
+        if (neg) {
+            val = -val;
+        }
+
+        return BigDecimal.valueOf(val, scale);
+    }
+
+    /**
+     * Retrieves the value of the designated column as a {@code float}.
+     *
+     * @param columnIndex the first column is 1, the second is 2, ...
+     * @return the column value; 0 if the value is SQL {@code NULL}
+     * @throws SQLException if the column cannot be read or converted
+     */
+    @Override
+    public float getFloat(int columnIndex) throws SQLException {
+        connection.getLogger().log(Level.FINEST, "  getFloat columnIndex: {0}", columnIndex);
+        byte[] value = getRawValue(columnIndex);
+        if (value == null) {
+            return 0; // SQL NULL
+        }
+
+        if (isBinary(columnIndex)) {
+            int col = columnIndex - 1;
+            int oid = fields[col].getOID();
+            if (oid == Oid.FLOAT4) {
+                // Exact wire type: decode the 4-byte float directly.
+                return ByteConverter.float4(value, 0);
+            }
+            // Other binary numeric types: convert through double.
+            return (float) readDoubleValue(value, oid, "float");
+        }
+
+        return toFloat(getFixedString(columnIndex));
+    }
+
+    /**
+     * Retrieves the value of the designated column as a {@code double}.
+     *
+     * @param columnIndex the first column is 1, the second is 2, ...
+     * @return the column value; 0 if the value is SQL {@code NULL}
+     * @throws SQLException if the column cannot be read or converted
+     */
+    @Override
+    public double getDouble(int columnIndex) throws SQLException {
+        connection.getLogger().log(Level.FINEST, "  getDouble columnIndex: {0}", columnIndex);
+        byte[] value = getRawValue(columnIndex);
+        if (value == null) {
+            return 0; // SQL NULL
+        }
+
+        if (isBinary(columnIndex)) {
+            int col = columnIndex - 1;
+            int oid = fields[col].getOID();
+            if (oid == Oid.FLOAT8) {
+                // Exact wire type: decode the 8-byte float directly.
+                return ByteConverter.float8(value, 0);
+            }
+            return readDoubleValue(value, oid, "double");
+        }
+
+        return toDouble(getFixedString(columnIndex));
+    }
+
+    /**
+     * Retrieves the value of the designated column as a {@link BigDecimal} with the requested
+     * scale. This is the deprecated JDBC variant; it delegates to the shared numeric reader.
+     *
+     * @param columnIndex the first column is 1, the second is 2, ...
+     * @param scale the target scale forwarded to the numeric conversion
+     * @return the column value; {@code null} if the value is SQL {@code NULL}
+     * @throws SQLException if the column cannot be read or converted
+     */
+    @Override
+    @SuppressWarnings("deprecation")
+    public BigDecimal getBigDecimal(
+            int columnIndex, int scale) throws SQLException {
+        connection.getLogger().log(Level.FINEST, "  getBigDecimal columnIndex: {0}", columnIndex);
+        return (BigDecimal) getNumeric(columnIndex, scale, false);
+    }
+
+    /**
+     * Shared numeric reader backing the {@code getBigDecimal} variants.
+     *
+     * @param columnIndex the first column is 1, the second is 2, ...
+     * @param scale target scale forwarded to {@code scaleBigDecimal}
+     * @param allowNaN when true, a NaN value is returned as {@link Double#NaN} instead of
+     *                 failing the conversion
+     * @return the parsed {@link Number}; {@code null} if the value is SQL {@code NULL}
+     * @throws SQLException if the value cannot be converted to a number
+     */
+    private Number getNumeric(
+            int columnIndex, int scale, boolean allowNaN) throws SQLException {
+        byte[] value = getRawValue(columnIndex);
+        if (value == null) {
+            return null;
+        }
+
+        if (isBinary(columnIndex)) {
+            int sqlType = getSQLType(columnIndex);
+            if (sqlType != Types.NUMERIC && sqlType != Types.DECIMAL) {
+                // Binary but not a NUMERIC/DECIMAL column: materialize the typed object
+                // first, then convert it to a BigDecimal.
+                Object obj = internalGetObject(columnIndex, fields[columnIndex - 1]);
+                if (obj == null) {
+                    return null;
+                }
+                if (obj instanceof Long || obj instanceof Integer || obj instanceof Byte) {
+                    // Integral types convert exactly without a string round-trip.
+                    BigDecimal res = BigDecimal.valueOf(((Number) obj).longValue());
+                    res = scaleBigDecimal(res, scale);
+                    return res;
+                }
+                return toBigDecimal(trimMoney(String.valueOf(obj)), scale);
+            } else {
+                // Decode the binary NUMERIC wire format directly.
+                Number num = ByteConverter.numeric(value);
+                if (allowNaN && Double.isNaN(num.doubleValue())) {
+                    return Double.NaN;
+                }
+
+                return num;
+            }
+        }
+
+        Encoding encoding = connection.getEncoding();
+        if (encoding.hasAsciiNumbers()) {
+            try {
+                // Fast path for plain ASCII decimal text; falls through on any oddity.
+                BigDecimal res = getFastBigDecimal(value);
+                res = scaleBigDecimal(res, scale);
+                return res;
+            } catch (NumberFormatException ignore) {
+            }
+        }
+
+        String stringValue = getFixedString(columnIndex);
+        if (allowNaN && "NaN".equalsIgnoreCase(stringValue)) {
+            return Double.NaN;
+        }
+        return toBigDecimal(stringValue, scale);
+    }
+
+    /**
+     * {@inheritDoc}
+     *
+     * <p>In normal use, the bytes represent the raw values returned by the backend. However, if the
+     * column is an OID, then it is assumed to refer to a Large Object, and that object is returned as
+     * a byte array.</p>
+     *
+     * <p><b>Be warned</b> If the large object is huge, then you may run out of memory.</p>
+     */
+
+    @Override
+    public byte[] getBytes(int columnIndex) throws SQLException {
+        connection.getLogger().log(Level.FINEST, "  getBytes columnIndex: {0}", columnIndex);
+        byte[] value = getRawValue(columnIndex);
+        if (value == null) {
+            return null;
+        }
+
+        if (isBinary(columnIndex)) {
+            // If the data is already binary then just return it
+            return value;
+        }
+        // NOTE(review): despite the javadoc above, the visible code does not dereference OID
+        // columns as large objects — it only trims the raw bytes. Confirm against the full class.
+        if (fields[columnIndex - 1].getOID() == Oid.BYTEA) {
+            // Text-format bytea: decode the bytea text encoding before trimming.
+            return trimBytes(columnIndex, PGbytea.toBytes(value));
+        } else {
+            return trimBytes(columnIndex, value);
+        }
+    }
+
+    /**
+     * Retrieves the value of the designated column as a {@link Date}, using the driver's
+     * default calendar.
+     *
+     * @param columnIndex the first column is 1, the second is 2, ...
+     * @return the column value; {@code null} if the value is SQL {@code NULL}
+     * @throws SQLException if the column cannot be read or converted
+     */
+    @Override
+    public Date getDate(int columnIndex) throws SQLException {
+        connection.getLogger().log(Level.FINEST, "  getDate columnIndex: {0}", columnIndex);
+        // Delegate to the Calendar-taking overload; null selects the default calendar.
+        return getDate(columnIndex, null);
+    }
+
+    /**
+     * Retrieves the value of the designated column as a {@link Time}, using the driver's
+     * default calendar.
+     *
+     * @param columnIndex the first column is 1, the second is 2, ...
+     * @return the column value; {@code null} if the value is SQL {@code NULL}
+     * @throws SQLException if the column cannot be read or converted
+     */
+    @Override
+    public Time getTime(int columnIndex) throws SQLException {
+        connection.getLogger().log(Level.FINEST, "  getTime columnIndex: {0}", columnIndex);
+        // Delegate to the Calendar-taking overload; null selects the default calendar.
+        return getTime(columnIndex, null);
+    }
+
+    /**
+     * Retrieves the value of the designated column as a {@link Timestamp}, using the driver's
+     * default calendar.
+     *
+     * @param columnIndex the first column is 1, the second is 2, ...
+     * @return the column value; {@code null} if the value is SQL {@code NULL}
+     * @throws SQLException if the column cannot be read or converted
+     */
+    @Override
+    public Timestamp getTimestamp(int columnIndex) throws SQLException {
+        connection.getLogger().log(Level.FINEST, "  getTimestamp columnIndex: {0}", columnIndex);
+        // Delegate to the Calendar-taking overload; null selects the default calendar.
+        return getTimestamp(columnIndex, null);
+    }
+
+    /**
+     * Retrieves the value of the designated column as an ASCII stream.
+     *
+     * <p>PostgreSQL has no separate LONGVARCHAR type; with TOAST the text type can hold very
+     * large values. There is currently no way to stream the value from the server, so this
+     * materializes the whole value via {@code getString} and wraps it in a byte stream.</p>
+     *
+     * @param columnIndex the first column is 1, the second is 2, ...
+     * @return an ASCII input stream over the value; {@code null} if the value is SQL {@code NULL}
+     * @throws SQLException if the column cannot be read
+     */
+    @Override
+    public InputStream getAsciiStream(int columnIndex) throws SQLException {
+        connection.getLogger().log(Level.FINEST, "  getAsciiStream columnIndex: {0}", columnIndex);
+        byte[] value = getRawValue(columnIndex);
+        if (value == null) {
+            return null;
+        }
+
+        String stringValue = getString(columnIndex);
+        if (stringValue == null) {
+            // Defensive: getString is not expected to return null after the raw-value check,
+            // but avoid a latent NPE if it ever does.
+            return null;
+        }
+        return new ByteArrayInputStream(stringValue.getBytes(StandardCharsets.US_ASCII));
+    }
+
+    /**
+     * Retrieves the value of the designated column as a Unicode (UTF-8 encoded) stream.
+     *
+     * <p>PostgreSQL has no separate LONGVARCHAR type; with TOAST the text type can hold very
+     * large values. There is currently no way to stream the value from the server, so this
+     * materializes the whole value via {@code getString} and wraps it in a byte stream.</p>
+     *
+     * @param columnIndex the first column is 1, the second is 2, ...
+     * @return a UTF-8 input stream over the value; {@code null} if the value is SQL {@code NULL}
+     * @throws SQLException if the column cannot be read
+     * @deprecated per the JDBC specification; use {@code getCharacterStream} instead
+     */
+    @Override
+    @SuppressWarnings("deprecation")
+    public InputStream getUnicodeStream(int columnIndex) throws SQLException {
+        connection.getLogger().log(Level.FINEST, "  getUnicodeStream columnIndex: {0}", columnIndex);
+        byte[] value = getRawValue(columnIndex);
+        if (value == null) {
+            return null;
+        }
+
+        String stringValue = getString(columnIndex);
+        if (stringValue == null) {
+            // Defensive: getString is not expected to return null after the raw-value check,
+            // but avoid a latent NPE if it ever does.
+            return null;
+        }
+        return new ByteArrayInputStream(stringValue.getBytes(StandardCharsets.UTF_8));
+    }
+
+    @Override
+
+    public InputStream getBinaryStream(int columnIndex) throws SQLException {
+        connection.getLogger().log(Level.FINEST, "  getBinaryStream columnIndex: {0}", columnIndex);
+        byte[] value = getRawValue(columnIndex);
+        if (value == null) {
+            return null;
+        }
+
+        // Version 7.2 supports BinaryStream for all PG bytea type
+        // As the spec/javadoc for this method indicate this is to be used for
+        // large binary values (i.e. LONGVARBINARY) PG doesn't have a separate
+        // long binary datatype, but with toast the bytea datatype is capable of
+        // handling very large values. Thus the implementation ends up calling
+        // getBytes() since there is no current way to stream the value from the server
+        byte[] b = getBytes(columnIndex);
+        if (b != null) {
+            return new ByteArrayInputStream(b);
         }
-        return type.cast(new java.util.Date(timestamp.getTime()));
-      } else {
-        throw new PSQLException(GT.tr("conversion to {0} from {1} not supported", type, getPGType(columnIndex)),
-                PSQLState.INVALID_PARAMETER_VALUE);
-      }
-    } else if (type == Array.class) {
-      if (sqlType == Types.ARRAY) {
-        return type.cast(getArray(columnIndex));
-      } else {
-        throw new PSQLException(GT.tr("conversion to {0} from {1} not supported", type, getPGType(columnIndex)),
-                PSQLState.INVALID_PARAMETER_VALUE);
-      }
-    } else if (type == SQLXML.class) {
-      if (sqlType == Types.SQLXML) {
-        return type.cast(getSQLXML(columnIndex));
-      } else {
-        throw new PSQLException(GT.tr("conversion to {0} from {1} not supported", type, getPGType(columnIndex)),
-                PSQLState.INVALID_PARAMETER_VALUE);
-      }
-    } else if (type == UUID.class) {
-      return type.cast(getObject(columnIndex));
-    } else if (type == InetAddress.class) {
-      String inetText = getString(columnIndex);
-      if (inetText == null) {
         return null;
-      }
-      int slash = inetText.indexOf("/");
-      try {
-        return type.cast(InetAddress.getByName(slash < 0 ? inetText : inetText.substring(0, slash)));
-      } catch (UnknownHostException ex) {
-        throw new PSQLException(GT.tr("Invalid Inet data."), PSQLState.INVALID_PARAMETER_VALUE, ex);
-      }
-      // JSR-310 support
-    } else if (type == LocalDate.class) {
-      return type.cast(getLocalDate(columnIndex));
-    } else if (type == LocalTime.class) {
-      return type.cast(getLocalTime(columnIndex));
-    } else if (type == LocalDateTime.class) {
-      return type.cast(getLocalDateTime(columnIndex));
-    } else if (type == OffsetDateTime.class) {
-      return type.cast(getOffsetDateTime(columnIndex));
-    } else if (type == OffsetTime.class) {
-      return type.cast(getOffsetTime(columnIndex));
-    } else if (PGobject.class.isAssignableFrom(type)) {
-      Object object;
-      if (isBinary(columnIndex)) {
-        byte[] byteValue = thisRow.get(columnIndex - 1);
-        object = connection.getObject(getPGType(columnIndex), null, byteValue);
-      } else {
-        object = connection.getObject(getPGType(columnIndex), getString(columnIndex), null);
-      }
-      return type.cast(object);
-    }
-    throw new PSQLException(GT.tr("conversion to {0} from {1} not supported", type, getPGType(columnIndex)),
-            PSQLState.INVALID_PARAMETER_VALUE);
-  }
-
-  @Override
-  public <T> T getObject(String columnLabel, Class<T> type) throws SQLException {
-    return getObject(findColumn(columnLabel), type);
-  }
-
-  @Override
-  public Object getObject(String s, Map<String, Class<?>> map) throws SQLException {
-    return getObjectImpl(s, map);
-  }
-
-  @Override
-  public Object getObject(int i, Map<String, Class<?>> map) throws SQLException {
-    return getObjectImpl(i, map);
-  }
-
-  @Override
-  public void updateObject(int columnIndex, Object x, SQLType targetSqlType,
-      int scaleOrLength) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "updateObject");
-  }
-
-  @Override
-  public void updateObject(String columnLabel, Object x, SQLType targetSqlType,
-      int scaleOrLength) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "updateObject");
-  }
-
-  @Override
-  public void updateObject(int columnIndex, Object x, SQLType targetSqlType)
-      throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "updateObject");
-  }
-
-  @Override
-  public void updateObject(String columnLabel, Object x, SQLType targetSqlType)
-      throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "updateObject");
-  }
-
-  @Override
-  public RowId getRowId(int columnIndex) throws SQLException {
-    connection.getLogger().log(Level.FINEST, "  getRowId columnIndex: {0}", columnIndex);
-    throw Driver.notImplemented(this.getClass(), "getRowId(int)");
-  }
-
-  @Override
-  public RowId getRowId(String columnName) throws SQLException {
-    return getRowId(findColumn(columnName));
-  }
-
-  @Override
-  public void updateRowId(int columnIndex, RowId x) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "updateRowId(int, RowId)");
-  }
-
-  @Override
-  public void updateRowId(String columnName, RowId x) throws SQLException {
-    updateRowId(findColumn(columnName), x);
-  }
-
-  @Override
-  public int getHoldability() throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "getHoldability()");
-  }
-
-  @Override
-  public boolean isClosed() throws SQLException {
-    return rows == null;
-  }
-
-  @Override
-  public void updateNString(int columnIndex, String nString) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "updateNString(int, String)");
-  }
-
-  @Override
-  public void updateNString(String columnName, String nString) throws SQLException {
-    updateNString(findColumn(columnName), nString);
-  }
-
-  @Override
-  public void updateNClob(int columnIndex, NClob nClob) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "updateNClob(int, NClob)");
-  }
-
-  @Override
-  public void updateNClob(String columnName, NClob nClob) throws SQLException {
-    updateNClob(findColumn(columnName), nClob);
-  }
-
-  @Override
-  public void updateNClob(int columnIndex, Reader reader) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "updateNClob(int, Reader)");
-  }
-
-  @Override
-  public void updateNClob(String columnName, Reader reader) throws SQLException {
-    updateNClob(findColumn(columnName), reader);
-  }
-
-  @Override
-  public void updateNClob(int columnIndex, Reader reader, long length) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "updateNClob(int, Reader, long)");
-  }
-
-  @Override
-  public void updateNClob(String columnName, Reader reader, long length) throws SQLException {
-    updateNClob(findColumn(columnName), reader, length);
-  }
-
-  @Override
-  public NClob getNClob(int columnIndex) throws SQLException {
-    connection.getLogger().log(Level.FINEST, "  getNClob columnIndex: {0}", columnIndex);
-    throw Driver.notImplemented(this.getClass(), "getNClob(int)");
-  }
-
-  @Override
-  public NClob getNClob(String columnName) throws SQLException {
-    return getNClob(findColumn(columnName));
-  }
-
-  @Override
-  public void updateBlob(int columnIndex, InputStream inputStream, long length)
-      throws SQLException {
-    throw Driver.notImplemented(this.getClass(),
-        "updateBlob(int, InputStream, long)");
-  }
-
-  @Override
-  public void updateBlob(String columnName, InputStream inputStream, long length)
-      throws SQLException {
-    updateBlob(findColumn(columnName), inputStream, length);
-  }
-
-  @Override
-  public void updateBlob(int columnIndex, InputStream inputStream) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "updateBlob(int, InputStream)");
-  }
-
-  @Override
-  public void updateBlob(String columnName, InputStream inputStream) throws SQLException {
-    updateBlob(findColumn(columnName), inputStream);
-  }
-
-  @Override
-  public void updateClob(int columnIndex, Reader reader, long length) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "updateClob(int, Reader, long)");
-  }
-
-  @Override
-  public void updateClob(String columnName, Reader reader, long length) throws SQLException {
-    updateClob(findColumn(columnName), reader, length);
-  }
-
-  @Override
-  public void updateClob(int columnIndex, Reader reader) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "updateClob(int, Reader)");
-  }
-
-  @Override
-  public void updateClob(String columnName, Reader reader) throws SQLException {
-    updateClob(findColumn(columnName), reader);
-  }
-
-  @Override
-  
-  public SQLXML getSQLXML(int columnIndex) throws SQLException {
-    connection.getLogger().log(Level.FINEST, "  getSQLXML columnIndex: {0}", columnIndex);
-    String data = getString(columnIndex);
-    if (data == null) {
-      return null;
     }
 
-    return new PgSQLXML(connection, data);
-  }
+    @Override
 
-  @Override
-  public SQLXML getSQLXML(String columnName) throws SQLException {
-    return getSQLXML(findColumn(columnName));
-  }
-
-  @Override
-  public void updateSQLXML(int columnIndex, SQLXML xmlObject) throws SQLException {
-    updateValue(columnIndex, xmlObject);
-  }
-
-  @Override
-  public void updateSQLXML(String columnName, SQLXML xmlObject) throws SQLException {
-    updateSQLXML(findColumn(columnName), xmlObject);
-  }
-
-  @Override
-  public String getNString(int columnIndex) throws SQLException {
-    connection.getLogger().log(Level.FINEST, "  getNString columnIndex: {0}", columnIndex);
-    throw Driver.notImplemented(this.getClass(), "getNString(int)");
-  }
-
-  @Override
-  public String getNString(String columnName) throws SQLException {
-    return getNString(findColumn(columnName));
-  }
-
-  @Override
-  public Reader getNCharacterStream(int columnIndex) throws SQLException {
-    connection.getLogger().log(Level.FINEST, "  getNCharacterStream columnIndex: {0}", columnIndex);
-    throw Driver.notImplemented(this.getClass(), "getNCharacterStream(int)");
-  }
-
-  @Override
-  public Reader getNCharacterStream(String columnName) throws SQLException {
-    return getNCharacterStream(findColumn(columnName));
-  }
-
-  public void updateNCharacterStream(int columnIndex,
-      Reader x, int length) throws SQLException {
-    throw Driver.notImplemented(this.getClass(),
-        "updateNCharacterStream(int, Reader, int)");
-  }
-
-  public void updateNCharacterStream(String columnName,
-      Reader x, int length) throws SQLException {
-    updateNCharacterStream(findColumn(columnName), x, length);
-  }
-
-  @Override
-  public void updateNCharacterStream(int columnIndex,
-      Reader x) throws SQLException {
-    throw Driver.notImplemented(this.getClass(),
-        "updateNCharacterStream(int, Reader)");
-  }
-
-  @Override
-  public void updateNCharacterStream(String columnName,
-      Reader x) throws SQLException {
-    updateNCharacterStream(findColumn(columnName), x);
-  }
-
-  @Override
-  public void updateNCharacterStream(int columnIndex,
-      Reader x, long length) throws SQLException {
-    throw Driver.notImplemented(this.getClass(),
-        "updateNCharacterStream(int, Reader, long)");
-  }
-
-  @Override
-  public void updateNCharacterStream(String columnName,
-      Reader x, long length) throws SQLException {
-    updateNCharacterStream(findColumn(columnName), x, length);
-  }
-
-  @Override
-  public void updateCharacterStream(int columnIndex,
-      Reader reader, long length)
-      throws SQLException {
-    throw Driver.notImplemented(this.getClass(),
-        "updateCharacterStream(int, Reader, long)");
-  }
-
-  @Override
-  public void updateCharacterStream(String columnName,
-      Reader reader, long length)
-      throws SQLException {
-    updateCharacterStream(findColumn(columnName), reader, length);
-  }
-
-  @Override
-  public void updateCharacterStream(int columnIndex,
-      Reader reader) throws SQLException {
-    throw Driver.notImplemented(this.getClass(),
-        "updateCharacterStream(int, Reader)");
-  }
-
-  @Override
-  public void updateCharacterStream(String columnName,
-      Reader reader) throws SQLException {
-    updateCharacterStream(findColumn(columnName), reader);
-  }
-
-  @Override
-  public void updateBinaryStream(int columnIndex,
-      InputStream inputStream, long length)
-      throws SQLException {
-    throw Driver.notImplemented(this.getClass(),
-        "updateBinaryStream(int, InputStream, long)");
-  }
-
-  @Override
-  public void updateBinaryStream(String columnName,
-      InputStream inputStream, long length)
-      throws SQLException {
-    updateBinaryStream(findColumn(columnName), inputStream, length);
-  }
-
-  @Override
-  public void updateBinaryStream(int columnIndex,
-      InputStream inputStream) throws SQLException {
-    throw Driver.notImplemented(this.getClass(),
-        "updateBinaryStream(int, InputStream)");
-  }
-
-  @Override
-  public void updateBinaryStream(String columnName,
-      InputStream inputStream) throws SQLException {
-    updateBinaryStream(findColumn(columnName), inputStream);
-  }
-
-  @Override
-  public void updateAsciiStream(int columnIndex,
-      InputStream inputStream, long length)
-      throws SQLException {
-    throw Driver.notImplemented(this.getClass(),
-        "updateAsciiStream(int, InputStream, long)");
-  }
-
-  @Override
-  public void updateAsciiStream(String columnName,
-      InputStream inputStream, long length)
-      throws SQLException {
-    updateAsciiStream(findColumn(columnName), inputStream, length);
-  }
-
-  @Override
-  public void updateAsciiStream(int columnIndex,
-      InputStream inputStream) throws SQLException {
-    throw Driver.notImplemented(this.getClass(),
-        "updateAsciiStream(int, InputStream)");
-  }
-
-  @Override
-  public void updateAsciiStream(String columnName,
-      InputStream inputStream) throws SQLException {
-    updateAsciiStream(findColumn(columnName), inputStream);
-  }
-
-  @Override
-  public boolean isWrapperFor(Class<?> iface) throws SQLException {
-    return iface.isAssignableFrom(getClass());
-  }
-
-  @Override
-  public <T> T unwrap(Class<T> iface) throws SQLException {
-    if (iface.isAssignableFrom(getClass())) {
-      return iface.cast(this);
    // By-name overload: resolves the column label via findColumn(String) and
    // delegates to the index-based getString(int).
    public String getString(String columnName) throws SQLException {
        return getString(findColumn(columnName));
    }
-    throw new SQLException("Cannot unwrap to " + iface.getName());
-  }
 
-  private Calendar getDefaultCalendar() {
-    if (getTimestampUtils().hasFastDefaultTimeZone()) {
-      return getTimestampUtils().getSharedCalendar(null);
    @Override
    // By-name overload: delegates to getBoolean(int).
    public boolean getBoolean(String columnName) throws SQLException {
        return getBoolean(findColumn(columnName));
    }
-    Calendar sharedCalendar = getTimestampUtils().getSharedCalendar(defaultTimeZone);
-    if (defaultTimeZone == null) {
-      defaultTimeZone = sharedCalendar.getTimeZone();
-    }
-    return sharedCalendar;
-  }
 
-  private TimestampUtils getTimestampUtils() {
-    if (timestampUtils == null) {
-      timestampUtils = new TimestampUtils(!connection.getQueryExecutor().getIntegerDateTimes(), (Provider<TimeZone>) new QueryExecutorTimeZoneProvider(connection.getQueryExecutor()));
-    }
-    return timestampUtils;
-  }
+    @Override
 
-  /**
-   * This is here to be used by metadata functions
-   * to make all column labels upper case.
-   * Because postgres folds columns to lower case in queries it will be easier
-   * to change the fields after the fact rather than try to coerce all the columns
-   * to upper case in the queries as this would require surrounding all columns with " and
-   * escaping them making them even harder to read than they are now.
-   * @return PgResultSet
-   */
-  protected PgResultSet upperCaseFieldLabels() {
-    for (Field field: fields ) {
-      field.upperCaseLabel();
    // By-name overload: delegates to getByte(int).
    public byte getByte(String columnName) throws SQLException {
        return getByte(findColumn(columnName));
    }
+
    // --- By-name accessor overloads -----------------------------------------
    // Each method resolves the column label with findColumn(String) and
    // delegates to the corresponding index-based variant. The getDate/getTime/
    // getTimestamp overloads pass a null Calendar, i.e. "use the connection's
    // default calendar/time zone".
    @Override

    public short getShort(String columnName) throws SQLException {
        return getShort(findColumn(columnName));
    }

    @Override

    public int getInt(String columnName) throws SQLException {
        return getInt(findColumn(columnName));
    }

    @Override

    public long getLong(String columnName) throws SQLException {
        return getLong(findColumn(columnName));
    }

    @Override

    public float getFloat(String columnName) throws SQLException {
        return getFloat(findColumn(columnName));
    }

    @Override

    public double getDouble(String columnName) throws SQLException {
        return getDouble(findColumn(columnName));
    }

    @Override

    // Deprecated in JDBC, kept for compatibility; scale is forwarded as-is.
    @SuppressWarnings("deprecation")
    public BigDecimal getBigDecimal(String columnName, int scale) throws SQLException {
        return getBigDecimal(findColumn(columnName), scale);
    }

    @Override

    public byte[] getBytes(String columnName) throws SQLException {
        return getBytes(findColumn(columnName));
    }

    @Override

    public Date getDate(String columnName) throws SQLException {
        return getDate(findColumn(columnName), null);
    }

    @Override

    public Time getTime(String columnName) throws SQLException {
        return getTime(findColumn(columnName), null);
    }

    @Override

    public Timestamp getTimestamp(String columnName) throws SQLException {
        return getTimestamp(findColumn(columnName), null);
    }

    @Override

    public InputStream getAsciiStream(String columnName) throws SQLException {
        return getAsciiStream(findColumn(columnName));
    }

    @Override

    // Deprecated in JDBC, kept for compatibility.
    @SuppressWarnings("deprecation")
    public InputStream getUnicodeStream(String columnName) throws SQLException {
        return getUnicodeStream(findColumn(columnName));
    }

    @Override

    public InputStream getBinaryStream(String columnName) throws SQLException {
        return getBinaryStream(findColumn(columnName));
    }
+
    @Override

    // Returns the head of the warning chain accumulated via addWarning, or
    // null if none; throws if the result set is closed.
    public SQLWarning getWarnings() throws SQLException {
        checkClosed();
        return warnings;
    }
+
    @Override
    // Drops the entire warning chain; throws if the result set is closed.
    public void clearWarnings() throws SQLException {
        checkClosed();
        warnings = null;
    }
+
+    protected void addWarning(SQLWarning warnings) {
+        if (this.warnings != null) {
+            this.warnings.setNextWarning(warnings);
+        } else {
+            this.warnings = warnings;
+        }
+    }
+
    @Override
    // No cursor name is exposed by this implementation: after verifying the
    // result set is open, always returns null.
    public String getCursorName() throws SQLException {
        checkClosed();
        return null;
    }
+
    @Override
    /**
     * Returns the column value as the driver's default Java object for the
     * column's PostgreSQL type, or null for SQL NULL.
     *
     * <p>Resolution order: driver-internal conversion via internalGetObject,
     * then connection-level conversion from the raw binary bytes or from the
     * text representation, depending on the column's wire format.</p>
     */
    public Object getObject(int columnIndex) throws SQLException {
        connection.getLogger().log(Level.FINEST, "  getObject columnIndex: {0}", columnIndex);
        Field field;

        // getRawValue also validates the index and updates wasNullFlag.
        byte[] value = getRawValue(columnIndex);
        if (value == null) {
            return null;
        }

        field = fields[columnIndex - 1];

        // some fields can be null, mainly from those returned by MetaData methods
        if (field == null) {
            wasNullFlag = true;
            return null;
        }

        Object result = internalGetObject(columnIndex, field);
        if (result != null) {
            return result;
        }

        // Fall back to the connection's type-based conversion: binary columns
        // pass the raw bytes, text columns pass the string form.
        if (isBinary(columnIndex)) {
            return connection.getObject(getPGType(columnIndex), null, value);
        }
        String stringValue = getString(columnIndex);
        return connection.getObject(getPGType(columnIndex), stringValue, null);
    }
+
    // ----------------- Formatting Methods -------------------

    @Override
    // By-name overload: delegates to getObject(int).
    public Object getObject(String columnName) throws SQLException {
        return getObject(findColumn(columnName));
    }
+
    @Override
    /**
     * Maps a column label to its 1-based index.
     *
     * @throws PSQLException with UNDEFINED_COLUMN when the label does not
     *         match any column (findColumnIndex returns 0 in that case)
     */
    public int findColumn(String columnName) throws SQLException {
        checkClosed();

        int col = findColumnIndex(columnName);
        if (col == 0) {
            throw new PSQLException(
                    GT.tr("The column name {0} was not found in this ResultSet.", columnName),
                    PSQLState.UNDEFINED_COLUMN);
        }
        return col;
    }
+
    /**
     * Resolves a column label to its 1-based index, or 0 when not found.
     *
     * <p>Lookup order: exact match, then lower-cased, then upper-cased
     * (both using Locale.US for locale-independent folding). Successful
     * case-folded lookups are memoized under the caller's original spelling.</p>
     */
    private int findColumnIndex(String columnName) {
        if (columnNameIndexMap == null) {
            // Prefer the map precomputed by the originating query, if any.
            if (originalQuery != null) {
                columnNameIndexMap = originalQuery.getResultSetColumnNameIndexMap();
            }
            if (columnNameIndexMap == null) {
                columnNameIndexMap = createColumnNameIndexMap(fields, connection.isColumnSanitiserDisabled());
            }
        }

        Integer index = columnNameIndexMap.get(columnName);
        if (index != null) {
            return index;
        }

        index = columnNameIndexMap.get(columnName.toLowerCase(Locale.US));
        if (index != null) {
            // Cache the caller's spelling to make repeated lookups O(1).
            // NOTE(review): this mutates a map possibly shared with the query
            // object — presumably intentional as a shared cache; verify.
            columnNameIndexMap.put(columnName, index);
            return index;
        }

        index = columnNameIndexMap.get(columnName.toUpperCase(Locale.US));
        if (index != null) {
            columnNameIndexMap.put(columnName, index);
            return index;
        }

        return 0;
    }
+
    /**
     * Returns the OID of a field. It is used internally by the driver.
     *
     * @param field 1-based field index (no bounds check is performed here)
     * @return OID of a field
     */
    public int getColumnOID(int field) {
        return fields[field - 1].getOID();
    }
+
    /**
     * <p>This is used to fix get*() methods on Money fields. It should only be used by those methods!</p>
     *
     * <p>It converts ($##.##) to -##.## and $##.## to ##.##</p>
     *
     * @param col column position (1-based)
     * @return numeric-parsable representation of money string literal,
     *         or null when the column value is SQL NULL
     * @throws SQLException if something wrong happens
     */
    public String getFixedString(int col) throws SQLException {
        String stringValue = getString(col);
        return trimMoney(stringValue);
    }
+
+    private String trimMoney(String s) {
+        if (s == null) {
+            return null;
+        }
+
+        // if we don't have at least 2 characters it can't be money.
+        if (s.length() < 2) {
+            return s;
+        }
+
+        // Handle Money
+        char ch = s.charAt(0);
+
+        // optimise for non-money type: return immediately with one check
+        // if the first char cannot be '(', '$' or '-'
+        if (ch > '-') {
+            return s;
+        }
+
+        if (ch == '(') {
+            s = "-" + PGtokenizer.removePara(s).substring(1);
+        } else if (ch == '$') {
+            s = s.substring(1);
+        } else if (ch == '-' && s.charAt(1) == '$') {
+            s = "-" + s.substring(2);
+        }
+
+        return s;
+    }
+
    // Returns the PostgreSQL type name for a 1-based column, lazily resolving
    // it from the type registry on first access.
    protected String getPGType(int column) throws SQLException {
        Field field = fields[column - 1];
        initSqlType(field);
        return field.getPGType();
    }
+
    // Returns the java.sql.Types code for a 1-based column, lazily resolving
    // it from the type registry on first access.
    protected int getSQLType(int column) throws SQLException {
        Field field = fields[column - 1];
        initSqlType(field);
        return field.getSQLType();
    }
+
    // Populates the field's cached PG type name and SQL type code from the
    // connection's TypeInfo, keyed by the field's OID. No-op once initialized.
    private void initSqlType(Field field) throws SQLException {
        if (field.isTypeInitialized()) {
            return;
        }
        TypeInfo typeInfo = connection.getTypeInfo();
        int oid = field.getOID();
        String pgType = typeInfo.getPGType(oid);
        int sqlType = typeInfo.getSQLType(pgType);
        field.setSQLType(sqlType);
        field.setPGType(pgType);
    }
+
    // Verifies the result set is open and updateable, and lazily allocates the
    // pending-update map used by the update*() methods.
    private void checkUpdateable() throws SQLException {
        checkClosed();

        if (!isUpdateable()) {
            throw new PSQLException(
                    GT.tr(
                            "ResultSet is not updateable.  The query that generated this result set must select only one table, and must select all primary keys from that table. See the JDBC 2.1 API Specification, section 5.6 for more details."),
                    PSQLState.INVALID_CURSOR_STATE);
        }

        if (updateValues == null) {
            // allow every column to be updated without a rehash.
            updateValues = new HashMap<>((int) (fields.length / 0.75), 0.75f);
        }
    }
+
    // Throws if this result set has been closed (closing nulls out 'rows').
    protected void checkClosed() throws SQLException {
        if (rows == null) {
            throw new PSQLException(GT.tr("This ResultSet is closed."), PSQLState.OBJECT_NOT_IN_STATE);
        }
    }
+
    /*
     * Non-throwing closed check, for internal use by the JDBC3 layer.
     * Mirrors checkClosed(): closed iff 'rows' has been nulled out.
     */
    protected boolean isResultSetClosed() {
        return rows == null;
    }
+
    // Validates a 1-based column index against the field count.
    protected void checkColumnIndex(int column) throws SQLException {
        if (column < 1 || column > fields.length) {
            throw new PSQLException(
                    GT.tr("The column index is out of range: {0}, number of columns: {1}.",
                            column, fields.length),
                    PSQLState.INVALID_PARAMETER_VALUE);
        }
    }
+
    /**
     * Checks that the result set is not closed, it's positioned on a valid row and that the given
     * column number is valid. Also updates the {@link #wasNullFlag} to correct value.
     *
     * @param column The column number to check. Range starts from 1.
     * @return raw value or null
     * @throws SQLException If state or column is invalid.
     */
    protected byte[] getRawValue(int column) throws SQLException {
        checkClosed();
        if (thisRow == null) {
            throw new PSQLException(
                    GT.tr("ResultSet not positioned properly, perhaps you need to call next."),
                    PSQLState.INVALID_CURSOR_STATE);
        }
        checkColumnIndex(column);
        byte[] bytes = thisRow.get(column - 1);
        // wasNull() reports on the most recently read column, so record it here.
        wasNullFlag = bytes == null;
        return bytes;
    }
+
    /**
     * Returns true if the value of the given column is in binary format.
     *
     * @param column The column to check. Range starts from 1.
     * @return True if the column is in binary format.
     */

    protected boolean isBinary(int column) {
        return fields[column - 1].getFormat() == Field.BINARY_FORMAT;
    }
+
    // Parses a string to BigDecimal (null-safe) and applies the requested
    // scale; a scale of -1 leaves the parsed value unchanged.
    public BigDecimal toBigDecimal(String s, int scale) throws SQLException {
        if (s == null) {
            return null;
        }
        BigDecimal val = toBigDecimal(s);
        return scaleBigDecimal(val, scale);
    }
+
+    private BigDecimal scaleBigDecimal(BigDecimal val, int scale) throws PSQLException {
+        if (scale == -1) {
+            return val;
+        }
+        try {
+            return val.setScale(scale);
+        } catch (ArithmeticException e) {
+            throw new PSQLException(
+                    GT.tr("Bad value for type {0} : {1}", "BigDecimal", val),
+                    PSQLState.NUMERIC_VALUE_OUT_OF_RANGE);
+        }
+    }
+
    // Loads the tuple at currentRow into thisRow and, for updatable result
    // sets, prepares a mutable copy for pending in-place updates.
    private void initRowBuffer() {
        thisRow = rows.get(currentRow);
        // We only need a copy of the current row if we're going to
        // modify it via an updatable resultset.
        if (resultsetconcurrency == ResultSet.CONCUR_UPDATABLE) {
            rowBuffer = thisRow.updateableCopy();
        } else {
            rowBuffer = null;
        }
    }
+
+    private boolean isColumnTrimmable(int columnIndex) throws SQLException {
+        switch (getSQLType(columnIndex)) {
+            case Types.CHAR:
+            case Types.VARCHAR:
+            case Types.LONGVARCHAR:
+            case Types.BINARY:
+            case Types.VARBINARY:
+            case Types.LONGVARBINARY:
+                return true;
+        }
+        return false;
+    }
+
+    private byte[] trimBytes(int columnIndex, byte[] bytes) throws SQLException {
+        // we need to trim if maxsize is set and the length is greater than maxsize and the
+        // type of this column is a candidate for trimming
+        if (maxFieldSize > 0 && bytes.length > maxFieldSize && isColumnTrimmable(columnIndex)) {
+            byte[] newBytes = new byte[maxFieldSize];
+            System.arraycopy(bytes, 0, newBytes, 0, maxFieldSize);
+            return newBytes;
+        } else {
+            return bytes;
+        }
+    }
+
+    private String trimString(int columnIndex, String string) throws SQLException {
+        // we need to trim if maxsize is set and the length is greater than maxsize and the
+        // type of this column is a candidate for trimming
+        if (maxFieldSize > 0 && string.length() > maxFieldSize && isColumnTrimmable(columnIndex)) {
+            return string.substring(0, maxFieldSize);
+        } else {
+            return string;
+        }
+    }
+
    /**
     * Converts any numeric binary field to double value. This method does no overflow checking.
     *
     * @param bytes      The bytes of the numeric field.
     * @param oid        The oid of the field.
     * @param targetType The target type. Used for error reporting.
     * @return The value as double.
     * @throws PSQLException If the field type is not supported numeric type.
     */
    private double readDoubleValue(byte[] bytes, int oid, String targetType) throws PSQLException {
        // currently implemented binary encoded fields
        switch (oid) {
            case Oid.INT2:
                return ByteConverter.int2(bytes, 0);
            case Oid.INT4:
                return ByteConverter.int4(bytes, 0);
            case Oid.INT8:
                // might not fit but there still should be no overflow checking
                return ByteConverter.int8(bytes, 0);
            case Oid.FLOAT4:
                return ByteConverter.float4(bytes, 0);
            case Oid.FLOAT8:
                return ByteConverter.float8(bytes, 0);
            case Oid.NUMERIC:
                return ByteConverter.numeric(bytes).doubleValue();
        }
        // Non-numeric OIDs cannot be converted to double.
        throw new PSQLException(GT.tr("Cannot convert the column of type {0} to requested type {1}.",
                Oid.toString(oid), targetType), PSQLState.DATA_TYPE_MISMATCH);
    }
+
    /**
     * <p>Converts any numeric binary field to long value.</p>
     *
     * <p>This method is used by getByte,getShort,getInt and getLong. It must support a subset of the
     * following java types that use Binary encoding. (fields that use text encoding use a different
     * code path).
     *
     * <code>byte,short,int,long,float,double,BigDecimal,boolean,string</code>.
     * </p>
     *
     * @param bytes      The bytes of the numeric field.
     * @param oid        The oid of the field.
     * @param minVal     the minimum value allowed.
     * @param maxVal     the maximum value allowed.
     * @param targetType The target type. Used for error reporting.
     * @return The value as long.
     * @throws PSQLException If the field type is not supported numeric type or if the value is out of
     *                       range.
     */

    private long readLongValue(byte[] bytes, int oid, long minVal, long maxVal, String targetType)
            throws PSQLException {
        long val;
        // currently implemented binary encoded fields
        switch (oid) {
            case Oid.INT2:
                val = ByteConverter.int2(bytes, 0);
                break;
            case Oid.INT4:
                val = ByteConverter.int4(bytes, 0);
                break;
            case Oid.INT8:
                val = ByteConverter.int8(bytes, 0);
                break;
            case Oid.FLOAT4:
                float f = ByteConverter.float4(bytes, 0);
                // for float values we know to be within values of long, just cast directly to long
                if (f <= LONG_MAX_FLOAT && f >= LONG_MIN_FLOAT) {
                    val = (long) f;
                } else {
                    throw new PSQLException(GT.tr("Bad value for type {0} : {1}", targetType, f),
                            PSQLState.NUMERIC_VALUE_OUT_OF_RANGE);
                }
                break;
            case Oid.FLOAT8:
                double d = ByteConverter.float8(bytes, 0);
                // for double values within the values of a long, just directly cast to long
                if (d <= LONG_MAX_DOUBLE && d >= LONG_MIN_DOUBLE) {
                    val = (long) d;
                } else {
                    throw new PSQLException(GT.tr("Bad value for type {0} : {1}", targetType, d),
                            PSQLState.NUMERIC_VALUE_OUT_OF_RANGE);
                }
                break;
            case Oid.NUMERIC:
                // NUMERIC may exceed long range, so compare as BigInteger first.
                Number num = ByteConverter.numeric(bytes);
                BigInteger i = ((BigDecimal) num).toBigInteger();
                int gt = i.compareTo(LONGMAX);
                int lt = i.compareTo(LONGMIN);

                if (gt > 0 || lt < 0) {
                    throw new PSQLException(GT.tr("Bad value for type {0} : {1}", "long", num),
                            PSQLState.NUMERIC_VALUE_OUT_OF_RANGE);
                } else {
                    val = num.longValue();
                }
                break;
            default:
                throw new PSQLException(
                        GT.tr("Cannot convert the column of type {0} to requested type {1}.",
                                Oid.toString(oid), targetType),
                        PSQLState.DATA_TYPE_MISMATCH);
        }
        // Enforce the narrower target-type range (e.g. byte/short/int).
        if (val < minVal || val > maxVal) {
            throw new PSQLException(GT.tr("Bad value for type {0} : {1}", targetType, val),
                    PSQLState.NUMERIC_VALUE_OUT_OF_RANGE);
        }
        return val;
    }
+
    // Records a pending column update (keyed by the column's base name) after
    // validating cursor position and updateability; null values are routed
    // through updateNull.
    protected void updateValue(int columnIndex, Object value) throws SQLException {
        checkUpdateable();

        if (!onInsertRow && (isBeforeFirst() || isAfterLast() || rows.isEmpty())) {
            throw new PSQLException(
                    GT.tr(
                            "Cannot update the ResultSet because it is either before the start or after the end of the results."),
                    PSQLState.INVALID_CURSOR_STATE);
        }

        checkColumnIndex(columnIndex);

        // On the insert row this is an insert, not an update of an existing row.
        doingUpdates = !onInsertRow;
        if (value == null) {
            updateNull(columnIndex);
        } else {
            PGResultSetMetaData md = (PGResultSetMetaData) getMetaData();
            updateValues.put(md.getBaseColumnName(columnIndex), value);
        }
    }
+
    // Parses the textual UUID representation, mapping malformed input to a
    // driver exception (with the original parse failure as the cause).
    protected Object getUUID(String data) throws SQLException {
        UUID uuid;
        try {
            uuid = UUID.fromString(data);
        } catch (IllegalArgumentException iae) {
            throw new PSQLException(GT.tr("Invalid UUID data."), PSQLState.INVALID_PARAMETER_VALUE, iae);
        }

        return uuid;
    }
+
    // Builds a UUID from the 16-byte binary representation: the first 8 bytes
    // become the most-significant long, the next 8 the least-significant
    // (assumes ByteConverter.int8 reads big-endian wire order — standard for
    // the PostgreSQL binary protocol).
    protected Object getUUID(byte[] data) throws SQLException {
        return new UUID(ByteConverter.int8(data, 0), ByteConverter.int8(data, 8));
    }
+
    /**
     * Used to add rows to an already existing ResultSet that exactly match the existing rows.
     * Currently only used for assembling generated keys from batch statement execution.
     */
    void addRows(List<Tuple> tuples) {
        rows.addAll(tuples);
    }
+
    // --- Unsupported / delegating update overloads --------------------------
    // Ref, Blob and Clob updates are not implemented by this driver and throw
    // a "not implemented" SQLFeatureNotSupportedException via Driver.notImplemented.
    // Array updates are supported and route through updateObject.
    @Override
    public void updateRef(int columnIndex, Ref x) throws SQLException {
        throw Driver.notImplemented(this.getClass(), "updateRef(int,Ref)");
    }

    //
    // We need to specify the type of NULL when updating a column to NULL, so
    // NullObject is a simple extension of PGobject that always returns null
    // values but retains column type info.
    // NOTE(review): this comment describes the NullObject helper class, which
    // is defined elsewhere in this file, not the method below.
    //

    @Override
    public void updateRef(String columnName, Ref x) throws SQLException {
        throw Driver.notImplemented(this.getClass(), "updateRef(String,Ref)");
    }

    @Override
    public void updateBlob(int columnIndex, Blob x) throws SQLException {
        throw Driver.notImplemented(this.getClass(), "updateBlob(int,Blob)");
    }

    @Override
    public void updateBlob(String columnName, Blob x) throws SQLException {
        throw Driver.notImplemented(this.getClass(), "updateBlob(String,Blob)");
    }

    @Override
    public void updateClob(int columnIndex, Clob x) throws SQLException {
        throw Driver.notImplemented(this.getClass(), "updateClob(int,Clob)");
    }

    @Override
    public void updateClob(String columnName, Clob x) throws SQLException {
        throw Driver.notImplemented(this.getClass(), "updateClob(String,Clob)");
    }

    @Override
    public void updateArray(int columnIndex, Array x) throws SQLException {
        updateObject(columnIndex, x);
    }

    @Override
    public void updateArray(String columnName, Array x) throws SQLException {
        updateArray(findColumn(columnName), x);
    }
+
+    @Override
+    public <T> T getObject(int columnIndex, Class<T> type) throws SQLException {
+        if (type == null) {
+            throw new SQLException("type is null");
+        }
+        int sqlType = getSQLType(columnIndex);
+        if (type == BigDecimal.class) {
+            if (sqlType == Types.NUMERIC || sqlType == Types.DECIMAL) {
+                return type.cast(getBigDecimal(columnIndex));
+            } else {
+                throw new PSQLException(GT.tr("conversion to {0} from {1} not supported", type, getPGType(columnIndex)),
+                        PSQLState.INVALID_PARAMETER_VALUE);
+            }
+        } else if (type == String.class) {
+            if (sqlType == Types.CHAR || sqlType == Types.VARCHAR) {
+                return type.cast(getString(columnIndex));
+            } else {
+                throw new PSQLException(GT.tr("conversion to {0} from {1} not supported", type, getPGType(columnIndex)),
+                        PSQLState.INVALID_PARAMETER_VALUE);
+            }
+        } else if (type == Boolean.class) {
+            if (sqlType == Types.BOOLEAN || sqlType == Types.BIT) {
+                boolean booleanValue = getBoolean(columnIndex);
+                if (wasNull()) {
+                    return null;
+                }
+                return type.cast(booleanValue);
+            } else {
+                throw new PSQLException(GT.tr("conversion to {0} from {1} not supported", type, getPGType(columnIndex)),
+                        PSQLState.INVALID_PARAMETER_VALUE);
+            }
+        } else if (type == Short.class) {
+            if (sqlType == Types.SMALLINT) {
+                short shortValue = getShort(columnIndex);
+                if (wasNull()) {
+                    return null;
+                }
+                return type.cast(shortValue);
+            } else {
+                throw new PSQLException(GT.tr("conversion to {0} from {1} not supported", type, getPGType(columnIndex)),
+                        PSQLState.INVALID_PARAMETER_VALUE);
+            }
+        } else if (type == Integer.class) {
+            if (sqlType == Types.INTEGER || sqlType == Types.SMALLINT) {
+                int intValue = getInt(columnIndex);
+                if (wasNull()) {
+                    return null;
+                }
+                return type.cast(intValue);
+            } else {
+                throw new PSQLException(GT.tr("conversion to {0} from {1} not supported", type, getPGType(columnIndex)),
+                        PSQLState.INVALID_PARAMETER_VALUE);
+            }
+        } else if (type == Long.class) {
+            if (sqlType == Types.BIGINT) {
+                long longValue = getLong(columnIndex);
+                if (wasNull()) {
+                    return null;
+                }
+                return type.cast(longValue);
+            } else {
+                throw new PSQLException(GT.tr("conversion to {0} from {1} not supported", type, getPGType(columnIndex)),
+                        PSQLState.INVALID_PARAMETER_VALUE);
+            }
+        } else if (type == BigInteger.class) {
+            if (sqlType == Types.BIGINT) {
+                long longValue = getLong(columnIndex);
+                if (wasNull()) {
+                    return null;
+                }
+                return type.cast(BigInteger.valueOf(longValue));
+            } else {
+                throw new PSQLException(GT.tr("conversion to {0} from {1} not supported", type, getPGType(columnIndex)),
+                        PSQLState.INVALID_PARAMETER_VALUE);
+            }
+        } else if (type == Float.class) {
+            if (sqlType == Types.REAL) {
+                float floatValue = getFloat(columnIndex);
+                if (wasNull()) {
+                    return null;
+                }
+                return type.cast(floatValue);
+            } else {
+                throw new PSQLException(GT.tr("conversion to {0} from {1} not supported", type, getPGType(columnIndex)),
+                        PSQLState.INVALID_PARAMETER_VALUE);
+            }
+        } else if (type == Double.class) {
+            if (sqlType == Types.FLOAT || sqlType == Types.DOUBLE) {
+                double doubleValue = getDouble(columnIndex);
+                if (wasNull()) {
+                    return null;
+                }
+                return type.cast(doubleValue);
+            } else {
+                throw new PSQLException(GT.tr("conversion to {0} from {1} not supported", type, getPGType(columnIndex)),
+                        PSQLState.INVALID_PARAMETER_VALUE);
+            }
+        } else if (type == Date.class) {
+            if (sqlType == Types.DATE) {
+                return type.cast(getDate(columnIndex));
+            } else {
+                throw new PSQLException(GT.tr("conversion to {0} from {1} not supported", type, getPGType(columnIndex)),
+                        PSQLState.INVALID_PARAMETER_VALUE);
+            }
+        } else if (type == Time.class) {
+            if (sqlType == Types.TIME) {
+                return type.cast(getTime(columnIndex));
+            } else {
+                throw new PSQLException(GT.tr("conversion to {0} from {1} not supported", type, getPGType(columnIndex)),
+                        PSQLState.INVALID_PARAMETER_VALUE);
+            }
+        } else if (type == Timestamp.class) {
+            if (sqlType == Types.TIMESTAMP
+                    || sqlType == Types.TIMESTAMP_WITH_TIMEZONE
+            ) {
+                return type.cast(getTimestamp(columnIndex));
+            } else {
+                throw new PSQLException(GT.tr("conversion to {0} from {1} not supported", type, getPGType(columnIndex)),
+                        PSQLState.INVALID_PARAMETER_VALUE);
+            }
+        } else if (type == Calendar.class) {
+            if (sqlType == Types.TIMESTAMP
+                    || sqlType == Types.TIMESTAMP_WITH_TIMEZONE
+            ) {
+                Timestamp timestampValue = getTimestamp(columnIndex);
+                if (timestampValue == null) {
+                    return null;
+                }
+                Calendar calendar = Calendar.getInstance(getDefaultCalendar().getTimeZone());
+                calendar.setTimeInMillis(timestampValue.getTime());
+                return type.cast(calendar);
+            } else {
+                throw new PSQLException(GT.tr("conversion to {0} from {1} not supported", type, getPGType(columnIndex)),
+                        PSQLState.INVALID_PARAMETER_VALUE);
+            }
+        } else if (type == Blob.class) {
+            if (sqlType == Types.BLOB || sqlType == Types.BINARY || sqlType == Types.BIGINT) {
+                return type.cast(getBlob(columnIndex));
+            } else {
+                throw new PSQLException(GT.tr("conversion to {0} from {1} not supported", type, getPGType(columnIndex)),
+                        PSQLState.INVALID_PARAMETER_VALUE);
+            }
+        } else if (type == Clob.class) {
+            if (sqlType == Types.CLOB || sqlType == Types.BIGINT) {
+                return type.cast(getClob(columnIndex));
+            } else {
+                throw new PSQLException(GT.tr("conversion to {0} from {1} not supported", type, getPGType(columnIndex)),
+                        PSQLState.INVALID_PARAMETER_VALUE);
+            }
+        } else if (type == java.util.Date.class) {
+            if (sqlType == Types.TIMESTAMP) {
+                Timestamp timestamp = getTimestamp(columnIndex);
+                if (timestamp == null) {
+                    return null;
+                }
+                return type.cast(new java.util.Date(timestamp.getTime()));
+            } else {
+                throw new PSQLException(GT.tr("conversion to {0} from {1} not supported", type, getPGType(columnIndex)),
+                        PSQLState.INVALID_PARAMETER_VALUE);
+            }
+        } else if (type == Array.class) {
+            if (sqlType == Types.ARRAY) {
+                return type.cast(getArray(columnIndex));
+            } else {
+                throw new PSQLException(GT.tr("conversion to {0} from {1} not supported", type, getPGType(columnIndex)),
+                        PSQLState.INVALID_PARAMETER_VALUE);
+            }
+        } else if (type == SQLXML.class) {
+            if (sqlType == Types.SQLXML) {
+                return type.cast(getSQLXML(columnIndex));
+            } else {
+                throw new PSQLException(GT.tr("conversion to {0} from {1} not supported", type, getPGType(columnIndex)),
+                        PSQLState.INVALID_PARAMETER_VALUE);
+            }
+        } else if (type == UUID.class) {
+            return type.cast(getObject(columnIndex));
+        } else if (type == InetAddress.class) {
+            String inetText = getString(columnIndex);
+            if (inetText == null) {
+                return null;
+            }
+            int slash = inetText.indexOf("/");
+            try {
+                return type.cast(InetAddress.getByName(slash < 0 ? inetText : inetText.substring(0, slash)));
+            } catch (UnknownHostException ex) {
+                throw new PSQLException(GT.tr("Invalid Inet data."), PSQLState.INVALID_PARAMETER_VALUE, ex);
+            }
+            // JSR-310 support
+        } else if (type == LocalDate.class) {
+            return type.cast(getLocalDate(columnIndex));
+        } else if (type == LocalTime.class) {
+            return type.cast(getLocalTime(columnIndex));
+        } else if (type == LocalDateTime.class) {
+            return type.cast(getLocalDateTime(columnIndex));
+        } else if (type == OffsetDateTime.class) {
+            return type.cast(getOffsetDateTime(columnIndex));
+        } else if (type == OffsetTime.class) {
+            return type.cast(getOffsetTime(columnIndex));
+        } else if (PGobject.class.isAssignableFrom(type)) {
+            Object object;
+            if (isBinary(columnIndex)) {
+                byte[] byteValue = thisRow.get(columnIndex - 1);
+                object = connection.getObject(getPGType(columnIndex), null, byteValue);
+            } else {
+                object = connection.getObject(getPGType(columnIndex), getString(columnIndex), null);
+            }
+            return type.cast(object);
+        }
+        throw new PSQLException(GT.tr("conversion to {0} from {1} not supported", type, getPGType(columnIndex)),
+                PSQLState.INVALID_PARAMETER_VALUE);
+    }
+
+    @Override
+    public <T> T getObject(String columnLabel, Class<T> type) throws SQLException {
+        return getObject(findColumn(columnLabel), type);
+    }
+
+    /** Type-map variant of {@code getObject} by label; delegates to the driver's map-aware implementation. */
+    @Override
+    public Object getObject(String s, Map<String, Class<?>> map) throws SQLException {
+        return getObjectImpl(s, map);
+    }
+
+    /** Type-map variant of {@code getObject} by index; delegates to the driver's map-aware implementation. */
+    @Override
+    public Object getObject(int i, Map<String, Class<?>> map) throws SQLException {
+        return getObjectImpl(i, map);
+    }
+
+    // JDBC 4.2 SQLType-based updates are not implemented; all four overloads throw.
+    @Override
+    public void updateObject(int columnIndex, Object x, SQLType targetSqlType,
+                             int scaleOrLength) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "updateObject");
+    }
+
+    @Override
+    public void updateObject(String columnLabel, Object x, SQLType targetSqlType,
+                             int scaleOrLength) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "updateObject");
+    }
+
+    @Override
+    public void updateObject(int columnIndex, Object x, SQLType targetSqlType)
+            throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "updateObject");
+    }
+
+    @Override
+    public void updateObject(String columnLabel, Object x, SQLType targetSqlType)
+            throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "updateObject");
+    }
+
+    // ROWID retrieval is not supported by this driver; the label variant delegates
+    // by index so both overloads fail the same way.
+    @Override
+    public RowId getRowId(int columnIndex) throws SQLException {
+        connection.getLogger().log(Level.FINEST, "  getRowId columnIndex: {0}", columnIndex);
+        throw Driver.notImplemented(this.getClass(), "getRowId(int)");
+    }
+
+    @Override
+    public RowId getRowId(String columnName) throws SQLException {
+        return getRowId(findColumn(columnName));
+    }
+
+    // ROWID updates are not implemented.
+    @Override
+    public void updateRowId(int columnIndex, RowId x) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "updateRowId(int, RowId)");
+    }
+
+    @Override
+    public void updateRowId(String columnName, RowId x) throws SQLException {
+        updateRowId(findColumn(columnName), x);
+    }
+
+    /** Holdability reporting is not implemented for this result set. */
+    @Override
+    public int getHoldability() throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "getHoldability()");
+    }
+
+    /**
+     * Reports whether this result set has been closed.
+     *
+     * @return {@code true} once the row buffer has been released
+     */
+    @Override
+    public boolean isClosed() throws SQLException {
+        // A null row buffer is the closed marker for this result set.
+        return this.rows == null;
+    }
+
+    // National-character string updates are not implemented; the label variant delegates by index.
+    @Override
+    public void updateNString(int columnIndex, String nString) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "updateNString(int, String)");
+    }
+
+    @Override
+    public void updateNString(String columnName, String nString) throws SQLException {
+        updateNString(findColumn(columnName), nString);
+    }
+
+    // NCLOB updates are not supported: the index-based variants throw, and the
+    // label-based variants delegate to them via findColumn.
+    @Override
+    public void updateNClob(int columnIndex, NClob nClob) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "updateNClob(int, NClob)");
+    }
+
+    @Override
+    public void updateNClob(String columnName, NClob nClob) throws SQLException {
+        updateNClob(findColumn(columnName), nClob);
+    }
+
+    @Override
+    public void updateNClob(int columnIndex, Reader reader) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "updateNClob(int, Reader)");
+    }
+
+    @Override
+    public void updateNClob(String columnName, Reader reader) throws SQLException {
+        updateNClob(findColumn(columnName), reader);
+    }
+
+    @Override
+    public void updateNClob(int columnIndex, Reader reader, long length) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "updateNClob(int, Reader, long)");
+    }
+
+    @Override
+    public void updateNClob(String columnName, Reader reader, long length) throws SQLException {
+        updateNClob(findColumn(columnName), reader, length);
+    }
+
+    // NCLOB retrieval is not implemented.
+    @Override
+    public NClob getNClob(int columnIndex) throws SQLException {
+        connection.getLogger().log(Level.FINEST, "  getNClob columnIndex: {0}", columnIndex);
+        throw Driver.notImplemented(this.getClass(), "getNClob(int)");
+    }
+
+    @Override
+    public NClob getNClob(String columnName) throws SQLException {
+        return getNClob(findColumn(columnName));
+    }
+
+    // Stream-based BLOB updates are not implemented; label variants delegate by index.
+    @Override
+    public void updateBlob(int columnIndex, InputStream inputStream, long length)
+            throws SQLException {
+        throw Driver.notImplemented(this.getClass(),
+                "updateBlob(int, InputStream, long)");
+    }
+
+    @Override
+    public void updateBlob(String columnName, InputStream inputStream, long length)
+            throws SQLException {
+        updateBlob(findColumn(columnName), inputStream, length);
+    }
+
+    @Override
+    public void updateBlob(int columnIndex, InputStream inputStream) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "updateBlob(int, InputStream)");
+    }
+
+    @Override
+    public void updateBlob(String columnName, InputStream inputStream) throws SQLException {
+        updateBlob(findColumn(columnName), inputStream);
+    }
+
+    // Reader-based CLOB updates are not implemented; label variants delegate by index.
+    @Override
+    public void updateClob(int columnIndex, Reader reader, long length) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "updateClob(int, Reader, long)");
+    }
+
+    @Override
+    public void updateClob(String columnName, Reader reader, long length) throws SQLException {
+        updateClob(findColumn(columnName), reader, length);
+    }
+
+    @Override
+    public void updateClob(int columnIndex, Reader reader) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "updateClob(int, Reader)");
+    }
+
+    @Override
+    public void updateClob(String columnName, Reader reader) throws SQLException {
+        updateClob(findColumn(columnName), reader);
+    }
+
+    /**
+     * Retrieves the value of the designated column as a {@link SQLXML} object.
+     * The column is read as text and wrapped without XML validation.
+     *
+     * @param columnIndex the first column is 1, the second is 2...
+     * @return the column value as SQLXML, or {@code null} for SQL NULL
+     * @throws SQLException if a database access error occurs
+     */
+    @Override
+    public SQLXML getSQLXML(int columnIndex) throws SQLException {
+        connection.getLogger().log(Level.FINEST, "  getSQLXML columnIndex: {0}", columnIndex);
+        String data = getString(columnIndex);
+        if (data == null) {
+            return null;
+        }
+        return new PgSQLXML(connection, data);
+    }
+
+    /** Label-based SQLXML retrieval; delegates by index. */
+    @Override
+    public SQLXML getSQLXML(String columnName) throws SQLException {
+        return getSQLXML(findColumn(columnName));
+    }
+
+    // SQLXML updates funnel through the generic updateValue path rather than throwing.
+    @Override
+    public void updateSQLXML(int columnIndex, SQLXML xmlObject) throws SQLException {
+        updateValue(columnIndex, xmlObject);
+    }
+
+    @Override
+    public void updateSQLXML(String columnName, SQLXML xmlObject) throws SQLException {
+        updateSQLXML(findColumn(columnName), xmlObject);
+    }
+
+    // National-character string retrieval is not implemented.
+    @Override
+    public String getNString(int columnIndex) throws SQLException {
+        connection.getLogger().log(Level.FINEST, "  getNString columnIndex: {0}", columnIndex);
+        throw Driver.notImplemented(this.getClass(), "getNString(int)");
+    }
+
+    @Override
+    public String getNString(String columnName) throws SQLException {
+        return getNString(findColumn(columnName));
+    }
+
+    // National-character stream retrieval is not implemented.
+    @Override
+    public Reader getNCharacterStream(int columnIndex) throws SQLException {
+        connection.getLogger().log(Level.FINEST, "  getNCharacterStream columnIndex: {0}", columnIndex);
+        throw Driver.notImplemented(this.getClass(), "getNCharacterStream(int)");
+    }
+
+    @Override
+    public Reader getNCharacterStream(String columnName) throws SQLException {
+        return getNCharacterStream(findColumn(columnName));
+    }
+
+    // National-character stream updates are not implemented. The int-length
+    // overloads are driver extensions, not part of java.sql.ResultSet (which only
+    // declares the long-length form) — hence no @Override on them.
+    public void updateNCharacterStream(int columnIndex,
+                                       Reader x, int length) throws SQLException {
+        throw Driver.notImplemented(this.getClass(),
+                "updateNCharacterStream(int, Reader, int)");
+    }
+
+    public void updateNCharacterStream(String columnName,
+                                       Reader x, int length) throws SQLException {
+        updateNCharacterStream(findColumn(columnName), x, length);
+    }
+
+    @Override
+    public void updateNCharacterStream(int columnIndex,
+                                       Reader x) throws SQLException {
+        throw Driver.notImplemented(this.getClass(),
+                "updateNCharacterStream(int, Reader)");
+    }
+
+    @Override
+    public void updateNCharacterStream(String columnName,
+                                       Reader x) throws SQLException {
+        updateNCharacterStream(findColumn(columnName), x);
+    }
+
+    @Override
+    public void updateNCharacterStream(int columnIndex,
+                                       Reader x, long length) throws SQLException {
+        throw Driver.notImplemented(this.getClass(),
+                "updateNCharacterStream(int, Reader, long)");
+    }
+
+    @Override
+    public void updateNCharacterStream(String columnName,
+                                       Reader x, long length) throws SQLException {
+        updateNCharacterStream(findColumn(columnName), x, length);
+    }
+
+    // Character-stream updates are not implemented; label variants delegate by index.
+    @Override
+    public void updateCharacterStream(int columnIndex,
+                                      Reader reader, long length)
+            throws SQLException {
+        throw Driver.notImplemented(this.getClass(),
+                "updateCharacterStream(int, Reader, long)");
+    }
+
+    @Override
+    public void updateCharacterStream(String columnName,
+                                      Reader reader, long length)
+            throws SQLException {
+        updateCharacterStream(findColumn(columnName), reader, length);
+    }
+
+    @Override
+    public void updateCharacterStream(int columnIndex,
+                                      Reader reader) throws SQLException {
+        throw Driver.notImplemented(this.getClass(),
+                "updateCharacterStream(int, Reader)");
+    }
+
+    @Override
+    public void updateCharacterStream(String columnName,
+                                      Reader reader) throws SQLException {
+        updateCharacterStream(findColumn(columnName), reader);
+    }
+
+    // Binary-stream updates are not implemented; label variants delegate by index.
+    @Override
+    public void updateBinaryStream(int columnIndex,
+                                   InputStream inputStream, long length)
+            throws SQLException {
+        throw Driver.notImplemented(this.getClass(),
+                "updateBinaryStream(int, InputStream, long)");
+    }
+
+    @Override
+    public void updateBinaryStream(String columnName,
+                                   InputStream inputStream, long length)
+            throws SQLException {
+        updateBinaryStream(findColumn(columnName), inputStream, length);
+    }
+
+    @Override
+    public void updateBinaryStream(int columnIndex,
+                                   InputStream inputStream) throws SQLException {
+        throw Driver.notImplemented(this.getClass(),
+                "updateBinaryStream(int, InputStream)");
+    }
+
+    @Override
+    public void updateBinaryStream(String columnName,
+                                   InputStream inputStream) throws SQLException {
+        updateBinaryStream(findColumn(columnName), inputStream);
+    }
+
+    // ASCII-stream updates are not implemented; label variants delegate by index.
+    @Override
+    public void updateAsciiStream(int columnIndex,
+                                  InputStream inputStream, long length)
+            throws SQLException {
+        throw Driver.notImplemented(this.getClass(),
+                "updateAsciiStream(int, InputStream, long)");
+    }
+
+    @Override
+    public void updateAsciiStream(String columnName,
+                                  InputStream inputStream, long length)
+            throws SQLException {
+        updateAsciiStream(findColumn(columnName), inputStream, length);
+    }
+
+    @Override
+    public void updateAsciiStream(int columnIndex,
+                                  InputStream inputStream) throws SQLException {
+        throw Driver.notImplemented(this.getClass(),
+                "updateAsciiStream(int, InputStream)");
+    }
+
+    @Override
+    public void updateAsciiStream(String columnName,
+                                  InputStream inputStream) throws SQLException {
+        updateAsciiStream(findColumn(columnName), inputStream);
+    }
+
+    /**
+     * Reports whether this object can be unwrapped to the given interface.
+     *
+     * @param iface the interface (or class) to test
+     * @return {@code true} if {@code unwrap(iface)} would succeed
+     */
+    @Override
+    public boolean isWrapperFor(Class<?> iface) throws SQLException {
+        // Equivalent to iface.isAssignableFrom(getClass()): we only ever "wrap" ourselves.
+        return iface.isInstance(this);
+    }
+
+    /**
+     * Unwraps this object to the requested interface.
+     *
+     * @param iface the target interface or class
+     * @return this instance cast to {@code iface}
+     * @throws SQLException if this object does not implement {@code iface}
+     */
+    @Override
+    public <T> T unwrap(Class<T> iface) throws SQLException {
+        // Guard clause: fail fast on interfaces we do not implement.
+        if (!iface.isAssignableFrom(getClass())) {
+            throw new SQLException("Cannot unwrap to " + iface.getName());
+        }
+        return iface.cast(this);
+    }
+
+    /**
+     * Returns the calendar used for date/time conversions when the caller does
+     * not supply one. Delegates to the shared calendar kept by TimestampUtils,
+     * and on the slow path caches the resolved time zone in {@code defaultTimeZone}
+     * so later calls can reuse it.
+     */
+    private Calendar getDefaultCalendar() {
+        if (getTimestampUtils().hasFastDefaultTimeZone()) {
+            return getTimestampUtils().getSharedCalendar(null);
+        }
+        Calendar sharedCalendar = getTimestampUtils().getSharedCalendar(defaultTimeZone);
+        if (defaultTimeZone == null) {
+            // Remember the zone the shared calendar resolved to, for reuse.
+            defaultTimeZone = sharedCalendar.getTimeZone();
+        }
+        return sharedCalendar;
+    }
+
+    /**
+     * Lazily creates the {@link TimestampUtils} instance used for date/time
+     * parsing and formatting in this result set.
+     *
+     * @return the (cached) TimestampUtils for this result set
+     */
+    private TimestampUtils getTimestampUtils() {
+        if (timestampUtils == null) {
+            // The explicit (Provider<TimeZone>) cast was redundant:
+            // QueryExecutorTimeZoneProvider already satisfies that parameter type.
+            timestampUtils = new TimestampUtils(
+                    !connection.getQueryExecutor().getIntegerDateTimes(),
+                    new QueryExecutorTimeZoneProvider(connection.getQueryExecutor()));
+        }
+        return timestampUtils;
+    }
+
+    /**
+     * Rewrites every field label of this result set to upper case.
+     * Used by the metadata queries: PostgreSQL folds unquoted identifiers to
+     * lower case, so it is far simpler to fix the labels after the fact than to
+     * quote and escape every column name in the metadata SQL.
+     *
+     * @return this result set, to allow call chaining
+     */
+    protected PgResultSet upperCaseFieldLabels() {
+        for (int i = 0; i < fields.length; i++) {
+            fields[i].upperCaseLabel();
+        }
+        return this;
+    }
+
+    /**
+     * A {@link PGobject} carrying a type name but no value: {@link #getValue()}
+     * always returns {@code null}, representing SQL NULL for that type.
+     */
+    @SuppressWarnings("serial")
+    static class NullObject extends PGobject {
+        NullObject(String type) {
+            this.type = type;
+        }
+
+        @Override
+        public String getValue() {
+            return null;
+        }
+    }
+
+    /**
+     * Result handler that stores fetched rows and the refreshed cursor directly
+     * into the enclosing PgResultSet, and forwards accumulated warnings to it.
+     */
+    public class CursorResultHandler extends ResultHandlerBase {
+
+        public CursorResultHandler() {
+        }
+
+        @Override
+        public void handleResultRows(Query fromQuery, Field[] fields, List<Tuple> tuples,
+                                     ResultCursor cursor) {
+            // Replace the outer result set's buffer and cursor with the new batch.
+            PgResultSet.this.rows = tuples;
+            PgResultSet.this.cursor = cursor;
+        }
+
+        @Override
+        public void handleCommandStatus(String status, long updateCount, long insertOID) {
+            // A cursor fetch should only yield rows; any command status is a protocol error.
+            handleError(new PSQLException(GT.tr("Unexpected command status: {0}.", status),
+                    PSQLState.PROTOCOL_VIOLATION));
+        }
+
+        @Override
+        public void handleCompletion() throws SQLException {
+            // Propagate any warning gathered during the fetch to the result set.
+            SQLWarning warning = getWarning();
+            if (warning != null) {
+                PgResultSet.this.addWarning(warning);
+            }
+            super.handleCompletion();
+        }
+    }
+
+    /**
+     * Describes one primary-key column of the current result, pairing its
+     * position in the result set with its column name.
+     */
+    private class PrimaryKey {
+        int index; // where in the result set is this primaryKey
+        String name; // what is the columnName of this primary Key
+
+        PrimaryKey(int index, String name) {
+            this.index = index;
+            this.name = name;
+        }
+
+        // Reads this key's current value from the enclosing result set.
+        Object getValue() throws SQLException {
+            return getObject(index);
+        }
     }
-    return this;
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/PgResultSetMetaData.java b/pgjdbc/src/main/java/org/postgresql/jdbc/PgResultSetMetaData.java
index fe5f98e..1cc676b 100644
--- a/pgjdbc/src/main/java/org/postgresql/jdbc/PgResultSetMetaData.java
+++ b/pgjdbc/src/main/java/org/postgresql/jdbc/PgResultSetMetaData.java
@@ -23,446 +23,446 @@ import java.sql.Statement;
 import java.sql.Types;
 
 public class PgResultSetMetaData implements ResultSetMetaData, PGResultSetMetaData {
-  protected final BaseConnection connection;
-  protected final Field[] fields;
+    protected final BaseConnection connection;
+    protected final Field[] fields;
 
-  private boolean fieldInfoFetched;
+    private boolean fieldInfoFetched;
 
-  /**
-   * Initialise for a result with a tuple set and a field descriptor set
-   *
-   * @param connection the connection to retrieve metadata
-   * @param fields the array of field descriptors
-   */
-  public PgResultSetMetaData(BaseConnection connection, Field[] fields) {
-    this.connection = connection;
-    this.fields = fields;
-    this.fieldInfoFetched = false;
-  }
-
-  @Override
-  public int getColumnCount() throws SQLException {
-    return fields.length;
-  }
-
-  /**
-   * {@inheritDoc}
-   *
-   * <p>It is believed that PostgreSQL does not support this feature.
-   *
-   * @param column the first column is 1, the second is 2...
-   * @return true if so
-   * @exception SQLException if a database access error occurs
-   */
-  @Override
-  public boolean isAutoIncrement(int column) throws SQLException {
-    fetchFieldMetaData();
-    Field field = getField(column);
-    FieldMetadata metadata = field.getMetadata();
-    return metadata != null && metadata.autoIncrement;
-  }
-
-  /**
-   * {@inheritDoc}
-   *
-   * <p>Does a column's case matter? ASSUMPTION: Any field that is not obviously case insensitive is
-   * assumed to be case sensitive
-   *
-   * @param column the first column is 1, the second is 2...
-   * @return true if so
-   * @exception SQLException if a database access error occurs
-   */
-  @Override
-  public boolean isCaseSensitive(int column) throws SQLException {
-    Field field = getField(column);
-    return connection.getTypeInfo().isCaseSensitive(field.getOID());
-  }
-
-  /**
-   * {@inheritDoc}
-   *
-   * <p>Can the column be used in a WHERE clause? Basically for this, I split the functions into two
-   * types: recognised types (which are always useable), and OTHER types (which may or may not be
-   * useable). The OTHER types, for now, I will assume they are useable. We should really query the
-   * catalog to see if they are useable.
-   *
-   * @param column the first column is 1, the second is 2...
-   * @return true if they can be used in a WHERE clause
-   * @exception SQLException if a database access error occurs
-   */
-  @Override
-  public boolean isSearchable(int column) throws SQLException {
-    return true;
-  }
-
-  /**
-   * {@inheritDoc}
-   *
-   * <p>Is the column a cash value? 6.1 introduced the cash/money type, which haven't been incorporated
-   * as of 970414, so I just check the type name for both 'cash' and 'money'
-   *
-   * @param column the first column is 1, the second is 2...
-   * @return true if its a cash column
-   * @exception SQLException if a database access error occurs
-   */
-  @Override
-  public boolean isCurrency(int column) throws SQLException {
-    String typeName = getPGType(column);
-
-    return "cash".equals(typeName) || "money".equals(typeName);
-  }
-
-  @Override
-  public int isNullable(int column) throws SQLException {
-    fetchFieldMetaData();
-    Field field = getField(column);
-    FieldMetadata metadata = field.getMetadata();
-    return metadata == null ? ResultSetMetaData.columnNullable : metadata.nullable;
-  }
-
-  /**
-   * {@inheritDoc}
-   *
-   * <p>Is the column a signed number? In PostgreSQL, all numbers are signed, so this is trivial.
-   * However, strings are not signed (duh!)
-   *
-   * @param column the first column is 1, the second is 2...
-   * @return true if so
-   * @exception SQLException if a database access error occurs
-   */
-  @Override
-  public boolean isSigned(int column) throws SQLException {
-    Field field = getField(column);
-    return connection.getTypeInfo().isSigned(field.getOID());
-  }
-
-  @Override
-  public int getColumnDisplaySize(int column) throws SQLException {
-    Field field = getField(column);
-    return connection.getTypeInfo().getDisplaySize(field.getOID(), field.getMod());
-  }
-
-  @Override
-  public String getColumnLabel(int column) throws SQLException {
-    Field field = getField(column);
-    return field.getColumnLabel();
-  }
-
-  @Override
-  public String getColumnName(int column) throws SQLException {
-    return getColumnLabel(column);
-  }
-
-  @Override
-  public String getBaseColumnName(int column) throws SQLException {
-    Field field = getField(column);
-    if (field.getTableOid() == 0) {
-      return "";
-    }
-    fetchFieldMetaData();
-    FieldMetadata metadata = field.getMetadata();
-    return metadata == null ? "" : metadata.columnName;
-  }
-
-  @Override
-  public String getSchemaName(int column) throws SQLException {
-    return "";
-  }
-
-  private boolean populateFieldsWithMetadata(Gettable<FieldMetadata.Key, FieldMetadata> metadata) {
-    boolean allOk = true;
-    for (Field field : fields) {
-      if (field.getMetadata() != null) {
-        // No need to update metadata
-        continue;
-      }
-
-      final FieldMetadata fieldMetadata =
-          metadata.get(new FieldMetadata.Key(field.getTableOid(), field.getPositionInTable()));
-      if (fieldMetadata == null) {
-        allOk = false;
-      } else {
-        field.setMetadata(fieldMetadata);
-      }
-    }
-    fieldInfoFetched |= allOk;
-    return allOk;
-  }
-
-  private void fetchFieldMetaData() throws SQLException {
-    if (fieldInfoFetched) {
-      return;
+    /**
+     * Initialise for a result with a tuple set and a field descriptor set.
+     *
+     * @param connection the connection to retrieve metadata
+     * @param fields     the array of field descriptors
+     */
+    public PgResultSetMetaData(BaseConnection connection, Field[] fields) {
+        this.connection = connection;
+        this.fields = fields;
+        this.fieldInfoFetched = false; // per-column metadata is fetched lazily on first use
+    }
 
-    if (populateFieldsWithMetadata(connection.getFieldMetadataCache())) {
-      return;
+    /** Returns the number of columns described by this metadata object. */
+    @Override
+    public int getColumnCount() throws SQLException {
+        return fields.length;
     }
 
-    StringBuilder sql = new StringBuilder(
-        "SELECT c.oid, a.attnum, a.attname, c.relname, n.nspname, "
-            + "a.attnotnull OR (t.typtype = 'd' AND t.typnotnull), ");
-
-    if ( connection.haveMinimumServerVersion(ServerVersion.v10)) {
-      sql.append("a.attidentity != '' OR pg_catalog.pg_get_expr(d.adbin, d.adrelid) LIKE '%nextval(%' ");
-    } else {
-      sql.append("pg_catalog.pg_get_expr(d.adbin, d.adrelid) LIKE '%nextval(%' ");
-    }
-    sql.append("FROM pg_catalog.pg_class c "
-            + "JOIN pg_catalog.pg_namespace n ON (c.relnamespace = n.oid) "
-            + "JOIN pg_catalog.pg_attribute a ON (c.oid = a.attrelid) "
-            + "JOIN pg_catalog.pg_type t ON (a.atttypid = t.oid) "
-            + "LEFT JOIN pg_catalog.pg_attrdef d ON (d.adrelid = a.attrelid AND d.adnum = a.attnum) "
-            + "JOIN (");
-
-    // 7.4 servers don't support row IN operations (a,b) IN ((c,d),(e,f))
-    // so we've got to fake that with a JOIN here.
-    //
-    boolean hasSourceInfo = false;
-    for (Field field : fields) {
-      if (field.getMetadata() != null) {
-        continue;
-      }
-
-      if (hasSourceInfo) {
-        sql.append(" UNION ALL ");
-      }
-
-      sql.append("SELECT ");
-      sql.append(field.getTableOid());
-      if (!hasSourceInfo) {
-        sql.append(" AS oid ");
-      }
-      sql.append(", ");
-      sql.append(field.getPositionInTable());
-      if (!hasSourceInfo) {
-        sql.append(" AS attnum");
-      }
-
-      if (!hasSourceInfo) {
-        hasSourceInfo = true;
-      }
-    }
-    sql.append(") vals ON (c.oid = vals.oid AND a.attnum = vals.attnum) ");
-
-    if (!hasSourceInfo) {
-      fieldInfoFetched = true;
-      return;
+    /**
+     * {@inheritDoc}
+     *
+     * <p>Detected from lazily-fetched catalog metadata: a column counts as
+     * auto-increment when it is an identity column or its default uses
+     * {@code nextval(...)} (i.e. serial/bigserial).
+     *
+     * @param column the first column is 1, the second is 2...
+     * @return true if so
+     * @throws SQLException if a database access error occurs
+     */
+    @Override
+    public boolean isAutoIncrement(int column) throws SQLException {
+        fetchFieldMetaData();
+        Field field = getField(column);
+        FieldMetadata metadata = field.getMetadata();
+        // Missing metadata (e.g. computed columns) is treated as not auto-increment.
+        return metadata != null && metadata.autoIncrement;
     }
 
-    Statement stmt = connection.createStatement();
-    ResultSet rs = null;
-    GettableHashMap<FieldMetadata.Key, FieldMetadata> md = new GettableHashMap<>();
-    try {
-      rs = stmt.executeQuery(sql.toString());
-      while (rs.next()) {
-        int table = (int) rs.getLong(1);
-        int column = (int) rs.getLong(2);
-        String columnName = rs.getString(3);
-        String tableName = rs.getString(4);
-        String schemaName = rs.getString(5);
-        int nullable =
-            rs.getBoolean(6) ? ResultSetMetaData.columnNoNulls : ResultSetMetaData.columnNullable;
-        boolean autoIncrement = rs.getBoolean(7);
-        FieldMetadata fieldMetadata =
-            new FieldMetadata(columnName, tableName, schemaName, nullable, autoIncrement);
-        FieldMetadata.Key key = new FieldMetadata.Key(table, column);
-        md.put(key, fieldMetadata);
-      }
-    } finally {
-      JdbcBlackHole.close(rs);
-      JdbcBlackHole.close(stmt);
-    }
-    populateFieldsWithMetadata(md);
-    connection.getFieldMetadataCache().putAll(md);
-  }
-
-  @Override
-  public String getBaseSchemaName(int column) throws SQLException {
-    fetchFieldMetaData();
-    Field field = getField(column);
-    FieldMetadata metadata = field.getMetadata();
-    return metadata == null ? "" : metadata.schemaName;
-  }
-
-  @Override
-  public int getPrecision(int column) throws SQLException {
-    Field field = getField(column);
-    return connection.getTypeInfo().getPrecision(field.getOID(), field.getMod());
-  }
-
-  @Override
-  public int getScale(int column) throws SQLException {
-    Field field = getField(column);
-    return connection.getTypeInfo().getScale(field.getOID(), field.getMod());
-  }
-
-  @Override
-  public String getTableName(int column) throws SQLException {
-    return getBaseTableName(column);
-  }
-
-  @Override
-  public String getBaseTableName(int column) throws SQLException {
-    fetchFieldMetaData();
-    Field field = getField(column);
-    FieldMetadata metadata = field.getMetadata();
-    return metadata == null ? "" : metadata.tableName;
-  }
-
-  /**
-   * {@inheritDoc}
-   *
-   * <p>As with getSchemaName(), we can say that if
-   * getTableName() returns n/a, then we can too - otherwise, we need to work on it.
-   *
-   * @param column the first column is 1, the second is 2...
-   * @return catalog name, or "" if not applicable
-   * @exception SQLException if a database access error occurs
-   */
-  @Override
-  public String getCatalogName(int column) throws SQLException {
-    return "";
-  }
-
-  @Override
-  public int getColumnType(int column) throws SQLException {
-    return getSQLType(column);
-  }
-
-  @Override
-  public int getFormat(int column) throws SQLException {
-    return getField(column).getFormat();
-  }
-
-  @Override
-  public String getColumnTypeName(int column) throws SQLException {
-    String type = getPGType(column);
-    if (isAutoIncrement(column)) {
-      if ("int4".equals(type)) {
-        return "serial";
-      } else if ("int8".equals(type)) {
-        return "bigserial";
-      } else if ("int2".equals(type) && connection.haveMinimumServerVersion(ServerVersion.v9_2)) {
-        return "smallserial";
-      }
+    /**
+     * {@inheritDoc}
+     *
+     * <p>Does a column's case matter? ASSUMPTION: Any field that is not obviously case insensitive is
+     * assumed to be case sensitive
+     *
+     * @param column the first column is 1, the second is 2...
+     * @return true if so
+     * @throws SQLException if a database access error occurs
+     */
+    @Override
+    public boolean isCaseSensitive(int column) throws SQLException {
+        Field field = getField(column);
+        return connection.getTypeInfo().isCaseSensitive(field.getOID());
     }
 
-    return type;
-  }
-
-  /**
-   * {@inheritDoc}
-   *
-   * <p>In reality, we would have to check the GRANT/REVOKE
-   * stuff for this to be effective, and I haven't really looked into that yet, so this will get
-   * re-visited.
-   *
-   * @param column the first column is 1, the second is 2, etc.*
-   * @return true if so*
-   * @exception SQLException if a database access error occurs
-   */
-  @Override
-  public boolean isReadOnly(int column) throws SQLException {
-    return false;
-  }
-
-  /**
-   * {@inheritDoc}
-   *
-   * <p>In reality have to check
-   * the GRANT/REVOKE stuff, which I haven't worked with as yet. However, if it isn't ReadOnly, then
-   * it is obviously writable.
-   *
-   * @param column the first column is 1, the second is 2, etc.
-   * @return true if so
-   * @exception SQLException if a database access error occurs
-   */
-  @Override
-  public boolean isWritable(int column) throws SQLException {
-    return !isReadOnly(column);
-  }
-
-  /**
-   * {@inheritDoc}
-   *
-   * <p>Hmmm...this is a bad one, since the two
-   * preceding functions have not been really defined. I cannot tell is the short answer. I thus
-   * return isWritable() just to give us an idea.
-   *
-   * @param column the first column is 1, the second is 2, etc..
-   * @return true if so
-   * @exception SQLException if a database access error occurs
-   */
-  @Override
-  public boolean isDefinitelyWritable(int column) throws SQLException {
-    return false;
-  }
-
-  // ********************************************************
-  // END OF PUBLIC INTERFACE
-  // ********************************************************
-
-  /**
-   * For several routines in this package, we need to convert a columnIndex into a Field[]
-   * descriptor. Rather than do the same code several times, here it is.
-   *
-   * @param columnIndex the first column is 1, the second is 2...
-   * @return the Field description
-   * @exception SQLException if a database access error occurs
-   */
-  protected Field getField(int columnIndex) throws SQLException {
-    if (columnIndex < 1 || columnIndex > fields.length) {
-      throw new PSQLException(
-          GT.tr("The column index is out of range: {0}, number of columns: {1}.",
-              columnIndex, fields.length),
-          PSQLState.INVALID_PARAMETER_VALUE);
-    }
-    return fields[columnIndex - 1];
-  }
-
-  protected String getPGType(int columnIndex) throws SQLException {
-    return connection.getTypeInfo().getPGType(getField(columnIndex).getOID());
-  }
-
-  protected int getSQLType(int columnIndex) throws SQLException {
-    return connection.getTypeInfo().getSQLType(getField(columnIndex).getOID());
-  }
-
-  // ** JDBC 2 Extensions **
-
-  // This can hook into our PG_Object mechanism
-
-  @Override
-  public String getColumnClassName(int column) throws SQLException {
-    Field field = getField(column);
-    String result = connection.getTypeInfo().getJavaClass(field.getOID());
-
-    if (result != null) {
-      return result;
+    /**
+     * {@inheritDoc}
+     *
+     * <p>Can the column be used in a WHERE clause? Basically for this, I split the functions into two
+     * types: recognised types (which are always usable), and OTHER types (which may or may not be
+     * usable). The OTHER types, for now, I will assume they are usable. We should really query the
+     * catalog to see if they are usable.
+     *
+     * @param column the first column is 1, the second is 2...
+     * @return true if they can be used in a WHERE clause
+     * @throws SQLException if a database access error occurs
+     */
+    @Override
+    public boolean isSearchable(int column) throws SQLException {
+        return true;
     }
 
-    int sqlType = getSQLType(column);
-    if (sqlType == Types.ARRAY) {
-      return "java.sql.Array";
-    } else {
-      String type = getPGType(column);
-      if ("unknown".equals(type)) {
-        return "java.lang.String";
-      }
-      return "java.lang.Object";
-    }
-  }
+    /**
+     * {@inheritDoc}
+     *
+     * <p>Is the column a cash value? 6.1 introduced the cash/money type, which haven't been incorporated
+     * as of 970414, so I just check the type name for both 'cash' and 'money'
+     *
+     * @param column the first column is 1, the second is 2...
+     * @return true if its a cash column
+     * @throws SQLException if a database access error occurs
+     */
+    @Override
+    public boolean isCurrency(int column) throws SQLException {
+        String typeName = getPGType(column);
 
-  @Override
-  public boolean isWrapperFor(Class<?> iface) throws SQLException {
-    return iface.isAssignableFrom(getClass());
-  }
-
-  @Override
-  public <T> T unwrap(Class<T> iface) throws SQLException {
-    if (iface.isAssignableFrom(getClass())) {
-      return iface.cast(this);
+        return "cash".equals(typeName) || "money".equals(typeName);
+    }
+
+    @Override
+    public int isNullable(int column) throws SQLException {
+        fetchFieldMetaData();
+        Field field = getField(column);
+        FieldMetadata metadata = field.getMetadata();
+        return metadata == null ? ResultSetMetaData.columnNullable : metadata.nullable;
+    }
+
+    /**
+     * {@inheritDoc}
+     *
+     * <p>Is the column a signed number? In PostgreSQL, all numbers are signed, so this is trivial.
+     * However, strings are not signed (duh!)
+     *
+     * @param column the first column is 1, the second is 2...
+     * @return true if so
+     * @throws SQLException if a database access error occurs
+     */
+    @Override
+    public boolean isSigned(int column) throws SQLException {
+        Field field = getField(column);
+        return connection.getTypeInfo().isSigned(field.getOID());
+    }
+
+    @Override
+    public int getColumnDisplaySize(int column) throws SQLException {
+        Field field = getField(column);
+        return connection.getTypeInfo().getDisplaySize(field.getOID(), field.getMod());
+    }
+
+    @Override
+    public String getColumnLabel(int column) throws SQLException {
+        Field field = getField(column);
+        return field.getColumnLabel();
+    }
+
+    @Override
+    public String getColumnName(int column) throws SQLException {
+        return getColumnLabel(column);
+    }
+
+    @Override
+    public String getBaseColumnName(int column) throws SQLException {
+        Field field = getField(column);
+        if (field.getTableOid() == 0) {
+            return "";
+        }
+        fetchFieldMetaData();
+        FieldMetadata metadata = field.getMetadata();
+        return metadata == null ? "" : metadata.columnName;
+    }
+
+    @Override
+    public String getSchemaName(int column) throws SQLException {
+        return "";
+    }
+
+    private boolean populateFieldsWithMetadata(Gettable<FieldMetadata.Key, FieldMetadata> metadata) {
+        boolean allOk = true;
+        for (Field field : fields) {
+            if (field.getMetadata() != null) {
+                // No need to update metadata
+                continue;
+            }
+
+            final FieldMetadata fieldMetadata =
+                    metadata.get(new FieldMetadata.Key(field.getTableOid(), field.getPositionInTable()));
+            if (fieldMetadata == null) {
+                allOk = false;
+            } else {
+                field.setMetadata(fieldMetadata);
+            }
+        }
+        fieldInfoFetched |= allOk;
+        return allOk;
+    }
+
+    private void fetchFieldMetaData() throws SQLException {
+        if (fieldInfoFetched) {
+            return;
+        }
+
+        if (populateFieldsWithMetadata(connection.getFieldMetadataCache())) {
+            return;
+        }
+
+        StringBuilder sql = new StringBuilder(
+                "SELECT c.oid, a.attnum, a.attname, c.relname, n.nspname, "
+                        + "a.attnotnull OR (t.typtype = 'd' AND t.typnotnull), ");
+
+        if (connection.haveMinimumServerVersion(ServerVersion.v10)) {
+            sql.append("a.attidentity != '' OR pg_catalog.pg_get_expr(d.adbin, d.adrelid) LIKE '%nextval(%' ");
+        } else {
+            sql.append("pg_catalog.pg_get_expr(d.adbin, d.adrelid) LIKE '%nextval(%' ");
+        }
+        sql.append("FROM pg_catalog.pg_class c "
+                + "JOIN pg_catalog.pg_namespace n ON (c.relnamespace = n.oid) "
+                + "JOIN pg_catalog.pg_attribute a ON (c.oid = a.attrelid) "
+                + "JOIN pg_catalog.pg_type t ON (a.atttypid = t.oid) "
+                + "LEFT JOIN pg_catalog.pg_attrdef d ON (d.adrelid = a.attrelid AND d.adnum = a.attnum) "
+                + "JOIN (");
+
+        // 7.4 servers don't support row IN operations (a,b) IN ((c,d),(e,f))
+        // so we've got to fake that with a JOIN here.
+        //
+        boolean hasSourceInfo = false;
+        for (Field field : fields) {
+            if (field.getMetadata() != null) {
+                continue;
+            }
+
+            if (hasSourceInfo) {
+                sql.append(" UNION ALL ");
+            }
+
+            sql.append("SELECT ");
+            sql.append(field.getTableOid());
+            if (!hasSourceInfo) {
+                sql.append(" AS oid ");
+            }
+            sql.append(", ");
+            sql.append(field.getPositionInTable());
+            if (!hasSourceInfo) {
+                sql.append(" AS attnum");
+            }
+
+            if (!hasSourceInfo) {
+                hasSourceInfo = true;
+            }
+        }
+        sql.append(") vals ON (c.oid = vals.oid AND a.attnum = vals.attnum) ");
+
+        if (!hasSourceInfo) {
+            fieldInfoFetched = true;
+            return;
+        }
+
+        Statement stmt = connection.createStatement();
+        ResultSet rs = null;
+        GettableHashMap<FieldMetadata.Key, FieldMetadata> md = new GettableHashMap<>();
+        try {
+            rs = stmt.executeQuery(sql.toString());
+            while (rs.next()) {
+                int table = (int) rs.getLong(1);
+                int column = (int) rs.getLong(2);
+                String columnName = rs.getString(3);
+                String tableName = rs.getString(4);
+                String schemaName = rs.getString(5);
+                int nullable =
+                        rs.getBoolean(6) ? ResultSetMetaData.columnNoNulls : ResultSetMetaData.columnNullable;
+                boolean autoIncrement = rs.getBoolean(7);
+                FieldMetadata fieldMetadata =
+                        new FieldMetadata(columnName, tableName, schemaName, nullable, autoIncrement);
+                FieldMetadata.Key key = new FieldMetadata.Key(table, column);
+                md.put(key, fieldMetadata);
+            }
+        } finally {
+            JdbcBlackHole.close(rs);
+            JdbcBlackHole.close(stmt);
+        }
+        populateFieldsWithMetadata(md);
+        connection.getFieldMetadataCache().putAll(md);
+    }
+
+    @Override
+    public String getBaseSchemaName(int column) throws SQLException {
+        fetchFieldMetaData();
+        Field field = getField(column);
+        FieldMetadata metadata = field.getMetadata();
+        return metadata == null ? "" : metadata.schemaName;
+    }
+
+    @Override
+    public int getPrecision(int column) throws SQLException {
+        Field field = getField(column);
+        return connection.getTypeInfo().getPrecision(field.getOID(), field.getMod());
+    }
+
+    @Override
+    public int getScale(int column) throws SQLException {
+        Field field = getField(column);
+        return connection.getTypeInfo().getScale(field.getOID(), field.getMod());
+    }
+
+    @Override
+    public String getTableName(int column) throws SQLException {
+        return getBaseTableName(column);
+    }
+
+    @Override
+    public String getBaseTableName(int column) throws SQLException {
+        fetchFieldMetaData();
+        Field field = getField(column);
+        FieldMetadata metadata = field.getMetadata();
+        return metadata == null ? "" : metadata.tableName;
+    }
+
+    /**
+     * {@inheritDoc}
+     *
+     * <p>As with getSchemaName(), we can say that if
+     * getTableName() returns n/a, then we can too - otherwise, we need to work on it.
+     *
+     * @param column the first column is 1, the second is 2...
+     * @return catalog name, or "" if not applicable
+     * @throws SQLException if a database access error occurs
+     */
+    @Override
+    public String getCatalogName(int column) throws SQLException {
+        return "";
+    }
+
+    @Override
+    public int getColumnType(int column) throws SQLException {
+        return getSQLType(column);
+    }
+
+    @Override
+    public int getFormat(int column) throws SQLException {
+        return getField(column).getFormat();
+    }
+
+    @Override
+    public String getColumnTypeName(int column) throws SQLException {
+        String type = getPGType(column);
+        if (isAutoIncrement(column)) {
+            if ("int4".equals(type)) {
+                return "serial";
+            } else if ("int8".equals(type)) {
+                return "bigserial";
+            } else if ("int2".equals(type) && connection.haveMinimumServerVersion(ServerVersion.v9_2)) {
+                return "smallserial";
+            }
+        }
+
+        return type;
+    }
+
+    /**
+     * {@inheritDoc}
+     *
+     * <p>In reality, we would have to check the GRANT/REVOKE
+     * stuff for this to be effective, and I haven't really looked into that yet, so this will get
+     * re-visited.
+     *
+     * @param column the first column is 1, the second is 2, etc.
+     * @return true if so
+     * @throws SQLException if a database access error occurs
+     */
+    @Override
+    public boolean isReadOnly(int column) throws SQLException {
+        return false;
+    }
+
+    /**
+     * {@inheritDoc}
+     *
+     * <p>In reality have to check
+     * the GRANT/REVOKE stuff, which I haven't worked with as yet. However, if it isn't ReadOnly, then
+     * it is obviously writable.
+     *
+     * @param column the first column is 1, the second is 2, etc.
+     * @return true if so
+     * @throws SQLException if a database access error occurs
+     */
+    @Override
+    public boolean isWritable(int column) throws SQLException {
+        return !isReadOnly(column);
+    }
+
+    /**
+     * {@inheritDoc}
+     *
+     * <p>This is hard to determine, since the two
+     * preceding functions have not been fully defined. The short answer is that we cannot tell,
+     * so we conservatively return false.
+     *
+     * @param column the first column is 1, the second is 2, etc.
+     * @return true if so
+     * @throws SQLException if a database access error occurs
+     */
+    @Override
+    public boolean isDefinitelyWritable(int column) throws SQLException {
+        return false;
+    }
+
+    // ********************************************************
+    // END OF PUBLIC INTERFACE
+    // ********************************************************
+
+    /**
+     * For several routines in this package, we need to convert a columnIndex into a Field[]
+     * descriptor. Rather than do the same code several times, here it is.
+     *
+     * @param columnIndex the first column is 1, the second is 2...
+     * @return the Field description
+     * @throws SQLException if a database access error occurs
+     */
+    protected Field getField(int columnIndex) throws SQLException {
+        if (columnIndex < 1 || columnIndex > fields.length) {
+            throw new PSQLException(
+                    GT.tr("The column index is out of range: {0}, number of columns: {1}.",
+                            columnIndex, fields.length),
+                    PSQLState.INVALID_PARAMETER_VALUE);
+        }
+        return fields[columnIndex - 1];
+    }
+
+    protected String getPGType(int columnIndex) throws SQLException {
+        return connection.getTypeInfo().getPGType(getField(columnIndex).getOID());
+    }
+
+    protected int getSQLType(int columnIndex) throws SQLException {
+        return connection.getTypeInfo().getSQLType(getField(columnIndex).getOID());
+    }
+
+    // ** JDBC 2 Extensions **
+
+    // This can hook into our PG_Object mechanism
+
+    @Override
+    public String getColumnClassName(int column) throws SQLException {
+        Field field = getField(column);
+        String result = connection.getTypeInfo().getJavaClass(field.getOID());
+
+        if (result != null) {
+            return result;
+        }
+
+        int sqlType = getSQLType(column);
+        if (sqlType == Types.ARRAY) {
+            return "java.sql.Array";
+        } else {
+            String type = getPGType(column);
+            if ("unknown".equals(type)) {
+                return "java.lang.String";
+            }
+            return "java.lang.Object";
+        }
+    }
+
+    @Override
+    public boolean isWrapperFor(Class<?> iface) throws SQLException {
+        return iface.isAssignableFrom(getClass());
+    }
+
+    @Override
+    public <T> T unwrap(Class<T> iface) throws SQLException {
+        if (iface.isAssignableFrom(getClass())) {
+            return iface.cast(this);
+        }
+        throw new SQLException("Cannot unwrap to " + iface.getName());
     }
-    throw new SQLException("Cannot unwrap to " + iface.getName());
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/PgSQLXML.java b/pgjdbc/src/main/java/org/postgresql/jdbc/PgSQLXML.java
index a904d76..1dd028d 100644
--- a/pgjdbc/src/main/java/org/postgresql/jdbc/PgSQLXML.java
+++ b/pgjdbc/src/main/java/org/postgresql/jdbc/PgSQLXML.java
@@ -52,282 +52,282 @@ import javax.xml.transform.stream.StreamSource;
 @SuppressWarnings("try")
 public class PgSQLXML implements SQLXML {
 
-  private final ResourceLock lock = new ResourceLock();
-  private final BaseConnection conn;
-  private String data; // The actual data contained.
-  private boolean initialized; // Has someone assigned the data for this object?
-  private boolean active; // Is anyone in the process of loading data into us?
-  private boolean freed;
+    private final ResourceLock lock = new ResourceLock();
+    private final BaseConnection conn;
+    private String data; // The actual data contained.
+    private boolean initialized; // Has someone assigned the data for this object?
+    private boolean active; // Is anyone in the process of loading data into us?
+    private boolean freed;
 
-  private ByteArrayOutputStream byteArrayOutputStream;
-  private StringWriter stringWriter;
-  private DOMResult domResult;
+    private ByteArrayOutputStream byteArrayOutputStream;
+    private StringWriter stringWriter;
+    private DOMResult domResult;
 
-  public PgSQLXML(BaseConnection conn) {
-    this(conn, null, false);
-  }
-
-  public PgSQLXML(BaseConnection conn, String data) {
-    this(conn, data, true);
-  }
-
-  private PgSQLXML(BaseConnection conn, String data, boolean initialized) {
-    this.conn = conn;
-    this.data = data;
-    this.initialized = initialized;
-    this.active = false;
-    this.freed = false;
-  }
-
-  private PGXmlFactoryFactory getXmlFactoryFactory() throws SQLException {
-    if (conn != null) {
-      return conn.getXmlFactoryFactory();
+    public PgSQLXML(BaseConnection conn) {
+        this(conn, null, false);
     }
-    return DefaultPGXmlFactoryFactory.INSTANCE;
-  }
 
-  @Override
-  public void free() {
-    try (ResourceLock ignore = lock.obtain()) {
-      freed = true;
-      data = null;
+    public PgSQLXML(BaseConnection conn, String data) {
+        this(conn, data, true);
     }
-  }
 
-  @Override
-  public InputStream getBinaryStream() throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      checkFreed();
-      ensureInitialized();
-
-      if (data == null) {
-        return null;
-      }
-
-      try {
-        return new ByteArrayInputStream(conn.getEncoding().encode(data));
-      } catch (IOException ioe) {
-        // This should be a can't happen exception. We just
-        // decoded this data, so it would be surprising that
-        // we couldn't encode it.
-        // For this reason don't make it translatable.
-        throw new PSQLException("Failed to re-encode xml data.", PSQLState.DATA_ERROR, ioe);
-      }
+    private PgSQLXML(BaseConnection conn, String data, boolean initialized) {
+        this.conn = conn;
+        this.data = data;
+        this.initialized = initialized;
+        this.active = false;
+        this.freed = false;
     }
-  }
 
-  @Override
-  public Reader getCharacterStream() throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      checkFreed();
-      ensureInitialized();
-
-      if (data == null) {
-        return null;
-      }
-
-      return new StringReader(data);
-    }
-  }
-
-  // We must implement this unsafely because that's what the
-  // interface requires. Because it says we're returning T
-  // which is unknown, none of the return values can satisfy it
-  // as Java isn't going to understand the if statements that
-  // ensure they are the same.
-  //
-  @SuppressWarnings("unchecked")
-  @Override
-  public <T extends Source> T getSource(Class<T> sourceClass)
-      throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      checkFreed();
-      ensureInitialized();
-
-      String data = this.data;
-      if (data == null) {
-        return null;
-      }
-
-      try {
-        if (sourceClass == null || DOMSource.class.equals(sourceClass)) {
-          DocumentBuilder builder = getXmlFactoryFactory().newDocumentBuilder();
-          InputSource input = new InputSource(new StringReader(data));
-          DOMSource domSource = new DOMSource(builder.parse(input));
-          return (T) domSource;
-        } else if (SAXSource.class.equals(sourceClass)) {
-          XMLReader reader = getXmlFactoryFactory().createXMLReader();
-          InputSource is = new InputSource(new StringReader(data));
-          return sourceClass.cast(new SAXSource(reader, is));
-        } else if (StreamSource.class.equals(sourceClass)) {
-          return sourceClass.cast(new StreamSource(new StringReader(data)));
-        } else if (StAXSource.class.equals(sourceClass)) {
-          XMLInputFactory xif = getXmlFactoryFactory().newXMLInputFactory();
-          XMLStreamReader xsr = xif.createXMLStreamReader(new StringReader(data));
-          return sourceClass.cast(new StAXSource(xsr));
+    private PGXmlFactoryFactory getXmlFactoryFactory() throws SQLException {
+        if (conn != null) {
+            return conn.getXmlFactoryFactory();
         }
-      } catch (Exception e) {
-        throw new PSQLException(GT.tr("Unable to decode xml data."), PSQLState.DATA_ERROR, e);
-      }
-
-      throw new PSQLException(GT.tr("Unknown XML Source class: {0}", sourceClass),
-          PSQLState.INVALID_PARAMETER_TYPE);
+        return DefaultPGXmlFactoryFactory.INSTANCE;
     }
-  }
 
-  @Override
-  public String getString() throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      checkFreed();
-      ensureInitialized();
-      return data;
-    }
-  }
-
-  @Override
-  public OutputStream setBinaryStream() throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      checkFreed();
-      initialize();
-      active = true;
-      byteArrayOutputStream = new ByteArrayOutputStream();
-      return byteArrayOutputStream;
-    }
-  }
-
-  @Override
-  public Writer setCharacterStream() throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      checkFreed();
-      initialize();
-      active = true;
-      stringWriter = new StringWriter();
-      return stringWriter;
-    }
-  }
-
-  @SuppressWarnings("unchecked")
-  @Override
-  public <T extends Result> T setResult(Class<T> resultClass) throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      checkFreed();
-      initialize();
-
-      if (resultClass == null || DOMResult.class.equals(resultClass)) {
-        domResult = new DOMResult();
-        active = true;
-        return (T) domResult;
-      } else if (SAXResult.class.equals(resultClass)) {
-        try {
-          SAXTransformerFactory transformerFactory = getXmlFactoryFactory().newSAXTransformerFactory();
-          TransformerHandler transformerHandler = transformerFactory.newTransformerHandler();
-          stringWriter = new StringWriter();
-          transformerHandler.setResult(new StreamResult(stringWriter));
-          active = true;
-          return resultClass.cast(new SAXResult(transformerHandler));
-        } catch (TransformerException te) {
-          throw new PSQLException(GT.tr("Unable to create SAXResult for SQLXML."),
-              PSQLState.UNEXPECTED_ERROR, te);
+    @Override
+    public void free() {
+        try (ResourceLock ignore = lock.obtain()) {
+            freed = true;
+            data = null;
         }
-      } else if (StreamResult.class.equals(resultClass)) {
-        stringWriter = new StringWriter();
-        active = true;
-        return resultClass.cast(new StreamResult(stringWriter));
-      } else if (StAXResult.class.equals(resultClass)) {
-        StringWriter stringWriter = new StringWriter();
-        this.stringWriter = stringWriter;
-        try {
-          XMLOutputFactory xof = getXmlFactoryFactory().newXMLOutputFactory();
-          XMLStreamWriter xsw = xof.createXMLStreamWriter(stringWriter);
-          active = true;
-          return resultClass.cast(new StAXResult(xsw));
-        } catch (XMLStreamException xse) {
-          throw new PSQLException(GT.tr("Unable to create StAXResult for SQLXML"),
-              PSQLState.UNEXPECTED_ERROR, xse);
+    }
+
+    @Override
+    public InputStream getBinaryStream() throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            checkFreed();
+            ensureInitialized();
+
+            if (data == null) {
+                return null;
+            }
+
+            try {
+                return new ByteArrayInputStream(conn.getEncoding().encode(data));
+            } catch (IOException ioe) {
+                // This should be a can't happen exception. We just
+                // decoded this data, so it would be surprising that
+                // we couldn't encode it.
+                // For this reason don't make it translatable.
+                throw new PSQLException("Failed to re-encode xml data.", PSQLState.DATA_ERROR, ioe);
+            }
         }
-      }
-
-      throw new PSQLException(GT.tr("Unknown XML Result class: {0}", resultClass),
-          PSQLState.INVALID_PARAMETER_TYPE);
-    }
-  }
-
-  @Override
-  public void setString(String value) throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      checkFreed();
-      initialize();
-      data = value;
-    }
-  }
-
-  private void checkFreed() throws SQLException {
-    if (freed) {
-      throw new PSQLException(GT.tr("This SQLXML object has already been freed."),
-          PSQLState.OBJECT_NOT_IN_STATE);
-    }
-  }
-
-  private void ensureInitialized() throws SQLException {
-    if (!initialized) {
-      throw new PSQLException(
-          GT.tr(
-              "This SQLXML object has not been initialized, so you cannot retrieve data from it."),
-          PSQLState.OBJECT_NOT_IN_STATE);
     }
 
-    // Is anyone loading data into us at the moment?
-    if (!active) {
-      return;
+    @Override
+    public Reader getCharacterStream() throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            checkFreed();
+            ensureInitialized();
+
+            if (data == null) {
+                return null;
+            }
+
+            return new StringReader(data);
+        }
     }
 
-    if (byteArrayOutputStream != null) {
-      try {
-        data = conn.getEncoding().decode(byteArrayOutputStream.toByteArray());
-      } catch (IOException ioe) {
-        throw new PSQLException(GT.tr("Failed to convert binary xml data to encoding: {0}.",
-            conn.getEncoding().name()), PSQLState.DATA_ERROR, ioe);
-      } finally {
-        byteArrayOutputStream = null;
-        active = false;
-      }
-    } else if (stringWriter != null) {
-      // This is also handling the work for Stream, SAX, and StAX Results
-      // as they will use the same underlying stringwriter variable.
-      //
-      data = stringWriter.toString();
-      stringWriter = null;
-      active = false;
-    } else if (domResult != null) {
-      DOMResult domResult = this.domResult;
-      // Copy the content from the result to a source
-      // and use the identify transform to get it into a
-      // friendlier result format.
-      try {
-        TransformerFactory factory = getXmlFactoryFactory().newTransformerFactory();
-        Transformer transformer = factory.newTransformer();
-        DOMSource domSource = new DOMSource(domResult.getNode());
-        StringWriter stringWriter = new StringWriter();
-        StreamResult streamResult = new StreamResult(stringWriter);
-        transformer.transform(domSource, streamResult);
-        data = stringWriter.toString();
-      } catch (TransformerException te) {
-        throw new PSQLException(GT.tr("Unable to convert DOMResult SQLXML data to a string."),
-            PSQLState.DATA_ERROR, te);
-      } finally {
-        domResult = null;
-        active = false;
-      }
-    }
-  }
+    // We must implement this unsafely because that's what the
+    // interface requires. Because it says we're returning T
+    // which is unknown, none of the return values can satisfy it
+    // as Java isn't going to understand the if statements that
+    // ensure they are the same.
+    //
+    @SuppressWarnings("unchecked")
+    @Override
+    public <T extends Source> T getSource(Class<T> sourceClass)
+            throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            checkFreed();
+            ensureInitialized();
 
-  private void initialize() throws SQLException {
-    if (initialized) {
-      throw new PSQLException(
-          GT.tr(
-              "This SQLXML object has already been initialized, so you cannot manipulate it further."),
-          PSQLState.OBJECT_NOT_IN_STATE);
+            String data = this.data;
+            if (data == null) {
+                return null;
+            }
+
+            try {
+                if (sourceClass == null || DOMSource.class.equals(sourceClass)) {
+                    DocumentBuilder builder = getXmlFactoryFactory().newDocumentBuilder();
+                    InputSource input = new InputSource(new StringReader(data));
+                    DOMSource domSource = new DOMSource(builder.parse(input));
+                    return (T) domSource;
+                } else if (SAXSource.class.equals(sourceClass)) {
+                    XMLReader reader = getXmlFactoryFactory().createXMLReader();
+                    InputSource is = new InputSource(new StringReader(data));
+                    return sourceClass.cast(new SAXSource(reader, is));
+                } else if (StreamSource.class.equals(sourceClass)) {
+                    return sourceClass.cast(new StreamSource(new StringReader(data)));
+                } else if (StAXSource.class.equals(sourceClass)) {
+                    XMLInputFactory xif = getXmlFactoryFactory().newXMLInputFactory();
+                    XMLStreamReader xsr = xif.createXMLStreamReader(new StringReader(data));
+                    return sourceClass.cast(new StAXSource(xsr));
+                }
+            } catch (Exception e) {
+                throw new PSQLException(GT.tr("Unable to decode xml data."), PSQLState.DATA_ERROR, e);
+            }
+
+            throw new PSQLException(GT.tr("Unknown XML Source class: {0}", sourceClass),
+                    PSQLState.INVALID_PARAMETER_TYPE);
+        }
+    }
+
+    @Override
+    public String getString() throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            checkFreed();
+            ensureInitialized();
+            return data;
+        }
+    }
+
+    @Override
+    public void setString(String value) throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            checkFreed();
+            initialize();
+            data = value;
+        }
+    }
+
+    @Override
+    public OutputStream setBinaryStream() throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            checkFreed();
+            initialize();
+            active = true;
+            byteArrayOutputStream = new ByteArrayOutputStream();
+            return byteArrayOutputStream;
+        }
+    }
+
+    @Override
+    public Writer setCharacterStream() throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            checkFreed();
+            initialize();
+            active = true;
+            stringWriter = new StringWriter();
+            return stringWriter;
+        }
+    }
+
+    @SuppressWarnings("unchecked")
+    @Override
+    public <T extends Result> T setResult(Class<T> resultClass) throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            checkFreed();
+            initialize();
+
+            if (resultClass == null || DOMResult.class.equals(resultClass)) {
+                domResult = new DOMResult();
+                active = true;
+                return (T) domResult;
+            } else if (SAXResult.class.equals(resultClass)) {
+                try {
+                    SAXTransformerFactory transformerFactory = getXmlFactoryFactory().newSAXTransformerFactory();
+                    TransformerHandler transformerHandler = transformerFactory.newTransformerHandler();
+                    stringWriter = new StringWriter();
+                    transformerHandler.setResult(new StreamResult(stringWriter));
+                    active = true;
+                    return resultClass.cast(new SAXResult(transformerHandler));
+                } catch (TransformerException te) {
+                    throw new PSQLException(GT.tr("Unable to create SAXResult for SQLXML."),
+                            PSQLState.UNEXPECTED_ERROR, te);
+                }
+            } else if (StreamResult.class.equals(resultClass)) {
+                stringWriter = new StringWriter();
+                active = true;
+                return resultClass.cast(new StreamResult(stringWriter));
+            } else if (StAXResult.class.equals(resultClass)) {
+                StringWriter stringWriter = new StringWriter();
+                this.stringWriter = stringWriter;
+                try {
+                    XMLOutputFactory xof = getXmlFactoryFactory().newXMLOutputFactory();
+                    XMLStreamWriter xsw = xof.createXMLStreamWriter(stringWriter);
+                    active = true;
+                    return resultClass.cast(new StAXResult(xsw));
+                } catch (XMLStreamException xse) {
+                    throw new PSQLException(GT.tr("Unable to create StAXResult for SQLXML"),
+                            PSQLState.UNEXPECTED_ERROR, xse);
+                }
+            }
+
+            throw new PSQLException(GT.tr("Unknown XML Result class: {0}", resultClass),
+                    PSQLState.INVALID_PARAMETER_TYPE);
+        }
+    }
+
+    private void checkFreed() throws SQLException {
+        if (freed) {
+            throw new PSQLException(GT.tr("This SQLXML object has already been freed."),
+                    PSQLState.OBJECT_NOT_IN_STATE);
+        }
+    }
+
+    private void ensureInitialized() throws SQLException {
+        if (!initialized) {
+            throw new PSQLException(
+                    GT.tr(
+                            "This SQLXML object has not been initialized, so you cannot retrieve data from it."),
+                    PSQLState.OBJECT_NOT_IN_STATE);
+        }
+
+        // Is anyone loading data into us at the moment?
+        if (!active) {
+            return;
+        }
+
+        if (byteArrayOutputStream != null) {
+            try {
+                data = conn.getEncoding().decode(byteArrayOutputStream.toByteArray());
+            } catch (IOException ioe) {
+                throw new PSQLException(GT.tr("Failed to convert binary xml data to encoding: {0}.",
+                        conn.getEncoding().name()), PSQLState.DATA_ERROR, ioe);
+            } finally {
+                byteArrayOutputStream = null;
+                active = false;
+            }
+        } else if (stringWriter != null) {
+            // This is also handling the work for Stream, SAX, and StAX Results
+            // as they will use the same underlying stringWriter field.
+            //
+            data = stringWriter.toString();
+            stringWriter = null;
+            active = false;
+        } else if (domResult != null) {
+            DOMResult domResult = this.domResult;
+            // Copy the content from the result to a source
+            // and use the identity transform to get it into a
+            // friendlier result format.
+            try {
+                TransformerFactory factory = getXmlFactoryFactory().newTransformerFactory();
+                Transformer transformer = factory.newTransformer();
+                DOMSource domSource = new DOMSource(domResult.getNode());
+                StringWriter stringWriter = new StringWriter();
+                StreamResult streamResult = new StreamResult(stringWriter);
+                transformer.transform(domSource, streamResult);
+                data = stringWriter.toString();
+            } catch (TransformerException te) {
+                throw new PSQLException(GT.tr("Unable to convert DOMResult SQLXML data to a string."),
+                        PSQLState.DATA_ERROR, te);
+            } finally {
+                domResult = null;
+                active = false;
+            }
+        }
+    }
+
+    private void initialize() throws SQLException {
+        if (initialized) {
+            throw new PSQLException(
+                    GT.tr(
+                            "This SQLXML object has already been initialized, so you cannot manipulate it further."),
+                    PSQLState.OBJECT_NOT_IN_STATE);
+        }
+        initialized = true;
     }
-    initialized = true;
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/PgStatement.java b/pgjdbc/src/main/java/org/postgresql/jdbc/PgStatement.java
index e27c6ad..b702f78 100644
--- a/pgjdbc/src/main/java/org/postgresql/jdbc/PgStatement.java
+++ b/pgjdbc/src/main/java/org/postgresql/jdbc/PgStatement.java
@@ -35,1338 +35,1318 @@ import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
 
 @SuppressWarnings("try")
 public class PgStatement implements Statement, BaseStatement {
-  private static final String[] NO_RETURNING_COLUMNS = new String[0];
+    private static final String[] NO_RETURNING_COLUMNS = new String[0];
 
-  /**
-   * Default state for use or not binary transfers. Can use only for testing purposes
-   */
-  private static final boolean DEFAULT_FORCE_BINARY_TRANSFERS =
-      Boolean.getBoolean("org.postgresql.forceBinary");
-  // only for testing purposes. even single shot statements will use binary transfers
-  private boolean forceBinaryTransfers = DEFAULT_FORCE_BINARY_TRANSFERS;
-
-  protected final ResourceLock lock = new ResourceLock();
-  protected ArrayList<Query> batchStatements;
-  protected ArrayList<ParameterList> batchParameters;
-  protected final int resultsettype; // the resultset type to return (ResultSet.TYPE_xxx)
-  protected final int concurrency; // is it updateable or not? (ResultSet.CONCUR_xxx)
-  private final int rsHoldability;
-  private boolean poolable;
-  private boolean closeOnCompletion;
-  protected int fetchdirection = ResultSet.FETCH_FORWARD;
-  // fetch direction hint (currently ignored)
-
-  /**
-   * Protects current statement from cancelTask starting, waiting for a bit, and waking up exactly
-   * on subsequent query execution. The idea is to atomically compare and swap the reference to the
-   * task, so the task can detect that statement executes different query than the one the
-   * cancelTask was created. Note: the field must be set/get/compareAndSet via
-   * {@link #CANCEL_TIMER_UPDATER} as per {@link AtomicReferenceFieldUpdater} javadoc.
-   */
-  private volatile TimerTask cancelTimerTask;
-
-  private static final AtomicReferenceFieldUpdater<PgStatement, TimerTask> CANCEL_TIMER_UPDATER =
-      AtomicReferenceFieldUpdater.newUpdater(
-          PgStatement.class, TimerTask.class, "cancelTimerTask");
-
-  /**
-   * Protects statement from out-of-order cancels. It protects from both
-   * {@link #setQueryTimeout(int)} and {@link #cancel()} induced ones.
-   *
-   * {@link #execute(String)} and friends change the field to
-   * {@link StatementCancelState#IN_QUERY} during execute. {@link #cancel()}
-   * ignores cancel request if state is {@link StatementCancelState#IDLE}.
-   * In case {@link #execute(String)} observes non-{@link StatementCancelState#IDLE} state as it
-   * completes the query, it waits till {@link StatementCancelState#CANCELLED}. Note: the field must be
-   * set/get/compareAndSet via {@link #STATE_UPDATER} as per {@link AtomicIntegerFieldUpdater}
-   * javadoc.
-   */
-  private volatile StatementCancelState statementState = StatementCancelState.IDLE;
-
-  private static final AtomicReferenceFieldUpdater<PgStatement, StatementCancelState> STATE_UPDATER =
-      AtomicReferenceFieldUpdater.newUpdater(PgStatement.class, StatementCancelState.class, "statementState");
-
-  /**
-   * Does the caller of execute/executeUpdate want generated keys for this execution? This is set by
-   * Statement methods that have generated keys arguments and cleared after execution is complete.
-   */
-  protected boolean wantsGeneratedKeysOnce;
-
-  /**
-   * Was this PreparedStatement created to return generated keys for every execution? This is set at
-   * creation time and never cleared by execution.
-   */
-  public boolean wantsGeneratedKeysAlways;
-
-  // The connection who created us
-  protected final PgConnection connection;
-
-  /**
-   * The warnings chain.
-   */
-  protected volatile PSQLWarningWrapper warnings;
-
-  /**
-   * Maximum number of rows to return, 0 = unlimited.
-   */
-  protected int maxrows;
-
-  /**
-   * Number of rows to get in a batch.
-   */
-  protected int fetchSize;
-
-  /**
-   * Timeout (in milliseconds) for a query.
-   */
-  protected long timeout;
-
-  protected boolean replaceProcessingEnabled = true;
-
-  /**
-   * The current results.
-   */
-  protected ResultWrapper result;
-
-  /**
-   * The first unclosed result.
-   */
-  protected ResultWrapper firstUnclosedResult;
-
-  /**
-   * Results returned by a statement that wants generated keys.
-   */
-  protected ResultWrapper generatedKeys;
-
-  protected int mPrepareThreshold; // Reuse threshold to enable use of PREPARE
-
-  protected int maxFieldSize;
-
-  protected boolean adaptiveFetch;
-
-  private TimestampUtils timestampUtils; // our own Object because it's not thread safe
-
-  PgStatement(PgConnection c, int rsType, int rsConcurrency, int rsHoldability)
-      throws SQLException {
-    this.connection = c;
-    forceBinaryTransfers |= c.getForceBinary();
-    // validation check for allowed values of resultset type
-    if (rsType != ResultSet.TYPE_FORWARD_ONLY && rsType != ResultSet.TYPE_SCROLL_INSENSITIVE && rsType != ResultSet.TYPE_SCROLL_SENSITIVE) {
-      throw new PSQLException(GT.tr("Unknown value for ResultSet type"),
-          PSQLState.INVALID_PARAMETER_VALUE);
-    }
-    resultsettype = rsType;
-    // validation check for allowed values of resultset concurrency
-    if (rsConcurrency != ResultSet.CONCUR_READ_ONLY && rsConcurrency != ResultSet.CONCUR_UPDATABLE) {
-      throw new PSQLException(GT.tr("Unknown value for ResultSet concurrency"),
-          PSQLState.INVALID_PARAMETER_VALUE);
-    }
-    concurrency = rsConcurrency;
-    setFetchSize(c.getDefaultFetchSize());
-    setPrepareThreshold(c.getPrepareThreshold());
-    setAdaptiveFetch(c.getAdaptiveFetch());
-    // validation check for allowed values of resultset holdability
-    if (rsHoldability != ResultSet.HOLD_CURSORS_OVER_COMMIT && rsHoldability != ResultSet.CLOSE_CURSORS_AT_COMMIT) {
-      throw new PSQLException(GT.tr("Unknown value for ResultSet holdability"),
-          PSQLState.INVALID_PARAMETER_VALUE);
-    }
-    this.rsHoldability = rsHoldability;
-  }
-
-  @Override
-  public ResultSet createResultSet(Query originalQuery, Field[] fields, List<Tuple> tuples,
-      ResultCursor cursor) throws SQLException {
-    PgResultSet newResult = new PgResultSet(originalQuery, this, fields, tuples, cursor,
-        getMaxRows(), getMaxFieldSize(), getResultSetType(), getResultSetConcurrency(),
-        getResultSetHoldability(), getAdaptiveFetch());
-    newResult.setFetchSize(getFetchSize());
-    newResult.setFetchDirection(getFetchDirection());
-    return newResult;
-  }
-
-  public BaseConnection getPGConnection() {
-    return connection;
-  }
-
-  public String getFetchingCursorName() {
-    return null;
-  }
-
-  @Override
-  public int getFetchSize() {
-    return fetchSize;
-  }
-
-  protected boolean wantsScrollableResultSet() {
-    return resultsettype != ResultSet.TYPE_FORWARD_ONLY;
-  }
-
-  protected boolean wantsHoldableResultSet() {
-    // FIXME: false if not supported
-    return rsHoldability == ResultSet.HOLD_CURSORS_OVER_COMMIT;
-  }
-
-  /**
-   * ResultHandler implementations for updates, queries, and either-or.
-   */
-  public class StatementResultHandler extends ResultHandlerBase {
-    private ResultWrapper results;
-    private ResultWrapper lastResult;
-
-    public StatementResultHandler() {
-    }
-
-    ResultWrapper getResults() {
-      return results;
-    }
-
-    private void append(ResultWrapper newResult) {
-      if (results == null) {
-        lastResult = results = newResult;
-      } else {
-        lastResult.append(newResult);
-      }
-    }
-
-    @Override
-    public void handleResultRows(Query fromQuery, Field[] fields, List<Tuple> tuples,
-        ResultCursor cursor) {
-      try {
-        ResultSet rs = PgStatement.this.createResultSet(fromQuery, fields, tuples, cursor);
-        append(new ResultWrapper(rs));
-      } catch (SQLException e) {
-        handleError(e);
-      }
-    }
-
-    @Override
-    public void handleCommandStatus(String status, long updateCount, long insertOID) {
-      append(new ResultWrapper(updateCount, insertOID));
-    }
-
-    @Override
-    public void handleWarning(SQLWarning warning) {
-      PgStatement.this.addWarning(warning);
-    }
-
-  }
-
-  @Override
-  public ResultSet executeQuery(String sql) throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      if (!executeWithFlags(sql, 0)) {
-        throw new PSQLException(GT.tr("No results were returned by the query."), PSQLState.NO_DATA);
-      }
-
-      return getSingleResultSet();
-    }
-  }
-
-  protected ResultSet getSingleResultSet() throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      checkClosed();
-      ResultWrapper result = this.result;
-      if (result.getNext() != null) {
-        throw new PSQLException(GT.tr("Multiple ResultSets were returned by the query."),
-            PSQLState.TOO_MANY_RESULTS);
-      }
-
-      return result.getResultSet();
-    }
-  }
-
-  @Override
-  public int executeUpdate(String sql) throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      executeWithFlags(sql, QueryExecutor.QUERY_NO_RESULTS);
-      checkNoResultUpdate();
-      return getUpdateCount();
-    }
-  }
-
-  protected final void checkNoResultUpdate() throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      checkClosed();
-      ResultWrapper iter = result;
-      while (iter != null) {
-        if (iter.getResultSet() != null) {
-          throw new PSQLException(GT.tr("A result was returned when none was expected."),
-              PSQLState.TOO_MANY_RESULTS);
-        }
-        iter = iter.getNext();
-      }
-    }
-  }
-
-  @Override
-  public boolean execute(String sql) throws SQLException {
-    return executeWithFlags(sql, 0);
-  }
-
-  @Override
-  public boolean executeWithFlags(String sql, int flags) throws SQLException {
-    return executeCachedSql(sql, flags, NO_RETURNING_COLUMNS);
-  }
-
-  private boolean executeCachedSql(String sql, int flags,
-      String [] columnNames) throws SQLException {
-    PreferQueryMode preferQueryMode = connection.getPreferQueryMode();
-    // Simple statements should not replace ?, ? with $1, $2
-    boolean shouldUseParameterized = false;
-    QueryExecutor queryExecutor = connection.getQueryExecutor();
-    Object key = queryExecutor
-        .createQueryKey(sql, replaceProcessingEnabled, shouldUseParameterized, columnNames);
-    CachedQuery cachedQuery;
-    boolean shouldCache = preferQueryMode == PreferQueryMode.EXTENDED_CACHE_EVERYTHING;
-    if (shouldCache) {
-      cachedQuery = queryExecutor.borrowQueryByKey(key);
-    } else {
-      cachedQuery = queryExecutor.createQueryByKey(key);
-    }
-    if (wantsGeneratedKeysOnce) {
-      SqlCommand sqlCommand = cachedQuery.query.getSqlCommand();
-      wantsGeneratedKeysOnce = sqlCommand != null && sqlCommand.isReturningKeywordPresent();
-    }
-    boolean res;
-    try {
-      res = executeWithFlags(cachedQuery, flags);
-    } finally {
-      if (shouldCache) {
-        queryExecutor.releaseQuery(cachedQuery);
-      }
-    }
-    return res;
-  }
-
-  @Override
-  public boolean executeWithFlags(CachedQuery simpleQuery, int flags) throws SQLException {
-    checkClosed();
-    if (connection.getPreferQueryMode().compareTo(PreferQueryMode.EXTENDED) < 0) {
-      flags |= QueryExecutor.QUERY_EXECUTE_AS_SIMPLE;
-    }
-    execute(simpleQuery, null, flags);
-    try (ResourceLock ignore = lock.obtain()) {
-      checkClosed();
-      return result != null && result.getResultSet() != null;
-    }
-  }
-
-  @Override
-  public boolean executeWithFlags(int flags) throws SQLException {
-    checkClosed();
-    throw new PSQLException(GT.tr("Can''t use executeWithFlags(int) on a Statement."),
-        PSQLState.WRONG_OBJECT_TYPE);
-  }
-
-  /*
-  If there are multiple result sets we close any that have been processed and left open
-  by the client.
-   */
-  private void closeUnclosedProcessedResults() throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      ResultWrapper resultWrapper = this.firstUnclosedResult;
-      ResultWrapper currentResult = this.result;
-      for (; resultWrapper != currentResult && resultWrapper != null;
-           resultWrapper = resultWrapper.getNext()) {
-        PgResultSet rs = (PgResultSet) resultWrapper.getResultSet();
-        if (rs != null) {
-          rs.closeInternally();
-        }
-      }
-      firstUnclosedResult = resultWrapper;
-    }
-  }
-
-  protected void closeForNextExecution() throws SQLException {
-
-    // Every statement execution clears any previous warnings.
-    clearWarnings();
-
-    // Close any existing resultsets associated with this statement.
-    try (ResourceLock ignore = lock.obtain()) {
-      closeUnclosedProcessedResults();
-
-      if ( this.result != null && this.result.getResultSet() != null ) {
-        this.result.getResultSet().close();
-      }
-      result = null;
-
-      ResultWrapper generatedKeys = this.generatedKeys;
-      if (generatedKeys != null) {
-        ResultSet resultSet = generatedKeys.getResultSet();
-        if (resultSet != null) {
-          resultSet.close();
-        }
-        this.generatedKeys = null;
-      }
-    }
-  }
-
-  /**
-   * Returns true if query is unlikely to be reused.
-   *
-   * @param cachedQuery to check (null if current query)
-   * @return true if query is unlikely to be reused
-   */
-  protected boolean isOneShotQuery(CachedQuery cachedQuery) {
-    if (cachedQuery == null) {
-      return true;
-    }
-    cachedQuery.increaseExecuteCount();
-    return (mPrepareThreshold == 0 || cachedQuery.getExecuteCount() < mPrepareThreshold)
-        && !getForceBinaryTransfer();
-  }
-
-  protected final void execute(CachedQuery cachedQuery,
-      ParameterList queryParameters, int flags)
-      throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      try {
-        executeInternal(cachedQuery, queryParameters, flags);
-      } catch (SQLException e) {
-        // Don't retry composite queries as it might get partially executed
-        if (cachedQuery.query.getSubqueries() != null
-            || !connection.getQueryExecutor().willHealOnRetry(e)) {
-          throw e;
-        }
-        cachedQuery.query.close();
-        // Execute the query one more time
-        executeInternal(cachedQuery, queryParameters, flags);
-      }
-    }
-  }
-
-  private void executeInternal(CachedQuery cachedQuery,
-      ParameterList queryParameters, int flags)
-      throws SQLException {
-    closeForNextExecution();
-
-    // Enable cursor-based resultset if possible.
-    if (fetchSize > 0 && !wantsScrollableResultSet() && !connection.getAutoCommit()
-        && !wantsHoldableResultSet()) {
-      flags |= QueryExecutor.QUERY_FORWARD_CURSOR;
-    }
-
-    if (wantsGeneratedKeysOnce || wantsGeneratedKeysAlways) {
-      flags |= QueryExecutor.QUERY_BOTH_ROWS_AND_STATUS;
-
-      // If the no results flag is set (from executeUpdate)
-      // clear it so we get the generated keys results.
-      //
-      if ((flags & QueryExecutor.QUERY_NO_RESULTS) != 0) {
-        flags &= ~(QueryExecutor.QUERY_NO_RESULTS);
-      }
-    }
-
-    // Only use named statements after we hit the threshold. Note that only
-    // named statements can be transferred in binary format.
-    // isOneShotQuery will check to see if we have hit the prepareThreshold count
-
-    if (isOneShotQuery(cachedQuery)) {
-      flags |= QueryExecutor.QUERY_ONESHOT;
-    }
-
-    if (connection.getAutoCommit()) {
-      flags |= QueryExecutor.QUERY_SUPPRESS_BEGIN;
-    }
-    if (connection.hintReadOnly()) {
-      flags |= QueryExecutor.QUERY_READ_ONLY_HINT;
-    }
-
-    // updateable result sets do not yet support binary updates
-    if (concurrency != ResultSet.CONCUR_READ_ONLY) {
-      flags |= QueryExecutor.QUERY_NO_BINARY_TRANSFER;
-    }
-
-    Query queryToExecute = cachedQuery.query;
-
-    if (queryToExecute.isEmpty()) {
-      flags |= QueryExecutor.QUERY_SUPPRESS_BEGIN;
-    }
-
-    if (!queryToExecute.isStatementDescribed() && forceBinaryTransfers
-        && (flags & QueryExecutor.QUERY_EXECUTE_AS_SIMPLE) == 0) {
-      // Simple 'Q' execution does not need to know parameter types
-      // When binaryTransfer is forced, then we need to know resulting parameter and column types,
-      // thus sending a describe request.
-      int flags2 = flags | QueryExecutor.QUERY_DESCRIBE_ONLY;
-      StatementResultHandler handler2 = new StatementResultHandler();
-      connection.getQueryExecutor().execute(queryToExecute, queryParameters, handler2, 0, 0,
-          flags2);
-      ResultWrapper result2 = handler2.getResults();
-      if (result2 != null) {
-        result2.getResultSet().close();
-      }
-    }
-
-    StatementResultHandler handler = new StatementResultHandler();
-    try (ResourceLock ignore = lock.obtain()) {
-      result = null;
-    }
-    try {
-      startTimer();
-      connection.getQueryExecutor().execute(queryToExecute, queryParameters, handler, maxrows,
-          fetchSize, flags, adaptiveFetch);
-    } finally {
-      killTimerTask();
-    }
-    try (ResourceLock ignore = lock.obtain()) {
-      checkClosed();
-
-      ResultWrapper currentResult = handler.getResults();
-      result = firstUnclosedResult = currentResult;
-
-      if (wantsGeneratedKeysOnce || wantsGeneratedKeysAlways) {
-        generatedKeys = currentResult;
-        result = currentResult.getNext();
-
-        if (wantsGeneratedKeysOnce) {
-          wantsGeneratedKeysOnce = false;
-        }
-      }
-    }
-  }
-
-  @Override
-  public void setCursorName(String name) throws SQLException {
-    checkClosed();
-    // No-op.
-  }
-
-  private volatile int isClosed;
-  private static final AtomicIntegerFieldUpdater<PgStatement> IS_CLOSED_UPDATER =
-      AtomicIntegerFieldUpdater.newUpdater(
-          PgStatement.class, "isClosed");
-
-  @Override
-  public int getUpdateCount() throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      checkClosed();
-      if (result == null || result.getResultSet() != null) {
-        return -1;
-      }
-
-      long count = result.getUpdateCount();
-      return count > Integer.MAX_VALUE ? Statement.SUCCESS_NO_INFO : (int) count;
-    }
-  }
-
-  @Override
-  public boolean getMoreResults() throws SQLException {
-    return getMoreResults(CLOSE_ALL_RESULTS);
-  }
-
-  @Override
-  public int getMaxRows() throws SQLException {
-    checkClosed();
-    return maxrows;
-  }
-
-  @Override
-  public void setMaxRows(int max) throws SQLException {
-    checkClosed();
-    if (max < 0) {
-      throw new PSQLException(
-          GT.tr("Maximum number of rows must be a value greater than or equal to 0."),
-          PSQLState.INVALID_PARAMETER_VALUE);
-    }
-    maxrows = max;
-  }
-
-  @Override
-  public void setEscapeProcessing(boolean enable) throws SQLException {
-    checkClosed();
-    replaceProcessingEnabled = enable;
-  }
-
-  @Override
-  public int getQueryTimeout() throws SQLException {
-    checkClosed();
-    long seconds = timeout / 1000;
-    if (seconds >= Integer.MAX_VALUE) {
-      return Integer.MAX_VALUE;
-    }
-    return (int) seconds;
-  }
-
-  @Override
-  public void setQueryTimeout(int seconds) throws SQLException {
-    setQueryTimeoutMs(seconds * 1000L);
-  }
-
-  /**
-   * The queryTimeout limit is the number of milliseconds the driver will wait for a Statement to
-   * execute. If the limit is exceeded, a SQLException is thrown.
-   *
-   * @return the current query timeout limit in milliseconds; 0 = unlimited
-   * @throws SQLException if a database access error occurs
-   */
-  public long getQueryTimeoutMs() throws SQLException {
-    checkClosed();
-    return timeout;
-  }
-
-  /**
-   * Sets the queryTimeout limit.
-   *
-   * @param millis - the new query timeout limit in milliseconds
-   * @throws SQLException if a database access error occurs
-   */
-  public void setQueryTimeoutMs(long millis) throws SQLException {
-    checkClosed();
-
-    if (millis < 0) {
-      throw new PSQLException(GT.tr("Query timeout must be a value greater than or equals to 0."),
-          PSQLState.INVALID_PARAMETER_VALUE);
-    }
-    timeout = millis;
-  }
-
-  /**
-   * <p>Either initializes new warning wrapper, or adds warning onto the chain.</p>
-   *
-   * <p>Although warnings are expected to be added sequentially, the warnings chain may be cleared
-   * concurrently at any time via {@link #clearWarnings()}, therefore it is possible that a warning
-   * added via this method is placed onto the end of the previous warning chain</p>
-   *
-   * @param warn warning to add
-   */
-  public void addWarning(SQLWarning warn) {
-    //copy reference to avoid NPE from concurrent modification of this.warnings
-    final PSQLWarningWrapper warnWrap = this.warnings;
-    if (warnWrap == null) {
-      this.warnings = new PSQLWarningWrapper(warn);
-    } else {
-      warnWrap.addWarning(warn);
-    }
-  }
-
-  @Override
-  public SQLWarning getWarnings() throws SQLException {
-    checkClosed();
-    //copy reference to avoid NPE from concurrent modification of this.warnings
-    final PSQLWarningWrapper warnWrap = this.warnings;
-    return warnWrap != null ? warnWrap.getFirstWarning() : null;
-  }
-
-  @Override
-  public int getMaxFieldSize() throws SQLException {
-    return maxFieldSize;
-  }
-
-  @Override
-  public void setMaxFieldSize(int max) throws SQLException {
-    checkClosed();
-    if (max < 0) {
-      throw new PSQLException(
-          GT.tr("The maximum field size must be a value greater than or equal to 0."),
-          PSQLState.INVALID_PARAMETER_VALUE);
-    }
-    maxFieldSize = max;
-  }
-
-  /**
-   * <p>Clears the warning chain.</p>
-   * <p>Note that while it is safe to clear warnings while the query is executing, warnings that are
-   * added between calls to {@link #getWarnings()} and #clearWarnings() may be missed.
-   * Therefore you should hold a reference to the tail of the previous warning chain
-   * and verify if its {@link SQLWarning#getNextWarning()} value is holds any new value.</p>
-   */
-  @Override
-  public void clearWarnings() throws SQLException {
-    warnings = null;
-  }
-
-  @Override
-  public ResultSet getResultSet() throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      checkClosed();
-
-      if (result == null) {
-        return null;
-      }
-
-      return result.getResultSet();
-    }
-  }
-
-  /**
-   * <B>Note:</B> even though {@code Statement} is automatically closed when it is garbage
-   * collected, it is better to close it explicitly to lower resource consumption.
-   *
-   * {@inheritDoc}
-   */
-  @Override
-  public final void close() throws SQLException {
-    // closing an already closed Statement is a no-op.
-    if (!IS_CLOSED_UPDATER.compareAndSet(this, 0, 1)) {
-      return;
-    }
-
-    cancel();
-
-    closeForNextExecution();
-
-    closeImpl();
-  }
-
-  /**
-   * This is guaranteed to be called exactly once even in case of concurrent {@link #close()} calls.
-   * @throws SQLException in case of error
-   */
-  protected void closeImpl() throws SQLException {
-  }
-
-  /*
-   *
-   * The following methods are postgres extensions and are defined in the interface BaseStatement
-   *
-   */
-
-  @Override
-  public long getLastOID() throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      checkClosed();
-      if (result == null) {
-        return 0;
-      }
-      return result.getInsertOID();
-    }
-  }
-
-  @Override
-  public void setPrepareThreshold(int newThreshold) throws SQLException {
-    checkClosed();
-
-    if (newThreshold < 0) {
-      forceBinaryTransfers = true;
-      newThreshold = 1;
-    }
-
-    this.mPrepareThreshold = newThreshold;
-  }
-
-  @Override
-  public int getPrepareThreshold() {
-    return mPrepareThreshold;
-  }
-
-  @Override
-  @SuppressWarnings("deprecation")
-  public void setUseServerPrepare(boolean flag) throws SQLException {
-    setPrepareThreshold(flag ? 1 : 0);
-  }
-
-  @Override
-  public boolean isUseServerPrepare() {
-    return false;
-  }
-
-  protected void checkClosed() throws SQLException {
-    if (isClosed()) {
-      throw new PSQLException(GT.tr("This statement has been closed."),
-          PSQLState.OBJECT_NOT_IN_STATE);
-    }
-  }
-
-  // ** JDBC 2 Extensions **
-
-  @Override
-  public void addBatch(String sql) throws SQLException {
-    checkClosed();
-
-    ArrayList<Query> batchStatements = this.batchStatements;
-    if (batchStatements == null) {
-      this.batchStatements = batchStatements = new ArrayList<>();
-    }
-    ArrayList<ParameterList> batchParameters = this.batchParameters;
-    if (batchParameters == null) {
-      this.batchParameters = batchParameters = new ArrayList<ParameterList>();
-    }
-
-    // Simple statements should not replace ?, ? with $1, $2
-    boolean shouldUseParameterized = false;
-    CachedQuery cachedQuery = connection.createQuery(sql, replaceProcessingEnabled, shouldUseParameterized);
-    batchStatements.add(cachedQuery.query);
-    batchParameters.add(null);
-  }
-
-  @Override
-  public void clearBatch() throws SQLException {
-    if (batchStatements != null) {
-      batchStatements.clear();
-    }
-    if (batchParameters != null) {
-      batchParameters.clear();
-    }
-  }
-
-  protected BatchResultHandler createBatchHandler(Query[] queries,
-      ParameterList[] parameterLists) {
-    return new BatchResultHandler(this, queries, parameterLists,
-        wantsGeneratedKeysAlways);
-  }
-
-  private BatchResultHandler internalExecuteBatch() throws SQLException {
-    // Construct query/parameter arrays.
-    transformQueriesAndParameters();
-    ArrayList<Query> batchStatements = this.batchStatements;
-    ArrayList<ParameterList> batchParameters = this.batchParameters;
-    // Empty arrays should be passed to toArray
-    // see http://shipilev.net/blog/2016/arrays-wisdom-ancients/
-    Query[] queries = batchStatements.toArray(new Query[0]);
-    ParameterList[] parameterLists = batchParameters.toArray(new ParameterList[0]);
-    batchStatements.clear();
-    batchParameters.clear();
-
-    int flags;
-
-    // Force a Describe before any execution? We need to do this if we're going
-    // to send anything dependent on the Describe results, e.g. binary parameters.
-    boolean preDescribe = false;
-
-    if (wantsGeneratedKeysAlways) {
-      /*
-       * This batch will return generated keys, tell the executor to expect result rows. We also
-       * force a Describe later so we know the size of the results to expect.
-       *
-       * If the parameter type(s) change between batch entries and the default binary-mode changes
-       * we might get mixed binary and text in a single result set column, which we cannot handle.
-       * To prevent this, disable binary transfer mode in batches that return generated keys. See
-       * GitHub issue #267
-       */
-      flags = QueryExecutor.QUERY_BOTH_ROWS_AND_STATUS | QueryExecutor.QUERY_NO_BINARY_TRANSFER;
-    } else {
-      // If a batch hasn't specified that it wants generated keys, using the appropriate
-      // Connection.createStatement(...) interfaces, disallow any result set.
-      flags = QueryExecutor.QUERY_NO_RESULTS;
-    }
-
-    PreferQueryMode preferQueryMode = connection.getPreferQueryMode();
-    if (preferQueryMode == PreferQueryMode.SIMPLE
-        || (preferQueryMode == PreferQueryMode.EXTENDED_FOR_PREPARED
-        && parameterLists[0] == null)) {
-      flags |= QueryExecutor.QUERY_EXECUTE_AS_SIMPLE;
-    }
-
-    boolean sameQueryAhead = queries.length > 1 && queries[0] == queries[1];
-
-    if (!sameQueryAhead
-        // If executing the same query twice in a batch, make sure the statement
-        // is server-prepared. In other words, "oneshot" only if the query is one in the batch
-        // or the queries are different
-        || isOneShotQuery(null)) {
-      flags |= QueryExecutor.QUERY_ONESHOT;
-    } else {
-      // If a batch requests generated keys and isn't already described,
-      // force a Describe of the query before proceeding. That way we can
-      // determine the appropriate size of each batch by estimating the
-      // maximum data returned. Without that, we don't know how many queries
-      // we'll be able to queue up before we risk a deadlock.
-      // (see v3.QueryExecutorImpl's MAX_BUFFERED_RECV_BYTES)
-
-      // SameQueryAhead is just a quick way to issue pre-describe for batch execution
-      // TODO: It should be reworked into "pre-describe if query has unknown parameter
-      // types and same query is ahead".
-      preDescribe = (wantsGeneratedKeysAlways || sameQueryAhead)
-          && !queries[0].isStatementDescribed();
-      /*
-       * It's also necessary to force a Describe on the first execution of the new statement, even
-       * though we already described it, to work around bug #267.
-       */
-      flags |= QueryExecutor.QUERY_FORCE_DESCRIBE_PORTAL;
-    }
-
-    if (connection.getAutoCommit()) {
-      flags |= QueryExecutor.QUERY_SUPPRESS_BEGIN;
-    }
-    if (connection.hintReadOnly()) {
-      flags |= QueryExecutor.QUERY_READ_ONLY_HINT;
-    }
-
-    BatchResultHandler handler;
-    handler = createBatchHandler(queries, parameterLists);
-
-    if ((preDescribe || forceBinaryTransfers)
-        && (flags & QueryExecutor.QUERY_EXECUTE_AS_SIMPLE) == 0) {
-      // Do a client-server round trip, parsing and describing the query so we
-      // can determine its result types for use in binary parameters, batch sizing,
-      // etc.
-      int flags2 = flags | QueryExecutor.QUERY_DESCRIBE_ONLY;
-      StatementResultHandler handler2 = new StatementResultHandler();
-      try {
-        connection.getQueryExecutor().execute(queries[0], parameterLists[0], handler2, 0, 0, flags2);
-      } catch (SQLException e) {
-        // Unable to parse the first statement -> throw BatchUpdateException
-        handler.handleError(e);
-        handler.handleCompletion();
-        // Will not reach here (see above)
-      }
-      ResultWrapper result2 = handler2.getResults();
-      if (result2 != null) {
-        result2.getResultSet().close();
-      }
-    }
-
-    try (ResourceLock ignore = lock.obtain()) {
-      result = null;
-    }
-
-    try {
-      startTimer();
-      connection.getQueryExecutor().execute(queries, parameterLists, handler, maxrows, fetchSize,
-          flags, adaptiveFetch);
-    } finally {
-      killTimerTask();
-      // There might be some rows generated even in case of failures
-      try (ResourceLock ignore = lock.obtain()) {
-        checkClosed();
-        if (wantsGeneratedKeysAlways) {
-          generatedKeys = new ResultWrapper(handler.getGeneratedKeys());
-        }
-      }
-    }
-    return handler;
-  }
-
-  @Override
-  public int[] executeBatch() throws SQLException {
-    checkClosed();
-    closeForNextExecution();
-
-    if (batchStatements == null || batchStatements.isEmpty() || batchParameters == null) {
-      return new int[0];
-    }
-
-    return internalExecuteBatch().getUpdateCount();
-  }
-
-  @Override
-  public void cancel() throws SQLException {
-    if (statementState == StatementCancelState.IDLE) {
-      return;
-    }
-    if (!STATE_UPDATER.compareAndSet(this, StatementCancelState.IN_QUERY,
-        StatementCancelState.CANCELING)) {
-      // Not in query, there's nothing to cancel
-      return;
-    }
-    // Use connection lock to avoid spinning in killTimerTask
-    try (ResourceLock connectionLock = connection.obtainLock()) {
-      try {
-        connection.cancelQuery();
-      } finally {
-        STATE_UPDATER.set(this, StatementCancelState.CANCELLED);
-        connection.lockCondition().signalAll(); // wake-up killTimerTask
-      }
-    }
-  }
-
-  @Override
-  public Connection getConnection() throws SQLException {
-    return connection;
-  }
-
-  @Override
-  public int getFetchDirection() {
-    return fetchdirection;
-  }
-
-  @Override
-  public int getResultSetConcurrency() {
-    return concurrency;
-  }
-
-  @Override
-  public int getResultSetType() {
-    return resultsettype;
-  }
-
-  @Override
-  public void setFetchDirection(int direction) throws SQLException {
-    switch (direction) {
-      case ResultSet.FETCH_FORWARD:
-      case ResultSet.FETCH_REVERSE:
-      case ResultSet.FETCH_UNKNOWN:
-        fetchdirection = direction;
-        break;
-      default:
-        throw new PSQLException(GT.tr("Invalid fetch direction constant: {0}.", direction),
-            PSQLState.INVALID_PARAMETER_VALUE);
-    }
-  }
-
-  @Override
-  public void setFetchSize(int rows) throws SQLException {
-    checkClosed();
-    if (rows < 0) {
-      throw new PSQLException(GT.tr("Fetch size must be a value greater to or equal to 0."),
-          PSQLState.INVALID_PARAMETER_VALUE);
-    }
-    fetchSize = rows;
-  }
-
-  private void startTimer() {
-    /*
-     * there shouldn't be any previous timer active, but better safe than sorry.
+    /**
+     * Default setting for whether binary transfers are forced. Intended for testing purposes only.
      */
-    cleanupTimer();
+    private static final boolean DEFAULT_FORCE_BINARY_TRANSFERS =
+            Boolean.getBoolean("org.postgresql.forceBinary");
+    private static final AtomicReferenceFieldUpdater<PgStatement, TimerTask> CANCEL_TIMER_UPDATER =
+            AtomicReferenceFieldUpdater.newUpdater(
+                    PgStatement.class, TimerTask.class, "cancelTimerTask");
+    private static final AtomicReferenceFieldUpdater<PgStatement, StatementCancelState> STATE_UPDATER =
+            AtomicReferenceFieldUpdater.newUpdater(PgStatement.class, StatementCancelState.class, "statementState");
+    private static final AtomicIntegerFieldUpdater<PgStatement> IS_CLOSED_UPDATER =
+            AtomicIntegerFieldUpdater.newUpdater(
+                    PgStatement.class, "isClosed");
+    protected final ResourceLock lock = new ResourceLock();
+    protected final int resultsettype; // the resultset type to return (ResultSet.TYPE_xxx)
+    protected final int concurrency; // is it updateable or not? (ResultSet.CONCUR_xxx)
+    // The connection who created us
+    protected final PgConnection connection;
+    private final int rsHoldability;
+    public ArrayList<Query> batchStatements;
+    public ArrayList<ParameterList> batchParameters;
+    // Note: the fetchdirection field declared below is only a hint and is currently ignored.
+    /**
+     * Was this PreparedStatement created to return generated keys for every execution? This is set at
+     * creation time and never cleared by execution.
+     */
+    public boolean wantsGeneratedKeysAlways;
+    protected int fetchdirection = ResultSet.FETCH_FORWARD;
+    /**
+     * Does the caller of execute/executeUpdate want generated keys for this execution? This is set by
+     * Statement methods that have generated keys arguments and cleared after execution is complete.
+     */
+    protected boolean wantsGeneratedKeysOnce;
+    /**
+     * The warnings chain.
+     */
+    protected volatile PSQLWarningWrapper warnings;
+    /**
+     * Maximum number of rows to return, 0 = unlimited.
+     */
+    protected int maxrows;
+    /**
+     * Number of rows to get in a batch.
+     */
+    protected int fetchSize;
+    /**
+     * Timeout (in milliseconds) for a query.
+     */
+    protected long timeout;
+    protected boolean replaceProcessingEnabled = true;
+    /**
+     * The current results.
+     */
+    protected ResultWrapper result;
+    /**
+     * The first unclosed result.
+     */
+    protected ResultWrapper firstUnclosedResult;
+    /**
+     * Results returned by a statement that wants generated keys.
+     */
+    protected ResultWrapper generatedKeys;
+    protected int mPrepareThreshold; // Reuse threshold to enable use of PREPARE
+    protected int maxFieldSize;
+    protected boolean adaptiveFetch;
+    // only for testing purposes. even single shot statements will use binary transfers
+    private boolean forceBinaryTransfers = DEFAULT_FORCE_BINARY_TRANSFERS;
+    private boolean poolable;
+    private boolean closeOnCompletion;
+    /**
+     * Protects current statement from cancelTask starting, waiting for a bit, and waking up exactly
+     * on subsequent query execution. The idea is to atomically compare and swap the reference to the
+     * task, so the task can detect that statement executes different query than the one the
+     * cancelTask was created. Note: the field must be set/get/compareAndSet via
+     * {@link #CANCEL_TIMER_UPDATER} as per {@link AtomicReferenceFieldUpdater} javadoc.
+     */
+    private volatile TimerTask cancelTimerTask;
+    /**
+     * Protects statement from out-of-order cancels. It protects from both
+     * {@link #setQueryTimeout(int)} and {@link #cancel()} induced ones.
+     * <p>
+     * {@link #execute(String)} and friends change the field to
+     * {@link StatementCancelState#IN_QUERY} during execute. {@link #cancel()}
+     * ignores cancel request if state is {@link StatementCancelState#IDLE}.
+     * In case {@link #execute(String)} observes non-{@link StatementCancelState#IDLE} state as it
+     * completes the query, it waits till {@link StatementCancelState#CANCELLED}. Note: the field must be
+     * set/get/compareAndSet via {@link #STATE_UPDATER} as per {@link AtomicReferenceFieldUpdater}
+     * javadoc.
+     */
+    private volatile StatementCancelState statementState = StatementCancelState.IDLE;
+    private TimestampUtils timestampUtils; // our own Object because it's not thread safe
+    private volatile int isClosed;
 
-    STATE_UPDATER.set(this, StatementCancelState.IN_QUERY);
-
-    if (timeout == 0) {
-      return;
+    PgStatement(PgConnection c, int rsType, int rsConcurrency, int rsHoldability)
+            throws SQLException {
+        this.connection = c;
+        forceBinaryTransfers |= c.getForceBinary();
+        // validation check for allowed values of resultset type
+        if (rsType != ResultSet.TYPE_FORWARD_ONLY && rsType != ResultSet.TYPE_SCROLL_INSENSITIVE && rsType != ResultSet.TYPE_SCROLL_SENSITIVE) {
+            throw new PSQLException(GT.tr("Unknown value for ResultSet type"),
+                    PSQLState.INVALID_PARAMETER_VALUE);
+        }
+        resultsettype = rsType;
+        // validation check for allowed values of resultset concurrency
+        if (rsConcurrency != ResultSet.CONCUR_READ_ONLY && rsConcurrency != ResultSet.CONCUR_UPDATABLE) {
+            throw new PSQLException(GT.tr("Unknown value for ResultSet concurrency"),
+                    PSQLState.INVALID_PARAMETER_VALUE);
+        }
+        concurrency = rsConcurrency;
+        setFetchSize(c.getDefaultFetchSize());
+        setPrepareThreshold(c.getPrepareThreshold());
+        setAdaptiveFetch(c.getAdaptiveFetch());
+        // validation check for allowed values of resultset holdability
+        if (rsHoldability != ResultSet.HOLD_CURSORS_OVER_COMMIT && rsHoldability != ResultSet.CLOSE_CURSORS_AT_COMMIT) {
+            throw new PSQLException(GT.tr("Unknown value for ResultSet holdability"),
+                    PSQLState.INVALID_PARAMETER_VALUE);
+        }
+        this.rsHoldability = rsHoldability;
     }
 
-    TimerTask cancelTask = new StatementCancelTimerTask(this);
-
-    CANCEL_TIMER_UPDATER.set(this, cancelTask);
-    connection.addTimerTask(cancelTask, timeout);
-  }
-
-  void cancelIfStillNeeded(TimerTask timerTask) {
-    try {
-      if (!CANCEL_TIMER_UPDATER.compareAndSet(this, timerTask, null)) {
-        // Nothing to do here, statement has already finished and cleared
-        // cancelTimerTask reference
-        return;
-      }
-      cancel();
-    } catch (SQLException e) {
-    }
-  }
-
-  /**
-   * Clears {@link #cancelTimerTask} if any. Returns true if and only if "cancel" timer task would
-   * never invoke {@link #cancel()}.
-   */
-  private boolean cleanupTimer() {
-    TimerTask timerTask = CANCEL_TIMER_UPDATER.get(this);
-    if (timerTask == null) {
-      // If timeout is zero, then timer task did not exist, so we safely report "all clear"
-      return timeout == 0;
-    }
-    if (!CANCEL_TIMER_UPDATER.compareAndSet(this, timerTask, null)) {
-      // Failed to update reference -> timer has just fired, so we must wait for the query state to
-      // become "cancelling".
-      return false;
-    }
-    timerTask.cancel();
-    connection.purgeTimerTasks();
-    // All clear
-    return true;
-  }
-
-  private void killTimerTask() {
-    boolean timerTaskIsClear = cleanupTimer();
-    // The order is important here: in case we need to wait for the cancel task, the state must be
-    // kept StatementCancelState.IN_QUERY, so cancelTask would be able to cancel the query.
-    // It is believed that this case is very rare, so "additional cancel and wait below" would not
-    // harm it.
-    if (timerTaskIsClear && STATE_UPDATER.compareAndSet(this, StatementCancelState.IN_QUERY, StatementCancelState.IDLE)) {
-      return;
+    @Override
+    public ResultSet createResultSet(Query originalQuery, Field[] fields, List<Tuple> tuples,
+                                     ResultCursor cursor) throws SQLException {
+        PgResultSet newResult = new PgResultSet(originalQuery, this, fields, tuples, cursor,
+                getMaxRows(), getMaxFieldSize(), getResultSetType(), getResultSetConcurrency(),
+                getResultSetHoldability(), getAdaptiveFetch());
+        newResult.setFetchSize(getFetchSize());
+        newResult.setFetchDirection(getFetchDirection());
+        return newResult;
     }
 
-    // Being here means someone managed to call .cancel() and our connection did not receive
-    // "timeout error"
-    // We wait till state becomes "cancelled"
-    boolean interrupted = false;
-    try (ResourceLock connectionLock = connection.obtainLock()) {
-      // state check is performed with connection lock so it detects "cancelled" state faster
-      // In other words, it prevents unnecessary ".wait()" call
-      while (!STATE_UPDATER.compareAndSet(this, StatementCancelState.CANCELLED, StatementCancelState.IDLE)) {
+    public BaseConnection getPGConnection() {
+        return connection;
+    }
+
+    public String getFetchingCursorName() {
+        return null;
+    }
+
+    @Override
+    public int getFetchSize() {
+        return fetchSize;
+    }
+
+    @Override
+    public void setFetchSize(int rows) throws SQLException {
+        checkClosed();
+        if (rows < 0) {
+            throw new PSQLException(GT.tr("Fetch size must be a value greater to or equal to 0."),
+                    PSQLState.INVALID_PARAMETER_VALUE);
+        }
+        fetchSize = rows;
+    }
+
+    protected boolean wantsScrollableResultSet() {
+        return resultsettype != ResultSet.TYPE_FORWARD_ONLY;
+    }
+
+    protected boolean wantsHoldableResultSet() {
+        // FIXME: false if not supported
+        return rsHoldability == ResultSet.HOLD_CURSORS_OVER_COMMIT;
+    }
+
+    @Override
+    public ResultSet executeQuery(String sql) throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            if (!executeWithFlags(sql, 0)) {
+                throw new PSQLException(GT.tr("No results were returned by the query."), PSQLState.NO_DATA);
+            }
+
+            return getSingleResultSet();
+        }
+    }
+
+    protected ResultSet getSingleResultSet() throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            checkClosed();
+            ResultWrapper result = this.result;
+            if (result.getNext() != null) {
+                throw new PSQLException(GT.tr("Multiple ResultSets were returned by the query."),
+                        PSQLState.TOO_MANY_RESULTS);
+            }
+
+            return result.getResultSet();
+        }
+    }
+
+    @Override
+    public int executeUpdate(String sql) throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            executeWithFlags(sql, QueryExecutor.QUERY_NO_RESULTS);
+            checkNoResultUpdate();
+            return getUpdateCount();
+        }
+    }
+
+    protected final void checkNoResultUpdate() throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            checkClosed();
+            ResultWrapper iter = result;
+            while (iter != null) {
+                if (iter.getResultSet() != null) {
+                    throw new PSQLException(GT.tr("A result was returned when none was expected."),
+                            PSQLState.TOO_MANY_RESULTS);
+                }
+                iter = iter.getNext();
+            }
+        }
+    }
+
+    @Override
+    public boolean execute(String sql) throws SQLException {
+        return executeWithFlags(sql, 0);
+    }
+
+    @Override
+    public boolean executeWithFlags(String sql, int flags) throws SQLException {
+        return executeCachedSql(sql, flags, NO_RETURNING_COLUMNS);
+    }
+
+    private boolean executeCachedSql(String sql, int flags,
+                                     String[] columnNames) throws SQLException {
+        PreferQueryMode preferQueryMode = connection.getPreferQueryMode();
+        // Simple statements should not replace ?, ? with $1, $2
+        boolean shouldUseParameterized = false;
+        QueryExecutor queryExecutor = connection.getQueryExecutor();
+        Object key = queryExecutor
+                .createQueryKey(sql, replaceProcessingEnabled, shouldUseParameterized, columnNames);
+        CachedQuery cachedQuery;
+        boolean shouldCache = preferQueryMode == PreferQueryMode.EXTENDED_CACHE_EVERYTHING;
+        if (shouldCache) {
+            cachedQuery = queryExecutor.borrowQueryByKey(key);
+        } else {
+            cachedQuery = queryExecutor.createQueryByKey(key);
+        }
+        if (wantsGeneratedKeysOnce) {
+            SqlCommand sqlCommand = cachedQuery.query.getSqlCommand();
+            wantsGeneratedKeysOnce = sqlCommand != null && sqlCommand.isReturningKeywordPresent();
+        }
+        boolean res;
         try {
-          // Note: wait timeout here is irrelevant since connection.obtainLock() would block until
-          // .cancel finishes
-          connection.lockCondition().await(10, TimeUnit.MILLISECONDS);
-        } catch (InterruptedException e) { // NOSONAR
-          // Either re-interrupt this method or rethrow the "InterruptedException"
-          interrupted = true;
+            res = executeWithFlags(cachedQuery, flags);
+        } finally {
+            if (shouldCache) {
+                queryExecutor.releaseQuery(cachedQuery);
+            }
         }
-      }
-    }
-    if (interrupted) {
-      Thread.currentThread().interrupt();
-    }
-  }
-
-  protected boolean getForceBinaryTransfer() {
-    return forceBinaryTransfers;
-  }
-
-  @Override
-  public long getLargeUpdateCount() throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      checkClosed();
-      if (result == null || result.getResultSet() != null) {
-        return -1;
-      }
-
-      return result.getUpdateCount();
-    }
-  }
-
-  @Override
-  public void setLargeMaxRows(long max) throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "setLargeMaxRows");
-  }
-
-  @Override
-  public long getLargeMaxRows() throws SQLException {
-    throw Driver.notImplemented(this.getClass(), "getLargeMaxRows");
-  }
-
-  @Override
-  public long[] executeLargeBatch() throws SQLException {
-    checkClosed();
-    closeForNextExecution();
-
-    if (batchStatements == null || batchStatements.isEmpty() || batchParameters == null) {
-      return new long[0];
+        return res;
     }
 
-    return internalExecuteBatch().getLargeUpdateCount();
-  }
-
-  @Override
-  public long executeLargeUpdate(String sql) throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      executeWithFlags(sql, QueryExecutor.QUERY_NO_RESULTS);
-      checkNoResultUpdate();
-      return getLargeUpdateCount();
-    }
-  }
-
-  @Override
-  public long executeLargeUpdate(String sql, int autoGeneratedKeys) throws SQLException {
-    if (autoGeneratedKeys == Statement.NO_GENERATED_KEYS) {
-      return executeLargeUpdate(sql);
-    }
-
-    return executeLargeUpdate(sql, (String[]) null);
-  }
-
-  @Override
-  public long executeLargeUpdate(String sql, int[] columnIndexes) throws SQLException {
-    if (columnIndexes == null || columnIndexes.length == 0) {
-      return executeLargeUpdate(sql);
-    }
-
-    throw new PSQLException(GT.tr("Returning autogenerated keys by column index is not supported."),
-        PSQLState.NOT_IMPLEMENTED);
-  }
-
-  @Override
-  public long executeLargeUpdate(String sql, String [] columnNames) throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      if (columnNames != null && columnNames.length == 0) {
-        return executeLargeUpdate(sql);
-      }
-
-      wantsGeneratedKeysOnce = true;
-      if (!executeCachedSql(sql, 0, columnNames)) {
-        // no resultset returned. What's a pity!
-      }
-      return getLargeUpdateCount();
-    }
-  }
-
-  @Override
-  public boolean isClosed() throws SQLException {
-    return isClosed == 1;
-  }
-
-  @Override
-  public void setPoolable(boolean poolable) throws SQLException {
-    checkClosed();
-    this.poolable = poolable;
-  }
-
-  @Override
-  public boolean isPoolable() throws SQLException {
-    checkClosed();
-    return poolable;
-  }
-
-  @Override
-  public boolean isWrapperFor(Class<?> iface) throws SQLException {
-    return iface.isAssignableFrom(getClass());
-  }
-
-  @Override
-  public <T> T unwrap(Class<T> iface) throws SQLException {
-    if (iface.isAssignableFrom(getClass())) {
-      return iface.cast(this);
-    }
-    throw new SQLException("Cannot unwrap to " + iface.getName());
-  }
-
-  @Override
-  public void closeOnCompletion() throws SQLException {
-    closeOnCompletion = true;
-  }
-
-  @Override
-  public boolean isCloseOnCompletion() throws SQLException {
-    return closeOnCompletion;
-  }
-
-  protected void checkCompletion() throws SQLException {
-    if (!closeOnCompletion) {
-      return;
-    }
-
-    try (ResourceLock ignore = lock.obtain()) {
-      ResultWrapper result = firstUnclosedResult;
-      while (result != null) {
-        ResultSet resultSet = result.getResultSet();
-        if (resultSet != null && !resultSet.isClosed()) {
-          return;
+    @Override
+    public boolean executeWithFlags(CachedQuery simpleQuery, int flags) throws SQLException {
+        checkClosed();
+        if (connection.getPreferQueryMode().compareTo(PreferQueryMode.EXTENDED) < 0) {
+            flags |= QueryExecutor.QUERY_EXECUTE_AS_SIMPLE;
+        }
+        execute(simpleQuery, null, flags);
+        try (ResourceLock ignore = lock.obtain()) {
+            checkClosed();
+            return result != null && result.getResultSet() != null;
         }
-        result = result.getNext();
-      }
     }
 
-    // prevent all ResultSet.close arising from Statement.close to loop here
-    closeOnCompletion = false;
-    try {
-      close();
-    } finally {
-      // restore the status if one rely on isCloseOnCompletion
-      closeOnCompletion = true;
-    }
-  }
-
-  @Override
-  public boolean getMoreResults(int current) throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      checkClosed();
-      // CLOSE_CURRENT_RESULT
-      if (current == Statement.CLOSE_CURRENT_RESULT && result != null
-          && result.getResultSet() != null) {
-        result.getResultSet().close();
-      }
-
-      // Advance resultset.
-      if (result != null) {
-        result = result.getNext();
-      }
-
-      // CLOSE_ALL_RESULTS
-      if (current == Statement.CLOSE_ALL_RESULTS) {
-        // Close preceding resultsets.
-        closeUnclosedProcessedResults();
-      }
-
-      // Done.
-      return result != null && result.getResultSet() != null;
-    }
-  }
-
-  @Override
-  public ResultSet getGeneratedKeys() throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      checkClosed();
-      if (generatedKeys == null || generatedKeys.getResultSet() == null) {
-        return createDriverResultSet(new Field[0], new ArrayList<>());
-      }
-
-      return generatedKeys.getResultSet();
-    }
-  }
-
-  @Override
-  public int executeUpdate(String sql, int autoGeneratedKeys) throws SQLException {
-    if (autoGeneratedKeys == Statement.NO_GENERATED_KEYS) {
-      return executeUpdate(sql);
+    @Override
+    public boolean executeWithFlags(int flags) throws SQLException {
+        checkClosed();
+        throw new PSQLException(GT.tr("Can''t use executeWithFlags(int) on a Statement."),
+                PSQLState.WRONG_OBJECT_TYPE);
     }
 
-    return executeUpdate(sql, (String[]) null);
-  }
-
-  @Override
-  public int executeUpdate(String sql, int[] columnIndexes) throws SQLException {
-    if (columnIndexes == null || columnIndexes.length == 0) {
-      return executeUpdate(sql);
+    /*
+    If there are multiple result sets we close any that have been processed and left open
+    by the client.
+     */
+    private void closeUnclosedProcessedResults() throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            ResultWrapper resultWrapper = this.firstUnclosedResult;
+            ResultWrapper currentResult = this.result;
+            for (; resultWrapper != currentResult && resultWrapper != null;
+                 resultWrapper = resultWrapper.getNext()) {
+                PgResultSet rs = (PgResultSet) resultWrapper.getResultSet();
+                if (rs != null) {
+                    rs.closeInternally();
+                }
+            }
+            firstUnclosedResult = resultWrapper;
+        }
     }
 
-    throw new PSQLException(GT.tr("Returning autogenerated keys by column index is not supported."),
-        PSQLState.NOT_IMPLEMENTED);
-  }
+    protected void closeForNextExecution() throws SQLException {
 
-  @Override
-  public int executeUpdate(String sql, String [] columnNames) throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      if (columnNames != null && columnNames.length == 0) {
-        return executeUpdate(sql);
-      }
+        // Every statement execution clears any previous warnings.
+        clearWarnings();
 
-      wantsGeneratedKeysOnce = true;
-      if (!executeCachedSql(sql, 0, columnNames)) {
-        // no resultset returned. What's a pity!
-      }
-      return getUpdateCount();
-    }
-  }
+        // Close any existing resultsets associated with this statement.
+        try (ResourceLock ignore = lock.obtain()) {
+            closeUnclosedProcessedResults();
 
-  @Override
-  public boolean execute(String sql, int autoGeneratedKeys) throws SQLException {
-    if (autoGeneratedKeys == Statement.NO_GENERATED_KEYS) {
-      return execute(sql);
-    }
-    return execute(sql, (String[]) null);
-  }
+            if (this.result != null && this.result.getResultSet() != null) {
+                this.result.getResultSet().close();
+            }
+            result = null;
 
-  @Override
-  public boolean execute(String sql, int [] columnIndexes) throws SQLException {
-    if (columnIndexes != null && columnIndexes.length == 0) {
-      return execute(sql);
+            ResultWrapper generatedKeys = this.generatedKeys;
+            if (generatedKeys != null) {
+                ResultSet resultSet = generatedKeys.getResultSet();
+                if (resultSet != null) {
+                    resultSet.close();
+                }
+                this.generatedKeys = null;
+            }
+        }
     }
 
-    throw new PSQLException(GT.tr("Returning autogenerated keys by column index is not supported."),
-        PSQLState.NOT_IMPLEMENTED);
-  }
-
-  @Override
-  public boolean execute(String sql, String [] columnNames) throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      if (columnNames != null && columnNames.length == 0) {
-        return execute(sql);
-      }
-
-      wantsGeneratedKeysOnce = true;
-      return executeCachedSql(sql, 0, columnNames);
+    /**
+     * Returns true if query is unlikely to be reused.
+     *
+     * <p>NOTE: despite the query-like name, this method has a side effect — it increments the
+     * cached query's execute count on every call, so it must be invoked at most once per
+     * statement execution.</p>
+     *
+     * @param cachedQuery to check (null if current query)
+     * @return true if query is unlikely to be reused
+     */
+    protected boolean isOneShotQuery(CachedQuery cachedQuery) {
+        if (cachedQuery == null) {
+            return true;
+        }
+        cachedQuery.increaseExecuteCount();
+        // One-shot while below the server-prepare threshold (threshold 0 disables server
+        // preparation entirely); forced binary transfer always requires a named statement.
+        return (mPrepareThreshold == 0 || cachedQuery.getExecuteCount() < mPrepareThreshold)
+                && !getForceBinaryTransfer();
+    }
-  }
 
-  @Override
-  public int getResultSetHoldability() throws SQLException {
-    return rsHoldability;
-  }
-
-  @Override
-  public ResultSet createDriverResultSet(Field[] fields, List<Tuple> tuples)
-      throws SQLException {
-    return createResultSet(null, fields, tuples, null);
-  }
-
-  protected void transformQueriesAndParameters() throws SQLException {
-  }
-
-  @Override
-  public void setAdaptiveFetch(boolean adaptiveFetch) {
-    this.adaptiveFetch = adaptiveFetch;
-  }
-
-  @Override
-  public boolean getAdaptiveFetch() {
-    return adaptiveFetch;
-  }
-
-  protected TimestampUtils getTimestampUtils() {
-    if (timestampUtils == null) {
-      timestampUtils = new TimestampUtils(!connection.getQueryExecutor().getIntegerDateTimes(), new QueryExecutorTimeZoneProvider(connection.getQueryExecutor()));
+    /**
+     * Executes the given cached query under the statement lock, retrying exactly once when the
+     * executor reports the failure as transparently retryable.
+     *
+     * @param cachedQuery     query to execute
+     * @param queryParameters bound parameters (may be null for simple statements)
+     * @param flags           QueryExecutor.QUERY_* flag bits
+     * @throws SQLException if execution fails and is not healed by the single retry
+     */
+    protected final void execute(CachedQuery cachedQuery,
+                                 ParameterList queryParameters, int flags)
+            throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            try {
+                executeInternal(cachedQuery, queryParameters, flags);
+            } catch (SQLException e) {
+                // Don't retry composite queries as it might get partially executed
+                if (cachedQuery.query.getSubqueries() != null
+                        || !connection.getQueryExecutor().willHealOnRetry(e)) {
+                    throw e;
+                }
+                // Discard the (possibly stale) server-side statement before retrying.
+                cachedQuery.query.close();
+                // Execute the query one more time
+                executeInternal(cachedQuery, queryParameters, flags);
+            }
+        }
+    }
+
+    /**
+     * Core single-query execution path: computes the final QueryExecutor flag set from statement
+     * state (fetch size, generated-keys mode, prepare threshold, autocommit, read-only hint,
+     * concurrency), optionally issues a describe-only round trip when binary transfer is forced,
+     * runs the query under the cancel timer, and publishes the results (splitting off the
+     * generated-keys result when requested).
+     */
+    private void executeInternal(CachedQuery cachedQuery,
+                                 ParameterList queryParameters, int flags)
+            throws SQLException {
+        closeForNextExecution();
+
+        // Enable cursor-based resultset if possible.
+        if (fetchSize > 0 && !wantsScrollableResultSet() && !connection.getAutoCommit()
+                && !wantsHoldableResultSet()) {
+            flags |= QueryExecutor.QUERY_FORWARD_CURSOR;
+        }
+
+        if (wantsGeneratedKeysOnce || wantsGeneratedKeysAlways) {
+            flags |= QueryExecutor.QUERY_BOTH_ROWS_AND_STATUS;
+
+            // If the no results flag is set (from executeUpdate)
+            // clear it so we get the generated keys results.
+            //
+            if ((flags & QueryExecutor.QUERY_NO_RESULTS) != 0) {
+                flags &= ~(QueryExecutor.QUERY_NO_RESULTS);
+            }
+        }
+
+        // Only use named statements after we hit the threshold. Note that only
+        // named statements can be transferred in binary format.
+        // isOneShotQuery will check to see if we have hit the prepareThreshold count
+
+        if (isOneShotQuery(cachedQuery)) {
+            flags |= QueryExecutor.QUERY_ONESHOT;
+        }
+
+        if (connection.getAutoCommit()) {
+            flags |= QueryExecutor.QUERY_SUPPRESS_BEGIN;
+        }
+        if (connection.hintReadOnly()) {
+            flags |= QueryExecutor.QUERY_READ_ONLY_HINT;
+        }
+
+        // updateable result sets do not yet support binary updates
+        if (concurrency != ResultSet.CONCUR_READ_ONLY) {
+            flags |= QueryExecutor.QUERY_NO_BINARY_TRANSFER;
+        }
+
+        Query queryToExecute = cachedQuery.query;
+
+        // An empty query needs no transaction; suppress the implicit BEGIN.
+        if (queryToExecute.isEmpty()) {
+            flags |= QueryExecutor.QUERY_SUPPRESS_BEGIN;
+        }
+
+        if (!queryToExecute.isStatementDescribed() && forceBinaryTransfers
+                && (flags & QueryExecutor.QUERY_EXECUTE_AS_SIMPLE) == 0) {
+            // Simple 'Q' execution does not need to know parameter types
+            // When binaryTransfer is forced, then we need to know resulting parameter and column types,
+            // thus sending a describe request.
+            int flags2 = flags | QueryExecutor.QUERY_DESCRIBE_ONLY;
+            StatementResultHandler handler2 = new StatementResultHandler();
+            connection.getQueryExecutor().execute(queryToExecute, queryParameters, handler2, 0, 0,
+                    flags2);
+            ResultWrapper result2 = handler2.getResults();
+            if (result2 != null) {
+                result2.getResultSet().close();
+            }
+        }
+
+        StatementResultHandler handler = new StatementResultHandler();
+        // Clear the previous result under the lock before the (unlocked) network round trip.
+        try (ResourceLock ignore = lock.obtain()) {
+            result = null;
+        }
+        try {
+            startTimer();
+            connection.getQueryExecutor().execute(queryToExecute, queryParameters, handler, maxrows,
+                    fetchSize, flags, adaptiveFetch);
+        } finally {
+            killTimerTask();
+        }
+        // Publish results under the statement lock; a concurrent close() is detected here.
+        try (ResourceLock ignore = lock.obtain()) {
+            checkClosed();
+
+            ResultWrapper currentResult = handler.getResults();
+            result = firstUnclosedResult = currentResult;
+
+            if (wantsGeneratedKeysOnce || wantsGeneratedKeysAlways) {
+                // The generated-keys rows arrive as the first result; the real result follows.
+                generatedKeys = currentResult;
+                result = currentResult.getNext();
+
+                if (wantsGeneratedKeysOnce) {
+                    wantsGeneratedKeysOnce = false;
+                }
+            }
+        }
+    }
+
+    @Override
+    public void setCursorName(String name) throws SQLException {
+        checkClosed();
+        // No-op.
+    }
+
+    @Override
+    public int getUpdateCount() throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            checkClosed();
+            // -1 per JDBC: nothing executed yet, or the current result is a ResultSet.
+            if (result == null || result.getResultSet() != null) {
+                return -1;
+            }
+
+            long count = result.getUpdateCount();
+            // The wire count is a long; the int API signals overflow with SUCCESS_NO_INFO.
+            return count > Integer.MAX_VALUE ? Statement.SUCCESS_NO_INFO : (int) count;
+        }
+    }
+
+    @Override
+    public boolean getMoreResults() throws SQLException {
+        return getMoreResults(CLOSE_ALL_RESULTS);
+    }
+
+    @Override
+    public int getMaxRows() throws SQLException {
+        checkClosed();
+        return maxrows;
+    }
+
+    @Override
+    public void setMaxRows(int max) throws SQLException {
+        checkClosed();
+        if (max < 0) {
+            throw new PSQLException(
+                    GT.tr("Maximum number of rows must be a value greater than or equal to 0."),
+                    PSQLState.INVALID_PARAMETER_VALUE);
+        }
+        maxrows = max;
+    }
+
+    @Override
+    public void setEscapeProcessing(boolean enable) throws SQLException {
+        checkClosed();
+        replaceProcessingEnabled = enable;
+    }
+
+    @Override
+    public int getQueryTimeout() throws SQLException {
+        checkClosed();
+        // The timeout is stored in milliseconds; clamp to the int-seconds JDBC API.
+        long seconds = timeout / 1000;
+        if (seconds >= Integer.MAX_VALUE) {
+            return Integer.MAX_VALUE;
+        }
+        return (int) seconds;
+    }
+
+    @Override
+    public void setQueryTimeout(int seconds) throws SQLException {
+        // Widen before multiplying to avoid int overflow for large timeouts.
+        setQueryTimeoutMs(seconds * 1000L);
+    }
+
+    /**
+     * The queryTimeout limit is the number of milliseconds the driver will wait for a Statement to
+     * execute. If the limit is exceeded, a SQLException is thrown.
+     *
+     * @return the current query timeout limit in milliseconds; 0 = unlimited
+     * @throws SQLException if a database access error occurs
+     */
+    public long getQueryTimeoutMs() throws SQLException {
+        checkClosed();
+        return timeout;
+    }
+
+    /**
+     * Sets the queryTimeout limit.
+     *
+     * @param millis - the new query timeout limit in milliseconds
+     * @throws SQLException if a database access error occurs
+     */
+    public void setQueryTimeoutMs(long millis) throws SQLException {
+        checkClosed();
+
+        if (millis < 0) {
+            // NOTE(review): "greater than or equals to 0" is ungrammatical (siblings say
+            // "greater than or equal to 0"), but the literal is a GT translation key —
+            // changing it would orphan existing translations. Fix together with the bundles.
+            throw new PSQLException(GT.tr("Query timeout must be a value greater than or equals to 0."),
+                    PSQLState.INVALID_PARAMETER_VALUE);
+        }
+        timeout = millis;
+    }
+
+    /**
+     * <p>Either initializes new warning wrapper, or adds warning onto the chain.</p>
+     *
+     * <p>Although warnings are expected to be added sequentially, the warnings chain may be cleared
+     * concurrently at any time via {@link #clearWarnings()}, therefore it is possible that a warning
+     * added via this method is placed onto the end of the previous warning chain</p>
+     *
+     * @param warn warning to add
+     */
+    public void addWarning(SQLWarning warn) {
+        //copy reference to avoid NPE from concurrent modification of this.warnings
+        final PSQLWarningWrapper warnWrap = this.warnings;
+        if (warnWrap == null) {
+            this.warnings = new PSQLWarningWrapper(warn);
+        } else {
+            // Benign race: a concurrent clearWarnings() may null the field after the read
+            // above; the warning then lands on the old (discarded) chain, as documented.
+            warnWrap.addWarning(warn);
+        }
+    }
+
+    @Override
+    public SQLWarning getWarnings() throws SQLException {
+        checkClosed();
+        //copy reference to avoid NPE from concurrent modification of this.warnings
+        final PSQLWarningWrapper warnWrap = this.warnings;
+        return warnWrap != null ? warnWrap.getFirstWarning() : null;
+    }
+
+    @Override
+    public int getMaxFieldSize() throws SQLException {
+        // NOTE(review): unlike the sibling getters, this does not call checkClosed();
+        // confirm whether returning the cached value on a closed statement is intentional.
+        return maxFieldSize;
+    }
+
+    @Override
+    public void setMaxFieldSize(int max) throws SQLException {
+        checkClosed();
+        if (max < 0) {
+            throw new PSQLException(
+                    GT.tr("The maximum field size must be a value greater than or equal to 0."),
+                    PSQLState.INVALID_PARAMETER_VALUE);
+        }
+        maxFieldSize = max;
+    }
+
+    /**
+     * <p>Clears the warning chain.</p>
+     * <p>Note that while it is safe to clear warnings while the query is executing, warnings that are
+     * added between calls to {@link #getWarnings()} and #clearWarnings() may be missed.
+     * Therefore you should hold a reference to the tail of the previous warning chain
+     * and verify if its {@link SQLWarning#getNextWarning()} value is holds any new value.</p>
+     */
+    @Override
+    public void clearWarnings() throws SQLException {
+        warnings = null;
+    }
+
+    @Override
+    public ResultSet getResultSet() throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            checkClosed();
+
+            // null when nothing has been executed, or the current result is an update count.
+            if (result == null) {
+                return null;
+            }
+
+            return result.getResultSet();
+        }
+    }
+
+    /**
+     * <B>Note:</B> even though {@code Statement} is automatically closed when it is garbage
+     * collected, it is better to close it explicitly to lower resource consumption.
+     * <p>
+     * {@inheritDoc}
+     */
+    @Override
+    public final void close() throws SQLException {
+        // closing an already closed Statement is a no-op.
+        // The CAS ensures the close sequence below runs exactly once under concurrency.
+        if (!IS_CLOSED_UPDATER.compareAndSet(this, 0, 1)) {
+            return;
+        }
+
+        // Abort any in-flight query before releasing resources.
+        cancel();
+
+        closeForNextExecution();
+
+        closeImpl();
+    }
+
+    /**
+     * Subclass hook for extra cleanup; the base implementation is intentionally empty.
+     * This is guaranteed to be called exactly once even in case of concurrent {@link #close()} calls.
+     *
+     * @throws SQLException in case of error
+     */
+    protected void closeImpl() throws SQLException {
+    }
+
+    /*
+     *
+     * The following methods are postgres extensions and are defined in the interface BaseStatement
+     *
+     */
+
+    @Override
+    public long getLastOID() throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            checkClosed();
+            // 0 when nothing has been executed yet.
+            if (result == null) {
+                return 0;
+            }
+            return result.getInsertOID();
+        }
+    }
+
+    @Override
+    public int getPrepareThreshold() {
+        return mPrepareThreshold;
+    }
+
+    @Override
+    public void setPrepareThreshold(int newThreshold) throws SQLException {
+        checkClosed();
+
+        // A negative threshold is the driver's convention for "force binary transfers and
+        // server-prepare immediately" — normalize it to 1.
+        if (newThreshold < 0) {
+            forceBinaryTransfers = true;
+            newThreshold = 1;
+        }
+
+        this.mPrepareThreshold = newThreshold;
+    }
+
+    @Override
+    public boolean isUseServerPrepare() {
+        return false;
+    }
+
+    @Override
+    @SuppressWarnings("deprecation")
+    public void setUseServerPrepare(boolean flag) throws SQLException {
+        // true -> prepare on first execution; false -> never server-prepare.
+        setPrepareThreshold(flag ? 1 : 0);
+    }
+
+    /**
+     * Throws if this statement has been closed; guards every public JDBC entry point.
+     */
+    protected void checkClosed() throws SQLException {
+        if (isClosed()) {
+            throw new PSQLException(GT.tr("This statement has been closed."),
+                    PSQLState.OBJECT_NOT_IN_STATE);
+        }
+    }
+
+    // ** JDBC 2 Extensions **
+
+    /**
+     * Adds the given SQL to this statement's batch, lazily creating the batch lists.
+     * Simple (non-prepared) batch entries carry a null parameter list.
+     *
+     * @param sql SQL command to queue for {@link #executeBatch()}
+     * @throws SQLException if the statement is closed or the SQL cannot be parsed
+     */
+    @Override
+    public void addBatch(String sql) throws SQLException {
+        checkClosed();
+
+        ArrayList<Query> batchStatements = this.batchStatements;
+        if (batchStatements == null) {
+            this.batchStatements = batchStatements = new ArrayList<>();
+        }
+        ArrayList<ParameterList> batchParameters = this.batchParameters;
+        if (batchParameters == null) {
+            // Diamond operator for consistency with the batchStatements initialization above.
+            this.batchParameters = batchParameters = new ArrayList<>();
+        }
+
+        // Simple statements should not replace ?, ? with $1, $2
+        boolean shouldUseParameterized = false;
+        CachedQuery cachedQuery = connection.createQuery(sql, replaceProcessingEnabled, shouldUseParameterized);
+        batchStatements.add(cachedQuery.query);
+        // No bound parameters for a plain-SQL batch entry.
+        batchParameters.add(null);
+    }
+
+    @Override
+    public void clearBatch() throws SQLException {
+        // Keep the lists allocated; only drop their contents.
+        if (batchStatements != null) {
+            batchStatements.clear();
+        }
+        if (batchParameters != null) {
+            batchParameters.clear();
+        }
+    }
+
+    /**
+     * Factory hook so subclasses can supply a specialized batch result handler.
+     */
+    protected BatchResultHandler createBatchHandler(Query[] queries,
+                                                    ParameterList[] parameterLists) {
+        return new BatchResultHandler(this, queries, parameterLists,
+                wantsGeneratedKeysAlways);
+    }
+
+    /**
+     * Executes the queued batch: snapshots and clears the batch lists, derives the executor flags
+     * (generated-keys mode, query mode, one-shot vs. server-prepared, describe-ahead), optionally
+     * performs a describe-only round trip, then runs all queries under the cancel timer.
+     * Generated keys, when requested, are captured even on partial failure (see finally block).
+     *
+     * @return the handler holding per-entry update counts (and generated keys, if requested)
+     */
+    private BatchResultHandler internalExecuteBatch() throws SQLException {
+        // Construct query/parameter arrays.
+        transformQueriesAndParameters();
+        ArrayList<Query> batchStatements = this.batchStatements;
+        ArrayList<ParameterList> batchParameters = this.batchParameters;
+        // Empty arrays should be passed to toArray
+        // see http://shipilev.net/blog/2016/arrays-wisdom-ancients/
+        Query[] queries = batchStatements.toArray(new Query[0]);
+        ParameterList[] parameterLists = batchParameters.toArray(new ParameterList[0]);
+        batchStatements.clear();
+        batchParameters.clear();
+
+        int flags;
+
+        // Force a Describe before any execution? We need to do this if we're going
+        // to send anything dependent on the Describe results, e.g. binary parameters.
+        boolean preDescribe = false;
+
+        if (wantsGeneratedKeysAlways) {
+            /*
+             * This batch will return generated keys, tell the executor to expect result rows. We also
+             * force a Describe later so we know the size of the results to expect.
+             *
+             * If the parameter type(s) change between batch entries and the default binary-mode changes
+             * we might get mixed binary and text in a single result set column, which we cannot handle.
+             * To prevent this, disable binary transfer mode in batches that return generated keys. See
+             * GitHub issue #267
+             */
+            flags = QueryExecutor.QUERY_BOTH_ROWS_AND_STATUS | QueryExecutor.QUERY_NO_BINARY_TRANSFER;
+        } else {
+            // If a batch hasn't specified that it wants generated keys, using the appropriate
+            // Connection.createStatement(...) interfaces, disallow any result set.
+            flags = QueryExecutor.QUERY_NO_RESULTS;
+        }
+
+        PreferQueryMode preferQueryMode = connection.getPreferQueryMode();
+        if (preferQueryMode == PreferQueryMode.SIMPLE
+                || (preferQueryMode == PreferQueryMode.EXTENDED_FOR_PREPARED
+                && parameterLists[0] == null)) {
+            flags |= QueryExecutor.QUERY_EXECUTE_AS_SIMPLE;
+        }
+
+        boolean sameQueryAhead = queries.length > 1 && queries[0] == queries[1];
+
+        if (!sameQueryAhead
+                // If executing the same query twice in a batch, make sure the statement
+                // is server-prepared. In other words, "oneshot" only if the query is one in the batch
+                // or the queries are different
+                || isOneShotQuery(null)) {
+            flags |= QueryExecutor.QUERY_ONESHOT;
+        } else {
+            // If a batch requests generated keys and isn't already described,
+            // force a Describe of the query before proceeding. That way we can
+            // determine the appropriate size of each batch by estimating the
+            // maximum data returned. Without that, we don't know how many queries
+            // we'll be able to queue up before we risk a deadlock.
+            // (see v3.QueryExecutorImpl's MAX_BUFFERED_RECV_BYTES)
+
+            // SameQueryAhead is just a quick way to issue pre-describe for batch execution
+            // TODO: It should be reworked into "pre-describe if query has unknown parameter
+            // types and same query is ahead".
+            preDescribe = (wantsGeneratedKeysAlways || sameQueryAhead)
+                    && !queries[0].isStatementDescribed();
+            /*
+             * It's also necessary to force a Describe on the first execution of the new statement, even
+             * though we already described it, to work around bug #267.
+             */
+            flags |= QueryExecutor.QUERY_FORCE_DESCRIBE_PORTAL;
+        }
+
+        if (connection.getAutoCommit()) {
+            flags |= QueryExecutor.QUERY_SUPPRESS_BEGIN;
+        }
+        if (connection.hintReadOnly()) {
+            flags |= QueryExecutor.QUERY_READ_ONLY_HINT;
+        }
+
+        BatchResultHandler handler;
+        handler = createBatchHandler(queries, parameterLists);
+
+        if ((preDescribe || forceBinaryTransfers)
+                && (flags & QueryExecutor.QUERY_EXECUTE_AS_SIMPLE) == 0) {
+            // Do a client-server round trip, parsing and describing the query so we
+            // can determine its result types for use in binary parameters, batch sizing,
+            // etc.
+            int flags2 = flags | QueryExecutor.QUERY_DESCRIBE_ONLY;
+            StatementResultHandler handler2 = new StatementResultHandler();
+            try {
+                connection.getQueryExecutor().execute(queries[0], parameterLists[0], handler2, 0, 0, flags2);
+            } catch (SQLException e) {
+                // Unable to parse the first statement -> throw BatchUpdateException
+                handler.handleError(e);
+                handler.handleCompletion();
+                // Will not reach here (see above)
+            }
+            ResultWrapper result2 = handler2.getResults();
+            if (result2 != null) {
+                result2.getResultSet().close();
+            }
+        }
+
+        // Clear the previous result under the lock before the (unlocked) network round trip.
+        try (ResourceLock ignore = lock.obtain()) {
+            result = null;
+        }
+
+        try {
+            startTimer();
+            connection.getQueryExecutor().execute(queries, parameterLists, handler, maxrows, fetchSize,
+                    flags, adaptiveFetch);
+        } finally {
+            killTimerTask();
+            // There might be some rows generated even in case of failures
+            try (ResourceLock ignore = lock.obtain()) {
+                checkClosed();
+                if (wantsGeneratedKeysAlways) {
+                    generatedKeys = new ResultWrapper(handler.getGeneratedKeys());
+                }
+            }
+        }
+        return handler;
+    }
+
+    @Override
+    public int[] executeBatch() throws SQLException {
+        checkClosed();
+        closeForNextExecution();
+
+        // An empty (or never-populated) batch executes nothing, per JDBC.
+        if (batchStatements == null || batchStatements.isEmpty() || batchParameters == null) {
+            return new int[0];
+        }
+
+        return internalExecuteBatch().getUpdateCount();
+    }
+
+    @Override
+    public void cancel() throws SQLException {
+        // Fast path: nothing executing, nothing to cancel.
+        if (statementState == StatementCancelState.IDLE) {
+            return;
+        }
+        // Only the thread that wins the IN_QUERY -> CANCELING transition sends the cancel.
+        if (!STATE_UPDATER.compareAndSet(this, StatementCancelState.IN_QUERY,
+                StatementCancelState.CANCELING)) {
+            // Not in query, there's nothing to cancel
+            return;
+        }
+        // Use connection lock to avoid spinning in killTimerTask
+        try (ResourceLock connectionLock = connection.obtainLock()) {
+            try {
+                connection.cancelQuery();
+            } finally {
+                STATE_UPDATER.set(this, StatementCancelState.CANCELLED);
+                connection.lockCondition().signalAll(); // wake-up killTimerTask
+            }
+        }
+    }
+
+    @Override
+    public Connection getConnection() throws SQLException {
+        return connection;
+    }
+
+    @Override
+    public int getFetchDirection() {
+        return fetchdirection;
+    }
+
+    @Override
+    public void setFetchDirection(int direction) throws SQLException {
+        // Validate against the three JDBC constants; anything else is rejected.
+        switch (direction) {
+            case ResultSet.FETCH_FORWARD:
+            case ResultSet.FETCH_REVERSE:
+            case ResultSet.FETCH_UNKNOWN:
+                fetchdirection = direction;
+                break;
+            default:
+                throw new PSQLException(GT.tr("Invalid fetch direction constant: {0}.", direction),
+                        PSQLState.INVALID_PARAMETER_VALUE);
+        }
+    }
+
+    @Override
+    public int getResultSetConcurrency() {
+        return concurrency;
+    }
+
+    @Override
+    public int getResultSetType() {
+        return resultsettype;
+    }
+
+    /**
+     * Marks the statement as IN_QUERY and, when a timeout is configured, schedules a cancel
+     * timer task on the connection. Paired with {@link #killTimerTask()} in a finally block.
+     */
+    private void startTimer() {
+        /*
+         * there shouldn't be any previous timer active, but better safe than sorry.
+         */
+        cleanupTimer();
+
+        STATE_UPDATER.set(this, StatementCancelState.IN_QUERY);
+
+        // timeout == 0 means unlimited: no cancel task needed.
+        if (timeout == 0) {
+            return;
+        }
+
+        TimerTask cancelTask = new StatementCancelTimerTask(this);
+
+        CANCEL_TIMER_UPDATER.set(this, cancelTask);
+        connection.addTimerTask(cancelTask, timeout);
+    }
+
+    /**
+     * Timer-task callback: cancels the running query only if the given task is still the
+     * registered cancel task (i.e. the statement has not already finished).
+     */
+    void cancelIfStillNeeded(TimerTask timerTask) {
+        try {
+            if (!CANCEL_TIMER_UPDATER.compareAndSet(this, timerTask, null)) {
+                // Nothing to do here, statement has already finished and cleared
+                // cancelTimerTask reference
+                return;
+            }
+            cancel();
+        } catch (SQLException e) {
+            // Deliberately swallowed: timeout-driven cancel is best-effort and runs on a
+            // timer thread with no caller to report to.
+        }
+    }
+
+    /**
+     * Clears {@link #cancelTimerTask} if any. Returns true if and only if "cancel" timer task would
+     * never invoke {@link #cancel()}.
+     */
+    private boolean cleanupTimer() {
+        TimerTask timerTask = CANCEL_TIMER_UPDATER.get(this);
+        if (timerTask == null) {
+            // If timeout is zero, then timer task did not exist, so we safely report "all clear"
+            return timeout == 0;
+        }
+        if (!CANCEL_TIMER_UPDATER.compareAndSet(this, timerTask, null)) {
+            // Failed to update reference -> timer has just fired, so we must wait for the query state to
+            // become "cancelling".
+            return false;
+        }
+        timerTask.cancel();
+        connection.purgeTimerTasks();
+        // All clear
+        return true;
+    }
+
+    /**
+     * Stops the cancel timer after query execution and returns the statement to IDLE,
+     * waiting out an in-flight cancel if the timer has already fired.
+     */
+    private void killTimerTask() {
+        boolean timerTaskIsClear = cleanupTimer();
+        // The order is important here: in case we need to wait for the cancel task, the state must be
+        // kept StatementCancelState.IN_QUERY, so cancelTask would be able to cancel the query.
+        // It is believed that this case is very rare, so "additional cancel and wait below" would not
+        // harm it.
+        if (timerTaskIsClear && STATE_UPDATER.compareAndSet(this, StatementCancelState.IN_QUERY, StatementCancelState.IDLE)) {
+            return;
+        }
+
+        // Being here means someone managed to call .cancel() and our connection did not receive
+        // "timeout error"
+        // We wait till state becomes "cancelled"
+        boolean interrupted = false;
+        try (ResourceLock connectionLock = connection.obtainLock()) {
+            // state check is performed with connection lock so it detects "cancelled" state faster
+            // In other words, it prevents unnecessary ".wait()" call
+            while (!STATE_UPDATER.compareAndSet(this, StatementCancelState.CANCELLED, StatementCancelState.IDLE)) {
+                try {
+                    // Note: wait timeout here is irrelevant since connection.obtainLock() would block until
+                    // .cancel finishes
+                    connection.lockCondition().await(10, TimeUnit.MILLISECONDS);
+                } catch (InterruptedException e) { // NOSONAR
+                    // Either re-interrupt this method or rethrow the "InterruptedException"
+                    interrupted = true;
+                }
+            }
+        }
+        // Restore the interrupt flag swallowed by the wait loop above.
+        if (interrupted) {
+            Thread.currentThread().interrupt();
+        }
+    }
+
+    protected boolean getForceBinaryTransfer() {
+        return forceBinaryTransfers;
+    }
+
+    @Override
+    public long getLargeUpdateCount() throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            checkClosed();
+            // -1 per JDBC: nothing executed yet, or the current result is a ResultSet.
+            if (result == null || result.getResultSet() != null) {
+                return -1;
+            }
+
+            // long-valued variant: no clamping needed (contrast with getUpdateCount()).
+            return result.getUpdateCount();
+        }
+    }
+
+    @Override
+    public long getLargeMaxRows() throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "getLargeMaxRows");
+    }
+
+    @Override
+    public void setLargeMaxRows(long max) throws SQLException {
+        throw Driver.notImplemented(this.getClass(), "setLargeMaxRows");
+    }
+
+    @Override
+    public long[] executeLargeBatch() throws SQLException {
+        checkClosed();
+        closeForNextExecution();
+
+        // An empty (or never-populated) batch executes nothing, per JDBC.
+        if (batchStatements == null || batchStatements.isEmpty() || batchParameters == null) {
+            return new long[0];
+        }
+
+        return internalExecuteBatch().getLargeUpdateCount();
+    }
+
+    @Override
+    public long executeLargeUpdate(String sql) throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            executeWithFlags(sql, QueryExecutor.QUERY_NO_RESULTS);
+            // Reject statements that produced a ResultSet, per the executeUpdate contract.
+            checkNoResultUpdate();
+            return getLargeUpdateCount();
+        }
+    }
+
+    @Override
+    public long executeLargeUpdate(String sql, int autoGeneratedKeys) throws SQLException {
+        if (autoGeneratedKeys == Statement.NO_GENERATED_KEYS) {
+            return executeLargeUpdate(sql);
+        }
+
+        // null column names = return all driver-default generated keys.
+        return executeLargeUpdate(sql, (String[]) null);
+    }
+
+    @Override
+    public long executeLargeUpdate(String sql, int[] columnIndexes) throws SQLException {
+        // null / empty index list = caller does not actually want keys by index.
+        if (columnIndexes == null || columnIndexes.length == 0) {
+            return executeLargeUpdate(sql);
+        }
+
+        throw new PSQLException(GT.tr("Returning autogenerated keys by column index is not supported."),
+                PSQLState.NOT_IMPLEMENTED);
+    }
+
+    @Override
+    public long executeLargeUpdate(String sql, String[] columnNames) throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            if (columnNames != null && columnNames.length == 0) {
+                return executeLargeUpdate(sql);
+            }
+
+            wantsGeneratedKeysOnce = true;
+            if (!executeCachedSql(sql, 0, columnNames)) {
+                // no resultset returned. What's a pity!
+            }
+            return getLargeUpdateCount();
+        }
+    }
+
+    @Override
+    public boolean isClosed() throws SQLException {
+        // isClosed is an int flag driven by IS_CLOSED_UPDATER (0 = open, 1 = closed).
+        return isClosed == 1;
+    }
+
+    @Override
+    public boolean isPoolable() throws SQLException {
+        checkClosed();
+        return poolable;
+    }
+
+    @Override
+    public void setPoolable(boolean poolable) throws SQLException {
+        checkClosed();
+        this.poolable = poolable;
+    }
+
+    @Override
+    public boolean isWrapperFor(Class<?> iface) throws SQLException {
+        return iface.isAssignableFrom(getClass());
+    }
+
+    @Override
+    public <T> T unwrap(Class<T> iface) throws SQLException {
+        if (iface.isAssignableFrom(getClass())) {
+            return iface.cast(this);
+        }
+        throw new SQLException("Cannot unwrap to " + iface.getName());
+    }
+
+    @Override
+    public void closeOnCompletion() throws SQLException {
+        closeOnCompletion = true;
+    }
+
+    @Override
+    public boolean isCloseOnCompletion() throws SQLException {
+        return closeOnCompletion;
+    }
+
+    /**
+     * Called when a dependent ResultSet closes: if closeOnCompletion is armed and every
+     * produced ResultSet is now closed, closes this statement.
+     */
+    protected void checkCompletion() throws SQLException {
+        if (!closeOnCompletion) {
+            return;
+        }
+
+        // Any still-open ResultSet keeps the statement alive.
+        try (ResourceLock ignore = lock.obtain()) {
+            ResultWrapper result = firstUnclosedResult;
+            while (result != null) {
+                ResultSet resultSet = result.getResultSet();
+                if (resultSet != null && !resultSet.isClosed()) {
+                    return;
+                }
+                result = result.getNext();
+            }
+        }
+
+        // prevent all ResultSet.close arising from Statement.close to loop here
+        closeOnCompletion = false;
+        try {
+            close();
+        } finally {
+            // restore the status if one rely on isCloseOnCompletion
+            closeOnCompletion = true;
+        }
+    }
+
+    @Override
+    public boolean getMoreResults(int current) throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            checkClosed();
+            // CLOSE_CURRENT_RESULT
+            if (current == Statement.CLOSE_CURRENT_RESULT && result != null
+                    && result.getResultSet() != null) {
+                result.getResultSet().close();
+            }
+
+            // Advance resultset.
+            if (result != null) {
+                result = result.getNext();
+            }
+
+            // CLOSE_ALL_RESULTS
+            if (current == Statement.CLOSE_ALL_RESULTS) {
+                // Close preceding resultsets.
+                closeUnclosedProcessedResults();
+            }
+
+            // Done.  (KEEP_CURRENT_RESULT falls through: nothing is closed.)
+            return result != null && result.getResultSet() != null;
+        }
+    }
+
+    @Override
+    public ResultSet getGeneratedKeys() throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            checkClosed();
+            // JDBC requires an empty ResultSet, not null, when no keys were requested/produced.
+            if (generatedKeys == null || generatedKeys.getResultSet() == null) {
+                return createDriverResultSet(new Field[0], new ArrayList<>());
+            }
+
+            return generatedKeys.getResultSet();
+        }
+    }
+
+    @Override
+    public int executeUpdate(String sql, int autoGeneratedKeys) throws SQLException {
+        if (autoGeneratedKeys == Statement.NO_GENERATED_KEYS) {
+            return executeUpdate(sql);
+        }
+
+        return executeUpdate(sql, (String[]) null);
+    }
+
+    @Override
+    public int executeUpdate(String sql, int[] columnIndexes) throws SQLException {
+        if (columnIndexes == null || columnIndexes.length == 0) {
+            return executeUpdate(sql);
+        }
+
+        throw new PSQLException(GT.tr("Returning autogenerated keys by column index is not supported."),
+                PSQLState.NOT_IMPLEMENTED);
+    }
+
+    @Override
+    public int executeUpdate(String sql, String[] columnNames) throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            if (columnNames != null && columnNames.length == 0) {
+                return executeUpdate(sql);
+            }
+
+            wantsGeneratedKeysOnce = true;
+            if (!executeCachedSql(sql, 0, columnNames)) {
+                // no result set was returned; fall through and report the update count
+            }
+            return getUpdateCount();
+        }
+    }
+
+    @Override
+    public boolean execute(String sql, int autoGeneratedKeys) throws SQLException {
+        if (autoGeneratedKeys == Statement.NO_GENERATED_KEYS) {
+            return execute(sql);
+        }
+        return execute(sql, (String[]) null);
+    }
+
+    @Override
+    public boolean execute(String sql, int[] columnIndexes) throws SQLException {
+        if (columnIndexes != null && columnIndexes.length == 0) {
+            return execute(sql);
+        }
+
+        throw new PSQLException(GT.tr("Returning autogenerated keys by column index is not supported."),
+                PSQLState.NOT_IMPLEMENTED);
+    }
+
+    @Override
+    public boolean execute(String sql, String[] columnNames) throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            if (columnNames != null && columnNames.length == 0) {
+                return execute(sql);
+            }
+
+            wantsGeneratedKeysOnce = true;
+            return executeCachedSql(sql, 0, columnNames);
+        }
+    }
+
+    @Override
+    public int getResultSetHoldability() throws SQLException {
+        return rsHoldability;
+    }
+
+    @Override
+    public ResultSet createDriverResultSet(Field[] fields, List<Tuple> tuples)
+            throws SQLException {
+        return createResultSet(null, fields, tuples, null);
+    }
+
+    protected void transformQueriesAndParameters() throws SQLException {
+    }
+
+    @Override
+    public boolean getAdaptiveFetch() {
+        return adaptiveFetch;
+    }
+
+    @Override
+    public void setAdaptiveFetch(boolean adaptiveFetch) {
+        this.adaptiveFetch = adaptiveFetch;
+    }
+
+    protected TimestampUtils getTimestampUtils() {
+        if (timestampUtils == null) {
+            timestampUtils = new TimestampUtils(!connection.getQueryExecutor().getIntegerDateTimes(), new QueryExecutorTimeZoneProvider(connection.getQueryExecutor()));
+        }
+        return timestampUtils;
+    }
+
+    /**
+     * ResultHandler implementations for updates, queries, and either-or.
+     */
+    public class StatementResultHandler extends ResultHandlerBase {
+        private ResultWrapper results;
+        private ResultWrapper lastResult;
+
+        public StatementResultHandler() {
+        }
+
+        ResultWrapper getResults() {
+            return results;
+        }
+
+        private void append(ResultWrapper newResult) {
+            if (results == null) {
+                lastResult = results = newResult;
+            } else {
+                lastResult.append(newResult);
+            }
+        }
+
+        @Override
+        public void handleResultRows(Query fromQuery, Field[] fields, List<Tuple> tuples,
+                                     ResultCursor cursor) {
+            try {
+                ResultSet rs = PgStatement.this.createResultSet(fromQuery, fields, tuples, cursor);
+                append(new ResultWrapper(rs));
+            } catch (SQLException e) {
+                handleError(e);
+            }
+        }
+
+        @Override
+        public void handleCommandStatus(String status, long updateCount, long insertOID) {
+            append(new ResultWrapper(updateCount, insertOID));
+        }
+
+        @Override
+        public void handleWarning(SQLWarning warning) {
+            PgStatement.this.addWarning(warning);
+        }
+
     }
-    return timestampUtils;
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/PreferQueryMode.java b/pgjdbc/src/main/java/org/postgresql/jdbc/PreferQueryMode.java
index 6526a1d..d1e4f3a 100644
--- a/pgjdbc/src/main/java/org/postgresql/jdbc/PreferQueryMode.java
+++ b/pgjdbc/src/main/java/org/postgresql/jdbc/PreferQueryMode.java
@@ -14,27 +14,27 @@ package org.postgresql.jdbc;
  * @see org.postgresql.PGProperty#PREFER_QUERY_MODE
  */
 public enum PreferQueryMode {
-  SIMPLE("simple"),
-  EXTENDED_FOR_PREPARED("extendedForPrepared"),
-  EXTENDED("extended"),
-  EXTENDED_CACHE_EVERYTHING("extendedCacheEverything");
+    SIMPLE("simple"),
+    EXTENDED_FOR_PREPARED("extendedForPrepared"),
+    EXTENDED("extended"),
+    EXTENDED_CACHE_EVERYTHING("extendedCacheEverything");
 
-  private final String value;
+    private final String value;
 
-  PreferQueryMode(String value) {
-    this.value = value;
-  }
-
-  public static PreferQueryMode of(String mode) {
-    for (PreferQueryMode preferQueryMode : values()) {
-      if (preferQueryMode.value.equals(mode)) {
-        return preferQueryMode;
-      }
+    PreferQueryMode(String value) {
+        this.value = value;
     }
-    return EXTENDED;
-  }
 
-  public String value() {
-    return value;
-  }
+    public static PreferQueryMode of(String mode) {
+        for (PreferQueryMode preferQueryMode : values()) {
+            if (preferQueryMode.value.equals(mode)) {
+                return preferQueryMode;
+            }
+        }
+        return EXTENDED;
+    }
+
+    public String value() {
+        return value;
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/QueryExecutorTimeZoneProvider.java b/pgjdbc/src/main/java/org/postgresql/jdbc/QueryExecutorTimeZoneProvider.java
index b2b4ff2..6cc83d2 100644
--- a/pgjdbc/src/main/java/org/postgresql/jdbc/QueryExecutorTimeZoneProvider.java
+++ b/pgjdbc/src/main/java/org/postgresql/jdbc/QueryExecutorTimeZoneProvider.java
@@ -18,21 +18,21 @@ import java.util.TimeZone;
  * </p>
  */
 class QueryExecutorTimeZoneProvider implements Provider<TimeZone> {
-  private final QueryExecutor queryExecutor;
+    private final QueryExecutor queryExecutor;
 
-  QueryExecutorTimeZoneProvider(QueryExecutor queryExecutor) {
-    this.queryExecutor = queryExecutor;
-  }
-
-  @Override
-  public TimeZone get() {
-    TimeZone timeZone = queryExecutor.getTimeZone();
-    if (timeZone == null) {
-      throw new IllegalStateException(
-          GT.tr("Backend timezone is not known. Backend should have returned TimeZone when "
-              + "establishing a connection")
-      );
+    QueryExecutorTimeZoneProvider(QueryExecutor queryExecutor) {
+        this.queryExecutor = queryExecutor;
+    }
+
+    @Override
+    public TimeZone get() {
+        TimeZone timeZone = queryExecutor.getTimeZone();
+        if (timeZone == null) {
+            throw new IllegalStateException(
+                    GT.tr("Backend timezone is not known. Backend should have returned TimeZone when "
+                            + "establishing a connection")
+            );
+        }
+        return timeZone;
     }
-    return timeZone;
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/ResourceLock.java b/pgjdbc/src/main/java/org/postgresql/jdbc/ResourceLock.java
index 19ec31c..b8b8a4a 100644
--- a/pgjdbc/src/main/java/org/postgresql/jdbc/ResourceLock.java
+++ b/pgjdbc/src/main/java/org/postgresql/jdbc/ResourceLock.java
@@ -22,22 +22,22 @@ import java.util.concurrent.locks.ReentrantLock;
 @SuppressWarnings("serial")
 public final class ResourceLock extends ReentrantLock implements AutoCloseable {
 
-  public ResourceLock() {
-  }
+    public ResourceLock() {
+    }
 
-  /**
-   * Obtain a lock and return the ResourceLock for use in try-with-resources block.
-   */
-  public ResourceLock obtain() {
-    lock();
-    return this;
-  }
+    /**
+     * Obtain a lock and return the ResourceLock for use in try-with-resources block.
+     */
+    public ResourceLock obtain() {
+        lock();
+        return this;
+    }
 
-  /**
-   * Unlock on exit of try-with-resources block.
-   */
-  @Override
-  public void close() {
-    this.unlock();
-  }
+    /**
+     * Unlock on exit of try-with-resources block.
+     */
+    @Override
+    public void close() {
+        this.unlock();
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/ResultWrapper.java b/pgjdbc/src/main/java/org/postgresql/jdbc/ResultWrapper.java
index df79ae7..1635431 100644
--- a/pgjdbc/src/main/java/org/postgresql/jdbc/ResultWrapper.java
+++ b/pgjdbc/src/main/java/org/postgresql/jdbc/ResultWrapper.java
@@ -15,45 +15,45 @@ import java.sql.ResultSet;
  * @author Oliver Jowett (oliver@opencloud.com)
  */
 public class ResultWrapper {
-  public ResultWrapper(ResultSet rs) {
-    this.rs = rs;
-    this.updateCount = -1;
-    this.insertOID = -1;
-  }
+    private final ResultSet rs;
+    private final long updateCount;
+    private final long insertOID;
+    private ResultWrapper next;
 
-  public ResultWrapper(long updateCount, long insertOID) {
-    this.rs = null;
-    this.updateCount = updateCount;
-    this.insertOID = insertOID;
-  }
-
-  public ResultSet getResultSet() {
-    return rs;
-  }
-
-  public long getUpdateCount() {
-    return updateCount;
-  }
-
-  public long getInsertOID() {
-    return insertOID;
-  }
-
-  public ResultWrapper getNext() {
-    return next;
-  }
-
-  public void append(ResultWrapper newResult) {
-    ResultWrapper tail = this;
-    while (tail.next != null) {
-      tail = tail.next;
+    public ResultWrapper(ResultSet rs) {
+        this.rs = rs;
+        this.updateCount = -1;
+        this.insertOID = -1;
     }
 
-    tail.next = newResult;
-  }
+    public ResultWrapper(long updateCount, long insertOID) {
+        this.rs = null;
+        this.updateCount = updateCount;
+        this.insertOID = insertOID;
+    }
 
-  private final ResultSet rs;
-  private final long updateCount;
-  private final long insertOID;
-  private ResultWrapper next;
+    public ResultSet getResultSet() {
+        return rs;
+    }
+
+    public long getUpdateCount() {
+        return updateCount;
+    }
+
+    public long getInsertOID() {
+        return insertOID;
+    }
+
+    public ResultWrapper getNext() {
+        return next;
+    }
+
+    public void append(ResultWrapper newResult) {
+        ResultWrapper tail = this;
+        while (tail.next != null) {
+            tail = tail.next;
+        }
+
+        tail.next = newResult;
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/SslMode.java b/pgjdbc/src/main/java/org/postgresql/jdbc/SslMode.java
index addea00..1eb4a42 100644
--- a/pgjdbc/src/main/java/org/postgresql/jdbc/SslMode.java
+++ b/pgjdbc/src/main/java/org/postgresql/jdbc/SslMode.java
@@ -13,69 +13,69 @@ import org.postgresql.util.PSQLState;
 import java.util.Properties;
 
 public enum SslMode {
-  /**
-   * Do not use encrypted connections.
-   */
-  DISABLE("disable"),
-  /**
-   * Start with non-encrypted connection, then try encrypted one.
-   */
-  ALLOW("allow"),
-  /**
-   * Start with encrypted connection, fallback to non-encrypted (default).
-   */
-  PREFER("prefer"),
-  /**
-   * Ensure connection is encrypted.
-   */
-  REQUIRE("require"),
-  /**
-   * Ensure connection is encrypted, and client trusts server certificate.
-   */
-  VERIFY_CA("verify-ca"),
-  /**
-   * Ensure connection is encrypted, client trusts server certificate, and server hostname matches
-   * the one listed in the server certificate.
-   */
-  VERIFY_FULL("verify-full"),
-  ;
+    /**
+     * Do not use encrypted connections.
+     */
+    DISABLE("disable"),
+    /**
+     * Start with non-encrypted connection, then try encrypted one.
+     */
+    ALLOW("allow"),
+    /**
+     * Start with encrypted connection, fallback to non-encrypted (default).
+     */
+    PREFER("prefer"),
+    /**
+     * Ensure connection is encrypted.
+     */
+    REQUIRE("require"),
+    /**
+     * Ensure connection is encrypted, and client trusts server certificate.
+     */
+    VERIFY_CA("verify-ca"),
+    /**
+     * Ensure connection is encrypted, client trusts server certificate, and server hostname matches
+     * the one listed in the server certificate.
+     */
+    VERIFY_FULL("verify-full"),
+    ;
 
-  public static final SslMode[] VALUES = values();
+    public static final SslMode[] VALUES = values();
 
-  public final String value;
+    public final String value;
 
-  SslMode(String value) {
-    this.value = value;
-  }
-
-  public boolean requireEncryption() {
-    return this.compareTo(REQUIRE) >= 0;
-  }
-
-  public boolean verifyCertificate() {
-    return this == VERIFY_CA || this == VERIFY_FULL;
-  }
-
-  public boolean verifyPeerName() {
-    return this == VERIFY_FULL;
-  }
-
-  public static SslMode of(Properties info) throws PSQLException {
-    String sslmode = PGProperty.SSL_MODE.getOrDefault(info);
-    // If sslmode is not set, fallback to ssl parameter
-    if (sslmode == null) {
-      if (PGProperty.SSL.getBoolean(info) || "".equals(PGProperty.SSL.getOrDefault(info))) {
-        return VERIFY_FULL;
-      }
-      return PREFER;
+    SslMode(String value) {
+        this.value = value;
     }
 
-    for (SslMode sslMode : VALUES) {
-      if (sslMode.value.equalsIgnoreCase(sslmode)) {
-        return sslMode;
-      }
+    public static SslMode of(Properties info) throws PSQLException {
+        String sslmode = PGProperty.SSL_MODE.getOrDefault(info);
+        // If sslmode is not set, fallback to ssl parameter
+        if (sslmode == null) {
+            if (PGProperty.SSL.getBoolean(info) || "".equals(PGProperty.SSL.getOrDefault(info))) {
+                return VERIFY_FULL;
+            }
+            return PREFER;
+        }
+
+        for (SslMode sslMode : VALUES) {
+            if (sslMode.value.equalsIgnoreCase(sslmode)) {
+                return sslMode;
+            }
+        }
+        throw new PSQLException(GT.tr("Invalid sslmode value: {0}", sslmode),
+                PSQLState.CONNECTION_UNABLE_TO_CONNECT);
+    }
+
+    public boolean requireEncryption() {
+        return this.compareTo(REQUIRE) >= 0;
+    }
+
+    public boolean verifyCertificate() {
+        return this == VERIFY_CA || this == VERIFY_FULL;
+    }
+
+    public boolean verifyPeerName() {
+        return this == VERIFY_FULL;
     }
-    throw new PSQLException(GT.tr("Invalid sslmode value: {0}", sslmode),
-        PSQLState.CONNECTION_UNABLE_TO_CONNECT);
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/StatementCancelState.java b/pgjdbc/src/main/java/org/postgresql/jdbc/StatementCancelState.java
index f149048..9cf623d 100644
--- a/pgjdbc/src/main/java/org/postgresql/jdbc/StatementCancelState.java
+++ b/pgjdbc/src/main/java/org/postgresql/jdbc/StatementCancelState.java
@@ -9,8 +9,8 @@ package org.postgresql.jdbc;
  * Represents {@link PgStatement#cancel()} state.
  */
 enum StatementCancelState {
-  IDLE,
-  IN_QUERY,
-  CANCELING,
-  CANCELLED
+    IDLE,
+    IN_QUERY,
+    CANCELING,
+    CANCELLED
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/StatementCancelTimerTask.java b/pgjdbc/src/main/java/org/postgresql/jdbc/StatementCancelTimerTask.java
index 5da4624..f7bedf8 100644
--- a/pgjdbc/src/main/java/org/postgresql/jdbc/StatementCancelTimerTask.java
+++ b/pgjdbc/src/main/java/org/postgresql/jdbc/StatementCancelTimerTask.java
@@ -13,27 +13,27 @@ import java.util.TimerTask;
  * might keep reference to the latest executed task in its local variable.
  */
 class StatementCancelTimerTask extends TimerTask {
-  private PgStatement statement;
+    private PgStatement statement;
 
-  StatementCancelTimerTask(PgStatement statement) {
-    this.statement = statement;
-  }
-
-  @Override
-  public boolean cancel() {
-    boolean result = super.cancel();
-    // Help GC to avoid keeping reference via TimerThread -> TimerTask -> statement -> connection
-    statement = null;
-    return result;
-  }
-
-  @Override
-  public void run() {
-    PgStatement statement = this.statement;
-    if (statement != null) {
-      statement.cancelIfStillNeeded(this);
+    StatementCancelTimerTask(PgStatement statement) {
+        this.statement = statement;
+    }
+
+    @Override
+    public boolean cancel() {
+        boolean result = super.cancel();
+        // Help GC to avoid keeping reference via TimerThread -> TimerTask -> statement -> connection
+        statement = null;
+        return result;
+    }
+
+    @Override
+    public void run() {
+        PgStatement statement = this.statement;
+        if (statement != null) {
+            statement.cancelIfStillNeeded(this);
+        }
+        // Help GC to avoid keeping reference via TimerThread -> TimerTask -> statement -> connection
+        this.statement = null;
     }
-    // Help GC to avoid keeping reference via TimerThread -> TimerTask -> statement -> connection
-    this.statement = null;
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/TimestampUtils.java b/pgjdbc/src/main/java/org/postgresql/jdbc/TimestampUtils.java
index 57b1597..3b05f45 100644
--- a/pgjdbc/src/main/java/org/postgresql/jdbc/TimestampUtils.java
+++ b/pgjdbc/src/main/java/org/postgresql/jdbc/TimestampUtils.java
@@ -43,1674 +43,1677 @@ import java.util.TimeZone;
  */
 @SuppressWarnings("try")
 public class TimestampUtils {
-  /**
-   * Number of milliseconds in one day.
-   */
-  private static final int ONEDAY = 24 * 3600 * 1000;
-  private static final char[] ZEROS = {'0', '0', '0', '0', '0', '0', '0', '0', '0'};
-  private static final char[][] NUMBERS;
-  private static final HashMap<String, TimeZone> GMT_ZONES = new HashMap<>();
-  private static final int MAX_NANOS_BEFORE_WRAP_ON_ROUND = 999999500;
-  private static final Duration ONE_MICROSECOND = Duration.ofNanos(1000);
-  // LocalTime.MAX is 23:59:59.999_999_999, and it wraps to 24:00:00 when nanos exceed 999_999_499
-  // since PostgreSQL has microsecond resolution only
-  private static final LocalTime MAX_TIME = LocalTime.MAX.minus(Duration.ofNanos(500));
-  private static final OffsetDateTime MAX_OFFSET_DATETIME = OffsetDateTime.MAX.minus(Duration.ofMillis(500));
-  private static final LocalDateTime MAX_LOCAL_DATETIME = LocalDateTime.MAX.minus(Duration.ofMillis(500));
-  // low value for dates is   4713 BC
-  private static final LocalDate MIN_LOCAL_DATE = LocalDate.of(4713, 1, 1).with(ChronoField.ERA, IsoEra.BCE.getValue());
-  private static final LocalDateTime MIN_LOCAL_DATETIME = MIN_LOCAL_DATE.atStartOfDay();
-  private static final OffsetDateTime MIN_OFFSET_DATETIME = MIN_LOCAL_DATETIME.atOffset(ZoneOffset.UTC);
-  private static final Duration PG_EPOCH_DIFF =
-      Duration.between(Instant.EPOCH, LocalDate.of(2000, 1, 1).atStartOfDay().toInstant(ZoneOffset.UTC));
+    /**
+     * Number of milliseconds in one day.
+     */
+    private static final int ONEDAY = 24 * 3600 * 1000;
+    private static final char[] ZEROS = {'0', '0', '0', '0', '0', '0', '0', '0', '0'};
+    private static final char[][] NUMBERS;
+    private static final HashMap<String, TimeZone> GMT_ZONES = new HashMap<>();
+    private static final int MAX_NANOS_BEFORE_WRAP_ON_ROUND = 999999500;
+    private static final Duration ONE_MICROSECOND = Duration.ofNanos(1000);
+    // LocalTime.MAX is 23:59:59.999_999_999, and it wraps to 24:00:00 when nanos exceed 999_999_499
+    // since PostgreSQL has microsecond resolution only
+    private static final LocalTime MAX_TIME = LocalTime.MAX.minus(Duration.ofNanos(500));
+    private static final OffsetDateTime MAX_OFFSET_DATETIME = OffsetDateTime.MAX.minus(Duration.ofMillis(500));
+    private static final LocalDateTime MAX_LOCAL_DATETIME = LocalDateTime.MAX.minus(Duration.ofMillis(500));
+    // low value for dates is   4713 BC
+    private static final LocalDate MIN_LOCAL_DATE = LocalDate.of(4713, 1, 1).with(ChronoField.ERA, IsoEra.BCE.getValue());
+    private static final LocalDateTime MIN_LOCAL_DATETIME = MIN_LOCAL_DATE.atStartOfDay();
+    private static final OffsetDateTime MIN_OFFSET_DATETIME = MIN_LOCAL_DATETIME.atOffset(ZoneOffset.UTC);
+    private static final Duration PG_EPOCH_DIFF =
+            Duration.between(Instant.EPOCH, LocalDate.of(2000, 1, 1).atStartOfDay().toInstant(ZoneOffset.UTC));
 
-  private static final Field DEFAULT_TIME_ZONE_FIELD;
+    private static final Field DEFAULT_TIME_ZONE_FIELD;
 
-  private static final TimeZone UTC_TIMEZONE = TimeZone.getTimeZone(ZoneOffset.UTC);
+    private static final TimeZone UTC_TIMEZONE = TimeZone.getTimeZone(ZoneOffset.UTC);
 
-  private TimeZone prevDefaultZoneFieldValue;
-  private TimeZone defaultTimeZoneCache;
+    static {
+        // The expected maximum value is 60 (seconds), so 64 is used "just in case"
+        NUMBERS = new char[64][];
+        for (int i = 0; i < NUMBERS.length; i++) {
+            NUMBERS[i] = ((i < 10 ? "0" : "") + Integer.toString(i)).toCharArray();
+        }
 
-  static {
-    // The expected maximum value is 60 (seconds), so 64 is used "just in case"
-    NUMBERS = new char[64][];
-    for (int i = 0; i < NUMBERS.length; i++) {
-      NUMBERS[i] = ((i < 10 ? "0" : "") + Integer.toString(i)).toCharArray();
+        // Backend's gmt-3 means GMT+03 in Java. Here a map is created so gmt-3 can be converted to
+        // java TimeZone
+        for (int i = -12; i <= 14; i++) {
+            TimeZone timeZone;
+            String pgZoneName;
+            if (i == 0) {
+                timeZone = TimeZone.getTimeZone("GMT");
+                pgZoneName = "GMT";
+            } else {
+                timeZone = TimeZone.getTimeZone("GMT" + (i <= 0 ? "+" : "-") + Math.abs(i));
+                pgZoneName = "GMT" + (i >= 0 ? "+" : "-");
+            }
+
+            if (i == 0) {
+                GMT_ZONES.put(pgZoneName, timeZone);
+                continue;
+            }
+            GMT_ZONES.put(pgZoneName + Math.abs(i), timeZone);
+            GMT_ZONES.put(pgZoneName + new String(NUMBERS[Math.abs(i)]), timeZone);
+        }
+        // Fast path to getting the default timezone.
+        // Accessing the default timezone over and over creates a clone with regular API.
+        // Because we don't mutate that object in our use of it, we can access the field directly.
+        // This saves the creation of a clone every time, and the memory associated with all these clones.
+        Field tzField;
+        try {
+            tzField = null;
+            // Avoid reflective access in Java 9+
+            if (JavaVersion.getRuntimeVersion().compareTo(JavaVersion.v1_8) <= 0) {
+                tzField = TimeZone.class.getDeclaredField("defaultTimeZone");
+                tzField.setAccessible(true);
+                TimeZone defaultTz = TimeZone.getDefault();
+                Object tzFromField = tzField.get(null);
+                if (defaultTz == null || !defaultTz.equals(tzFromField)) {
+                    tzField = null;
+                }
+            }
+        } catch (Exception e) {
+            tzField = null;
+        }
+        DEFAULT_TIME_ZONE_FIELD = tzField;
     }
 
-    // Backend's gmt-3 means GMT+03 in Java. Here a map is created so gmt-3 can be converted to
-    // java TimeZone
-    for (int i = -12; i <= 14; i++) {
-      TimeZone timeZone;
-      String pgZoneName;
-      if (i == 0) {
-        timeZone = TimeZone.getTimeZone("GMT");
-        pgZoneName = "GMT";
-      } else {
-        timeZone = TimeZone.getTimeZone("GMT" + (i <= 0 ? "+" : "-") + Math.abs(i));
-        pgZoneName = "GMT" + (i >= 0 ? "+" : "-");
-      }
+    private final StringBuilder sbuf = new StringBuilder();
+    // This calendar is used when user provides calendar in setX(, Calendar) method.
+    // It ensures calendar is Gregorian.
+    private final Calendar calendarWithUserTz = new GregorianCalendar();
+    /**
+     * True if the backend uses doubles for time values. False if long is used.
+     */
+    private final boolean usesDouble;
+    private final Provider<TimeZone> timeZoneProvider;
+    private final ResourceLock lock = new ResourceLock();
+    private TimeZone prevDefaultZoneFieldValue;
+    private TimeZone defaultTimeZoneCache;
+    private Calendar calCache;
+    private ZoneOffset calCacheZone;
 
-      if (i == 0) {
-        GMT_ZONES.put(pgZoneName, timeZone);
-        continue;
-      }
-      GMT_ZONES.put(pgZoneName + Math.abs(i), timeZone);
-      GMT_ZONES.put(pgZoneName + new String(NUMBERS[Math.abs(i)]), timeZone);
-    }
-    // Fast path to getting the default timezone.
-    // Accessing the default timezone over and over creates a clone with regular API.
-    // Because we don't mutate that object in our use of it, we can access the field directly.
-    // This saves the creation of a clone everytime, and the memory associated to all these clones.
-    Field tzField;
-    try {
-      tzField = null;
-      // Avoid reflective access in Java 9+
-      if (JavaVersion.getRuntimeVersion().compareTo(JavaVersion.v1_8) <= 0) {
-        tzField = TimeZone.class.getDeclaredField("defaultTimeZone");
-        tzField.setAccessible(true);
-        TimeZone defaultTz = TimeZone.getDefault();
-        Object tzFromField = tzField.get(null);
-        if (defaultTz == null || !defaultTz.equals(tzFromField)) {
-          tzField = null;
-        }
-      }
-    } catch (Exception e) {
-      tzField = null;
-    }
-    DEFAULT_TIME_ZONE_FIELD = tzField;
-  }
-
-  private final StringBuilder sbuf = new StringBuilder();
-
-  // This calendar is used when user provides calendar in setX(, Calendar) method.
-  // It ensures calendar is Gregorian.
-  private final Calendar calendarWithUserTz = new GregorianCalendar();
-
-  private Calendar calCache;
-  private ZoneOffset calCacheZone;
-
-  /**
-   * True if the backend uses doubles for time values. False if long is used.
-   */
-  private final boolean usesDouble;
-  private final Provider<TimeZone> timeZoneProvider;
-  private final ResourceLock lock = new ResourceLock();
-
-  public TimestampUtils(boolean usesDouble, Provider<TimeZone> timeZoneProvider) {
-    this.usesDouble = usesDouble;
-    this.timeZoneProvider = timeZoneProvider;
-  }
-
-  private Calendar getCalendar(ZoneOffset offset) {
-    if (calCache != null && Objects.equals(offset, calCacheZone)) {
-      return calCache;
+    public TimestampUtils(boolean usesDouble, Provider<TimeZone> timeZoneProvider) {
+        this.usesDouble = usesDouble;
+        this.timeZoneProvider = timeZoneProvider;
     }
 
-    // normally we would use:
-    // calCache = new GregorianCalendar(TimeZone.getTimeZone(offset));
-    // But this seems to cause issues for some crazy offsets as returned by server for BC dates!
-    final String tzid = offset.getTotalSeconds() == 0 ? "UTC" : "GMT".concat(offset.getId());
-    final TimeZone syntheticTZ = new SimpleTimeZone(offset.getTotalSeconds() * 1000, tzid);
-    calCache = new GregorianCalendar(syntheticTZ);
-    calCacheZone = offset;
-    return calCache;
-  }
+    /**
+     * Returns true when microsecond part of the time should be increased
+     * when rounding to microseconds
+     *
+     * @param nanos nanosecond part of the time
+     * @return true when microsecond part of the time should be increased when rounding to microseconds
+     */
+    private static boolean nanosExceed499(int nanos) {
+        return nanos % 1000 > 499;
+    }
 
-  private static class ParsedTimestamp {
-    boolean hasDate;
-    int era = GregorianCalendar.AD;
-    int year = 1970;
-    int month = 1;
+    private static void appendDate(StringBuilder sb, Calendar cal) {
+        int year = cal.get(Calendar.YEAR);
+        int month = cal.get(Calendar.MONTH) + 1;
+        int day = cal.get(Calendar.DAY_OF_MONTH);
+        appendDate(sb, year, month, day);
+    }
 
-    boolean hasTime;
-    int day = 1;
-    int hour;
-    int minute;
-    int second;
-    int nanos;
-
-    boolean hasOffset;
-    ZoneOffset offset = ZoneOffset.UTC;
-  }
-
-  private static class ParsedBinaryTimestamp {
-    Infinity infinity;
-    long millis;
-    int nanos;
-  }
-
-  enum Infinity {
-    POSITIVE,
-    NEGATIVE
-  }
-
-  /**
-   * Load date/time information into the provided calendar returning the fractional seconds.
-   */
-  private ParsedTimestamp parseBackendTimestamp(String str) throws SQLException {
-    char[] s = str.toCharArray();
-    int slen = s.length;
-
-    // This is pretty gross..
-    ParsedTimestamp result = new ParsedTimestamp();
-
-    // We try to parse these fields in order; all are optional
-    // (but some combinations don't make sense, e.g. if you have
-    // both date and time then they must be whitespace-separated).
-    // At least one of date and time must be present.
-
-    // leading whitespace
-    // yyyy-mm-dd
-    // whitespace
-    // hh:mm:ss
-    // whitespace
-    // timezone in one of the formats: +hh, -hh, +hh:mm, -hh:mm
-    // whitespace
-    // if date is present, an era specifier: AD or BC
-    // trailing whitespace
-
-    try {
-      int start = skipWhitespace(s, 0); // Skip leading whitespace
-      int end = firstNonDigit(s, start);
-      int num;
-      char sep;
-
-      // Possibly read date.
-      if (charAt(s, end) == '-') {
+    private static void appendDate(StringBuilder sb, int year, int month, int day) {
+        // always use at least four digits for the year so very
+        // early years, like 2, don't get misinterpreted
         //
-        // Date
-        //
-        result.hasDate = true;
-
-        // year
-        result.year = number(s, start, end);
-        start = end + 1; // Skip '-'
-
-        // month
-        end = firstNonDigit(s, start);
-        result.month = number(s, start, end);
-
-        sep = charAt(s, end);
-        if (sep != '-') {
-          throw new NumberFormatException("Expected date to be dash-separated, got '" + sep + "'");
+        int prevLength = sb.length();
+        sb.append(year);
+        int leadingZerosForYear = 4 - (sb.length() - prevLength);
+        if (leadingZerosForYear > 0) {
+            sb.insert(prevLength, ZEROS, 0, leadingZerosForYear);
         }
 
-        start = end + 1; // Skip '-'
+        sb.append('-');
+        sb.append(NUMBERS[month]);
+        sb.append('-');
+        sb.append(NUMBERS[day]);
+    }
 
-        // day of month
-        end = firstNonDigit(s, start);
-        result.day = number(s, start, end);
+    private static void appendTime(StringBuilder sb, Calendar cal, int nanos) {
+        int hours = cal.get(Calendar.HOUR_OF_DAY);
+        int minutes = cal.get(Calendar.MINUTE);
+        int seconds = cal.get(Calendar.SECOND);
+        appendTime(sb, hours, minutes, seconds, nanos);
+    }
 
-        start = skipWhitespace(s, end); // Skip trailing whitespace
-      }
+    /**
+     * Appends time part to the {@code StringBuilder} in PostgreSQL-compatible format.
+     * The function truncates {@code nanos} to microseconds. The value is expected to be rounded
+     * beforehand.
+     *
+     * @param sb      destination
+     * @param hours   hours
+     * @param minutes minutes
+     * @param seconds seconds
+     * @param nanos   nanoseconds
+     */
+    private static void appendTime(StringBuilder sb, int hours, int minutes, int seconds, int nanos) {
+        sb.append(NUMBERS[hours]);
 
-      // Possibly read time.
-      if (Character.isDigit(charAt(s, start))) {
+        sb.append(':');
+        sb.append(NUMBERS[minutes]);
+
+        sb.append(':');
+        sb.append(NUMBERS[seconds]);
+
+        // Add nanoseconds.
+        // This won't work for server versions < 7.2 which only want
+        // a two digit fractional second, but we don't need to support 7.1
+        // anymore and getting the version number here is difficult.
         //
-        // Time.
-        //
-
-        result.hasTime = true;
-
-        // Hours
-
-        end = firstNonDigit(s, start);
-        result.hour = number(s, start, end);
-
-        sep = charAt(s, end);
-        if (sep != ':') {
-          throw new NumberFormatException("Expected time to be colon-separated, got '" + sep + "'");
+        if (nanos < 1000) {
+            return;
+        }
+        sb.append('.');
+        int len = sb.length();
+        sb.append(nanos / 1000); // append microseconds
+        int needZeros = 6 - (sb.length() - len);
+        if (needZeros > 0) {
+            sb.insert(len, ZEROS, 0, needZeros);
         }
 
-        start = end + 1; // Skip ':'
+        int end = sb.length() - 1;
+        while (sb.charAt(end) == '0') {
+            sb.deleteCharAt(end);
+            end--;
+        }
+    }
 
-        // minutes
+    private static void appendEra(StringBuilder sb, Calendar cal) {
+        if (cal.get(Calendar.ERA) == GregorianCalendar.BC) {
+            sb.append(" BC");
+        }
+    }
 
-        end = firstNonDigit(s, start);
-        result.minute = number(s, start, end);
+    private static void appendDate(StringBuilder sb, LocalDate localDate) {
+        int year = localDate.get(ChronoField.YEAR_OF_ERA);
+        int month = localDate.getMonthValue();
+        int day = localDate.getDayOfMonth();
+        appendDate(sb, year, month, day);
+    }
 
-        sep = charAt(s, end);
-        if (sep != ':') {
-          throw new NumberFormatException("Expected time to be colon-separated, got '" + sep + "'");
+    private static void appendTime(StringBuilder sb, LocalTime localTime) {
+        int hours = localTime.getHour();
+        int minutes = localTime.getMinute();
+        int seconds = localTime.getSecond();
+        int nanos = localTime.getNano();
+        appendTime(sb, hours, minutes, seconds, nanos);
+    }
+
+    private static void appendEra(StringBuilder sb, LocalDate localDate) {
+        if (localDate.get(ChronoField.ERA) == IsoEra.BCE.getValue()) {
+            sb.append(" BC");
+        }
+    }
+
+    @SuppressWarnings("deprecation")
+    private static int skipWhitespace(char[] s, int start) {
+        int slen = s.length;
+        for (int i = start; i < slen; i++) {
+            if (!Character.isSpace(s[i])) {
+                return i;
+            }
+        }
+        return slen;
+    }
+
+    private static int firstNonDigit(char[] s, int start) {
+        int slen = s.length;
+        for (int i = start; i < slen; i++) {
+            if (!Character.isDigit(s[i])) {
+                return i;
+            }
+        }
+        return slen;
+    }
+
+    private static int number(char[] s, int start, int end) {
+        if (start >= end) {
+            throw new NumberFormatException();
+        }
+        int n = 0;
+        for (int i = start; i < end; i++) {
+            n = 10 * n + (s[i] - '0');
+        }
+        return n;
+    }
+
+    private static char charAt(char[] s, int pos) {
+        if (pos >= 0 && pos < s.length) {
+            return s[pos];
+        }
+        return '\0';
+    }
+
+    private static boolean isSimpleTimeZone(String id) {
+        return id.startsWith("GMT") || id.startsWith("UTC");
+    }
+
+    /**
+     * Converts the given postgresql seconds to java seconds. Reverse engineered by inserting varying
+     * dates to postgresql and tuning the formula until the java dates matched. See {@link #toPgSecs}
+     * for the reverse operation.
+     *
+     * @param secs Postgresql seconds.
+     * @return Java seconds.
+     */
+    private static long toJavaSecs(long secs) {
+        // postgres epoch to java epoch
+        secs += PG_EPOCH_DIFF.getSeconds();
+
+        // Julian/Gregorian calendar cutoff point
+        if (secs < -12219292800L) { // October 4, 1582 -> October 15, 1582
+            secs += 86400 * 10;
+            if (secs < -14825808000L) { // 1500-02-28 -> 1500-03-01
+                int extraLeaps = (int) ((secs + 14825808000L) / 3155760000L);
+                extraLeaps--;
+                extraLeaps -= extraLeaps / 4;
+                secs += extraLeaps * 86400L;
+            }
+        }
+        return secs;
+    }
+
+    /**
+     * Converts the given java seconds to postgresql seconds. See {@link #toJavaSecs} for the reverse
+     * operation. The conversion is valid for any year 100 BC onwards.
+     *
+     * @param secs Java seconds.
+     * @return Postgresql seconds.
+     */
+    private static long toPgSecs(long secs) {
+        // java epoch to postgres epoch
+        secs -= PG_EPOCH_DIFF.getSeconds();
+
+        // Julian/Gregorian calendar cutoff point
+        if (secs < -13165977600L) { // October 15, 1582 -> October 4, 1582
+            secs -= 86400 * 10;
+            if (secs < -15773356800L) { // 1500-03-01 -> 1500-02-28
+                int years = (int) ((secs + 15773356800L) / -3155823050L);
+                years++;
+                years -= years / 4;
+                secs += years * 86400L;
+            }
         }
 
-        start = end + 1; // Skip ':'
+        return secs;
+    }
 
-        // seconds
+    /**
+     * Converts backend's TimeZone parameter to java format.
+     * Notable difference: backend's gmt-3 is GMT+03 in Java.
+     *
+     * @param timeZone time zone to use
+     * @return java TimeZone
+     */
+    public static TimeZone parseBackendTimeZone(String timeZone) {
+        if (timeZone.startsWith("GMT")) {
+            TimeZone tz = GMT_ZONES.get(timeZone);
+            if (tz != null) {
+                return tz;
+            }
+        }
+        return TimeZone.getTimeZone(timeZone);
+    }
 
-        end = firstNonDigit(s, start);
-        result.second = number(s, start, end);
-        start = end;
+    private static long floorDiv(long x, long y) {
+        long r = x / y;
+        // if the signs are different and modulo not zero, round down
+        if ((x ^ y) < 0 && (r * y != x)) {
+            r--;
+        }
+        return r;
+    }
 
-        // Fractional seconds.
-        if (charAt(s, start) == '.') {
-          end = firstNonDigit(s, start + 1); // Skip '.'
-          num = number(s, start + 1, end);
+    private static long floorMod(long x, long y) {
+        return x - floorDiv(x, y) * y;
+    }
 
-          for (int numlength = end - (start + 1); numlength < 9; numlength++) {
-            num *= 10;
-          }
-
-          result.nanos = num;
-          start = end;
+    private Calendar getCalendar(ZoneOffset offset) {
+        if (calCache != null && Objects.equals(offset, calCacheZone)) {
+            return calCache;
         }
 
-        start = skipWhitespace(s, start); // Skip trailing whitespace
-      }
+        // normally we would use:
+        // calCache = new GregorianCalendar(TimeZone.getTimeZone(offset));
+        // But this seems to cause issues for some crazy offsets as returned by server for BC dates!
+        final String tzid = offset.getTotalSeconds() == 0 ? "UTC" : "GMT".concat(offset.getId());
+        final TimeZone syntheticTZ = new SimpleTimeZone(offset.getTotalSeconds() * 1000, tzid);
+        calCache = new GregorianCalendar(syntheticTZ);
+        calCacheZone = offset;
+        return calCache;
+    }
 
-      // Possibly read timezone.
-      sep = charAt(s, start);
-      if (sep == '-' || sep == '+') {
-        result.hasOffset = true;
+    /**
+     * Load date/time information into the provided calendar returning the fractional seconds.
+     */
+    private ParsedTimestamp parseBackendTimestamp(String str) throws SQLException {
+        char[] s = str.toCharArray();
+        int slen = s.length;
 
-        int tzsign = sep == '-' ? -1 : 1;
-        int tzhr;
-        int tzmin;
-        int tzsec;
+        // This is pretty gross..
+        ParsedTimestamp result = new ParsedTimestamp();
 
-        end = firstNonDigit(s, start + 1); // Skip +/-
-        tzhr = number(s, start + 1, end);
-        start = end;
+        // We try to parse these fields in order; all are optional
+        // (but some combinations don't make sense, e.g. if you have
+        // both date and time then they must be whitespace-separated).
+        // At least one of date and time must be present.
 
-        sep = charAt(s, start);
-        if (sep == ':') {
-          end = firstNonDigit(s, start + 1); // Skip ':'
-          tzmin = number(s, start + 1, end);
-          start = end;
+        // leading whitespace
+        // yyyy-mm-dd
+        // whitespace
+        // hh:mm:ss
+        // whitespace
+        // timezone in one of the formats: +hh, -hh, +hh:mm, -hh:mm
+        // whitespace
+        // if date is present, an era specifier: AD or BC
+        // trailing whitespace
+
+        try {
+            int start = skipWhitespace(s, 0); // Skip leading whitespace
+            int end = firstNonDigit(s, start);
+            int num;
+            char sep;
+
+            // Possibly read date.
+            if (charAt(s, end) == '-') {
+                //
+                // Date
+                //
+                result.hasDate = true;
+
+                // year
+                result.year = number(s, start, end);
+                start = end + 1; // Skip '-'
+
+                // month
+                end = firstNonDigit(s, start);
+                result.month = number(s, start, end);
+
+                sep = charAt(s, end);
+                if (sep != '-') {
+                    throw new NumberFormatException("Expected date to be dash-separated, got '" + sep + "'");
+                }
+
+                start = end + 1; // Skip '-'
+
+                // day of month
+                end = firstNonDigit(s, start);
+                result.day = number(s, start, end);
+
+                start = skipWhitespace(s, end); // Skip trailing whitespace
+            }
+
+            // Possibly read time.
+            if (Character.isDigit(charAt(s, start))) {
+                //
+                // Time.
+                //
+
+                result.hasTime = true;
+
+                // Hours
+
+                end = firstNonDigit(s, start);
+                result.hour = number(s, start, end);
+
+                sep = charAt(s, end);
+                if (sep != ':') {
+                    throw new NumberFormatException("Expected time to be colon-separated, got '" + sep + "'");
+                }
+
+                start = end + 1; // Skip ':'
+
+                // minutes
+
+                end = firstNonDigit(s, start);
+                result.minute = number(s, start, end);
+
+                sep = charAt(s, end);
+                if (sep != ':') {
+                    throw new NumberFormatException("Expected time to be colon-separated, got '" + sep + "'");
+                }
+
+                start = end + 1; // Skip ':'
+
+                // seconds
+
+                end = firstNonDigit(s, start);
+                result.second = number(s, start, end);
+                start = end;
+
+                // Fractional seconds.
+                if (charAt(s, start) == '.') {
+                    end = firstNonDigit(s, start + 1); // Skip '.'
+                    num = number(s, start + 1, end);
+
+                    for (int numlength = end - (start + 1); numlength < 9; numlength++) {
+                        num *= 10;
+                    }
+
+                    result.nanos = num;
+                    start = end;
+                }
+
+                start = skipWhitespace(s, start); // Skip trailing whitespace
+            }
+
+            // Possibly read timezone.
+            sep = charAt(s, start);
+            if (sep == '-' || sep == '+') {
+                result.hasOffset = true;
+
+                int tzsign = sep == '-' ? -1 : 1;
+                int tzhr;
+                int tzmin;
+                int tzsec;
+
+                end = firstNonDigit(s, start + 1); // Skip +/-
+                tzhr = number(s, start + 1, end);
+                start = end;
+
+                sep = charAt(s, start);
+                if (sep == ':') {
+                    end = firstNonDigit(s, start + 1); // Skip ':'
+                    tzmin = number(s, start + 1, end);
+                    start = end;
+                } else {
+                    tzmin = 0;
+                }
+
+                tzsec = 0;
+                sep = charAt(s, start);
+                if (sep == ':') {
+                    end = firstNonDigit(s, start + 1); // Skip ':'
+                    tzsec = number(s, start + 1, end);
+                    start = end;
+                }
+
+                result.offset = ZoneOffset.ofHoursMinutesSeconds(tzsign * tzhr, tzsign * tzmin, tzsign * tzsec);
+
+                start = skipWhitespace(s, start); // Skip trailing whitespace
+            }
+
+            if (result.hasDate && start < slen) {
+                String eraString = new String(s, start, slen - start);
+                if (eraString.startsWith("AD")) {
+                    result.era = GregorianCalendar.AD;
+                    start += 2;
+                } else if (eraString.startsWith("BC")) {
+                    result.era = GregorianCalendar.BC;
+                    start += 2;
+                }
+            }
+
+            if (start < slen) {
+                throw new NumberFormatException(
+                        "Trailing junk on timestamp: '" + new String(s, start, slen - start) + "'");
+            }
+
+            if (!result.hasTime && !result.hasDate) {
+                throw new NumberFormatException("Timestamp has neither date nor time");
+            }
+
+        } catch (NumberFormatException nfe) {
+            throw new PSQLException(
+                    GT.tr("Bad value for type timestamp/date/time: {0}", str),
+                    PSQLState.BAD_DATETIME_FORMAT, nfe);
+        }
+
+        return result;
+    }
+
+    /**
+     * Parse a string and return a timestamp representing its value.
+     *
+     * @param cal calendar to be used to parse the input string
+     * @param s   The ISO formatted date string to parse.
+     * @return null if s is null or a timestamp of the parsed string s.
+     * @throws SQLException if there is a problem parsing s.
+     */
+    public Timestamp toTimestamp(Calendar cal,
+                                 String s) throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            if (s == null) {
+                return null;
+            }
+
+            int slen = s.length();
+
+            // convert postgres's infinity values to internal infinity magic value
+            if (slen == 8 && "infinity".equals(s)) {
+                return new Timestamp(PGStatement.DATE_POSITIVE_INFINITY);
+            }
+
+            if (slen == 9 && "-infinity".equals(s)) {
+                return new Timestamp(PGStatement.DATE_NEGATIVE_INFINITY);
+            }
+
+            ParsedTimestamp ts = parseBackendTimestamp(s);
+            Calendar useCal = ts.hasOffset ? getCalendar(ts.offset) : setupCalendar(cal);
+            useCal.set(Calendar.ERA, ts.era);
+            useCal.set(Calendar.YEAR, ts.year);
+            useCal.set(Calendar.MONTH, ts.month - 1);
+            useCal.set(Calendar.DAY_OF_MONTH, ts.day);
+            useCal.set(Calendar.HOUR_OF_DAY, ts.hour);
+            useCal.set(Calendar.MINUTE, ts.minute);
+            useCal.set(Calendar.SECOND, ts.second);
+            useCal.set(Calendar.MILLISECOND, 0);
+
+            Timestamp result = new Timestamp(useCal.getTimeInMillis());
+            result.setNanos(ts.nanos);
+            return result;
+        }
+    }
+
+    /**
+     * Parse a string and return a LocalTime representing its value.
+     *
+     * @param s The ISO formatted time string to parse.
+     * @return null if s is null or a LocalTime of the parsed string s.
+     * @throws SQLException if there is a problem parsing s.
+     */
+    public LocalTime toLocalTime(String s) throws SQLException {
+        if (s == null) {
+            return null;
+        }
+
+        if ("24:00:00".equals(s)) {
+            return LocalTime.MAX;
+        }
+
+        try {
+            return LocalTime.parse(s);
+        } catch (DateTimeParseException nfe) {
+            throw new PSQLException(
+                    GT.tr("Bad value for type timestamp/date/time: {0}", s),
+                    PSQLState.BAD_DATETIME_FORMAT, nfe);
+        }
+
+    }
+
+    /**
+     * Returns the offset time object matching the given bytes with Oid#TIMETZ or Oid#TIME.
+     *
+     * @param bytes The binary encoded TIMETZ/TIME value.
+     * @return The parsed offset time object.
+     * @throws PSQLException If binary format could not be parsed.
+     */
+    public OffsetTime toOffsetTimeBin(byte[] bytes) throws PSQLException {
+        if (bytes.length != 12) {
+            throw new PSQLException(GT.tr("Unsupported binary encoding of {0}.", "time"),
+                    PSQLState.BAD_DATETIME_FORMAT);
+        }
+
+        final long micros;
+
+        if (usesDouble) {
+            double seconds = ByteConverter.float8(bytes, 0);
+            micros = (long) (seconds * 1_000_000d);
         } else {
-          tzmin = 0;
+            micros = ByteConverter.int8(bytes, 0);
         }
 
-        tzsec = 0;
-        sep = charAt(s, start);
-        if (sep == ':') {
-          end = firstNonDigit(s, start + 1); // Skip ':'
-          tzsec = number(s, start + 1, end);
-          start = end;
+        // postgres offset is negative, so we have to flip sign:
+        final ZoneOffset timeOffset = ZoneOffset.ofTotalSeconds(-ByteConverter.int4(bytes, 8));
+
+        return OffsetTime.of(LocalTime.ofNanoOfDay(Math.multiplyExact(micros, 1000L)), timeOffset);
+    }
+
+    /**
+     * Parse a string and return a OffsetTime representing its value.
+     *
+     * @param s The ISO formatted time string to parse.
+     * @return null if s is null or a OffsetTime of the parsed string s.
+     * @throws SQLException if there is a problem parsing s.
+     */
+    public OffsetTime toOffsetTime(String s) throws SQLException {
+        if (s == null) {
+            return null;
         }
 
-        result.offset = ZoneOffset.ofHoursMinutesSeconds(tzsign * tzhr, tzsign * tzmin, tzsign * tzsec);
-
-        start = skipWhitespace(s, start); // Skip trailing whitespace
-      }
-
-      if (result.hasDate && start < slen) {
-        String eraString = new String(s, start, slen - start);
-        if (eraString.startsWith("AD")) {
-          result.era = GregorianCalendar.AD;
-          start += 2;
-        } else if (eraString.startsWith("BC")) {
-          result.era = GregorianCalendar.BC;
-          start += 2;
+        if (s.startsWith("24:00:00")) {
+            return OffsetTime.MAX;
         }
-      }
 
-      if (start < slen) {
-        throw new NumberFormatException(
-            "Trailing junk on timestamp: '" + new String(s, start, slen - start) + "'");
-      }
-
-      if (!result.hasTime && !result.hasDate) {
-        throw new NumberFormatException("Timestamp has neither date nor time");
-      }
-
-    } catch (NumberFormatException nfe) {
-      throw new PSQLException(
-          GT.tr("Bad value for type timestamp/date/time: {0}", str),
-          PSQLState.BAD_DATETIME_FORMAT, nfe);
+        final ParsedTimestamp ts = parseBackendTimestamp(s);
+        return OffsetTime.of(ts.hour, ts.minute, ts.second, ts.nanos, ts.offset);
     }
 
-    return result;
-  }
-
-  /**
-   * Parse a string and return a timestamp representing its value.
-   *
-   * @param cal calendar to be used to parse the input string
-   * @param s The ISO formated date string to parse.
-   * @return null if s is null or a timestamp of the parsed string s.
-   * @throws SQLException if there is a problem parsing s.
-   */
-  public Timestamp toTimestamp(Calendar cal,
-      String s) throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      if (s == null) {
-        return null;
-      }
-
-      int slen = s.length();
-
-      // convert postgres's infinity values to internal infinity magic value
-      if (slen == 8 && "infinity".equals(s)) {
-        return new Timestamp(PGStatement.DATE_POSITIVE_INFINITY);
-      }
-
-      if (slen == 9 && "-infinity".equals(s)) {
-        return new Timestamp(PGStatement.DATE_NEGATIVE_INFINITY);
-      }
-
-      ParsedTimestamp ts = parseBackendTimestamp(s);
-      Calendar useCal = ts.hasOffset ? getCalendar(ts.offset) : setupCalendar(cal);
-      useCal.set(Calendar.ERA, ts.era);
-      useCal.set(Calendar.YEAR, ts.year);
-      useCal.set(Calendar.MONTH, ts.month - 1);
-      useCal.set(Calendar.DAY_OF_MONTH, ts.day);
-      useCal.set(Calendar.HOUR_OF_DAY, ts.hour);
-      useCal.set(Calendar.MINUTE, ts.minute);
-      useCal.set(Calendar.SECOND, ts.second);
-      useCal.set(Calendar.MILLISECOND, 0);
-
-      Timestamp result = new Timestamp(useCal.getTimeInMillis());
-      result.setNanos(ts.nanos);
-      return result;
-    }
-  }
-
-  /**
-   * Parse a string and return a LocalTime representing its value.
-   *
-   * @param s The ISO formated time string to parse.
-   * @return null if s is null or a LocalTime of the parsed string s.
-   * @throws SQLException if there is a problem parsing s.
-   */
-  public LocalTime toLocalTime(String s) throws SQLException {
-    if (s == null) {
-      return null;
-    }
-
-    if ("24:00:00".equals(s)) {
-      return LocalTime.MAX;
-    }
-
-    try {
-      return LocalTime.parse(s);
-    } catch (DateTimeParseException nfe) {
-      throw new PSQLException(
-          GT.tr("Bad value for type timestamp/date/time: {0}", s),
-          PSQLState.BAD_DATETIME_FORMAT, nfe);
-    }
-
-  }
-
-  /**
-   * Returns the offset time object matching the given bytes with Oid#TIMETZ or Oid#TIME.
-   *
-   * @param bytes The binary encoded TIMETZ/TIME value.
-   * @return The parsed offset time object.
-   * @throws PSQLException If binary format could not be parsed.
-   */
-  public OffsetTime toOffsetTimeBin(byte[] bytes) throws PSQLException {
-    if (bytes.length != 12) {
-      throw new PSQLException(GT.tr("Unsupported binary encoding of {0}.", "time"),
-          PSQLState.BAD_DATETIME_FORMAT);
-    }
-
-    final long micros;
-
-    if (usesDouble) {
-      double seconds = ByteConverter.float8(bytes, 0);
-      micros = (long) (seconds * 1_000_000d);
-    } else {
-      micros = ByteConverter.int8(bytes, 0);
-    }
-
-    // postgres offset is negative, so we have to flip sign:
-    final ZoneOffset timeOffset = ZoneOffset.ofTotalSeconds(-ByteConverter.int4(bytes, 8));
-
-    return OffsetTime.of(LocalTime.ofNanoOfDay(Math.multiplyExact(micros, 1000L)), timeOffset);
-  }
-
-  /**
-   * Parse a string and return a OffsetTime representing its value.
-   *
-   * @param s The ISO formated time string to parse.
-   * @return null if s is null or a OffsetTime of the parsed string s.
-   * @throws SQLException if there is a problem parsing s.
-   */
-  public OffsetTime toOffsetTime(String s) throws SQLException {
-    if (s == null) {
-      return null;
-    }
-
-    if (s.startsWith("24:00:00")) {
-      return OffsetTime.MAX;
-    }
-
-    final ParsedTimestamp ts = parseBackendTimestamp(s);
-    return OffsetTime.of(ts.hour, ts.minute, ts.second, ts.nanos, ts.offset);
-  }
-
-  /**
-   * Parse a string and return a LocalDateTime representing its value.
-   *
-   * @param s The ISO formated date string to parse.
-   * @return null if s is null or a LocalDateTime of the parsed string s.
-   * @throws SQLException if there is a problem parsing s.
-   */
-  public LocalDateTime toLocalDateTime(String s) throws SQLException {
-    if (s == null) {
-      return null;
-    }
-
-    int slen = s.length();
-
-    // convert postgres's infinity values to internal infinity magic value
-    if (slen == 8 && "infinity".equals(s)) {
-      return LocalDateTime.MAX;
-    }
-
-    if (slen == 9 && "-infinity".equals(s)) {
-      return LocalDateTime.MIN;
-    }
-
-    ParsedTimestamp ts = parseBackendTimestamp(s);
-
-    // intentionally ignore time zone
-    // 2004-10-19 10:23:54+03:00 is 2004-10-19 10:23:54 locally
-    LocalDateTime result = LocalDateTime.of(ts.year, ts.month, ts.day, ts.hour, ts.minute, ts.second, ts.nanos);
-    if (ts.era == GregorianCalendar.BC) {
-      return result.with(ChronoField.ERA, IsoEra.BCE.getValue());
-    } else {
-      return result;
-    }
-  }
-
-  /**
-   * Returns the offset date time object matching the given bytes with Oid#TIMETZ.
-   * Not used internally anymore, function is here to retain compatibility with previous versions
-   *
-   * @param t the time value
-   * @return the matching offset date time
-   * @deprecated was used internally, and not used anymore
-   */
-  @Deprecated
-  public OffsetDateTime toOffsetDateTime(Time t) {
-    // hardcode utc because the backend does not provide us the timezone
-    // hardcode UNIX epoch, JDBC requires OffsetDateTime but doesn't describe what date should be used
-    return t.toLocalTime().atDate(LocalDate.of(1970, 1, 1)).atOffset(ZoneOffset.UTC);
-  }
-
-  /**
-   * Parse a string and return a OffsetDateTime representing its value.
-   *
-   * @param s The ISO formatted date string to parse.
-   * @return null if s is null or a OffsetDateTime of the parsed string s.
-   * @throws SQLException if there is a problem parsing s.
-   */
-  public OffsetDateTime toOffsetDateTime(
-      String s) throws SQLException {
-    if (s == null) {
-      return null;
-    }
-
-    int slen = s.length();
-
-    // convert postgres's infinity values to internal infinity magic value
-    if (slen == 8 && "infinity".equals(s)) {
-      return OffsetDateTime.MAX;
-    }
-
-    if (slen == 9 && "-infinity".equals(s)) {
-      return OffsetDateTime.MIN;
-    }
-
-    final ParsedTimestamp ts = parseBackendTimestamp(s);
-    OffsetDateTime result =
-        OffsetDateTime.of(ts.year, ts.month, ts.day, ts.hour, ts.minute, ts.second, ts.nanos, ts.offset);
-    if (ts.era == GregorianCalendar.BC) {
-      return result.with(ChronoField.ERA, IsoEra.BCE.getValue());
-    } else {
-      return result;
-    }
-  }
-
-  /**
-   * Returns the offset date time object matching the given bytes with Oid#TIMESTAMPTZ.
-   *
-   * @param bytes The binary encoded local date time value.
-   * @return The parsed local date time object.
-   * @throws PSQLException If binary format could not be parsed.
-   */
-  public OffsetDateTime toOffsetDateTimeBin(byte[] bytes) throws PSQLException {
-    ParsedBinaryTimestamp parsedTimestamp = this.toProlepticParsedTimestampBin(bytes);
-    if (parsedTimestamp.infinity == Infinity.POSITIVE) {
-      return OffsetDateTime.MAX;
-    } else if (parsedTimestamp.infinity == Infinity.NEGATIVE) {
-      return OffsetDateTime.MIN;
-    }
-
-    // hardcode utc because the backend does not provide us the timezone
-    // Postgres is always UTC
-    Instant instant = Instant.ofEpochSecond(parsedTimestamp.millis / 1000L, parsedTimestamp.nanos);
-    return OffsetDateTime.ofInstant(instant, ZoneOffset.UTC);
-  }
-
-  public Time toTime(
-      Calendar cal, String s) throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      // 1) Parse backend string
-      if (s == null) {
-        return null;
-      }
-      ParsedTimestamp ts = parseBackendTimestamp(s);
-      Calendar useCal = ts.hasOffset ? getCalendar(ts.offset) : setupCalendar(cal);
-      if (!ts.hasOffset) {
-        // When no time zone provided (e.g. time or timestamp)
-        // We get the year-month-day from the string, then truncate the day to 1970-01-01
-        // This is used for timestamp -> time conversion
-        // Note: this cannot be merged with "else" branch since
-        // timestamps at which the time flips to/from DST depend on the date
-        // For instance, 2000-03-26 02:00:00 is invalid timestamp in Europe/Moscow time zone
-        // and the valid one is 2000-03-26 03:00:00. That is why we parse full timestamp
-        // then set year to 1970 later
-        useCal.set(Calendar.ERA, ts.era);
-        useCal.set(Calendar.YEAR, ts.year);
-        useCal.set(Calendar.MONTH, ts.month - 1);
-        useCal.set(Calendar.DAY_OF_MONTH, ts.day);
-      } else {
-        // When time zone is given, we just pick the time part and assume date to be 1970-01-01
-        // this is used for time, timez, and timestamptz parsing
-        useCal.set(Calendar.ERA, GregorianCalendar.AD);
-        useCal.set(Calendar.YEAR, 1970);
-        useCal.set(Calendar.MONTH, Calendar.JANUARY);
-        useCal.set(Calendar.DAY_OF_MONTH, 1);
-      }
-      useCal.set(Calendar.HOUR_OF_DAY, ts.hour);
-      useCal.set(Calendar.MINUTE, ts.minute);
-      useCal.set(Calendar.SECOND, ts.second);
-      useCal.set(Calendar.MILLISECOND, 0);
-
-      long timeMillis = useCal.getTimeInMillis() + ts.nanos / 1000000;
-      if (ts.hasOffset || (ts.year == 1970 && ts.era == GregorianCalendar.AD)) {
-        // time with time zone has proper time zone, so the value can be returned as is
-        return new Time(timeMillis);
-      }
-
-      // 2) Truncate date part so in given time zone the date would be formatted as 01/01/1970
-      return convertToTime(timeMillis, useCal.getTimeZone());
-    }
-  }
-
-  public Date toDate(Calendar cal,
-      String s) throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      // 1) Parse backend string
-      Timestamp timestamp = toTimestamp(cal, s);
-
-      if (timestamp == null) {
-        return null;
-      }
-
-      // Note: infinite dates are handled in convertToDate
-      // 2) Truncate date part so in given time zone the date would be formatted as 00:00
-      return convertToDate(timestamp.getTime(), cal == null ? null : cal.getTimeZone());
-    }
-  }
-
-  private Calendar setupCalendar(Calendar cal) {
-    TimeZone timeZone = cal == null ? null : cal.getTimeZone();
-    return getSharedCalendar(timeZone);
-  }
-
-  /**
-   * Get a shared calendar, applying the supplied time zone or the default time zone if null.
-   *
-   * @param timeZone time zone to be set for the calendar
-   * @return The shared calendar.
-   */
-  public Calendar getSharedCalendar(TimeZone timeZone) {
-    if (timeZone == null) {
-      timeZone = getDefaultTz();
-    }
-    Calendar tmp = calendarWithUserTz;
-    tmp.setTimeZone(timeZone);
-    return tmp;
-  }
-
-  /**
-   * Returns true when microsecond part of the time should be increased
-   * when rounding to microseconds
-   * @param nanos nanosecond part of the time
-   * @return true when microsecond part of the time should be increased when rounding to microseconds
-   */
-  private static boolean nanosExceed499(int nanos) {
-    return nanos % 1000 > 499;
-  }
-
-  public String toString(Calendar cal, Timestamp x) {
-    return toString(cal, x, true);
-  }
-
-  public String toString(Calendar cal, Timestamp x,
-      boolean withTimeZone) {
-    try (ResourceLock ignore = lock.obtain()) {
-      if (x.getTime() == PGStatement.DATE_POSITIVE_INFINITY) {
-        return "infinity";
-      } else if (x.getTime() == PGStatement.DATE_NEGATIVE_INFINITY) {
-        return "-infinity";
-      }
-
-      cal = setupCalendar(cal);
-      long timeMillis = x.getTime();
-
-      // Round to microseconds
-      int nanos = x.getNanos();
-      if (nanos >= MAX_NANOS_BEFORE_WRAP_ON_ROUND) {
-        nanos = 0;
-        timeMillis++;
-      } else if (nanosExceed499(nanos)) {
-        // PostgreSQL does not support nanosecond resolution yet, and appendTime will just ignore
-        // 0..999 part of the nanoseconds, however we subtract nanos % 1000 to make the value
-        // a little bit saner for debugging reasons
-        nanos += 1000 - nanos % 1000;
-      }
-      cal.setTimeInMillis(timeMillis);
-
-      sbuf.setLength(0);
-
-      appendDate(sbuf, cal);
-      sbuf.append(' ');
-      appendTime(sbuf, cal, nanos);
-      if (withTimeZone) {
-        appendTimeZone(sbuf, cal);
-      }
-      appendEra(sbuf, cal);
-
-      return sbuf.toString();
-    }
-  }
-
-  public String toString(Calendar cal, Date x) {
-    return toString(cal, x, true);
-  }
-
-  public String toString(Calendar cal, Date x,
-      boolean withTimeZone) {
-    try (ResourceLock ignore = lock.obtain()) {
-      if (x.getTime() == PGStatement.DATE_POSITIVE_INFINITY) {
-        return "infinity";
-      } else if (x.getTime() == PGStatement.DATE_NEGATIVE_INFINITY) {
-        return "-infinity";
-      }
-
-      cal = setupCalendar(cal);
-      cal.setTime(x);
-
-      sbuf.setLength(0);
-
-      appendDate(sbuf, cal);
-      appendEra(sbuf, cal);
-      if (withTimeZone) {
-        sbuf.append(' ');
-        appendTimeZone(sbuf, cal);
-      }
-
-      return sbuf.toString();
-    }
-  }
-
-  public String toString(Calendar cal, Time x) {
-    return toString(cal, x, true);
-  }
-
-  public String toString(Calendar cal, Time x,
-      boolean withTimeZone) {
-    try (ResourceLock ignore = lock.obtain()) {
-      cal = setupCalendar(cal);
-      cal.setTime(x);
-
-      sbuf.setLength(0);
-
-      appendTime(sbuf, cal, cal.get(Calendar.MILLISECOND) * 1000000);
-
-      // The 'time' parser for <= 7.3 doesn't like timezones.
-      if (withTimeZone) {
-        appendTimeZone(sbuf, cal);
-      }
-
-      return sbuf.toString();
-    }
-  }
-
-  private static void appendDate(StringBuilder sb, Calendar cal) {
-    int year = cal.get(Calendar.YEAR);
-    int month = cal.get(Calendar.MONTH) + 1;
-    int day = cal.get(Calendar.DAY_OF_MONTH);
-    appendDate(sb, year, month, day);
-  }
-
-  private static void appendDate(StringBuilder sb, int year, int month, int day) {
-    // always use at least four digits for the year so very
-    // early years, like 2, don't get misinterpreted
-    //
-    int prevLength = sb.length();
-    sb.append(year);
-    int leadingZerosForYear = 4 - (sb.length() - prevLength);
-    if (leadingZerosForYear > 0) {
-      sb.insert(prevLength, ZEROS, 0, leadingZerosForYear);
-    }
-
-    sb.append('-');
-    sb.append(NUMBERS[month]);
-    sb.append('-');
-    sb.append(NUMBERS[day]);
-  }
-
-  private static void appendTime(StringBuilder sb, Calendar cal, int nanos) {
-    int hours = cal.get(Calendar.HOUR_OF_DAY);
-    int minutes = cal.get(Calendar.MINUTE);
-    int seconds = cal.get(Calendar.SECOND);
-    appendTime(sb, hours, minutes, seconds, nanos);
-  }
-
-  /**
-   * Appends time part to the {@code StringBuilder} in PostgreSQL-compatible format.
-   * The function truncates {@param nanos} to microseconds. The value is expected to be rounded
-   * beforehand.
-   * @param sb destination
-   * @param hours hours
-   * @param minutes minutes
-   * @param seconds seconds
-   * @param nanos nanoseconds
-   */
-  private static void appendTime(StringBuilder sb, int hours, int minutes, int seconds, int nanos) {
-    sb.append(NUMBERS[hours]);
-
-    sb.append(':');
-    sb.append(NUMBERS[minutes]);
-
-    sb.append(':');
-    sb.append(NUMBERS[seconds]);
-
-    // Add nanoseconds.
-    // This won't work for server versions < 7.2 which only want
-    // a two digit fractional second, but we don't need to support 7.1
-    // anymore and getting the version number here is difficult.
-    //
-    if (nanos < 1000) {
-      return;
-    }
-    sb.append('.');
-    int len = sb.length();
-    sb.append(nanos / 1000); // append microseconds
-    int needZeros = 6 - (sb.length() - len);
-    if (needZeros > 0) {
-      sb.insert(len, ZEROS, 0, needZeros);
-    }
-
-    int end = sb.length() - 1;
-    while (sb.charAt(end) == '0') {
-      sb.deleteCharAt(end);
-      end--;
-    }
-  }
-
-  private void appendTimeZone(StringBuilder sb, Calendar cal) {
-    int offset = (cal.get(Calendar.ZONE_OFFSET) + cal.get(Calendar.DST_OFFSET)) / 1000;
-
-    appendTimeZone(sb, offset);
-  }
-
-  private void appendTimeZone(StringBuilder sb, int offset) {
-    int absoff = Math.abs(offset);
-    int hours = absoff / 60 / 60;
-    int mins = (absoff - hours * 60 * 60) / 60;
-    int secs = absoff - hours * 60 * 60 - mins * 60;
-
-    sb.append(offset >= 0 ? "+" : "-");
-
-    sb.append(NUMBERS[hours]);
-
-    if (mins == 0 && secs == 0) {
-      return;
-    }
-    sb.append(':');
-
-    sb.append(NUMBERS[mins]);
-
-    if (secs != 0) {
-      sb.append(':');
-      sb.append(NUMBERS[secs]);
-    }
-  }
-
-  private static void appendEra(StringBuilder sb, Calendar cal) {
-    if (cal.get(Calendar.ERA) == GregorianCalendar.BC) {
-      sb.append(" BC");
-    }
-  }
-
-  public String toString(LocalDate localDate) {
-    try (ResourceLock ignore = lock.obtain()) {
-      if (LocalDate.MAX.equals(localDate)) {
-        return "infinity";
-      } else if (localDate.isBefore(MIN_LOCAL_DATE)) {
-        return "-infinity";
-      }
-
-      sbuf.setLength(0);
-
-      appendDate(sbuf, localDate);
-      appendEra(sbuf, localDate);
-
-      return sbuf.toString();
-    }
-  }
-
-  public String toString(LocalTime localTime) {
-    try (ResourceLock ignore = lock.obtain()) {
-      sbuf.setLength(0);
-
-      if (localTime.isAfter(MAX_TIME)) {
-        return "24:00:00";
-      }
-
-      int nano = localTime.getNano();
-      if (nanosExceed499(nano)) {
-        // Technically speaking this is not a proper rounding, however
-        // it relies on the fact that appendTime just truncates 000..999 nanosecond part
-        localTime = localTime.plus(ONE_MICROSECOND);
-      }
-      appendTime(sbuf, localTime);
-
-      return sbuf.toString();
-    }
-  }
-
-  public String toString(OffsetTime offsetTime) {
-    try (ResourceLock ignore = lock.obtain()) {
-      sbuf.setLength(0);
-
-      final LocalTime localTime = offsetTime.toLocalTime();
-      if (localTime.isAfter(MAX_TIME)) {
-        sbuf.append("24:00:00");
-        appendTimeZone(sbuf, offsetTime.getOffset());
-        return sbuf.toString();
-      }
-
-      int nano = offsetTime.getNano();
-      if (nanosExceed499(nano)) {
-        // Technically speaking this is not a proper rounding, however
-        // it relies on the fact that appendTime just truncates 000..999 nanosecond part
-        offsetTime = offsetTime.plus(ONE_MICROSECOND);
-      }
-      appendTime(sbuf, localTime);
-      appendTimeZone(sbuf, offsetTime.getOffset());
-
-      return sbuf.toString();
-    }
-  }
-
-  /**
-   * Converts {@code timetz} to string taking client time zone ({@link #timeZoneProvider})
-   * into account.
-   * @param value binary representation of {@code timetz}
-   * @return string representation of {@code timetz}
-   */
-  public String toStringOffsetTimeBin(byte[] value) throws PSQLException {
-    OffsetTime offsetTimeBin = toOffsetTimeBin(value);
-    return toString(withClientOffsetSameInstant(offsetTimeBin));
-  }
-
-  /**
-   * PostgreSQL does not store the time zone in the binary representation of timetz.
-   * However, we want to preserve the output of {@code getString()} in both binary and text formats
-   * So we try a client time zone when serializing {@link OffsetTime} to string.
-   * @param input input offset time
-   * @return adjusted offset time (it represents the same instant as the input one)
-   */
-  public OffsetTime withClientOffsetSameInstant(OffsetTime input) {
-    if (input == OffsetTime.MAX || input == OffsetTime.MIN) {
-      return input;
-    }
-    TimeZone timeZone = timeZoneProvider.get();
-    int offsetMillis = timeZone.getRawOffset();
-    return input.withOffsetSameInstant(
-        offsetMillis == 0
-            ? ZoneOffset.UTC
-            : ZoneOffset.ofTotalSeconds(offsetMillis / 1000));
-  }
-
-  public String toString(OffsetDateTime offsetDateTime) {
-    try (ResourceLock ignore = lock.obtain()) {
-      if (offsetDateTime.isAfter(MAX_OFFSET_DATETIME)) {
-        return "infinity";
-      } else if (offsetDateTime.isBefore(MIN_OFFSET_DATETIME)) {
-        return "-infinity";
-      }
-
-      sbuf.setLength(0);
-
-      int nano = offsetDateTime.getNano();
-      if (nanosExceed499(nano)) {
-        // Technically speaking this is not a proper rounding, however
-        // it relies on the fact that appendTime just truncates 000..999 nanosecond part
-        offsetDateTime = offsetDateTime.plus(ONE_MICROSECOND);
-      }
-      LocalDateTime localDateTime = offsetDateTime.toLocalDateTime();
-      LocalDate localDate = localDateTime.toLocalDate();
-      appendDate(sbuf, localDate);
-      sbuf.append(' ');
-      appendTime(sbuf, localDateTime.toLocalTime());
-      appendTimeZone(sbuf, offsetDateTime.getOffset());
-      appendEra(sbuf, localDate);
-
-      return sbuf.toString();
-    }
-  }
-
-  /**
-   * Converts {@code timestamptz} to string taking client time zone ({@link #timeZoneProvider})
-   * into account.
-   * @param value binary representation of {@code timestamptz}
-   * @return string representation of {@code timestamptz}
-   */
-  public String toStringOffsetDateTime(byte[] value) throws PSQLException {
-    OffsetDateTime offsetDateTime = toOffsetDateTimeBin(value);
-    return toString(withClientOffsetSameInstant(offsetDateTime));
-  }
-
-  /**
-   * PostgreSQL does not store the time zone in the binary representation of timestamptz.
-   * However, we want to preserve the output of {@code getString()} in both binary and text formats
-   * So we try a client time zone when serializing {@link OffsetDateTime} to string.
-   * @param input input offset date time
-   * @return adjusted offset date time (it represents the same instant as the input one)
-   */
-  public OffsetDateTime withClientOffsetSameInstant(OffsetDateTime input) {
-    if (input == OffsetDateTime.MAX || input == OffsetDateTime.MIN) {
-      return input;
-    }
-    int offsetMillis;
-    TimeZone timeZone = timeZoneProvider.get();
-    if (isSimpleTimeZone(timeZone.getID())) {
-      offsetMillis = timeZone.getRawOffset();
-    } else {
-      offsetMillis = timeZone.getOffset(input.toEpochSecond() * 1000L);
-    }
-    return input.withOffsetSameInstant(
-        offsetMillis == 0
-            ? ZoneOffset.UTC
-            : ZoneOffset.ofTotalSeconds(offsetMillis / 1000));
-  }
-
-  /**
-   * Formats {@link LocalDateTime} to be sent to the backend, thus it adds time zone.
-   * Do not use this method in {@link java.sql.ResultSet#getString(int)}
-   * @param localDateTime The local date to format as a String
-   * @return The formatted local date
-   */
-  public String toString(LocalDateTime localDateTime) {
-    try (ResourceLock ignore = lock.obtain()) {
-      if (localDateTime.isAfter(MAX_LOCAL_DATETIME)) {
-        return "infinity";
-      } else if (localDateTime.isBefore(MIN_LOCAL_DATETIME)) {
-        return "-infinity";
-      }
-
-      sbuf.setLength(0);
-
-      if (nanosExceed499(localDateTime.getNano())) {
-        localDateTime = localDateTime.plus(ONE_MICROSECOND);
-      }
-
-      LocalDate localDate = localDateTime.toLocalDate();
-      appendDate(sbuf, localDate);
-      sbuf.append(' ');
-      appendTime(sbuf, localDateTime.toLocalTime());
-      appendEra(sbuf, localDate);
-
-      return sbuf.toString();
-    }
-  }
-
-  private static void appendDate(StringBuilder sb, LocalDate localDate) {
-    int year = localDate.get(ChronoField.YEAR_OF_ERA);
-    int month = localDate.getMonthValue();
-    int day = localDate.getDayOfMonth();
-    appendDate(sb, year, month, day);
-  }
-
-  private static void appendTime(StringBuilder sb, LocalTime localTime) {
-    int hours = localTime.getHour();
-    int minutes = localTime.getMinute();
-    int seconds = localTime.getSecond();
-    int nanos = localTime.getNano();
-    appendTime(sb, hours, minutes, seconds, nanos);
-  }
-
-  private void appendTimeZone(StringBuilder sb, ZoneOffset offset) {
-    int offsetSeconds = offset.getTotalSeconds();
-
-    appendTimeZone(sb, offsetSeconds);
-  }
-
-  private static void appendEra(StringBuilder sb, LocalDate localDate) {
-    if (localDate.get(ChronoField.ERA) == IsoEra.BCE.getValue()) {
-      sb.append(" BC");
-    }
-  }
-
-  @SuppressWarnings("deprecation")
-  private static int skipWhitespace(char[] s, int start) {
-    int slen = s.length;
-    for (int i = start; i < slen; i++) {
-      if (!Character.isSpace(s[i])) {
-        return i;
-      }
-    }
-    return slen;
-  }
-
-  private static int firstNonDigit(char[] s, int start) {
-    int slen = s.length;
-    for (int i = start; i < slen; i++) {
-      if (!Character.isDigit(s[i])) {
-        return i;
-      }
-    }
-    return slen;
-  }
-
-  private static int number(char[] s, int start, int end) {
-    if (start >= end) {
-      throw new NumberFormatException();
-    }
-    int n = 0;
-    for (int i = start; i < end; i++) {
-      n = 10 * n + (s[i] - '0');
-    }
-    return n;
-  }
-
-  private static char charAt(char[] s, int pos) {
-    if (pos >= 0 && pos < s.length) {
-      return s[pos];
-    }
-    return '\0';
-  }
-
-  /**
-   * Returns the SQL Date object matching the given bytes with {@link Oid#DATE}.
-   *
-   * @param tz The timezone used.
-   * @param bytes The binary encoded date value.
-   * @return The parsed date object.
-   * @throws PSQLException If binary format could not be parsed.
-   */
-  public Date toDateBin(TimeZone tz, byte[] bytes) throws PSQLException {
-    if (bytes.length != 4) {
-      throw new PSQLException(GT.tr("Unsupported binary encoding of {0}.", "date"),
-          PSQLState.BAD_DATETIME_FORMAT);
-    }
-    int days = ByteConverter.int4(bytes, 0);
-    if (tz == null) {
-      tz = getDefaultTz();
-    }
-    long secs = toJavaSecs(days * 86400L);
-    long millis = secs * 1000L;
-
-    if (millis <= PGStatement.DATE_NEGATIVE_SMALLER_INFINITY) {
-      millis = PGStatement.DATE_NEGATIVE_INFINITY;
-    } else if (millis >= PGStatement.DATE_POSITIVE_SMALLER_INFINITY) {
-      millis = PGStatement.DATE_POSITIVE_INFINITY;
-    } else {
-      // Here be dragons: backend did not provide us the timezone, so we guess the actual point in
-      // time
-
-      millis = guessTimestamp(millis, tz);
-    }
-    return new Date(millis);
-  }
-
-  private TimeZone getDefaultTz() {
-    // Fast path to getting the default timezone.
-    if (DEFAULT_TIME_ZONE_FIELD != null) {
-      try {
-        TimeZone defaultTimeZone = (TimeZone) DEFAULT_TIME_ZONE_FIELD.get(null);
-        if (defaultTimeZone == prevDefaultZoneFieldValue) {
-          return defaultTimeZoneCache;
+    /**
+     * Parse a string and return a LocalDateTime representing its value.
+     *
+     * @param s The ISO formated date string to parse.
+     * @return null if s is null or a LocalDateTime of the parsed string s.
+     * @throws SQLException if there is a problem parsing s.
+     */
+    public LocalDateTime toLocalDateTime(String s) throws SQLException {
+        if (s == null) {
+            return null;
         }
-        prevDefaultZoneFieldValue = defaultTimeZone;
-      } catch (Exception e) {
-        // If this were to fail, fallback on slow method.
-      }
-    }
-    TimeZone tz = TimeZone.getDefault();
-    defaultTimeZoneCache = tz;
-    return tz;
-  }
 
-  public boolean hasFastDefaultTimeZone() {
-    return DEFAULT_TIME_ZONE_FIELD != null;
-  }
+        int slen = s.length();
 
-  /**
-   * Returns the SQL Time object matching the given bytes with {@link Oid#TIME} or
-   * {@link Oid#TIMETZ}.
-   *
-   * @param tz The timezone used when received data is {@link Oid#TIME}, ignored if data already
-   *        contains {@link Oid#TIMETZ}.
-   * @param bytes The binary encoded time value.
-   * @return The parsed time object.
-   * @throws PSQLException If binary format could not be parsed.
-   */
-  public Time toTimeBin(TimeZone tz, byte[] bytes) throws PSQLException {
-    if (bytes.length != 8 && bytes.length != 12) {
-      throw new PSQLException(GT.tr("Unsupported binary encoding of {0}.", "time"),
-          PSQLState.BAD_DATETIME_FORMAT);
+        // convert postgres's infinity values to internal infinity magic value
+        if (slen == 8 && "infinity".equals(s)) {
+            return LocalDateTime.MAX;
+        }
+
+        if (slen == 9 && "-infinity".equals(s)) {
+            return LocalDateTime.MIN;
+        }
+
+        ParsedTimestamp ts = parseBackendTimestamp(s);
+
+        // intentionally ignore time zone
+        // 2004-10-19 10:23:54+03:00 is 2004-10-19 10:23:54 locally
+        LocalDateTime result = LocalDateTime.of(ts.year, ts.month, ts.day, ts.hour, ts.minute, ts.second, ts.nanos);
+        if (ts.era == GregorianCalendar.BC) {
+            return result.with(ChronoField.ERA, IsoEra.BCE.getValue());
+        } else {
+            return result;
+        }
     }
 
-    long millis;
-    int timeOffset;
-
-    if (usesDouble) {
-      double time = ByteConverter.float8(bytes, 0);
-
-      millis = (long) (time * 1000);
-    } else {
-      long time = ByteConverter.int8(bytes, 0);
-
-      millis = time / 1000;
+    /**
+     * Returns the offset date time object matching the given bytes with Oid#TIMETZ.
+     * Not used internally anymore, function is here to retain compatibility with previous versions
+     *
+     * @param t the time value
+     * @return the matching offset date time
+     * @deprecated was used internally, and not used anymore
+     */
+    @Deprecated
+    public OffsetDateTime toOffsetDateTime(Time t) {
+        // hardcode utc because the backend does not provide us the timezone
+        // hardcode UNIX epoch, JDBC requires OffsetDateTime but doesn't describe what date should be used
+        return t.toLocalTime().atDate(LocalDate.of(1970, 1, 1)).atOffset(ZoneOffset.UTC);
     }
 
-    if (bytes.length == 12) {
-      timeOffset = ByteConverter.int4(bytes, 8);
-      timeOffset *= -1000;
-      millis -= timeOffset;
-      return new Time(millis);
+    /**
+     * Parse a string and return a OffsetDateTime representing its value.
+     *
+     * @param s The ISO formatted date string to parse.
+     * @return null if s is null or a OffsetDateTime of the parsed string s.
+     * @throws SQLException if there is a problem parsing s.
+     */
+    public OffsetDateTime toOffsetDateTime(
+            String s) throws SQLException {
+        if (s == null) {
+            return null;
+        }
+
+        int slen = s.length();
+
+        // convert postgres's infinity values to internal infinity magic value
+        if (slen == 8 && "infinity".equals(s)) {
+            return OffsetDateTime.MAX;
+        }
+
+        if (slen == 9 && "-infinity".equals(s)) {
+            return OffsetDateTime.MIN;
+        }
+
+        final ParsedTimestamp ts = parseBackendTimestamp(s);
+        OffsetDateTime result =
+                OffsetDateTime.of(ts.year, ts.month, ts.day, ts.hour, ts.minute, ts.second, ts.nanos, ts.offset);
+        if (ts.era == GregorianCalendar.BC) {
+            return result.with(ChronoField.ERA, IsoEra.BCE.getValue());
+        } else {
+            return result;
+        }
     }
 
-    if (tz == null) {
-      tz = getDefaultTz();
+    /**
+     * Returns the offset date time object matching the given bytes with Oid#TIMESTAMPTZ.
+     *
+     * @param bytes The binary encoded local date time value.
+     * @return The parsed local date time object.
+     * @throws PSQLException If binary format could not be parsed.
+     */
+    public OffsetDateTime toOffsetDateTimeBin(byte[] bytes) throws PSQLException {
+        ParsedBinaryTimestamp parsedTimestamp = this.toProlepticParsedTimestampBin(bytes);
+        if (parsedTimestamp.infinity == Infinity.POSITIVE) {
+            return OffsetDateTime.MAX;
+        } else if (parsedTimestamp.infinity == Infinity.NEGATIVE) {
+            return OffsetDateTime.MIN;
+        }
+
+        // hardcode utc because the backend does not provide us the timezone
+        // Postgres is always UTC
+        Instant instant = Instant.ofEpochSecond(parsedTimestamp.millis / 1000L, parsedTimestamp.nanos);
+        return OffsetDateTime.ofInstant(instant, ZoneOffset.UTC);
     }
 
-    // Here be dragons: backend did not provide us the timezone, so we guess the actual point in
-    // time
-    millis = guessTimestamp(millis, tz);
+    public Time toTime(
+            Calendar cal, String s) throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            // 1) Parse backend string
+            if (s == null) {
+                return null;
+            }
+            ParsedTimestamp ts = parseBackendTimestamp(s);
+            Calendar useCal = ts.hasOffset ? getCalendar(ts.offset) : setupCalendar(cal);
+            if (!ts.hasOffset) {
+                // When no time zone provided (e.g. time or timestamp)
+                // We get the year-month-day from the string, then truncate the day to 1970-01-01
+                // This is used for timestamp -> time conversion
+                // Note: this cannot be merged with "else" branch since
+                // timestamps at which the time flips to/from DST depend on the date
+                // For instance, 2000-03-26 02:00:00 is invalid timestamp in Europe/Moscow time zone
+                // and the valid one is 2000-03-26 03:00:00. That is why we parse full timestamp
+                // then set year to 1970 later
+                useCal.set(Calendar.ERA, ts.era);
+                useCal.set(Calendar.YEAR, ts.year);
+                useCal.set(Calendar.MONTH, ts.month - 1);
+                useCal.set(Calendar.DAY_OF_MONTH, ts.day);
+            } else {
+                // When time zone is given, we just pick the time part and assume date to be 1970-01-01
+                // this is used for time, timez, and timestamptz parsing
+                useCal.set(Calendar.ERA, GregorianCalendar.AD);
+                useCal.set(Calendar.YEAR, 1970);
+                useCal.set(Calendar.MONTH, Calendar.JANUARY);
+                useCal.set(Calendar.DAY_OF_MONTH, 1);
+            }
+            useCal.set(Calendar.HOUR_OF_DAY, ts.hour);
+            useCal.set(Calendar.MINUTE, ts.minute);
+            useCal.set(Calendar.SECOND, ts.second);
+            useCal.set(Calendar.MILLISECOND, 0);
 
-    return convertToTime(millis, tz); // Ensure date part is 1970-01-01
-  }
+            long timeMillis = useCal.getTimeInMillis() + ts.nanos / 1000000;
+            if (ts.hasOffset || (ts.year == 1970 && ts.era == GregorianCalendar.AD)) {
+                // time with time zone has proper time zone, so the value can be returned as is
+                return new Time(timeMillis);
+            }
 
-  /**
-   * Returns the SQL Time object matching the given bytes with {@link Oid#TIME}.
-   *
-   * @param bytes The binary encoded time value.
-   * @return The parsed time object.
-   * @throws PSQLException If binary format could not be parsed.
-   */
-  public LocalTime toLocalTimeBin(byte[] bytes) throws PSQLException {
-    if (bytes.length != 8) {
-      throw new PSQLException(GT.tr("Unsupported binary encoding of {0}.", "time"),
-          PSQLState.BAD_DATETIME_FORMAT);
+            // 2) Truncate date part so in given time zone the date would be formatted as 01/01/1970
+            return convertToTime(timeMillis, useCal.getTimeZone());
+        }
     }
 
-    long micros;
+    public Date toDate(Calendar cal,
+                       String s) throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            // 1) Parse backend string
+            Timestamp timestamp = toTimestamp(cal, s);
 
-    if (usesDouble) {
-      double seconds = ByteConverter.float8(bytes, 0);
+            if (timestamp == null) {
+                return null;
+            }
 
-      micros = (long) (seconds * 1000000d);
-    } else {
-      micros = ByteConverter.int8(bytes, 0);
+            // Note: infinite dates are handled in convertToDate
+            // 2) Truncate date part so in given time zone the date would be formatted as 00:00
+            return convertToDate(timestamp.getTime(), cal == null ? null : cal.getTimeZone());
+        }
     }
 
-    return LocalTime.ofNanoOfDay(Math.multiplyExact(micros, 1000L));
-  }
-
-  /**
-   * Returns the SQL Timestamp object matching the given bytes with {@link Oid#TIMESTAMP} or
-   * {@link Oid#TIMESTAMPTZ}.
-   *
-   * @param tz The timezone used when received data is {@link Oid#TIMESTAMP}, ignored if data
-   *        already contains {@link Oid#TIMESTAMPTZ}.
-   * @param bytes The binary encoded timestamp value.
-   * @param timestamptz True if the binary is in GMT.
-   * @return The parsed timestamp object.
-   * @throws PSQLException If binary format could not be parsed.
-   */
-  public Timestamp toTimestampBin(TimeZone tz, byte[] bytes, boolean timestamptz)
-      throws PSQLException {
-
-    ParsedBinaryTimestamp parsedTimestamp = this.toParsedTimestampBin(tz, bytes, timestamptz);
-    if (parsedTimestamp.infinity == Infinity.POSITIVE) {
-      return new Timestamp(PGStatement.DATE_POSITIVE_INFINITY);
-    } else if (parsedTimestamp.infinity == Infinity.NEGATIVE) {
-      return new Timestamp(PGStatement.DATE_NEGATIVE_INFINITY);
+    private Calendar setupCalendar(Calendar cal) {
+        TimeZone timeZone = cal == null ? null : cal.getTimeZone();
+        return getSharedCalendar(timeZone);
     }
 
-    Timestamp ts = new Timestamp(parsedTimestamp.millis);
-    ts.setNanos(parsedTimestamp.nanos);
-    return ts;
-  }
-
-  private ParsedBinaryTimestamp toParsedTimestampBinPlain(byte[] bytes)
-      throws PSQLException {
-
-    if (bytes.length != 8) {
-      throw new PSQLException(GT.tr("Unsupported binary encoding of {0}.", "timestamp"),
-              PSQLState.BAD_DATETIME_FORMAT);
+    /**
+     * Get a shared calendar, applying the supplied time zone or the default time zone if null.
+     *
+     * @param timeZone time zone to be set for the calendar
+     * @return The shared calendar.
+     */
+    public Calendar getSharedCalendar(TimeZone timeZone) {
+        if (timeZone == null) {
+            timeZone = getDefaultTz();
+        }
+        Calendar tmp = calendarWithUserTz;
+        tmp.setTimeZone(timeZone);
+        return tmp;
     }
 
-    long secs;
-    int nanos;
-
-    if (usesDouble) {
-      double time = ByteConverter.float8(bytes, 0);
-      if (time == Double.POSITIVE_INFINITY) {
-        ParsedBinaryTimestamp ts = new ParsedBinaryTimestamp();
-        ts.infinity = Infinity.POSITIVE;
-        return ts;
-      } else if (time == Double.NEGATIVE_INFINITY) {
-        ParsedBinaryTimestamp ts = new ParsedBinaryTimestamp();
-        ts.infinity = Infinity.NEGATIVE;
-        return ts;
-      }
-
-      secs = (long) time;
-      nanos = (int) ((time - secs) * 1000000);
-    } else {
-      long time = ByteConverter.int8(bytes, 0);
-
-      // compatibility with text based receiving, not strictly necessary
-      // and can actually be confusing because there are timestamps
-      // that are larger than infinite
-      if (time == Long.MAX_VALUE) {
-        ParsedBinaryTimestamp ts = new ParsedBinaryTimestamp();
-        ts.infinity = Infinity.POSITIVE;
-        return ts;
-      } else if (time == Long.MIN_VALUE) {
-        ParsedBinaryTimestamp ts = new ParsedBinaryTimestamp();
-        ts.infinity = Infinity.NEGATIVE;
-        return ts;
-      }
-
-      secs = time / 1000000;
-      nanos = (int) (time - secs * 1000000);
-    }
-    if (nanos < 0) {
-      secs--;
-      nanos += 1000000;
-    }
-    nanos *= 1000;
-
-    long millis = secs * 1000L;
-
-    ParsedBinaryTimestamp ts = new ParsedBinaryTimestamp();
-    ts.millis = millis;
-    ts.nanos = nanos;
-    return ts;
-  }
-
-  private ParsedBinaryTimestamp toParsedTimestampBin(TimeZone tz, byte[] bytes,
-      boolean timestamptz)
-      throws PSQLException {
-
-    ParsedBinaryTimestamp ts = toParsedTimestampBinPlain(bytes);
-    if (ts.infinity != null) {
-      return ts;
+    public String toString(Calendar cal, Timestamp x) {
+        return toString(cal, x, true);
     }
 
-    long secs = ts.millis / 1000L;
+    public String toString(Calendar cal, Timestamp x,
+                           boolean withTimeZone) {
+        try (ResourceLock ignore = lock.obtain()) {
+            if (x.getTime() == PGStatement.DATE_POSITIVE_INFINITY) {
+                return "infinity";
+            } else if (x.getTime() == PGStatement.DATE_NEGATIVE_INFINITY) {
+                return "-infinity";
+            }
 
-    secs = toJavaSecs(secs);
-    long millis = secs * 1000L;
-    if (!timestamptz) {
-      // Here be dragons: backend did not provide us the timezone, so we guess the actual point in
-      // time
-      millis = guessTimestamp(millis, tz);
+            cal = setupCalendar(cal);
+            long timeMillis = x.getTime();
+
+            // Round to microseconds
+            int nanos = x.getNanos();
+            if (nanos >= MAX_NANOS_BEFORE_WRAP_ON_ROUND) {
+                nanos = 0;
+                timeMillis++;
+            } else if (nanosExceed499(nanos)) {
+                // PostgreSQL does not support nanosecond resolution yet, and appendTime will just ignore
+                // 0..999 part of the nanoseconds, however we subtract nanos % 1000 to make the value
+                // a little bit saner for debugging reasons
+                nanos += 1000 - nanos % 1000;
+            }
+            cal.setTimeInMillis(timeMillis);
+
+            sbuf.setLength(0);
+
+            appendDate(sbuf, cal);
+            sbuf.append(' ');
+            appendTime(sbuf, cal, nanos);
+            if (withTimeZone) {
+                appendTimeZone(sbuf, cal);
+            }
+            appendEra(sbuf, cal);
+
+            return sbuf.toString();
+        }
     }
 
-    ts.millis = millis;
-    return ts;
-  }
-
-  private ParsedBinaryTimestamp toProlepticParsedTimestampBin(byte[] bytes)
-      throws PSQLException {
-
-    ParsedBinaryTimestamp ts = toParsedTimestampBinPlain(bytes);
-    if (ts.infinity != null) {
-      return ts;
+    public String toString(Calendar cal, Date x) {
+        return toString(cal, x, true);
     }
 
-    long secs = ts.millis / 1000L;
+    public String toString(Calendar cal, Date x,
+                           boolean withTimeZone) {
+        try (ResourceLock ignore = lock.obtain()) {
+            if (x.getTime() == PGStatement.DATE_POSITIVE_INFINITY) {
+                return "infinity";
+            } else if (x.getTime() == PGStatement.DATE_NEGATIVE_INFINITY) {
+                return "-infinity";
+            }
 
-    // postgres epoc to java epoc
-    secs += PG_EPOCH_DIFF.getSeconds();
-    long millis = secs * 1000L;
+            cal = setupCalendar(cal);
+            cal.setTime(x);
 
-    ts.millis = millis;
-    return ts;
-  }
+            sbuf.setLength(0);
 
-  /**
-   * Returns the local date time object matching the given bytes with {@link Oid#TIMESTAMP} or
-   * {@link Oid#TIMESTAMPTZ}.
-   * @param bytes The binary encoded local date time value.
-   *
-   * @return The parsed local date time object.
-   * @throws PSQLException If binary format could not be parsed.
-   */
-  public LocalDateTime toLocalDateTimeBin(byte[] bytes) throws PSQLException {
+            appendDate(sbuf, cal);
+            appendEra(sbuf, cal);
+            if (withTimeZone) {
+                sbuf.append(' ');
+                appendTimeZone(sbuf, cal);
+            }
 
-    ParsedBinaryTimestamp parsedTimestamp = this.toProlepticParsedTimestampBin(bytes);
-    if (parsedTimestamp.infinity == Infinity.POSITIVE) {
-      return LocalDateTime.MAX;
-    } else if (parsedTimestamp.infinity == Infinity.NEGATIVE) {
-      return LocalDateTime.MIN;
+            return sbuf.toString();
+        }
     }
 
-    // hardcode utc because the backend does not provide us the timezone
-    // Postgres is always UTC
-    return LocalDateTime.ofEpochSecond(parsedTimestamp.millis / 1000L, parsedTimestamp.nanos, ZoneOffset.UTC);
-  }
-
-  /**
-   * Returns the local date time object matching the given bytes with {@link Oid#DATE} or
-   * {@link Oid#TIMESTAMP}.
-   * @param bytes The binary encoded local date value.
-   *
-   * @return The parsed local date object.
-   * @throws PSQLException If binary format could not be parsed.
-   */
-  public LocalDate toLocalDateBin(byte[] bytes) throws PSQLException {
-    if (bytes.length != 4) {
-      throw new PSQLException(GT.tr("Unsupported binary encoding of {0}.", "date"),
-          PSQLState.BAD_DATETIME_FORMAT);
-    }
-    int days = ByteConverter.int4(bytes, 0);
-    if (days == Integer.MAX_VALUE) {
-      return LocalDate.MAX;
-    } else if (days == Integer.MIN_VALUE) {
-      return LocalDate.MIN;
-    }
-    // adapt from different Postgres Epoch and convert to LocalDate:
-    return LocalDate.ofEpochDay(PG_EPOCH_DIFF.toDays() + days);
-  }
-
-  /**
-   * <p>Given a UTC timestamp {@code millis} finds another point in time that is rendered in given time
-   * zone {@code tz} exactly as "millis in UTC".</p>
-   *
-   * <p>For instance, given 7 Jan 16:00 UTC and tz=GMT+02:00 it returns 7 Jan 14:00 UTC == 7 Jan 16:00
-   * GMT+02:00 Note that is not trivial for timestamps near DST change. For such cases, we rely on
-   * {@link Calendar} to figure out the proper timestamp.</p>
-   *
-   * @param millis source timestamp
-   * @param tz desired time zone
-   * @return timestamp that would be rendered in {@code tz} like {@code millis} in UTC
-   */
-  private long guessTimestamp(long millis, TimeZone tz) {
-    if (tz == null) {
-      // If client did not provide us with time zone, we use system default time zone
-      tz = getDefaultTz();
-    }
-    // The story here:
-    // Backend provided us with something like '2015-10-04 13:40' and it did NOT provide us with a
-    // time zone.
-    // On top of that, user asked us to treat the timestamp as if it were in GMT+02:00.
-    //
-    // The code below creates such a timestamp that is rendered as '2015-10-04 13:40 GMT+02:00'
-    // In other words, its UTC value should be 11:40 UTC == 13:40 GMT+02:00.
-    // It is not sufficient to just subtract offset as you might cross DST change as you subtract.
-    //
-    // For instance, on 2000-03-26 02:00:00 Moscow went to DST, thus local time became 03:00:00
-    // Suppose we deal with 2000-03-26 02:00:01
-    // If you subtract offset from the timestamp, the time will be "a hour behind" since
-    // "just a couple of hours ago the OFFSET was different"
-    //
-    // To make a long story short: we have UTC timestamp that looks like "2000-03-26 02:00:01" when
-    // rendered in UTC tz.
-    // We want to know another timestamp that will look like "2000-03-26 02:00:01" in Europe/Moscow
-    // time zone.
-
-    if (isSimpleTimeZone(tz.getID())) {
-      // For well-known non-DST time zones, just subtract offset
-      return millis - tz.getRawOffset();
-    }
-    // For all the other time zones, enjoy debugging Calendar API
-    // Here we do a straight-forward implementation that splits original timestamp into pieces and
-    // composes it back.
-    // Note: cal.setTimeZone alone is not sufficient as it would alter hour (it will try to keep the
-    // same time instant value)
-    Calendar cal = calendarWithUserTz;
-    cal.setTimeZone(UTC_TIMEZONE);
-    cal.setTimeInMillis(millis);
-    int era = cal.get(Calendar.ERA);
-    int year = cal.get(Calendar.YEAR);
-    int month = cal.get(Calendar.MONTH);
-    int day = cal.get(Calendar.DAY_OF_MONTH);
-    int hour = cal.get(Calendar.HOUR_OF_DAY);
-    int min = cal.get(Calendar.MINUTE);
-    int sec = cal.get(Calendar.SECOND);
-    int ms = cal.get(Calendar.MILLISECOND);
-    cal.setTimeZone(tz);
-    cal.set(Calendar.ERA, era);
-    cal.set(Calendar.YEAR, year);
-    cal.set(Calendar.MONTH, month);
-    cal.set(Calendar.DAY_OF_MONTH, day);
-    cal.set(Calendar.HOUR_OF_DAY, hour);
-    cal.set(Calendar.MINUTE, min);
-    cal.set(Calendar.SECOND, sec);
-    cal.set(Calendar.MILLISECOND, ms);
-    return cal.getTimeInMillis();
-  }
-
-  private static boolean isSimpleTimeZone(String id) {
-    return id.startsWith("GMT") || id.startsWith("UTC");
-  }
-
-  /**
-   * Extracts the date part from a timestamp.
-   *
-   * @param millis The timestamp from which to extract the date.
-   * @param tz The time zone of the date.
-   * @return The extracted date.
-   */
-  public Date convertToDate(long millis, TimeZone tz) {
-
-    // no adjustments for the infinity hack values
-    if (millis <= PGStatement.DATE_NEGATIVE_INFINITY
-        || millis >= PGStatement.DATE_POSITIVE_INFINITY) {
-      return new Date(millis);
-    }
-    if (tz == null) {
-      tz = getDefaultTz();
-    }
-    if (isSimpleTimeZone(tz.getID())) {
-      // Truncate to 00:00 of the day.
-      // Suppose the input date is 7 Jan 15:40 GMT+02:00 (that is 13:40 UTC)
-      // We want it to become 7 Jan 00:00 GMT+02:00
-      // 1) Make sure millis becomes 15:40 in UTC, so add offset
-      int offset = tz.getRawOffset();
-      millis += offset;
-      // 2) Truncate hours, minutes, etc. Day is always 86400 seconds, no matter what leap seconds
-      // are
-      millis = floorDiv(millis, ONEDAY) * ONEDAY;
-      // 2) Now millis is 7 Jan 00:00 UTC, however we need that in GMT+02:00, so subtract some
-      // offset
-      millis -= offset;
-      // Now we have brand-new 7 Jan 00:00 GMT+02:00
-      return new Date(millis);
+    public String toString(Calendar cal, Time x) {
+        return toString(cal, x, true);
     }
 
-    Calendar cal = calendarWithUserTz;
-    cal.setTimeZone(tz);
-    cal.setTimeInMillis(millis);
-    cal.set(Calendar.HOUR_OF_DAY, 0);
-    cal.set(Calendar.MINUTE, 0);
-    cal.set(Calendar.SECOND, 0);
-    cal.set(Calendar.MILLISECOND, 0);
+    public String toString(Calendar cal, Time x,
+                           boolean withTimeZone) {
+        try (ResourceLock ignore = lock.obtain()) {
+            cal = setupCalendar(cal);
+            cal.setTime(x);
 
-    return new Date(cal.getTimeInMillis());
-  }
+            sbuf.setLength(0);
 
-  /**
-   * Extracts the time part from a timestamp. This method ensures the date part of output timestamp
-   * looks like 1970-01-01 in given timezone.
-   *
-   * @param millis The timestamp from which to extract the time.
-   * @param tz timezone to use.
-   * @return The extracted time.
-   */
-  public Time convertToTime(long millis, TimeZone tz) {
-    if (tz == null) {
-      tz = getDefaultTz();
-    }
-    if (isSimpleTimeZone(tz.getID())) {
-      // Leave just time part of the day.
-      // Suppose the input date is 2015 7 Jan 15:40 GMT+02:00 (that is 13:40 UTC)
-      // We want it to become 1970 1 Jan 15:40 GMT+02:00
-      // 1) Make sure millis becomes 15:40 in UTC, so add offset
-      int offset = tz.getRawOffset();
-      millis += offset;
-      // 2) Truncate year, month, day. Day is always 86400 seconds, no matter what leap seconds are
-      millis = floorMod(millis, ONEDAY);
-      // 2) Now millis is 1970 1 Jan 15:40 UTC, however we need that in GMT+02:00, so subtract some
-      // offset
-      millis -= offset;
-      // Now we have brand-new 1970 1 Jan 15:40 GMT+02:00
-      return new Time(millis);
-    }
-    Calendar cal = calendarWithUserTz;
-    cal.setTimeZone(tz);
-    cal.setTimeInMillis(millis);
-    cal.set(Calendar.ERA, GregorianCalendar.AD);
-    cal.set(Calendar.YEAR, 1970);
-    cal.set(Calendar.MONTH, 0);
-    cal.set(Calendar.DAY_OF_MONTH, 1);
+            appendTime(sbuf, cal, cal.get(Calendar.MILLISECOND) * 1000000);
 
-    return new Time(cal.getTimeInMillis());
-  }
+            // The 'time' parser for <= 7.3 doesn't like timezones.
+            if (withTimeZone) {
+                appendTimeZone(sbuf, cal);
+            }
 
-  /**
-   * Returns the given time value as String matching what the current postgresql server would send
-   * in text mode.
-   *
-   * @param time time value
-   * @param withTimeZone whether timezone should be added
-   * @return given time value as String
-   */
-  public String timeToString(java.util.Date time, boolean withTimeZone) {
-    Calendar cal = null;
-    if (withTimeZone) {
-      cal = calendarWithUserTz;
-      cal.setTimeZone(timeZoneProvider.get());
-    }
-    if (time instanceof Timestamp) {
-      return toString(cal, (Timestamp) time, withTimeZone);
-    }
-    if (time instanceof Time) {
-      return toString(cal, (Time) time, withTimeZone);
-    }
-    return toString(cal, (Date) time, withTimeZone);
-  }
-
-  /**
-   * Converts the given postgresql seconds to java seconds. Reverse engineered by inserting varying
-   * dates to postgresql and tuning the formula until the java dates matched. See {@link #toPgSecs}
-   * for the reverse operation.
-   *
-   * @param secs Postgresql seconds.
-   * @return Java seconds.
-   */
-  private static long toJavaSecs(long secs) {
-    // postgres epoc to java epoc
-    secs += PG_EPOCH_DIFF.getSeconds();
-
-    // Julian/Gregorian calendar cutoff point
-    if (secs < -12219292800L) { // October 4, 1582 -> October 15, 1582
-      secs += 86400 * 10;
-      if (secs < -14825808000L) { // 1500-02-28 -> 1500-03-01
-        int extraLeaps = (int) ((secs + 14825808000L) / 3155760000L);
-        extraLeaps--;
-        extraLeaps -= extraLeaps / 4;
-        secs += extraLeaps * 86400L;
-      }
-    }
-    return secs;
-  }
-
-  /**
-   * Converts the given java seconds to postgresql seconds. See {@link #toJavaSecs} for the reverse
-   * operation. The conversion is valid for any year 100 BC onwards.
-   *
-   * @param secs Postgresql seconds.
-   * @return Java seconds.
-   */
-  private static long toPgSecs(long secs) {
-    // java epoc to postgres epoc
-    secs -= PG_EPOCH_DIFF.getSeconds();
-
-    // Julian/Gregorian calendar cutoff point
-    if (secs < -13165977600L) { // October 15, 1582 -> October 4, 1582
-      secs -= 86400 * 10;
-      if (secs < -15773356800L) { // 1500-03-01 -> 1500-02-28
-        int years = (int) ((secs + 15773356800L) / -3155823050L);
-        years++;
-        years -= years / 4;
-        secs += years * 86400L;
-      }
+            return sbuf.toString();
+        }
     }
 
-    return secs;
-  }
+    private void appendTimeZone(StringBuilder sb, Calendar cal) {
+        int offset = (cal.get(Calendar.ZONE_OFFSET) + cal.get(Calendar.DST_OFFSET)) / 1000;
 
-  /**
-   * Converts the SQL Date to binary representation for {@link Oid#DATE}.
-   *
-   * @param tz The timezone used.
-   * @param bytes The binary encoded date value.
-   * @param value value
-   * @throws PSQLException If binary format could not be parsed.
-   */
-  public void toBinDate(TimeZone tz, byte[] bytes, Date value) throws PSQLException {
-    long millis = value.getTime();
-
-    if (tz == null) {
-      tz = getDefaultTz();
+        appendTimeZone(sb, offset);
     }
-    // It "getOffset" is UNTESTED
-    // See org.postgresql.jdbc.AbstractJdbc2Statement.setDate(int, java.sql.Date,
-    // java.util.Calendar)
-    // The problem is we typically do not know for sure what is the exact required date/timestamp
-    // type
-    // Thus pgjdbc sticks to text transfer.
-    millis += tz.getOffset(millis);
 
-    long secs = toPgSecs(millis / 1000);
-    ByteConverter.int4(bytes, 0, (int) (secs / 86400));
-  }
+    private void appendTimeZone(StringBuilder sb, int offset) {
+        int absoff = Math.abs(offset);
+        int hours = absoff / 60 / 60;
+        int mins = (absoff - hours * 60 * 60) / 60;
+        int secs = absoff - hours * 60 * 60 - mins * 60;
 
-  /**
-   * Converts backend's TimeZone parameter to java format.
-   * Notable difference: backend's gmt-3 is GMT+03 in Java.
-   *
-   * @param timeZone time zone to use
-   * @return java TimeZone
-   */
-  public static TimeZone parseBackendTimeZone(String timeZone) {
-    if (timeZone.startsWith("GMT")) {
-      TimeZone tz = GMT_ZONES.get(timeZone);
-      if (tz != null) {
+        sb.append(offset >= 0 ? "+" : "-");
+
+        sb.append(NUMBERS[hours]);
+
+        if (mins == 0 && secs == 0) {
+            return;
+        }
+        sb.append(':');
+
+        sb.append(NUMBERS[mins]);
+
+        if (secs != 0) {
+            sb.append(':');
+            sb.append(NUMBERS[secs]);
+        }
+    }
+
+    public String toString(LocalDate localDate) {
+        try (ResourceLock ignore = lock.obtain()) {
+            if (LocalDate.MAX.equals(localDate)) {
+                return "infinity";
+            } else if (localDate.isBefore(MIN_LOCAL_DATE)) {
+                return "-infinity";
+            }
+
+            sbuf.setLength(0);
+
+            appendDate(sbuf, localDate);
+            appendEra(sbuf, localDate);
+
+            return sbuf.toString();
+        }
+    }
+
+    public String toString(LocalTime localTime) {
+        try (ResourceLock ignore = lock.obtain()) {
+            sbuf.setLength(0);
+
+            if (localTime.isAfter(MAX_TIME)) {
+                return "24:00:00";
+            }
+
+            int nano = localTime.getNano();
+            if (nanosExceed499(nano)) {
+                // Technically speaking this is not a proper rounding, however
+                // it relies on the fact that appendTime just truncates 000..999 nanosecond part
+                localTime = localTime.plus(ONE_MICROSECOND);
+            }
+            appendTime(sbuf, localTime);
+
+            return sbuf.toString();
+        }
+    }
+
+    public String toString(OffsetTime offsetTime) {
+        try (ResourceLock ignore = lock.obtain()) {
+            sbuf.setLength(0);
+
+            final LocalTime localTime = offsetTime.toLocalTime();
+            if (localTime.isAfter(MAX_TIME)) {
+                sbuf.append("24:00:00");
+                appendTimeZone(sbuf, offsetTime.getOffset());
+                return sbuf.toString();
+            }
+
+            int nano = offsetTime.getNano();
+            if (nanosExceed499(nano)) {
+                // Technically speaking this is not a proper rounding, however
+                // it relies on the fact that appendTime just truncates 000..999 nanosecond part
+                offsetTime = offsetTime.plus(ONE_MICROSECOND);
+            }
+            // Must re-derive the LocalTime *after* the rounding above; the
+            // pre-rounding localTime captured earlier would drop the carry.
+            appendTime(sbuf, offsetTime.toLocalTime());
+            appendTimeZone(sbuf, offsetTime.getOffset());
+            return sbuf.toString();
+        }
+    }
+
+    /**
+     * Converts {@code timetz} to string taking client time zone ({@link #timeZoneProvider})
+     * into account.
+     *
+     * @param value binary representation of {@code timetz}
+     * @return string representation of {@code timetz}
+     */
+    public String toStringOffsetTimeBin(byte[] value) throws PSQLException {
+        OffsetTime offsetTimeBin = toOffsetTimeBin(value);
+        return toString(withClientOffsetSameInstant(offsetTimeBin));
+    }
+
+    /**
+     * PostgreSQL does not store the time zone in the binary representation of timetz.
+     * However, we want to preserve the output of {@code getString()} in both binary and text formats
+     * So we try a client time zone when serializing {@link OffsetTime} to string.
+     *
+     * @param input input offset time
+     * @return adjusted offset time (it represents the same instant as the input one)
+     */
+    public OffsetTime withClientOffsetSameInstant(OffsetTime input) {
+        if (input == OffsetTime.MAX || input == OffsetTime.MIN) {
+            return input;
+        }
+        TimeZone timeZone = timeZoneProvider.get();
+        int offsetMillis = timeZone.getRawOffset();
+        return input.withOffsetSameInstant(
+                offsetMillis == 0
+                        ? ZoneOffset.UTC
+                        : ZoneOffset.ofTotalSeconds(offsetMillis / 1000));
+    }
+
+    public String toString(OffsetDateTime offsetDateTime) {
+        try (ResourceLock ignore = lock.obtain()) {
+            if (offsetDateTime.isAfter(MAX_OFFSET_DATETIME)) {
+                return "infinity";
+            } else if (offsetDateTime.isBefore(MIN_OFFSET_DATETIME)) {
+                return "-infinity";
+            }
+
+            sbuf.setLength(0);
+
+            int nano = offsetDateTime.getNano();
+            if (nanosExceed499(nano)) {
+                // Technically speaking this is not a proper rounding, however
+                // it relies on the fact that appendTime just truncates 000..999 nanosecond part
+                offsetDateTime = offsetDateTime.plus(ONE_MICROSECOND);
+            }
+            LocalDateTime localDateTime = offsetDateTime.toLocalDateTime();
+            LocalDate localDate = localDateTime.toLocalDate();
+            appendDate(sbuf, localDate);
+            sbuf.append(' ');
+            appendTime(sbuf, localDateTime.toLocalTime());
+            appendTimeZone(sbuf, offsetDateTime.getOffset());
+            appendEra(sbuf, localDate);
+
+            return sbuf.toString();
+        }
+    }
+
+    /**
+     * Converts {@code timestamptz} to string taking client time zone ({@link #timeZoneProvider})
+     * into account.
+     *
+     * @param value binary representation of {@code timestamptz}
+     * @return string representation of {@code timestamptz}
+     */
+    public String toStringOffsetDateTime(byte[] value) throws PSQLException {
+        OffsetDateTime offsetDateTime = toOffsetDateTimeBin(value);
+        return toString(withClientOffsetSameInstant(offsetDateTime));
+    }
+
+    /**
+     * PostgreSQL does not store the time zone in the binary representation of timestamptz.
+     * However, we want to preserve the output of {@code getString()} in both binary and text formats
+     * So we try a client time zone when serializing {@link OffsetDateTime} to string.
+     *
+     * @param input input offset date time
+     * @return adjusted offset date time (it represents the same instant as the input one)
+     */
+    public OffsetDateTime withClientOffsetSameInstant(OffsetDateTime input) {
+        if (input == OffsetDateTime.MAX || input == OffsetDateTime.MIN) {
+            return input;
+        }
+        int offsetMillis;
+        TimeZone timeZone = timeZoneProvider.get();
+        if (isSimpleTimeZone(timeZone.getID())) {
+            offsetMillis = timeZone.getRawOffset();
+        } else {
+            offsetMillis = timeZone.getOffset(input.toEpochSecond() * 1000L);
+        }
+        return input.withOffsetSameInstant(
+                offsetMillis == 0
+                        ? ZoneOffset.UTC
+                        : ZoneOffset.ofTotalSeconds(offsetMillis / 1000));
+    }
+
+    /**
+     * Formats {@link LocalDateTime} to be sent to the backend, thus it adds time zone.
+     * Do not use this method in {@link java.sql.ResultSet#getString(int)}
+     *
+     * @param localDateTime The local date to format as a String
+     * @return The formatted local date
+     */
+    public String toString(LocalDateTime localDateTime) {
+        try (ResourceLock ignore = lock.obtain()) {
+            if (localDateTime.isAfter(MAX_LOCAL_DATETIME)) {
+                return "infinity";
+            } else if (localDateTime.isBefore(MIN_LOCAL_DATETIME)) {
+                return "-infinity";
+            }
+
+            sbuf.setLength(0);
+
+            if (nanosExceed499(localDateTime.getNano())) {
+                localDateTime = localDateTime.plus(ONE_MICROSECOND);
+            }
+
+            LocalDate localDate = localDateTime.toLocalDate();
+            appendDate(sbuf, localDate);
+            sbuf.append(' ');
+            appendTime(sbuf, localDateTime.toLocalTime());
+            appendEra(sbuf, localDate);
+
+            return sbuf.toString();
+        }
+    }
+
+    private void appendTimeZone(StringBuilder sb, ZoneOffset offset) {
+        int offsetSeconds = offset.getTotalSeconds();
+
+        appendTimeZone(sb, offsetSeconds);
+    }
+
+    /**
+     * Returns the SQL Date object matching the given bytes with {@link Oid#DATE}.
+     *
+     * @param tz    The timezone used.
+     * @param bytes The binary encoded date value.
+     * @return The parsed date object.
+     * @throws PSQLException If binary format could not be parsed.
+     */
+    public Date toDateBin(TimeZone tz, byte[] bytes) throws PSQLException {
+        if (bytes.length != 4) {
+            throw new PSQLException(GT.tr("Unsupported binary encoding of {0}.", "date"),
+                    PSQLState.BAD_DATETIME_FORMAT);
+        }
+        int days = ByteConverter.int4(bytes, 0);
+        if (tz == null) {
+            tz = getDefaultTz();
+        }
+        long secs = toJavaSecs(days * 86400L);
+        long millis = secs * 1000L;
+
+        if (millis <= PGStatement.DATE_NEGATIVE_SMALLER_INFINITY) {
+            millis = PGStatement.DATE_NEGATIVE_INFINITY;
+        } else if (millis >= PGStatement.DATE_POSITIVE_SMALLER_INFINITY) {
+            millis = PGStatement.DATE_POSITIVE_INFINITY;
+        } else {
+            // Here be dragons: backend did not provide us the timezone, so we guess the actual point in
+            // time
+
+            millis = guessTimestamp(millis, tz);
+        }
+        return new Date(millis);
+    }
+
+    private TimeZone getDefaultTz() {
+        // Fast path to getting the default timezone.
+        if (DEFAULT_TIME_ZONE_FIELD != null) {
+            try {
+                TimeZone defaultTimeZone = (TimeZone) DEFAULT_TIME_ZONE_FIELD.get(null);
+                if (defaultTimeZone == prevDefaultZoneFieldValue) {
+                    return defaultTimeZoneCache;
+                }
+                prevDefaultZoneFieldValue = defaultTimeZone;
+            } catch (Exception e) {
+                // If this were to fail, fallback on slow method.
+            }
+        }
+        TimeZone tz = TimeZone.getDefault();
+        defaultTimeZoneCache = tz;
         return tz;
-      }
     }
-    return TimeZone.getTimeZone(timeZone);
-  }
 
-  private static long floorDiv(long x, long y) {
-    long r = x / y;
-    // if the signs are different and modulo not zero, round down
-    if ((x ^ y) < 0 && (r * y != x)) {
-      r--;
+    public boolean hasFastDefaultTimeZone() {
+        return DEFAULT_TIME_ZONE_FIELD != null;
     }
-    return r;
-  }
 
-  private static long floorMod(long x, long y) {
-    return x - floorDiv(x, y) * y;
-  }
+    /**
+     * Returns the SQL Time object matching the given bytes with {@link Oid#TIME} or
+     * {@link Oid#TIMETZ}.
+     *
+     * @param tz    The timezone used when received data is {@link Oid#TIME}, ignored if data already
+     *              contains {@link Oid#TIMETZ}.
+     * @param bytes The binary encoded time value.
+     * @return The parsed time object.
+     * @throws PSQLException If binary format could not be parsed.
+     */
+    public Time toTimeBin(TimeZone tz, byte[] bytes) throws PSQLException {
+        if (bytes.length != 8 && bytes.length != 12) {
+            throw new PSQLException(GT.tr("Unsupported binary encoding of {0}.", "time"),
+                    PSQLState.BAD_DATETIME_FORMAT);
+        }
+
+        long millis;
+        int timeOffset;
+
+        if (usesDouble) {
+            double time = ByteConverter.float8(bytes, 0);
+
+            millis = (long) (time * 1000);
+        } else {
+            long time = ByteConverter.int8(bytes, 0);
+
+            millis = time / 1000;
+        }
+
+        if (bytes.length == 12) {
+            timeOffset = ByteConverter.int4(bytes, 8);
+            timeOffset *= -1000;
+            millis -= timeOffset;
+            return new Time(millis);
+        }
+
+        if (tz == null) {
+            tz = getDefaultTz();
+        }
+
+        // Here be dragons: backend did not provide us the timezone, so we guess the actual point in
+        // time
+        millis = guessTimestamp(millis, tz);
+
+        return convertToTime(millis, tz); // Ensure date part is 1970-01-01
+    }
+
+    /**
+     * Returns the SQL Time object matching the given bytes with {@link Oid#TIME}.
+     *
+     * @param bytes The binary encoded time value.
+     * @return The parsed time object.
+     * @throws PSQLException If binary format could not be parsed.
+     */
+    public LocalTime toLocalTimeBin(byte[] bytes) throws PSQLException {
+        if (bytes.length != 8) {
+            throw new PSQLException(GT.tr("Unsupported binary encoding of {0}.", "time"),
+                    PSQLState.BAD_DATETIME_FORMAT);
+        }
+
+        long micros;
+
+        if (usesDouble) {
+            double seconds = ByteConverter.float8(bytes, 0);
+
+            micros = (long) (seconds * 1000000d);
+        } else {
+            micros = ByteConverter.int8(bytes, 0);
+        }
+
+        return LocalTime.ofNanoOfDay(Math.multiplyExact(micros, 1000L));
+    }
+
+    /**
+     * Returns the SQL Timestamp object matching the given bytes with {@link Oid#TIMESTAMP} or
+     * {@link Oid#TIMESTAMPTZ}.
+     *
+     * @param tz          The timezone used when received data is {@link Oid#TIMESTAMP}, ignored if data
+     *                    already contains {@link Oid#TIMESTAMPTZ}.
+     * @param bytes       The binary encoded timestamp value.
+     * @param timestamptz True if the binary is in GMT.
+     * @return The parsed timestamp object.
+     * @throws PSQLException If binary format could not be parsed.
+     */
+    public Timestamp toTimestampBin(TimeZone tz, byte[] bytes, boolean timestamptz)
+            throws PSQLException {
+
+        ParsedBinaryTimestamp parsedTimestamp = this.toParsedTimestampBin(tz, bytes, timestamptz);
+        if (parsedTimestamp.infinity == Infinity.POSITIVE) {
+            return new Timestamp(PGStatement.DATE_POSITIVE_INFINITY);
+        } else if (parsedTimestamp.infinity == Infinity.NEGATIVE) {
+            return new Timestamp(PGStatement.DATE_NEGATIVE_INFINITY);
+        }
+
+        Timestamp ts = new Timestamp(parsedTimestamp.millis);
+        ts.setNanos(parsedTimestamp.nanos);
+        return ts;
+    }
+
+    private ParsedBinaryTimestamp toParsedTimestampBinPlain(byte[] bytes)
+            throws PSQLException {
+
+        if (bytes.length != 8) {
+            throw new PSQLException(GT.tr("Unsupported binary encoding of {0}.", "timestamp"),
+                    PSQLState.BAD_DATETIME_FORMAT);
+        }
+
+        long secs;
+        int nanos;
+
+        if (usesDouble) {
+            double time = ByteConverter.float8(bytes, 0);
+            if (time == Double.POSITIVE_INFINITY) {
+                ParsedBinaryTimestamp ts = new ParsedBinaryTimestamp();
+                ts.infinity = Infinity.POSITIVE;
+                return ts;
+            } else if (time == Double.NEGATIVE_INFINITY) {
+                ParsedBinaryTimestamp ts = new ParsedBinaryTimestamp();
+                ts.infinity = Infinity.NEGATIVE;
+                return ts;
+            }
+
+            secs = (long) time;
+            nanos = (int) ((time - secs) * 1000000);
+        } else {
+            long time = ByteConverter.int8(bytes, 0);
+
+            // compatibility with text based receiving, not strictly necessary
+            // and can actually be confusing because there are timestamps
+            // that are larger than infinite
+            if (time == Long.MAX_VALUE) {
+                ParsedBinaryTimestamp ts = new ParsedBinaryTimestamp();
+                ts.infinity = Infinity.POSITIVE;
+                return ts;
+            } else if (time == Long.MIN_VALUE) {
+                ParsedBinaryTimestamp ts = new ParsedBinaryTimestamp();
+                ts.infinity = Infinity.NEGATIVE;
+                return ts;
+            }
+
+            secs = time / 1000000;
+            nanos = (int) (time - secs * 1000000);
+        }
+        if (nanos < 0) {
+            secs--;
+            nanos += 1000000;
+        }
+        nanos *= 1000;
+
+        long millis = secs * 1000L;
+
+        ParsedBinaryTimestamp ts = new ParsedBinaryTimestamp();
+        ts.millis = millis;
+        ts.nanos = nanos;
+        return ts;
+    }
+
+    private ParsedBinaryTimestamp toParsedTimestampBin(TimeZone tz, byte[] bytes,
+                                                       boolean timestamptz)
+            throws PSQLException {
+
+        ParsedBinaryTimestamp ts = toParsedTimestampBinPlain(bytes);
+        if (ts.infinity != null) {
+            return ts;
+        }
+
+        long secs = ts.millis / 1000L;
+
+        secs = toJavaSecs(secs);
+        long millis = secs * 1000L;
+        if (!timestamptz) {
+            // Here be dragons: backend did not provide us the timezone, so we guess the actual point in
+            // time
+            millis = guessTimestamp(millis, tz);
+        }
+
+        ts.millis = millis;
+        return ts;
+    }
+
+    private ParsedBinaryTimestamp toProlepticParsedTimestampBin(byte[] bytes)
+            throws PSQLException {
+
+        ParsedBinaryTimestamp ts = toParsedTimestampBinPlain(bytes);
+        if (ts.infinity != null) {
+            return ts;
+        }
+
+        long secs = ts.millis / 1000L;
+
+        // postgres epoch to java epoch
+        secs += PG_EPOCH_DIFF.getSeconds();
+        long millis = secs * 1000L;
+
+        ts.millis = millis;
+        return ts;
+    }
+
+    /**
+     * Returns the local date time object matching the given bytes with {@link Oid#TIMESTAMP} or
+     * {@link Oid#TIMESTAMPTZ}.
+     *
+     * @param bytes The binary encoded local date time value.
+     * @return The parsed local date time object.
+     * @throws PSQLException If binary format could not be parsed.
+     */
+    public LocalDateTime toLocalDateTimeBin(byte[] bytes) throws PSQLException {
+
+        ParsedBinaryTimestamp parsedTimestamp = this.toProlepticParsedTimestampBin(bytes);
+        if (parsedTimestamp.infinity == Infinity.POSITIVE) {
+            return LocalDateTime.MAX;
+        } else if (parsedTimestamp.infinity == Infinity.NEGATIVE) {
+            return LocalDateTime.MIN;
+        }
+
+        // hardcode utc because the backend does not provide us the timezone
+        // Postgres is always UTC
+        return LocalDateTime.ofEpochSecond(parsedTimestamp.millis / 1000L, parsedTimestamp.nanos, ZoneOffset.UTC);
+    }
+
+    /**
+     * Returns the local date time object matching the given bytes with {@link Oid#DATE} or
+     * {@link Oid#TIMESTAMP}.
+     *
+     * @param bytes The binary encoded local date value.
+     * @return The parsed local date object.
+     * @throws PSQLException If binary format could not be parsed.
+     */
+    public LocalDate toLocalDateBin(byte[] bytes) throws PSQLException {
+        if (bytes.length != 4) {
+            throw new PSQLException(GT.tr("Unsupported binary encoding of {0}.", "date"),
+                    PSQLState.BAD_DATETIME_FORMAT);
+        }
+        int days = ByteConverter.int4(bytes, 0);
+        if (days == Integer.MAX_VALUE) {
+            return LocalDate.MAX;
+        } else if (days == Integer.MIN_VALUE) {
+            return LocalDate.MIN;
+        }
+        // adapt from different Postgres Epoch and convert to LocalDate:
+        return LocalDate.ofEpochDay(PG_EPOCH_DIFF.toDays() + days);
+    }
+
+    /**
+     * <p>Given a UTC timestamp {@code millis} finds another point in time that is rendered in given time
+     * zone {@code tz} exactly as "millis in UTC".</p>
+     *
+     * <p>For instance, given 7 Jan 16:00 UTC and tz=GMT+02:00 it returns 7 Jan 14:00 UTC == 7 Jan 16:00
+     * GMT+02:00 Note that is not trivial for timestamps near DST change. For such cases, we rely on
+     * {@link Calendar} to figure out the proper timestamp.</p>
+     *
+     * @param millis source timestamp
+     * @param tz     desired time zone
+     * @return timestamp that would be rendered in {@code tz} like {@code millis} in UTC
+     */
+    private long guessTimestamp(long millis, TimeZone tz) {
+        if (tz == null) {
+            // If client did not provide us with time zone, we use system default time zone
+            tz = getDefaultTz();
+        }
+        // The story here:
+        // Backend provided us with something like '2015-10-04 13:40' and it did NOT provide us with a
+        // time zone.
+        // On top of that, user asked us to treat the timestamp as if it were in GMT+02:00.
+        //
+        // The code below creates such a timestamp that is rendered as '2015-10-04 13:40 GMT+02:00'
+        // In other words, its UTC value should be 11:40 UTC == 13:40 GMT+02:00.
+        // It is not sufficient to just subtract offset as you might cross DST change as you subtract.
+        //
+        // For instance, on 2000-03-26 02:00:00 Moscow went to DST, thus local time became 03:00:00
+        // Suppose we deal with 2000-03-26 02:00:01
+        // If you subtract offset from the timestamp, the time will be "an hour behind" since
+        // "just a couple of hours ago the OFFSET was different"
+        //
+        // To make a long story short: we have UTC timestamp that looks like "2000-03-26 02:00:01" when
+        // rendered in UTC tz.
+        // We want to know another timestamp that will look like "2000-03-26 02:00:01" in Europe/Moscow
+        // time zone.
+
+        if (isSimpleTimeZone(tz.getID())) {
+            // For well-known non-DST time zones, just subtract offset
+            return millis - tz.getRawOffset();
+        }
+        // For all the other time zones, enjoy debugging Calendar API
+        // Here we do a straight-forward implementation that splits original timestamp into pieces and
+        // composes it back.
+        // Note: cal.setTimeZone alone is not sufficient as it would alter hour (it will try to keep the
+        // same time instant value)
+        Calendar cal = calendarWithUserTz;
+        cal.setTimeZone(UTC_TIMEZONE);
+        cal.setTimeInMillis(millis);
+        int era = cal.get(Calendar.ERA);
+        int year = cal.get(Calendar.YEAR);
+        int month = cal.get(Calendar.MONTH);
+        int day = cal.get(Calendar.DAY_OF_MONTH);
+        int hour = cal.get(Calendar.HOUR_OF_DAY);
+        int min = cal.get(Calendar.MINUTE);
+        int sec = cal.get(Calendar.SECOND);
+        int ms = cal.get(Calendar.MILLISECOND);
+        cal.setTimeZone(tz);
+        cal.set(Calendar.ERA, era);
+        cal.set(Calendar.YEAR, year);
+        cal.set(Calendar.MONTH, month);
+        cal.set(Calendar.DAY_OF_MONTH, day);
+        cal.set(Calendar.HOUR_OF_DAY, hour);
+        cal.set(Calendar.MINUTE, min);
+        cal.set(Calendar.SECOND, sec);
+        cal.set(Calendar.MILLISECOND, ms);
+        return cal.getTimeInMillis();
+    }
+
+    /**
+     * Extracts the date part from a timestamp.
+     *
+     * @param millis The timestamp from which to extract the date.
+     * @param tz     The time zone of the date.
+     * @return The extracted date.
+     */
+    public Date convertToDate(long millis, TimeZone tz) {
+
+        // no adjustments for the infinity hack values
+        if (millis <= PGStatement.DATE_NEGATIVE_INFINITY
+                || millis >= PGStatement.DATE_POSITIVE_INFINITY) {
+            return new Date(millis);
+        }
+        if (tz == null) {
+            tz = getDefaultTz();
+        }
+        if (isSimpleTimeZone(tz.getID())) {
+            // Truncate to 00:00 of the day.
+            // Suppose the input date is 7 Jan 15:40 GMT+02:00 (that is 13:40 UTC)
+            // We want it to become 7 Jan 00:00 GMT+02:00
+            // 1) Make sure millis becomes 15:40 in UTC, so add offset
+            int offset = tz.getRawOffset();
+            millis += offset;
+            // 2) Truncate hours, minutes, etc. Day is always 86400 seconds, no matter what leap seconds
+            // are
+            millis = floorDiv(millis, ONEDAY) * ONEDAY;
+            // 3) Now millis is 7 Jan 00:00 UTC, however we need that in GMT+02:00, so subtract some
+            // offset
+            millis -= offset;
+            // Now we have brand-new 7 Jan 00:00 GMT+02:00
+            return new Date(millis);
+        }
+
+        Calendar cal = calendarWithUserTz;
+        cal.setTimeZone(tz);
+        cal.setTimeInMillis(millis);
+        cal.set(Calendar.HOUR_OF_DAY, 0);
+        cal.set(Calendar.MINUTE, 0);
+        cal.set(Calendar.SECOND, 0);
+        cal.set(Calendar.MILLISECOND, 0);
+
+        return new Date(cal.getTimeInMillis());
+    }
+
+    /**
+     * Extracts the time part from a timestamp. This method ensures the date part of output timestamp
+     * looks like 1970-01-01 in given timezone.
+     *
+     * @param millis The timestamp from which to extract the time.
+     * @param tz     timezone to use.
+     * @return The extracted time.
+     */
+    public Time convertToTime(long millis, TimeZone tz) {
+        if (tz == null) {
+            tz = getDefaultTz();
+        }
+        if (isSimpleTimeZone(tz.getID())) {
+            // Leave just time part of the day.
+            // Suppose the input date is 2015 7 Jan 15:40 GMT+02:00 (that is 13:40 UTC)
+            // We want it to become 1970 1 Jan 15:40 GMT+02:00
+            // 1) Make sure millis becomes 15:40 in UTC, so add offset
+            int offset = tz.getRawOffset();
+            millis += offset;
+            // 2) Truncate year, month, day. Day is always 86400 seconds, no matter what leap seconds are
+            millis = floorMod(millis, ONEDAY);
+            // 3) Now millis is 1970 1 Jan 15:40 UTC, however we need that in GMT+02:00, so subtract some
+            // offset
+            millis -= offset;
+            // Now we have brand-new 1970 1 Jan 15:40 GMT+02:00
+            return new Time(millis);
+        }
+        Calendar cal = calendarWithUserTz;
+        cal.setTimeZone(tz);
+        cal.setTimeInMillis(millis);
+        cal.set(Calendar.ERA, GregorianCalendar.AD);
+        cal.set(Calendar.YEAR, 1970);
+        cal.set(Calendar.MONTH, 0);
+        cal.set(Calendar.DAY_OF_MONTH, 1);
+
+        return new Time(cal.getTimeInMillis());
+    }
+
+    /**
+     * Returns the given time value as String matching what the current postgresql server would send
+     * in text mode.
+     *
+     * @param time         time value
+     * @param withTimeZone whether timezone should be added
+     * @return given time value as String
+     */
+    public String timeToString(java.util.Date time, boolean withTimeZone) {
+        Calendar cal = null;
+        if (withTimeZone) {
+            cal = calendarWithUserTz;
+            cal.setTimeZone(timeZoneProvider.get());
+        }
+        if (time instanceof Timestamp) {
+            return toString(cal, (Timestamp) time, withTimeZone);
+        }
+        if (time instanceof Time) {
+            return toString(cal, (Time) time, withTimeZone);
+        }
+        return toString(cal, (Date) time, withTimeZone);
+    }
+
+    /**
+     * Converts the SQL Date to binary representation for {@link Oid#DATE}.
+     *
+     * @param tz    The timezone used.
+     * @param bytes The output buffer that receives the binary encoded date value.
+     * @param value The date value to encode.
+     * @throws PSQLException If the value could not be binary encoded.
+     */
+    public void toBinDate(TimeZone tz, byte[] bytes, Date value) throws PSQLException {
+        long millis = value.getTime();
+
+        if (tz == null) {
+            tz = getDefaultTz();
+        }
+        // It "getOffset" is UNTESTED
+        // See org.postgresql.jdbc.AbstractJdbc2Statement.setDate(int, java.sql.Date,
+        // java.util.Calendar)
+        // The problem is we typically do not know for sure what is the exact required date/timestamp
+        // type
+        // Thus pgjdbc sticks to text transfer.
+        millis += tz.getOffset(millis);
+
+        long secs = toPgSecs(millis / 1000);
+        ByteConverter.int4(bytes, 0, (int) (secs / 86400));
+    }
+
+    enum Infinity {
+        POSITIVE,
+        NEGATIVE
+    }
+
+    private static class ParsedTimestamp {
+        boolean hasDate;
+        int era = GregorianCalendar.AD;
+        int year = 1970;
+        int month = 1;
+
+        boolean hasTime;
+        int day = 1;
+        int hour;
+        int minute;
+        int second;
+        int nanos;
+
+        boolean hasOffset;
+        ZoneOffset offset = ZoneOffset.UTC;
+    }
+
+    private static class ParsedBinaryTimestamp {
+        Infinity infinity;
+        long millis;
+        int nanos;
+    }
 
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/TypeInfoCache.java b/pgjdbc/src/main/java/org/postgresql/jdbc/TypeInfoCache.java
index 615b653..9f7f42a 100644
--- a/pgjdbc/src/main/java/org/postgresql/jdbc/TypeInfoCache.java
+++ b/pgjdbc/src/main/java/org/postgresql/jdbc/TypeInfoCache.java
@@ -33,1063 +33,1053 @@ import java.util.logging.Logger;
 @SuppressWarnings("try")
 public class TypeInfoCache implements TypeInfo {
 
-  private static final Logger LOGGER = Logger.getLogger(TypeInfoCache.class.getName());
+    private static final Logger LOGGER = Logger.getLogger(TypeInfoCache.class.getName());
+    // basic pg types info:
+    // 0 - type name
+    // 1 - type oid
+    // 2 - sql type
+    // 3 - java class
+    // 4 - array type oid
+    private static final Object[][] types = {
+            {"int2", Oid.INT2, Types.SMALLINT, "java.lang.Integer", Oid.INT2_ARRAY},
+            {"int4", Oid.INT4, Types.INTEGER, "java.lang.Integer", Oid.INT4_ARRAY},
+            {"oid", Oid.OID, Types.BIGINT, "java.lang.Long", Oid.OID_ARRAY},
+            {"int8", Oid.INT8, Types.BIGINT, "java.lang.Long", Oid.INT8_ARRAY},
+            {"money", Oid.MONEY, Types.DOUBLE, "java.lang.Double", Oid.MONEY_ARRAY},
+            {"numeric", Oid.NUMERIC, Types.NUMERIC, "java.math.BigDecimal", Oid.NUMERIC_ARRAY},
+            {"float4", Oid.FLOAT4, Types.REAL, "java.lang.Float", Oid.FLOAT4_ARRAY},
+            {"float8", Oid.FLOAT8, Types.DOUBLE, "java.lang.Double", Oid.FLOAT8_ARRAY},
+            {"char", Oid.CHAR, Types.CHAR, "java.lang.String", Oid.CHAR_ARRAY},
+            {"bpchar", Oid.BPCHAR, Types.CHAR, "java.lang.String", Oid.BPCHAR_ARRAY},
+            {"varchar", Oid.VARCHAR, Types.VARCHAR, "java.lang.String", Oid.VARCHAR_ARRAY},
+            {"varbit", Oid.VARBIT, Types.OTHER, "java.lang.String", Oid.VARBIT_ARRAY},
+            {"text", Oid.TEXT, Types.VARCHAR, "java.lang.String", Oid.TEXT_ARRAY},
+            {"name", Oid.NAME, Types.VARCHAR, "java.lang.String", Oid.NAME_ARRAY},
+            {"bytea", Oid.BYTEA, Types.BINARY, "[B", Oid.BYTEA_ARRAY},
+            {"bool", Oid.BOOL, Types.BIT, "java.lang.Boolean", Oid.BOOL_ARRAY},
+            {"bit", Oid.BIT, Types.BIT, "java.lang.Boolean", Oid.BIT_ARRAY},
+            {"date", Oid.DATE, Types.DATE, "java.sql.Date", Oid.DATE_ARRAY},
+            {"time", Oid.TIME, Types.TIME, "java.sql.Time", Oid.TIME_ARRAY},
+            {"timetz", Oid.TIMETZ, Types.TIME, "java.sql.Time", Oid.TIMETZ_ARRAY},
+            {"timestamp", Oid.TIMESTAMP, Types.TIMESTAMP, "java.sql.Timestamp", Oid.TIMESTAMP_ARRAY},
+            {"timestamptz", Oid.TIMESTAMPTZ, Types.TIMESTAMP, "java.sql.Timestamp",
+                    Oid.TIMESTAMPTZ_ARRAY},
+            {"refcursor", Oid.REF_CURSOR, Types.REF_CURSOR, "java.sql.ResultSet", Oid.REF_CURSOR_ARRAY},
+            {"json", Oid.JSON, Types.OTHER, "org.postgresql.util.PGobject", Oid.JSON_ARRAY},
+            {"point", Oid.POINT, Types.OTHER, "org.postgresql.geometric.PGpoint", Oid.POINT_ARRAY},
+            {"box", Oid.BOX, Types.OTHER, "org.postgresql.geometric.PGBox", Oid.BOX_ARRAY}
+    };
+    /**
+     * PG maps several aliases to real type names. When we do queries against pg_catalog, we must use
+     * the real type, not an alias, so use this mapping.
+     * <p>
+     * Additional values used at runtime (including case variants) will be added to the map.
+     * </p>
+     */
+    private static final ConcurrentMap<String, String> TYPE_ALIASES = new ConcurrentHashMap<>(30);
 
-  // pgname (String) -> java.sql.Types (Integer)
-  private final Map<String, Integer> pgNameToSQLType;
-
-  private final Map<Integer, Integer> oidToSQLType;
-
-  // pgname (String) -> java class name (String)
-  // ie "text" -> "java.lang.String"
-  private final Map<String, String> pgNameToJavaClass;
-
-  // oid (Integer) -> pgname (String)
-  private final Map<Integer, String> oidToPgName;
-  // pgname (String) -> oid (Integer)
-  private final Map<String, Integer> pgNameToOid;
-
-  private final Map<String, Integer> javaArrayTypeToOid;
-
-  // pgname (String) -> extension pgobject (Class)
-  private final Map<String, Class<? extends PGobject>> pgNameToPgObject;
-
-  // type array oid -> base type's oid
-  private final Map<Integer, Integer> pgArrayToPgType;
-
-  // array type oid -> base type array element delimiter
-  private final Map<Integer, Character> arrayOidToDelimiter;
-
-  private final BaseConnection conn;
-  private final int unknownLength;
-  private PreparedStatement getOidStatementSimple;
-  private PreparedStatement getOidStatementComplexNonArray;
-  private PreparedStatement getOidStatementComplexArray;
-  private PreparedStatement getNameStatement;
-  private PreparedStatement getArrayElementOidStatement;
-  private PreparedStatement getArrayDelimiterStatement;
-  private PreparedStatement getTypeInfoStatement;
-  private PreparedStatement getAllTypeInfoStatement;
-  private final ResourceLock lock = new ResourceLock();
-
-  // basic pg types info:
-  // 0 - type name
-  // 1 - type oid
-  // 2 - sql type
-  // 3 - java class
-  // 4 - array type oid
-  private static final Object[][] types = {
-      {"int2", Oid.INT2, Types.SMALLINT, "java.lang.Integer", Oid.INT2_ARRAY},
-      {"int4", Oid.INT4, Types.INTEGER, "java.lang.Integer", Oid.INT4_ARRAY},
-      {"oid", Oid.OID, Types.BIGINT, "java.lang.Long", Oid.OID_ARRAY},
-      {"int8", Oid.INT8, Types.BIGINT, "java.lang.Long", Oid.INT8_ARRAY},
-      {"money", Oid.MONEY, Types.DOUBLE, "java.lang.Double", Oid.MONEY_ARRAY},
-      {"numeric", Oid.NUMERIC, Types.NUMERIC, "java.math.BigDecimal", Oid.NUMERIC_ARRAY},
-      {"float4", Oid.FLOAT4, Types.REAL, "java.lang.Float", Oid.FLOAT4_ARRAY},
-      {"float8", Oid.FLOAT8, Types.DOUBLE, "java.lang.Double", Oid.FLOAT8_ARRAY},
-      {"char", Oid.CHAR, Types.CHAR, "java.lang.String", Oid.CHAR_ARRAY},
-      {"bpchar", Oid.BPCHAR, Types.CHAR, "java.lang.String", Oid.BPCHAR_ARRAY},
-      {"varchar", Oid.VARCHAR, Types.VARCHAR, "java.lang.String", Oid.VARCHAR_ARRAY},
-      {"varbit", Oid.VARBIT, Types.OTHER, "java.lang.String", Oid.VARBIT_ARRAY},
-      {"text", Oid.TEXT, Types.VARCHAR, "java.lang.String", Oid.TEXT_ARRAY},
-      {"name", Oid.NAME, Types.VARCHAR, "java.lang.String", Oid.NAME_ARRAY},
-      {"bytea", Oid.BYTEA, Types.BINARY, "[B", Oid.BYTEA_ARRAY},
-      {"bool", Oid.BOOL, Types.BIT, "java.lang.Boolean", Oid.BOOL_ARRAY},
-      {"bit", Oid.BIT, Types.BIT, "java.lang.Boolean", Oid.BIT_ARRAY},
-      {"date", Oid.DATE, Types.DATE, "java.sql.Date", Oid.DATE_ARRAY},
-      {"time", Oid.TIME, Types.TIME, "java.sql.Time", Oid.TIME_ARRAY},
-      {"timetz", Oid.TIMETZ, Types.TIME, "java.sql.Time", Oid.TIMETZ_ARRAY},
-      {"timestamp", Oid.TIMESTAMP, Types.TIMESTAMP, "java.sql.Timestamp", Oid.TIMESTAMP_ARRAY},
-      {"timestamptz", Oid.TIMESTAMPTZ, Types.TIMESTAMP, "java.sql.Timestamp",
-          Oid.TIMESTAMPTZ_ARRAY},
-      {"refcursor", Oid.REF_CURSOR, Types.REF_CURSOR, "java.sql.ResultSet", Oid.REF_CURSOR_ARRAY},
-      {"json", Oid.JSON, Types.OTHER, "org.postgresql.util.PGobject", Oid.JSON_ARRAY},
-      {"point", Oid.POINT, Types.OTHER, "org.postgresql.geometric.PGpoint", Oid.POINT_ARRAY},
-      {"box", Oid.BOX, Types.OTHER, "org.postgresql.geometric.PGBox", Oid.BOX_ARRAY}
-  };
-
-  /**
-   * PG maps several alias to real type names. When we do queries against pg_catalog, we must use
-   * the real type, not an alias, so use this mapping.
-   * <p>
-   * Additional values used at runtime (including case variants) will be added to the map.
-   * </p>
-   */
-  private static final ConcurrentMap<String, String> TYPE_ALIASES = new ConcurrentHashMap<>(30);
-
-  static {
-    TYPE_ALIASES.put("bool", "bool");
-    TYPE_ALIASES.put("boolean", "bool");
-    TYPE_ALIASES.put("smallint", "int2");
-    TYPE_ALIASES.put("int2", "int2");
-    TYPE_ALIASES.put("int", "int4");
-    TYPE_ALIASES.put("integer", "int4");
-    TYPE_ALIASES.put("int4", "int4");
-    TYPE_ALIASES.put("long", "int8");
-    TYPE_ALIASES.put("int8", "int8");
-    TYPE_ALIASES.put("bigint", "int8");
-    TYPE_ALIASES.put("float", "float8");
-    TYPE_ALIASES.put("real", "float4");
-    TYPE_ALIASES.put("float4", "float4");
-    TYPE_ALIASES.put("double", "float8");
-    TYPE_ALIASES.put("double precision", "float8");
-    TYPE_ALIASES.put("float8", "float8");
-    TYPE_ALIASES.put("decimal", "numeric");
-    TYPE_ALIASES.put("numeric", "numeric");
-    TYPE_ALIASES.put("character varying", "varchar");
-    TYPE_ALIASES.put("varchar", "varchar");
-    TYPE_ALIASES.put("time without time zone", "time");
-    TYPE_ALIASES.put("time", "time");
-    TYPE_ALIASES.put("time with time zone", "timetz");
-    TYPE_ALIASES.put("timetz", "timetz");
-    TYPE_ALIASES.put("timestamp without time zone", "timestamp");
-    TYPE_ALIASES.put("timestamp", "timestamp");
-    TYPE_ALIASES.put("timestamp with time zone", "timestamptz");
-    TYPE_ALIASES.put("timestamptz", "timestamptz");
-  }
-
-  @SuppressWarnings("this-escape")
-  public TypeInfoCache(BaseConnection conn, int unknownLength) {
-    this.conn = conn;
-    this.unknownLength = unknownLength;
-    oidToPgName = new HashMap<>((int) Math.round(types.length * 1.5));
-    pgNameToOid = new HashMap<>((int) Math.round(types.length * 1.5));
-    javaArrayTypeToOid = new HashMap<>((int) Math.round(types.length * 1.5));
-    pgNameToJavaClass = new HashMap<>((int) Math.round(types.length * 1.5));
-    pgNameToPgObject = new HashMap<>((int) Math.round(types.length * 1.5));
-    pgArrayToPgType = new HashMap<>((int) Math.round(types.length * 1.5));
-    arrayOidToDelimiter = new HashMap<>((int) Math.round(types.length * 2.5));
-
-    // needs to be synchronized because the iterator is returned
-    // from getPGTypeNamesWithSQLTypes()
-    pgNameToSQLType = Collections.synchronizedMap(new HashMap<String, Integer>((int) Math.round(types.length * 1.5)));
-    oidToSQLType = Collections.synchronizedMap(new HashMap<Integer, Integer>((int) Math.round(types.length * 1.5)));
-
-    for (Object[] type : types) {
-      String pgTypeName = (String) type[0];
-      Integer oid = (Integer) type[1];
-      Integer sqlType = (Integer) type[2];
-      String javaClass = (String) type[3];
-      Integer arrayOid = (Integer) type[4];
-
-      addCoreType(pgTypeName, oid, sqlType, javaClass, arrayOid);
+    static {
+        TYPE_ALIASES.put("bool", "bool");
+        TYPE_ALIASES.put("boolean", "bool");
+        TYPE_ALIASES.put("smallint", "int2");
+        TYPE_ALIASES.put("int2", "int2");
+        TYPE_ALIASES.put("int", "int4");
+        TYPE_ALIASES.put("integer", "int4");
+        TYPE_ALIASES.put("int4", "int4");
+        TYPE_ALIASES.put("long", "int8");
+        TYPE_ALIASES.put("int8", "int8");
+        TYPE_ALIASES.put("bigint", "int8");
+        TYPE_ALIASES.put("float", "float8");
+        TYPE_ALIASES.put("real", "float4");
+        TYPE_ALIASES.put("float4", "float4");
+        TYPE_ALIASES.put("double", "float8");
+        TYPE_ALIASES.put("double precision", "float8");
+        TYPE_ALIASES.put("float8", "float8");
+        TYPE_ALIASES.put("decimal", "numeric");
+        TYPE_ALIASES.put("numeric", "numeric");
+        TYPE_ALIASES.put("character varying", "varchar");
+        TYPE_ALIASES.put("varchar", "varchar");
+        TYPE_ALIASES.put("time without time zone", "time");
+        TYPE_ALIASES.put("time", "time");
+        TYPE_ALIASES.put("time with time zone", "timetz");
+        TYPE_ALIASES.put("timetz", "timetz");
+        TYPE_ALIASES.put("timestamp without time zone", "timestamp");
+        TYPE_ALIASES.put("timestamp", "timestamp");
+        TYPE_ALIASES.put("timestamp with time zone", "timestamptz");
+        TYPE_ALIASES.put("timestamptz", "timestamptz");
     }
 
-    pgNameToJavaClass.put("hstore", Map.class.getName());
-  }
+    // pgname (String) -> java.sql.Types (Integer)
+    private final Map<String, Integer> pgNameToSQLType;
+    private final Map<Integer, Integer> oidToSQLType;
+    // pgname (String) -> java class name (String)
+    // ie "text" -> "java.lang.String"
+    private final Map<String, String> pgNameToJavaClass;
+    // oid (Integer) -> pgname (String)
+    private final Map<Integer, String> oidToPgName;
+    // pgname (String) -> oid (Integer)
+    private final Map<String, Integer> pgNameToOid;
+    private final Map<String, Integer> javaArrayTypeToOid;
+    // pgname (String) -> extension pgobject (Class)
+    private final Map<String, Class<? extends PGobject>> pgNameToPgObject;
+    // type array oid -> base type's oid
+    private final Map<Integer, Integer> pgArrayToPgType;
+    // array type oid -> base type array element delimiter
+    private final Map<Integer, Character> arrayOidToDelimiter;
+    private final BaseConnection conn;
+    private final int unknownLength;
+    private final ResourceLock lock = new ResourceLock();
+    private PreparedStatement getOidStatementSimple;
+    private PreparedStatement getOidStatementComplexNonArray;
+    private PreparedStatement getOidStatementComplexArray;
+    private PreparedStatement getNameStatement;
+    private PreparedStatement getArrayElementOidStatement;
+    private PreparedStatement getArrayDelimiterStatement;
+    private PreparedStatement getTypeInfoStatement;
+    private PreparedStatement getAllTypeInfoStatement;
 
-  @Override
-  public void addCoreType(String pgTypeName, Integer oid, Integer sqlType,
-      String javaClass, Integer arrayOid) {
-    try (ResourceLock ignore = lock.obtain()) {
-      pgNameToJavaClass.put(pgTypeName, javaClass);
-      pgNameToOid.put(pgTypeName, oid);
-      oidToPgName.put(oid, pgTypeName);
-      javaArrayTypeToOid.put(javaClass, arrayOid);
-      pgArrayToPgType.put(arrayOid, oid);
-      pgNameToSQLType.put(pgTypeName, sqlType);
-      oidToSQLType.put(oid, sqlType);
+    @SuppressWarnings("this-escape")
+    public TypeInfoCache(BaseConnection conn, int unknownLength) {
+        this.conn = conn;
+        this.unknownLength = unknownLength;
+        oidToPgName = new HashMap<>((int) Math.round(types.length * 1.5));
+        pgNameToOid = new HashMap<>((int) Math.round(types.length * 1.5));
+        javaArrayTypeToOid = new HashMap<>((int) Math.round(types.length * 1.5));
+        pgNameToJavaClass = new HashMap<>((int) Math.round(types.length * 1.5));
+        pgNameToPgObject = new HashMap<>((int) Math.round(types.length * 1.5));
+        pgArrayToPgType = new HashMap<>((int) Math.round(types.length * 1.5));
+        arrayOidToDelimiter = new HashMap<>((int) Math.round(types.length * 2.5));
 
-      // Currently we hardcode all core types array delimiter
-      // to a comma. In a stock install the only exception is
-      // the box datatype and it's not a JDBC core type.
-      //
-      Character delim = ',';
-      if ("box".equals(pgTypeName)) {
-        delim = ';';
-      }
-      arrayOidToDelimiter.put(oid, delim);
-      arrayOidToDelimiter.put(arrayOid, delim);
+        // needs to be synchronized because the iterator is returned
+        // from getPGTypeNamesWithSQLTypes()
+        pgNameToSQLType = Collections.synchronizedMap(new HashMap<String, Integer>((int) Math.round(types.length * 1.5)));
+        oidToSQLType = Collections.synchronizedMap(new HashMap<Integer, Integer>((int) Math.round(types.length * 1.5)));
 
-      String pgArrayTypeName = pgTypeName + "[]";
-      pgNameToJavaClass.put(pgArrayTypeName, "java.sql.Array");
-      pgNameToSQLType.put(pgArrayTypeName, Types.ARRAY);
-      oidToSQLType.put(arrayOid, Types.ARRAY);
-      pgNameToOid.put(pgArrayTypeName, arrayOid);
-      pgArrayTypeName = "_" + pgTypeName;
-      if (!pgNameToJavaClass.containsKey(pgArrayTypeName)) {
-        pgNameToJavaClass.put(pgArrayTypeName, "java.sql.Array");
-        pgNameToSQLType.put(pgArrayTypeName, Types.ARRAY);
-        pgNameToOid.put(pgArrayTypeName, arrayOid);
-        oidToPgName.put(arrayOid, pgArrayTypeName);
-      }
+        for (Object[] type : types) {
+            String pgTypeName = (String) type[0];
+            Integer oid = (Integer) type[1];
+            Integer sqlType = (Integer) type[2];
+            String javaClass = (String) type[3];
+            Integer arrayOid = (Integer) type[4];
+
+            addCoreType(pgTypeName, oid, sqlType, javaClass, arrayOid);
+        }
+
+        pgNameToJavaClass.put("hstore", Map.class.getName());
     }
-  }
 
-  @Override
-  public void addDataType(String type, Class<? extends PGobject> klass)
-      throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      pgNameToPgObject.put(type, klass);
-      pgNameToJavaClass.put(type, klass.getName());
+    @Override
+    public void addCoreType(String pgTypeName, Integer oid, Integer sqlType,
+                            String javaClass, Integer arrayOid) {
+        try (ResourceLock ignore = lock.obtain()) {
+            pgNameToJavaClass.put(pgTypeName, javaClass);
+            pgNameToOid.put(pgTypeName, oid);
+            oidToPgName.put(oid, pgTypeName);
+            javaArrayTypeToOid.put(javaClass, arrayOid);
+            pgArrayToPgType.put(arrayOid, oid);
+            pgNameToSQLType.put(pgTypeName, sqlType);
+            oidToSQLType.put(oid, sqlType);
+
+            // Currently we hardcode all core types array delimiter
+            // to a comma. In a stock install the only exception is
+            // the box datatype and it's not a JDBC core type.
+            //
+            Character delim = ',';
+            if ("box".equals(pgTypeName)) {
+                delim = ';';
+            }
+            arrayOidToDelimiter.put(oid, delim);
+            arrayOidToDelimiter.put(arrayOid, delim);
+
+            String pgArrayTypeName = pgTypeName + "[]";
+            pgNameToJavaClass.put(pgArrayTypeName, "java.sql.Array");
+            pgNameToSQLType.put(pgArrayTypeName, Types.ARRAY);
+            oidToSQLType.put(arrayOid, Types.ARRAY);
+            pgNameToOid.put(pgArrayTypeName, arrayOid);
+            pgArrayTypeName = "_" + pgTypeName;
+            if (!pgNameToJavaClass.containsKey(pgArrayTypeName)) {
+                pgNameToJavaClass.put(pgArrayTypeName, "java.sql.Array");
+                pgNameToSQLType.put(pgArrayTypeName, Types.ARRAY);
+                pgNameToOid.put(pgArrayTypeName, arrayOid);
+                oidToPgName.put(arrayOid, pgArrayTypeName);
+            }
+        }
     }
-  }
 
-  @Override
-  public Iterator<String> getPGTypeNamesWithSQLTypes() {
-    return pgNameToSQLType.keySet().iterator();
-  }
-
-  @Override
-  public Iterator<Integer> getPGTypeOidsWithSQLTypes() {
-    return oidToSQLType.keySet().iterator();
-  }
-
-  private String getSQLTypeQuery(boolean typoidParam) {
-    // There's no great way of telling what's an array type.
-    // People can name their own types starting with _.
-    // Other types use typelem that aren't actually arrays, like box.
-    //
-    // in case of multiple records (in different schemas) choose the one from the current
-    // schema,
-    // otherwise take the last version of a type that is at least more deterministic then before
-    // (keeping old behaviour of finding types, that should not be found without correct search
-    // path)
-    StringBuilder sql = new StringBuilder();
-    sql.append("SELECT typinput='pg_catalog.array_in'::regproc as is_array, typtype, typname, pg_type.oid ");
-    sql.append("  FROM pg_catalog.pg_type ");
-    sql.append("  LEFT JOIN (select ns.oid as nspoid, ns.nspname, r.r ");
-    sql.append("          from pg_namespace as ns ");
-    // -- go with older way of unnesting array to be compatible with 8.0
-    sql.append("          join ( select s.r, (current_schemas(false))[s.r] as nspname ");
-    sql.append("                   from generate_series(1, array_upper(current_schemas(false), 1)) as s(r) ) as r ");
-    sql.append("         using ( nspname ) ");
-    sql.append("       ) as sp ");
-    sql.append("    ON sp.nspoid = typnamespace ");
-    if (typoidParam) {
-      sql.append(" WHERE pg_type.oid = ? ");
+    @Override
+    public void addDataType(String type, Class<? extends PGobject> klass)
+            throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            pgNameToPgObject.put(type, klass);
+            pgNameToJavaClass.put(type, klass.getName());
+        }
     }
-    sql.append(" ORDER BY sp.r, pg_type.oid DESC;");
-    return sql.toString();
-  }
 
-  private int getSQLTypeFromQueryResult(ResultSet rs) throws SQLException {
-    Integer type = null;
-    boolean isArray = rs.getBoolean("is_array");
-    String typtype = rs.getString("typtype");
-    if (isArray) {
-      type = Types.ARRAY;
-    } else if ("c".equals(typtype)) {
-      type = Types.STRUCT;
-    } else if ("d".equals(typtype)) {
-      type = Types.DISTINCT;
-    } else if ("e".equals(typtype)) {
-      type = Types.VARCHAR;
+    @Override
+    public Iterator<String> getPGTypeNamesWithSQLTypes() {
+        return pgNameToSQLType.keySet().iterator();
     }
-    if (type == null) {
-      type = Types.OTHER;
-    }
-    return type;
-  }
 
-  private PreparedStatement prepareGetAllTypeInfoStatement() throws SQLException {
-    PreparedStatement getAllTypeInfoStatement = this.getAllTypeInfoStatement;
-    if (getAllTypeInfoStatement == null) {
-      getAllTypeInfoStatement = conn.prepareStatement(getSQLTypeQuery(false));
-      this.getAllTypeInfoStatement = getAllTypeInfoStatement;
+    @Override
+    public Iterator<Integer> getPGTypeOidsWithSQLTypes() {
+        return oidToSQLType.keySet().iterator();
     }
-    return getAllTypeInfoStatement;
-  }
 
-  public void cacheSQLTypes() throws SQLException {
-    LOGGER.log(Level.FINEST, "caching all SQL typecodes");
-    PreparedStatement getAllTypeInfoStatement = prepareGetAllTypeInfoStatement();
-    // Go through BaseStatement to avoid transaction start.
-    if (!((BaseStatement) getAllTypeInfoStatement)
-        .executeWithFlags(QueryExecutor.QUERY_SUPPRESS_BEGIN)) {
-      throw new PSQLException(GT.tr("No results were returned by the query."), PSQLState.NO_DATA);
+    private String getSQLTypeQuery(boolean typoidParam) {
+        // There's no great way of telling what's an array type.
+        // People can name their own types starting with _.
+        // Other types use typelem that aren't actually arrays, like box.
+        //
+        // in case of multiple records (in different schemas) choose the one from the current
+        // schema,
+        // otherwise take the last version of a type that is at least more deterministic than before
+        // (keeping old behaviour of finding types, that should not be found without correct search
+        // path)
+        StringBuilder sql = new StringBuilder();
+        sql.append("SELECT typinput='pg_catalog.array_in'::regproc as is_array, typtype, typname, pg_type.oid ");
+        sql.append("  FROM pg_catalog.pg_type ");
+        sql.append("  LEFT JOIN (select ns.oid as nspoid, ns.nspname, r.r ");
+        sql.append("          from pg_namespace as ns ");
+        // -- go with older way of unnesting array to be compatible with 8.0
+        sql.append("          join ( select s.r, (current_schemas(false))[s.r] as nspname ");
+        sql.append("                   from generate_series(1, array_upper(current_schemas(false), 1)) as s(r) ) as r ");
+        sql.append("         using ( nspname ) ");
+        sql.append("       ) as sp ");
+        sql.append("    ON sp.nspoid = typnamespace ");
+        if (typoidParam) {
+            sql.append(" WHERE pg_type.oid = ? ");
+        }
+        sql.append(" ORDER BY sp.r, pg_type.oid DESC;");
+        return sql.toString();
     }
-    ResultSet rs = getAllTypeInfoStatement.getResultSet();
-    while (rs.next()) {
-      String typeName = rs.getString("typname");
-      Integer type = getSQLTypeFromQueryResult(rs);
-      if (!pgNameToSQLType.containsKey(typeName)) {
-        pgNameToSQLType.put(typeName, type);
-      }
 
-      Integer typeOid = longOidToInt(rs.getLong("oid"));
-      if (!oidToSQLType.containsKey(typeOid)) {
-        oidToSQLType.put(typeOid, type);
-      }
+    private int getSQLTypeFromQueryResult(ResultSet rs) throws SQLException {
+        Integer type = null;
+        boolean isArray = rs.getBoolean("is_array");
+        String typtype = rs.getString("typtype");
+        if (isArray) {
+            type = Types.ARRAY;
+        } else if ("c".equals(typtype)) {
+            type = Types.STRUCT;
+        } else if ("d".equals(typtype)) {
+            type = Types.DISTINCT;
+        } else if ("e".equals(typtype)) {
+            type = Types.VARCHAR;
+        }
+        if (type == null) {
+            type = Types.OTHER;
+        }
+        return type;
     }
-    rs.close();
-  }
 
-  private PreparedStatement prepareGetTypeInfoStatement() throws SQLException {
-    PreparedStatement getTypeInfoStatement = this.getTypeInfoStatement;
-    if (getTypeInfoStatement == null) {
-      getTypeInfoStatement = conn.prepareStatement(getSQLTypeQuery(true));
-      this.getTypeInfoStatement = getTypeInfoStatement;
+    private PreparedStatement prepareGetAllTypeInfoStatement() throws SQLException {
+        PreparedStatement getAllTypeInfoStatement = this.getAllTypeInfoStatement;
+        if (getAllTypeInfoStatement == null) {
+            getAllTypeInfoStatement = conn.prepareStatement(getSQLTypeQuery(false));
+            this.getAllTypeInfoStatement = getAllTypeInfoStatement;
+        }
+        return getAllTypeInfoStatement;
     }
-    return getTypeInfoStatement;
-  }
 
-  @Override
-  public int getSQLType(String pgTypeName) throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
+    public void cacheSQLTypes() throws SQLException {
+        LOGGER.log(Level.FINEST, "caching all SQL typecodes");
+        PreparedStatement getAllTypeInfoStatement = prepareGetAllTypeInfoStatement();
+        // Go through BaseStatement to avoid transaction start.
+        if (!((BaseStatement) getAllTypeInfoStatement)
+                .executeWithFlags(QueryExecutor.QUERY_SUPPRESS_BEGIN)) {
+            throw new PSQLException(GT.tr("No results were returned by the query."), PSQLState.NO_DATA);
+        }
+        ResultSet rs = getAllTypeInfoStatement.getResultSet();
+        while (rs.next()) {
+            String typeName = rs.getString("typname");
+            Integer type = getSQLTypeFromQueryResult(rs);
+            if (!pgNameToSQLType.containsKey(typeName)) {
+                pgNameToSQLType.put(typeName, type);
+            }
+
+            Integer typeOid = longOidToInt(rs.getLong("oid"));
+            if (!oidToSQLType.containsKey(typeOid)) {
+                oidToSQLType.put(typeOid, type);
+            }
+        }
+        rs.close();
+    }
+
+    private PreparedStatement prepareGetTypeInfoStatement() throws SQLException {
+        PreparedStatement getTypeInfoStatement = this.getTypeInfoStatement;
+        if (getTypeInfoStatement == null) {
+            getTypeInfoStatement = conn.prepareStatement(getSQLTypeQuery(true));
+            this.getTypeInfoStatement = getTypeInfoStatement;
+        }
+        return getTypeInfoStatement;
+    }
+
+    @Override
+    public int getSQLType(String pgTypeName) throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
       /*
         Get a few things out of the way such as arrays and known types
       */
-      if (pgTypeName.endsWith("[]")) {
-        return Types.ARRAY;
-      }
-      Integer i = this.pgNameToSQLType.get(pgTypeName);
-      if (i != null) {
-        return i;
-      }
+            if (pgTypeName.endsWith("[]")) {
+                return Types.ARRAY;
+            }
+            Integer i = this.pgNameToSQLType.get(pgTypeName);
+            if (i != null) {
+                return i;
+            }
 
       /*
         All else fails then we will query the database.
         save for future calls
       */
-      i = getSQLType(getPGType(pgTypeName));
+            i = getSQLType(getPGType(pgTypeName));
 
-      pgNameToSQLType.put(pgTypeName, i);
-      return i;
+            pgNameToSQLType.put(pgTypeName, i);
+            return i;
+        }
     }
-  }
 
-  @Override
-  public int getJavaArrayType(String className) throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      Integer oid = javaArrayTypeToOid.get(className);
-      if (oid == null) {
-        return Oid.UNSPECIFIED;
-      }
-      return oid;
+    @Override
+    public int getJavaArrayType(String className) throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            Integer oid = javaArrayTypeToOid.get(className);
+            if (oid == null) {
+                return Oid.UNSPECIFIED;
+            }
+            return oid;
+        }
     }
-  }
 
-  @Override
-  public int getSQLType(int typeOid) throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      if (typeOid == Oid.UNSPECIFIED) {
-        return Types.OTHER;
-      }
+    @Override
+    public int getSQLType(int typeOid) throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            if (typeOid == Oid.UNSPECIFIED) {
+                return Types.OTHER;
+            }
 
-      Integer i = oidToSQLType.get(typeOid);
-      if (i != null) {
-        return i;
-      }
+            Integer i = oidToSQLType.get(typeOid);
+            if (i != null) {
+                return i;
+            }
 
-      LOGGER.log(Level.FINEST, "querying SQL typecode for pg type oid ''{0}''", intOidToLong(typeOid));
+            LOGGER.log(Level.FINEST, "querying SQL typecode for pg type oid ''{0}''", intOidToLong(typeOid));
 
-      PreparedStatement getTypeInfoStatement = prepareGetTypeInfoStatement();
+            PreparedStatement getTypeInfoStatement = prepareGetTypeInfoStatement();
 
-      getTypeInfoStatement.setLong(1, intOidToLong(typeOid));
+            getTypeInfoStatement.setLong(1, intOidToLong(typeOid));
 
-      // Go through BaseStatement to avoid transaction start.
-      if (!((BaseStatement) getTypeInfoStatement)
-          .executeWithFlags(QueryExecutor.QUERY_SUPPRESS_BEGIN)) {
-        throw new PSQLException(GT.tr("No results were returned by the query."), PSQLState.NO_DATA);
-      }
+            // Go through BaseStatement to avoid transaction start.
+            if (!((BaseStatement) getTypeInfoStatement)
+                    .executeWithFlags(QueryExecutor.QUERY_SUPPRESS_BEGIN)) {
+                throw new PSQLException(GT.tr("No results were returned by the query."), PSQLState.NO_DATA);
+            }
 
-      ResultSet rs = getTypeInfoStatement.getResultSet();
+            ResultSet rs = getTypeInfoStatement.getResultSet();
 
-      int sqlType = Types.OTHER;
-      if (rs.next()) {
-        sqlType = getSQLTypeFromQueryResult(rs);
-      }
-      rs.close();
+            int sqlType = Types.OTHER;
+            if (rs.next()) {
+                sqlType = getSQLTypeFromQueryResult(rs);
+            }
+            rs.close();
 
-      oidToSQLType.put(typeOid, sqlType);
-      return sqlType;
+            oidToSQLType.put(typeOid, sqlType);
+            return sqlType;
+        }
     }
-  }
 
-  private PreparedStatement getOidStatement(String pgTypeName) throws SQLException {
-    boolean isArray = pgTypeName.endsWith("[]");
-    boolean hasQuote = pgTypeName.contains("\"");
-    int dotIndex = pgTypeName.indexOf('.');
+    private PreparedStatement getOidStatement(String pgTypeName) throws SQLException {
+        boolean isArray = pgTypeName.endsWith("[]");
+        boolean hasQuote = pgTypeName.contains("\"");
+        int dotIndex = pgTypeName.indexOf('.');
 
-    if (dotIndex == -1 && !hasQuote && !isArray) {
-      PreparedStatement getOidStatementSimple = this.getOidStatementSimple;
-      if (getOidStatementSimple == null) {
-        String sql;
-        // see comments in @getSQLType()
-        // -- go with older way of unnesting array to be compatible with 8.0
-        sql = "SELECT pg_type.oid, typname "
-              + "  FROM pg_catalog.pg_type "
-              + "  LEFT "
-              + "  JOIN (select ns.oid as nspoid, ns.nspname, r.r "
-              + "          from pg_namespace as ns "
-              + "          join ( select s.r, (current_schemas(false))[s.r] as nspname "
-              + "                   from generate_series(1, array_upper(current_schemas(false), 1)) as s(r) ) as r "
-              + "         using ( nspname ) "
-              + "       ) as sp "
-              + "    ON sp.nspoid = typnamespace "
-              + " WHERE typname = ? "
-              + " ORDER BY sp.r, pg_type.oid DESC LIMIT 1;";
-        this.getOidStatementSimple = getOidStatementSimple = conn.prepareStatement(sql);
-      }
-      // coerce to lower case to handle upper case type names
-      String lcName = pgTypeName.toLowerCase(Locale.ROOT);
-      // default arrays are represented with _ as prefix ... this dont even work for public schema
-      // fully
-      getOidStatementSimple.setString(1, lcName);
-      return getOidStatementSimple;
-    }
-    PreparedStatement oidStatementComplex;
-    if (isArray) {
-      PreparedStatement getOidStatementComplexArray = this.getOidStatementComplexArray;
-      if (getOidStatementComplexArray == null) {
-        String sql;
-        if (conn.haveMinimumServerVersion(ServerVersion.v8_3)) {
-          sql = "SELECT t.typarray, arr.typname "
-              + "  FROM pg_catalog.pg_type t"
-              + "  JOIN pg_catalog.pg_namespace n ON t.typnamespace = n.oid"
-              + "  JOIN pg_catalog.pg_type arr ON arr.oid = t.typarray"
-              + " WHERE t.typname = ? AND (n.nspname = ? OR ? AND n.nspname = ANY (current_schemas(true)))"
-              + " ORDER BY t.oid DESC LIMIT 1";
+        if (dotIndex == -1 && !hasQuote && !isArray) {
+            PreparedStatement getOidStatementSimple = this.getOidStatementSimple;
+            if (getOidStatementSimple == null) {
+                String sql;
+                // see comments in @getSQLType()
+                // -- go with older way of unnesting array to be compatible with 8.0
+                sql = "SELECT pg_type.oid, typname "
+                        + "  FROM pg_catalog.pg_type "
+                        + "  LEFT "
+                        + "  JOIN (select ns.oid as nspoid, ns.nspname, r.r "
+                        + "          from pg_namespace as ns "
+                        + "          join ( select s.r, (current_schemas(false))[s.r] as nspname "
+                        + "                   from generate_series(1, array_upper(current_schemas(false), 1)) as s(r) ) as r "
+                        + "         using ( nspname ) "
+                        + "       ) as sp "
+                        + "    ON sp.nspoid = typnamespace "
+                        + " WHERE typname = ? "
+                        + " ORDER BY sp.r, pg_type.oid DESC LIMIT 1;";
+                this.getOidStatementSimple = getOidStatementSimple = conn.prepareStatement(sql);
+            }
+            // coerce to lower case to handle upper case type names
+            String lcName = pgTypeName.toLowerCase(Locale.ROOT);
+            // default arrays are represented with _ as prefix ... this doesn't even work for public schema
+            // fully
+            getOidStatementSimple.setString(1, lcName);
+            return getOidStatementSimple;
+        }
+        PreparedStatement oidStatementComplex;
+        if (isArray) {
+            PreparedStatement getOidStatementComplexArray = this.getOidStatementComplexArray;
+            if (getOidStatementComplexArray == null) {
+                String sql;
+                if (conn.haveMinimumServerVersion(ServerVersion.v8_3)) {
+                    sql = "SELECT t.typarray, arr.typname "
+                            + "  FROM pg_catalog.pg_type t"
+                            + "  JOIN pg_catalog.pg_namespace n ON t.typnamespace = n.oid"
+                            + "  JOIN pg_catalog.pg_type arr ON arr.oid = t.typarray"
+                            + " WHERE t.typname = ? AND (n.nspname = ? OR ? AND n.nspname = ANY (current_schemas(true)))"
+                            + " ORDER BY t.oid DESC LIMIT 1";
+                } else {
+                    sql = "SELECT t.oid, t.typname "
+                            + "  FROM pg_catalog.pg_type t"
+                            + "  JOIN pg_catalog.pg_namespace n ON t.typnamespace = n.oid"
+                            + " WHERE t.typelem = (SELECT oid FROM pg_catalog.pg_type WHERE typname = ?)"
+                            + " AND substring(t.typname, 1, 1) = '_' AND t.typlen = -1"
+                            + " AND (n.nspname = ? OR ? AND n.nspname = ANY (current_schemas(true)))"
+                            + " ORDER BY t.typelem DESC LIMIT 1";
+                }
+                this.getOidStatementComplexArray = getOidStatementComplexArray = conn.prepareStatement(sql);
+            }
+            oidStatementComplex = getOidStatementComplexArray;
         } else {
-          sql = "SELECT t.oid, t.typname "
-              + "  FROM pg_catalog.pg_type t"
-              + "  JOIN pg_catalog.pg_namespace n ON t.typnamespace = n.oid"
-              + " WHERE t.typelem = (SELECT oid FROM pg_catalog.pg_type WHERE typname = ?)"
-              + " AND substring(t.typname, 1, 1) = '_' AND t.typlen = -1"
-              + " AND (n.nspname = ? OR ? AND n.nspname = ANY (current_schemas(true)))"
-              + " ORDER BY t.typelem DESC LIMIT 1";
+            PreparedStatement getOidStatementComplexNonArray = this.getOidStatementComplexNonArray;
+            if (getOidStatementComplexNonArray == null) {
+                String sql = "SELECT t.oid, t.typname "
+                        + "  FROM pg_catalog.pg_type t"
+                        + "  JOIN pg_catalog.pg_namespace n ON t.typnamespace = n.oid"
+                        + " WHERE t.typname = ? AND (n.nspname = ? OR ? AND n.nspname = ANY (current_schemas(true)))"
+                        + " ORDER BY t.oid DESC LIMIT 1";
+                this.getOidStatementComplexNonArray = getOidStatementComplexNonArray = conn.prepareStatement(sql);
+            }
+            oidStatementComplex = getOidStatementComplexNonArray;
         }
-        this.getOidStatementComplexArray = getOidStatementComplexArray = conn.prepareStatement(sql);
-      }
-      oidStatementComplex = getOidStatementComplexArray;
-    } else {
-      PreparedStatement getOidStatementComplexNonArray = this.getOidStatementComplexNonArray;
-      if (getOidStatementComplexNonArray == null) {
-        String sql = "SELECT t.oid, t.typname "
-            + "  FROM pg_catalog.pg_type t"
-            + "  JOIN pg_catalog.pg_namespace n ON t.typnamespace = n.oid"
-            + " WHERE t.typname = ? AND (n.nspname = ? OR ? AND n.nspname = ANY (current_schemas(true)))"
-            + " ORDER BY t.oid DESC LIMIT 1";
-        this.getOidStatementComplexNonArray = getOidStatementComplexNonArray = conn.prepareStatement(sql);
-      }
-      oidStatementComplex = getOidStatementComplexNonArray;
-    }
-    //type name requested may be schema specific, of the form "{schema}"."typeName",
-    //or may check across all schemas where a schema is not specified.
-    String fullName = isArray ? pgTypeName.substring(0, pgTypeName.length() - 2) : pgTypeName;
-    String schema;
-    String name;
-    // simple use case
-    if (dotIndex == -1) {
-      schema = null;
-      name = fullName;
-    } else {
-      if (fullName.startsWith("\"")) {
-        if (fullName.endsWith("\"")) {
-          String[] parts = fullName.split("\"\\.\"");
-          schema = parts.length == 2 ? parts[0] + "\"" : null;
-          name = parts.length == 2 ? "\"" + parts[1] : parts[0];
+        //type name requested may be schema specific, of the form "{schema}"."typeName",
+        //or may check across all schemas where a schema is not specified.
+        String fullName = isArray ? pgTypeName.substring(0, pgTypeName.length() - 2) : pgTypeName;
+        String schema;
+        String name;
+        // simple use case
+        if (dotIndex == -1) {
+            schema = null;
+            name = fullName;
         } else {
-          int lastDotIndex = fullName.lastIndexOf('.');
-          name = fullName.substring(lastDotIndex + 1);
-          schema = fullName.substring(0, lastDotIndex);
+            if (fullName.startsWith("\"")) {
+                if (fullName.endsWith("\"")) {
+                    String[] parts = fullName.split("\"\\.\"");
+                    schema = parts.length == 2 ? parts[0] + "\"" : null;
+                    name = parts.length == 2 ? "\"" + parts[1] : parts[0];
+                } else {
+                    int lastDotIndex = fullName.lastIndexOf('.');
+                    name = fullName.substring(lastDotIndex + 1);
+                    schema = fullName.substring(0, lastDotIndex);
+                }
+            } else {
+                schema = fullName.substring(0, dotIndex);
+                name = fullName.substring(dotIndex + 1);
+            }
         }
-      } else {
-        schema = fullName.substring(0, dotIndex);
-        name = fullName.substring(dotIndex + 1);
-      }
-    }
-    if (schema != null && schema.startsWith("\"") && schema.endsWith("\"")) {
-      schema = schema.substring(1, schema.length() - 1);
-    } else if (schema != null) {
-      schema = schema.toLowerCase(Locale.ROOT);
-    }
-    if (name.startsWith("\"") && name.endsWith("\"")) {
-      name = name.substring(1, name.length() - 1);
-    } else {
-      name = name.toLowerCase(Locale.ROOT);
-    }
-    oidStatementComplex.setString(1, name);
-    oidStatementComplex.setString(2, schema);
-    oidStatementComplex.setBoolean(3, schema == null);
-    return oidStatementComplex;
-  }
-
-  @Override
-  public int getPGType(String pgTypeName) throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      // there really isn't anything else to return other than UNSPECIFIED here.
-      if (pgTypeName == null) {
-        return Oid.UNSPECIFIED;
-      }
-
-      Integer oid = pgNameToOid.get(pgTypeName);
-      if (oid != null) {
-        return oid;
-      }
-
-      PreparedStatement oidStatement = getOidStatement(pgTypeName);
-
-      // Go through BaseStatement to avoid transaction start.
-      if (!((BaseStatement) oidStatement).executeWithFlags(QueryExecutor.QUERY_SUPPRESS_BEGIN)) {
-        throw new PSQLException(GT.tr("No results were returned by the query."), PSQLState.NO_DATA);
-      }
-
-      oid = Oid.UNSPECIFIED;
-      ResultSet rs = oidStatement.getResultSet();
-      if (rs.next()) {
-        oid = (int) rs.getLong(1);
-        String internalName = rs.getString(2);
-        oidToPgName.put(oid, internalName);
-        pgNameToOid.put(internalName, oid);
-      }
-      pgNameToOid.put(pgTypeName, oid);
-      rs.close();
-
-      return oid;
-    }
-  }
-
-  @Override
-  public String getPGType(int oid) throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      if (oid == Oid.UNSPECIFIED) {
-        // TODO: it would be great to forbid UNSPECIFIED argument, and make the return type non-nullable
-        return null;
-      }
-
-      String pgTypeName = oidToPgName.get(oid);
-      if (pgTypeName != null) {
-        return pgTypeName;
-      }
-
-      PreparedStatement getNameStatement = prepareGetNameStatement();
-
-      getNameStatement.setInt(1, oid);
-
-      // Go through BaseStatement to avoid transaction start.
-      if (!((BaseStatement) getNameStatement).executeWithFlags(QueryExecutor.QUERY_SUPPRESS_BEGIN)) {
-        throw new PSQLException(GT.tr("No results were returned by the query."), PSQLState.NO_DATA);
-      }
-
-      ResultSet rs = getNameStatement.getResultSet();
-      if (rs.next()) {
-        boolean onPath = rs.getBoolean(1);
-        String schema = rs.getString(2);
-        String name = rs.getString(3);
-        if (onPath) {
-          pgTypeName = name;
-          pgNameToOid.put(schema + "." + name, oid);
+        if (schema != null && schema.startsWith("\"") && schema.endsWith("\"")) {
+            schema = schema.substring(1, schema.length() - 1);
+        } else if (schema != null) {
+            schema = schema.toLowerCase(Locale.ROOT);
+        }
+        if (name.startsWith("\"") && name.endsWith("\"")) {
+            name = name.substring(1, name.length() - 1);
         } else {
-          // TODO: escaping !?
-          pgTypeName = "\"" + schema + "\".\"" + name + "\"";
-          // if all is lowercase add special type info
-          // TODO: should probably check for all special chars
-          if (schema.equals(schema.toLowerCase(Locale.ROOT)) && schema.indexOf('.') == -1
-              && name.equals(name.toLowerCase(Locale.ROOT)) && name.indexOf('.') == -1) {
-            pgNameToOid.put(schema + "." + name, oid);
-          }
+            name = name.toLowerCase(Locale.ROOT);
         }
-        pgNameToOid.put(pgTypeName, oid);
-        oidToPgName.put(oid, pgTypeName);
-      }
-      rs.close();
-
-      return pgTypeName;
+        oidStatementComplex.setString(1, name);
+        oidStatementComplex.setString(2, schema);
+        oidStatementComplex.setBoolean(3, schema == null);
+        return oidStatementComplex;
     }
-  }
 
-  private PreparedStatement prepareGetNameStatement() throws SQLException {
-    PreparedStatement getNameStatement = this.getNameStatement;
-    if (getNameStatement == null) {
-      String sql;
-      sql = "SELECT n.nspname = ANY(current_schemas(true)), n.nspname, t.typname "
-            + "FROM pg_catalog.pg_type t "
-            + "JOIN pg_catalog.pg_namespace n ON t.typnamespace = n.oid WHERE t.oid = ?";
+    @Override
+    public int getPGType(String pgTypeName) throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            // there really isn't anything else to return other than UNSPECIFIED here.
+            if (pgTypeName == null) {
+                return Oid.UNSPECIFIED;
+            }
 
-      this.getNameStatement = getNameStatement = conn.prepareStatement(sql);
-    }
-    return getNameStatement;
-  }
+            Integer oid = pgNameToOid.get(pgTypeName);
+            if (oid != null) {
+                return oid;
+            }
 
-  @Override
-  public int getPGArrayType(String elementTypeName) throws SQLException {
-    elementTypeName = getTypeForAlias(elementTypeName);
-    return getPGType(elementTypeName + "[]");
-  }
+            PreparedStatement oidStatement = getOidStatement(pgTypeName);
 
-  /**
-   * Return the oid of the array's base element if it's an array, if not return the provided oid.
-   * This doesn't do any database lookups, so it's only useful for the originally provided type
-   * mappings. This is fine for it's intended uses where we only have intimate knowledge of types
-   * that are already known to the driver.
-   *
-   * @param oid input oid
-   * @return oid of the array's base element or the provided oid (if not array)
-   */
-  protected int convertArrayToBaseOid(int oid) {
-    try (ResourceLock ignore = lock.obtain()) {
-      Integer i = pgArrayToPgType.get(oid);
-      if (i == null) {
-        return oid;
-      }
-      return i;
-    }
-  }
+            // Go through BaseStatement to avoid transaction start.
+            if (!((BaseStatement) oidStatement).executeWithFlags(QueryExecutor.QUERY_SUPPRESS_BEGIN)) {
+                throw new PSQLException(GT.tr("No results were returned by the query."), PSQLState.NO_DATA);
+            }
 
-  @Override
-  public char getArrayDelimiter(int oid) throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      if (oid == Oid.UNSPECIFIED) {
-        return ',';
-      }
+            oid = Oid.UNSPECIFIED;
+            ResultSet rs = oidStatement.getResultSet();
+            if (rs.next()) {
+                oid = (int) rs.getLong(1);
+                String internalName = rs.getString(2);
+                oidToPgName.put(oid, internalName);
+                pgNameToOid.put(internalName, oid);
+            }
+            pgNameToOid.put(pgTypeName, oid);
+            rs.close();
 
-      Character delim = arrayOidToDelimiter.get(oid);
-      if (delim != null) {
-        return delim;
-      }
-
-      PreparedStatement getArrayDelimiterStatement = prepareGetArrayDelimiterStatement();
-
-      getArrayDelimiterStatement.setInt(1, oid);
-
-      // Go through BaseStatement to avoid transaction start.
-      if (!((BaseStatement) getArrayDelimiterStatement)
-          .executeWithFlags(QueryExecutor.QUERY_SUPPRESS_BEGIN)) {
-        throw new PSQLException(GT.tr("No results were returned by the query."), PSQLState.NO_DATA);
-      }
-
-      ResultSet rs = getArrayDelimiterStatement.getResultSet();
-      if (!rs.next()) {
-        throw new PSQLException(GT.tr("No results were returned by the query."), PSQLState.NO_DATA);
-      }
-
-      String s = rs.getString(1);
-      delim = s.charAt(0);
-
-      arrayOidToDelimiter.put(oid, delim);
-
-      rs.close();
-
-      return delim;
-    }
-  }
-
-  private PreparedStatement prepareGetArrayDelimiterStatement() throws SQLException {
-    PreparedStatement getArrayDelimiterStatement = this.getArrayDelimiterStatement;
-    if (getArrayDelimiterStatement == null) {
-      String sql;
-      sql = "SELECT e.typdelim FROM pg_catalog.pg_type t, pg_catalog.pg_type e "
-            + "WHERE t.oid = ? and t.typelem = e.oid";
-      this.getArrayDelimiterStatement = getArrayDelimiterStatement = conn.prepareStatement(sql);
-    }
-    return getArrayDelimiterStatement;
-  }
-
-  @Override
-  public int getPGArrayElement(int oid) throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      if (oid == Oid.UNSPECIFIED) {
-        return Oid.UNSPECIFIED;
-      }
-
-      Integer pgType = pgArrayToPgType.get(oid);
-
-      if (pgType != null) {
-        return pgType;
-      }
-
-      PreparedStatement getArrayElementOidStatement = prepareGetArrayElementOidStatement();
-
-      getArrayElementOidStatement.setInt(1, oid);
-
-      // Go through BaseStatement to avoid transaction start.
-      if (!((BaseStatement) getArrayElementOidStatement)
-          .executeWithFlags(QueryExecutor.QUERY_SUPPRESS_BEGIN)) {
-        throw new PSQLException(GT.tr("No results were returned by the query."), PSQLState.NO_DATA);
-      }
-
-      ResultSet rs = getArrayElementOidStatement.getResultSet();
-      if (!rs.next()) {
-        throw new PSQLException(GT.tr("No results were returned by the query."), PSQLState.NO_DATA);
-      }
-
-      pgType = (int) rs.getLong(1);
-      boolean onPath = rs.getBoolean(2);
-      String schema = rs.getString(3);
-      String name = rs.getString(4);
-      pgArrayToPgType.put(oid, pgType);
-      pgNameToOid.put(schema + "." + name, pgType);
-      String fullName = "\"" + schema + "\".\"" + name + "\"";
-      pgNameToOid.put(fullName, pgType);
-      if (onPath && name.equals(name.toLowerCase(Locale.ROOT))) {
-        oidToPgName.put(pgType, name);
-        pgNameToOid.put(name, pgType);
-      } else {
-        oidToPgName.put(pgType, fullName);
-      }
-
-      rs.close();
-
-      return pgType;
-    }
-  }
-
-  private PreparedStatement prepareGetArrayElementOidStatement() throws SQLException {
-    PreparedStatement getArrayElementOidStatement = this.getArrayElementOidStatement;
-    if (getArrayElementOidStatement == null) {
-      String sql;
-      sql = "SELECT e.oid, n.nspname = ANY(current_schemas(true)), n.nspname, e.typname "
-            + "FROM pg_catalog.pg_type t JOIN pg_catalog.pg_type e ON t.typelem = e.oid "
-            + "JOIN pg_catalog.pg_namespace n ON t.typnamespace = n.oid WHERE t.oid = ?";
-      this.getArrayElementOidStatement = getArrayElementOidStatement = conn.prepareStatement(sql);
-    }
-    return getArrayElementOidStatement;
-  }
-
-  @Override
-  public Class<? extends PGobject> getPGobject(String type) {
-    try (ResourceLock ignore = lock.obtain()) {
-      return pgNameToPgObject.get(type);
-    }
-  }
-
-  @Override
-  public String getJavaClass(int oid) throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      String pgTypeName = getPGType(oid);
-      if (pgTypeName == null) {
-        // Technically speaking, we should not be here
-        // null result probably means oid == UNSPECIFIED which has no clear way
-        // to map to Java
-        return "java.lang.String";
-      }
-
-      String result = pgNameToJavaClass.get(pgTypeName);
-      if (result != null) {
-        return result;
-      }
-
-      if (getSQLType(pgTypeName) == Types.ARRAY) {
-        result = "java.sql.Array";
-        pgNameToJavaClass.put(pgTypeName, result);
-      }
-
-      return result == null ? "java.lang.String" : result;
-    }
-  }
-
-  @Override
-  public String getTypeForAlias(String alias) {
-    if ( alias == null ) {
-      return null;
-    }
-    String type = TYPE_ALIASES.get(alias);
-    if (type != null) {
-      return type;
-    }
-    type = TYPE_ALIASES.get(alias.toLowerCase(Locale.ROOT));
-    if (type == null) {
-      type = alias;
-    }
-    //populate for future use
-    TYPE_ALIASES.put(alias, type);
-    return type;
-  }
-
-  @Override
-  public int getPrecision(int oid, int typmod) {
-    oid = convertArrayToBaseOid(oid);
-    switch (oid) {
-      case Oid.INT2:
-        return 5;
-
-      case Oid.OID:
-      case Oid.INT4:
-        return 10;
-
-      case Oid.INT8:
-        return 19;
-
-      case Oid.FLOAT4:
-        // For float4 and float8, we can normally only get 6 and 15
-        // significant digits out, but extra_float_digits may raise
-        // that number by up to two digits.
-        return 8;
-
-      case Oid.FLOAT8:
-        return 17;
-
-      case Oid.NUMERIC:
-        if (typmod == -1) {
-          return 0;
+            return oid;
         }
-        return ((typmod - 4) & 0xFFFF0000) >> 16;
-
-      case Oid.CHAR:
-      case Oid.BOOL:
-        return 1;
-
-      case Oid.BPCHAR:
-      case Oid.VARCHAR:
-        if (typmod == -1) {
-          return unknownLength;
-        }
-        return typmod - 4;
-
-      // datetime types get the
-      // "length in characters of the String representation"
-      case Oid.DATE:
-      case Oid.TIME:
-      case Oid.TIMETZ:
-      case Oid.INTERVAL:
-      case Oid.TIMESTAMP:
-      case Oid.TIMESTAMPTZ:
-        return getDisplaySize(oid, typmod);
-
-      case Oid.BIT:
-        return typmod;
-
-      case Oid.VARBIT:
-        if (typmod == -1) {
-          return unknownLength;
-        }
-        return typmod;
-
-      case Oid.TEXT:
-      case Oid.BYTEA:
-      default:
-        return unknownLength;
     }
-  }
 
-  @Override
-  public int getScale(int oid, int typmod) {
-    oid = convertArrayToBaseOid(oid);
-    switch (oid) {
-      case Oid.FLOAT4:
-        return 8;
-      case Oid.FLOAT8:
-        return 17;
-      case Oid.NUMERIC:
-        if (typmod == -1) {
-          return 0;
+    @Override
+    public String getPGType(int oid) throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            if (oid == Oid.UNSPECIFIED) {
+                // TODO: it would be great to forbid UNSPECIFIED argument, and make the return type non-nullable
+                return null;
+            }
+
+            String pgTypeName = oidToPgName.get(oid);
+            if (pgTypeName != null) {
+                return pgTypeName;
+            }
+
+            PreparedStatement getNameStatement = prepareGetNameStatement();
+
+            getNameStatement.setInt(1, oid);
+
+            // Go through BaseStatement to avoid transaction start.
+            if (!((BaseStatement) getNameStatement).executeWithFlags(QueryExecutor.QUERY_SUPPRESS_BEGIN)) {
+                throw new PSQLException(GT.tr("No results were returned by the query."), PSQLState.NO_DATA);
+            }
+
+            ResultSet rs = getNameStatement.getResultSet();
+            if (rs.next()) {
+                boolean onPath = rs.getBoolean(1);
+                String schema = rs.getString(2);
+                String name = rs.getString(3);
+                if (onPath) {
+                    pgTypeName = name;
+                    pgNameToOid.put(schema + "." + name, oid);
+                } else {
+                    // TODO: escaping !?
+                    pgTypeName = "\"" + schema + "\".\"" + name + "\"";
+                    // if all is lowercase add special type info
+                    // TODO: should probably check for all special chars
+                    if (schema.equals(schema.toLowerCase(Locale.ROOT)) && schema.indexOf('.') == -1
+                            && name.equals(name.toLowerCase(Locale.ROOT)) && name.indexOf('.') == -1) {
+                        pgNameToOid.put(schema + "." + name, oid);
+                    }
+                }
+                pgNameToOid.put(pgTypeName, oid);
+                oidToPgName.put(oid, pgTypeName);
+            }
+            rs.close();
+
+            return pgTypeName;
         }
-        return (typmod - 4) & 0xFFFF;
-      case Oid.TIME:
-      case Oid.TIMETZ:
-      case Oid.TIMESTAMP:
-      case Oid.TIMESTAMPTZ:
-        if (typmod == -1) {
-          return 6;
-        }
-        return typmod;
-      case Oid.INTERVAL:
-        if (typmod == -1) {
-          return 6;
-        }
-        return typmod & 0xFFFF;
-      default:
-        return 0;
     }
-  }
 
-  @Override
-  public boolean isCaseSensitive(int oid) {
-    oid = convertArrayToBaseOid(oid);
-    switch (oid) {
-      case Oid.OID:
-      case Oid.INT2:
-      case Oid.INT4:
-      case Oid.INT8:
-      case Oid.FLOAT4:
-      case Oid.FLOAT8:
-      case Oid.NUMERIC:
-      case Oid.BOOL:
-      case Oid.BIT:
-      case Oid.VARBIT:
-      case Oid.DATE:
-      case Oid.TIME:
-      case Oid.TIMETZ:
-      case Oid.TIMESTAMP:
-      case Oid.TIMESTAMPTZ:
-      case Oid.INTERVAL:
-        return false;
-      default:
-        return true;
-    }
-  }
+    private PreparedStatement prepareGetNameStatement() throws SQLException {
+        PreparedStatement getNameStatement = this.getNameStatement;
+        if (getNameStatement == null) {
+            String sql;
+            sql = "SELECT n.nspname = ANY(current_schemas(true)), n.nspname, t.typname "
+                    + "FROM pg_catalog.pg_type t "
+                    + "JOIN pg_catalog.pg_namespace n ON t.typnamespace = n.oid WHERE t.oid = ?";
 
-  @Override
-  public boolean isSigned(int oid) {
-    oid = convertArrayToBaseOid(oid);
-    switch (oid) {
-      case Oid.INT2:
-      case Oid.INT4:
-      case Oid.INT8:
-      case Oid.FLOAT4:
-      case Oid.FLOAT8:
-      case Oid.NUMERIC:
-        return true;
-      default:
-        return false;
-    }
-  }
-
-  @SuppressWarnings("fallthrough")
-  @Override
-  public int getDisplaySize(int oid, int typmod) {
-    oid = convertArrayToBaseOid(oid);
-    switch (oid) {
-      case Oid.INT2:
-        return 6; // -32768 to +32767
-      case Oid.INT4:
-        return 11; // -2147483648 to +2147483647
-      case Oid.OID:
-        return 10; // 0 to 4294967295
-      case Oid.INT8:
-        return 20; // -9223372036854775808 to +9223372036854775807
-      case Oid.FLOAT4:
-        // varies based upon the extra_float_digits GUC.
-        // These values are for the longest possible length.
-        return 15; // sign + 9 digits + decimal point + e + sign + 2 digits
-      case Oid.FLOAT8:
-        return 25; // sign + 18 digits + decimal point + e + sign + 3 digits
-      case Oid.CHAR:
-        return 1;
-      case Oid.BOOL:
-        return 1;
-      case Oid.DATE:
-        return 13; // "4713-01-01 BC" to "01/01/4713 BC" - "31/12/32767"
-      case Oid.TIME:
-      case Oid.TIMETZ:
-      case Oid.TIMESTAMP:
-      case Oid.TIMESTAMPTZ:
-        // Calculate the number of decimal digits + the decimal point.
-        int secondSize;
-        switch (typmod) {
-          case -1:
-            secondSize = 6 + 1;
-            break;
-          case 0:
-            secondSize = 0;
-            break;
-          case 1:
-            // Bizarrely SELECT '0:0:0.1'::time(1); returns 2 digits.
-            secondSize = 2 + 1;
-            break;
-          default:
-            secondSize = typmod + 1;
-            break;
+            this.getNameStatement = getNameStatement = conn.prepareStatement(sql);
         }
+        return getNameStatement;
+    }
 
-        // We assume the worst case scenario for all of these.
-        // time = '00:00:00' = 8
-        // date = '5874897-12-31' = 13 (although at large values second precision is lost)
-        // date = '294276-11-20' = 12 --enable-integer-datetimes
-        // zone = '+11:30' = 6;
+    @Override
+    public int getPGArrayType(String elementTypeName) throws SQLException {
+        elementTypeName = getTypeForAlias(elementTypeName);
+        return getPGType(elementTypeName + "[]");
+    }
 
+    /**
+     * Return the oid of the array's base element if it's an array, if not return the provided oid.
+     * This doesn't do any database lookups, so it's only useful for the originally provided type
+     * mappings. This is fine for its intended uses where we only have intimate knowledge of types
+     * that are already known to the driver.
+     *
+     * @param oid input oid
+     * @return oid of the array's base element or the provided oid (if not array)
+     */
+    protected int convertArrayToBaseOid(int oid) {
+        try (ResourceLock ignore = lock.obtain()) {
+            Integer i = pgArrayToPgType.get(oid);
+            if (i == null) {
+                return oid;
+            }
+            return i;
+        }
+    }
+
+    @Override
+    public char getArrayDelimiter(int oid) throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            if (oid == Oid.UNSPECIFIED) {
+                return ',';
+            }
+
+            Character delim = arrayOidToDelimiter.get(oid);
+            if (delim != null) {
+                return delim;
+            }
+
+            PreparedStatement getArrayDelimiterStatement = prepareGetArrayDelimiterStatement();
+
+            getArrayDelimiterStatement.setInt(1, oid);
+
+            // Go through BaseStatement to avoid transaction start.
+            if (!((BaseStatement) getArrayDelimiterStatement)
+                    .executeWithFlags(QueryExecutor.QUERY_SUPPRESS_BEGIN)) {
+                throw new PSQLException(GT.tr("No results were returned by the query."), PSQLState.NO_DATA);
+            }
+
+            ResultSet rs = getArrayDelimiterStatement.getResultSet();
+            if (!rs.next()) {
+                throw new PSQLException(GT.tr("No results were returned by the query."), PSQLState.NO_DATA);
+            }
+
+            String s = rs.getString(1);
+            delim = s.charAt(0);
+
+            arrayOidToDelimiter.put(oid, delim);
+
+            rs.close();
+
+            return delim;
+        }
+    }
+
+    private PreparedStatement prepareGetArrayDelimiterStatement() throws SQLException {
+        PreparedStatement getArrayDelimiterStatement = this.getArrayDelimiterStatement;
+        if (getArrayDelimiterStatement == null) {
+            String sql;
+            sql = "SELECT e.typdelim FROM pg_catalog.pg_type t, pg_catalog.pg_type e "
+                    + "WHERE t.oid = ? and t.typelem = e.oid";
+            this.getArrayDelimiterStatement = getArrayDelimiterStatement = conn.prepareStatement(sql);
+        }
+        return getArrayDelimiterStatement;
+    }
+
+    @Override
+    public int getPGArrayElement(int oid) throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            if (oid == Oid.UNSPECIFIED) {
+                return Oid.UNSPECIFIED;
+            }
+
+            Integer pgType = pgArrayToPgType.get(oid);
+
+            if (pgType != null) {
+                return pgType;
+            }
+
+            PreparedStatement getArrayElementOidStatement = prepareGetArrayElementOidStatement();
+
+            getArrayElementOidStatement.setInt(1, oid);
+
+            // Go through BaseStatement to avoid transaction start.
+            if (!((BaseStatement) getArrayElementOidStatement)
+                    .executeWithFlags(QueryExecutor.QUERY_SUPPRESS_BEGIN)) {
+                throw new PSQLException(GT.tr("No results were returned by the query."), PSQLState.NO_DATA);
+            }
+
+            ResultSet rs = getArrayElementOidStatement.getResultSet();
+            if (!rs.next()) {
+                throw new PSQLException(GT.tr("No results were returned by the query."), PSQLState.NO_DATA);
+            }
+
+            pgType = (int) rs.getLong(1);
+            boolean onPath = rs.getBoolean(2);
+            String schema = rs.getString(3);
+            String name = rs.getString(4);
+            pgArrayToPgType.put(oid, pgType);
+            pgNameToOid.put(schema + "." + name, pgType);
+            String fullName = "\"" + schema + "\".\"" + name + "\"";
+            pgNameToOid.put(fullName, pgType);
+            if (onPath && name.equals(name.toLowerCase(Locale.ROOT))) {
+                oidToPgName.put(pgType, name);
+                pgNameToOid.put(name, pgType);
+            } else {
+                oidToPgName.put(pgType, fullName);
+            }
+
+            rs.close();
+
+            return pgType;
+        }
+    }
+
+    private PreparedStatement prepareGetArrayElementOidStatement() throws SQLException {
+        PreparedStatement getArrayElementOidStatement = this.getArrayElementOidStatement;
+        if (getArrayElementOidStatement == null) {
+            String sql;
+            sql = "SELECT e.oid, n.nspname = ANY(current_schemas(true)), n.nspname, e.typname "
+                    + "FROM pg_catalog.pg_type t JOIN pg_catalog.pg_type e ON t.typelem = e.oid "
+                    + "JOIN pg_catalog.pg_namespace n ON t.typnamespace = n.oid WHERE t.oid = ?";
+            this.getArrayElementOidStatement = getArrayElementOidStatement = conn.prepareStatement(sql);
+        }
+        return getArrayElementOidStatement;
+    }
+
+    @Override
+    public Class<? extends PGobject> getPGobject(String type) {
+        try (ResourceLock ignore = lock.obtain()) {
+            return pgNameToPgObject.get(type);
+        }
+    }
+
+    @Override
+    public String getJavaClass(int oid) throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            String pgTypeName = getPGType(oid);
+            if (pgTypeName == null) {
+                // Technically speaking, we should not be here
+                // null result probably means oid == UNSPECIFIED which has no clear way
+                // to map to Java
+                return "java.lang.String";
+            }
+
+            String result = pgNameToJavaClass.get(pgTypeName);
+            if (result != null) {
+                return result;
+            }
+
+            if (getSQLType(pgTypeName) == Types.ARRAY) {
+                result = "java.sql.Array";
+                pgNameToJavaClass.put(pgTypeName, result);
+            }
+
+            return result == null ? "java.lang.String" : result;
+        }
+    }
+
+    @Override
+    public String getTypeForAlias(String alias) {
+        if (alias == null) {
+            return null;
+        }
+        String type = TYPE_ALIASES.get(alias);
+        if (type != null) {
+            return type;
+        }
+        type = TYPE_ALIASES.get(alias.toLowerCase(Locale.ROOT));
+        if (type == null) {
+            type = alias;
+        }
+        // populate for future use
+        TYPE_ALIASES.put(alias, type);
+        return type;
+    }
+
+    @Override
+    public int getPrecision(int oid, int typmod) {
+        oid = convertArrayToBaseOid(oid);
         switch (oid) {
-          case Oid.TIME:
-            return 8 + secondSize;
-          case Oid.TIMETZ:
-            return 8 + secondSize + 6;
-          case Oid.TIMESTAMP:
-            return 13 + 1 + 8 + secondSize;
-          case Oid.TIMESTAMPTZ:
-            return 13 + 1 + 8 + secondSize + 6;
-        }
-      case Oid.INTERVAL:
-        // SELECT LENGTH('-123456789 years 11 months 33 days 23 hours 10.123456 seconds'::interval);
-        return 49;
-      case Oid.VARCHAR:
-      case Oid.BPCHAR:
-        if (typmod == -1) {
-          return unknownLength;
-        }
-        return typmod - 4;
-      case Oid.NUMERIC:
-        if (typmod == -1) {
-          return 131089; // SELECT LENGTH(pow(10::numeric,131071)); 131071 = 2^17-1
-        }
-        int precision = (typmod - 4 >> 16) & 0xffff;
-        int scale = (typmod - 4) & 0xffff;
-        // sign + digits + decimal point (only if we have nonzero scale)
-        return 1 + precision + (scale != 0 ? 1 : 0);
-      case Oid.BIT:
-        return typmod;
-      case Oid.VARBIT:
-        if (typmod == -1) {
-          return unknownLength;
-        }
-        return typmod;
-      case Oid.TEXT:
-      case Oid.BYTEA:
-        return unknownLength;
-      default:
-        return unknownLength;
-    }
-  }
+            case Oid.INT2:
+                return 5;
 
-  @Override
-  public int getMaximumPrecision(int oid) {
-    oid = convertArrayToBaseOid(oid);
-    switch (oid) {
-      case Oid.NUMERIC:
-        return 1000;
-      case Oid.TIME:
-      case Oid.TIMETZ:
-        // Technically this depends on the --enable-integer-datetimes
-        // configure setting. It is 6 with integer and 10 with float.
-        return 6;
-      case Oid.TIMESTAMP:
-      case Oid.TIMESTAMPTZ:
-      case Oid.INTERVAL:
-        return 6;
-      case Oid.BPCHAR:
-      case Oid.VARCHAR:
-        return 10485760;
-      case Oid.BIT:
-      case Oid.VARBIT:
-        return 83886080;
-      default:
-        return 0;
-    }
-  }
+            case Oid.OID:
+            case Oid.INT4:
+                return 10;
 
-  @Override
-  public boolean requiresQuoting(int oid) throws SQLException {
-    int sqlType = getSQLType(oid);
-    return requiresQuotingSqlType(sqlType);
-  }
+            case Oid.INT8:
+                return 19;
 
-  /**
-   * Returns true if particular sqlType requires quoting.
-   * This method is used internally by the driver, so it might disappear without notice.
-   *
-   * @param sqlType sql type as in java.sql.Types
-   * @return true if the type requires quoting
-   * @throws SQLException if something goes wrong
-   */
-  @Override
-  public boolean requiresQuotingSqlType(int sqlType) throws SQLException {
-    switch (sqlType) {
-      case Types.BIGINT:
-      case Types.DOUBLE:
-      case Types.FLOAT:
-      case Types.INTEGER:
-      case Types.REAL:
-      case Types.SMALLINT:
-      case Types.TINYINT:
-      case Types.NUMERIC:
-      case Types.DECIMAL:
-        return false;
-    }
-    return true;
-  }
+            case Oid.FLOAT4:
+                // For float4 and float8, we can normally only get 6 and 15
+                // significant digits out, but extra_float_digits may raise
+                // that number by up to two digits.
+                return 8;
 
-  @Override
-  public int longOidToInt(long oid) throws SQLException {
-    if ((oid & 0xFFFF_FFFF_0000_0000L) != 0) {
-      throw new PSQLException(GT.tr("Value is not an OID: {0}", oid), PSQLState.NUMERIC_VALUE_OUT_OF_RANGE);
+            case Oid.FLOAT8:
+                return 17;
+
+            case Oid.NUMERIC:
+                if (typmod == -1) {
+                    return 0;
+                }
+                return ((typmod - 4) & 0xFFFF0000) >> 16;
+
+            case Oid.CHAR:
+            case Oid.BOOL:
+                return 1;
+
+            case Oid.BPCHAR:
+            case Oid.VARCHAR:
+                if (typmod == -1) {
+                    return unknownLength;
+                }
+                return typmod - 4;
+
+            // datetime types get the
+            // "length in characters of the String representation"
+            case Oid.DATE:
+            case Oid.TIME:
+            case Oid.TIMETZ:
+            case Oid.INTERVAL:
+            case Oid.TIMESTAMP:
+            case Oid.TIMESTAMPTZ:
+                return getDisplaySize(oid, typmod);
+
+            case Oid.BIT:
+                return typmod;
+
+            case Oid.VARBIT:
+                if (typmod == -1) {
+                    return unknownLength;
+                }
+                return typmod;
+
+            case Oid.TEXT:
+            case Oid.BYTEA:
+            default:
+                return unknownLength;
+        }
     }
 
-    return (int) oid;
-  }
+    @Override
+    public int getScale(int oid, int typmod) {
+        oid = convertArrayToBaseOid(oid);
+        switch (oid) {
+            case Oid.FLOAT4:
+                return 8;
+            case Oid.FLOAT8:
+                return 17;
+            case Oid.NUMERIC:
+                if (typmod == -1) {
+                    return 0;
+                }
+                return (typmod - 4) & 0xFFFF;
+            case Oid.TIME:
+            case Oid.TIMETZ:
+            case Oid.TIMESTAMP:
+            case Oid.TIMESTAMPTZ:
+                if (typmod == -1) {
+                    return 6;
+                }
+                return typmod;
+            case Oid.INTERVAL:
+                if (typmod == -1) {
+                    return 6;
+                }
+                return typmod & 0xFFFF;
+            default:
+                return 0;
+        }
+    }
 
-  @Override
-  public long intOidToLong(int oid) {
-    return ((long) oid) & 0xFFFFFFFFL;
-  }
+    @Override
+    public boolean isCaseSensitive(int oid) {
+        oid = convertArrayToBaseOid(oid);
+        switch (oid) {
+            case Oid.OID:
+            case Oid.INT2:
+            case Oid.INT4:
+            case Oid.INT8:
+            case Oid.FLOAT4:
+            case Oid.FLOAT8:
+            case Oid.NUMERIC:
+            case Oid.BOOL:
+            case Oid.BIT:
+            case Oid.VARBIT:
+            case Oid.DATE:
+            case Oid.TIME:
+            case Oid.TIMETZ:
+            case Oid.TIMESTAMP:
+            case Oid.TIMESTAMPTZ:
+            case Oid.INTERVAL:
+                return false;
+            default:
+                return true;
+        }
+    }
+
+    @Override
+    public boolean isSigned(int oid) {
+        oid = convertArrayToBaseOid(oid);
+        switch (oid) {
+            case Oid.INT2:
+            case Oid.INT4:
+            case Oid.INT8:
+            case Oid.FLOAT4:
+            case Oid.FLOAT8:
+            case Oid.NUMERIC:
+                return true;
+            default:
+                return false;
+        }
+    }
+
+    @SuppressWarnings("fallthrough")
+    @Override
+    public int getDisplaySize(int oid, int typmod) {
+        oid = convertArrayToBaseOid(oid);
+        switch (oid) {
+            case Oid.INT2:
+                return 6; // -32768 to +32767
+            case Oid.INT4:
+                return 11; // -2147483648 to +2147483647
+            case Oid.OID:
+                return 10; // 0 to 4294967295
+            case Oid.INT8:
+                return 20; // -9223372036854775808 to +9223372036854775807
+            case Oid.FLOAT4:
+                // varies based upon the extra_float_digits GUC.
+                // These values are for the longest possible length.
+                return 15; // sign + 9 digits + decimal point + e + sign + 2 digits
+            case Oid.FLOAT8:
+                return 25; // sign + 18 digits + decimal point + e + sign + 3 digits
+            case Oid.CHAR:
+                return 1;
+            case Oid.BOOL:
+                return 1;
+            case Oid.DATE:
+                return 13; // "4713-01-01 BC" to "01/01/4713 BC" - "31/12/32767"
+            case Oid.TIME:
+            case Oid.TIMETZ:
+            case Oid.TIMESTAMP:
+            case Oid.TIMESTAMPTZ:
+                // Calculate the number of decimal digits + the decimal point.
+                int secondSize;
+                switch (typmod) {
+                    case -1:
+                        secondSize = 6 + 1;
+                        break;
+                    case 0:
+                        secondSize = 0;
+                        break;
+                    case 1:
+                        // Bizarrely SELECT '0:0:0.1'::time(1); returns 2 digits.
+                        secondSize = 2 + 1;
+                        break;
+                    default:
+                        secondSize = typmod + 1;
+                        break;
+                }
+
+                // We assume the worst case scenario for all of these.
+                // time = '00:00:00' = 8
+                // date = '5874897-12-31' = 13 (although at large values second precision is lost)
+                // date = '294276-11-20' = 12 --enable-integer-datetimes
+                // zone = '+11:30' = 6;
+
+                switch (oid) {
+                    case Oid.TIME:
+                        return 8 + secondSize;
+                    case Oid.TIMETZ:
+                        return 8 + secondSize + 6;
+                    case Oid.TIMESTAMP:
+                        return 13 + 1 + 8 + secondSize;
+                    case Oid.TIMESTAMPTZ:
+                        return 13 + 1 + 8 + secondSize + 6;
+                }
+            case Oid.INTERVAL:
+                // SELECT LENGTH('-123456789 years 11 months 33 days 23 hours 10.123456 seconds'::interval);
+                return 49;
+            case Oid.VARCHAR:
+            case Oid.BPCHAR:
+                if (typmod == -1) {
+                    return unknownLength;
+                }
+                return typmod - 4;
+            case Oid.NUMERIC:
+                if (typmod == -1) {
+                    return 131089; // SELECT LENGTH(pow(10::numeric,131071)); 131071 = 2^17-1
+                }
+                int precision = (typmod - 4 >> 16) & 0xffff;
+                int scale = (typmod - 4) & 0xffff;
+                // sign + digits + decimal point (only if we have nonzero scale)
+                return 1 + precision + (scale != 0 ? 1 : 0);
+            case Oid.BIT:
+                return typmod;
+            case Oid.VARBIT:
+                if (typmod == -1) {
+                    return unknownLength;
+                }
+                return typmod;
+            case Oid.TEXT:
+            case Oid.BYTEA:
+                return unknownLength;
+            default:
+                return unknownLength;
+        }
+    }
+
+    @Override
+    public int getMaximumPrecision(int oid) {
+        oid = convertArrayToBaseOid(oid);
+        switch (oid) {
+            case Oid.NUMERIC:
+                return 1000;
+            case Oid.TIME:
+            case Oid.TIMETZ:
+                // Technically this depends on the --enable-integer-datetimes
+                // configure setting. It is 6 with integer and 10 with float.
+                return 6;
+            case Oid.TIMESTAMP:
+            case Oid.TIMESTAMPTZ:
+            case Oid.INTERVAL:
+                return 6;
+            case Oid.BPCHAR:
+            case Oid.VARCHAR:
+                return 10485760;
+            case Oid.BIT:
+            case Oid.VARBIT:
+                return 83886080;
+            default:
+                return 0;
+        }
+    }
+
+    @Override
+    public boolean requiresQuoting(int oid) throws SQLException {
+        int sqlType = getSQLType(oid);
+        return requiresQuotingSqlType(sqlType);
+    }
+
+    /**
+     * Returns true if particular sqlType requires quoting.
+     * This method is used internally by the driver, so it might disappear without notice.
+     *
+     * @param sqlType sql type as in java.sql.Types
+     * @return true if the type requires quoting
+     * @throws SQLException if something goes wrong
+     */
+    @Override
+    public boolean requiresQuotingSqlType(int sqlType) throws SQLException {
+        switch (sqlType) {
+            case Types.BIGINT:
+            case Types.DOUBLE:
+            case Types.FLOAT:
+            case Types.INTEGER:
+            case Types.REAL:
+            case Types.SMALLINT:
+            case Types.TINYINT:
+            case Types.NUMERIC:
+            case Types.DECIMAL:
+                return false;
+        }
+        return true;
+    }
+
+    @Override
+    public int longOidToInt(long oid) throws SQLException {
+        if ((oid & 0xFFFF_FFFF_0000_0000L) != 0) {
+            throw new PSQLException(GT.tr("Value is not an OID: {0}", oid), PSQLState.NUMERIC_VALUE_OUT_OF_RANGE);
+        }
+
+        return (int) oid;
+    }
+
+    @Override
+    public long intOidToLong(int oid) {
+        return ((long) oid) & 0xFFFFFFFFL;
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc/UUIDArrayAssistant.java b/pgjdbc/src/main/java/org/postgresql/jdbc/UUIDArrayAssistant.java
index c6376c9..1e85814 100644
--- a/pgjdbc/src/main/java/org/postgresql/jdbc/UUIDArrayAssistant.java
+++ b/pgjdbc/src/main/java/org/postgresql/jdbc/UUIDArrayAssistant.java
@@ -12,21 +12,21 @@ import java.util.UUID;
 
 public class UUIDArrayAssistant implements ArrayAssistant {
 
-  public UUIDArrayAssistant() {
-  }
+    public UUIDArrayAssistant() {
+    }
 
-  @Override
-  public Class<?> baseType() {
-    return UUID.class;
-  }
+    @Override
+    public Class<?> baseType() {
+        return UUID.class;
+    }
 
-  @Override
-  public Object buildElement(byte[] bytes, int pos, int len) {
-    return new UUID(ByteConverter.int8(bytes, pos + 0), ByteConverter.int8(bytes, pos + 8));
-  }
+    @Override
+    public Object buildElement(byte[] bytes, int pos, int len) {
+        return new UUID(ByteConverter.int8(bytes, pos + 0), ByteConverter.int8(bytes, pos + 8));
+    }
 
-  @Override
-  public Object buildElement(String literal) {
-    return UUID.fromString(literal);
-  }
+    @Override
+    public Object buildElement(String literal) {
+        return UUID.fromString(literal);
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc2/ArrayAssistant.java b/pgjdbc/src/main/java/org/postgresql/jdbc2/ArrayAssistant.java
index cc57ac8..9e267a2 100644
--- a/pgjdbc/src/main/java/org/postgresql/jdbc2/ArrayAssistant.java
+++ b/pgjdbc/src/main/java/org/postgresql/jdbc2/ArrayAssistant.java
@@ -12,28 +12,28 @@ package org.postgresql.jdbc2;
  * @author Minglei Tu
  */
 public interface ArrayAssistant {
-  /**
-   * get array base type.
-   *
-   * @return array base type
-   */
-  Class<?> baseType();
+    /**
+     * get array base type.
+     *
+     * @return array base type
+     */
+    Class<?> baseType();
 
-  /**
-   * build a array element from its binary bytes.
-   *
-   * @param bytes input bytes
-   * @param pos position in input array
-   * @param len length of the element
-   * @return array element from its binary bytes
-   */
-  Object buildElement(byte[] bytes, int pos, int len);
+    /**
+     * build an array element from its binary bytes.
+     *
+     * @param bytes input bytes
+     * @param pos   position in input array
+     * @param len   length of the element
+     * @return array element from its binary bytes
+     */
+    Object buildElement(byte[] bytes, int pos, int len);
 
-  /**
-   * build an array element from its literal string.
-   *
-   * @param literal string representation of array element
-   * @return array element
-   */
-  Object buildElement(String literal);
+    /**
+     * build an array element from its literal string.
+     *
+     * @param literal string representation of array element
+     * @return array element
+     */
+    Object buildElement(String literal);
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/jdbc2/ArrayAssistantRegistry.java b/pgjdbc/src/main/java/org/postgresql/jdbc2/ArrayAssistantRegistry.java
index 59a97f9..7cdabd5 100644
--- a/pgjdbc/src/main/java/org/postgresql/jdbc2/ArrayAssistantRegistry.java
+++ b/pgjdbc/src/main/java/org/postgresql/jdbc2/ArrayAssistantRegistry.java
@@ -14,17 +14,17 @@ import java.util.concurrent.ConcurrentMap;
  * @author Minglei Tu
  */
 public class ArrayAssistantRegistry {
-  private static final ConcurrentMap<Integer, ArrayAssistant> ARRAY_ASSISTANT_MAP =
-      new ConcurrentHashMap<>();
+    private static final ConcurrentMap<Integer, ArrayAssistant> ARRAY_ASSISTANT_MAP =
+            new ConcurrentHashMap<>();
 
-  public ArrayAssistantRegistry() {
-  }
+    public ArrayAssistantRegistry() {
+    }
 
-  public static ArrayAssistant getAssistant(int oid) {
-    return ARRAY_ASSISTANT_MAP.get(oid);
-  }
+    public static ArrayAssistant getAssistant(int oid) {
+        return ARRAY_ASSISTANT_MAP.get(oid);
+    }
 
-  public static void register(int oid, ArrayAssistant assistant) {
-    ARRAY_ASSISTANT_MAP.put(oid, assistant);
-  }
+    public static void register(int oid, ArrayAssistant assistant) {
+        ARRAY_ASSISTANT_MAP.put(oid, assistant);
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/jdbcurlresolver/PgPassParser.java b/pgjdbc/src/main/java/org/postgresql/jdbcurlresolver/PgPassParser.java
index 0ca6f05..70a7483 100644
--- a/pgjdbc/src/main/java/org/postgresql/jdbcurlresolver/PgPassParser.java
+++ b/pgjdbc/src/main/java/org/postgresql/jdbcurlresolver/PgPassParser.java
@@ -28,234 +28,234 @@ import java.util.logging.Logger;
  */
 public class PgPassParser {
 
-  private static final Logger LOGGER = Logger.getLogger(PgPassParser.class.getName());
-  private static final char SEPARATOR = ':';
-  //
-  private final String hostname;
-  private final String port;
-  private final String database;
-  private final String user;
-
-  //
-  private PgPassParser(String hostname, String port, String database, String user) {
-    this.hostname = hostname;
-    this.port = port;
-    this.database = database;
-    this.user = user;
-  }
-
-  /**
-   * Read .pgpass resource
-   *
-   * @param hostname hostname or *
-   * @param port     port or *
-   * @param database database or *
-   * @param user     username or *
-   * @return password or null
-   */
-  public static String getPassword(String hostname, String port, String database, String user) {
-    if (hostname == null || hostname.isEmpty()) {
-      return null;
-    }
-    if (port == null || port.isEmpty()) {
-      return null;
-    }
-    if (database == null || database.isEmpty()) {
-      return null;
-    }
-    if (user == null || user.isEmpty()) {
-      return null;
-    }
-    PgPassParser pgPassParser = new PgPassParser(hostname, port, database, user);
-    return pgPassParser.findPassword();
-  }
-
-  private String findPassword() {
-    String resourceName = findPgPasswordResourceName();
-    if (resourceName == null) {
-      return null;
-    }
+    private static final Logger LOGGER = Logger.getLogger(PgPassParser.class.getName());
+    private static final char SEPARATOR = ':';
     //
-    String result = null;
-    try (InputStream inputStream = openInputStream(resourceName)) {
-      result = parseInputStream(inputStream);
-    } catch (IOException e) {
-      LOGGER.log(Level.FINE, "Failed to handle resource [{0}] with error [{1}]", new Object[]{resourceName, e.getMessage()});
-    }
+    private final String hostname;
+    private final String port;
+    private final String database;
+    private final String user;
+
     //
-    return result;
-  }
-
-  // open URL or File
-  private InputStream openInputStream(String resourceName) throws IOException {
-
-    try {
-      URL url = URI.create(resourceName).toURL();
-      return url.openStream();
-    } catch ( MalformedURLException ex ) {
-      // try file
-      File file = new File(resourceName);
-      return new FileInputStream(file);
-    }
-  }
-
-  // choose resource where to search for service description
-  private String findPgPasswordResourceName() {
-    // default file name
-    String pgPassFileDefaultName = PGEnvironment.PGPASSFILE.getDefaultValue();
-
-    // if there is value, use it - 1st priority
-    {
-      String propertyName = PGEnvironment.ORG_POSTGRESQL_PGPASSFILE.getName();
-      String resourceName = System.getProperty(propertyName);
-      if (resourceName != null && !resourceName.trim().isEmpty()) {
-        LOGGER.log(Level.FINE, "Value [{0}] selected from property [{1}]", new Object[]{resourceName, propertyName});
-        return resourceName;
-      }
+    private PgPassParser(String hostname, String port, String database, String user) {
+        this.hostname = hostname;
+        this.port = port;
+        this.database = database;
+        this.user = user;
     }
 
-    // if there is value, use it - 2nd priority
-    {
-      String envVariableName = PGEnvironment.PGPASSFILE.getName();
-      String resourceName = System.getenv().get(envVariableName);
-      if (resourceName != null && !resourceName.trim().isEmpty()) {
-        LOGGER.log(Level.FINE, "Value [{0}] selected from environment variable [{1}]", new Object[]{resourceName, envVariableName});
-        return resourceName;
-      }
-    }
-
-    // if file in user home is readable, use it, otherwise continue - 3rd priority
-    {
-      String resourceName = "";
-      if ( !OSUtil.isWindows() ) {
-        resourceName += ".";
-      }
-      resourceName += pgPassFileDefaultName;
-      if (OSUtil.isWindows()) {
-        resourceName += ".conf";
-      }
-      File resourceFile = new File(OSUtil.getUserConfigRootDirectory(), resourceName);
-      if (resourceFile.canRead()) {
-        LOGGER.log(Level.FINE, "Value [{0}] selected because file exist in user home directory", new Object[]{resourceFile.getAbsolutePath()});
-        return resourceFile.getAbsolutePath();
-      }
-    }
-
-    // otherwise null
-    LOGGER.log(Level.FINE, "Value for resource [{0}] not found", pgPassFileDefaultName);
-    return null;
-  }
-
-  //
-  private String parseInputStream(InputStream inputStream) throws IOException {
-    //
-    String result = null;
-    try (
-        Reader reader = new InputStreamReader(inputStream, StandardCharsets.UTF_8);
-        BufferedReader br = new BufferedReader(reader)) {
-      //
-      String line;
-      int currentLine = 0;
-      while ((line = br.readLine()) != null) {
-        currentLine++;
-        if (line.trim().isEmpty()) {
-          // skip empty lines
-          continue;
-        } else if (line.startsWith("#")) {
-          // skip lines with comments
-          continue;
-        }
-        // analyze line, accept first matching line
-        result = evaluateLine(line, currentLine);
-        if (result != null) {
-          break;
-        }
-      }
-    }
-    //
-    return result;
-  }
-
-  //
-  private String evaluateLine(String fullLine, int currentLine) {
-    String line = fullLine;
-    String result = null;
-    // check match
-    if ((line = checkForPattern(line, hostname)) != null
-        && (line = checkForPattern(line, port)) != null
-        && (line = checkForPattern(line, database)) != null
-        && (line = checkForPattern(line, user)) != null) {
-      // use remaining line to get password
-      result = extractPassword(line);
-      String lineWithoutPassword = fullLine.substring(0, fullLine.length() - line.length());
-      LOGGER.log(Level.FINE, "Matching line number [{0}] with value prefix [{1}] found for input [{2}:{3}:{4}:{5}]",
-          new Object[]{currentLine, lineWithoutPassword, hostname, port, database, user});
-    }
-    //
-    return result;
-  }
-
-  //
-  private String extractPassword(String line) {
-    StringBuilder sb = new StringBuilder();
-    // take all characters up to separator (which is colon)
-    // remove escaping colon and backslash ("\\ -> \" ; "\: -> :")
-    // single backslash is not considered as error ("\a -> \a")
-    for (int i = 0; i < line.length(); i++) {
-      char chr = line.charAt(i);
-      if (chr == '\\' && (i + 1) < line.length()) {
-        char nextChr = line.charAt(i + 1);
-        if (nextChr == '\\' || nextChr == SEPARATOR) {
-          chr = nextChr;
-          i++;
-        }
-      } else if (chr == SEPARATOR) {
-        break;
-      }
-      sb.append(chr);
-    }
-    return sb.toString();
-  }
-
-  //
-  private String checkForPattern(String line, String value) {
-    String result = null;
-    if (line.startsWith("*:")) {
-      // any value match
-      result = line.substring(2);
-    } else {
-      int lPos = 0;
-      // Why not to split by separator (:) and compare by elements?
-      // Ipv6 makes in tricky. ipv6 may contain different number of colons. Also, to maintain compatibility with libpq.
-      // Compare beginning of line and value char by char.
-      // line may have escaped values, value does not have escaping
-      // line escaping is not mandatory. These are considered equal: "ab\cd:ef" == "ab\\cd\:ef" == "ab\cd\:ef" == "ab\\cd:ef"
-      for (int vPos = 0; vPos < value.length(); vPos++) {
-        if (lPos >= line.length()) {
-          return null;
-        }
-        char l = line.charAt(lPos);
-        if (l == '\\') {
-          if ((lPos + 1) >= line.length()) {
+    /**
+     * Read .pgpass resource
+     *
+     * @param hostname hostname or *
+     * @param port     port or *
+     * @param database database or *
+     * @param user     username or *
+     * @return password or null
+     */
+    public static String getPassword(String hostname, String port, String database, String user) {
+        if (hostname == null || hostname.isEmpty()) {
             return null;
-          }
-          char next = line.charAt(lPos + 1);
-          if (next == '\\' || next == SEPARATOR) {
-            l = next;
-            lPos++;
-          }
         }
-        lPos++;
-        char v = value.charAt(vPos);
-        if (l != v) {
-          return null;
+        if (port == null || port.isEmpty()) {
+            return null;
         }
-      }
-      if (line.charAt(lPos) == SEPARATOR) {
-        result = line.substring(lPos + 1);
-      }
+        if (database == null || database.isEmpty()) {
+            return null;
+        }
+        if (user == null || user.isEmpty()) {
+            return null;
+        }
+        PgPassParser pgPassParser = new PgPassParser(hostname, port, database, user);
+        return pgPassParser.findPassword();
+    }
+
+    private String findPassword() {
+        String resourceName = findPgPasswordResourceName();
+        if (resourceName == null) {
+            return null;
+        }
+        //
+        String result = null;
+        try (InputStream inputStream = openInputStream(resourceName)) {
+            result = parseInputStream(inputStream);
+        } catch (IOException e) {
+            LOGGER.log(Level.FINE, "Failed to handle resource [{0}] with error [{1}]", new Object[]{resourceName, e.getMessage()});
+        }
+        //
+        return result;
+    }
+
+    // open URL or File
+    private InputStream openInputStream(String resourceName) throws IOException {
+
+        try {
+            URL url = URI.create(resourceName).toURL();
+            return url.openStream();
+        } catch (MalformedURLException ex) {
+            // try file
+            File file = new File(resourceName);
+            return new FileInputStream(file);
+        }
+    }
+
+    // choose resource where to search for service description
+    private String findPgPasswordResourceName() {
+        // default file name
+        String pgPassFileDefaultName = PGEnvironment.PGPASSFILE.getDefaultValue();
+
+        // if there is value, use it - 1st priority
+        {
+            String propertyName = PGEnvironment.ORG_POSTGRESQL_PGPASSFILE.getName();
+            String resourceName = System.getProperty(propertyName);
+            if (resourceName != null && !resourceName.trim().isEmpty()) {
+                LOGGER.log(Level.FINE, "Value [{0}] selected from property [{1}]", new Object[]{resourceName, propertyName});
+                return resourceName;
+            }
+        }
+
+        // if there is value, use it - 2nd priority
+        {
+            String envVariableName = PGEnvironment.PGPASSFILE.getName();
+            String resourceName = System.getenv().get(envVariableName);
+            if (resourceName != null && !resourceName.trim().isEmpty()) {
+                LOGGER.log(Level.FINE, "Value [{0}] selected from environment variable [{1}]", new Object[]{resourceName, envVariableName});
+                return resourceName;
+            }
+        }
+
+        // if file in user home is readable, use it, otherwise continue - 3rd priority
+        {
+            String resourceName = "";
+            if (!OSUtil.isWindows()) {
+                resourceName += ".";
+            }
+            resourceName += pgPassFileDefaultName;
+            if (OSUtil.isWindows()) {
+                resourceName += ".conf";
+            }
+            File resourceFile = new File(OSUtil.getUserConfigRootDirectory(), resourceName);
+            if (resourceFile.canRead()) {
+                LOGGER.log(Level.FINE, "Value [{0}] selected because file exist in user home directory", new Object[]{resourceFile.getAbsolutePath()});
+                return resourceFile.getAbsolutePath();
+            }
+        }
+
+        // otherwise null
+        LOGGER.log(Level.FINE, "Value for resource [{0}] not found", pgPassFileDefaultName);
+        return null;
+    }
+
+    //
+    private String parseInputStream(InputStream inputStream) throws IOException {
+        //
+        String result = null;
+        try (
+                Reader reader = new InputStreamReader(inputStream, StandardCharsets.UTF_8);
+                BufferedReader br = new BufferedReader(reader)) {
+            //
+            String line;
+            int currentLine = 0;
+            while ((line = br.readLine()) != null) {
+                currentLine++;
+                if (line.trim().isEmpty()) {
+                    // skip empty lines
+                    continue;
+                } else if (line.startsWith("#")) {
+                    // skip lines with comments
+                    continue;
+                }
+                // analyze line, accept first matching line
+                result = evaluateLine(line, currentLine);
+                if (result != null) {
+                    break;
+                }
+            }
+        }
+        //
+        return result;
+    }
+
+    //
+    private String evaluateLine(String fullLine, int currentLine) {
+        String line = fullLine;
+        String result = null;
+        // check match
+        if ((line = checkForPattern(line, hostname)) != null
+                && (line = checkForPattern(line, port)) != null
+                && (line = checkForPattern(line, database)) != null
+                && (line = checkForPattern(line, user)) != null) {
+            // use remaining line to get password
+            result = extractPassword(line);
+            String lineWithoutPassword = fullLine.substring(0, fullLine.length() - line.length());
+            LOGGER.log(Level.FINE, "Matching line number [{0}] with value prefix [{1}] found for input [{2}:{3}:{4}:{5}]",
+                    new Object[]{currentLine, lineWithoutPassword, hostname, port, database, user});
+        }
+        //
+        return result;
+    }
+
+    //
+    private String extractPassword(String line) {
+        StringBuilder sb = new StringBuilder();
+        // take all characters up to separator (which is colon)
+        // remove escaping colon and backslash ("\\ -> \" ; "\: -> :")
+        // single backslash is not considered as error ("\a -> \a")
+        for (int i = 0; i < line.length(); i++) {
+            char chr = line.charAt(i);
+            if (chr == '\\' && (i + 1) < line.length()) {
+                char nextChr = line.charAt(i + 1);
+                if (nextChr == '\\' || nextChr == SEPARATOR) {
+                    chr = nextChr;
+                    i++;
+                }
+            } else if (chr == SEPARATOR) {
+                break;
+            }
+            sb.append(chr);
+        }
+        return sb.toString();
+    }
+
+    //
+    private String checkForPattern(String line, String value) {
+        String result = null;
+        if (line.startsWith("*:")) {
+            // any value match
+            result = line.substring(2);
+        } else {
+            int lPos = 0;
+            // Why not to split by separator (:) and compare by elements?
+            // IPv6 makes it tricky. IPv6 may contain a different number of colons. Also, to maintain compatibility with libpq.
+            // Compare beginning of line and value char by char.
+            // line may have escaped values, value does not have escaping
+            // line escaping is not mandatory. These are considered equal: "ab\cd:ef" == "ab\\cd\:ef" == "ab\cd\:ef" == "ab\\cd:ef"
+            for (int vPos = 0; vPos < value.length(); vPos++) {
+                if (lPos >= line.length()) {
+                    return null;
+                }
+                char l = line.charAt(lPos);
+                if (l == '\\') {
+                    if ((lPos + 1) >= line.length()) {
+                        return null;
+                    }
+                    char next = line.charAt(lPos + 1);
+                    if (next == '\\' || next == SEPARATOR) {
+                        l = next;
+                        lPos++;
+                    }
+                }
+                lPos++;
+                char v = value.charAt(vPos);
+                if (l != v) {
+                    return null;
+                }
+            }
+            if (line.charAt(lPos) == SEPARATOR) {
+                result = line.substring(lPos + 1);
+            }
+        }
+        return result;
     }
-    return result;
-  }
 
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/jdbcurlresolver/PgServiceConfParser.java b/pgjdbc/src/main/java/org/postgresql/jdbcurlresolver/PgServiceConfParser.java
index 7bca1b1..6773d92 100644
--- a/pgjdbc/src/main/java/org/postgresql/jdbcurlresolver/PgServiceConfParser.java
+++ b/pgjdbc/src/main/java/org/postgresql/jdbcurlresolver/PgServiceConfParser.java
@@ -34,225 +34,225 @@ import java.util.stream.Collectors;
  */
 public class PgServiceConfParser {
 
-  private static final Logger LOGGER = Logger.getLogger(PgServiceConfParser.class.getName());
-  private final String serviceName;
-  private boolean ignoreIfOpenFails = true;
+    private static final Logger LOGGER = Logger.getLogger(PgServiceConfParser.class.getName());
+    private final String serviceName;
+    private boolean ignoreIfOpenFails = true;
 
-  private PgServiceConfParser(String serviceName) {
-    this.serviceName = serviceName;
-  }
-
-  /**
-   * Read pg_service.conf resource
-   *
-   * @param serviceName service name to search for
-   * @return key value pairs
-   */
-  public static Properties getServiceProperties(String serviceName) {
-    PgServiceConfParser pgServiceConfParser = new PgServiceConfParser(serviceName);
-    return pgServiceConfParser.findServiceDescription();
-  }
-
-  private Properties findServiceDescription() {
-    String resourceName = findPgServiceConfResourceName();
-    if (resourceName == null) {
-      return null;
-    }
-    //
-    Properties result = null;
-    try (InputStream inputStream = openInputStream(resourceName)) {
-      result = parseInputStream(inputStream);
-    } catch (IOException e) {
-      Level level = ignoreIfOpenFails ? Level.FINE : Level.WARNING;
-      LOGGER.log(level, "Failed to handle resource [{0}] with error [{1}]", new Object[]{resourceName, e.getMessage()});
-    }
-    //
-    return result;
-  }
-
-  // open URL or File
-  private InputStream openInputStream(String resourceName) throws IOException {
-
-    try {
-      URL url = URI.create(resourceName).toURL();
-      return url.openStream();
-    } catch ( MalformedURLException ex ) {
-      // try file
-      File file = new File(resourceName);
-      return new FileInputStream(file);
-    }
-  }
-
-  // choose resource where to search for service description
-  private String findPgServiceConfResourceName() {
-    // default file name
-    String pgServiceConfFileDefaultName = PGEnvironment.PGSERVICEFILE.getDefaultValue();
-
-    // if there is value, use it - 1st priority
-    {
-      String propertyName = PGEnvironment.ORG_POSTGRESQL_PGSERVICEFILE.getName();
-      String resourceName = System.getProperty(propertyName);
-      if (resourceName != null && !resourceName.trim().isEmpty()) {
-        this.ignoreIfOpenFails = false;
-        LOGGER.log(Level.FINE, "Value [{0}] selected from property [{1}]",
-            new Object[]{resourceName, propertyName});
-        return resourceName;
-      }
+    private PgServiceConfParser(String serviceName) {
+        this.serviceName = serviceName;
     }
 
-    // if there is value, use it - 2nd priority
-    {
-      String envVariableName = PGEnvironment.PGSERVICEFILE.getName();
-      String resourceName = System.getenv().get(envVariableName);
-      if (resourceName != null && !resourceName.trim().isEmpty()) {
-        this.ignoreIfOpenFails = false;
-        LOGGER.log(Level.FINE, "Value [{0}] selected from environment variable [{1}]",
-            new Object[]{resourceName, envVariableName});
-        return resourceName;
-      }
+    /**
+     * Read pg_service.conf resource
+     *
+     * @param serviceName service name to search for
+     * @return key value pairs
+     */
+    public static Properties getServiceProperties(String serviceName) {
+        PgServiceConfParser pgServiceConfParser = new PgServiceConfParser(serviceName);
+        return pgServiceConfParser.findServiceDescription();
     }
 
+    private Properties findServiceDescription() {
+        String resourceName = findPgServiceConfResourceName();
+        if (resourceName == null) {
+            return null;
+        }
+        //
+        Properties result = null;
+        try (InputStream inputStream = openInputStream(resourceName)) {
+            result = parseInputStream(inputStream);
+        } catch (IOException e) {
+            Level level = ignoreIfOpenFails ? Level.FINE : Level.WARNING;
+            LOGGER.log(level, "Failed to handle resource [{0}] with error [{1}]", new Object[]{resourceName, e.getMessage()});
+        }
+        //
+        return result;
+    }
+
+    // open URL or File
+    private InputStream openInputStream(String resourceName) throws IOException {
+
+        try {
+            URL url = URI.create(resourceName).toURL();
+            return url.openStream();
+        } catch (MalformedURLException ex) {
+            // try file
+            File file = new File(resourceName);
+            return new FileInputStream(file);
+        }
+    }
+
+    // choose resource where to search for service description
+    private String findPgServiceConfResourceName() {
+        // default file name
+        String pgServiceConfFileDefaultName = PGEnvironment.PGSERVICEFILE.getDefaultValue();
+
+        // if there is value, use it - 1st priority
+        {
+            String propertyName = PGEnvironment.ORG_POSTGRESQL_PGSERVICEFILE.getName();
+            String resourceName = System.getProperty(propertyName);
+            if (resourceName != null && !resourceName.trim().isEmpty()) {
+                this.ignoreIfOpenFails = false;
+                LOGGER.log(Level.FINE, "Value [{0}] selected from property [{1}]",
+                        new Object[]{resourceName, propertyName});
+                return resourceName;
+            }
+        }
+
+        // if there is value, use it - 2nd priority
+        {
+            String envVariableName = PGEnvironment.PGSERVICEFILE.getName();
+            String resourceName = System.getenv().get(envVariableName);
+            if (resourceName != null && !resourceName.trim().isEmpty()) {
+                this.ignoreIfOpenFails = false;
+                LOGGER.log(Level.FINE, "Value [{0}] selected from environment variable [{1}]",
+                        new Object[]{resourceName, envVariableName});
+                return resourceName;
+            }
+        }
+
     /*
      if file in user home is readable, use it, otherwise continue - 3rd priority
      in the case that the file is in the user home directory it is prepended with '.'
      */
-    {
-      String resourceName = "." + pgServiceConfFileDefaultName;
-      File resourceFile = new File(OSUtil.getUserConfigRootDirectory(), resourceName);
-      if (resourceFile.canRead()) {
-        LOGGER.log(Level.FINE, "Value [{0}] selected because file exist in user home directory", new Object[]{resourceFile.getAbsolutePath()});
-        return resourceFile.getAbsolutePath();
-      }
-    }
-
-    // if there is value, use it - 4th priority
-    {
-      String envVariableName = PGEnvironment.PGSYSCONFDIR.getName();
-      String pgSysconfDir = System.getenv().get(envVariableName);
-      if (pgSysconfDir != null && !pgSysconfDir.trim().isEmpty()) {
-        String resourceName = pgSysconfDir + File.separator + pgServiceConfFileDefaultName;
-        LOGGER.log(Level.FINE, "Value [{0}] selected using environment variable [{1}]", new Object[]{resourceName, envVariableName});
-        return resourceName;
-      }
-    }
-    // otherwise null
-    LOGGER.log(Level.FINE, "Value for resource [{0}] not found", pgServiceConfFileDefaultName);
-    return null;
-  }
-
-  /*
-  # Requirements for stream handling (have to match with libpq behaviour)
-  #
-  # space around line is removed
-  #   Line: "   host=my-host    "
-  #   equal to : "host=my-host"
-  # keys are case sensitive
-  #   Line: "host=my-host"
-  #   not equal to : "HOST=my-host"
-  # keys are limited with values described in enum PGEnvironment field name
-  #   key is invalid: "my-host=my-host"
-  # unexpected keys produce error
-  #   Example: "my-host=my-host"
-  #   Example: "HOST=my-host"
-  # space before equal sign becomes part of key
-  #   Line: "host =my-host"
-  #   key equals: "host "
-  # space after equal sign becomes part of value
-  #   Line: "host= my-host"
-  #   key equals: " my-host"
-  # in case of duplicate section - first entry counts
-  #   Line: "[service-one]"
-  #   Line: "host=host-one"
-  #   Line: "[service-two]"
-  #   Line: "host=host-two"
-  #   --> section-one is selected
-  # in case of duplicate key - first entry counts
-  #   Line: "[service-one]"
-  #   Line: "host=host-one"
-  #   Line: "host=host-two"
-  #   --> host-one is selected
-  # service name is case sensitive
-  #   Line: "[service-one]"
-  #   Line: "[service-ONE]"
-  #   --> these are unique service names
-  # whatever is between brackets is considered as service name (including space)
-  #   Line: "[ service-ONE]"
-  #   Line: "[service-ONE ]"
-  #   Line: "[service ONE]"
-  #   --> these are unique service names
-  */
-  private Properties parseInputStream(InputStream inputStream) throws IOException {
-    // build set of allowed keys
-    Set<String> allowedServiceKeys = Arrays.stream(PGProperty.values())
-        .map(PGProperty::getName)
-        .map(PGPropertyUtil::translatePGPropertyToPGService)
-        .collect(Collectors.toSet());
-
-    //
-    Properties result = new Properties();
-    boolean isFound = false;
-    try (
-        Reader reader = new InputStreamReader(inputStream, StandardCharsets.UTF_8);
-        BufferedReader br = new BufferedReader(reader)) {
-      //
-      String originalLine;
-      String line;
-      int lineNumber = 0;
-      while ((originalLine = br.readLine()) != null) {
-        lineNumber++;
-        // remove spaces around it
-        line = originalLine.trim();
-        // skip if empty line or starts with comment sign
-        if (line.isEmpty() || line.startsWith("#")) {
-          continue;
+        {
+            String resourceName = "." + pgServiceConfFileDefaultName;
+            File resourceFile = new File(OSUtil.getUserConfigRootDirectory(), resourceName);
+            if (resourceFile.canRead()) {
+                LOGGER.log(Level.FINE, "Value [{0}] selected because file exist in user home directory", new Object[]{resourceFile.getAbsolutePath()});
+                return resourceFile.getAbsolutePath();
+            }
         }
-        // find first equal sign
-        int indexOfEqualSign = line.indexOf("=");
-        // is it section start?
-        if (line.startsWith("[") && line.endsWith("]")) {
-          // stop processing if section with correct name was found already
-          if (isFound) {
-            break;
-          }
-          // get name of section
-          String sectionName = line.substring(1, line.length() - 1);
-          // if match then mark it as section is found
-          if (serviceName.equals(sectionName)) {
-            isFound = true;
-          }
-        } else if (!isFound) {
-          // skip further processing until section is found
-          continue;
-        } else if (indexOfEqualSign > 1) {
-          // get key and value
-          String key = line.substring(0, indexOfEqualSign);
-          String value = line.substring(indexOfEqualSign + 1);
-          // check key against set of allowed keys
-          if (!allowedServiceKeys.contains(key)) {
-            // log list of allowed keys
-            String allowedValuesCommaSeparated =
-                allowedServiceKeys.stream().sorted().collect(Collectors.joining(","));
-            LOGGER.log(Level.SEVERE, "Got invalid key: line number [{0}], value [{1}], allowed "
-                    + "values [{2}]",
-                new Object[]{lineNumber, originalLine, allowedValuesCommaSeparated});
-            // stop processing because of invalid key
-            return null;
-          }
-          // ignore line if value is missing
-          if (!value.isEmpty()) {
-            // ignore line having duplicate key, otherwise store key-value pair
-            result.putIfAbsent(PGPropertyUtil.translatePGServiceToPGProperty(key), value);
-          }
-        } else {
-          // if not equal sign then stop processing because of invalid syntax
-          LOGGER.log(Level.WARNING, "Not valid line: line number [{0}], value [{1}]",
-              new Object[]{lineNumber, originalLine});
-          return null;
+
+        // if there is value, use it - 4th priority
+        {
+            String envVariableName = PGEnvironment.PGSYSCONFDIR.getName();
+            String pgSysconfDir = System.getenv().get(envVariableName);
+            if (pgSysconfDir != null && !pgSysconfDir.trim().isEmpty()) {
+                String resourceName = pgSysconfDir + File.separator + pgServiceConfFileDefaultName;
+                LOGGER.log(Level.FINE, "Value [{0}] selected using environment variable [{1}]", new Object[]{resourceName, envVariableName});
+                return resourceName;
+            }
         }
-      }
+        // otherwise null
+        LOGGER.log(Level.FINE, "Value for resource [{0}] not found", pgServiceConfFileDefaultName);
+        return null;
+    }
+
+    /*
+    # Requirements for stream handling (have to match with libpq behaviour)
+    #
+    # space around line is removed
+    #   Line: "   host=my-host    "
+    #   equal to : "host=my-host"
+    # keys are case sensitive
+    #   Line: "host=my-host"
+    #   not equal to : "HOST=my-host"
+    # keys are limited with values described in enum PGEnvironment field name
+    #   key is invalid: "my-host=my-host"
+    # unexpected keys produce error
+    #   Example: "my-host=my-host"
+    #   Example: "HOST=my-host"
+    # space before equal sign becomes part of key
+    #   Line: "host =my-host"
+    #   key equals: "host "
+    # space after equal sign becomes part of value
+    #   Line: "host= my-host"
+    #   key equals: " my-host"
+    # in case of duplicate section - first entry counts
+    #   Line: "[service-one]"
+    #   Line: "host=host-one"
+    #   Line: "[service-two]"
+    #   Line: "host=host-two"
+    #   --> section-one is selected
+    # in case of duplicate key - first entry counts
+    #   Line: "[service-one]"
+    #   Line: "host=host-one"
+    #   Line: "host=host-two"
+    #   --> host-one is selected
+    # service name is case sensitive
+    #   Line: "[service-one]"
+    #   Line: "[service-ONE]"
+    #   --> these are unique service names
+    # whatever is between brackets is considered as service name (including space)
+    #   Line: "[ service-ONE]"
+    #   Line: "[service-ONE ]"
+    #   Line: "[service ONE]"
+    #   --> these are unique service names
+    */
+    private Properties parseInputStream(InputStream inputStream) throws IOException {
+        // build set of allowed keys
+        Set<String> allowedServiceKeys = Arrays.stream(PGProperty.values())
+                .map(PGProperty::getName)
+                .map(PGPropertyUtil::translatePGPropertyToPGService)
+                .collect(Collectors.toSet());
+
+        //
+        Properties result = new Properties();
+        boolean isFound = false;
+        try (
+                Reader reader = new InputStreamReader(inputStream, StandardCharsets.UTF_8);
+                BufferedReader br = new BufferedReader(reader)) {
+            //
+            String originalLine;
+            String line;
+            int lineNumber = 0;
+            while ((originalLine = br.readLine()) != null) {
+                lineNumber++;
+                // remove spaces around it
+                line = originalLine.trim();
+                // skip if empty line or starts with comment sign
+                if (line.isEmpty() || line.startsWith("#")) {
+                    continue;
+                }
+                // find first equal sign
+                int indexOfEqualSign = line.indexOf("=");
+                // is it section start?
+                if (line.startsWith("[") && line.endsWith("]")) {
+                    // stop processing if section with correct name was found already
+                    if (isFound) {
+                        break;
+                    }
+                    // get name of section
+                    String sectionName = line.substring(1, line.length() - 1);
+                    // if match then mark it as section is found
+                    if (serviceName.equals(sectionName)) {
+                        isFound = true;
+                    }
+                } else if (!isFound) {
+                    // skip further processing until section is found
+                    continue;
+                } else if (indexOfEqualSign > 1) {
+                    // get key and value
+                    String key = line.substring(0, indexOfEqualSign);
+                    String value = line.substring(indexOfEqualSign + 1);
+                    // check key against set of allowed keys
+                    if (!allowedServiceKeys.contains(key)) {
+                        // log list of allowed keys
+                        String allowedValuesCommaSeparated =
+                                allowedServiceKeys.stream().sorted().collect(Collectors.joining(","));
+                        LOGGER.log(Level.SEVERE, "Got invalid key: line number [{0}], value [{1}], allowed "
+                                        + "values [{2}]",
+                                new Object[]{lineNumber, originalLine, allowedValuesCommaSeparated});
+                        // stop processing because of invalid key
+                        return null;
+                    }
+                    // ignore line if value is missing
+                    if (!value.isEmpty()) {
+                        // ignore line having duplicate key, otherwise store key-value pair
+                        result.putIfAbsent(PGPropertyUtil.translatePGServiceToPGProperty(key), value);
+                    }
+                } else {
+                    // if not equal sign then stop processing because of invalid syntax
+                    LOGGER.log(Level.WARNING, "Not valid line: line number [{0}], value [{1}]",
+                            new Object[]{lineNumber, originalLine});
+                    return null;
+                }
+            }
+        }
+        // null means failure - service is not found
+        return isFound ? result : null;
     }
-    // null means failure - service is not found
-    return isFound ? result : null;
-  }
 
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/largeobject/BlobInputStream.java b/pgjdbc/src/main/java/org/postgresql/largeobject/BlobInputStream.java
index 15beab6..5e5adba 100644
--- a/pgjdbc/src/main/java/org/postgresql/largeobject/BlobInputStream.java
+++ b/pgjdbc/src/main/java/org/postgresql/largeobject/BlobInputStream.java
@@ -17,327 +17,320 @@ import java.sql.SQLException;
  */
 @SuppressWarnings("try")
 public class BlobInputStream extends InputStream {
-  static final int DEFAULT_MAX_BUFFER_SIZE = 512 * 1024;
-  static final int INITIAL_BUFFER_SIZE = 64 * 1024;
+    static final int DEFAULT_MAX_BUFFER_SIZE = 512 * 1024;
+    static final int INITIAL_BUFFER_SIZE = 64 * 1024;
+    private final ResourceLock lock = new ResourceLock();
+    /**
+     * The buffer size.
+     */
+    private final int maxBufferSize;
+    /**
+     * The limit.
+     */
+    private final long limit;
+    /**
+     * The parent LargeObject.
+     */
+    private LargeObject lo;
+    /**
+     * The absolute position.
+     */
+    private long absolutePosition;
+    /**
+     * Buffer used to improve performance.
+     */
+    private byte[] buffer;
+    /**
+     * Position within buffer.
+     */
+    private int bufferPosition;
+    /**
+     * The number of bytes to read on the next read.
+     * Currently, we nullify {@link #buffer}, so we can't use {@code buffer.length}.
+     */
+    private int lastBufferSize;
+    /**
+     * The mark position.
+     */
+    private long markPosition;
 
-  /**
-   * The parent LargeObject.
-   */
-  private LargeObject lo;
-  private final ResourceLock lock = new ResourceLock();
-
-  /**
-   * The absolute position.
-   */
-  private long absolutePosition;
-
-  /**
-   * Buffer used to improve performance.
-   */
-  private byte [] buffer;
-
-  /**
-   * Position within buffer.
-   */
-  private int bufferPosition;
-
-  /**
-   * The amount of bytes to read on the next read.
-   * Currently, we nullify {@link #buffer}, so we can't use {@code buffer.length}.
-   */
-  private int lastBufferSize;
-
-  /**
-   * The buffer size.
-   */
-  private final int maxBufferSize;
-
-  /**
-   * The mark position.
-   */
-  private long markPosition;
-
-  /**
-   * The limit.
-   */
-  private final long limit;
-
-  /**
-   * @param lo LargeObject to read from
-   */
-  public BlobInputStream(LargeObject lo) {
-    this(lo, DEFAULT_MAX_BUFFER_SIZE);
-  }
-
-  /**
-   * @param lo LargeObject to read from
-   * @param bsize buffer size
-   */
-
-  public BlobInputStream(LargeObject lo, int bsize) {
-    this(lo, bsize, Long.MAX_VALUE);
-  }
-
-  /**
-   * @param lo LargeObject to read from
-   * @param bsize buffer size
-   * @param limit max number of bytes to read
-   */
-  public BlobInputStream(LargeObject lo, int bsize, long limit) {
-    this.lo = lo;
-    this.maxBufferSize = bsize;
-    // The very first read multiplies the last buffer size by two, so we divide by two to get
-    // the first read to be exactly the initial buffer size
-    this.lastBufferSize = INITIAL_BUFFER_SIZE / 2;
-    // Treat -1 as no limit for backward compatibility
-    this.limit = limit == -1 ? Long.MAX_VALUE : limit;
-  }
-
-  /**
-   * The minimum required to implement input stream.
-   */
-  @Override
-  public int read() throws IOException {
-    try (ResourceLock ignore = lock.obtain()) {
-      LargeObject lo = getLo();
-      if (absolutePosition >= limit) {
-        buffer = null;
-        bufferPosition = 0;
-        return -1;
-      }
-      // read more in if necessary
-      if (buffer == null || bufferPosition >= buffer.length) {
-        // Don't hold the buffer while waiting for DB to respond
-        // Note: lo.read(...) does not support "fetching the response into the user-provided buffer"
-        // See https://github.com/pgjdbc/pgjdbc/issues/3043
-        int nextBufferSize = getNextBufferSize(1);
-        buffer = lo.read(nextBufferSize);
-        bufferPosition = 0;
-
-        if (buffer.length == 0) {
-          // The lob does not produce any more data, so we are at the end of the stream
-          return -1;
-        }
-      }
-
-      int ret = buffer[bufferPosition] & 0xFF;
-
-      bufferPosition++;
-      absolutePosition++;
-      if (bufferPosition >= buffer.length) {
-        // TODO: support buffer reuse in mark/reset
-        buffer = null;
-        bufferPosition = 0;
-      }
-
-      return ret;
-    } catch (SQLException e) {
-      long loId = lo == null ? -1 : lo.getLongOID();
-      throw new IOException(
-          GT.tr("Can not read data from large object {0}, position: {1}, buffer size: {2}",
-              loId, absolutePosition, lastBufferSize),
-          e);
+    /**
+     * @param lo LargeObject to read from
+     */
+    public BlobInputStream(LargeObject lo) {
+        this(lo, DEFAULT_MAX_BUFFER_SIZE);
     }
-  }
 
-  /**
-   * Computes the next buffer size to use for reading data from the large object.
-   * The idea is to avoid allocating too much memory, especially if the user will use just a few
-   * bytes of the data.
-   * @param len estimated read request
-   * @return next buffer size or {@link #maxBufferSize} if the buffer should not be increased
-   */
-  private int getNextBufferSize(int len) {
-    int nextBufferSize = Math.min(maxBufferSize, this.lastBufferSize * 2);
-    if (len > nextBufferSize) {
-      nextBufferSize = Math.min(maxBufferSize, Integer.highestOneBit(len * 2));
+    /**
+     * @param lo    LargeObject to read from
+     * @param bsize buffer size
+     */
+
+    public BlobInputStream(LargeObject lo, int bsize) {
+        this(lo, bsize, Long.MAX_VALUE);
     }
-    this.lastBufferSize = nextBufferSize;
-    return nextBufferSize;
-  }
 
-  @Override
-  public int read(byte[] dest, int off, int len) throws IOException {
-    if (len == 0) {
-      return 0;
+    /**
+     * @param lo    LargeObject to read from
+     * @param bsize buffer size
+     * @param limit max number of bytes to read
+     */
+    public BlobInputStream(LargeObject lo, int bsize, long limit) {
+        this.lo = lo;
+        this.maxBufferSize = bsize;
+        // The very first read multiplies the last buffer size by two, so we divide by two to get
+        // the first read to be exactly the initial buffer size
+        this.lastBufferSize = INITIAL_BUFFER_SIZE / 2;
+        // Treat -1 as no limit for backward compatibility
+        this.limit = limit == -1 ? Long.MAX_VALUE : limit;
     }
-    try (ResourceLock ignore = lock.obtain()) {
-      int bytesCopied = 0;
-      LargeObject lo = getLo();
 
-      // Check to make sure we aren't at the limit.
-      if (absolutePosition >= limit) {
-        return -1;
-      }
-
-      // Check to make sure we are not going to read past the limit
-      len = Math.min(len, (int) Math.min(limit - absolutePosition, Integer.MAX_VALUE));
-
-      // have we read anything into the buffer
-      if (buffer != null) {
-        // now figure out how much data is in the buffer
-        int bytesInBuffer = buffer.length - bufferPosition;
-        // figure out how many bytes the user wants
-        int bytesToCopy = Math.min(len, bytesInBuffer);
-        // copy them in
-        System.arraycopy(buffer, bufferPosition, dest, off, bytesToCopy);
-        // move the buffer position
-        bufferPosition += bytesToCopy;
-        if (bufferPosition >= buffer.length) {
-          // TODO: support buffer reuse in mark/reset
-          buffer = null;
-          bufferPosition = 0;
-        }
-        // position in the blob
-        absolutePosition += bytesToCopy;
-        // increment offset
-        off += bytesToCopy;
-        // decrement the length
-        len -= bytesToCopy;
-        bytesCopied = bytesToCopy;
-      }
-
-      if (len > 0) {
-        int nextBufferSize = getNextBufferSize(len);
-        // We are going to read data past the existing buffer, so we release the memory
-        // before making a DB call
-        buffer = null;
-        bufferPosition = 0;
-        int bytesRead;
-        try {
-          if (len >= nextBufferSize) {
-            // Read directly into the user's buffer
-            bytesRead = lo.read(dest, off, len);
-          } else {
-            // Refill the buffer and copy from it
-            buffer = lo.read(nextBufferSize);
-            // Note that actual number of bytes read may be less than requested
-            bytesRead = Math.min(len, buffer.length);
-            System.arraycopy(buffer, 0, dest, off, bytesRead);
-            // If we at the end of the stream, and we just copied the last bytes,
-            // we can release the buffer
-            if (bytesRead == buffer.length) {
-              // TODO: if we want to reuse the buffer in mark/reset we should not release the
-              //  buffer here
-              buffer = null;
-              bufferPosition = 0;
-            } else {
-              bufferPosition = bytesRead;
+    /**
+     * The minimum required to implement input stream.
+     */
+    @Override
+    public int read() throws IOException {
+        try (ResourceLock ignore = lock.obtain()) {
+            LargeObject lo = getLo();
+            if (absolutePosition >= limit) {
+                buffer = null;
+                bufferPosition = 0;
+                return -1;
             }
-          }
-        } catch (SQLException ex) {
-          throw new IOException(
-              GT.tr("Can not read data from large object {0}, position: {1}, buffer size: {2}",
-                  lo.getLongOID(), absolutePosition, len),
-              ex);
+            // read more in if necessary
+            if (buffer == null || bufferPosition >= buffer.length) {
+                // Don't hold the buffer while waiting for DB to respond
+                // Note: lo.read(...) does not support "fetching the response into the user-provided buffer"
+                // See https://github.com/pgjdbc/pgjdbc/issues/3043
+                int nextBufferSize = getNextBufferSize(1);
+                buffer = lo.read(nextBufferSize);
+                bufferPosition = 0;
+
+                if (buffer.length == 0) {
+                    // The lob does not produce any more data, so we are at the end of the stream
+                    return -1;
+                }
+            }
+
+            int ret = buffer[bufferPosition] & 0xFF;
+
+            bufferPosition++;
+            absolutePosition++;
+            if (bufferPosition >= buffer.length) {
+                // TODO: support buffer reuse in mark/reset
+                buffer = null;
+                bufferPosition = 0;
+            }
+
+            return ret;
+        } catch (SQLException e) {
+            long loId = lo == null ? -1 : lo.getLongOID();
+            throw new IOException(
+                    GT.tr("Can not read data from large object {0}, position: {1}, buffer size: {2}",
+                            loId, absolutePosition, lastBufferSize),
+                    e);
         }
-        bytesCopied += bytesRead;
-        absolutePosition += bytesRead;
-      }
-      return bytesCopied == 0 ? -1 : bytesCopied;
     }
-  }
 
-  /**
-   * <p>Closes this input stream and releases any system resources associated with the stream.</p>
-   *
-   * <p>The <code>close</code> method of <code>InputStream</code> does nothing.</p>
-   *
-   * @throws IOException if an I/O error occurs.
-   */
-  @Override
-  public void close() throws IOException {
-    long loId = 0;
-    try (ResourceLock ignore = lock.obtain()) {
-      LargeObject lo = this.lo;
-      if (lo != null) {
-        loId = lo.getLongOID();
-        lo.close();
-      }
-      this.lo = null;
-    } catch (SQLException e) {
-      throw new IOException(
-          GT.tr("Can not close large object {0}",
-              loId),
-          e);
-    }
-  }
-
-  /**
-   * <p>Marks the current position in this input stream. A subsequent call to the <code>reset</code>
-   * method repositions this stream at the last marked position so that subsequent reads re-read the
-   * same bytes.</p>
-   *
-   * <p>The <code>readlimit</code> arguments tells this input stream to allow that many bytes to be
-   * read before the mark position gets invalidated.</p>
-   *
-   * <p>The general contract of <code>mark</code> is that, if the method <code>markSupported</code>
-   * returns <code>true</code>, the stream somehow remembers all the bytes read after the call to
-   * <code>mark</code> and stands ready to supply those same bytes again if and whenever the method
-   * <code>reset</code> is called. However, the stream is not required to remember any data at all
-   * if more than <code>readlimit</code> bytes are read from the stream before <code>reset</code> is
-   * called.</p>
-   *
-   * <p>Marking a closed stream should not have any effect on the stream.</p>
-   *
-   * @param readlimit the maximum limit of bytes that can be read before the mark position becomes
-   *        invalid.
-   * @see java.io.InputStream#reset()
-   */
-  @Override
-  public void mark(int readlimit) {
-    try (ResourceLock ignore = lock.obtain()) {
-      markPosition = absolutePosition;
-    }
-  }
-
-  /**
-   * Repositions this stream to the position at the time the <code>mark</code> method was last
-   * called on this input stream. NB: If mark is not called we move to the beginning.
-   *
-   * @see java.io.InputStream#mark(int)
-   * @see java.io.IOException
-   */
-  @Override
-  public void reset() throws IOException {
-    try (ResourceLock ignore = lock.obtain()) {
-      LargeObject lo = getLo();
-      long loId = lo.getLongOID();
-      try {
-        if (markPosition <= Integer.MAX_VALUE) {
-          lo.seek((int) markPosition);
-        } else {
-          lo.seek64(markPosition, LargeObject.SEEK_SET);
+    /**
+     * Computes the next buffer size to use for reading data from the large object.
+     * The idea is to avoid allocating too much memory, especially if the user will use just a few
+     * bytes of the data.
+     *
+     * @param len estimated read request
+     * @return next buffer size or {@link #maxBufferSize} if the buffer should not be increased
+     */
+    private int getNextBufferSize(int len) {
+        int nextBufferSize = Math.min(maxBufferSize, this.lastBufferSize * 2);
+        if (len > nextBufferSize) {
+            nextBufferSize = Math.min(maxBufferSize, Integer.highestOneBit(len * 2));
         }
-        buffer = null;
-        absolutePosition = markPosition;
-      } catch (SQLException e) {
-        throw new IOException(
-            GT.tr("Can not reset stream for large object {0} to position {1}",
-                loId, markPosition),
-            e);
-      }
+        this.lastBufferSize = nextBufferSize;
+        return nextBufferSize;
     }
-  }
 
-  /**
-   * Tests if this input stream supports the <code>mark</code> and <code>reset</code> methods. The
-   * <code>markSupported</code> method of <code>InputStream</code> returns <code>false</code>.
-   *
-   * @return <code>true</code> if this true type supports the mark and reset method;
-   *         <code>false</code> otherwise.
-   * @see java.io.InputStream#mark(int)
-   * @see java.io.InputStream#reset()
-   */
-  @Override
-  public boolean markSupported() {
-    return true;
-  }
+    @Override
+    public int read(byte[] dest, int off, int len) throws IOException {
+        if (len == 0) {
+            return 0;
+        }
+        try (ResourceLock ignore = lock.obtain()) {
+            int bytesCopied = 0;
+            LargeObject lo = getLo();
 
-  private LargeObject getLo() throws IOException {
-    if (lo == null) {
-      throw new IOException("BlobOutputStream is closed");
+            // Check to make sure we aren't at the limit.
+            if (absolutePosition >= limit) {
+                return -1;
+            }
+
+            // Check to make sure we are not going to read past the limit
+            len = Math.min(len, (int) Math.min(limit - absolutePosition, Integer.MAX_VALUE));
+
+            // have we read anything into the buffer
+            if (buffer != null) {
+                // now figure out how much data is in the buffer
+                int bytesInBuffer = buffer.length - bufferPosition;
+                // figure out how many bytes the user wants
+                int bytesToCopy = Math.min(len, bytesInBuffer);
+                // copy them in
+                System.arraycopy(buffer, bufferPosition, dest, off, bytesToCopy);
+                // move the buffer position
+                bufferPosition += bytesToCopy;
+                if (bufferPosition >= buffer.length) {
+                    // TODO: support buffer reuse in mark/reset
+                    buffer = null;
+                    bufferPosition = 0;
+                }
+                // position in the blob
+                absolutePosition += bytesToCopy;
+                // increment offset
+                off += bytesToCopy;
+                // decrement the length
+                len -= bytesToCopy;
+                bytesCopied = bytesToCopy;
+            }
+
+            if (len > 0) {
+                int nextBufferSize = getNextBufferSize(len);
+                // We are going to read data past the existing buffer, so we release the memory
+                // before making a DB call
+                buffer = null;
+                bufferPosition = 0;
+                int bytesRead;
+                try {
+                    if (len >= nextBufferSize) {
+                        // Read directly into the user's buffer
+                        bytesRead = lo.read(dest, off, len);
+                    } else {
+                        // Refill the buffer and copy from it
+                        buffer = lo.read(nextBufferSize);
+                        // Note that actual number of bytes read may be less than requested
+                        bytesRead = Math.min(len, buffer.length);
+                        System.arraycopy(buffer, 0, dest, off, bytesRead);
+                        // If we are at the end of the stream, and we just copied the last bytes,
+                        // we can release the buffer
+                        if (bytesRead == buffer.length) {
+                            // TODO: if we want to reuse the buffer in mark/reset we should not release the
+                            //  buffer here
+                            buffer = null;
+                            bufferPosition = 0;
+                        } else {
+                            bufferPosition = bytesRead;
+                        }
+                    }
+                } catch (SQLException ex) {
+                    throw new IOException(
+                            GT.tr("Can not read data from large object {0}, position: {1}, buffer size: {2}",
+                                    lo.getLongOID(), absolutePosition, len),
+                            ex);
+                }
+                bytesCopied += bytesRead;
+                absolutePosition += bytesRead;
+            }
+            return bytesCopied == 0 ? -1 : bytesCopied;
+        }
+    }
+
+    /**
+     * <p>Closes this input stream and releases any system resources associated with the stream.</p>
+     *
+     * <p>The <code>close</code> method of <code>InputStream</code> does nothing.</p>
+     *
+     * @throws IOException if an I/O error occurs.
+     */
+    @Override
+    public void close() throws IOException {
+        long loId = 0;
+        try (ResourceLock ignore = lock.obtain()) {
+            LargeObject lo = this.lo;
+            if (lo != null) {
+                loId = lo.getLongOID();
+                lo.close();
+            }
+            this.lo = null;
+        } catch (SQLException e) {
+            throw new IOException(
+                    GT.tr("Can not close large object {0}",
+                            loId),
+                    e);
+        }
+    }
+
+    /**
+     * <p>Marks the current position in this input stream. A subsequent call to the <code>reset</code>
+     * method repositions this stream at the last marked position so that subsequent reads re-read the
+     * same bytes.</p>
+     *
+     * <p>The <code>readlimit</code> arguments tells this input stream to allow that many bytes to be
+     * read before the mark position gets invalidated.</p>
+     *
+     * <p>The general contract of <code>mark</code> is that, if the method <code>markSupported</code>
+     * returns <code>true</code>, the stream somehow remembers all the bytes read after the call to
+     * <code>mark</code> and stands ready to supply those same bytes again if and whenever the method
+     * <code>reset</code> is called. However, the stream is not required to remember any data at all
+     * if more than <code>readlimit</code> bytes are read from the stream before <code>reset</code> is
+     * called.</p>
+     *
+     * <p>Marking a closed stream should not have any effect on the stream.</p>
+     *
+     * @param readlimit the maximum limit of bytes that can be read before the mark position becomes
+     *                  invalid.
+     * @see java.io.InputStream#reset()
+     */
+    @Override
+    public void mark(int readlimit) {
+        try (ResourceLock ignore = lock.obtain()) {
+            markPosition = absolutePosition;
+        }
+    }
+
+    /**
+     * Repositions this stream to the position at the time the <code>mark</code> method was last
+     * called on this input stream. NB: If mark is not called we move to the beginning.
+     *
+     * @see java.io.InputStream#mark(int)
+     * @see java.io.IOException
+     */
+    @Override
+    public void reset() throws IOException {
+        try (ResourceLock ignore = lock.obtain()) {
+            LargeObject lo = getLo();
+            long loId = lo.getLongOID();
+            try {
+                if (markPosition <= Integer.MAX_VALUE) {
+                    lo.seek((int) markPosition);
+                } else {
+                    lo.seek64(markPosition, LargeObject.SEEK_SET);
+                }
+                buffer = null;
+                absolutePosition = markPosition;
+            } catch (SQLException e) {
+                throw new IOException(
+                        GT.tr("Can not reset stream for large object {0} to position {1}",
+                                loId, markPosition),
+                        e);
+            }
+        }
+    }
+
+    /**
+     * Tests if this input stream supports the <code>mark</code> and <code>reset</code> methods. The
+     * <code>markSupported</code> method of <code>InputStream</code> returns <code>false</code>.
+     *
+     * @return <code>true</code> if this stream instance supports the mark and reset methods;
+     * <code>false</code> otherwise.
+     * @see java.io.InputStream#mark(int)
+     * @see java.io.InputStream#reset()
+     */
+    @Override
+    public boolean markSupported() {
+        return true;
+    }
+
+    private LargeObject getLo() throws IOException {
+        if (lo == null) {
+            throw new IOException("BlobInputStream is closed");
+        }
+        return lo;
     }
-    return lo;
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/largeobject/BlobOutputStream.java b/pgjdbc/src/main/java/org/postgresql/largeobject/BlobOutputStream.java
index 2636ee4..4d32313 100644
--- a/pgjdbc/src/main/java/org/postgresql/largeobject/BlobOutputStream.java
+++ b/pgjdbc/src/main/java/org/postgresql/largeobject/BlobOutputStream.java
@@ -19,228 +19,225 @@ import java.sql.SQLException;
  */
 @SuppressWarnings("try")
 public class BlobOutputStream extends OutputStream {
-  static final int DEFAULT_MAX_BUFFER_SIZE = 512 * 1024;
+    static final int DEFAULT_MAX_BUFFER_SIZE = 512 * 1024;
+    private final ResourceLock lock = new ResourceLock();
+    /**
+     * Maximum size of the write buffer (default 512 KiB, see DEFAULT_MAX_BUFFER_SIZE).
+     */
+    private final int maxBufferSize;
+    /**
+     * The parent LargeObject.
+     */
+    private LargeObject lo;
+    /**
+     * Buffer.
+     */
+    private byte[] buf;
+    /**
+     * Position within the buffer.
+     */
+    private int bufferPosition;
 
-  /**
-   * The parent LargeObject.
-   */
-  private LargeObject lo;
-  private final ResourceLock lock = new ResourceLock();
-
-  /**
-   * Buffer.
-   */
-  private byte [] buf;
-
-  /**
-   * Size of the buffer (default 1K).
-   */
-  private final int maxBufferSize;
-
-  /**
-   * Position within the buffer.
-   */
-  private int bufferPosition;
-
-  /**
-   * Create an OutputStream to a large object.
-   *
-   * @param lo LargeObject
-   */
-  public BlobOutputStream(LargeObject lo) {
-    this(lo, DEFAULT_MAX_BUFFER_SIZE);
-  }
-
-  /**
-   * Create an OutputStream to a large object.
-   *
-   * @param lo LargeObject
-   * @param bufferSize The size of the buffer for single-byte writes
-   */
-  public BlobOutputStream(LargeObject lo, int bufferSize) {
-    this.lo = lo;
-    // Avoid "0" buffer size, and ensure the bufferSize will always be a power of two
-    this.maxBufferSize = Integer.highestOneBit(Math.max(bufferSize, 1));
-  }
-
-  /**
-   * Grows an internal buffer to ensure the extra bytes fit in the buffer.
-   * @param extraBytes the number of extra bytes that should fit in the buffer
-   * @return new buffer
-   */
-  private byte[] growBuffer(int extraBytes) {
-    byte[] buf = this.buf;
-    if (buf != null && (buf.length == maxBufferSize || buf.length - bufferPosition >= extraBytes)) {
-      // Buffer is already large enough
-      return buf;
+    /**
+     * Create an OutputStream to a large object.
+     *
+     * @param lo LargeObject
+     */
+    public BlobOutputStream(LargeObject lo) {
+        this(lo, DEFAULT_MAX_BUFFER_SIZE);
     }
-    // We use power-of-two buffers, so they align nicely with PostgreSQL's LargeObject slicing
-    // By default PostgreSQL slices the data in 2KiB chunks
-    int newSize = Math.min(maxBufferSize, Integer.highestOneBit(bufferPosition + extraBytes) * 2);
-    byte[] newBuffer = new byte[newSize];
-    if (buf != null && bufferPosition != 0) {
-      // There was some data in the old buffer, copy it over
-      System.arraycopy(buf, 0, newBuffer, 0, bufferPosition);
+
+    /**
+     * Create an OutputStream to a large object.
+     *
+     * @param lo         LargeObject
+     * @param bufferSize The size of the buffer for single-byte writes
+     */
+    public BlobOutputStream(LargeObject lo, int bufferSize) {
+        this.lo = lo;
+        // Avoid "0" buffer size, and ensure the bufferSize will always be a power of two
+        this.maxBufferSize = Integer.highestOneBit(Math.max(bufferSize, 1));
     }
-    this.buf = newBuffer;
-    return newBuffer;
-  }
 
-  @Override
-  public void write(int b) throws IOException {
-    long loId = 0;
-    try (ResourceLock ignore = lock.obtain()) {
-      LargeObject lo = checkClosed();
-      loId = lo.getLongOID();
-      byte[] buf = growBuffer(16);
-      if (bufferPosition >= buf.length) {
-        lo.write(buf);
-        bufferPosition = 0;
-      }
-      buf[bufferPosition++] = (byte) b;
-    } catch (SQLException e) {
-      throw new IOException(
-          GT.tr("Can not write data to large object {0}, requested write length: {1}",
-              loId, 1),
-          e);
-    }
-  }
-
-  @Override
-  public void write(byte[] b, int off, int len) throws IOException {
-    long loId = 0;
-    try (ResourceLock ignore = lock.obtain()) {
-      LargeObject lo = checkClosed();
-      loId = lo.getLongOID();
-      byte[] buf = this.buf;
-      int totalData = bufferPosition + len;
-      // We have two parts of the data (it goes sequentially):
-      // 1) Data in buf at positions [0, bufferPosition)
-      // 2) Data in b at positions [off, off + len)
-      // If the new data fits into the buffer, we just copy it there.
-      // Otherwise, it might sound nice idea to just write them to the database, unfortunately,
-      // it is not optimal, as PostgreSQL chunks LargeObjects into 2KiB rows.
-      // That is why we would like to avoid writing a part of 2KiB chunk, and then issue overwrite
-      // causing DB to load and update the row.
-      //
-      // In fact, LOBLKSIZE is BLCKSZ/4, so users might have different values, so we use
-      // 8KiB write alignment for larger buffer sizes just in case.
-      //
-      //  | buf[0] ... buf[bufferPosition] | b[off] ... b[off + len] |
-      //  |<----------------- totalData ---------------------------->|
-      // If the total data does not align with 2048, we might have some remainder that we will
-      // copy to the beginning of the buffer and write later.
-      // The remainder can fall into either b (e.g. if the requested len is big enough):
-      //
-      //  | buf[0] ... buf[bufferPosition] | b[off] ........ b[off + len] |
-      //  |<----------------- totalData --------------------------------->|
-      //  |<-------writeFromBuf----------->|<-writeFromB->|<--tailLength->|
-      //
-      // or
-      // buf (e.g. if the requested write len is small yet it does not fit into the max buffer size):
-      //  | buf[0] .................... buf[bufferPosition] | b[off] .. b[off + len] |
-      //  |<----------------- totalData -------------------------------------------->|
-      //  |<-------writeFromBuf---------------->|<--------tailLength---------------->|
-      // "writeFromB" will be zero in that case
-
-      // We want aligned writes, so the write requests chunk nicely into large object rows
-      int tailLength =
-          maxBufferSize >= 8192 ? totalData % 8192 : (
-              maxBufferSize >= 2048 ? totalData % 2048 : 0
-          );
-
-      if (totalData >= maxBufferSize) {
-        // The resulting data won't fit into the buffer, so we flush the data to the database
-        int writeFromBuffer = Math.min(bufferPosition, totalData - tailLength);
-        int writeFromB = Math.max(0, totalData - writeFromBuffer - tailLength);
-        if (buf == null || bufferPosition <= 0) {
-          // The buffer is empty, so we can write the data directly
-          lo.write(b, off, writeFromB);
-        } else {
-          if (writeFromB == 0) {
-            lo.write(buf, 0, writeFromBuffer);
-          } else {
-            lo.write(
-                ByteStreamWriter.of(
-                    ByteBuffer.wrap(buf, 0, writeFromBuffer),
-                    ByteBuffer.wrap(b, off, writeFromB)));
-          }
-          // There might be some data left in the buffer since we keep the tail
-          if (writeFromBuffer >= bufferPosition) {
-            // The buffer was fully written to the database
-            bufferPosition = 0;
-          } else {
-            // Copy the rest to the beginning
-            System.arraycopy(buf, writeFromBuffer, buf, 0, bufferPosition - writeFromBuffer);
-            bufferPosition -= writeFromBuffer;
-          }
+    /**
+     * Grows an internal buffer to ensure the extra bytes fit in the buffer.
+     *
+     * @param extraBytes the number of extra bytes that should fit in the buffer
+     * @return new buffer
+     */
+    private byte[] growBuffer(int extraBytes) {
+        byte[] buf = this.buf;
+        if (buf != null && (buf.length == maxBufferSize || buf.length - bufferPosition >= extraBytes)) {
+            // Buffer is already large enough
+            return buf;
         }
-        len -= writeFromB;
-        off += writeFromB;
-      }
-      if (len > 0) {
-        buf = growBuffer(len);
-        System.arraycopy(b, off, buf, bufferPosition, len);
-        bufferPosition += len;
-      }
-    } catch (SQLException e) {
-      throw new IOException(
-          GT.tr("Can not write data to large object {0}, requested write length: {1}",
-              loId, len),
-          e);
+        // We use power-of-two buffers, so they align nicely with PostgreSQL's LargeObject slicing
+        // By default PostgreSQL slices the data in 2KiB chunks
+        int newSize = Math.min(maxBufferSize, Integer.highestOneBit(bufferPosition + extraBytes) * 2);
+        byte[] newBuffer = new byte[newSize];
+        if (buf != null && bufferPosition != 0) {
+            // There was some data in the old buffer, copy it over
+            System.arraycopy(buf, 0, newBuffer, 0, bufferPosition);
+        }
+        this.buf = newBuffer;
+        return newBuffer;
     }
-  }
 
-  /**
-   * Flushes this output stream and forces any buffered output bytes to be written out. The general
-   * contract of <code>flush</code> is that calling it is an indication that, if any bytes
-   * previously written have been buffered by the implementation of the output stream, such bytes
-   * should immediately be written to their intended destination.
-   *
-   * @throws IOException if an I/O error occurs.
-   */
-  @Override
-  public void flush() throws IOException {
-    long loId = 0;
-    try (ResourceLock ignore = lock.obtain()) {
-      LargeObject lo = checkClosed();
-      loId = lo.getLongOID();
-      byte[] buf = this.buf;
-      if (buf != null && bufferPosition > 0) {
-        lo.write(buf, 0, bufferPosition);
-      }
-      bufferPosition = 0;
-    } catch (SQLException e) {
-      throw new IOException(
-          GT.tr("Can not flush large object {0}",
-              loId),
-          e);
+    @Override
+    public void write(int b) throws IOException {
+        long loId = 0;
+        try (ResourceLock ignore = lock.obtain()) {
+            LargeObject lo = checkClosed();
+            loId = lo.getLongOID();
+            byte[] buf = growBuffer(16);
+            if (bufferPosition >= buf.length) {
+                lo.write(buf);
+                bufferPosition = 0;
+            }
+            buf[bufferPosition++] = (byte) b;
+        } catch (SQLException e) {
+            throw new IOException(
+                    GT.tr("Can not write data to large object {0}, requested write length: {1}",
+                            loId, 1),
+                    e);
+        }
     }
-  }
 
-  @Override
-  public void close() throws IOException {
-    long loId = 0;
-    try (ResourceLock ignore = lock.obtain()) {
-      LargeObject lo = this.lo;
-      if (lo != null) {
-        loId = lo.getLongOID();
-        flush();
-        lo.close();
-        this.lo = null;
-      }
-    } catch (SQLException e) {
-      throw new IOException(
-          GT.tr("Can not close large object {0}",
-              loId),
-          e);
-    }
-  }
+    @Override
+    public void write(byte[] b, int off, int len) throws IOException {
+        long loId = 0;
+        try (ResourceLock ignore = lock.obtain()) {
+            LargeObject lo = checkClosed();
+            loId = lo.getLongOID();
+            byte[] buf = this.buf;
+            int totalData = bufferPosition + len;
+            // We have two parts of the data (it goes sequentially):
+            // 1) Data in buf at positions [0, bufferPosition)
+            // 2) Data in b at positions [off, off + len)
+            // If the new data fits into the buffer, we just copy it there.
+            // Otherwise, it might sound like a nice idea to just write them to the database; unfortunately,
+            // that is not optimal, as PostgreSQL chunks LargeObjects into 2KiB rows.
+            // That is why we would like to avoid writing a partial 2KiB chunk and then issuing an overwrite,
+            // which would cause the DB to load and update the row.
+            //
+            // In fact, LOBLKSIZE is BLCKSZ/4, and users might have different values, so we use
+            // 8KiB write alignment for larger buffer sizes just in case.
+            //
+            //  | buf[0] ... buf[bufferPosition] | b[off] ... b[off + len] |
+            //  |<----------------- totalData ---------------------------->|
+            // If the total data does not align with 2048, we might have some remainder that we will
+            // copy to the beginning of the buffer and write later.
+            // The remainder can fall into either b (e.g. if the requested len is big enough):
+            //
+            //  | buf[0] ... buf[bufferPosition] | b[off] ........ b[off + len] |
+            //  |<----------------- totalData --------------------------------->|
+            //  |<-------writeFromBuf----------->|<-writeFromB->|<--tailLength->|
+            //
+            // or
+            // buf (e.g. if the requested write len is small yet it does not fit into the max buffer size):
+            //  | buf[0] .................... buf[bufferPosition] | b[off] .. b[off + len] |
+            //  |<----------------- totalData -------------------------------------------->|
+            //  |<-------writeFromBuf---------------->|<--------tailLength---------------->|
+            // "writeFromB" will be zero in that case
 
-  private LargeObject checkClosed() throws IOException {
-    if (lo == null) {
-      throw new IOException("BlobOutputStream is closed");
+            // We want aligned writes, so the write requests chunk nicely into large object rows
+            int tailLength =
+                    maxBufferSize >= 8192 ? totalData % 8192 : (
+                            maxBufferSize >= 2048 ? totalData % 2048 : 0
+                    );
+
+            if (totalData >= maxBufferSize) {
+                // The resulting data won't fit into the buffer, so we flush the data to the database
+                int writeFromBuffer = Math.min(bufferPosition, totalData - tailLength);
+                int writeFromB = Math.max(0, totalData - writeFromBuffer - tailLength);
+                if (buf == null || bufferPosition <= 0) {
+                    // The buffer is empty, so we can write the data directly
+                    lo.write(b, off, writeFromB);
+                } else {
+                    if (writeFromB == 0) {
+                        lo.write(buf, 0, writeFromBuffer);
+                    } else {
+                        lo.write(
+                                ByteStreamWriter.of(
+                                        ByteBuffer.wrap(buf, 0, writeFromBuffer),
+                                        ByteBuffer.wrap(b, off, writeFromB)));
+                    }
+                    // There might be some data left in the buffer since we keep the tail
+                    if (writeFromBuffer >= bufferPosition) {
+                        // The buffer was fully written to the database
+                        bufferPosition = 0;
+                    } else {
+                        // Copy the rest to the beginning
+                        System.arraycopy(buf, writeFromBuffer, buf, 0, bufferPosition - writeFromBuffer);
+                        bufferPosition -= writeFromBuffer;
+                    }
+                }
+                len -= writeFromB;
+                off += writeFromB;
+            }
+            if (len > 0) {
+                buf = growBuffer(len);
+                System.arraycopy(b, off, buf, bufferPosition, len);
+                bufferPosition += len;
+            }
+        } catch (SQLException e) {
+            throw new IOException(
+                    GT.tr("Can not write data to large object {0}, requested write length: {1}",
+                            loId, len),
+                    e);
+        }
+    }
+
+    /**
+     * Flushes this output stream and forces any buffered output bytes to be written out. The general
+     * contract of <code>flush</code> is that calling it is an indication that, if any bytes
+     * previously written have been buffered by the implementation of the output stream, such bytes
+     * should immediately be written to their intended destination.
+     *
+     * @throws IOException if an I/O error occurs.
+     */
+    @Override
+    public void flush() throws IOException {
+        long loId = 0;
+        try (ResourceLock ignore = lock.obtain()) {
+            LargeObject lo = checkClosed();
+            loId = lo.getLongOID();
+            byte[] buf = this.buf;
+            if (buf != null && bufferPosition > 0) {
+                lo.write(buf, 0, bufferPosition);
+            }
+            bufferPosition = 0;
+        } catch (SQLException e) {
+            throw new IOException(
+                    GT.tr("Can not flush large object {0}",
+                            loId),
+                    e);
+        }
+    }
+
+    @Override
+    public void close() throws IOException {
+        long loId = 0;
+        try (ResourceLock ignore = lock.obtain()) {
+            LargeObject lo = this.lo;
+            if (lo != null) {
+                loId = lo.getLongOID();
+                flush();
+                lo.close();
+                this.lo = null;
+            }
+        } catch (SQLException e) {
+            throw new IOException(
+                    GT.tr("Can not close large object {0}",
+                            loId),
+                    e);
+        }
+    }
+
+    private LargeObject checkClosed() throws IOException {
+        if (lo == null) {
+            throw new IOException("BlobOutputStream is closed");
+        }
+        return lo;
     }
-    return lo;
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/largeobject/LargeObject.java b/pgjdbc/src/main/java/org/postgresql/largeobject/LargeObject.java
index f56812e..d92cfb9 100644
--- a/pgjdbc/src/main/java/org/postgresql/largeobject/LargeObject.java
+++ b/pgjdbc/src/main/java/org/postgresql/largeobject/LargeObject.java
@@ -41,405 +41,402 @@ import java.sql.SQLException;
  */
 @SuppressWarnings("deprecation") // support for deprecated Fastpath API
 public class LargeObject
-    implements AutoCloseable {
+        implements AutoCloseable {
 
-  /**
-   * Indicates a seek from the beginning of a file.
-   */
-  public static final int SEEK_SET = 0;
+    /**
+     * Indicates a seek from the beginning of a file.
+     */
+    public static final int SEEK_SET = 0;
 
-  /**
-   * Indicates a seek from the current position.
-   */
-  public static final int SEEK_CUR = 1;
+    /**
+     * Indicates a seek from the current position.
+     */
+    public static final int SEEK_CUR = 1;
 
-  /**
-   * Indicates a seek from the end of a file.
-   */
-  public static final int SEEK_END = 2;
+    /**
+     * Indicates a seek from the end of a file.
+     */
+    public static final int SEEK_END = 2;
 
-  private static final byte[] EMPTY_BYTE_ARRAY = new byte[0];
+    private static final byte[] EMPTY_BYTE_ARRAY = new byte[0];
 
-  private final Fastpath fp; // Fastpath API to use
-  private final long oid; // OID of this object
-  private final int mode; // read/write mode of this object
-  private final int fd; // the descriptor of the open large object
+    private final Fastpath fp; // Fastpath API to use
+    private final long oid; // OID of this object
+    private final int mode; // read/write mode of this object
+    private final int fd; // the descriptor of the open large object
+    private final boolean commitOnClose; // Only initialized when open a LOB with CommitOnClose
+    private BlobOutputStream os; // The current output stream
+    private boolean closed; // true when we are closed
+    private BaseConnection conn; // Only initialized when open a LOB with CommitOnClose
 
-  private BlobOutputStream os; // The current output stream
-
-  private boolean closed; // true when we are closed
-
-  private BaseConnection conn; // Only initialized when open a LOB with CommitOnClose
-  private final boolean commitOnClose; // Only initialized when open a LOB with CommitOnClose
-
-  /**
-   * <p>This opens a large object.</p>
-   *
-   * <p>If the object does not exist, then an SQLException is thrown.</p>
-   *
-   * @param fp FastPath API for the connection to use
-   * @param oid of the Large Object to open
-   * @param mode Mode of opening the large object
-   * @param conn the connection to the database used to access this LOB
-   * @param commitOnClose commit the transaction when this LOB will be closed (defined in
-   *        LargeObjectManager)
-   * @throws SQLException if a database-access error occurs.
-   * @see org.postgresql.largeobject.LargeObjectManager
-   */
-  protected LargeObject(Fastpath fp, long oid, int mode,
-      BaseConnection conn, boolean commitOnClose)
-      throws SQLException {
-    this.fp = fp;
-    this.oid = oid;
-    this.mode = mode;
-    if (commitOnClose) {
-      this.commitOnClose = true;
-      this.conn = conn;
-    } else {
-      this.commitOnClose = false;
-    }
-
-    FastpathArg[] args = new FastpathArg[2];
-    args[0] = Fastpath.createOIDArg(oid);
-    args[1] = new FastpathArg(mode);
-    this.fd = fp.getInteger("lo_open", args);
-  }
-
-  /**
-   * <p>This opens a large object.</p>
-   *
-   * <p>If the object does not exist, then an SQLException is thrown.</p>
-   *
-   * @param fp FastPath API for the connection to use
-   * @param oid of the Large Object to open
-   * @param mode Mode of opening the large object (defined in LargeObjectManager)
-   * @throws SQLException if a database-access error occurs.
-   * @see org.postgresql.largeobject.LargeObjectManager
-   */
-  protected LargeObject(Fastpath fp, long oid, int mode) throws SQLException {
-    this(fp, oid, mode, null, false);
-  }
-
-  public LargeObject copy() throws SQLException {
-    return new LargeObject(fp, oid, mode);
-  }
-
-  /*
-   * Release large object resources during garbage cleanup.
-   *
-   * This code used to call close() however that was problematic because the scope of the fd is a
-   * transaction, thus if commit or rollback was called before garbage collection ran then the call
-   * to close would error out with an invalid large object handle. So this method now does nothing
-   * and lets the server handle cleanup when it ends the transaction.
-   *
-   * protected void finalize() throws SQLException { }
-   */
-
-  /**
-   * @return the OID of this LargeObject
-   * @deprecated As of 8.3, replaced by {@link #getLongOID()}
-   */
-  @Deprecated
-  public int getOID() {
-    return (int) oid;
-  }
-
-  /**
-   * @return the OID of this LargeObject
-   */
-  public long getLongOID() {
-    return oid;
-  }
-
-  /**
-   * This method closes the object. You must not call methods in this object after this is called.
-   *
-   * @throws SQLException if a database-access error occurs.
-   */
-  @Override
-  public void close() throws SQLException {
-    if (!closed) {
-      // flush any open output streams
-      if (os != null) {
-        try {
-          // we can't call os.close() otherwise we go into an infinite loop!
-          os.flush();
-        } catch (IOException ioe) {
-          throw new PSQLException("Exception flushing output stream", PSQLState.DATA_ERROR, ioe);
-        } finally {
-          os = null;
+    /**
+     * <p>This opens a large object.</p>
+     *
+     * <p>If the object does not exist, then an SQLException is thrown.</p>
+     *
+     * @param fp            FastPath API for the connection to use
+     * @param oid           of the Large Object to open
+     * @param mode          Mode of opening the large object
+     * @param conn          the connection to the database used to access this LOB
+     * @param commitOnClose commit the transaction when this LOB is closed (defined in
+     *                      LargeObjectManager)
+     * @throws SQLException if a database-access error occurs.
+     * @see org.postgresql.largeobject.LargeObjectManager
+     */
+    protected LargeObject(Fastpath fp, long oid, int mode,
+                          BaseConnection conn, boolean commitOnClose)
+            throws SQLException {
+        this.fp = fp;
+        this.oid = oid;
+        this.mode = mode;
+        if (commitOnClose) {
+            this.commitOnClose = true;
+            this.conn = conn;
+        } else {
+            this.commitOnClose = false;
         }
-      }
 
-      // finally close
-      FastpathArg[] args = new FastpathArg[1];
-      args[0] = new FastpathArg(fd);
-      fp.fastpath("lo_close", args); // true here as we dont care!!
-      closed = true;
-      BaseConnection conn = this.conn;
-      if (this.commitOnClose && conn != null) {
-        conn.commit();
-      }
+        FastpathArg[] args = new FastpathArg[2];
+        args[0] = Fastpath.createOIDArg(oid);
+        args[1] = new FastpathArg(mode);
+        this.fd = fp.getInteger("lo_open", args);
     }
-  }
 
-  /**
-   * Reads some data from the object, and return as a byte[] array.
-   *
-   * @param len number of bytes to read
-   * @return byte[] array containing data read
-   * @throws SQLException if a database-access error occurs.
-   */
-  public byte[] read(int len) throws SQLException {
-    // This is the original method, where the entire block (len bytes)
-    // is retrieved in one go.
-    FastpathArg[] args = new FastpathArg[2];
-    args[0] = new FastpathArg(fd);
-    args[1] = new FastpathArg(len);
-    byte[] bytes = fp.getData("loread", args);
-    if (bytes == null) {
-      return EMPTY_BYTE_ARRAY;
+    /**
+     * <p>This opens a large object.</p>
+     *
+     * <p>If the object does not exist, then an SQLException is thrown.</p>
+     *
+     * @param fp   FastPath API for the connection to use
+     * @param oid  of the Large Object to open
+     * @param mode Mode of opening the large object (defined in LargeObjectManager)
+     * @throws SQLException if a database-access error occurs.
+     * @see org.postgresql.largeobject.LargeObjectManager
+     */
+    protected LargeObject(Fastpath fp, long oid, int mode) throws SQLException {
+        this(fp, oid, mode, null, false);
     }
-    return bytes;
-  }
 
-  /**
-   * Reads some data from the object into an existing array.
-   *
-   * @param buf destination array
-   * @param off offset within array
-   * @param len number of bytes to read
-   * @return the number of bytes actually read
-   * @throws SQLException if a database-access error occurs.
-   */
-  public int read(byte[] buf, int off, int len) throws SQLException {
-    byte[] b = read(len);
-    if (b.length == 0) {
-      return 0;
+    public LargeObject copy() throws SQLException {
+        return new LargeObject(fp, oid, mode);
     }
-    len = Math.min(len, b.length);
-    System.arraycopy(b, 0, buf, off, len);
-    return len;
-  }
 
-  /**
-   * Writes an array to the object.
-   *
-   * @param buf array to write
-   * @throws SQLException if a database-access error occurs.
-   */
-  public void write(byte[] buf) throws SQLException {
-    FastpathArg[] args = new FastpathArg[2];
-    args[0] = new FastpathArg(fd);
-    args[1] = new FastpathArg(buf);
-    fp.fastpath("lowrite", args);
-  }
+    /*
+     * Release large object resources during garbage cleanup.
+     *
+     * This code used to call close() however that was problematic because the scope of the fd is a
+     * transaction, thus if commit or rollback was called before garbage collection ran then the call
+     * to close would error out with an invalid large object handle. So this method now does nothing
+     * and lets the server handle cleanup when it ends the transaction.
+     *
+     * protected void finalize() throws SQLException { }
+     */
 
-  /**
-   * Writes some data from an array to the object.
-   *
-   * @param buf destination array
-   * @param off offset within array
-   * @param len number of bytes to write
-   * @throws SQLException if a database-access error occurs.
-   */
-  public void write(byte[] buf, int off, int len) throws SQLException {
-    FastpathArg[] args = new FastpathArg[2];
-    args[0] = new FastpathArg(fd);
-    args[1] = new FastpathArg(buf, off, len);
-    fp.fastpath("lowrite", args);
-  }
-
-  /**
-   * Writes some data from a given writer to the object.
-   *
-   * @param writer the source of the data to write
-   * @throws SQLException if a database-access error occurs.
-   */
-  public void write(ByteStreamWriter writer) throws SQLException {
-    FastpathArg[] args = new FastpathArg[2];
-    args[0] = new FastpathArg(fd);
-    args[1] = FastpathArg.of(writer);
-    fp.fastpath("lowrite", args);
-  }
-
-  /**
-   * <p>Sets the current position within the object.</p>
-   *
-   * <p>This is similar to the fseek() call in the standard C library. It allows you to have random
-   * access to the large object.</p>
-   *
-   * @param pos position within object
-   * @param ref Either SEEK_SET, SEEK_CUR or SEEK_END
-   * @throws SQLException if a database-access error occurs.
-   */
-  public void seek(int pos, int ref) throws SQLException {
-    FastpathArg[] args = new FastpathArg[3];
-    args[0] = new FastpathArg(fd);
-    args[1] = new FastpathArg(pos);
-    args[2] = new FastpathArg(ref);
-    fp.fastpath("lo_lseek", args);
-  }
-
-  /**
-   * Sets the current position within the object using 64-bit value (9.3+).
-   *
-   * @param pos position within object
-   * @param ref Either SEEK_SET, SEEK_CUR or SEEK_END
-   * @throws SQLException if a database-access error occurs.
-   */
-  public void seek64(long pos, int ref) throws SQLException {
-    FastpathArg[] args = new FastpathArg[3];
-    args[0] = new FastpathArg(fd);
-    args[1] = new FastpathArg(pos);
-    args[2] = new FastpathArg(ref);
-    fp.fastpath("lo_lseek64", args);
-  }
-
-  /**
-   * <p>Sets the current position within the object.</p>
-   *
-   * <p>This is similar to the fseek() call in the standard C library. It allows you to have random
-   * access to the large object.</p>
-   *
-   * @param pos position within object from beginning
-   * @throws SQLException if a database-access error occurs.
-   */
-  public void seek(int pos) throws SQLException {
-    seek(pos, SEEK_SET);
-  }
-
-  /**
-   * @return the current position within the object
-   * @throws SQLException if a database-access error occurs.
-   */
-  public int tell() throws SQLException {
-    FastpathArg[] args = new FastpathArg[1];
-    args[0] = new FastpathArg(fd);
-    return fp.getInteger("lo_tell", args);
-  }
-
-  /**
-   * @return the current position within the object
-   * @throws SQLException if a database-access error occurs.
-   */
-  public long tell64() throws SQLException {
-    FastpathArg[] args = new FastpathArg[1];
-    args[0] = new FastpathArg(fd);
-    return fp.getLong("lo_tell64", args);
-  }
-
-  /**
-   * <p>This method is inefficient, as the only way to find out the size of the object is to seek to
-   * the end, record the current position, then return to the original position.</p>
-   *
-   * <p>A better method will be found in the future.</p>
-   *
-   * @return the size of the large object
-   * @throws SQLException if a database-access error occurs.
-   */
-  public int size() throws SQLException {
-    int cp = tell();
-    seek(0, SEEK_END);
-    int sz = tell();
-    seek(cp, SEEK_SET);
-    return sz;
-  }
-
-  /**
-   * See #size() for information about efficiency.
-   *
-   * @return the size of the large object
-   * @throws SQLException if a database-access error occurs.
-   */
-  public long size64() throws SQLException {
-    long cp = tell64();
-    seek64(0, SEEK_END);
-    long sz = tell64();
-    seek64(cp, SEEK_SET);
-    return sz;
-  }
-
-  /**
-   * Truncates the large object to the given length in bytes. If the number of bytes is larger than
-   * the current large object length, the large object will be filled with zero bytes. This method
-   * does not modify the current file offset.
-   *
-   * @param len given length in bytes
-   * @throws SQLException if something goes wrong
-   */
-  public void truncate(int len) throws SQLException {
-    FastpathArg[] args = new FastpathArg[2];
-    args[0] = new FastpathArg(fd);
-    args[1] = new FastpathArg(len);
-    fp.getInteger("lo_truncate", args);
-  }
-
-  /**
-   * Truncates the large object to the given length in bytes. If the number of bytes is larger than
-   * the current large object length, the large object will be filled with zero bytes. This method
-   * does not modify the current file offset.
-   *
-   * @param len given length in bytes
-   * @throws SQLException if something goes wrong
-   */
-  public void truncate64(long len) throws SQLException {
-    FastpathArg[] args = new FastpathArg[2];
-    args[0] = new FastpathArg(fd);
-    args[1] = new FastpathArg(len);
-    fp.getInteger("lo_truncate64", args);
-  }
-
-  /**
-   * <p>Returns an {@link InputStream} from this object.</p>
-   *
-   * <p>This {@link InputStream} can then be used in any method that requires an InputStream.</p>
-   *
-   * @return {@link InputStream} from this object
-   * @throws SQLException if a database-access error occurs.
-   */
-  public InputStream getInputStream() throws SQLException {
-    return new BlobInputStream(this);
-  }
-
-  /**
-   * Returns an {@link InputStream} from this object, that will limit the amount of data that is
-   * visible.
-   *
-   * @param limit maximum number of bytes the resulting stream will serve
-   * @return {@link InputStream} from this object
-   * @throws SQLException if a database-access error occurs.
-   */
-  public InputStream getInputStream(long limit) throws SQLException {
-    return new BlobInputStream(this, BlobInputStream.DEFAULT_MAX_BUFFER_SIZE, limit);
-  }
-
-  /**
-   * Returns an {@link InputStream} from this object, that will limit the amount of data that is
-   * visible.
-   * Added mostly for testing
-   *
-   * @param bufferSize buffer size for the stream
-   * @param limit maximum number of bytes the resulting stream will serve
-   * @return {@link InputStream} from this object
-   * @throws SQLException if a database-access error occurs.
-   */
-  public InputStream getInputStream(int bufferSize, long limit) throws SQLException {
-    return new BlobInputStream(this, bufferSize, limit);
-  }
-
-  /**
-   * <p>Returns an {@link OutputStream} to this object.</p>
-   *
-   * <p>This OutputStream can then be used in any method that requires an OutputStream.</p>
-   *
-   * @return {@link OutputStream} from this object
-   * @throws SQLException if a database-access error occurs.
-   */
-  public OutputStream getOutputStream() throws SQLException {
-    if (os == null) {
-      os = new BlobOutputStream(this);
+    /**
+     * @return the OID of this LargeObject
+     * @deprecated As of 8.3, replaced by {@link #getLongOID()}
+     */
+    @Deprecated
+    public int getOID() {
+        return (int) oid;
+    }
+
+    /**
+     * @return the OID of this LargeObject
+     */
+    public long getLongOID() {
+        return oid;
+    }
+
+    /**
+     * This method closes the object. You must not call methods in this object after this is called.
+     *
+     * @throws SQLException if a database-access error occurs.
+     */
+    @Override
+    public void close() throws SQLException {
+        if (!closed) {
+            // flush any open output streams
+            if (os != null) {
+                try {
+                    // we can't call os.close() otherwise we go into an infinite loop!
+                    os.flush();
+                } catch (IOException ioe) {
+                    throw new PSQLException("Exception flushing output stream", PSQLState.DATA_ERROR, ioe);
+                } finally {
+                    os = null;
+                }
+            }
+
+            // finally close
+            FastpathArg[] args = new FastpathArg[1];
+            args[0] = new FastpathArg(fd);
+        fp.fastpath("lo_close", args); // true here as we don't care!!
+            closed = true;
+            BaseConnection conn = this.conn;
+            if (this.commitOnClose && conn != null) {
+                conn.commit();
+            }
+        }
+    }
+
+    /**
+     * Reads some data from the object, and returns it as a byte[] array.
+     *
+     * @param len number of bytes to read
+     * @return byte[] array containing data read
+     * @throws SQLException if a database-access error occurs.
+     */
+    public byte[] read(int len) throws SQLException {
+        // This is the original method, where the entire block (len bytes)
+        // is retrieved in one go.
+        FastpathArg[] args = new FastpathArg[2];
+        args[0] = new FastpathArg(fd);
+        args[1] = new FastpathArg(len);
+        byte[] bytes = fp.getData("loread", args);
+        if (bytes == null) {
+            return EMPTY_BYTE_ARRAY;
+        }
+        return bytes;
+    }
+
+    /**
+     * Reads some data from the object into an existing array.
+     *
+     * @param buf destination array
+     * @param off offset within array
+     * @param len number of bytes to read
+     * @return the number of bytes actually read
+     * @throws SQLException if a database-access error occurs.
+     */
+    public int read(byte[] buf, int off, int len) throws SQLException {
+        byte[] b = read(len);
+        if (b.length == 0) {
+            return 0;
+        }
+        len = Math.min(len, b.length);
+        System.arraycopy(b, 0, buf, off, len);
+        return len;
+    }
+
+    /**
+     * Writes an array to the object.
+     *
+     * @param buf array to write
+     * @throws SQLException if a database-access error occurs.
+     */
+    public void write(byte[] buf) throws SQLException {
+        FastpathArg[] args = new FastpathArg[2];
+        args[0] = new FastpathArg(fd);
+        args[1] = new FastpathArg(buf);
+        fp.fastpath("lowrite", args);
+    }
+
+    /**
+     * Writes some data from an array to the object.
+     *
+     * @param buf source array containing the data to write
+     * @param off offset within array
+     * @param len number of bytes to write
+     * @throws SQLException if a database-access error occurs.
+     */
+    public void write(byte[] buf, int off, int len) throws SQLException {
+        FastpathArg[] args = new FastpathArg[2];
+        args[0] = new FastpathArg(fd);
+        args[1] = new FastpathArg(buf, off, len);
+        fp.fastpath("lowrite", args);
+    }
+
+    /**
+     * Writes some data from a given writer to the object.
+     *
+     * @param writer the source of the data to write
+     * @throws SQLException if a database-access error occurs.
+     */
+    public void write(ByteStreamWriter writer) throws SQLException {
+        FastpathArg[] args = new FastpathArg[2];
+        args[0] = new FastpathArg(fd);
+        args[1] = FastpathArg.of(writer);
+        fp.fastpath("lowrite", args);
+    }
+
+    /**
+     * <p>Sets the current position within the object.</p>
+     *
+     * <p>This is similar to the fseek() call in the standard C library. It allows you to have random
+     * access to the large object.</p>
+     *
+     * @param pos position within object
+     * @param ref Either SEEK_SET, SEEK_CUR or SEEK_END
+     * @throws SQLException if a database-access error occurs.
+     */
+    public void seek(int pos, int ref) throws SQLException {
+        FastpathArg[] args = new FastpathArg[3];
+        args[0] = new FastpathArg(fd);
+        args[1] = new FastpathArg(pos);
+        args[2] = new FastpathArg(ref);
+        fp.fastpath("lo_lseek", args);
+    }
+
+    /**
+     * Sets the current position within the object using 64-bit value (9.3+).
+     *
+     * @param pos position within object
+     * @param ref Either SEEK_SET, SEEK_CUR or SEEK_END
+     * @throws SQLException if a database-access error occurs.
+     */
+    public void seek64(long pos, int ref) throws SQLException {
+        FastpathArg[] args = new FastpathArg[3];
+        args[0] = new FastpathArg(fd);
+        args[1] = new FastpathArg(pos);
+        args[2] = new FastpathArg(ref);
+        fp.fastpath("lo_lseek64", args);
+    }
+
+    /**
+     * <p>Sets the current position within the object.</p>
+     *
+     * <p>This is similar to the fseek() call in the standard C library. It allows you to have random
+     * access to the large object.</p>
+     *
+     * @param pos position within object from beginning
+     * @throws SQLException if a database-access error occurs.
+     */
+    public void seek(int pos) throws SQLException {
+        seek(pos, SEEK_SET);
+    }
+
+    /**
+     * @return the current position within the object
+     * @throws SQLException if a database-access error occurs.
+     */
+    public int tell() throws SQLException {
+        FastpathArg[] args = new FastpathArg[1];
+        args[0] = new FastpathArg(fd);
+        return fp.getInteger("lo_tell", args);
+    }
+
+    /**
+     * @return the current position within the object
+     * @throws SQLException if a database-access error occurs.
+     */
+    public long tell64() throws SQLException {
+        FastpathArg[] args = new FastpathArg[1];
+        args[0] = new FastpathArg(fd);
+        return fp.getLong("lo_tell64", args);
+    }
+
+    /**
+     * <p>This method is inefficient, as the only way to find out the size of the object is to seek to
+     * the end, record the current position, then return to the original position.</p>
+     *
+     * <p>A better method will be found in the future.</p>
+     *
+     * @return the size of the large object
+     * @throws SQLException if a database-access error occurs.
+     */
+    public int size() throws SQLException {
+        int cp = tell();
+        seek(0, SEEK_END);
+        int sz = tell();
+        seek(cp, SEEK_SET);
+        return sz;
+    }
+
+    /**
+     * See #size() for information about efficiency.
+     *
+     * @return the size of the large object
+     * @throws SQLException if a database-access error occurs.
+     */
+    public long size64() throws SQLException {
+        long cp = tell64();
+        seek64(0, SEEK_END);
+        long sz = tell64();
+        seek64(cp, SEEK_SET);
+        return sz;
+    }
+
+    /**
+     * Truncates the large object to the given length in bytes. If the number of bytes is larger than
+     * the current large object length, the large object will be filled with zero bytes. This method
+     * does not modify the current file offset.
+     *
+     * @param len given length in bytes
+     * @throws SQLException if something goes wrong
+     */
+    public void truncate(int len) throws SQLException {
+        FastpathArg[] args = new FastpathArg[2];
+        args[0] = new FastpathArg(fd);
+        args[1] = new FastpathArg(len);
+        fp.getInteger("lo_truncate", args);
+    }
+
+    /**
+     * Truncates the large object to the given length in bytes. If the number of bytes is larger than
+     * the current large object length, the large object will be filled with zero bytes. This method
+     * does not modify the current file offset.
+     *
+     * @param len given length in bytes
+     * @throws SQLException if something goes wrong
+     */
+    public void truncate64(long len) throws SQLException {
+        FastpathArg[] args = new FastpathArg[2];
+        args[0] = new FastpathArg(fd);
+        args[1] = new FastpathArg(len);
+        fp.getInteger("lo_truncate64", args);
+    }
+
+    /**
+     * <p>Returns an {@link InputStream} from this object.</p>
+     *
+     * <p>This {@link InputStream} can then be used in any method that requires an InputStream.</p>
+     *
+     * @return {@link InputStream} from this object
+     * @throws SQLException if a database-access error occurs.
+     */
+    public InputStream getInputStream() throws SQLException {
+        return new BlobInputStream(this);
+    }
+
+    /**
+     * Returns an {@link InputStream} from this object, that will limit the amount of data that is
+     * visible.
+     *
+     * @param limit maximum number of bytes the resulting stream will serve
+     * @return {@link InputStream} from this object
+     * @throws SQLException if a database-access error occurs.
+     */
+    public InputStream getInputStream(long limit) throws SQLException {
+        return new BlobInputStream(this, BlobInputStream.DEFAULT_MAX_BUFFER_SIZE, limit);
+    }
+
+    /**
+     * Returns an {@link InputStream} from this object, that will limit the amount of data that is
+     * visible.
+     * Added mostly for testing
+     *
+     * @param bufferSize buffer size for the stream
+     * @param limit      maximum number of bytes the resulting stream will serve
+     * @return {@link InputStream} from this object
+     * @throws SQLException if a database-access error occurs.
+     */
+    public InputStream getInputStream(int bufferSize, long limit) throws SQLException {
+        return new BlobInputStream(this, bufferSize, limit);
+    }
+
+    /**
+     * <p>Returns an {@link OutputStream} to this object.</p>
+     *
+     * <p>This OutputStream can then be used in any method that requires an OutputStream.</p>
+     *
+     * @return {@link OutputStream} from this object
+     * @throws SQLException if a database-access error occurs.
+     */
+    public OutputStream getOutputStream() throws SQLException {
+        if (os == null) {
+            os = new BlobOutputStream(this);
+        }
+        return os;
     }
-    return os;
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/largeobject/LargeObjectManager.java b/pgjdbc/src/main/java/org/postgresql/largeobject/LargeObjectManager.java
index 12efe14..140540e 100644
--- a/pgjdbc/src/main/java/org/postgresql/largeobject/LargeObjectManager.java
+++ b/pgjdbc/src/main/java/org/postgresql/largeobject/LargeObjectManager.java
@@ -58,299 +58,296 @@ import java.util.logging.Level;
  */
 @SuppressWarnings("deprecation") // support for deprecated Fastpath API
 public class LargeObjectManager {
-  // the fastpath api for this connection
-  private Fastpath fp;
-  private BaseConnection conn;
+    /**
+     * This mode indicates we want to write to an object.
+     */
+    public static final int WRITE = 0x00020000;
+    /**
+     * This mode indicates we want to read an object.
+     */
+    public static final int READ = 0x00040000;
+    /**
+     * This mode is the default. It indicates we want read and write access to a large object.
+     */
+    public static final int READWRITE = READ | WRITE;
+    // the fastpath api for this connection
+    private Fastpath fp;
+    private BaseConnection conn;
 
-  /**
-   * This mode indicates we want to write to an object.
-   */
-  public static final int WRITE = 0x00020000;
+    /**
+     * <p>Constructs the LargeObject API.</p>
+     *
+     * <p><b>Important Notice</b> <br>
+     * This method should only be called by {@link BaseConnection}</p>
+     *
+     * <p>There should only be one LargeObjectManager per Connection. The {@link BaseConnection} class
+     * keeps track of the various extension API's and it's advised you use those to gain access, and
+     * not going direct.</p>
+     *
+     * @param conn connection
+     * @throws SQLException if something wrong happens
+     */
+    public LargeObjectManager(BaseConnection conn) throws SQLException {
+        this.conn = conn;
+        // We need Fastpath to do anything
+        this.fp = conn.getFastpathAPI();
 
-  /**
-   * This mode indicates we want to read an object.
-   */
-  public static final int READ = 0x00040000;
+        // Now get the function oid's for the api
+        //
+        // This is an example of Fastpath.addFunctions();
+        //
+        String sql;
+        if (conn.getMetaData().supportsSchemasInTableDefinitions()) {
+            sql = "SELECT p.proname,p.oid "
+                    + " FROM pg_catalog.pg_proc p, pg_catalog.pg_namespace n "
+                    + " WHERE p.pronamespace=n.oid AND n.nspname='pg_catalog' AND (";
+        } else {
+            sql = "SELECT proname,oid FROM pg_proc WHERE ";
+        }
+        sql += " proname = 'lo_open'"
+                + " or proname = 'lo_close'"
+                + " or proname = 'lo_creat'"
+                + " or proname = 'lo_unlink'"
+                + " or proname = 'lo_lseek'"
+                + " or proname = 'lo_lseek64'"
+                + " or proname = 'lo_tell'"
+                + " or proname = 'lo_tell64'"
+                + " or proname = 'loread'"
+                + " or proname = 'lowrite'"
+                + " or proname = 'lo_truncate'"
+                + " or proname = 'lo_truncate64'";
 
-  /**
-   * This mode is the default. It indicates we want read and write access to a large object.
-   */
-  public static final int READWRITE = READ | WRITE;
+        if (conn.getMetaData().supportsSchemasInTableDefinitions()) {
+            sql += ")";
+        }
 
-  /**
-   * <p>Constructs the LargeObject API.</p>
-   *
-   * <p><b>Important Notice</b> <br>
-   * This method should only be called by {@link BaseConnection}</p>
-   *
-   * <p>There should only be one LargeObjectManager per Connection. The {@link BaseConnection} class
-   * keeps track of the various extension API's and it's advised you use those to gain access, and
-   * not going direct.</p>
-   *
-   * @param conn connection
-   * @throws SQLException if something wrong happens
-   */
-  public LargeObjectManager(BaseConnection conn) throws SQLException {
-    this.conn = conn;
-    // We need Fastpath to do anything
-    this.fp = conn.getFastpathAPI();
+        Statement stmt = conn.createStatement();
+        ResultSet res = stmt.executeQuery(sql);
 
-    // Now get the function oid's for the api
-    //
-    // This is an example of Fastpath.addFunctions();
-    //
-    String sql;
-    if (conn.getMetaData().supportsSchemasInTableDefinitions()) {
-      sql = "SELECT p.proname,p.oid "
-          + " FROM pg_catalog.pg_proc p, pg_catalog.pg_namespace n "
-          + " WHERE p.pronamespace=n.oid AND n.nspname='pg_catalog' AND (";
-    } else {
-      sql = "SELECT proname,oid FROM pg_proc WHERE ";
-    }
-    sql += " proname = 'lo_open'"
-        + " or proname = 'lo_close'"
-        + " or proname = 'lo_creat'"
-        + " or proname = 'lo_unlink'"
-        + " or proname = 'lo_lseek'"
-        + " or proname = 'lo_lseek64'"
-        + " or proname = 'lo_tell'"
-        + " or proname = 'lo_tell64'"
-        + " or proname = 'loread'"
-        + " or proname = 'lowrite'"
-        + " or proname = 'lo_truncate'"
-        + " or proname = 'lo_truncate64'";
+        fp.addFunctions(res);
+        res.close();
+        stmt.close();
 
-    if (conn.getMetaData().supportsSchemasInTableDefinitions()) {
-      sql += ")";
+        conn.getLogger().log(Level.FINE, "Large Object initialised");
     }
 
-    Statement stmt = conn.createStatement();
-    ResultSet res = stmt.executeQuery(sql);
-
-    fp.addFunctions(res);
-    res.close();
-    stmt.close();
-
-    conn.getLogger().log(Level.FINE, "Large Object initialised");
-  }
-
-  /**
-   * This opens an existing large object, based on its OID. This method assumes that READ and WRITE
-   * access is required (the default).
-   *
-   * @param oid of large object
-   * @return LargeObject instance providing access to the object
-   * @throws SQLException on error
-   * @deprecated As of 8.3, replaced by {@link #open(long)}
-   */
-  @Deprecated
-  public LargeObject open(int oid) throws SQLException {
-    return open((long) oid, false);
-  }
-
-  /**
-   * This opens an existing large object, same as previous method, but commits the transaction on
-   * close if asked. This is useful when the LOB is returned to a caller which won't take care of
-   * transactions by itself.
-   *
-   * @param oid of large object
-   * @param commitOnClose commit the transaction when this LOB will be closed
-   * @return LargeObject instance providing access to the object
-   * @throws SQLException on error
-   */
-
-  public LargeObject open(int oid, boolean commitOnClose) throws SQLException {
-    return open((long) oid, commitOnClose);
-  }
-
-  /**
-   * This opens an existing large object, based on its OID. This method assumes that READ and WRITE
-   * access is required (the default).
-   *
-   * @param oid of large object
-   * @return LargeObject instance providing access to the object
-   * @throws SQLException on error
-   */
-  public LargeObject open(long oid) throws SQLException {
-    return open(oid, READWRITE, false);
-  }
-
-  /**
-   * This opens an existing large object, same as previous method, but commits the transaction on
-   * close if asked.
-   *
-   * @param oid of large object
-   * @param commitOnClose commit the transaction when this LOB will be closed
-   * @return LargeObject instance providing access to the object
-   * @throws SQLException on error
-   */
-
-  public LargeObject open(long oid, boolean commitOnClose) throws SQLException {
-    return open(oid, READWRITE, commitOnClose);
-  }
-
-  /**
-   * This opens an existing large object, based on its OID.
-   *
-   * @param oid of large object
-   * @param mode mode of open
-   * @return LargeObject instance providing access to the object
-   * @throws SQLException on error
-   * @deprecated As of 8.3, replaced by {@link #open(long, int)}
-   */
-  @Deprecated
-  public LargeObject open(int oid, int mode) throws SQLException {
-    return open((long) oid, mode, false);
-  }
-
-  /**
-   * This opens an existing large object, same as previous method, but commits the transaction on
-   * close if asked.
-   *
-   * @param oid of large object
-   * @param mode mode of open
-   * @param commitOnClose commit the transaction when this LOB will be closed
-   * @return LargeObject instance providing access to the object
-   * @throws SQLException on error
-   */
-
-  public LargeObject open(int oid, int mode, boolean commitOnClose) throws SQLException {
-    return open((long) oid, mode, commitOnClose);
-  }
-
-  /**
-   * This opens an existing large object, based on its OID.
-   *
-   * @param oid of large object
-   * @param mode mode of open
-   * @return LargeObject instance providing access to the object
-   * @throws SQLException on error
-   */
-  public LargeObject open(long oid, int mode) throws SQLException {
-    return open(oid, mode, false);
-  }
-
-  /**
-   * This opens an existing large object, based on its OID.
-   *
-   * @param oid of large object
-   * @param mode mode of open
-   * @param commitOnClose commit the transaction when this LOB will be closed
-   * @return LargeObject instance providing access to the object
-   * @throws SQLException on error
-   */
-  public LargeObject open(long oid, int mode, boolean commitOnClose) throws SQLException {
-    if (conn.getAutoCommit()) {
-      throw new PSQLException(GT.tr("Large Objects may not be used in auto-commit mode."),
-          PSQLState.NO_ACTIVE_SQL_TRANSACTION);
+    /**
+     * This opens an existing large object, based on its OID. This method assumes that READ and WRITE
+     * access is required (the default).
+     *
+     * @param oid of large object
+     * @return LargeObject instance providing access to the object
+     * @throws SQLException on error
+     * @deprecated As of 8.3, replaced by {@link #open(long)}
+     */
+    @Deprecated
+    public LargeObject open(int oid) throws SQLException {
+        return open((long) oid, false);
     }
-    return new LargeObject(fp, oid, mode, conn, commitOnClose);
-  }
 
-  /**
-   * <p>This creates a large object, returning its OID.</p>
-   *
-   * <p>It defaults to READWRITE for the new object's attributes.</p>
-   *
-   * @return oid of new object
-   * @throws SQLException on error
-   * @deprecated As of 8.3, replaced by {@link #createLO()}
-   */
-  @Deprecated
-  public int create() throws SQLException {
-    return create(READWRITE);
-  }
+    /**
+     * This opens an existing large object, same as previous method, but commits the transaction on
+     * close if asked. This is useful when the LOB is returned to a caller which won't take care of
+     * transactions by itself.
+     *
+     * @param oid           of large object
+     * @param commitOnClose commit the transaction when this LOB will be closed
+     * @return LargeObject instance providing access to the object
+     * @throws SQLException on error
+     */
 
-  /**
-   * <p>This creates a large object, returning its OID.</p>
-   *
-   * <p>It defaults to READWRITE for the new object's attributes.</p>
-   *
-   * @return oid of new object
-   * @throws SQLException if something wrong happens
-   */
-  public long createLO() throws SQLException {
-    return createLO(READWRITE);
-  }
-
-  /**
-   * This creates a large object, returning its OID.
-   *
-   * @param mode a bitmask describing different attributes of the new object
-   * @return oid of new object
-   * @throws SQLException on error
-   */
-  public long createLO(int mode) throws SQLException {
-    if (conn.getAutoCommit()) {
-      throw new PSQLException(GT.tr("Large Objects may not be used in auto-commit mode."),
-          PSQLState.NO_ACTIVE_SQL_TRANSACTION);
+    public LargeObject open(int oid, boolean commitOnClose) throws SQLException {
+        return open((long) oid, commitOnClose);
     }
-    FastpathArg[] args = new FastpathArg[1];
-    args[0] = new FastpathArg(mode);
-    return fp.getOID("lo_creat", args);
-  }
 
-  /**
-   * This creates a large object, returning its OID.
-   *
-   * @param mode a bitmask describing different attributes of the new object
-   * @return oid of new object
-   * @throws SQLException on error
-   * @deprecated As of 8.3, replaced by {@link #createLO(int)}
-   */
-  @Deprecated
-  public int create(int mode) throws SQLException {
-    long oid = createLO(mode);
-    return (int) oid;
-  }
+    /**
+     * This opens an existing large object, based on its OID. This method assumes that READ and WRITE
+     * access is required (the default).
+     *
+     * @param oid of large object
+     * @return LargeObject instance providing access to the object
+     * @throws SQLException on error
+     */
+    public LargeObject open(long oid) throws SQLException {
+        return open(oid, READWRITE, false);
+    }
 
-  /**
-   * This deletes a large object.
-   *
-   * @param oid describing object to delete
-   * @throws SQLException on error
-   */
-  public void delete(long oid) throws SQLException {
-    FastpathArg[] args = new FastpathArg[1];
-    args[0] = Fastpath.createOIDArg(oid);
-    fp.fastpath("lo_unlink", args);
-  }
+    /**
+     * This opens an existing large object, same as previous method, but commits the transaction on
+     * close if asked.
+     *
+     * @param oid           of large object
+     * @param commitOnClose commit the transaction when this LOB will be closed
+     * @return LargeObject instance providing access to the object
+     * @throws SQLException on error
+     */
 
-  /**
-   * <p>This deletes a large object.</p>
-   *
-   * <p>It is identical to the delete method, and is supplied as the C API uses unlink.</p>
-   *
-   * @param oid describing object to delete
-   * @throws SQLException on error
-   * @deprecated As of 8.3, replaced by {@link #unlink(long)}
-   */
-  @Deprecated
-  public void unlink(int oid) throws SQLException {
-    delete((long) oid);
-  }
+    public LargeObject open(long oid, boolean commitOnClose) throws SQLException {
+        return open(oid, READWRITE, commitOnClose);
+    }
 
-  /**
-   * <p>This deletes a large object.</p>
-   *
-   * <p>It is identical to the delete method, and is supplied as the C API uses unlink.</p>
-   *
-   * @param oid describing object to delete
-   * @throws SQLException on error
-   */
-  public void unlink(long oid) throws SQLException {
-    delete(oid);
-  }
+    /**
+     * This opens an existing large object, based on its OID.
+     *
+     * @param oid  of large object
+     * @param mode mode of open
+     * @return LargeObject instance providing access to the object
+     * @throws SQLException on error
+     * @deprecated As of 8.3, replaced by {@link #open(long, int)}
+     */
+    @Deprecated
+    public LargeObject open(int oid, int mode) throws SQLException {
+        return open((long) oid, mode, false);
+    }
 
-  /**
-   * This deletes a large object.
-   *
-   * @param oid describing object to delete
-   * @throws SQLException on error
-   * @deprecated As of 8.3, replaced by {@link #delete(long)}
-   */
-  @Deprecated
-  public void delete(int oid) throws SQLException {
-    delete((long) oid);
-  }
+    /**
+     * This opens an existing large object, same as previous method, but commits the transaction on
+     * close if asked.
+     *
+     * @param oid           of large object
+     * @param mode          mode of open
+     * @param commitOnClose commit the transaction when this LOB will be closed
+     * @return LargeObject instance providing access to the object
+     * @throws SQLException on error
+     */
+
+    public LargeObject open(int oid, int mode, boolean commitOnClose) throws SQLException {
+        return open((long) oid, mode, commitOnClose);
+    }
+
+    /**
+     * This opens an existing large object, based on its OID.
+     *
+     * @param oid  of large object
+     * @param mode mode of open
+     * @return LargeObject instance providing access to the object
+     * @throws SQLException on error
+     */
+    public LargeObject open(long oid, int mode) throws SQLException {
+        return open(oid, mode, false);
+    }
+
+    /**
+     * This opens an existing large object, based on its OID.
+     *
+     * @param oid           of large object
+     * @param mode          mode of open
+     * @param commitOnClose commit the transaction when this LOB will be closed
+     * @return LargeObject instance providing access to the object
+     * @throws SQLException on error
+     */
+    public LargeObject open(long oid, int mode, boolean commitOnClose) throws SQLException {
+        if (conn.getAutoCommit()) {
+            throw new PSQLException(GT.tr("Large Objects may not be used in auto-commit mode."),
+                    PSQLState.NO_ACTIVE_SQL_TRANSACTION);
+        }
+        return new LargeObject(fp, oid, mode, conn, commitOnClose);
+    }
+
+    /**
+     * <p>This creates a large object, returning its OID.</p>
+     *
+     * <p>It defaults to READWRITE for the new object's attributes.</p>
+     *
+     * @return oid of new object
+     * @throws SQLException on error
+     * @deprecated As of 8.3, replaced by {@link #createLO()}
+     */
+    @Deprecated
+    public int create() throws SQLException {
+        return create(READWRITE);
+    }
+
+    /**
+     * <p>This creates a large object, returning its OID.</p>
+     *
+     * <p>It defaults to READWRITE for the new object's attributes.</p>
+     *
+     * @return oid of new object
+     * @throws SQLException if something wrong happens
+     */
+    public long createLO() throws SQLException {
+        return createLO(READWRITE);
+    }
+
+    /**
+     * This creates a large object, returning its OID.
+     *
+     * @param mode a bitmask describing different attributes of the new object
+     * @return oid of new object
+     * @throws SQLException on error
+     */
+    public long createLO(int mode) throws SQLException {
+        if (conn.getAutoCommit()) {
+            throw new PSQLException(GT.tr("Large Objects may not be used in auto-commit mode."),
+                    PSQLState.NO_ACTIVE_SQL_TRANSACTION);
+        }
+        FastpathArg[] args = new FastpathArg[1];
+        args[0] = new FastpathArg(mode);
+        return fp.getOID("lo_creat", args);
+    }
+
+    /**
+     * This creates a large object, returning its OID.
+     *
+     * @param mode a bitmask describing different attributes of the new object
+     * @return oid of new object
+     * @throws SQLException on error
+     * @deprecated As of 8.3, replaced by {@link #createLO(int)}
+     */
+    @Deprecated
+    public int create(int mode) throws SQLException {
+        long oid = createLO(mode);
+        return (int) oid;
+    }
+
+    /**
+     * This deletes a large object.
+     *
+     * @param oid describing object to delete
+     * @throws SQLException on error
+     */
+    public void delete(long oid) throws SQLException {
+        FastpathArg[] args = new FastpathArg[1];
+        args[0] = Fastpath.createOIDArg(oid);
+        fp.fastpath("lo_unlink", args);
+    }
+
+    /**
+     * <p>This deletes a large object.</p>
+     *
+     * <p>It is identical to the delete method, and is supplied as the C API uses unlink.</p>
+     *
+     * @param oid describing object to delete
+     * @throws SQLException on error
+     * @deprecated As of 8.3, replaced by {@link #unlink(long)}
+     */
+    @Deprecated
+    public void unlink(int oid) throws SQLException {
+        delete((long) oid);
+    }
+
+    /**
+     * <p>This deletes a large object.</p>
+     *
+     * <p>It is identical to the delete method, and is supplied as the C API uses unlink.</p>
+     *
+     * @param oid describing object to delete
+     * @throws SQLException on error
+     */
+    public void unlink(long oid) throws SQLException {
+        delete(oid);
+    }
+
+    /**
+     * This deletes a large object.
+     *
+     * @param oid describing object to delete
+     * @throws SQLException on error
+     * @deprecated As of 8.3, replaced by {@link #delete(long)}
+     */
+    @Deprecated
+    public void delete(int oid) throws SQLException {
+        delete((long) oid);
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/plugin/AuthenticationPlugin.java b/pgjdbc/src/main/java/org/postgresql/plugin/AuthenticationPlugin.java
index 3734cbe..8a2b374 100644
--- a/pgjdbc/src/main/java/org/postgresql/plugin/AuthenticationPlugin.java
+++ b/pgjdbc/src/main/java/org/postgresql/plugin/AuthenticationPlugin.java
@@ -9,23 +9,23 @@ import org.postgresql.util.PSQLException;
 
 public interface AuthenticationPlugin {
 
-  /**
-   * Callback method to provide the password to use for authentication.
-   *
-   * <p>Implementers can also check the authentication type to ensure that the
-   * authentication handshake is using a specific authentication method (e.g. SASL)
-   * or avoiding a specific one (e.g. cleartext).</p>
-   *
-   * <p>For security reasons, the driver will wipe the contents of the array returned
-   * by this method after it has been used for authentication.</p>
-   *
-   * <p><b>Implementers must provide a new array each time this method is invoked as
-   * the previous contents will have been wiped.</b></p>
-   *
-   * @param type The authentication method that the server is requesting
-   * @return The password to use or null if no password is available
-   * @throws PSQLException if something goes wrong supplying the password
-   */
-  char [] getPassword(AuthenticationRequestType type) throws PSQLException;
+    /**
+     * Callback method to provide the password to use for authentication.
+     *
+     * <p>Implementers can also check the authentication type to ensure that the
+     * authentication handshake is using a specific authentication method (e.g. SASL)
+     * or avoiding a specific one (e.g. cleartext).</p>
+     *
+     * <p>For security reasons, the driver will wipe the contents of the array returned
+     * by this method after it has been used for authentication.</p>
+     *
+     * <p><b>Implementers must provide a new array each time this method is invoked as
+     * the previous contents will have been wiped.</b></p>
+     *
+     * @param type The authentication method that the server is requesting
+     * @return The password to use or null if no password is available
+     * @throws PSQLException if something goes wrong supplying the password
+     */
+    char[] getPassword(AuthenticationRequestType type) throws PSQLException;
 
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/replication/LogSequenceNumber.java b/pgjdbc/src/main/java/org/postgresql/replication/LogSequenceNumber.java
index 1886a7d..291757e 100644
--- a/pgjdbc/src/main/java/org/postgresql/replication/LogSequenceNumber.java
+++ b/pgjdbc/src/main/java/org/postgresql/replication/LogSequenceNumber.java
@@ -11,108 +11,108 @@ import java.nio.ByteBuffer;
  * LSN (Log Sequence Number) data which is a pointer to a location in the XLOG.
  */
 public final class LogSequenceNumber implements Comparable<LogSequenceNumber> {
-  /**
-   * Zero is used indicate an invalid pointer. Bootstrap skips the first possible WAL segment,
-   * initializing the first WAL page at XLOG_SEG_SIZE, so no XLOG record can begin at zero.
-   */
-  public static final LogSequenceNumber INVALID_LSN = LogSequenceNumber.valueOf(0);
+    /**
+     * Zero is used to indicate an invalid pointer. Bootstrap skips the first possible WAL segment,
+     * initializing the first WAL page at XLOG_SEG_SIZE, so no XLOG record can begin at zero.
+     */
+    public static final LogSequenceNumber INVALID_LSN = LogSequenceNumber.valueOf(0);
 
-  private final long value;
+    private final long value;
 
-  private LogSequenceNumber(long value) {
-    this.value = value;
-  }
-
-  /**
-   * @param value numeric represent position in the write-ahead log stream
-   * @return not null LSN instance
-   */
-  public static LogSequenceNumber valueOf(long value) {
-    return new LogSequenceNumber(value);
-  }
-
-  /**
-   * Create LSN instance by string represent LSN.
-   *
-   * @param strValue not null string as two hexadecimal numbers of up to 8 digits each, separated by
-   *                 a slash. For example {@code 16/3002D50}, {@code 0/15D68C50}
-   * @return not null LSN instance where if specified string represent have not valid form {@link
-   * LogSequenceNumber#INVALID_LSN}
-   */
-  public static LogSequenceNumber valueOf(String strValue) {
-    int slashIndex = strValue.lastIndexOf('/');
-
-    if (slashIndex <= 0) {
-      return INVALID_LSN;
+    private LogSequenceNumber(long value) {
+        this.value = value;
     }
 
-    String logicalXLogStr = strValue.substring(0, slashIndex);
-    int logicalXlog = (int) Long.parseLong(logicalXLogStr, 16);
-    String segmentStr = strValue.substring(slashIndex + 1, strValue.length());
-    int segment = (int) Long.parseLong(segmentStr, 16);
-
-    ByteBuffer buf = ByteBuffer.allocate(8);
-    buf.putInt(logicalXlog);
-    buf.putInt(segment);
-    buf.position(0);
-    long value = buf.getLong();
-
-    return LogSequenceNumber.valueOf(value);
-  }
-
-  /**
-   * @return Long represent position in the write-ahead log stream
-   */
-  public long asLong() {
-    return value;
-  }
-
-  /**
-   * @return String represent position in the write-ahead log stream as two hexadecimal numbers of
-   *     up to 8 digits each, separated by a slash. For example {@code 16/3002D50}, {@code 0/15D68C50}
-   */
-  public String asString() {
-    ByteBuffer buf = ByteBuffer.allocate(8);
-    buf.putLong(value);
-    buf.position(0);
-
-    int logicalXlog = buf.getInt();
-    int segment = buf.getInt();
-    return String.format("%X/%X", logicalXlog, segment);
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) {
-      return true;
-    }
-    if (o == null || getClass() != o.getClass()) {
-      return false;
+    /**
+     * @param value numeric value representing a position in the write-ahead log stream
+     * @return not null LSN instance
+     */
+    public static LogSequenceNumber valueOf(long value) {
+        return new LogSequenceNumber(value);
     }
 
-    LogSequenceNumber that = (LogSequenceNumber) o;
+    /**
+     * Creates an LSN instance from its string representation.
+     *
+     * @param strValue not null string as two hexadecimal numbers of up to 8 digits each, separated by
+     *                 a slash. For example {@code 16/3002D50}, {@code 0/15D68C50}
+     * @return not null LSN instance; {@link LogSequenceNumber#INVALID_LSN} if the specified
+     * string does not have a valid form
+     */
+    public static LogSequenceNumber valueOf(String strValue) {
+        int slashIndex = strValue.lastIndexOf('/');
 
-    return value == that.value;
+        if (slashIndex <= 0) {
+            return INVALID_LSN;
+        }
 
-  }
+        String logicalXLogStr = strValue.substring(0, slashIndex);
+        int logicalXlog = (int) Long.parseLong(logicalXLogStr, 16);
+        String segmentStr = strValue.substring(slashIndex + 1, strValue.length());
+        int segment = (int) Long.parseLong(segmentStr, 16);
 
-  @Override
-  public int hashCode() {
-    return (int) (value ^ (value >>> 32));
-  }
+        ByteBuffer buf = ByteBuffer.allocate(8);
+        buf.putInt(logicalXlog);
+        buf.putInt(segment);
+        buf.position(0);
+        long value = buf.getLong();
 
-  @Override
-  public String toString() {
-    return "LSN{" + asString() + '}';
-  }
-
-  @Override
-  public int compareTo(LogSequenceNumber o) {
-    if (value == o.value) {
-      return 0;
+        return LogSequenceNumber.valueOf(value);
+    }
+
+    /**
+     * @return long value representing the position in the write-ahead log stream
+     */
+    public long asLong() {
+        return value;
+    }
+
+    /**
+     * @return String representation of the position in the write-ahead log stream as two hexadecimal
+     * numbers of up to 8 digits each, separated by a slash. For example {@code 16/3002D50}, {@code 0/15D68C50}
+     */
+    public String asString() {
+        ByteBuffer buf = ByteBuffer.allocate(8);
+        buf.putLong(value);
+        buf.position(0);
+
+        int logicalXlog = buf.getInt();
+        int segment = buf.getInt();
+        return String.format("%X/%X", logicalXlog, segment);
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+
+        LogSequenceNumber that = (LogSequenceNumber) o;
+
+        return value == that.value;
+
+    }
+
+    @Override
+    public int hashCode() {
+        return (int) (value ^ (value >>> 32));
+    }
+
+    @Override
+    public String toString() {
+        return "LSN{" + asString() + '}';
+    }
+
+    @Override
+    public int compareTo(LogSequenceNumber o) {
+        if (value == o.value) {
+            return 0;
+        }
+        //Unsigned comparison
+        return value + Long.MIN_VALUE < o.value + Long.MIN_VALUE ? -1 : 1;
     }
-    //Unsigned comparison
-    return value + Long.MIN_VALUE < o.value + Long.MIN_VALUE ? -1 : 1;
-  }
 
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/replication/PGReplicationConnection.java b/pgjdbc/src/main/java/org/postgresql/replication/PGReplicationConnection.java
index 6148f49..e6c104b 100644
--- a/pgjdbc/src/main/java/org/postgresql/replication/PGReplicationConnection.java
+++ b/pgjdbc/src/main/java/org/postgresql/replication/PGReplicationConnection.java
@@ -18,28 +18,28 @@ import java.sql.SQLException;
  */
 public interface PGReplicationConnection {
 
-  /**
-   * After start replication stream this connection not available to use for another queries until
-   * replication stream will not close.
-   *
-   * @return not null fluent api for build replication stream
-   */
-  ChainedStreamBuilder replicationStream();
+    /**
+     * After start replication stream this connection not available to use for another queries until
+     * replication stream will not close.
+     *
+     * @return not null fluent api for build replication stream
+     */
+    ChainedStreamBuilder replicationStream();
 
-  /**
-   * <p>Create replication slot, that can be next use in {@link PGReplicationConnection#replicationStream()}</p>
-   *
-   * <p>Replication slots provide an automated way to ensure that the master does not remove WAL
-   * segments until they have been received by all standbys, and that the master does not remove
-   * rows which could cause a recovery conflict even when the standby is disconnected.</p>
-   *
-   * @return not null fluent api for build create replication slot
-   */
-  ChainedCreateReplicationSlotBuilder createReplicationSlot();
+    /**
+     * <p>Create replication slot, that can be next use in {@link PGReplicationConnection#replicationStream()}</p>
+     *
+     * <p>Replication slots provide an automated way to ensure that the master does not remove WAL
+     * segments until they have been received by all standbys, and that the master does not remove
+     * rows which could cause a recovery conflict even when the standby is disconnected.</p>
+     *
+     * @return not null fluent api for build create replication slot
+     */
+    ChainedCreateReplicationSlotBuilder createReplicationSlot();
 
-  /**
-   * @param slotName not null replication slot name exists in database that should be drop
-   * @throws SQLException if the replication slot cannot be dropped.
-   */
-  void dropReplicationSlot(String slotName) throws SQLException;
+    /**
+     * @param slotName not null replication slot name exists in database that should be drop
+     * @throws SQLException if the replication slot cannot be dropped.
+     */
+    void dropReplicationSlot(String slotName) throws SQLException;
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/replication/PGReplicationConnectionImpl.java b/pgjdbc/src/main/java/org/postgresql/replication/PGReplicationConnectionImpl.java
index 350526e..1c6c8ed 100644
--- a/pgjdbc/src/main/java/org/postgresql/replication/PGReplicationConnectionImpl.java
+++ b/pgjdbc/src/main/java/org/postgresql/replication/PGReplicationConnectionImpl.java
@@ -15,33 +15,33 @@ import java.sql.SQLException;
 import java.sql.Statement;
 
 public class PGReplicationConnectionImpl implements PGReplicationConnection {
-  private final BaseConnection connection;
+    private final BaseConnection connection;
 
-  public PGReplicationConnectionImpl(BaseConnection connection) {
-    this.connection = connection;
-  }
-
-  @Override
-  public ChainedStreamBuilder replicationStream() {
-    return new ReplicationStreamBuilder(connection);
-  }
-
-  @Override
-  public ChainedCreateReplicationSlotBuilder createReplicationSlot() {
-    return new ReplicationCreateSlotBuilder(connection);
-  }
-
-  @Override
-  public void dropReplicationSlot(String slotName) throws SQLException {
-    if (slotName == null || slotName.isEmpty()) {
-      throw new IllegalArgumentException("Replication slot name can't be null or empty");
+    public PGReplicationConnectionImpl(BaseConnection connection) {
+        this.connection = connection;
     }
 
-    Statement statement = connection.createStatement();
-    try {
-      statement.execute("DROP_REPLICATION_SLOT " + slotName);
-    } finally {
-      statement.close();
+    @Override
+    public ChainedStreamBuilder replicationStream() {
+        return new ReplicationStreamBuilder(connection);
+    }
+
+    @Override
+    public ChainedCreateReplicationSlotBuilder createReplicationSlot() {
+        return new ReplicationCreateSlotBuilder(connection);
+    }
+
+    @Override
+    public void dropReplicationSlot(String slotName) throws SQLException {
+        if (slotName == null || slotName.isEmpty()) {
+            throw new IllegalArgumentException("Replication slot name can't be null or empty");
+        }
+
+        Statement statement = connection.createStatement();
+        try {
+            statement.execute("DROP_REPLICATION_SLOT " + slotName);
+        } finally {
+            statement.close();
+        }
     }
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/replication/PGReplicationStream.java b/pgjdbc/src/main/java/org/postgresql/replication/PGReplicationStream.java
index cbd06f2..95b11b5 100644
--- a/pgjdbc/src/main/java/org/postgresql/replication/PGReplicationStream.java
+++ b/pgjdbc/src/main/java/org/postgresql/replication/PGReplicationStream.java
@@ -19,125 +19,125 @@ import java.sql.SQLException;
  * disconnect by timeout from server.
  */
 public interface PGReplicationStream
-    extends AutoCloseable {
+        extends AutoCloseable {
 
-  /**
-   * <p>Read next wal record from backend. It method can be block until new message will not get
-   * from server.</p>
-   *
-   * <p>A single WAL record is never split across two XLogData messages. When a WAL record crosses a
-   * WAL page boundary, and is therefore already split using continuation records, it can be split
-   * at the page boundary. In other words, the first main WAL record and its continuation records
-   * can be sent in different XLogData messages.</p>
-   *
-   * @return not null byte array received by replication protocol, return ByteBuffer wrap around
-   *     received byte array with use offset, so, use {@link ByteBuffer#array()} carefully
-   * @throws SQLException when some internal exception occurs during read from stream
-   */
-  ByteBuffer read() throws SQLException;
+    /**
+     * <p>Read next wal record from backend. It method can be block until new message will not get
+     * from server.</p>
+     *
+     * <p>A single WAL record is never split across two XLogData messages. When a WAL record crosses a
+     * WAL page boundary, and is therefore already split using continuation records, it can be split
+     * at the page boundary. In other words, the first main WAL record and its continuation records
+     * can be sent in different XLogData messages.</p>
+     *
+     * @return not null byte array received by replication protocol, return ByteBuffer wrap around
+     * received byte array with use offset, so, use {@link ByteBuffer#array()} carefully
+     * @throws SQLException when some internal exception occurs during read from stream
+     */
+    ByteBuffer read() throws SQLException;
 
-  /**
-   * <p>Read next WAL record from backend. This method does not block and in contrast to {@link
-   * PGReplicationStream#read()}. If message from backend absent return null. It allow periodically
-   * check message in stream and if they absent sleep some time, but it time should be less than
-   * {@link CommonOptions#getStatusInterval()} to avoid disconnect from the server.</p>
-   *
-   * <p>A single WAL record is never split across two XLogData messages. When a WAL record crosses a
-   * WAL page boundary, and is therefore already split using continuation records, it can be split
-   * at the page boundary. In other words, the first main WAL record and its continuation records
-   * can be sent in different XLogData messages.</p>
-   *
-   * @return byte array received by replication protocol or NULL if pending message from server
-   *     absent. Returns ByteBuffer wrap around received byte array with use offset, so, use {@link
-   *     ByteBuffer#array()} carefully.
-   * @throws SQLException when some internal exception occurs during read from stream
-   */
-  ByteBuffer readPending() throws SQLException;
+    /**
+     * <p>Read next WAL record from backend. This method does not block and in contrast to {@link
+     * PGReplicationStream#read()}. If message from backend absent return null. It allow periodically
+     * check message in stream and if they absent sleep some time, but it time should be less than
+     * {@link CommonOptions#getStatusInterval()} to avoid disconnect from the server.</p>
+     *
+     * <p>A single WAL record is never split across two XLogData messages. When a WAL record crosses a
+     * WAL page boundary, and is therefore already split using continuation records, it can be split
+     * at the page boundary. In other words, the first main WAL record and its continuation records
+     * can be sent in different XLogData messages.</p>
+     *
+     * @return byte array received by replication protocol or NULL if pending message from server
+     * absent. Returns ByteBuffer wrap around received byte array with use offset, so, use {@link
+     * ByteBuffer#array()} carefully.
+     * @throws SQLException when some internal exception occurs during read from stream
+     */
+    ByteBuffer readPending() throws SQLException;
 
-  /**
-   * <p>Parameter updates by execute {@link PGReplicationStream#read()} method.</p>
-   *
-   * <p>It is safe to call this method in a thread different than the main thread. However, usually this
-   * method is called in the main thread after a successful {@link PGReplicationStream#read()} or
-   * {@link PGReplicationStream#readPending()}, to get the LSN corresponding to the received record.</p>
-   *
-   * @return NOT NULL LSN position that was receive last time via {@link PGReplicationStream#read()}
-   *     method
-   */
-  LogSequenceNumber getLastReceiveLSN();
+    /**
+     * <p>Parameter updates by execute {@link PGReplicationStream#read()} method.</p>
+     *
+     * <p>It is safe to call this method in a thread different than the main thread. However, usually this
+     * method is called in the main thread after a successful {@link PGReplicationStream#read()} or
+     * {@link PGReplicationStream#readPending()}, to get the LSN corresponding to the received record.</p>
+     *
+     * @return NOT NULL LSN position that was receive last time via {@link PGReplicationStream#read()}
+     * method
+     */
+    LogSequenceNumber getLastReceiveLSN();
 
-  /**
-   * <p>Last flushed LSN sent in update message to backend. Parameter updates only via {@link
-   * PGReplicationStream#setFlushedLSN(LogSequenceNumber)}</p>
-   *
-   * <p>It is safe to call this method in a thread different than the main thread.</p>
-   *
-   * @return NOT NULL location of the last WAL flushed to disk in the standby.
-   */
-  LogSequenceNumber getLastFlushedLSN();
+    /**
+     * <p>Last flushed LSN sent in update message to backend. Parameter updates only via {@link
+     * PGReplicationStream#setFlushedLSN(LogSequenceNumber)}</p>
+     *
+     * <p>It is safe to call this method in a thread different than the main thread.</p>
+     *
+     * @return NOT NULL location of the last WAL flushed to disk in the standby.
+     */
+    LogSequenceNumber getLastFlushedLSN();
 
-  /**
-   * <p>Last applied lsn sent in update message to backed. Parameter updates only via {@link
-   * PGReplicationStream#setAppliedLSN(LogSequenceNumber)}</p>
-   *
-   * <p>It is safe to call this method in a thread different than the main thread.</p>
-   *
-   * @return not null location of the last WAL applied in the standby.
-   */
-  LogSequenceNumber getLastAppliedLSN();
+    /**
+     * <p>Last applied lsn sent in update message to backed. Parameter updates only via {@link
+     * PGReplicationStream#setAppliedLSN(LogSequenceNumber)}</p>
+     *
+     * <p>It is safe to call this method in a thread different than the main thread.</p>
+     *
+     * @return not null location of the last WAL applied in the standby.
+     */
+    LogSequenceNumber getLastAppliedLSN();
 
-  /**
-   * <p>Set flushed LSN. This parameter will be sent to backend on next update status iteration. Flushed
-   * LSN position help backend define which WAL can be recycled.</p>
-   *
-   * <p>It is safe to call this method in a thread different than the main thread. The updated value
-   * will be sent to the backend in the next status update run.</p>
-   *
-   * @param flushed NOT NULL location of the last WAL flushed to disk in the standby.
-   * @see PGReplicationStream#forceUpdateStatus()
-   */
-  void setFlushedLSN(LogSequenceNumber flushed);
+    /**
+     * <p>Set flushed LSN. This parameter will be sent to backend on next update status iteration. Flushed
+     * LSN position help backend define which WAL can be recycled.</p>
+     *
+     * <p>It is safe to call this method in a thread different than the main thread. The updated value
+     * will be sent to the backend in the next status update run.</p>
+     *
+     * @param flushed NOT NULL location of the last WAL flushed to disk in the standby.
+     * @see PGReplicationStream#forceUpdateStatus()
+     */
+    void setFlushedLSN(LogSequenceNumber flushed);
 
-  /**
-   * <p>Inform backend which LSN has been applied on standby.
-   * Feedback will send to backend on next update status iteration.</p>
-   *
-   * <p>It is safe to call this method in a thread different than the main thread. The updated value
-   * will be sent to the backend in the next status update run.</p>
-   *
-   * @param applied NOT NULL location of the last WAL applied in the standby.
-   * @see PGReplicationStream#forceUpdateStatus()
-   */
-  void setAppliedLSN(LogSequenceNumber applied);
+    /**
+     * <p>Inform backend which LSN has been applied on standby.
+     * Feedback will send to backend on next update status iteration.</p>
+     *
+     * <p>It is safe to call this method in a thread different than the main thread. The updated value
+     * will be sent to the backend in the next status update run.</p>
+     *
+     * @param applied NOT NULL location of the last WAL applied in the standby.
+     * @see PGReplicationStream#forceUpdateStatus()
+     */
+    void setAppliedLSN(LogSequenceNumber applied);
 
-  /**
-   * Force send last received, flushed and applied LSN status to backend. You cannot send LSN status
-   * explicitly because {@link PGReplicationStream} sends the status to backend periodically by
-   * configured interval via {@link LogicalReplicationOptions#getStatusInterval}
-   *
-   * @throws SQLException when some internal exception occurs during read from stream
-   * @see LogicalReplicationOptions#getStatusInterval()
-   */
-  void forceUpdateStatus() throws SQLException;
+    /**
+     * Force send last received, flushed and applied LSN status to backend. You cannot send LSN status
+     * explicitly because {@link PGReplicationStream} sends the status to backend periodically by
+     * configured interval via {@link LogicalReplicationOptions#getStatusInterval}
+     *
+     * @throws SQLException when some internal exception occurs during read from stream
+     * @see LogicalReplicationOptions#getStatusInterval()
+     */
+    void forceUpdateStatus() throws SQLException;
 
-  /**
-   * @return {@code true} if replication stream was already close, otherwise return {@code false}
-   */
-  boolean isClosed();
+    /**
+     * @return {@code true} if replication stream was already close, otherwise return {@code false}
+     */
+    boolean isClosed();
 
-  /**
-   * <p>Stop replication changes from server and free resources. After that connection can be reuse
-   * to another queries. Also after close current stream they cannot be used anymore.</p>
-   *
-   * <p><b>Note:</b> This method can spend much time for logical replication stream on postgresql
-   * version 9.6 and lower, because postgresql have bug - during decode big transaction to logical
-   * form and during wait new changes postgresql ignore messages from client. As workaround you can
-   * close replication connection instead of close replication stream. For more information about it
-   * problem see mailing list thread <a href="http://www.postgresql.org/message-id/CAFgjRd3hdYOa33m69TbeOfNNer2BZbwa8FFjt2V5VFzTBvUU3w@mail.gmail.com">
-   * Stopping logical replication protocol</a></p>
-   *
-   * @throws SQLException when some internal exception occurs during end streaming
-   */
-  @Override
-  void close() throws SQLException;
+    /**
+     * <p>Stop replication changes from server and free resources. After that connection can be reuse
+     * to another queries. Also after close current stream they cannot be used anymore.</p>
+     *
+     * <p><b>Note:</b> This method can spend much time for logical replication stream on postgresql
+     * version 9.6 and lower, because postgresql have bug - during decode big transaction to logical
+     * form and during wait new changes postgresql ignore messages from client. As workaround you can
+     * close replication connection instead of close replication stream. For more information about it
+     * problem see mailing list thread <a href="http://www.postgresql.org/message-id/CAFgjRd3hdYOa33m69TbeOfNNer2BZbwa8FFjt2V5VFzTBvUU3w@mail.gmail.com">
+     * Stopping logical replication protocol</a></p>
+     *
+     * @throws SQLException when some internal exception occurs during end streaming
+     */
+    @Override
+    void close() throws SQLException;
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/replication/ReplicationSlotInfo.java b/pgjdbc/src/main/java/org/postgresql/replication/ReplicationSlotInfo.java
index 8c904b3..f8cf72b 100644
--- a/pgjdbc/src/main/java/org/postgresql/replication/ReplicationSlotInfo.java
+++ b/pgjdbc/src/main/java/org/postgresql/replication/ReplicationSlotInfo.java
@@ -20,69 +20,69 @@ package org.postgresql.replication;
  */
 public final class ReplicationSlotInfo {
 
-  private final String slotName;
-  private final ReplicationType replicationType;
-  private final LogSequenceNumber consistentPoint;
-  private final String snapshotName;
-  private final String outputPlugin;
+    private final String slotName;
+    private final ReplicationType replicationType;
+    private final LogSequenceNumber consistentPoint;
+    private final String snapshotName;
+    private final String outputPlugin;
 
-  public ReplicationSlotInfo(String slotName, ReplicationType replicationType,
-      LogSequenceNumber consistentPoint, String snapshotName,
-      String outputPlugin) {
-    this.slotName = slotName;
-    this.replicationType = replicationType;
-    this.consistentPoint = consistentPoint;
-    this.snapshotName = snapshotName;
-    this.outputPlugin = outputPlugin;
-  }
+    public ReplicationSlotInfo(String slotName, ReplicationType replicationType,
+                               LogSequenceNumber consistentPoint, String snapshotName,
+                               String outputPlugin) {
+        this.slotName = slotName;
+        this.replicationType = replicationType;
+        this.consistentPoint = consistentPoint;
+        this.snapshotName = snapshotName;
+        this.outputPlugin = outputPlugin;
+    }
 
-  /**
-   * Replication slot name.
-   *
-   * @return the slot name
-   */
-  public String getSlotName() {
-    return slotName;
-  }
+    /**
+     * Replication slot name.
+     *
+     * @return the slot name
+     */
+    public String getSlotName() {
+        return slotName;
+    }
 
-  /**
-   * Replication type of the slot created, might be PHYSICAL or LOGICAL.
-   *
-   * @return ReplicationType, PHYSICAL or LOGICAL
-   */
-  public ReplicationType getReplicationType() {
-    return replicationType;
-  }
+    /**
+     * Replication type of the slot created, might be PHYSICAL or LOGICAL.
+     *
+     * @return ReplicationType, PHYSICAL or LOGICAL
+     */
+    public ReplicationType getReplicationType() {
+        return replicationType;
+    }
 
-  /**
-   * LSN at which we became consistent.
-   *
-   * @return LogSequenceNumber with the consistent_point
-   */
-  public LogSequenceNumber getConsistentPoint() {
-    return consistentPoint;
-  }
+    /**
+     * LSN at which we became consistent.
+     *
+     * @return LogSequenceNumber with the consistent_point
+     */
+    public LogSequenceNumber getConsistentPoint() {
+        return consistentPoint;
+    }
 
-  /**
-   * Exported snapshot name at the point of replication slot creation.
-   *
-   * <p>As long as the exporting transaction remains open, other transactions can import its snapshot,
-   * and thereby be guaranteed that they see exactly the same view of the database that the first
-   * transaction sees.
-   *
-   * @return exported snapshot_name (may be <code>null</code>)
-   */
-  public String getSnapshotName() {
-    return snapshotName;
-  }
+    /**
+     * Exported snapshot name at the point of replication slot creation.
+     *
+     * <p>As long as the exporting transaction remains open, other transactions can import its snapshot,
+     * and thereby be guaranteed that they see exactly the same view of the database that the first
+     * transaction sees.
+     *
+     * @return exported snapshot_name (may be <code>null</code>)
+     */
+    public String getSnapshotName() {
+        return snapshotName;
+    }
 
-  /**
-   * Output Plugin used on slot creation.
-   *
-   * @return output_plugin (may be <code>null</code>)
-   */
-  public String getOutputPlugin() {
-    return outputPlugin;
-  }
+    /**
+     * Output Plugin used on slot creation.
+     *
+     * @return output_plugin (may be <code>null</code>)
+     */
+    public String getOutputPlugin() {
+        return outputPlugin;
+    }
 
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/replication/ReplicationType.java b/pgjdbc/src/main/java/org/postgresql/replication/ReplicationType.java
index ab93bfd..ae826f7 100644
--- a/pgjdbc/src/main/java/org/postgresql/replication/ReplicationType.java
+++ b/pgjdbc/src/main/java/org/postgresql/replication/ReplicationType.java
@@ -6,6 +6,6 @@
 package org.postgresql.replication;
 
 public enum ReplicationType {
-  LOGICAL,
-  PHYSICAL
+    LOGICAL,
+    PHYSICAL
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/replication/fluent/AbstractCreateSlotBuilder.java b/pgjdbc/src/main/java/org/postgresql/replication/fluent/AbstractCreateSlotBuilder.java
index 807400f..e318007 100644
--- a/pgjdbc/src/main/java/org/postgresql/replication/fluent/AbstractCreateSlotBuilder.java
+++ b/pgjdbc/src/main/java/org/postgresql/replication/fluent/AbstractCreateSlotBuilder.java
@@ -12,34 +12,34 @@ import org.postgresql.util.GT;
 import java.sql.SQLFeatureNotSupportedException;
 
 public abstract class AbstractCreateSlotBuilder<T extends ChainedCommonCreateSlotBuilder<T>>
-    implements ChainedCommonCreateSlotBuilder<T> {
+        implements ChainedCommonCreateSlotBuilder<T> {
 
-  protected String slotName;
-  protected boolean temporaryOption;
-  protected BaseConnection connection;
+    protected String slotName;
+    protected boolean temporaryOption;
+    protected BaseConnection connection;
 
-  protected AbstractCreateSlotBuilder(BaseConnection connection) {
-    this.connection = connection;
-  }
-
-  protected abstract T self();
-
-  @Override
-  public T withSlotName(String slotName) {
-    this.slotName = slotName;
-    return self();
-  }
-
-  @Override
-  public T withTemporaryOption() throws SQLFeatureNotSupportedException {
-
-    if (!connection.haveMinimumServerVersion(ServerVersion.v10)) {
-      throw new SQLFeatureNotSupportedException(
-          GT.tr("Server does not support temporary replication slots")
-      );
+    protected AbstractCreateSlotBuilder(BaseConnection connection) {
+        this.connection = connection;
     }
 
-    this.temporaryOption = true;
-    return self();
-  }
+    protected abstract T self();
+
+    @Override
+    public T withSlotName(String slotName) {
+        this.slotName = slotName;
+        return self();
+    }
+
+    @Override
+    public T withTemporaryOption() throws SQLFeatureNotSupportedException {
+
+        if (!connection.haveMinimumServerVersion(ServerVersion.v10)) {
+            throw new SQLFeatureNotSupportedException(
+                    GT.tr("Server does not support temporary replication slots")
+            );
+        }
+
+        this.temporaryOption = true;
+        return self();
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/replication/fluent/AbstractStreamBuilder.java b/pgjdbc/src/main/java/org/postgresql/replication/fluent/AbstractStreamBuilder.java
index 8f08bba..f020497 100644
--- a/pgjdbc/src/main/java/org/postgresql/replication/fluent/AbstractStreamBuilder.java
+++ b/pgjdbc/src/main/java/org/postgresql/replication/fluent/AbstractStreamBuilder.java
@@ -10,32 +10,32 @@ import org.postgresql.replication.LogSequenceNumber;
 import java.util.concurrent.TimeUnit;
 
 public abstract class AbstractStreamBuilder<T extends ChainedCommonStreamBuilder<T>>
-    implements ChainedCommonStreamBuilder<T> {
-  private static final int DEFAULT_STATUS_INTERVAL = (int) TimeUnit.SECONDS.toMillis(10L);
-  protected int statusIntervalMs = DEFAULT_STATUS_INTERVAL;
-  protected LogSequenceNumber startPosition = LogSequenceNumber.INVALID_LSN;
-  protected String slotName;
+        implements ChainedCommonStreamBuilder<T> {
+    private static final int DEFAULT_STATUS_INTERVAL = (int) TimeUnit.SECONDS.toMillis(10L);
+    protected int statusIntervalMs = DEFAULT_STATUS_INTERVAL;
+    protected LogSequenceNumber startPosition = LogSequenceNumber.INVALID_LSN;
+    protected String slotName;
 
-  public AbstractStreamBuilder() {
-  }
+    public AbstractStreamBuilder() {
+    }
 
-  protected abstract T self();
+    protected abstract T self();
 
-  @Override
-  public T withStatusInterval(int time, TimeUnit format) {
-    statusIntervalMs = (int) TimeUnit.MILLISECONDS.convert(time, format);
-    return self();
-  }
+    @Override
+    public T withStatusInterval(int time, TimeUnit format) {
+        statusIntervalMs = (int) TimeUnit.MILLISECONDS.convert(time, format);
+        return self();
+    }
 
-  @Override
-  public T withStartPosition(LogSequenceNumber lsn) {
-    this.startPosition = lsn;
-    return self();
-  }
+    @Override
+    public T withStartPosition(LogSequenceNumber lsn) {
+        this.startPosition = lsn;
+        return self();
+    }
 
-  @Override
-  public T withSlotName(String slotName) {
-    this.slotName = slotName;
-    return self();
-  }
+    @Override
+    public T withSlotName(String slotName) {
+        this.slotName = slotName;
+        return self();
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/replication/fluent/ChainedCommonCreateSlotBuilder.java b/pgjdbc/src/main/java/org/postgresql/replication/fluent/ChainedCommonCreateSlotBuilder.java
index 4114bef..ca5e2ce 100644
--- a/pgjdbc/src/main/java/org/postgresql/replication/fluent/ChainedCommonCreateSlotBuilder.java
+++ b/pgjdbc/src/main/java/org/postgresql/replication/fluent/ChainedCommonCreateSlotBuilder.java
@@ -15,32 +15,32 @@ import java.sql.SQLFeatureNotSupportedException;
  */
 public interface ChainedCommonCreateSlotBuilder<T extends ChainedCommonCreateSlotBuilder<T>> {
 
-  /**
-   * Replication slots provide an automated way to ensure that the master does not remove WAL
-   * segments until they have been received by all standbys, and that the master does not remove
-   * rows which could cause a recovery conflict even when the standby is disconnected.
-   *
-   * @param slotName not null unique replication slot name for create.
-   * @return T a slot builder
-   */
-  T withSlotName(String slotName);
+    /**
+     * Replication slots provide an automated way to ensure that the master does not remove WAL
+     * segments until they have been received by all standbys, and that the master does not remove
+     * rows which could cause a recovery conflict even when the standby is disconnected.
+     *
+     * @param slotName not null unique replication slot name for create.
+     * @return T a slot builder
+     */
+    T withSlotName(String slotName);
 
-  /**
-   * <p>Temporary slots are not saved to disk and are automatically dropped on error or when
-   * the session has finished.</p>
-   *
-   * <p>This feature is only supported by PostgreSQL versions &gt;= 10.</p>
-   *
-   * @return T a slot builder
-   * @throws SQLFeatureNotSupportedException thrown if PostgreSQL version is less than 10.
-   */
-  T withTemporaryOption() throws SQLFeatureNotSupportedException;
+    /**
+     * <p>Temporary slots are not saved to disk and are automatically dropped on error or when
+     * the session has finished.</p>
+     *
+     * <p>This feature is only supported by PostgreSQL versions &gt;= 10.</p>
+     *
+     * @return T a slot builder
+     * @throws SQLFeatureNotSupportedException thrown if PostgreSQL version is less than 10.
+     */
+    T withTemporaryOption() throws SQLFeatureNotSupportedException;
 
-  /**
-   * Create slot with specified parameters in database.
-   *
-   * @return ReplicationSlotInfo with the information of the created slot.
-   * @throws SQLException on error
-   */
-  ReplicationSlotInfo make() throws SQLException;
+    /**
+     * Create slot with specified parameters in database.
+     *
+     * @return ReplicationSlotInfo with the information of the created slot.
+     * @throws SQLException on error
+     */
+    ReplicationSlotInfo make() throws SQLException;
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/replication/fluent/ChainedCommonStreamBuilder.java b/pgjdbc/src/main/java/org/postgresql/replication/fluent/ChainedCommonStreamBuilder.java
index 2a41246..35c9cce 100644
--- a/pgjdbc/src/main/java/org/postgresql/replication/fluent/ChainedCommonStreamBuilder.java
+++ b/pgjdbc/src/main/java/org/postgresql/replication/fluent/ChainedCommonStreamBuilder.java
@@ -14,35 +14,35 @@ import java.util.concurrent.TimeUnit;
  */
 public interface ChainedCommonStreamBuilder<T extends ChainedCommonStreamBuilder<T>> {
 
-  /**
-   * Replication slots provide an automated way to ensure that the master does not remove WAL
-   * segments until they have been received by all standbys, and that the master does not remove
-   * rows which could cause a recovery conflict even when the standby is disconnected.
-   *
-   * @param slotName not null replication slot already exists on server.
-   * @return this instance as a fluent interface
-   */
-  T withSlotName(String slotName);
+    /**
+     * Replication slots provide an automated way to ensure that the master does not remove WAL
+     * segments until they have been received by all standbys, and that the master does not remove
+     * rows which could cause a recovery conflict even when the standby is disconnected.
+     *
+     * @param slotName not null replication slot already exists on server.
+     * @return this instance as a fluent interface
+     */
+    T withSlotName(String slotName);
 
-  /**
-   * Specifies the number of time between status packets sent back to the server. This allows for
-   * easier monitoring of the progress from server. A value of zero disables the periodic status
-   * updates completely, although an update will still be sent when requested by the server, to
-   * avoid timeout disconnect. The default value is 10 seconds.
-   *
-   * @param time   positive time
-   * @param format format for specified time
-   * @return not null fluent
-   */
-  T withStatusInterval(int time, TimeUnit format);
+    /**
+     * Specifies the number of time between status packets sent back to the server. This allows for
+     * easier monitoring of the progress from server. A value of zero disables the periodic status
+     * updates completely, although an update will still be sent when requested by the server, to
+     * avoid timeout disconnect. The default value is 10 seconds.
+     *
+     * @param time   positive time
+     * @param format format for specified time
+     * @return not null fluent
+     */
+    T withStatusInterval(int time, TimeUnit format);
 
-  /**
-   * Specify start position from which backend will start stream changes. If parameter will not
-   * specify, streaming starts from restart_lsn. For more details see pg_replication_slots
-   * description.
-   *
-   * @param lsn not null position from which need start replicate changes
-   * @return not null fluent
-   */
-  T withStartPosition(LogSequenceNumber lsn);
+    /**
+     * Specify start position from which backend will start stream changes. If parameter will not
+     * specify, streaming starts from restart_lsn. For more details see pg_replication_slots
+     * description.
+     *
+     * @param lsn not null position from which need start replicate changes
+     * @return not null fluent
+     */
+    T withStartPosition(LogSequenceNumber lsn);
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/replication/fluent/ChainedCreateReplicationSlotBuilder.java b/pgjdbc/src/main/java/org/postgresql/replication/fluent/ChainedCreateReplicationSlotBuilder.java
index 36e2f0b..3121438 100644
--- a/pgjdbc/src/main/java/org/postgresql/replication/fluent/ChainedCreateReplicationSlotBuilder.java
+++ b/pgjdbc/src/main/java/org/postgresql/replication/fluent/ChainedCreateReplicationSlotBuilder.java
@@ -12,72 +12,73 @@ import org.postgresql.replication.fluent.physical.ChainedPhysicalCreateSlotBuild
  * Fluent interface for specify common parameters for Logical and Physical replication.
  */
 public interface ChainedCreateReplicationSlotBuilder {
-  /**
-   * Get the logical slot builder.
-   * Example usage:
-   * <pre>
-   *   {@code
-   *
-   *    pgConnection
-   *        .getReplicationAPI()
-   *        .createReplicationSlot()
-   *        .logical()
-   *        .withSlotName("mySlot")
-   *        .withOutputPlugin("test_decoding")
-   *        .make();
-   *
-   *    PGReplicationStream stream =
-   *        pgConnection
-   *            .getReplicationAPI()
-   *            .replicationStream()
-   *            .logical()
-   *            .withSlotName("mySlot")
-   *            .withSlotOption("include-xids", false)
-   *            .withSlotOption("skip-empty-xacts", true)
-   *            .start();
-   *
-   *    while (true) {
-   *      ByteBuffer buffer = stream.read();
-   *      //process logical changes
-   *    }
-   *
-   *   }
-   * </pre>
-   * @return not null fluent api
-   */
-  ChainedLogicalCreateSlotBuilder logical();
+    /**
+     * Get the logical slot builder.
+     * Example usage:
+     * <pre>
+     *   {@code
+     *
+     *    pgConnection
+     *        .getReplicationAPI()
+     *        .createReplicationSlot()
+     *        .logical()
+     *        .withSlotName("mySlot")
+     *        .withOutputPlugin("test_decoding")
+     *        .make();
+     *
+     *    PGReplicationStream stream =
+     *        pgConnection
+     *            .getReplicationAPI()
+     *            .replicationStream()
+     *            .logical()
+     *            .withSlotName("mySlot")
+     *            .withSlotOption("include-xids", false)
+     *            .withSlotOption("skip-empty-xacts", true)
+     *            .start();
+     *
+     *    while (true) {
+     *      ByteBuffer buffer = stream.read();
+     *      //process logical changes
+     *    }
+     *
+     *   }
+     * </pre>
+     *
+     * @return not null fluent api
+     */
+    ChainedLogicalCreateSlotBuilder logical();
 
-  /**
-   * <p>Create physical replication stream for process wal logs in binary form.</p>
-   *
-   * <p>Example usage:</p>
-   * <pre>
-   *   {@code
-   *
-   *    pgConnection
-   *        .getReplicationAPI()
-   *        .createReplicationSlot()
-   *        .physical()
-   *        .withSlotName("mySlot")
-   *        .make();
-   *
-   *    PGReplicationStream stream =
-   *        pgConnection
-   *            .getReplicationAPI()
-   *            .replicationStream()
-   *            .physical()
-   *            .withSlotName("mySlot")
-   *            .start();
-   *
-   *    while (true) {
-   *      ByteBuffer buffer = stream.read();
-   *      //process binary WAL logs
-   *    }
-   *
-   *   }
-   * </pre>
-   *
-   * @return not null fluent api
-   */
-  ChainedPhysicalCreateSlotBuilder physical();
+    /**
+     * <p>Create a physical replication stream for processing WAL logs in binary form.</p>
+     *
+     * <p>Example usage:</p>
+     * <pre>
+     *   {@code
+     *
+     *    pgConnection
+     *        .getReplicationAPI()
+     *        .createReplicationSlot()
+     *        .physical()
+     *        .withSlotName("mySlot")
+     *        .make();
+     *
+     *    PGReplicationStream stream =
+     *        pgConnection
+     *            .getReplicationAPI()
+     *            .replicationStream()
+     *            .physical()
+     *            .withSlotName("mySlot")
+     *            .start();
+     *
+     *    while (true) {
+     *      ByteBuffer buffer = stream.read();
+     *      //process binary WAL logs
+     *    }
+     *
+     *   }
+     * </pre>
+     *
+     * @return not null fluent api
+     */
+    ChainedPhysicalCreateSlotBuilder physical();
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/replication/fluent/ChainedStreamBuilder.java b/pgjdbc/src/main/java/org/postgresql/replication/fluent/ChainedStreamBuilder.java
index 58cbd2e..eca4b7f 100644
--- a/pgjdbc/src/main/java/org/postgresql/replication/fluent/ChainedStreamBuilder.java
+++ b/pgjdbc/src/main/java/org/postgresql/replication/fluent/ChainedStreamBuilder.java
@@ -13,66 +13,66 @@ import org.postgresql.replication.fluent.physical.ChainedPhysicalStreamBuilder;
  * Api not thread safe, and can be use only for crate single stream.
  */
 public interface ChainedStreamBuilder {
-  /**
-   * <p>Create logical replication stream that decode raw wal logs by output plugin to logical form.
-   * Default about logical decoding you can see by following link
-   * <a href="http://www.postgresql.org/docs/current/static/logicaldecoding-explanation.html">
-   *     Logical Decoding Concepts
-   * </a>.
-   * </p>
-   *
-   * <p>Example usage:</p>
-   * <pre>
-   *   {@code
-   *
-   *    PGReplicationStream stream =
-   *        pgConnection
-   *            .getReplicationAPI()
-   *            .replicationStream()
-   *            .logical()
-   *            .withSlotName("test_decoding")
-   *            .withSlotOption("include-xids", false)
-   *            .withSlotOption("skip-empty-xacts", true)
-   *            .start();
-   *
-   *    while (true) {
-   *      ByteBuffer buffer = stream.read();
-   *      //process logical changes
-   *    }
-   *
-   *   }
-   * </pre>
-   *
-   * @return not null fluent api
-   */
-  ChainedLogicalStreamBuilder logical();
+    /**
+     * <p>Create a logical replication stream that decodes raw WAL logs into logical form via an
+     * output plugin. Details about logical decoding can be found at the following link
+     * <a href="http://www.postgresql.org/docs/current/static/logicaldecoding-explanation.html">
+     * Logical Decoding Concepts
+     * </a>.
+     * </p>
+     *
+     * <p>Example usage:</p>
+     * <pre>
+     *   {@code
+     *
+     *    PGReplicationStream stream =
+     *        pgConnection
+     *            .getReplicationAPI()
+     *            .replicationStream()
+     *            .logical()
+     *            .withSlotName("test_decoding")
+     *            .withSlotOption("include-xids", false)
+     *            .withSlotOption("skip-empty-xacts", true)
+     *            .start();
+     *
+     *    while (true) {
+     *      ByteBuffer buffer = stream.read();
+     *      //process logical changes
+     *    }
+     *
+     *   }
+     * </pre>
+     *
+     * @return not null fluent api
+     */
+    ChainedLogicalStreamBuilder logical();
 
-  /**
-   * <p>Create physical replication stream for process wal logs in binary form.</p>
-   *
-   * <p>Example usage:</p>
-   * <pre>
-   *   {@code
-   *
-   *    LogSequenceNumber lsn = getCurrentLSN();
-   *
-   *    PGReplicationStream stream =
-   *        pgConnection
-   *            .getReplicationAPI()
-   *            .replicationStream()
-   *            .physical()
-   *            .withStartPosition(lsn)
-   *            .start();
-   *
-   *    while (true) {
-   *      ByteBuffer buffer = stream.read();
-   *      //process binary WAL logs
-   *    }
-   *
-   *   }
-   * </pre>
-   *
-   * @return not null fluent api
-   */
-  ChainedPhysicalStreamBuilder physical();
+    /**
+     * <p>Create a physical replication stream for processing WAL logs in binary form.</p>
+     *
+     * <p>Example usage:</p>
+     * <pre>
+     *   {@code
+     *
+     *    LogSequenceNumber lsn = getCurrentLSN();
+     *
+     *    PGReplicationStream stream =
+     *        pgConnection
+     *            .getReplicationAPI()
+     *            .replicationStream()
+     *            .physical()
+     *            .withStartPosition(lsn)
+     *            .start();
+     *
+     *    while (true) {
+     *      ByteBuffer buffer = stream.read();
+     *      //process binary WAL logs
+     *    }
+     *
+     *   }
+     * </pre>
+     *
+     * @return not null fluent api
+     */
+    ChainedPhysicalStreamBuilder physical();
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/replication/fluent/CommonOptions.java b/pgjdbc/src/main/java/org/postgresql/replication/fluent/CommonOptions.java
index 6eacbee..c078b0b 100644
--- a/pgjdbc/src/main/java/org/postgresql/replication/fluent/CommonOptions.java
+++ b/pgjdbc/src/main/java/org/postgresql/replication/fluent/CommonOptions.java
@@ -11,27 +11,27 @@ import org.postgresql.replication.LogSequenceNumber;
  * Common parameters for logical and physical replication.
  */
 public interface CommonOptions {
-  /**
-   * Replication slots provide an automated way to ensure that the master does not remove WAL
-   * segments until they have been received by all standbys, and that the master does not remove
-   * rows which could cause a recovery conflict even when the standby is disconnected.
-   *
-   * @return nullable replication slot name that already exists on server and free.
-   */
-  String getSlotName();
+    /**
+     * Replication slots provide an automated way to ensure that the master does not remove WAL
+     * segments until they have been received by all standbys, and that the master does not remove
+     * rows which could cause a recovery conflict even when the standby is disconnected.
+     *
+     * @return nullable replication slot name that already exists on the server and is free.
+     */
+    String getSlotName();
 
-  /**
-   * @return the position to start replication. This cannot be null.
-   */
-  LogSequenceNumber getStartLSNPosition();
+    /**
+     * @return the position to start replication. This cannot be null.
+     */
+    LogSequenceNumber getStartLSNPosition();
 
-  /**
-   * Specifies the number of millisecond between status packets sent back to the server. This allows
-   * for easier monitoring of the progress from server. A value of zero disables the periodic status
-   * updates completely, although an update will still be sent when requested by the server, to
-   * avoid timeout disconnect. The default value is 10 seconds.
-   *
-   * @return the current status interval
-   */
-  int getStatusInterval();
+    /**
+     * Specifies the number of milliseconds between status packets sent back to the server. This allows
+     * for easier monitoring of the progress from server. A value of zero disables the periodic status
+     * updates completely, although an update will still be sent when requested by the server, to
+     * avoid timeout disconnect. The default value is 10 seconds.
+     *
+     * @return the current status interval
+     */
+    int getStatusInterval();
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/replication/fluent/ReplicationCreateSlotBuilder.java b/pgjdbc/src/main/java/org/postgresql/replication/fluent/ReplicationCreateSlotBuilder.java
index e0067a3..36e396e 100644
--- a/pgjdbc/src/main/java/org/postgresql/replication/fluent/ReplicationCreateSlotBuilder.java
+++ b/pgjdbc/src/main/java/org/postgresql/replication/fluent/ReplicationCreateSlotBuilder.java
@@ -12,19 +12,19 @@ import org.postgresql.replication.fluent.physical.ChainedPhysicalCreateSlotBuild
 import org.postgresql.replication.fluent.physical.PhysicalCreateSlotBuilder;
 
 public class ReplicationCreateSlotBuilder implements ChainedCreateReplicationSlotBuilder {
-  private final BaseConnection baseConnection;
+    private final BaseConnection baseConnection;
 
-  public ReplicationCreateSlotBuilder(BaseConnection baseConnection) {
-    this.baseConnection = baseConnection;
-  }
+    public ReplicationCreateSlotBuilder(BaseConnection baseConnection) {
+        this.baseConnection = baseConnection;
+    }
 
-  @Override
-  public ChainedLogicalCreateSlotBuilder logical() {
-    return new LogicalCreateSlotBuilder(baseConnection);
-  }
+    @Override
+    public ChainedLogicalCreateSlotBuilder logical() {
+        return new LogicalCreateSlotBuilder(baseConnection);
+    }
 
-  @Override
-  public ChainedPhysicalCreateSlotBuilder physical() {
-    return new PhysicalCreateSlotBuilder(baseConnection);
-  }
+    @Override
+    public ChainedPhysicalCreateSlotBuilder physical() {
+        return new PhysicalCreateSlotBuilder(baseConnection);
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/replication/fluent/ReplicationStreamBuilder.java b/pgjdbc/src/main/java/org/postgresql/replication/fluent/ReplicationStreamBuilder.java
index 4d8443a..28f822e 100644
--- a/pgjdbc/src/main/java/org/postgresql/replication/fluent/ReplicationStreamBuilder.java
+++ b/pgjdbc/src/main/java/org/postgresql/replication/fluent/ReplicationStreamBuilder.java
@@ -20,34 +20,34 @@ import org.postgresql.replication.fluent.physical.StartPhysicalReplicationCallba
 import java.sql.SQLException;
 
 public class ReplicationStreamBuilder implements ChainedStreamBuilder {
-  private final BaseConnection baseConnection;
+    private final BaseConnection baseConnection;
 
-  /**
-   * @param connection not null connection with that will be associate replication
-   */
-  public ReplicationStreamBuilder(final BaseConnection connection) {
-    this.baseConnection = connection;
-  }
+    /**
+     * @param connection not null connection with which replication will be associated
+     */
+    public ReplicationStreamBuilder(final BaseConnection connection) {
+        this.baseConnection = connection;
+    }
 
-  @Override
-  public ChainedLogicalStreamBuilder logical() {
-    return new LogicalStreamBuilder(new StartLogicalReplicationCallback() {
-      @Override
-      public PGReplicationStream start(LogicalReplicationOptions options) throws SQLException {
-        ReplicationProtocol protocol = baseConnection.getReplicationProtocol();
-        return protocol.startLogical(options);
-      }
-    });
-  }
+    @Override
+    public ChainedLogicalStreamBuilder logical() {
+        return new LogicalStreamBuilder(new StartLogicalReplicationCallback() {
+            @Override
+            public PGReplicationStream start(LogicalReplicationOptions options) throws SQLException {
+                ReplicationProtocol protocol = baseConnection.getReplicationProtocol();
+                return protocol.startLogical(options);
+            }
+        });
+    }
 
-  @Override
-  public ChainedPhysicalStreamBuilder physical() {
-    return new PhysicalStreamBuilder(new StartPhysicalReplicationCallback() {
-      @Override
-      public PGReplicationStream start(PhysicalReplicationOptions options) throws SQLException {
-        ReplicationProtocol protocol = baseConnection.getReplicationProtocol();
-        return protocol.startPhysical(options);
-      }
-    });
-  }
+    @Override
+    public ChainedPhysicalStreamBuilder physical() {
+        return new PhysicalStreamBuilder(new StartPhysicalReplicationCallback() {
+            @Override
+            public PGReplicationStream start(PhysicalReplicationOptions options) throws SQLException {
+                ReplicationProtocol protocol = baseConnection.getReplicationProtocol();
+                return protocol.startPhysical(options);
+            }
+        });
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/replication/fluent/logical/ChainedLogicalCreateSlotBuilder.java b/pgjdbc/src/main/java/org/postgresql/replication/fluent/logical/ChainedLogicalCreateSlotBuilder.java
index cae77bb..78a64e0 100644
--- a/pgjdbc/src/main/java/org/postgresql/replication/fluent/logical/ChainedLogicalCreateSlotBuilder.java
+++ b/pgjdbc/src/main/java/org/postgresql/replication/fluent/logical/ChainedLogicalCreateSlotBuilder.java
@@ -11,17 +11,17 @@ import org.postgresql.replication.fluent.ChainedCommonCreateSlotBuilder;
  * Logical replication slot specific parameters.
  */
 public interface ChainedLogicalCreateSlotBuilder
-    extends ChainedCommonCreateSlotBuilder<ChainedLogicalCreateSlotBuilder> {
+        extends ChainedCommonCreateSlotBuilder<ChainedLogicalCreateSlotBuilder> {
 
-  /**
-   * <p>Output plugin that should be use for decode physical represent WAL to some logical form.
-   * Output plugin should be installed on server(exists in shared_preload_libraries).</p>
-   *
-   * <p>Package postgresql-contrib provides sample output plugin <b>test_decoding</b> that can be
-   * use for test logical replication api</p>
-   *
-   * @param outputPlugin not null name of the output plugin used for logical decoding
-   * @return the logical slot builder
-   */
-  ChainedLogicalCreateSlotBuilder withOutputPlugin(String outputPlugin);
+    /**
+     * <p>Output plugin that should be used to decode the physical WAL representation into some
+     * logical form. The output plugin must be installed on the server (exist in shared_preload_libraries).</p>
+     *
+     * <p>The postgresql-contrib package provides a sample output plugin <b>test_decoding</b> that
+     * can be used for testing the logical replication API.</p>
+     *
+     * @param outputPlugin not null name of the output plugin used for logical decoding
+     * @return the logical slot builder
+     */
+    ChainedLogicalCreateSlotBuilder withOutputPlugin(String outputPlugin);
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/replication/fluent/logical/ChainedLogicalStreamBuilder.java b/pgjdbc/src/main/java/org/postgresql/replication/fluent/logical/ChainedLogicalStreamBuilder.java
index 0dc60b9..725dcc4 100644
--- a/pgjdbc/src/main/java/org/postgresql/replication/fluent/logical/ChainedLogicalStreamBuilder.java
+++ b/pgjdbc/src/main/java/org/postgresql/replication/fluent/logical/ChainedLogicalStreamBuilder.java
@@ -12,45 +12,41 @@ import java.sql.SQLException;
 import java.util.Properties;
 
 public interface ChainedLogicalStreamBuilder
-    extends ChainedCommonStreamBuilder<ChainedLogicalStreamBuilder> {
-  /**
-   * Open logical replication stream.
-   *
-   * @return not null PGReplicationStream available for fetch data in logical form
-   * @throws SQLException  if there are errors
-   */
-  PGReplicationStream start() throws SQLException;
+        extends ChainedCommonStreamBuilder<ChainedLogicalStreamBuilder> {
+    /**
+     * Open logical replication stream.
+     *
+     * @return not null PGReplicationStream available for fetch data in logical form
+     * @throws SQLException if there are errors
+     */
+    PGReplicationStream start() throws SQLException;
 
-  /**
-   *
-   * @param optionName name of option
-   * @param optionValue boolean value
-   * @return ChainedLogicalStreamBuilder
-   */
+    /**
+     * @param optionName  name of option
+     * @param optionValue boolean value
+     * @return ChainedLogicalStreamBuilder
+     */
 
-  ChainedLogicalStreamBuilder withSlotOption(String optionName, boolean optionValue);
+    ChainedLogicalStreamBuilder withSlotOption(String optionName, boolean optionValue);
 
-  /**
-   *
-   * @param optionName name of option
-   * @param optionValue integer value
-   * @return ChainedLogicalStreamBuilder
-   */
-  ChainedLogicalStreamBuilder withSlotOption(String optionName, int optionValue);
+    /**
+     * @param optionName  name of option
+     * @param optionValue integer value
+     * @return ChainedLogicalStreamBuilder
+     */
+    ChainedLogicalStreamBuilder withSlotOption(String optionName, int optionValue);
 
-  /**
-   *
-   * @param optionName name of option
-   * @param optionValue String value
-   * @return ChainedLogicalStreamBuilder
-   */
-  ChainedLogicalStreamBuilder withSlotOption(String optionName, String optionValue);
+    /**
+     * @param optionName  name of option
+     * @param optionValue String value
+     * @return ChainedLogicalStreamBuilder
+     */
+    ChainedLogicalStreamBuilder withSlotOption(String optionName, String optionValue);
 
-  /**
-   *
-   * @param options properties
-   * @return ChainedLogicalStreamBuilder
-   */
-  ChainedLogicalStreamBuilder withSlotOptions(Properties options);
+    /**
+     * @param options properties
+     * @return ChainedLogicalStreamBuilder
+     */
+    ChainedLogicalStreamBuilder withSlotOptions(Properties options);
 
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/replication/fluent/logical/LogicalCreateSlotBuilder.java b/pgjdbc/src/main/java/org/postgresql/replication/fluent/logical/LogicalCreateSlotBuilder.java
index 0688822..a5cfe39 100644
--- a/pgjdbc/src/main/java/org/postgresql/replication/fluent/logical/LogicalCreateSlotBuilder.java
+++ b/pgjdbc/src/main/java/org/postgresql/replication/fluent/logical/LogicalCreateSlotBuilder.java
@@ -19,68 +19,68 @@ import java.sql.SQLException;
 import java.sql.Statement;
 
 public class LogicalCreateSlotBuilder
-    extends AbstractCreateSlotBuilder<ChainedLogicalCreateSlotBuilder>
-    implements ChainedLogicalCreateSlotBuilder {
+        extends AbstractCreateSlotBuilder<ChainedLogicalCreateSlotBuilder>
+        implements ChainedLogicalCreateSlotBuilder {
 
-  private String outputPlugin;
+    private String outputPlugin;
 
-  public LogicalCreateSlotBuilder(BaseConnection connection) {
-    super(connection);
-  }
-
-  @Override
-  protected ChainedLogicalCreateSlotBuilder self() {
-    return this;
-  }
-
-  @Override
-  public ChainedLogicalCreateSlotBuilder withOutputPlugin(String outputPlugin) {
-    this.outputPlugin = outputPlugin;
-    return self();
-  }
-
-  @Override
-  public ReplicationSlotInfo make() throws SQLException {
-    String outputPlugin = this.outputPlugin;
-    if (outputPlugin == null || outputPlugin.isEmpty()) {
-      throw new IllegalArgumentException(
-          "OutputPlugin required parameter for logical replication slot");
+    public LogicalCreateSlotBuilder(BaseConnection connection) {
+        super(connection);
     }
 
-    if (slotName == null || slotName.isEmpty()) {
-      throw new IllegalArgumentException("Replication slotName can't be null");
+    @Override
+    protected ChainedLogicalCreateSlotBuilder self() {
+        return this;
     }
 
-    Statement statement = connection.createStatement();
-    ResultSet result = null;
-    ReplicationSlotInfo slotInfo = null;
-    try {
-      String sql = String.format(
-          "CREATE_REPLICATION_SLOT %s %s LOGICAL %s",
-          slotName,
-          temporaryOption ? "TEMPORARY" : "",
-          outputPlugin
-      );
-      statement.execute(sql);
-      result = statement.getResultSet();
-      if (result != null && result.next()) {
-        slotInfo = new ReplicationSlotInfo(
-            result.getString("slot_name"),
-            ReplicationType.LOGICAL,
-            LogSequenceNumber.valueOf(result.getString("consistent_point")),
-            result.getString("snapshot_name"),
-            result.getString("output_plugin"));
-      } else {
-        throw new PSQLException(
-            GT.tr("{0} returned no results"),
-            PSQLState.OBJECT_NOT_IN_STATE);
-      }
-    } finally {
-      if (result != null) {
-        result.close();
-      }
-      statement.close();
+    @Override
+    public ChainedLogicalCreateSlotBuilder withOutputPlugin(String outputPlugin) {
+        this.outputPlugin = outputPlugin;
+        return self();
+    }
+
+    @Override
+    public ReplicationSlotInfo make() throws SQLException {
+        String outputPlugin = this.outputPlugin;
+        if (outputPlugin == null || outputPlugin.isEmpty()) {
+            throw new IllegalArgumentException(
+                    "OutputPlugin required parameter for logical replication slot");
+        }
+
+        if (slotName == null || slotName.isEmpty()) {
+            throw new IllegalArgumentException("Replication slotName can't be null");
+        }
+
+        Statement statement = connection.createStatement();
+        ResultSet result = null;
+        ReplicationSlotInfo slotInfo = null;
+        try {
+            String sql = String.format(
+                    "CREATE_REPLICATION_SLOT %s %s LOGICAL %s",
+                    slotName,
+                    temporaryOption ? "TEMPORARY" : "",
+                    outputPlugin
+            );
+            statement.execute(sql);
+            result = statement.getResultSet();
+            if (result != null && result.next()) {
+                slotInfo = new ReplicationSlotInfo(
+                        result.getString("slot_name"),
+                        ReplicationType.LOGICAL,
+                        LogSequenceNumber.valueOf(result.getString("consistent_point")),
+                        result.getString("snapshot_name"),
+                        result.getString("output_plugin"));
+            } else {
+                throw new PSQLException(
+                        GT.tr("{0} returned no results"),
+                        PSQLState.OBJECT_NOT_IN_STATE);
+            }
+        } finally {
+            if (result != null) {
+                result.close();
+            }
+            statement.close();
+        }
+        return slotInfo;
     }
-    return slotInfo;
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/replication/fluent/logical/LogicalReplicationOptions.java b/pgjdbc/src/main/java/org/postgresql/replication/fluent/logical/LogicalReplicationOptions.java
index 8f1ef01..e875525 100644
--- a/pgjdbc/src/main/java/org/postgresql/replication/fluent/logical/LogicalReplicationOptions.java
+++ b/pgjdbc/src/main/java/org/postgresql/replication/fluent/logical/LogicalReplicationOptions.java
@@ -10,19 +10,19 @@ import org.postgresql.replication.fluent.CommonOptions;
 import java.util.Properties;
 
 public interface LogicalReplicationOptions extends CommonOptions {
-  /**
-   * Required parameter for logical replication.
-   *
-   * @return not null logical replication slot name that already exists on server and free.
-   */
-  @Override
-  String getSlotName();
+    /**
+     * Required parameter for logical replication.
+     *
+     * @return not null logical replication slot name that already exists on the server and is free.
+     */
+    @Override
+    String getSlotName();
 
-  /**
-   * Parameters for output plugin. Parameters will be set to output plugin that register for
-   * specified replication slot name.
-   *
-   * @return list options that will be pass to output_plugin for that was create replication slot
-   */
-  Properties getSlotOptions();
+    /**
+     * Parameters for the output plugin. These parameters will be set on the output plugin that is
+     * registered for the specified replication slot name.
+     *
+     * @return list of options that will be passed to the output_plugin with which the replication slot was created
+     */
+    Properties getSlotOptions();
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/replication/fluent/logical/LogicalStreamBuilder.java b/pgjdbc/src/main/java/org/postgresql/replication/fluent/logical/LogicalStreamBuilder.java
index f8a1bcf..271d173 100644
--- a/pgjdbc/src/main/java/org/postgresql/replication/fluent/logical/LogicalStreamBuilder.java
+++ b/pgjdbc/src/main/java/org/postgresql/replication/fluent/logical/LogicalStreamBuilder.java
@@ -13,79 +13,79 @@ import java.sql.SQLException;
 import java.util.Properties;
 
 public class LogicalStreamBuilder extends AbstractStreamBuilder<ChainedLogicalStreamBuilder>
-    implements ChainedLogicalStreamBuilder, LogicalReplicationOptions {
-  private final Properties slotOptions;
+        implements ChainedLogicalStreamBuilder, LogicalReplicationOptions {
+    private final Properties slotOptions;
 
-  private final StartLogicalReplicationCallback startCallback;
+    private final StartLogicalReplicationCallback startCallback;
 
-  /**
-   * @param startCallback not null callback that should be execute after build parameters for start
-   *                      replication
-   */
-  public LogicalStreamBuilder(StartLogicalReplicationCallback startCallback) {
-    this.startCallback = startCallback;
-    this.slotOptions = new Properties();
-  }
-
-  @Override
-  protected ChainedLogicalStreamBuilder self() {
-    return this;
-  }
-
-  @Override
-  public PGReplicationStream start() throws SQLException {
-    return startCallback.start(this);
-  }
-
-  @Override
-  public String getSlotName() {
-    return slotName;
-  }
-
-  @Override
-  public ChainedLogicalStreamBuilder withStartPosition(LogSequenceNumber lsn) {
-    startPosition = lsn;
-    return this;
-  }
-
-  @Override
-  public ChainedLogicalStreamBuilder withSlotOption(String optionName, boolean optionValue) {
-    slotOptions.setProperty(optionName, String.valueOf(optionValue));
-    return this;
-  }
-
-  @Override
-  public ChainedLogicalStreamBuilder withSlotOption(String optionName, int optionValue) {
-    slotOptions.setProperty(optionName, String.valueOf(optionValue));
-    return this;
-  }
-
-  @Override
-  public ChainedLogicalStreamBuilder withSlotOption(String optionName, String optionValue) {
-    slotOptions.setProperty(optionName, optionValue);
-    return this;
-  }
-
-  @Override
-  public ChainedLogicalStreamBuilder withSlotOptions(Properties options) {
-    for (String propertyName : options.stringPropertyNames()) {
-      slotOptions.setProperty(propertyName, options.getProperty(propertyName));
+    /**
+     * @param startCallback not null callback that should be executed after the parameters for
+     *                      starting replication have been built
+     */
+    public LogicalStreamBuilder(StartLogicalReplicationCallback startCallback) {
+        this.startCallback = startCallback;
+        this.slotOptions = new Properties();
     }
-    return this;
-  }
 
-  @Override
-  public LogSequenceNumber getStartLSNPosition() {
-    return startPosition;
-  }
+    @Override
+    protected ChainedLogicalStreamBuilder self() {
+        return this;
+    }
 
-  @Override
-  public Properties getSlotOptions() {
-    return slotOptions;
-  }
+    @Override
+    public PGReplicationStream start() throws SQLException {
+        return startCallback.start(this);
+    }
 
-  @Override
-  public int getStatusInterval() {
-    return statusIntervalMs;
-  }
+    @Override
+    public String getSlotName() {
+        return slotName;
+    }
+
+    @Override
+    public ChainedLogicalStreamBuilder withStartPosition(LogSequenceNumber lsn) {
+        startPosition = lsn;
+        return this;
+    }
+
+    @Override
+    public ChainedLogicalStreamBuilder withSlotOption(String optionName, boolean optionValue) {
+        slotOptions.setProperty(optionName, String.valueOf(optionValue));
+        return this;
+    }
+
+    @Override
+    public ChainedLogicalStreamBuilder withSlotOption(String optionName, int optionValue) {
+        slotOptions.setProperty(optionName, String.valueOf(optionValue));
+        return this;
+    }
+
+    @Override
+    public ChainedLogicalStreamBuilder withSlotOption(String optionName, String optionValue) {
+        slotOptions.setProperty(optionName, optionValue);
+        return this;
+    }
+
+    @Override
+    public ChainedLogicalStreamBuilder withSlotOptions(Properties options) {
+        for (String propertyName : options.stringPropertyNames()) {
+            slotOptions.setProperty(propertyName, options.getProperty(propertyName));
+        }
+        return this;
+    }
+
+    @Override
+    public LogSequenceNumber getStartLSNPosition() {
+        return startPosition;
+    }
+
+    @Override
+    public Properties getSlotOptions() {
+        return slotOptions;
+    }
+
+    @Override
+    public int getStatusInterval() {
+        return statusIntervalMs;
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/replication/fluent/logical/StartLogicalReplicationCallback.java b/pgjdbc/src/main/java/org/postgresql/replication/fluent/logical/StartLogicalReplicationCallback.java
index 8612eca..2ccf1dc 100644
--- a/pgjdbc/src/main/java/org/postgresql/replication/fluent/logical/StartLogicalReplicationCallback.java
+++ b/pgjdbc/src/main/java/org/postgresql/replication/fluent/logical/StartLogicalReplicationCallback.java
@@ -10,5 +10,5 @@ import org.postgresql.replication.PGReplicationStream;
 import java.sql.SQLException;
 
 public interface StartLogicalReplicationCallback {
-  PGReplicationStream start(LogicalReplicationOptions options) throws SQLException;
+    PGReplicationStream start(LogicalReplicationOptions options) throws SQLException;
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/replication/fluent/physical/ChainedPhysicalCreateSlotBuilder.java b/pgjdbc/src/main/java/org/postgresql/replication/fluent/physical/ChainedPhysicalCreateSlotBuilder.java
index 8fdc810..086996c 100644
--- a/pgjdbc/src/main/java/org/postgresql/replication/fluent/physical/ChainedPhysicalCreateSlotBuilder.java
+++ b/pgjdbc/src/main/java/org/postgresql/replication/fluent/physical/ChainedPhysicalCreateSlotBuilder.java
@@ -11,5 +11,5 @@ import org.postgresql.replication.fluent.ChainedCommonCreateSlotBuilder;
  * Physical replication slot specific parameters.
  */
 public interface ChainedPhysicalCreateSlotBuilder extends
-    ChainedCommonCreateSlotBuilder<ChainedPhysicalCreateSlotBuilder> {
+        ChainedCommonCreateSlotBuilder<ChainedPhysicalCreateSlotBuilder> {
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/replication/fluent/physical/ChainedPhysicalStreamBuilder.java b/pgjdbc/src/main/java/org/postgresql/replication/fluent/physical/ChainedPhysicalStreamBuilder.java
index f458c88..db4ce38 100644
--- a/pgjdbc/src/main/java/org/postgresql/replication/fluent/physical/ChainedPhysicalStreamBuilder.java
+++ b/pgjdbc/src/main/java/org/postgresql/replication/fluent/physical/ChainedPhysicalStreamBuilder.java
@@ -11,13 +11,13 @@ import org.postgresql.replication.fluent.ChainedCommonStreamBuilder;
 import java.sql.SQLException;
 
 public interface ChainedPhysicalStreamBuilder extends
-    ChainedCommonStreamBuilder<ChainedPhysicalStreamBuilder> {
+        ChainedCommonStreamBuilder<ChainedPhysicalStreamBuilder> {
 
-  /**
-   * Open physical replication stream.
-   *
-   * @return not null PGReplicationStream available for fetch wal logs in binary form
-   * @throws SQLException on error
-   */
-  PGReplicationStream start() throws SQLException;
+    /**
+     * Open physical replication stream.
+     *
+     * @return not null PGReplicationStream available for fetch wal logs in binary form
+     * @throws SQLException on error
+     */
+    PGReplicationStream start() throws SQLException;
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/replication/fluent/physical/PhysicalCreateSlotBuilder.java b/pgjdbc/src/main/java/org/postgresql/replication/fluent/physical/PhysicalCreateSlotBuilder.java
index 4c2597b..e0fc552 100644
--- a/pgjdbc/src/main/java/org/postgresql/replication/fluent/physical/PhysicalCreateSlotBuilder.java
+++ b/pgjdbc/src/main/java/org/postgresql/replication/fluent/physical/PhysicalCreateSlotBuilder.java
@@ -19,53 +19,53 @@ import java.sql.SQLException;
 import java.sql.Statement;
 
 public class PhysicalCreateSlotBuilder
-    extends AbstractCreateSlotBuilder<ChainedPhysicalCreateSlotBuilder>
-    implements ChainedPhysicalCreateSlotBuilder {
+        extends AbstractCreateSlotBuilder<ChainedPhysicalCreateSlotBuilder>
+        implements ChainedPhysicalCreateSlotBuilder {
 
-  public PhysicalCreateSlotBuilder(BaseConnection connection) {
-    super(connection);
-  }
-
-  @Override
-  protected ChainedPhysicalCreateSlotBuilder self() {
-    return this;
-  }
-
-  @Override
-  public ReplicationSlotInfo make() throws SQLException {
-    if (slotName == null || slotName.isEmpty()) {
-      throw new IllegalArgumentException("Replication slotName can't be null");
+    public PhysicalCreateSlotBuilder(BaseConnection connection) {
+        super(connection);
     }
 
-    Statement statement = connection.createStatement();
-    ResultSet result = null;
-    ReplicationSlotInfo slotInfo = null;
-    try {
-      String sql = String.format(
-          "CREATE_REPLICATION_SLOT %s %s PHYSICAL",
-          slotName,
-          temporaryOption ? "TEMPORARY" : ""
-      );
-      statement.execute(sql);
-      result = statement.getResultSet();
-      if (result != null && result.next()) {
-        slotInfo = new ReplicationSlotInfo(
-            result.getString("slot_name"),
-            ReplicationType.PHYSICAL,
-            LogSequenceNumber.valueOf(result.getString("consistent_point")),
-            result.getString("snapshot_name"),
-            result.getString("output_plugin"));
-      } else {
-        throw new PSQLException(
-            GT.tr("{0} returned no results"),
-            PSQLState.OBJECT_NOT_IN_STATE);
-      }
-    } finally {
-      if (result != null) {
-        result.close();
-      }
-      statement.close();
+    @Override
+    protected ChainedPhysicalCreateSlotBuilder self() {
+        return this;
+    }
+
+    @Override
+    public ReplicationSlotInfo make() throws SQLException {
+        if (slotName == null || slotName.isEmpty()) {
+            throw new IllegalArgumentException("Replication slotName can't be null");
+        }
+
+        Statement statement = connection.createStatement();
+        ResultSet result = null;
+        ReplicationSlotInfo slotInfo = null;
+        try {
+            String sql = String.format(
+                    "CREATE_REPLICATION_SLOT %s %s PHYSICAL",
+                    slotName,
+                    temporaryOption ? "TEMPORARY" : ""
+            );
+            statement.execute(sql);
+            result = statement.getResultSet();
+            if (result != null && result.next()) {
+                slotInfo = new ReplicationSlotInfo(
+                        result.getString("slot_name"),
+                        ReplicationType.PHYSICAL,
+                        LogSequenceNumber.valueOf(result.getString("consistent_point")),
+                        result.getString("snapshot_name"),
+                        result.getString("output_plugin"));
+            } else {
+                throw new PSQLException(
+                        GT.tr("{0} returned no results"),
+                        PSQLState.OBJECT_NOT_IN_STATE);
+            }
+        } finally {
+            if (result != null) {
+                result.close();
+            }
+            statement.close();
+        }
+        return slotInfo;
     }
-    return slotInfo;
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/replication/fluent/physical/PhysicalStreamBuilder.java b/pgjdbc/src/main/java/org/postgresql/replication/fluent/physical/PhysicalStreamBuilder.java
index eb177d0..324743e 100644
--- a/pgjdbc/src/main/java/org/postgresql/replication/fluent/physical/PhysicalStreamBuilder.java
+++ b/pgjdbc/src/main/java/org/postgresql/replication/fluent/physical/PhysicalStreamBuilder.java
@@ -12,40 +12,40 @@ import org.postgresql.replication.fluent.AbstractStreamBuilder;
 import java.sql.SQLException;
 
 public class PhysicalStreamBuilder extends AbstractStreamBuilder<ChainedPhysicalStreamBuilder>
-    implements ChainedPhysicalStreamBuilder, PhysicalReplicationOptions {
+        implements ChainedPhysicalStreamBuilder, PhysicalReplicationOptions {
 
-  private final StartPhysicalReplicationCallback startCallback;
+    private final StartPhysicalReplicationCallback startCallback;
 
-  /**
-   * @param startCallback not null callback that should be execute after build parameters for start
-   *                      replication
-   */
-  public PhysicalStreamBuilder(StartPhysicalReplicationCallback startCallback) {
-    this.startCallback = startCallback;
-  }
+    /**
+     * @param startCallback not null callback that should be execute after build parameters for start
+     *                      replication
+     */
+    public PhysicalStreamBuilder(StartPhysicalReplicationCallback startCallback) {
+        this.startCallback = startCallback;
+    }
 
-  @Override
-  protected ChainedPhysicalStreamBuilder self() {
-    return this;
-  }
+    @Override
+    protected ChainedPhysicalStreamBuilder self() {
+        return this;
+    }
 
-  @Override
-  public PGReplicationStream start() throws SQLException {
-    return this.startCallback.start(this);
-  }
+    @Override
+    public PGReplicationStream start() throws SQLException {
+        return this.startCallback.start(this);
+    }
 
-  @Override
-  public String getSlotName() {
-    return slotName;
-  }
+    @Override
+    public String getSlotName() {
+        return slotName;
+    }
 
-  @Override
-  public LogSequenceNumber getStartLSNPosition() {
-    return startPosition;
-  }
+    @Override
+    public LogSequenceNumber getStartLSNPosition() {
+        return startPosition;
+    }
 
-  @Override
-  public int getStatusInterval() {
-    return statusIntervalMs;
-  }
+    @Override
+    public int getStatusInterval() {
+        return statusIntervalMs;
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/replication/fluent/physical/StartPhysicalReplicationCallback.java b/pgjdbc/src/main/java/org/postgresql/replication/fluent/physical/StartPhysicalReplicationCallback.java
index 543edcb..e6b254c 100644
--- a/pgjdbc/src/main/java/org/postgresql/replication/fluent/physical/StartPhysicalReplicationCallback.java
+++ b/pgjdbc/src/main/java/org/postgresql/replication/fluent/physical/StartPhysicalReplicationCallback.java
@@ -10,5 +10,5 @@ import org.postgresql.replication.PGReplicationStream;
 import java.sql.SQLException;
 
 public interface StartPhysicalReplicationCallback {
-  PGReplicationStream start(PhysicalReplicationOptions options) throws SQLException;
+    PGReplicationStream start(PhysicalReplicationOptions options) throws SQLException;
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/scram/ScramAuthenticator.java b/pgjdbc/src/main/java/org/postgresql/scram/ScramAuthenticator.java
index f673e34..052327a 100644
--- a/pgjdbc/src/main/java/org/postgresql/scram/ScramAuthenticator.java
+++ b/pgjdbc/src/main/java/org/postgresql/scram/ScramAuthenticator.java
@@ -26,170 +26,170 @@ import java.util.logging.Level;
 import java.util.logging.Logger;
 
 public class ScramAuthenticator {
-  private static final Logger LOGGER = Logger.getLogger(ScramAuthenticator.class.getName());
+    private static final Logger LOGGER = Logger.getLogger(ScramAuthenticator.class.getName());
 
-  private final String user;
-  private final String password;
-  private final PGStream pgStream;
-  private ScramClient scramClient;
-  private ScramSession scramSession;
-  private ScramSession.ClientFinalProcessor clientFinalProcessor;
+    private final String user;
+    private final String password;
+    private final PGStream pgStream;
+    private ScramClient scramClient;
+    private ScramSession scramSession;
+    private ScramSession.ClientFinalProcessor clientFinalProcessor;
 
-  private interface BodySender {
-    void sendBody(PGStream pgStream) throws IOException;
-  }
-
-  private void sendAuthenticationMessage(int bodyLength, BodySender bodySender)
-      throws IOException {
-    pgStream.sendChar('p');
-    pgStream.sendInteger4(Integer.SIZE / Byte.SIZE + bodyLength);
-    bodySender.sendBody(pgStream);
-    pgStream.flush();
-  }
-
-  public ScramAuthenticator(String user, String password, PGStream pgStream) {
-    this.user = user;
-    this.password = password;
-    this.pgStream = pgStream;
-  }
-
-  public void processServerMechanismsAndInit() throws IOException, PSQLException {
-    List<String> mechanisms = new ArrayList<>();
-    do {
-      mechanisms.add(pgStream.receiveString());
-    } while (pgStream.peekChar() != 0);
-    int c = pgStream.receiveChar();
-    assert c == 0;
-    if (mechanisms.isEmpty()) {
-      throw new PSQLException(
-          GT.tr("No SCRAM mechanism(s) advertised by the server"),
-          PSQLState.CONNECTION_REJECTED
-      );
+    public ScramAuthenticator(String user, String password, PGStream pgStream) {
+        this.user = user;
+        this.password = password;
+        this.pgStream = pgStream;
     }
 
-    ScramClient scramClient;
-    try {
-      scramClient = ScramClient
-          .channelBinding(ScramClient.ChannelBinding.NO)
-          .stringPreparation(StringPreparations.SASL_PREPARATION)
-          .selectMechanismBasedOnServerAdvertised(mechanisms.toArray(new String[]{}))
-          .setup();
-    } catch (IllegalArgumentException e) {
-      throw new PSQLException(
-          GT.tr("Invalid or unsupported by client SCRAM mechanisms", e),
-          PSQLState.CONNECTION_REJECTED
-      );
-    }
-    if (LOGGER.isLoggable(Level.FINEST)) {
-      LOGGER.log(Level.FINEST, " Using SCRAM mechanism {0}", scramClient.getScramMechanism().getName());
+    private void sendAuthenticationMessage(int bodyLength, BodySender bodySender)
+            throws IOException {
+        pgStream.sendChar('p');
+        pgStream.sendInteger4(Integer.SIZE / Byte.SIZE + bodyLength);
+        bodySender.sendBody(pgStream);
+        pgStream.flush();
     }
 
-    this.scramClient = scramClient;
-    scramSession =
-        scramClient.scramSession("*");   // Real username is ignored by server, uses startup one
-  }
-
-  public void sendScramClientFirstMessage() throws IOException {
-    ScramSession scramSession = this.scramSession;
-    String clientFirstMessage = scramSession.clientFirstMessage();
-    LOGGER.log(Level.FINEST, " FE=> SASLInitialResponse( {0} )", clientFirstMessage);
-
-    ScramClient scramClient = this.scramClient;
-    String scramMechanismName = scramClient.getScramMechanism().getName();
-    final byte[] scramMechanismNameBytes = scramMechanismName.getBytes(StandardCharsets.UTF_8);
-    final byte[] clientFirstMessageBytes = clientFirstMessage.getBytes(StandardCharsets.UTF_8);
-    sendAuthenticationMessage(
-        (scramMechanismNameBytes.length + 1) + 4 + clientFirstMessageBytes.length,
-        new BodySender() {
-          @Override
-          public void sendBody(PGStream pgStream) throws IOException {
-            pgStream.send(scramMechanismNameBytes);
-            pgStream.sendChar(0); // List terminated in '\0'
-            pgStream.sendInteger4(clientFirstMessageBytes.length);
-            pgStream.send(clientFirstMessageBytes);
-          }
+    public void processServerMechanismsAndInit() throws IOException, PSQLException {
+        List<String> mechanisms = new ArrayList<>();
+        do {
+            mechanisms.add(pgStream.receiveString());
+        } while (pgStream.peekChar() != 0);
+        int c = pgStream.receiveChar();
+        assert c == 0;
+        if (mechanisms.isEmpty()) {
+            throw new PSQLException(
+                    GT.tr("No SCRAM mechanism(s) advertised by the server"),
+                    PSQLState.CONNECTION_REJECTED
+            );
         }
-    );
-  }
 
-  public void processServerFirstMessage(int length) throws IOException, PSQLException {
-    String serverFirstMessage = pgStream.receiveString(length);
-    LOGGER.log(Level.FINEST, " <=BE AuthenticationSASLContinue( {0} )", serverFirstMessage);
-
-    ScramSession scramSession = this.scramSession;
-    if (scramSession == null) {
-      throw new PSQLException(
-          GT.tr("SCRAM session does not exist"),
-          PSQLState.UNKNOWN_STATE
-      );
-    }
-
-    ScramSession.ServerFirstProcessor serverFirstProcessor;
-    try {
-      serverFirstProcessor = scramSession.receiveServerFirstMessage(serverFirstMessage);
-    } catch (ScramException e) {
-      throw new PSQLException(
-          GT.tr("Invalid server-first-message: {0}", serverFirstMessage),
-          PSQLState.CONNECTION_REJECTED,
-          e
-      );
-    }
-    if (LOGGER.isLoggable(Level.FINEST)) {
-      LOGGER.log(Level.FINEST,
-                 " <=BE AuthenticationSASLContinue(salt={0}, iterations={1})",
-                 new Object[]{serverFirstProcessor.getSalt(), serverFirstProcessor.getIteration()}
-                 );
-    }
-
-    clientFinalProcessor = serverFirstProcessor.clientFinalProcessor(password);
-
-    String clientFinalMessage = clientFinalProcessor.clientFinalMessage();
-    LOGGER.log(Level.FINEST, " FE=> SASLResponse( {0} )", clientFinalMessage);
-
-    final byte[] clientFinalMessageBytes = clientFinalMessage.getBytes(StandardCharsets.UTF_8);
-    sendAuthenticationMessage(
-        clientFinalMessageBytes.length,
-        new BodySender() {
-          @Override
-          public void sendBody(PGStream pgStream) throws IOException {
-            pgStream.send(clientFinalMessageBytes);
-          }
+        ScramClient scramClient;
+        try {
+            scramClient = ScramClient
+                    .channelBinding(ScramClient.ChannelBinding.NO)
+                    .stringPreparation(StringPreparations.SASL_PREPARATION)
+                    .selectMechanismBasedOnServerAdvertised(mechanisms.toArray(new String[]{}))
+                    .setup();
+        } catch (IllegalArgumentException e) {
+            throw new PSQLException(
+                    GT.tr("Invalid or unsupported by client SCRAM mechanisms", e),
+                    PSQLState.CONNECTION_REJECTED
+            );
+        }
+        if (LOGGER.isLoggable(Level.FINEST)) {
+            LOGGER.log(Level.FINEST, " Using SCRAM mechanism {0}", scramClient.getScramMechanism().getName());
         }
-    );
-  }
 
-  public void verifyServerSignature(int length) throws IOException, PSQLException {
-    String serverFinalMessage = pgStream.receiveString(length);
-    LOGGER.log(Level.FINEST, " <=BE AuthenticationSASLFinal( {0} )", serverFinalMessage);
+        this.scramClient = scramClient;
+        scramSession =
+                scramClient.scramSession("*");   // Real username is ignored by server, uses startup one
+    }
 
-    ScramSession.ClientFinalProcessor clientFinalProcessor = this.clientFinalProcessor;
-    if (clientFinalProcessor == null) {
-      throw new PSQLException(
-          GT.tr("SCRAM client final processor does not exist"),
-          PSQLState.UNKNOWN_STATE
-      );
+    public void sendScramClientFirstMessage() throws IOException {
+        ScramSession scramSession = this.scramSession;
+        String clientFirstMessage = scramSession.clientFirstMessage();
+        LOGGER.log(Level.FINEST, " FE=> SASLInitialResponse( {0} )", clientFirstMessage);
+
+        ScramClient scramClient = this.scramClient;
+        String scramMechanismName = scramClient.getScramMechanism().getName();
+        final byte[] scramMechanismNameBytes = scramMechanismName.getBytes(StandardCharsets.UTF_8);
+        final byte[] clientFirstMessageBytes = clientFirstMessage.getBytes(StandardCharsets.UTF_8);
+        sendAuthenticationMessage(
+                (scramMechanismNameBytes.length + 1) + 4 + clientFirstMessageBytes.length,
+                new BodySender() {
+                    @Override
+                    public void sendBody(PGStream pgStream) throws IOException {
+                        pgStream.send(scramMechanismNameBytes);
+                        pgStream.sendChar(0); // List terminated in '\0'
+                        pgStream.sendInteger4(clientFirstMessageBytes.length);
+                        pgStream.send(clientFirstMessageBytes);
+                    }
+                }
+        );
     }
-    try {
-      clientFinalProcessor.receiveServerFinalMessage(serverFinalMessage);
-    } catch (ScramParseException e) {
-      throw new PSQLException(
-          GT.tr("Invalid server-final-message: {0}", serverFinalMessage),
-          PSQLState.CONNECTION_REJECTED,
-          e
-      );
-    } catch (ScramServerErrorException e) {
-      throw new PSQLException(
-          GT.tr("SCRAM authentication failed, server returned error: {0}",
-              e.getError().getErrorMessage()),
-          PSQLState.CONNECTION_REJECTED,
-          e
-      );
-    } catch (ScramInvalidServerSignatureException e) {
-      throw new PSQLException(
-          GT.tr("Invalid server SCRAM signature"),
-          PSQLState.CONNECTION_REJECTED,
-          e
-      );
+
+    public void processServerFirstMessage(int length) throws IOException, PSQLException {
+        String serverFirstMessage = pgStream.receiveString(length);
+        LOGGER.log(Level.FINEST, " <=BE AuthenticationSASLContinue( {0} )", serverFirstMessage);
+
+        ScramSession scramSession = this.scramSession;
+        if (scramSession == null) {
+            throw new PSQLException(
+                    GT.tr("SCRAM session does not exist"),
+                    PSQLState.UNKNOWN_STATE
+            );
+        }
+
+        ScramSession.ServerFirstProcessor serverFirstProcessor;
+        try {
+            serverFirstProcessor = scramSession.receiveServerFirstMessage(serverFirstMessage);
+        } catch (ScramException e) {
+            throw new PSQLException(
+                    GT.tr("Invalid server-first-message: {0}", serverFirstMessage),
+                    PSQLState.CONNECTION_REJECTED,
+                    e
+            );
+        }
+        if (LOGGER.isLoggable(Level.FINEST)) {
+            LOGGER.log(Level.FINEST,
+                    " <=BE AuthenticationSASLContinue(salt={0}, iterations={1})",
+                    new Object[]{serverFirstProcessor.getSalt(), serverFirstProcessor.getIteration()}
+            );
+        }
+
+        clientFinalProcessor = serverFirstProcessor.clientFinalProcessor(password);
+
+        String clientFinalMessage = clientFinalProcessor.clientFinalMessage();
+        LOGGER.log(Level.FINEST, " FE=> SASLResponse( {0} )", clientFinalMessage);
+
+        final byte[] clientFinalMessageBytes = clientFinalMessage.getBytes(StandardCharsets.UTF_8);
+        sendAuthenticationMessage(
+                clientFinalMessageBytes.length,
+                new BodySender() {
+                    @Override
+                    public void sendBody(PGStream pgStream) throws IOException {
+                        pgStream.send(clientFinalMessageBytes);
+                    }
+                }
+        );
+    }
+
+    public void verifyServerSignature(int length) throws IOException, PSQLException {
+        String serverFinalMessage = pgStream.receiveString(length);
+        LOGGER.log(Level.FINEST, " <=BE AuthenticationSASLFinal( {0} )", serverFinalMessage);
+
+        ScramSession.ClientFinalProcessor clientFinalProcessor = this.clientFinalProcessor;
+        if (clientFinalProcessor == null) {
+            throw new PSQLException(
+                    GT.tr("SCRAM client final processor does not exist"),
+                    PSQLState.UNKNOWN_STATE
+            );
+        }
+        try {
+            clientFinalProcessor.receiveServerFinalMessage(serverFinalMessage);
+        } catch (ScramParseException e) {
+            throw new PSQLException(
+                    GT.tr("Invalid server-final-message: {0}", serverFinalMessage),
+                    PSQLState.CONNECTION_REJECTED,
+                    e
+            );
+        } catch (ScramServerErrorException e) {
+            throw new PSQLException(
+                    GT.tr("SCRAM authentication failed, server returned error: {0}",
+                            e.getError().getErrorMessage()),
+                    PSQLState.CONNECTION_REJECTED,
+                    e
+            );
+        } catch (ScramInvalidServerSignatureException e) {
+            throw new PSQLException(
+                    GT.tr("Invalid server SCRAM signature"),
+                    PSQLState.CONNECTION_REJECTED,
+                    e
+            );
+        }
+    }
+
+    private interface BodySender {
+        void sendBody(PGStream pgStream) throws IOException;
     }
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/ssl/DbKeyStoreSocketFactory.java b/pgjdbc/src/main/java/org/postgresql/ssl/DbKeyStoreSocketFactory.java
index d7e375f..c4bd608 100644
--- a/pgjdbc/src/main/java/org/postgresql/ssl/DbKeyStoreSocketFactory.java
+++ b/pgjdbc/src/main/java/org/postgresql/ssl/DbKeyStoreSocketFactory.java
@@ -10,60 +10,59 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.security.GeneralSecurityException;
 import java.security.KeyStore;
-
 import javax.net.ssl.KeyManagerFactory;
 import javax.net.ssl.SSLContext;
 import javax.net.ssl.TrustManagerFactory;
 
 public abstract class DbKeyStoreSocketFactory extends WrappedFactory {
-  /*
-   * Populate the WrappedFactory member factory with an SSL Socket Factory that uses the JKS
-   * keystore provided by getKeyStorePassword() and getKeyStoreStream(). A subclass only needs to
-   * implement these two methods. The key store will be used both for selecting a private key
-   * certificate to send to the server, as well as checking the server's certificate against a set
-   * of trusted CAs.
-   */
-  public DbKeyStoreSocketFactory() throws DbKeyStoreSocketException {
-    KeyStore keys;
-    char[] password;
-    try {
-      keys = KeyStore.getInstance("JKS");
-      // Call of the sub-class method during object initialization is generally a bad idea
-      password = getKeyStorePassword();
-      keys.load(getKeyStoreStream(), password);
-    } catch (GeneralSecurityException gse) {
-      throw new DbKeyStoreSocketException("Failed to load keystore: " + gse.getMessage());
-    } catch (FileNotFoundException fnfe) {
-      throw new DbKeyStoreSocketException("Failed to find keystore file." + fnfe.getMessage());
-    } catch (IOException ioe) {
-      throw new DbKeyStoreSocketException("Failed to read keystore file: " + ioe.getMessage());
+    /*
+     * Populate the WrappedFactory member factory with an SSL Socket Factory that uses the JKS
+     * keystore provided by getKeyStorePassword() and getKeyStoreStream(). A subclass only needs to
+     * implement these two methods. The key store will be used both for selecting a private key
+     * certificate to send to the server, as well as checking the server's certificate against a set
+     * of trusted CAs.
+     */
+    public DbKeyStoreSocketFactory() throws DbKeyStoreSocketException {
+        KeyStore keys;
+        char[] password;
+        try {
+            keys = KeyStore.getInstance("JKS");
+            // Call of the sub-class method during object initialization is generally a bad idea
+            password = getKeyStorePassword();
+            keys.load(getKeyStoreStream(), password);
+        } catch (GeneralSecurityException gse) {
+            throw new DbKeyStoreSocketException("Failed to load keystore: " + gse.getMessage());
+        } catch (FileNotFoundException fnfe) {
+            throw new DbKeyStoreSocketException("Failed to find keystore file." + fnfe.getMessage());
+        } catch (IOException ioe) {
+            throw new DbKeyStoreSocketException("Failed to read keystore file: " + ioe.getMessage());
+        }
+        try {
+            KeyManagerFactory keyfact =
+                    KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm());
+            keyfact.init(keys, password);
+
+            TrustManagerFactory trustfact =
+                    TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm());
+            trustfact.init(keys);
+
+            SSLContext ctx = SSLContext.getInstance("SSL");
+            ctx.init(keyfact.getKeyManagers(), trustfact.getTrustManagers(), null);
+            factory = ctx.getSocketFactory();
+        } catch (GeneralSecurityException gse) {
+            throw new DbKeyStoreSocketException(
+                    "Failed to set up database socket factory: " + gse.getMessage());
+        }
     }
-    try {
-      KeyManagerFactory keyfact =
-          KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm());
-      keyfact.init(keys, password);
 
-      TrustManagerFactory trustfact =
-          TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm());
-      trustfact.init(keys);
+    public abstract char[] getKeyStorePassword();
 
-      SSLContext ctx = SSLContext.getInstance("SSL");
-      ctx.init(keyfact.getKeyManagers(), trustfact.getTrustManagers(), null);
-      factory = ctx.getSocketFactory();
-    } catch (GeneralSecurityException gse) {
-      throw new DbKeyStoreSocketException(
-          "Failed to set up database socket factory: " + gse.getMessage());
+    public abstract InputStream getKeyStoreStream();
+
+    @SuppressWarnings("serial")
+    public static class DbKeyStoreSocketException extends Exception {
+        public DbKeyStoreSocketException(String message) {
+            super(message);
+        }
     }
-  }
-
-  public abstract char[] getKeyStorePassword();
-
-  public abstract InputStream getKeyStoreStream();
-
-  @SuppressWarnings("serial")
-  public static class DbKeyStoreSocketException extends Exception {
-    public DbKeyStoreSocketException(String message) {
-      super(message);
-    }
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/ssl/DefaultJavaSSLFactory.java b/pgjdbc/src/main/java/org/postgresql/ssl/DefaultJavaSSLFactory.java
index a772e18..f1bc8b8 100644
--- a/pgjdbc/src/main/java/org/postgresql/ssl/DefaultJavaSSLFactory.java
+++ b/pgjdbc/src/main/java/org/postgresql/ssl/DefaultJavaSSLFactory.java
@@ -6,7 +6,6 @@
 package org.postgresql.ssl;
 
 import java.util.Properties;
-
 import javax.net.ssl.SSLSocketFactory;
 
 /**
@@ -15,7 +14,7 @@ import javax.net.ssl.SSLSocketFactory;
  * connection when default truststore lacks certificates to validate server.
  */
 public class DefaultJavaSSLFactory extends WrappedFactory {
-  public DefaultJavaSSLFactory(Properties info) {
-    factory = (SSLSocketFactory) SSLSocketFactory.getDefault();
-  }
+    public DefaultJavaSSLFactory(Properties info) {
+        factory = (SSLSocketFactory) SSLSocketFactory.getDefault();
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/ssl/LazyKeyManager.java b/pgjdbc/src/main/java/org/postgresql/ssl/LazyKeyManager.java
index e3c46b5..3c15c37 100644
--- a/pgjdbc/src/main/java/org/postgresql/ssl/LazyKeyManager.java
+++ b/pgjdbc/src/main/java/org/postgresql/ssl/LazyKeyManager.java
@@ -5,10 +5,6 @@
 
 package org.postgresql.ssl;
 
-import org.postgresql.util.GT;
-import org.postgresql.util.PSQLException;
-import org.postgresql.util.PSQLState;
-
 import java.io.FileInputStream;
 import java.io.FileNotFoundException;
 import java.io.IOException;
@@ -29,7 +25,6 @@ import java.security.spec.InvalidKeySpecException;
 import java.security.spec.KeySpec;
 import java.security.spec.PKCS8EncodedKeySpec;
 import java.util.Collection;
-
 import javax.crypto.Cipher;
 import javax.crypto.EncryptedPrivateKeyInfo;
 import javax.crypto.NoSuchPaddingException;
@@ -41,257 +36,260 @@ import javax.security.auth.callback.CallbackHandler;
 import javax.security.auth.callback.PasswordCallback;
 import javax.security.auth.callback.UnsupportedCallbackException;
 import javax.security.auth.x500.X500Principal;
+import org.postgresql.util.GT;
+import org.postgresql.util.PSQLException;
+import org.postgresql.util.PSQLState;
 
 /**
  * A Key manager that only loads the keys, if necessary.
  */
 public class LazyKeyManager implements X509KeyManager {
-  private X509Certificate [] cert;
-  private PrivateKey key;
-  private final String certfile;
-  private final String keyfile;
-  private final CallbackHandler cbh;
-  private final boolean defaultfile;
-  private PSQLException error;
+    private final String certfile;
+    private final String keyfile;
+    private final CallbackHandler cbh;
+    private final boolean defaultfile;
+    private X509Certificate[] cert;
+    private PrivateKey key;
+    private PSQLException error;
 
-  /**
-   * Constructor. certfile and keyfile can be null, in that case no certificate is presented to the
-   * server.
-   *
-   * @param certfile certfile
-   * @param keyfile key file
-   * @param cbh callback handler
-   * @param defaultfile default file
-   */
-  public LazyKeyManager(String certfile, String keyfile, CallbackHandler cbh, boolean defaultfile) {
-    this.certfile = certfile;
-    this.keyfile = keyfile;
-    this.cbh = cbh;
-    this.defaultfile = defaultfile;
-  }
-
-  /**
-   * getCertificateChain and getPrivateKey cannot throw exceptions, therefore any exception is stored
-   * in {@link #error} and can be raised by this method.
-   *
-   * @throws PSQLException if any exception is stored in {@link #error} and can be raised
-   */
-  public void throwKeyManagerException() throws PSQLException {
-    if (error != null) {
-      throw error;
+    /**
+     * Constructor. certfile and keyfile can be null, in that case no certificate is presented to the
+     * server.
+     *
+     * @param certfile    certfile
+     * @param keyfile     key file
+     * @param cbh         callback handler
+     * @param defaultfile default file
+     */
+    public LazyKeyManager(String certfile, String keyfile, CallbackHandler cbh, boolean defaultfile) {
+        this.certfile = certfile;
+        this.keyfile = keyfile;
+        this.cbh = cbh;
+        this.defaultfile = defaultfile;
     }
-  }
 
-  @Override
-  public String chooseClientAlias(String[] keyType,
-      Principal [] issuers, Socket socket) {
-    if (certfile == null) {
-      return null;
-    } else {
-      if (issuers == null || issuers.length == 0) {
-        // Postgres 8.4 and earlier do not send the list of accepted certificate authorities
-        // to the client. See BUG #5468. We only hope, that our certificate will be accepted.
-        return "user";
-      } else {
-        // Sending a wrong certificate makes the connection rejected, even, if clientcert=0 in
-        // pg_hba.conf.
-        // therefore we only send our certificate, if the issuer is listed in issuers
-        X509Certificate[] certchain = getCertificateChain("user");
-        if (certchain == null) {
-          return null;
+    private static byte[] readFileFully(String path) throws IOException {
+        RandomAccessFile raf = new RandomAccessFile(path, "r");
+        try {
+            byte[] ret = new byte[(int) raf.length()];
+            raf.readFully(ret);
+            return ret;
+        } finally {
+            raf.close();
+        }
+    }
+
+    /**
+     * getCertificateChain and getPrivateKey cannot throw exceptions, therefore any exception is stored
+     * in {@link #error} and can be raised by this method.
+     *
+     * @throws PSQLException if any exception is stored in {@link #error} and can be raised
+     */
+    public void throwKeyManagerException() throws PSQLException {
+        if (error != null) {
+            throw error;
+        }
+    }
+
+    @Override
+    public String chooseClientAlias(String[] keyType,
+                                    Principal[] issuers, Socket socket) {
+        if (certfile == null) {
+            return null;
         } else {
-          X509Certificate cert = certchain[certchain.length - 1];
-          X500Principal ourissuer = cert.getIssuerX500Principal();
-          String certKeyType = cert.getPublicKey().getAlgorithm();
-          boolean keyTypeFound = false;
-          boolean found = false;
-          if (keyType != null && keyType.length > 0) {
-            for (String kt : keyType) {
-              if (kt.equalsIgnoreCase(certKeyType)) {
-                keyTypeFound = true;
-              }
-            }
-          } else {
-            // If no key types were passed in, assume we don't care
-            // about checking that the cert uses a particular key type.
-            keyTypeFound = true;
-          }
-          if (keyTypeFound) {
-            for (Principal issuer : issuers) {
-              if (ourissuer.equals(issuer)) {
-                found = keyTypeFound;
-              }
-            }
-          }
-          return found ? "user" : null;
-        }
-      }
-    }
-  }
-
-  @Override
-  public String chooseServerAlias(String keyType,
-      Principal [] issuers, Socket socket) {
-    return null; // We are not a server
-  }
-
-  @Override
-  public X509Certificate [] getCertificateChain(String alias) {
-    if (cert == null && certfile != null) {
-      // If certfile is null, we do not load the certificate
-      // The certificate must be loaded
-      CertificateFactory cf;
-      try {
-        cf = CertificateFactory.getInstance("X.509");
-      } catch (CertificateException ex) {
-        // For some strange reason it throws CertificateException instead of
-        // NoSuchAlgorithmException...
-        error = new PSQLException(GT.tr(
-            "Could not find a java cryptographic algorithm: X.509 CertificateFactory not available."),
-            PSQLState.CONNECTION_FAILURE, ex);
-        return null;
-      }
-      Collection<? extends Certificate> certs;
-      FileInputStream certfileStream = null;
-      try {
-        certfileStream = new FileInputStream(certfile);
-        certs = cf.generateCertificates(certfileStream);
-      } catch (FileNotFoundException ioex) {
-        if (!defaultfile) { // It is not an error if there is no file at the default location
-          error = new PSQLException(
-              GT.tr("Could not open SSL certificate file {0}.", certfile),
-              PSQLState.CONNECTION_FAILURE, ioex);
-        }
-        return null;
-      } catch (CertificateException gsex) {
-        error = new PSQLException(GT.tr("Loading the SSL certificate {0} into a KeyManager failed.",
-            certfile), PSQLState.CONNECTION_FAILURE, gsex);
-        return null;
-      } finally {
-        if (certfileStream != null) {
-          try {
-            certfileStream.close();
-          } catch (IOException ioex) {
-            if (!defaultfile) { // It is not an error if there is no file at the default location
-              error = new PSQLException(
-                  GT.tr("Could not close SSL certificate file {0}.", certfile),
-                  PSQLState.CONNECTION_FAILURE, ioex);
-            }
-          }
-        }
-      }
-      cert = certs.toArray(new X509Certificate[0]);
-    }
-    return cert;
-  }
-
-  @Override
-  public String [] getClientAliases(String keyType,
-      Principal [] issuers) {
-    String alias = chooseClientAlias(new String[]{keyType}, issuers, (Socket) null);
-    return alias == null ? new String[]{} : new String[]{alias};
-  }
-
-  private static byte[] readFileFully(String path) throws IOException {
-    RandomAccessFile raf = new RandomAccessFile(path, "r");
-    try {
-      byte[] ret = new byte[(int) raf.length()];
-      raf.readFully(ret);
-      return ret;
-    } finally {
-      raf.close();
-    }
-  }
-
-  @Override
-  public PrivateKey getPrivateKey(String alias) {
-    try {
-      if (key == null && keyfile != null) {
-        // If keyfile is null, we do not load the key
-        // The private key must be loaded
-        X509Certificate[] cert = getCertificateChain("user");
-        if (cert == null || cert.length == 0) { // We need the certificate for the algorithm
-          return null;
-        }
-
-        byte[] keydata;
-        try {
-          keydata = readFileFully(keyfile);
-        } catch (FileNotFoundException ex) {
-          if (!defaultfile) {
-            // It is not an error if there is no file at the default location
-            throw ex;
-          }
-          return null;
-        }
-
-        KeyFactory kf = KeyFactory.getInstance(cert[0].getPublicKey().getAlgorithm());
-        try {
-          KeySpec pkcs8KeySpec = new PKCS8EncodedKeySpec(keydata);
-          key = kf.generatePrivate(pkcs8KeySpec);
-        } catch (InvalidKeySpecException ex) {
-          // The key might be password protected
-          EncryptedPrivateKeyInfo ePKInfo = new EncryptedPrivateKeyInfo(keydata);
-          Cipher cipher;
-          try {
-            cipher = Cipher.getInstance(ePKInfo.getAlgName());
-          } catch (NoSuchPaddingException npex) {
-            // Why is it not a subclass of NoSuchAlgorithmException?
-            throw new NoSuchAlgorithmException(npex.getMessage(), npex);
-          }
-          // We call back for the password
-          PasswordCallback pwdcb = new PasswordCallback(GT.tr("Enter SSL password: "), false);
-          try {
-            cbh.handle(new Callback[]{pwdcb});
-          } catch (UnsupportedCallbackException ucex) {
-            if ((cbh instanceof LibPQFactory.ConsoleCallbackHandler)
-                && ("Console is not available".equals(ucex.getMessage()))) {
-              error = new PSQLException(GT
-                  .tr("Could not read password for SSL key file, console is not available."),
-                  PSQLState.CONNECTION_FAILURE, ucex);
+            if (issuers == null || issuers.length == 0) {
+                // Postgres 8.4 and earlier do not send the list of accepted certificate authorities
+                // to the client. See BUG #5468. We can only hope that our certificate will be accepted.
+                return "user";
             } else {
-              error =
-                  new PSQLException(
-                      GT.tr("Could not read password for SSL key file by callbackhandler {0}.",
-                              cbh.getClass().getName()),
-                      PSQLState.CONNECTION_FAILURE, ucex);
+                // Sending a wrong certificate causes the connection to be rejected, even if
+                // clientcert=0 is set in pg_hba.conf.
+                // Therefore we only send our certificate if the issuer is listed in issuers.
+                X509Certificate[] certchain = getCertificateChain("user");
+                if (certchain == null) {
+                    return null;
+                } else {
+                    X509Certificate cert = certchain[certchain.length - 1];
+                    X500Principal ourissuer = cert.getIssuerX500Principal();
+                    String certKeyType = cert.getPublicKey().getAlgorithm();
+                    boolean keyTypeFound = false;
+                    boolean found = false;
+                    if (keyType != null && keyType.length > 0) {
+                        for (String kt : keyType) {
+                            if (kt.equalsIgnoreCase(certKeyType)) {
+                                keyTypeFound = true;
+                            }
+                        }
+                    } else {
+                        // If no key types were passed in, assume we don't care
+                        // about checking that the cert uses a particular key type.
+                        keyTypeFound = true;
+                    }
+                    if (keyTypeFound) {
+                        for (Principal issuer : issuers) {
+                            if (ourissuer.equals(issuer)) {
+                                found = keyTypeFound;
+                            }
+                        }
+                    }
+                    return found ? "user" : null;
+                }
             }
-            return null;
-          }
-          try {
-            PBEKeySpec pbeKeySpec = new PBEKeySpec(pwdcb.getPassword());
-            pwdcb.clearPassword();
-            // Now create the Key from the PBEKeySpec
-            SecretKeyFactory skFac = SecretKeyFactory.getInstance(ePKInfo.getAlgName());
-            Key pbeKey = skFac.generateSecret(pbeKeySpec);
-            // Extract the iteration count and the salt
-            AlgorithmParameters algParams = ePKInfo.getAlgParameters();
-            cipher.init(Cipher.DECRYPT_MODE, pbeKey, algParams);
-            // Decrypt the encrypted private key into a PKCS8EncodedKeySpec
-            KeySpec pkcs8KeySpec = ePKInfo.getKeySpec(cipher);
-            key = kf.generatePrivate(pkcs8KeySpec);
-          } catch (GeneralSecurityException ikex) {
-            error = new PSQLException(
-                GT.tr("Could not decrypt SSL key file {0}.", keyfile),
-                PSQLState.CONNECTION_FAILURE, ikex);
-            return null;
-          }
         }
-      }
-    } catch (IOException ioex) {
-      error = new PSQLException(GT.tr("Could not read SSL key file {0}.", keyfile),
-          PSQLState.CONNECTION_FAILURE, ioex);
-    } catch (NoSuchAlgorithmException ex) {
-      error = new PSQLException(GT.tr("Could not find a java cryptographic algorithm: {0}.",
-              ex.getMessage()), PSQLState.CONNECTION_FAILURE, ex);
-      return null;
     }
 
-    return key;
-  }
+    @Override
+    public String chooseServerAlias(String keyType,
+                                    Principal[] issuers, Socket socket) {
+        return null; // We are not a server
+    }
 
-  @Override
-  public String [] getServerAliases(String keyType, Principal [] issuers) {
-    return new String[]{};
-  }
+    @Override
+    public X509Certificate[] getCertificateChain(String alias) {
+        if (cert == null && certfile != null) {
+            // The certificate has not been loaded yet, so load it now.
+            // (If certfile is null, no certificate is loaded at all.)
+            CertificateFactory cf;
+            try {
+                cf = CertificateFactory.getInstance("X.509");
+            } catch (CertificateException ex) {
+                // For some strange reason it throws CertificateException instead of
+                // NoSuchAlgorithmException...
+                error = new PSQLException(GT.tr(
+                        "Could not find a java cryptographic algorithm: X.509 CertificateFactory not available."),
+                        PSQLState.CONNECTION_FAILURE, ex);
+                return null;
+            }
+            Collection<? extends Certificate> certs;
+            FileInputStream certfileStream = null;
+            try {
+                certfileStream = new FileInputStream(certfile);
+                certs = cf.generateCertificates(certfileStream);
+            } catch (FileNotFoundException ioex) {
+                if (!defaultfile) { // It is not an error if there is no file at the default location
+                    error = new PSQLException(
+                            GT.tr("Could not open SSL certificate file {0}.", certfile),
+                            PSQLState.CONNECTION_FAILURE, ioex);
+                }
+                return null;
+            } catch (CertificateException gsex) {
+                error = new PSQLException(GT.tr("Loading the SSL certificate {0} into a KeyManager failed.",
+                        certfile), PSQLState.CONNECTION_FAILURE, gsex);
+                return null;
+            } finally {
+                if (certfileStream != null) {
+                    try {
+                        certfileStream.close();
+                    } catch (IOException ioex) {
+                        if (!defaultfile) { // It is not an error if there is no file at the default location
+                            error = new PSQLException(
+                                    GT.tr("Could not close SSL certificate file {0}.", certfile),
+                                    PSQLState.CONNECTION_FAILURE, ioex);
+                        }
+                    }
+                }
+            }
+            cert = certs.toArray(new X509Certificate[0]);
+        }
+        return cert;
+    }
+
+    @Override
+    public String[] getClientAliases(String keyType,
+                                     Principal[] issuers) {
+        String alias = chooseClientAlias(new String[]{keyType}, issuers, (Socket) null);
+        return alias == null ? new String[]{} : new String[]{alias};
+    }
+
+    @Override
+    public PrivateKey getPrivateKey(String alias) {
+        try {
+            if (key == null && keyfile != null) {
+                // The private key has not been loaded yet, so load it now.
+                // (If keyfile is null, no key is loaded at all.)
+                X509Certificate[] cert = getCertificateChain("user");
+                if (cert == null || cert.length == 0) { // We need the certificate for the algorithm
+                    return null;
+                }
+
+                byte[] keydata;
+                try {
+                    keydata = readFileFully(keyfile);
+                } catch (FileNotFoundException ex) {
+                    if (!defaultfile) {
+                        // It is not an error if there is no file at the default location
+                        throw ex;
+                    }
+                    return null;
+                }
+
+                KeyFactory kf = KeyFactory.getInstance(cert[0].getPublicKey().getAlgorithm());
+                try {
+                    KeySpec pkcs8KeySpec = new PKCS8EncodedKeySpec(keydata);
+                    key = kf.generatePrivate(pkcs8KeySpec);
+                } catch (InvalidKeySpecException ex) {
+                    // The key might be password protected
+                    EncryptedPrivateKeyInfo ePKInfo = new EncryptedPrivateKeyInfo(keydata);
+                    Cipher cipher;
+                    try {
+                        cipher = Cipher.getInstance(ePKInfo.getAlgName());
+                    } catch (NoSuchPaddingException npex) {
+                        // Why is it not a subclass of NoSuchAlgorithmException?
+                        throw new NoSuchAlgorithmException(npex.getMessage(), npex);
+                    }
+                    // We call back for the password
+                    PasswordCallback pwdcb = new PasswordCallback(GT.tr("Enter SSL password: "), false);
+                    try {
+                        cbh.handle(new Callback[]{pwdcb});
+                    } catch (UnsupportedCallbackException ucex) {
+                        if ((cbh instanceof LibPQFactory.ConsoleCallbackHandler)
+                                && ("Console is not available".equals(ucex.getMessage()))) {
+                            error = new PSQLException(GT
+                                    .tr("Could not read password for SSL key file, console is not available."),
+                                    PSQLState.CONNECTION_FAILURE, ucex);
+                        } else {
+                            error =
+                                    new PSQLException(
+                                            GT.tr("Could not read password for SSL key file by callbackhandler {0}.",
+                                                    cbh.getClass().getName()),
+                                            PSQLState.CONNECTION_FAILURE, ucex);
+                        }
+                        return null;
+                    }
+                    try {
+                        PBEKeySpec pbeKeySpec = new PBEKeySpec(pwdcb.getPassword());
+                        pwdcb.clearPassword();
+                        // Now create the Key from the PBEKeySpec
+                        SecretKeyFactory skFac = SecretKeyFactory.getInstance(ePKInfo.getAlgName());
+                        Key pbeKey = skFac.generateSecret(pbeKeySpec);
+                        // Extract the iteration count and the salt
+                        AlgorithmParameters algParams = ePKInfo.getAlgParameters();
+                        cipher.init(Cipher.DECRYPT_MODE, pbeKey, algParams);
+                        // Decrypt the encrypted private key into a PKCS8EncodedKeySpec
+                        KeySpec pkcs8KeySpec = ePKInfo.getKeySpec(cipher);
+                        key = kf.generatePrivate(pkcs8KeySpec);
+                    } catch (GeneralSecurityException ikex) {
+                        error = new PSQLException(
+                                GT.tr("Could not decrypt SSL key file {0}.", keyfile),
+                                PSQLState.CONNECTION_FAILURE, ikex);
+                        return null;
+                    }
+                }
+            }
+        } catch (IOException ioex) {
+            error = new PSQLException(GT.tr("Could not read SSL key file {0}.", keyfile),
+                    PSQLState.CONNECTION_FAILURE, ioex);
+        } catch (NoSuchAlgorithmException ex) {
+            error = new PSQLException(GT.tr("Could not find a java cryptographic algorithm: {0}.",
+                    ex.getMessage()), PSQLState.CONNECTION_FAILURE, ex);
+            return null;
+        }
+
+        return key;
+    }
+
+    @Override
+    public String[] getServerAliases(String keyType, Principal[] issuers) {
+        return new String[]{};
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/ssl/LibPQFactory.java b/pgjdbc/src/main/java/org/postgresql/ssl/LibPQFactory.java
index 9677d59..da96e3c 100644
--- a/pgjdbc/src/main/java/org/postgresql/ssl/LibPQFactory.java
+++ b/pgjdbc/src/main/java/org/postgresql/ssl/LibPQFactory.java
@@ -5,14 +5,6 @@
 
 package org.postgresql.ssl;
 
-import org.postgresql.PGProperty;
-import org.postgresql.jdbc.SslMode;
-import org.postgresql.ssl.NonValidatingFactory.NonValidatingTM;
-import org.postgresql.util.GT;
-import org.postgresql.util.ObjectFactory;
-import org.postgresql.util.PSQLException;
-import org.postgresql.util.PSQLState;
-
 import java.io.Console;
 import java.io.FileInputStream;
 import java.io.FileNotFoundException;
@@ -26,7 +18,6 @@ import java.security.cert.Certificate;
 import java.security.cert.CertificateFactory;
 import java.util.Locale;
 import java.util.Properties;
-
 import javax.net.ssl.KeyManager;
 import javax.net.ssl.SSLContext;
 import javax.net.ssl.TrustManager;
@@ -35,216 +26,223 @@ import javax.security.auth.callback.Callback;
 import javax.security.auth.callback.CallbackHandler;
 import javax.security.auth.callback.PasswordCallback;
 import javax.security.auth.callback.UnsupportedCallbackException;
+import org.postgresql.PGProperty;
+import org.postgresql.jdbc.SslMode;
+import org.postgresql.ssl.NonValidatingFactory.NonValidatingTM;
+import org.postgresql.util.GT;
+import org.postgresql.util.ObjectFactory;
+import org.postgresql.util.PSQLException;
+import org.postgresql.util.PSQLState;
 
 /**
  * Provide an SSLSocketFactory that is compatible with the libpq behaviour.
  */
 public class LibPQFactory extends WrappedFactory {
 
-  KeyManager km;
-  boolean defaultfile;
+    KeyManager km;
+    boolean defaultfile;
 
-  private CallbackHandler getCallbackHandler(LibPQFactory this, Properties info) throws PSQLException {
-    // Determine the callback handler
-    CallbackHandler cbh;
-    String sslpasswordcallback = PGProperty.SSL_PASSWORD_CALLBACK.getOrDefault(info);
-    if (sslpasswordcallback != null) {
-      try {
-        cbh = ObjectFactory.instantiate(CallbackHandler.class, sslpasswordcallback, info, false, null);
-      } catch (Exception e) {
-        throw new PSQLException(
-          GT.tr("The password callback class provided {0} could not be instantiated.",
-            sslpasswordcallback),
-          PSQLState.CONNECTION_FAILURE, e);
-      }
-    } else {
-      cbh = new ConsoleCallbackHandler(PGProperty.SSL_PASSWORD.getOrDefault(info));
-    }
-    return cbh;
-  }
-
-  private void initPk8(LibPQFactory this,
-      String sslkeyfile, String defaultdir, Properties info) throws  PSQLException {
-
-    // Load the client's certificate and key
-    String sslcertfile = PGProperty.SSL_CERT.getOrDefault(info);
-    if (sslcertfile == null) { // Fall back to default
-      defaultfile = true;
-      sslcertfile = defaultdir + "postgresql.crt";
-    }
-
-    // If the properties are empty, give null to prevent client key selection
-    km = new LazyKeyManager(("".equals(sslcertfile) ? null : sslcertfile),
-      ("".equals(sslkeyfile) ? null : sslkeyfile), getCallbackHandler(info), defaultfile);
-  }
-
-  private void initP12(LibPQFactory this,
-      String sslkeyfile, Properties info) throws PSQLException {
-    km = new PKCS12KeyManager(sslkeyfile, getCallbackHandler(info));
-  }
-
-  /**
-   * @param info the connection parameters The following parameters are used:
-   *        sslmode,sslcert,sslkey,sslrootcert,sslhostnameverifier,sslpasswordcallback,sslpassword
-   * @throws PSQLException if security error appears when initializing factory
-   */
-  public LibPQFactory(Properties info) throws PSQLException {
-    try {
-      SSLContext ctx = SSLContext.getInstance("TLS"); // or "SSL" ?
-
-      // Determining the default file location
-      String pathsep = System.getProperty("file.separator");
-      String defaultdir;
-
-      if (System.getProperty("os.name").toLowerCase(Locale.ROOT).contains("windows")) { // It is Windows
-        defaultdir = System.getenv("APPDATA") + pathsep + "postgresql" + pathsep;
-      } else {
-        defaultdir = System.getProperty("user.home") + pathsep + ".postgresql" + pathsep;
-      }
-
-      String sslkeyfile = PGProperty.SSL_KEY.getOrDefault(info);
-      if (sslkeyfile == null) { // Fall back to default
-        defaultfile = true;
-        sslkeyfile = defaultdir + "postgresql.pk8";
-      }
-
-      if (sslkeyfile.endsWith(".p12") || sslkeyfile.endsWith(".pfx")) {
-        initP12(sslkeyfile, info);
-      } else {
-        initPk8(sslkeyfile, defaultdir, info);
-      }
-
-      TrustManager[] tm;
-      SslMode sslMode = SslMode.of(info);
-      if (!sslMode.verifyCertificate()) {
-        // server validation is not required
-        tm = new TrustManager[]{new NonValidatingTM()};
-      } else {
-        // Load the server certificate
-
-        TrustManagerFactory tmf = TrustManagerFactory.getInstance("PKIX");
-        KeyStore ks;
+    /**
+     * @param info the connection parameters The following parameters are used:
+     *             sslmode,sslcert,sslkey,sslrootcert,sslhostnameverifier,sslpasswordcallback,sslpassword
+     * @throws PSQLException if security error appears when initializing factory
+     */
+    public LibPQFactory(Properties info) throws PSQLException {
         try {
-          ks = KeyStore.getInstance("jks");
-        } catch (KeyStoreException e) {
-          // this should never happen
-          throw new NoSuchAlgorithmException("jks KeyStore not available");
-        }
-        String sslrootcertfile = PGProperty.SSL_ROOT_CERT.getOrDefault(info);
-        if (sslrootcertfile == null) { // Fall back to default
-          sslrootcertfile = defaultdir + "root.crt";
-        }
-        FileInputStream fis;
-        try {
-          fis = new FileInputStream(sslrootcertfile); // NOSONAR
-        } catch (FileNotFoundException ex) {
-          throw new PSQLException(
-              GT.tr("Could not open SSL root certificate file {0}.", sslrootcertfile),
-              PSQLState.CONNECTION_FAILURE, ex);
-        }
-        try {
-          CertificateFactory cf = CertificateFactory.getInstance("X.509");
-          // Certificate[] certs = cf.generateCertificates(fis).toArray(new Certificate[]{}); //Does
-          // not work in java 1.4
-          Object[] certs = cf.generateCertificates(fis).toArray(new Certificate[]{});
-          ks.load(null, null);
-          for (int i = 0; i < certs.length; i++) {
-            ks.setCertificateEntry("cert" + i, (Certificate) certs[i]);
-          }
-          tmf.init(ks);
-        } catch (IOException ioex) {
-          throw new PSQLException(
-              GT.tr("Could not read SSL root certificate file {0}.", sslrootcertfile),
-              PSQLState.CONNECTION_FAILURE, ioex);
-        } catch (GeneralSecurityException gsex) {
-          throw new PSQLException(
-              GT.tr("Loading the SSL root certificate {0} into a TrustManager failed.",
-                      sslrootcertfile),
-              PSQLState.CONNECTION_FAILURE, gsex);
-        } finally {
-          try {
-            fis.close();
-          } catch (IOException e) {
-            /* ignore */
-          }
-        }
-        tm = tmf.getTrustManagers();
-      }
+            SSLContext ctx = SSLContext.getInstance("TLS"); // or "SSL" ?
 
-      // finally we can initialize the context
-      try {
-        KeyManager km = this.km;
-        ctx.init(km == null ? null : new KeyManager[]{km}, tm, null);
-      } catch (KeyManagementException ex) {
-        throw new PSQLException(GT.tr("Could not initialize SSL context."),
-            PSQLState.CONNECTION_FAILURE, ex);
-      }
+            // Determining the default file location
+            String pathsep = System.getProperty("file.separator");
+            String defaultdir;
 
-      factory = ctx.getSocketFactory();
-    } catch (NoSuchAlgorithmException ex) {
-      throw new PSQLException(GT.tr("Could not find a java cryptographic algorithm: {0}.",
-              ex.getMessage()), PSQLState.CONNECTION_FAILURE, ex);
+            if (System.getProperty("os.name").toLowerCase(Locale.ROOT).contains("windows")) { // It is Windows
+                defaultdir = System.getenv("APPDATA") + pathsep + "postgresql" + pathsep;
+            } else {
+                defaultdir = System.getProperty("user.home") + pathsep + ".postgresql" + pathsep;
+            }
+
+            String sslkeyfile = PGProperty.SSL_KEY.getOrDefault(info);
+            if (sslkeyfile == null) { // Fall back to default
+                defaultfile = true;
+                sslkeyfile = defaultdir + "postgresql.pk8";
+            }
+
+            if (sslkeyfile.endsWith(".p12") || sslkeyfile.endsWith(".pfx")) {
+                initP12(sslkeyfile, info);
+            } else {
+                initPk8(sslkeyfile, defaultdir, info);
+            }
+
+            TrustManager[] tm;
+            SslMode sslMode = SslMode.of(info);
+            if (!sslMode.verifyCertificate()) {
+                // server validation is not required
+                tm = new TrustManager[]{new NonValidatingTM()};
+            } else {
+                // Load the server certificate
+
+                TrustManagerFactory tmf = TrustManagerFactory.getInstance("PKIX");
+                KeyStore ks;
+                try {
+                    ks = KeyStore.getInstance("jks");
+                } catch (KeyStoreException e) {
+                    // this should never happen
+                    throw new NoSuchAlgorithmException("jks KeyStore not available");
+                }
+                String sslrootcertfile = PGProperty.SSL_ROOT_CERT.getOrDefault(info);
+                if (sslrootcertfile == null) { // Fall back to default
+                    sslrootcertfile = defaultdir + "root.crt";
+                }
+                FileInputStream fis;
+                try {
+                    fis = new FileInputStream(sslrootcertfile); // NOSONAR
+                } catch (FileNotFoundException ex) {
+                    throw new PSQLException(
+                            GT.tr("Could not open SSL root certificate file {0}.", sslrootcertfile),
+                            PSQLState.CONNECTION_FAILURE, ex);
+                }
+                try {
+                    CertificateFactory cf = CertificateFactory.getInstance("X.509");
+                    // Certificate[] certs = cf.generateCertificates(fis).toArray(new Certificate[]{}); //Does
+                    // not work in java 1.4
+                    Object[] certs = cf.generateCertificates(fis).toArray(new Certificate[]{});
+                    ks.load(null, null);
+                    for (int i = 0; i < certs.length; i++) {
+                        ks.setCertificateEntry("cert" + i, (Certificate) certs[i]);
+                    }
+                    tmf.init(ks);
+                } catch (IOException ioex) {
+                    throw new PSQLException(
+                            GT.tr("Could not read SSL root certificate file {0}.", sslrootcertfile),
+                            PSQLState.CONNECTION_FAILURE, ioex);
+                } catch (GeneralSecurityException gsex) {
+                    throw new PSQLException(
+                            GT.tr("Loading the SSL root certificate {0} into a TrustManager failed.",
+                                    sslrootcertfile),
+                            PSQLState.CONNECTION_FAILURE, gsex);
+                } finally {
+                    try {
+                        fis.close();
+                    } catch (IOException e) {
+                        /* ignore */
+                    }
+                }
+                tm = tmf.getTrustManagers();
+            }
+
+            // finally we can initialize the context
+            try {
+                KeyManager km = this.km;
+                ctx.init(km == null ? null : new KeyManager[]{km}, tm, null);
+            } catch (KeyManagementException ex) {
+                throw new PSQLException(GT.tr("Could not initialize SSL context."),
+                        PSQLState.CONNECTION_FAILURE, ex);
+            }
+
+            factory = ctx.getSocketFactory();
+        } catch (NoSuchAlgorithmException ex) {
+            throw new PSQLException(GT.tr("Could not find a java cryptographic algorithm: {0}.",
+                    ex.getMessage()), PSQLState.CONNECTION_FAILURE, ex);
+        }
     }
-  }
 
-  /**
-   * Propagates any exception from {@link LazyKeyManager}.
-   *
-   * @throws PSQLException if there is an exception to propagate
-   */
-  public void throwKeyManagerException() throws PSQLException {
-    if (km != null) {
-      if (km instanceof LazyKeyManager) {
-        ((LazyKeyManager) km).throwKeyManagerException();
-      }
-      if (km instanceof PKCS12KeyManager) {
-        ((PKCS12KeyManager) km).throwKeyManagerException();
-      }
+    private CallbackHandler getCallbackHandler(LibPQFactory this, Properties info) throws PSQLException {
+        // Choose how the key password is obtained: a user-supplied CallbackHandler
+        // class (sslpasswordcallback property) wins; otherwise fall back to the
+        // console / the sslpassword property.
+        String sslpasswordcallback = PGProperty.SSL_PASSWORD_CALLBACK.getOrDefault(info);
+        if (sslpasswordcallback == null) {
+            return new ConsoleCallbackHandler(PGProperty.SSL_PASSWORD.getOrDefault(info));
+        }
+        try {
+            return ObjectFactory.instantiate(CallbackHandler.class, sslpasswordcallback, info, false, null);
+        } catch (Exception e) {
+            throw new PSQLException(
+                    GT.tr("The password callback class provided {0} could not be instantiated.",
+                            sslpasswordcallback),
+                    PSQLState.CONNECTION_FAILURE, e);
+        }
     }
-  }
 
-  /**
-   * A CallbackHandler that reads the password from the console or returns the password given to its
-   * constructor.
-   */
-  public static class ConsoleCallbackHandler implements CallbackHandler {
+    private void initPk8(LibPQFactory this,
+                         String sslkeyfile, String defaultdir, Properties info) throws PSQLException {
 
-    private char [] password;
+        // Load the client's certificate and key.
+        // Resolve the certificate path from the sslcert property; when absent, fall back
+        // to <defaultdir>/postgresql.crt and record (defaultfile) that a default was used.
+        String sslcertfile = PGProperty.SSL_CERT.getOrDefault(info);
+        if (sslcertfile == null) { // Fall back to default
+            defaultfile = true;
+            sslcertfile = defaultdir + "postgresql.crt";
+        }
 
-    ConsoleCallbackHandler(String password) {
-      if (password != null) {
-        this.password = password.toCharArray();
-      }
+        // If the properties are empty, give null to prevent client key selection
+        km = new LazyKeyManager(("".equals(sslcertfile) ? null : sslcertfile),
+                ("".equals(sslkeyfile) ? null : sslkeyfile), getCallbackHandler(info), defaultfile);
+    }
+
+    private void initP12(LibPQFactory this,
+                         String sslkeyfile, Properties info) throws PSQLException {
+        // A PKCS-12 keystore bundles certificate and key, so only the key file path is needed.
+        CallbackHandler passwordHandler = getCallbackHandler(info);
+        km = new PKCS12KeyManager(sslkeyfile, passwordHandler);
     }
 
     /**
-     * Handles the callbacks.
+     * Propagates any exception from {@link LazyKeyManager} or {@link PKCS12KeyManager}.
      *
-     * @param callbacks The callbacks to handle
-     * @throws UnsupportedCallbackException If the console is not available or other than
-     *         PasswordCallback is supplied
+     * @throws PSQLException if there is an exception to propagate
      */
-    @Override
-    public void handle(Callback[] callbacks) throws IOException, UnsupportedCallbackException {
-      Console cons = System.console();
-      char[] password = this.password;
-      if (cons == null && password == null) {
-        throw new UnsupportedCallbackException(callbacks[0], "Console is not available");
-      }
-      for (Callback callback : callbacks) {
-        if (!(callback instanceof PasswordCallback)) {
-          throw new UnsupportedCallbackException(callback);
+    public void throwKeyManagerException() throws PSQLException {
+        if (km != null) {
+            if (km instanceof LazyKeyManager) {
+                ((LazyKeyManager) km).throwKeyManagerException();
+            }
+            if (km instanceof PKCS12KeyManager) {
+                ((PKCS12KeyManager) km).throwKeyManagerException();
+            }
+        }
+    }
+
+    /**
+     * A CallbackHandler that reads the password from the console or returns the password given to its
+     * constructor.
+     */
+    public static class ConsoleCallbackHandler implements CallbackHandler {
+
+        private char[] password;
+
+        ConsoleCallbackHandler(String password) {
+            // Leave the field null when no password was supplied; handle() then
+            // prompts on the console instead.
+            this.password = password == null ? null : password.toCharArray();
+        }
+
+        /**
+         * Handles the callbacks.
+         *
+         * @param callbacks The callbacks to handle
+         * @throws UnsupportedCallbackException If the console is not available or other than
+         *                                      PasswordCallback is supplied
+         */
+        @Override
+        public void handle(Callback[] callbacks) throws IOException, UnsupportedCallbackException {
+            char[] stored = this.password;
+            Console console = System.console();
+            // Without a stored password we must prompt, which requires an interactive console.
+            if (console == null && stored == null) {
+                throw new UnsupportedCallbackException(callbacks[0], "Console is not available");
+            }
+            for (Callback callback : callbacks) {
+                if (!(callback instanceof PasswordCallback)) {
+                    throw new UnsupportedCallbackException(callback);
+                }
+                PasswordCallback pwdCallback = (PasswordCallback) callback;
+                if (stored != null) {
+                    pwdCallback.setPassword(stored);
+                } else {
+                    // "%s" guards against '%' characters in the prompt being treated
+                    // as format specifiers by readPassword.
+                    pwdCallback.setPassword(console.readPassword("%s", pwdCallback.getPrompt()));
+                }
+            }
         }
-        PasswordCallback pwdCallback = (PasswordCallback) callback;
-        if (password != null) {
-          pwdCallback.setPassword(password);
-          continue;
-        }
-        // It is used instead of cons.readPassword(prompt), because the prompt may contain '%'
-        // characters
-        pwdCallback.setPassword(cons.readPassword("%s", pwdCallback.getPrompt())
-        );
-      }
     }
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/ssl/MakeSSL.java b/pgjdbc/src/main/java/org/postgresql/ssl/MakeSSL.java
index b590970..1064fbb 100644
--- a/pgjdbc/src/main/java/org/postgresql/ssl/MakeSSL.java
+++ b/pgjdbc/src/main/java/org/postgresql/ssl/MakeSSL.java
@@ -5,6 +5,13 @@
 
 package org.postgresql.ssl;
 
+import java.io.IOException;
+import java.util.Properties;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+import javax.net.ssl.HostnameVerifier;
+import javax.net.ssl.SSLSocket;
+import javax.net.ssl.SSLSocketFactory;
 import org.postgresql.PGProperty;
 import org.postgresql.core.PGStream;
 import org.postgresql.core.SocketFactoryFactory;
@@ -14,77 +21,68 @@ import org.postgresql.util.ObjectFactory;
 import org.postgresql.util.PSQLException;
 import org.postgresql.util.PSQLState;
 
-import java.io.IOException;
-import java.util.Properties;
-import java.util.logging.Level;
-import java.util.logging.Logger;
-
-import javax.net.ssl.HostnameVerifier;
-import javax.net.ssl.SSLSocket;
-import javax.net.ssl.SSLSocketFactory;
-
 public class MakeSSL extends ObjectFactory {
 
-  private static final Logger LOGGER = Logger.getLogger(MakeSSL.class.getName());
+    private static final Logger LOGGER = Logger.getLogger(MakeSSL.class.getName());
 
-  public static void convert(PGStream stream, Properties info)
-      throws PSQLException, IOException {
-    LOGGER.log(Level.FINE, "converting regular socket connection to ssl");
+    public static void convert(PGStream stream, Properties info)
+            throws PSQLException, IOException {
+        LOGGER.log(Level.FINE, "converting regular socket connection to ssl");
+
+        SSLSocketFactory factory = SocketFactoryFactory.getSslSocketFactory(info);
+        SSLSocket sslSocket;
+        try {
+            sslSocket = (SSLSocket) factory.createSocket(stream.getSocket(),
+                    stream.getHostSpec().getHost(), stream.getHostSpec().getPort(), true);
+            // Bound the handshake by the connect timeout so a stalled peer cannot hang us.
+            sslSocket.setSoTimeout(PGProperty.CONNECT_TIMEOUT.getInt(info) * 1000);
+            // We must invoke manually, otherwise the exceptions are hidden
+            sslSocket.setUseClientMode(true);
+            sslSocket.startHandshake();
+        } catch (IOException ex) {
+            throw new PSQLException(GT.tr("SSL error: {0}", ex.getMessage()),
+                    PSQLState.CONNECTION_FAILURE, ex);
+        }
+        if (factory instanceof LibPQFactory) { // throw any KeyManager exception
+            ((LibPQFactory) factory).throwKeyManagerException();
+        }
+
+        if (SslMode.of(info).verifyPeerName()) {
+            verifyPeerName(stream, info, sslSocket);
+        }
+        // Zero timeout (default) means infinite
+        sslSocket.setSoTimeout(PGProperty.SOCKET_TIMEOUT.getInt(info) * 1000);
+        stream.changeSocket(sslSocket);
     }
 
-    SslMode sslMode = SslMode.of(info);
-    if (sslMode.verifyPeerName()) {
-      verifyPeerName(stream, info, newConnection);
-    }
-    // Zero timeout (default) means infinite
-    int socketTimeout = PGProperty.SOCKET_TIMEOUT.getInt(info);
-    newConnection.setSoTimeout(socketTimeout * 1000);
-    stream.changeSocket(newConnection);
-  }
+    private static void verifyPeerName(PGStream stream, Properties info, SSLSocket newConnection)
+            throws PSQLException {
+        HostnameVerifier hvn;
+        String sslhostnameverifier = PGProperty.SSL_HOSTNAME_VERIFIER.getOrDefault(info);
+        if (sslhostnameverifier == null) {
+            hvn = PGjdbcHostnameVerifier.INSTANCE;
+            sslhostnameverifier = "PgjdbcHostnameVerifier";
+        } else {
+            try {
+                hvn = instantiate(HostnameVerifier.class, sslhostnameverifier, info, false, null);
+            } catch (Exception e) {
+                throw new PSQLException(
+                        GT.tr("The HostnameVerifier class provided {0} could not be instantiated.",
+                                sslhostnameverifier),
+                        PSQLState.CONNECTION_FAILURE, e);
+            }
+        }
+
+        if (hvn.verify(stream.getHostSpec().getHost(), newConnection.getSession())) {
+            return;
+        }
 
-  private static void verifyPeerName(PGStream stream, Properties info, SSLSocket newConnection)
-      throws PSQLException {
-    HostnameVerifier hvn;
-    String sslhostnameverifier = PGProperty.SSL_HOSTNAME_VERIFIER.getOrDefault(info);
-    if (sslhostnameverifier == null) {
-      hvn = PGjdbcHostnameVerifier.INSTANCE;
-      sslhostnameverifier = "PgjdbcHostnameVerifier";
-    } else {
-      try {
-        hvn = instantiate(HostnameVerifier.class, sslhostnameverifier, info, false, null);
-      } catch (Exception e) {
         throw new PSQLException(
-            GT.tr("The HostnameVerifier class provided {0} could not be instantiated.",
-                sslhostnameverifier),
-            PSQLState.CONNECTION_FAILURE, e);
-      }
+                GT.tr("The hostname {0} could not be verified by hostnameverifier {1}.",
+                        stream.getHostSpec().getHost(), sslhostnameverifier),
+                PSQLState.CONNECTION_FAILURE);
     }
 
-    if (hvn.verify(stream.getHostSpec().getHost(), newConnection.getSession())) {
-      return;
-    }
-
-    throw new PSQLException(
-        GT.tr("The hostname {0} could not be verified by hostnameverifier {1}.",
-            stream.getHostSpec().getHost(), sslhostnameverifier),
-        PSQLState.CONNECTION_FAILURE);
-  }
-
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/ssl/NonValidatingFactory.java b/pgjdbc/src/main/java/org/postgresql/ssl/NonValidatingFactory.java
index 649a54d..60d8773 100644
--- a/pgjdbc/src/main/java/org/postgresql/ssl/NonValidatingFactory.java
+++ b/pgjdbc/src/main/java/org/postgresql/ssl/NonValidatingFactory.java
@@ -7,7 +7,6 @@ package org.postgresql.ssl;
 
 import java.security.GeneralSecurityException;
 import java.security.cert.X509Certificate;
-
 import javax.net.ssl.SSLContext;
 import javax.net.ssl.TrustManager;
 import javax.net.ssl.X509TrustManager;
@@ -19,35 +18,35 @@ import javax.net.ssl.X509TrustManager;
  */
 public class NonValidatingFactory extends WrappedFactory {
 
-  /**
-   * We provide a constructor that takes an unused argument solely because the ssl calling code will
-   * look for this constructor first and then fall back to the no argument constructor, so we avoid
-   * an exception and additional reflection lookups.
-   *
-   * @param arg input argument
-   * @throws GeneralSecurityException if something goes wrong
-   */
-  public NonValidatingFactory(String arg) throws GeneralSecurityException {
-    SSLContext ctx = SSLContext.getInstance("TLS"); // or "SSL" ?
+    /**
+     * We provide a constructor that takes an unused argument solely because the ssl calling code will
+     * look for this constructor first and then fall back to the no argument constructor, so we avoid
+     * an exception and additional reflection lookups.
+     *
+     * @param arg input argument
+     * @throws GeneralSecurityException if something goes wrong
+     */
+    public NonValidatingFactory(String arg) throws GeneralSecurityException {
+        SSLContext ctx = SSLContext.getInstance("TLS"); // or "SSL" ?
 
-    ctx.init(null, new TrustManager[]{new NonValidatingTM()}, null);
+        ctx.init(null, new TrustManager[]{new NonValidatingTM()}, null);
 
-    factory = ctx.getSocketFactory();
-  }
-
-  public static class NonValidatingTM implements X509TrustManager {
-
-    @Override
-    public X509Certificate[] getAcceptedIssuers() {
-      return new X509Certificate[0];
+        factory = ctx.getSocketFactory();
     }
 
-    @Override
-    public void checkClientTrusted(X509Certificate[] certs, String authType) {
-    }
+    public static class NonValidatingTM implements X509TrustManager {
 
-    @Override
-    public void checkServerTrusted(X509Certificate[] certs, String authType) {
+        @Override
+        public X509Certificate[] getAcceptedIssuers() {
+            return new X509Certificate[0];
+        }
+
+        @Override
+        public void checkClientTrusted(X509Certificate[] certs, String authType) {
+        }
+
+        @Override
+        public void checkServerTrusted(X509Certificate[] certs, String authType) {
+        }
     }
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/ssl/PGjdbcHostnameVerifier.java b/pgjdbc/src/main/java/org/postgresql/ssl/PGjdbcHostnameVerifier.java
index dbd432c..2af6925 100644
--- a/pgjdbc/src/main/java/org/postgresql/ssl/PGjdbcHostnameVerifier.java
+++ b/pgjdbc/src/main/java/org/postgresql/ssl/PGjdbcHostnameVerifier.java
@@ -5,8 +5,6 @@
 
 package org.postgresql.ssl;
 
-import org.postgresql.util.GT;
-
 import java.net.IDN;
 import java.security.cert.CertificateParsingException;
 import java.security.cert.X509Certificate;
@@ -17,7 +15,6 @@ import java.util.Comparator;
 import java.util.List;
 import java.util.logging.Level;
 import java.util.logging.Logger;
-
 import javax.naming.InvalidNameException;
 import javax.naming.ldap.LdapName;
 import javax.naming.ldap.Rdn;
@@ -25,241 +22,239 @@ import javax.net.ssl.HostnameVerifier;
 import javax.net.ssl.SSLPeerUnverifiedException;
 import javax.net.ssl.SSLSession;
 import javax.security.auth.x500.X500Principal;
+import org.postgresql.util.GT;
 
 public class PGjdbcHostnameVerifier implements HostnameVerifier {
-  private static final Logger LOGGER = Logger.getLogger(PGjdbcHostnameVerifier.class.getName());
-
-  public static final PGjdbcHostnameVerifier INSTANCE = new PGjdbcHostnameVerifier();
-
-  private static final int TYPE_DNS_NAME = 2;
-  private static final int TYPE_IP_ADDRESS = 7;
-
-  public static final Comparator<String> HOSTNAME_PATTERN_COMPARATOR = new Comparator<String>() {
-    private int countChars(String value, char ch) {
-      int count = 0;
-      int pos = -1;
-      while (true) {
-        pos = value.indexOf(ch, pos + 1);
-        if (pos == -1) {
-          break;
+    public static final PGjdbcHostnameVerifier INSTANCE = new PGjdbcHostnameVerifier();
+    public static final Comparator<String> HOSTNAME_PATTERN_COMPARATOR = new Comparator<String>() {
+        private int countChars(String value, char ch) {
+            int count = 0;
+            int pos = -1;
+            while (true) {
+                pos = value.indexOf(ch, pos + 1);
+                if (pos == -1) {
+                    break;
+                }
+                count++;
+            }
+            return count;
         }
-        count++;
-      }
-      return count;
-    }
+
+        @Override
+        public int compare(String o1, String o2) {
+            // The more the dots the better: a.b.c.postgresql.org is more specific than postgresql.org
+            int d1 = countChars(o1, '.');
+            int d2 = countChars(o2, '.');
+            if (d1 != d2) {
+                return d1 > d2 ? 1 : -1;
+            }
+
+            // The less the stars the better: postgresql.org is more specific than *.*.postgresql.org
+            int s1 = countChars(o1, '*');
+            int s2 = countChars(o2, '*');
+            if (s1 != s2) {
+                return s1 < s2 ? 1 : -1;
+            }
+
+            // The longer the better: postgresql.org is more specific than sql.org
+            int l1 = o1.length();
+            int l2 = o2.length();
+            if (l1 != l2) {
+                return l1 > l2 ? 1 : -1;
+            }
+
+            return 0;
+        }
+    };
+    private static final Logger LOGGER = Logger.getLogger(PGjdbcHostnameVerifier.class.getName());
+    private static final int TYPE_DNS_NAME = 2;
+    private static final int TYPE_IP_ADDRESS = 7;
 
     @Override
-    public int compare(String o1, String o2) {
-      // The more the dots the better: a.b.c.postgresql.org is more specific than postgresql.org
-      int d1 = countChars(o1, '.');
-      int d2 = countChars(o2, '.');
-      if (d1 != d2) {
-        return d1 > d2 ? 1 : -1;
-      }
-
-      // The less the stars the better: postgresql.org is more specific than *.*.postgresql.org
-      int s1 = countChars(o1, '*');
-      int s2 = countChars(o2, '*');
-      if (s1 != s2) {
-        return s1 < s2 ? 1 : -1;
-      }
-
-      // The longer the better: postgresql.org is more specific than sql.org
-      int l1 = o1.length();
-      int l2 = o2.length();
-      if (l1 != l2) {
-        return l1 > l2 ? 1 : -1;
-      }
-
-      return 0;
-    }
-  };
-
-  @Override
-  public boolean verify(String hostname, SSLSession session) {
-    X509Certificate[] peerCerts;
-    try {
-      peerCerts = (X509Certificate[]) session.getPeerCertificates();
-    } catch (SSLPeerUnverifiedException e) {
-      LOGGER.log(Level.SEVERE,
-          GT.tr("Unable to parse X509Certificate for hostname {0}", hostname), e);
-      return false;
-    }
-    if (peerCerts == null || peerCerts.length == 0) {
-      LOGGER.log(Level.SEVERE,
-          GT.tr("No certificates found for hostname {0}", hostname));
-      return false;
-    }
-
-    String canonicalHostname;
-    if (hostname.startsWith("[") && hostname.endsWith("]")) {
-      // IPv6 address like [2001:db8:0:1:1:1:1:1]
-      canonicalHostname = hostname.substring(1, hostname.length() - 1);
-    } else {
-      // This converts unicode domain name to ASCII
-      try {
-        canonicalHostname = IDN.toASCII(hostname);
-        if (LOGGER.isLoggable(Level.FINEST)) {
-          LOGGER.log(Level.FINEST, "Canonical host name for {0} is {1}",
-              new Object[]{hostname, canonicalHostname});
+    public boolean verify(String hostname, SSLSession session) {
+        X509Certificate[] peerCerts;
+        try {
+            peerCerts = (X509Certificate[]) session.getPeerCertificates();
+        } catch (SSLPeerUnverifiedException e) {
+            LOGGER.log(Level.SEVERE,
+                    GT.tr("Unable to parse X509Certificate for hostname {0}", hostname), e);
+            return false;
         }
-      } catch (IllegalArgumentException e) {
-        // e.g. hostname is invalid
-        LOGGER.log(Level.SEVERE,
-            GT.tr("Hostname {0} is invalid", hostname), e);
-        return false;
-      }
-    }
-
-    X509Certificate serverCert = peerCerts[0];
-
-    // Check for Subject Alternative Names (see RFC 6125)
-
-    Collection<List<?>> subjectAltNames;
-    try {
-      subjectAltNames = serverCert.getSubjectAlternativeNames();
-      if (subjectAltNames == null) {
-        subjectAltNames = Collections.emptyList();
-      }
-    } catch (CertificateParsingException e) {
-      LOGGER.log(Level.SEVERE,
-          GT.tr("Unable to parse certificates for hostname {0}", hostname), e);
-      return false;
-    }
-
-    boolean anyDnsSan = false;
-    /*
-     * Each item in the SAN collection is a 2-element list.
-     * See {@link X509Certificate#getSubjectAlternativeNames}
-     * The first element in each list is a number indicating the type of entry.
-     */
-    for (List<?> sanItem : subjectAltNames) {
-      if (sanItem.size() != 2) {
-        continue;
-      }
-      Integer sanType = (Integer) sanItem.get(0);
-      if (sanType == null) {
-        // just in case
-        continue;
-      }
-      if (sanType != TYPE_IP_ADDRESS && sanType != TYPE_DNS_NAME) {
-        continue;
-      }
-      String san = (String) sanItem.get(1);
-      if (sanType == TYPE_IP_ADDRESS && san != null && san.startsWith("*")) {
-        // Wildcards should not be present in the IP Address field
-        continue;
-      }
-      anyDnsSan |= sanType == TYPE_DNS_NAME;
-      if (verifyHostName(canonicalHostname, san)) {
-        if (LOGGER.isLoggable(Level.FINEST)) {
-          LOGGER.log(Level.FINEST,
-              GT.tr("Server name validation pass for {0}, subjectAltName {1}", hostname, san));
+        if (peerCerts == null || peerCerts.length == 0) {
+            LOGGER.log(Level.SEVERE,
+                    GT.tr("No certificates found for hostname {0}", hostname));
+            return false;
         }
-        return true;
-      }
+
+        String canonicalHostname;
+        if (hostname.startsWith("[") && hostname.endsWith("]")) {
+            // IPv6 address like [2001:db8:0:1:1:1:1:1]
+            canonicalHostname = hostname.substring(1, hostname.length() - 1);
+        } else {
+            // This converts unicode domain name to ASCII
+            try {
+                canonicalHostname = IDN.toASCII(hostname);
+                if (LOGGER.isLoggable(Level.FINEST)) {
+                    LOGGER.log(Level.FINEST, "Canonical host name for {0} is {1}",
+                            new Object[]{hostname, canonicalHostname});
+                }
+            } catch (IllegalArgumentException e) {
+                // e.g. hostname is invalid
+                LOGGER.log(Level.SEVERE,
+                        GT.tr("Hostname {0} is invalid", hostname), e);
+                return false;
+            }
+        }
+
+        X509Certificate serverCert = peerCerts[0];
+
+        // Check for Subject Alternative Names (see RFC 6125)
+
+        Collection<List<?>> subjectAltNames;
+        try {
+            subjectAltNames = serverCert.getSubjectAlternativeNames();
+            if (subjectAltNames == null) {
+                subjectAltNames = Collections.emptyList();
+            }
+        } catch (CertificateParsingException e) {
+            LOGGER.log(Level.SEVERE,
+                    GT.tr("Unable to parse certificates for hostname {0}", hostname), e);
+            return false;
+        }
+
+        boolean anyDnsSan = false;
+        /*
+         * Each item in the SAN collection is a 2-element list.
+         * See {@link X509Certificate#getSubjectAlternativeNames}
+         * The first element in each list is a number indicating the type of entry.
+         */
+        for (List<?> sanItem : subjectAltNames) {
+            if (sanItem.size() != 2) {
+                continue;
+            }
+            Integer sanType = (Integer) sanItem.get(0);
+            if (sanType == null) {
+                // just in case
+                continue;
+            }
+            if (sanType != TYPE_IP_ADDRESS && sanType != TYPE_DNS_NAME) {
+                continue;
+            }
+            String san = (String) sanItem.get(1);
+            if (sanType == TYPE_IP_ADDRESS && san != null && san.startsWith("*")) {
+                // Wildcards should not be present in the IP Address field
+                continue;
+            }
+            anyDnsSan |= sanType == TYPE_DNS_NAME;
+            if (verifyHostName(canonicalHostname, san)) {
+                if (LOGGER.isLoggable(Level.FINEST)) {
+                    LOGGER.log(Level.FINEST,
+                            GT.tr("Server name validation pass for {0}, subjectAltName {1}", hostname, san));
+                }
+                return true;
+            }
+        }
+
+        if (anyDnsSan) {
+            /*
+             * RFC2818, section 3.1 (I bet you won't recheck :)
+             * If a subjectAltName extension of type dNSName is present, that MUST
+             * be used as the identity. Otherwise, the (most specific) Common Name
+             * field in the Subject field of the certificate MUST be used. Although
+             * the use of the Common Name is existing practice, it is deprecated and
+             * Certification Authorities are encouraged to use the dNSName instead.
+             */
+            LOGGER.log(Level.SEVERE,
+                    GT.tr("Server name validation failed: certificate for host {0} dNSName entries subjectAltName,"
+                            + " but none of them match. Assuming server name validation failed", hostname));
+            return false;
+        }
+
+        // Last attempt: no DNS Subject Alternative Name entries detected, try common name
+        LdapName dn;
+        try {
+            dn = new LdapName(serverCert.getSubjectX500Principal().getName(X500Principal.RFC2253));
+        } catch (InvalidNameException e) {
+            LOGGER.log(Level.SEVERE,
+                    GT.tr("Server name validation failed: unable to extract common name"
+                            + " from X509Certificate for hostname {0}", hostname), e);
+            return false;
+        }
+
+        List<String> commonNames = new ArrayList<>(1);
+        for (Rdn rdn : dn.getRdns()) {
+            if ("CN".equals(rdn.getType())) {
+                commonNames.add((String) rdn.getValue());
+            }
+        }
+        if (commonNames.isEmpty()) {
+            LOGGER.log(Level.SEVERE,
+                    GT.tr("Server name validation failed: certificate for hostname {0} has no DNS subjectAltNames,"
+                                    + " and it CommonName is missing as well",
+                            hostname));
+            return false;
+        }
+        if (commonNames.size() > 1) {
+            /*
+             * RFC2818, section 3.1
+             * If a subjectAltName extension of type dNSName is present, that MUST
+             * be used as the identity. Otherwise, the (most specific) Common Name
+             * field in the Subject field of the certificate MUST be used
+             *
+             * The sort is from less specific to most specific.
+             */
+            Collections.sort(commonNames, HOSTNAME_PATTERN_COMPARATOR);
+        }
+        String commonName = commonNames.get(commonNames.size() - 1);
+        boolean result = verifyHostName(canonicalHostname, commonName);
+        if (!result) {
+            LOGGER.log(Level.SEVERE,
+                    GT.tr("Server name validation failed: hostname {0} does not match common name {1}",
+                            hostname, commonName));
+        }
+        return result;
     }
 
-    if (anyDnsSan) {
-      /*
-       * RFC2818, section 3.1 (I bet you won't recheck :)
-       * If a subjectAltName extension of type dNSName is present, that MUST
-       * be used as the identity. Otherwise, the (most specific) Common Name
-       * field in the Subject field of the certificate MUST be used. Although
-       * the use of the Common Name is existing practice, it is deprecated and
-       * Certification Authorities are encouraged to use the dNSName instead.
-       */
-      LOGGER.log(Level.SEVERE,
-          GT.tr("Server name validation failed: certificate for host {0} dNSName entries subjectAltName,"
-              + " but none of them match. Assuming server name validation failed", hostname));
-      return false;
-    }
+    public boolean verifyHostName(String hostname, String pattern) {
+        if (hostname == null || pattern == null) {
+            return false;
+        }
+        int lastStar = pattern.lastIndexOf('*');
+        if (lastStar == -1) {
+            // No wildcard => just compare hostnames
+            return hostname.equalsIgnoreCase(pattern);
+        }
+        if (lastStar > 0) {
+            // Wildcards like foo*.com are not supported yet
+            return false;
+        }
+        if (pattern.indexOf('.') == -1) {
+            // Wildcard certificates should contain at least one dot
+            return false;
+        }
+        // pattern starts with *, so hostname should be at least (pattern.length-1) long
+        if (hostname.length() < pattern.length() - 1) {
+            return false;
+        }
+        // Use case insensitive comparison
+        final boolean ignoreCase = true;
+        // Below code is "hostname.endsWithIgnoreCase(pattern.withoutFirstStar())"
 
-    // Last attempt: no DNS Subject Alternative Name entries detected, try common name
-    LdapName dn;
-    try {
-      dn = new LdapName(serverCert.getSubjectX500Principal().getName(X500Principal.RFC2253));
-    } catch (InvalidNameException e) {
-      LOGGER.log(Level.SEVERE,
-          GT.tr("Server name validation failed: unable to extract common name"
-              + " from X509Certificate for hostname {0}", hostname), e);
-      return false;
-    }
+        // E.g. hostname==sub.host.com; pattern==*.host.com
+        // We need to start the offset of ".host.com" in hostname
+        // For this we take hostname.length() - pattern.length()
+        // and +1 is required since pattern is known to start with *
+        int toffset = hostname.length() - pattern.length() + 1;
 
-    List<String> commonNames = new ArrayList<>(1);
-    for (Rdn rdn : dn.getRdns()) {
-      if ("CN".equals(rdn.getType())) {
-        commonNames.add((String) rdn.getValue());
-      }
-    }
-    if (commonNames.isEmpty()) {
-      LOGGER.log(Level.SEVERE,
-          GT.tr("Server name validation failed: certificate for hostname {0} has no DNS subjectAltNames,"
-                  + " and it CommonName is missing as well",
-              hostname));
-      return false;
-    }
-    if (commonNames.size() > 1) {
-      /*
-       * RFC2818, section 3.1
-       * If a subjectAltName extension of type dNSName is present, that MUST
-       * be used as the identity. Otherwise, the (most specific) Common Name
-       * field in the Subject field of the certificate MUST be used
-       *
-       * The sort is from less specific to most specific.
-       */
-      Collections.sort(commonNames, HOSTNAME_PATTERN_COMPARATOR);
-    }
-    String commonName = commonNames.get(commonNames.size() - 1);
-    boolean result = verifyHostName(canonicalHostname, commonName);
-    if (!result) {
-      LOGGER.log(Level.SEVERE,
-          GT.tr("Server name validation failed: hostname {0} does not match common name {1}",
-              hostname, commonName));
-    }
-    return result;
-  }
+        // Wildcard covers just one domain level
+        // a.b.c.com should not be covered by *.c.com
+        if (hostname.lastIndexOf('.', toffset - 1) >= 0) {
+            // If there's a dot in between 0..toffset
+            return false;
+        }
 
-  public boolean verifyHostName(String hostname, String pattern) {
-    if (hostname == null || pattern == null) {
-      return false;
+        return hostname.regionMatches(ignoreCase, toffset,
+                pattern, 1, pattern.length() - 1);
     }
-    int lastStar = pattern.lastIndexOf('*');
-    if (lastStar == -1) {
-      // No wildcard => just compare hostnames
-      return hostname.equalsIgnoreCase(pattern);
-    }
-    if (lastStar > 0) {
-      // Wildcards like foo*.com are not supported yet
-      return false;
-    }
-    if (pattern.indexOf('.') == -1) {
-      // Wildcard certificates should contain at least one dot
-      return false;
-    }
-    // pattern starts with *, so hostname should be at least (pattern.length-1) long
-    if (hostname.length() < pattern.length() - 1) {
-      return false;
-    }
-    // Use case insensitive comparison
-    final boolean ignoreCase = true;
-    // Below code is "hostname.endsWithIgnoreCase(pattern.withoutFirstStar())"
-
-    // E.g. hostname==sub.host.com; pattern==*.host.com
-    // We need to start the offset of ".host.com" in hostname
-    // For this we take hostname.length() - pattern.length()
-    // and +1 is required since pattern is known to start with *
-    int toffset = hostname.length() - pattern.length() + 1;
-
-    // Wildcard covers just one domain level
-    // a.b.c.com should not be covered by *.c.com
-    if (hostname.lastIndexOf('.', toffset - 1) >= 0) {
-      // If there's a dot in between 0..toffset
-      return false;
-    }
-
-    return hostname.regionMatches(ignoreCase, toffset,
-        pattern, 1, pattern.length() - 1);
-  }
 
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/ssl/PKCS12KeyManager.java b/pgjdbc/src/main/java/org/postgresql/ssl/PKCS12KeyManager.java
index 4f12420..7b140af 100644
--- a/pgjdbc/src/main/java/org/postgresql/ssl/PKCS12KeyManager.java
+++ b/pgjdbc/src/main/java/org/postgresql/ssl/PKCS12KeyManager.java
@@ -5,11 +5,6 @@
 
 package org.postgresql.ssl;
 
-import org.postgresql.jdbc.ResourceLock;
-import org.postgresql.util.GT;
-import org.postgresql.util.PSQLException;
-import org.postgresql.util.PSQLState;
-
 import java.io.FileInputStream;
 import java.net.Socket;
 import java.security.KeyStore;
@@ -18,180 +13,183 @@ import java.security.Principal;
 import java.security.PrivateKey;
 import java.security.cert.Certificate;
 import java.security.cert.X509Certificate;
-
 import javax.net.ssl.X509KeyManager;
 import javax.security.auth.callback.Callback;
 import javax.security.auth.callback.CallbackHandler;
 import javax.security.auth.callback.PasswordCallback;
 import javax.security.auth.callback.UnsupportedCallbackException;
 import javax.security.auth.x500.X500Principal;
+import org.postgresql.jdbc.ResourceLock;
+import org.postgresql.util.GT;
+import org.postgresql.util.PSQLException;
+import org.postgresql.util.PSQLState;
 
 public class PKCS12KeyManager implements X509KeyManager {
 
-  private final CallbackHandler cbh;
-  private PSQLException error;
-  private final String keyfile;
-  private final KeyStore keyStore;
-  boolean keystoreLoaded;
-  private final ResourceLock lock = new ResourceLock();
+    private final CallbackHandler cbh;
+    private final String keyfile;
+    private final KeyStore keyStore;
+    private final ResourceLock lock = new ResourceLock();
+    boolean keystoreLoaded;
+    private PSQLException error;
 
-  public PKCS12KeyManager(String pkcsFile, CallbackHandler cbh) throws PSQLException {
-    try {
-      keyStore = KeyStore.getInstance("pkcs12");
-      keyfile = pkcsFile;
-      this.cbh = cbh;
-    } catch ( KeyStoreException kse ) {
-      throw new PSQLException(GT.tr(
-        "Unable to find pkcs12 keystore."),
-        PSQLState.CONNECTION_FAILURE, kse);
+    public PKCS12KeyManager(String pkcsFile, CallbackHandler cbh) throws PSQLException {
+        try {
+            keyStore = KeyStore.getInstance("pkcs12");
+            keyfile = pkcsFile;
+            this.cbh = cbh;
+        } catch (KeyStoreException kse) {
+            throw new PSQLException(GT.tr(
+                    "Unable to find pkcs12 keystore."),
+                    PSQLState.CONNECTION_FAILURE, kse);
+        }
     }
-  }
 
-  /**
-   * getCertificateChain and getPrivateKey cannot throw exceptions, therefore any exception is stored
-   * in {@link #error} and can be raised by this method.
-   *
-   * @throws PSQLException if any exception is stored in {@link #error} and can be raised
-   */
-  public void throwKeyManagerException() throws PSQLException {
-    if (error != null) {
-      throw error;
+    /**
+     * getCertificateChain and getPrivateKey cannot throw exceptions, therefore any exception is stored
+     * in {@link #error} and can be raised by this method.
+     *
+     * @throws PSQLException if any exception is stored in {@link #error} and can be raised
+     */
+    public void throwKeyManagerException() throws PSQLException {
+        if (error != null) {
+            throw error;
+        }
     }
-  }
 
-  @Override
-  public String [] getClientAliases(String keyType, Principal [] principals) {
-    String alias = chooseClientAlias(new String[]{keyType}, principals, (Socket) null);
-    return alias == null ? null : new String[]{alias};
-  }
+    @Override
+    public String[] getClientAliases(String keyType, Principal[] principals) {
+        String alias = chooseClientAlias(new String[]{keyType}, principals, (Socket) null);
+        return alias == null ? null : new String[]{alias};
+    }
 
-  @Override
-  public String chooseClientAlias(String[] keyType, Principal [] principals,
-      Socket socket) {
-    if (principals == null || principals.length == 0) {
-      // Postgres 8.4 and earlier do not send the list of accepted certificate authorities
-      // to the client. See BUG #5468. We only hope, that our certificate will be accepted.
-      return "user";
-    } else {
-      // Sending a wrong certificate makes the connection rejected, even, if clientcert=0 in
-      // pg_hba.conf.
-      // therefore we only send our certificate, if the issuer is listed in issuers
-      X509Certificate[] certchain = getCertificateChain("user");
-      if (certchain == null) {
-        return null;
-      } else {
-        X509Certificate cert = certchain[certchain.length - 1];
-        X500Principal ourissuer = cert.getIssuerX500Principal();
-        String certKeyType = cert.getPublicKey().getAlgorithm();
-        boolean keyTypeFound = false;
-        boolean found = false;
-        if (keyType != null && keyType.length > 0) {
-          for (String kt : keyType) {
-            if (kt.equalsIgnoreCase(certKeyType)) {
-              keyTypeFound = true;
-            }
-          }
+    @Override
+    public String chooseClientAlias(String[] keyType, Principal[] principals,
+                                    Socket socket) {
+        if (principals == null || principals.length == 0) {
+            // Postgres 8.4 and earlier do not send the list of accepted certificate authorities
+            // to the client. See BUG #5468. We only hope, that our certificate will be accepted.
+            return "user";
         } else {
-          // If no key types were passed in, assume we don't care
-          // about checking that the cert uses a particular key type.
-          keyTypeFound = true;
-        }
-        if (keyTypeFound) {
-          for (Principal issuer : principals) {
-            if (ourissuer.equals(issuer)) {
-              found = keyTypeFound;
+            // Sending a wrong certificate makes the connection rejected, even, if clientcert=0 in
+            // pg_hba.conf.
+            // therefore we only send our certificate, if the issuer is listed in issuers
+            X509Certificate[] certchain = getCertificateChain("user");
+            if (certchain == null) {
+                return null;
+            } else {
+                X509Certificate cert = certchain[certchain.length - 1];
+                X500Principal ourissuer = cert.getIssuerX500Principal();
+                String certKeyType = cert.getPublicKey().getAlgorithm();
+                boolean keyTypeFound = false;
+                boolean found = false;
+                if (keyType != null && keyType.length > 0) {
+                    for (String kt : keyType) {
+                        if (kt.equalsIgnoreCase(certKeyType)) {
+                            keyTypeFound = true;
+                        }
+                    }
+                } else {
+                    // If no key types were passed in, assume we don't care
+                    // about checking that the cert uses a particular key type.
+                    keyTypeFound = true;
+                }
+                if (keyTypeFound) {
+                    for (Principal issuer : principals) {
+                        if (ourissuer.equals(issuer)) {
+                            found = keyTypeFound;
+                        }
+                    }
+                }
+                return found ? "user" : null;
             }
-          }
         }
-        return found ? "user" : null;
-      }
     }
-  }
 
-  @Override
-  public String [] getServerAliases(String s, Principal [] principals) {
-    return new String[]{};
-  }
+    @Override
+    public String[] getServerAliases(String s, Principal[] principals) {
+        return new String[]{};
+    }
 
-  @Override
-  public String chooseServerAlias(String s, Principal [] principals,
-      Socket socket) {
-    // we are not a server
-    return null;
-  }
-
-  @Override
-  public X509Certificate [] getCertificateChain(String alias) {
-    try {
-      loadKeyStore();
-      Certificate[] certs = keyStore.getCertificateChain(alias);
-      if (certs == null) {
+    @Override
+    public String chooseServerAlias(String s, Principal[] principals,
+                                    Socket socket) {
+        // we are not a server
         return null;
-      }
-      X509Certificate[] x509Certificates = new X509Certificate[certs.length];
-      int i = 0;
-      for (Certificate cert : certs) {
-        x509Certificates[i++] = (X509Certificate) cert;
-      }
-      return x509Certificates;
-    } catch (Exception kse) {
-      error = new PSQLException(GT.tr(
-        "Could not find a java cryptographic algorithm: X.509 CertificateFactory not available."),
-        PSQLState.CONNECTION_FAILURE, kse);
     }
-    return null;
-  }
 
-  @Override
-  public PrivateKey getPrivateKey(String s) {
-    try {
-      loadKeyStore();
-      PasswordCallback pwdcb = new PasswordCallback(GT.tr("Enter SSL password: "), false);
-      cbh.handle(new Callback[]{pwdcb});
-
-      KeyStore.ProtectionParameter protParam = new KeyStore.PasswordProtection(pwdcb.getPassword());
-      KeyStore.PrivateKeyEntry pkEntry =
-          (KeyStore.PrivateKeyEntry) keyStore.getEntry("user", protParam);
-      if (pkEntry == null) {
-        return null;
-      }
-      return pkEntry.getPrivateKey();
-    } catch (Exception ioex ) {
-      error = new PSQLException(GT.tr("Could not read SSL key file {0}.", keyfile),
-        PSQLState.CONNECTION_FAILURE, ioex);
-    }
-    return null;
-  }
-
-  @SuppressWarnings("try")
-  private void loadKeyStore() throws Exception {
-    try (ResourceLock ignore = lock.obtain()) {
-      if (keystoreLoaded) {
-        return;
-      }
-      // We call back for the password
-      PasswordCallback pwdcb = new PasswordCallback(GT.tr("Enter SSL password: "), false);
-      try {
-        cbh.handle(new Callback[]{pwdcb});
-      } catch (UnsupportedCallbackException ucex) {
-        if ((cbh instanceof LibPQFactory.ConsoleCallbackHandler)
-            && ("Console is not available".equals(ucex.getMessage()))) {
-          error = new PSQLException(GT
-              .tr("Could not read password for SSL key file, console is not available."),
-              PSQLState.CONNECTION_FAILURE, ucex);
-        } else {
-          error =
-              new PSQLException(
-                  GT.tr("Could not read password for SSL key file by callbackhandler {0}.",
-                      cbh.getClass().getName()),
-                  PSQLState.CONNECTION_FAILURE, ucex);
+    @Override
+    public X509Certificate[] getCertificateChain(String alias) {
+        try {
+            loadKeyStore();
+            Certificate[] certs = keyStore.getCertificateChain(alias);
+            if (certs == null) {
+                return null;
+            }
+            X509Certificate[] x509Certificates = new X509Certificate[certs.length];
+            int i = 0;
+            for (Certificate cert : certs) {
+                x509Certificates[i++] = (X509Certificate) cert;
+            }
+            return x509Certificates;
+        } catch (Exception kse) {
+            error = new PSQLException(GT.tr(
+                    "Could not find a java cryptographic algorithm: X.509 CertificateFactory not available."),
+                    PSQLState.CONNECTION_FAILURE, kse);
+        }
+        return null;
+    }
+
+    @Override
+    public PrivateKey getPrivateKey(String s) {
+        try {
+            loadKeyStore();
+            PasswordCallback pwdcb = new PasswordCallback(GT.tr("Enter SSL password: "), false);
+            cbh.handle(new Callback[]{pwdcb});
+
+            KeyStore.ProtectionParameter protParam = new KeyStore.PasswordProtection(pwdcb.getPassword());
+            KeyStore.PrivateKeyEntry pkEntry =
+                    (KeyStore.PrivateKeyEntry) keyStore.getEntry("user", protParam);
+            if (pkEntry == null) {
+                return null;
+            }
+            return pkEntry.getPrivateKey();
+        } catch (Exception ioex) {
+            error = new PSQLException(GT.tr("Could not read SSL key file {0}.", keyfile),
+                    PSQLState.CONNECTION_FAILURE, ioex);
+        }
+        return null;
+    }
+
+    @SuppressWarnings("try")
+    private void loadKeyStore() throws Exception {
+        try (ResourceLock ignore = lock.obtain()) {
+            if (keystoreLoaded) {
+                return;
+            }
+            // We call back for the password
+            PasswordCallback pwdcb = new PasswordCallback(GT.tr("Enter SSL password: "), false);
+            try {
+                cbh.handle(new Callback[]{pwdcb});
+            } catch (UnsupportedCallbackException ucex) {
+                if ((cbh instanceof LibPQFactory.ConsoleCallbackHandler)
+                        && ("Console is not available".equals(ucex.getMessage()))) {
+                    error = new PSQLException(GT
+                            .tr("Could not read password for SSL key file, console is not available."),
+                            PSQLState.CONNECTION_FAILURE, ucex);
+                } else {
+                    error =
+                            new PSQLException(
+                                    GT.tr("Could not read password for SSL key file by callbackhandler {0}.",
+                                            cbh.getClass().getName()),
+                                    PSQLState.CONNECTION_FAILURE, ucex);
+                }
+
+            }
+
+            keyStore.load(new FileInputStream(keyfile), pwdcb.getPassword());
+            keystoreLoaded = true;
         }
-
-      }
-
-      keyStore.load(new FileInputStream(keyfile), pwdcb.getPassword());
-      keystoreLoaded = true;
     }
-  }
 
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/ssl/SingleCertValidatingFactory.java b/pgjdbc/src/main/java/org/postgresql/ssl/SingleCertValidatingFactory.java
index b42f635..5125e78 100644
--- a/pgjdbc/src/main/java/org/postgresql/ssl/SingleCertValidatingFactory.java
+++ b/pgjdbc/src/main/java/org/postgresql/ssl/SingleCertValidatingFactory.java
@@ -5,8 +5,6 @@
 
 package org.postgresql.ssl;
 
-import org.postgresql.util.GT;
-
 import java.io.BufferedInputStream;
 import java.io.ByteArrayInputStream;
 import java.io.FileInputStream;
@@ -19,11 +17,11 @@ import java.security.cert.CertificateException;
 import java.security.cert.CertificateFactory;
 import java.security.cert.X509Certificate;
 import java.util.UUID;
-
 import javax.net.ssl.SSLContext;
 import javax.net.ssl.TrustManager;
 import javax.net.ssl.TrustManagerFactory;
 import javax.net.ssl.X509TrustManager;
+import org.postgresql.util.GT;
 
 /**
  * <p>Provides a SSLSocketFactory that authenticates the remote server against an explicit pre-shared
@@ -75,132 +73,132 @@ import javax.net.ssl.X509TrustManager;
  * UCmmYqgiVkAGWRETVo+byOSDZ4swb10=
  * -----END CERTIFICATE-----
  *         </pre>
-*      </td>
+ *      </td>
  *     <td>Loaded from string value of the argument.</td>
  * </tr>
  * </table>
  */
 
 public class SingleCertValidatingFactory extends WrappedFactory {
-  private static final String FILE_PREFIX = "file:";
-  private static final String CLASSPATH_PREFIX = "classpath:";
-  private static final String ENV_PREFIX = "env:";
-  private static final String SYS_PROP_PREFIX = "sys:";
+    private static final String FILE_PREFIX = "file:";
+    private static final String CLASSPATH_PREFIX = "classpath:";
+    private static final String ENV_PREFIX = "env:";
+    private static final String SYS_PROP_PREFIX = "sys:";
 
-  public SingleCertValidatingFactory(String sslFactoryArg) throws GeneralSecurityException {
-    if (sslFactoryArg == null || "".equals(sslFactoryArg)) {
-      throw new GeneralSecurityException(GT.tr("The sslfactoryarg property may not be empty."));
-    }
-    InputStream in = null;
-    try {
-      if (sslFactoryArg.startsWith(FILE_PREFIX)) {
-        String path = sslFactoryArg.substring(FILE_PREFIX.length());
-        in = new BufferedInputStream(new FileInputStream(path));
-      } else if (sslFactoryArg.startsWith(CLASSPATH_PREFIX)) {
-        String path = sslFactoryArg.substring(CLASSPATH_PREFIX.length());
-        ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
-        InputStream inputStream;
-        if (classLoader != null) {
-          inputStream = classLoader.getResourceAsStream(path);
-          if (inputStream == null) {
-            throw new IllegalArgumentException(
-                GT.tr("Unable to find resource {0} via Thread contextClassLoader {1}",
-                    path, classLoader)
-            );
-          }
-        } else {
-          inputStream = getClass().getResourceAsStream(path);
-          if (inputStream == null) {
-            throw new IllegalArgumentException(
-                GT.tr("Unable to find resource {0} via class {1} ClassLoader {2}",
-                    path, getClass(), getClass().getClassLoader())
-            );
-          }
+    public SingleCertValidatingFactory(String sslFactoryArg) throws GeneralSecurityException {
+        if (sslFactoryArg == null || "".equals(sslFactoryArg)) {
+            throw new GeneralSecurityException(GT.tr("The sslfactoryarg property may not be empty."));
         }
-        in = new BufferedInputStream(inputStream);
-      } else if (sslFactoryArg.startsWith(ENV_PREFIX)) {
-        String name = sslFactoryArg.substring(ENV_PREFIX.length());
-        String cert = System.getenv(name);
-        if (cert == null || "".equals(cert)) {
-          throw new GeneralSecurityException(GT.tr(
-              "The environment variable containing the server's SSL certificate must not be empty."));
-        }
-        in = new ByteArrayInputStream(cert.getBytes(StandardCharsets.UTF_8));
-      } else if (sslFactoryArg.startsWith(SYS_PROP_PREFIX)) {
-        String name = sslFactoryArg.substring(SYS_PROP_PREFIX.length());
-        String cert = System.getProperty(name);
-        if (cert == null || "".equals(cert)) {
-          throw new GeneralSecurityException(GT.tr(
-              "The system property containing the server's SSL certificate must not be empty."));
-        }
-        in = new ByteArrayInputStream(cert.getBytes(StandardCharsets.UTF_8));
-      } else if (sslFactoryArg.startsWith("-----BEGIN CERTIFICATE-----")) {
-        in = new ByteArrayInputStream(sslFactoryArg.getBytes(StandardCharsets.UTF_8));
-      } else {
-        throw new GeneralSecurityException(GT.tr(
-            "The sslfactoryarg property must start with the prefix file:, classpath:, env:, sys:, or -----BEGIN CERTIFICATE-----."));
-      }
-
-      SSLContext ctx = SSLContext.getInstance("TLS");
-      ctx.init(null, new TrustManager[]{new SingleCertTrustManager(in)}, null);
-      factory = ctx.getSocketFactory();
-    } catch (RuntimeException e) {
-      throw e;
-    } catch (Exception e) {
-      if (e instanceof GeneralSecurityException) {
-        throw (GeneralSecurityException) e;
-      }
-      throw new GeneralSecurityException(GT.tr("An error occurred reading the certificate"), e);
-    } finally {
-      if (in != null) {
+        InputStream in = null;
         try {
-          in.close();
-        } catch (Exception e2) {
-          // ignore
+            if (sslFactoryArg.startsWith(FILE_PREFIX)) {
+                String path = sslFactoryArg.substring(FILE_PREFIX.length());
+                in = new BufferedInputStream(new FileInputStream(path));
+            } else if (sslFactoryArg.startsWith(CLASSPATH_PREFIX)) {
+                String path = sslFactoryArg.substring(CLASSPATH_PREFIX.length());
+                ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
+                InputStream inputStream;
+                if (classLoader != null) {
+                    inputStream = classLoader.getResourceAsStream(path);
+                    if (inputStream == null) {
+                        throw new IllegalArgumentException(
+                                GT.tr("Unable to find resource {0} via Thread contextClassLoader {1}",
+                                        path, classLoader)
+                        );
+                    }
+                } else {
+                    inputStream = getClass().getResourceAsStream(path);
+                    if (inputStream == null) {
+                        throw new IllegalArgumentException(
+                                GT.tr("Unable to find resource {0} via class {1} ClassLoader {2}",
+                                        path, getClass(), getClass().getClassLoader())
+                        );
+                    }
+                }
+                in = new BufferedInputStream(inputStream);
+            } else if (sslFactoryArg.startsWith(ENV_PREFIX)) {
+                String name = sslFactoryArg.substring(ENV_PREFIX.length());
+                String cert = System.getenv(name);
+                if (cert == null || "".equals(cert)) {
+                    throw new GeneralSecurityException(GT.tr(
+                            "The environment variable containing the server's SSL certificate must not be empty."));
+                }
+                in = new ByteArrayInputStream(cert.getBytes(StandardCharsets.UTF_8));
+            } else if (sslFactoryArg.startsWith(SYS_PROP_PREFIX)) {
+                String name = sslFactoryArg.substring(SYS_PROP_PREFIX.length());
+                String cert = System.getProperty(name);
+                if (cert == null || "".equals(cert)) {
+                    throw new GeneralSecurityException(GT.tr(
+                            "The system property containing the server's SSL certificate must not be empty."));
+                }
+                in = new ByteArrayInputStream(cert.getBytes(StandardCharsets.UTF_8));
+            } else if (sslFactoryArg.startsWith("-----BEGIN CERTIFICATE-----")) {
+                in = new ByteArrayInputStream(sslFactoryArg.getBytes(StandardCharsets.UTF_8));
+            } else {
+                throw new GeneralSecurityException(GT.tr(
+                        "The sslfactoryarg property must start with the prefix file:, classpath:, env:, sys:, or -----BEGIN CERTIFICATE-----."));
+            }
+
+            SSLContext ctx = SSLContext.getInstance("TLS");
+            ctx.init(null, new TrustManager[]{new SingleCertTrustManager(in)}, null);
+            factory = ctx.getSocketFactory();
+        } catch (RuntimeException e) {
+            throw e;
+        } catch (Exception e) {
+            if (e instanceof GeneralSecurityException) {
+                throw (GeneralSecurityException) e;
+            }
+            throw new GeneralSecurityException(GT.tr("An error occurred reading the certificate"), e);
+        } finally {
+            if (in != null) {
+                try {
+                    in.close();
+                } catch (Exception e2) {
+                    // ignore
+                }
+            }
         }
-      }
     }
-  }
 
-  public static class SingleCertTrustManager implements X509TrustManager {
-    X509Certificate cert;
-    X509TrustManager trustManager;
+    public static class SingleCertTrustManager implements X509TrustManager {
+        X509Certificate cert;
+        X509TrustManager trustManager;
 
-    public SingleCertTrustManager(InputStream in) throws IOException, GeneralSecurityException {
-      KeyStore ks = KeyStore.getInstance(KeyStore.getDefaultType());
-      // Note: KeyStore requires it be loaded even if you don't load anything into it:
-      ks.load(null);
-      CertificateFactory cf = CertificateFactory.getInstance("X509");
-      cert = (X509Certificate) cf.generateCertificate(in);
-      ks.setCertificateEntry(UUID.randomUUID().toString(), cert);
-      TrustManagerFactory tmf =
-          TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm());
-      tmf.init(ks);
-      for (TrustManager tm : tmf.getTrustManagers()) {
-        if (tm instanceof X509TrustManager) {
-          trustManager = (X509TrustManager) tm;
-          break;
+        public SingleCertTrustManager(InputStream in) throws IOException, GeneralSecurityException {
+            KeyStore ks = KeyStore.getInstance(KeyStore.getDefaultType());
+            // Note: KeyStore requires it be loaded even if you don't load anything into it:
+            ks.load(null);
+            CertificateFactory cf = CertificateFactory.getInstance("X509");
+            cert = (X509Certificate) cf.generateCertificate(in);
+            ks.setCertificateEntry(UUID.randomUUID().toString(), cert);
+            TrustManagerFactory tmf =
+                    TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm());
+            tmf.init(ks);
+            for (TrustManager tm : tmf.getTrustManagers()) {
+                if (tm instanceof X509TrustManager) {
+                    trustManager = (X509TrustManager) tm;
+                    break;
+                }
+            }
+            if (trustManager == null) {
+                throw new GeneralSecurityException(GT.tr("No X509TrustManager found"));
+            }
         }
-      }
-      if (trustManager == null) {
-        throw new GeneralSecurityException(GT.tr("No X509TrustManager found"));
-      }
-    }
 
-    @Override
-    public void checkClientTrusted(X509Certificate[] chain, String authType)
-        throws CertificateException {
-    }
+        @Override
+        public void checkClientTrusted(X509Certificate[] chain, String authType)
+                throws CertificateException {
+        }
 
-    @Override
-    public void checkServerTrusted(X509Certificate[] chain, String authType)
-        throws CertificateException {
-      trustManager.checkServerTrusted(chain, authType);
-    }
+        @Override
+        public void checkServerTrusted(X509Certificate[] chain, String authType)
+                throws CertificateException {
+            trustManager.checkServerTrusted(chain, authType);
+        }
 
-    @Override
-    public X509Certificate[] getAcceptedIssuers() {
-      return new X509Certificate[]{cert};
+        @Override
+        public X509Certificate[] getAcceptedIssuers() {
+            return new X509Certificate[]{cert};
+        }
     }
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/ssl/WrappedFactory.java b/pgjdbc/src/main/java/org/postgresql/ssl/WrappedFactory.java
index 66e940c..1e45145 100644
--- a/pgjdbc/src/main/java/org/postgresql/ssl/WrappedFactory.java
+++ b/pgjdbc/src/main/java/org/postgresql/ssl/WrappedFactory.java
@@ -8,7 +8,6 @@ package org.postgresql.ssl;
 import java.io.IOException;
 import java.net.InetAddress;
 import java.net.Socket;
-
 import javax.net.ssl.SSLSocketFactory;
 
 /**
@@ -17,46 +16,46 @@ import javax.net.ssl.SSLSocketFactory;
  */
 public abstract class WrappedFactory extends SSLSocketFactory {
 
-  // The field is indeed not initialized in this class, however it is a part of public API,
-  // so it is hard to fix.
-  @SuppressWarnings("initialization.field.uninitialized")
-  protected SSLSocketFactory factory;
+    // The field is indeed not initialized in this class, however it is a part of public API,
+    // so it is hard to fix.
+    @SuppressWarnings("initialization.field.uninitialized")
+    protected SSLSocketFactory factory;
 
-  @Override
-  public Socket createSocket(InetAddress host, int port) throws IOException {
-    return factory.createSocket(host, port);
-  }
+    @Override
+    public Socket createSocket(InetAddress host, int port) throws IOException {
+        return factory.createSocket(host, port);
+    }
 
-  @Override
-  public Socket createSocket(String host, int port) throws IOException {
-    return factory.createSocket(host, port);
-  }
+    @Override
+    public Socket createSocket(String host, int port) throws IOException {
+        return factory.createSocket(host, port);
+    }
 
-  @Override
-  public Socket createSocket(String host, int port, InetAddress localHost, int localPort)
-      throws IOException {
-    return factory.createSocket(host, port, localHost, localPort);
-  }
+    @Override
+    public Socket createSocket(String host, int port, InetAddress localHost, int localPort)
+            throws IOException {
+        return factory.createSocket(host, port, localHost, localPort);
+    }
 
-  @Override
-  public Socket createSocket(InetAddress address, int port, InetAddress localAddress, int localPort)
-      throws IOException {
-    return factory.createSocket(address, port, localAddress, localPort);
-  }
+    @Override
+    public Socket createSocket(InetAddress address, int port, InetAddress localAddress, int localPort)
+            throws IOException {
+        return factory.createSocket(address, port, localAddress, localPort);
+    }
 
-  @Override
-  public Socket createSocket(Socket socket, String host, int port, boolean autoClose)
-      throws IOException {
-    return factory.createSocket(socket, host, port, autoClose);
-  }
+    @Override
+    public Socket createSocket(Socket socket, String host, int port, boolean autoClose)
+            throws IOException {
+        return factory.createSocket(socket, host, port, autoClose);
+    }
 
-  @Override
-  public String[] getDefaultCipherSuites() {
-    return factory.getDefaultCipherSuites();
-  }
+    @Override
+    public String[] getDefaultCipherSuites() {
+        return factory.getDefaultCipherSuites();
+    }
 
-  @Override
-  public String[] getSupportedCipherSuites() {
-    return factory.getSupportedCipherSuites();
-  }
+    @Override
+    public String[] getSupportedCipherSuites() {
+        return factory.getSupportedCipherSuites();
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/ssl/jdbc4/LibPQFactory.java b/pgjdbc/src/main/java/org/postgresql/ssl/jdbc4/LibPQFactory.java
index 40e5139..7e806f2 100644
--- a/pgjdbc/src/main/java/org/postgresql/ssl/jdbc4/LibPQFactory.java
+++ b/pgjdbc/src/main/java/org/postgresql/ssl/jdbc4/LibPQFactory.java
@@ -5,82 +5,80 @@
 
 package org.postgresql.ssl.jdbc4;
 
+import java.net.IDN;
+import java.util.Properties;
+import javax.net.ssl.HostnameVerifier;
+import javax.net.ssl.SSLSession;
 import org.postgresql.jdbc.SslMode;
 import org.postgresql.ssl.PGjdbcHostnameVerifier;
 import org.postgresql.util.PSQLException;
 
-import java.net.IDN;
-import java.util.Properties;
-
-import javax.net.ssl.HostnameVerifier;
-import javax.net.ssl.SSLSession;
-
 /**
  * @deprecated prefer {@link org.postgresql.ssl.LibPQFactory}
  */
 @Deprecated
 public class LibPQFactory extends org.postgresql.ssl.LibPQFactory implements HostnameVerifier {
-  private final SslMode sslMode;
+    private final SslMode sslMode;
 
-  /**
-   * @param info the connection parameters The following parameters are used:
-   *             sslmode,sslcert,sslkey,sslrootcert,sslhostnameverifier,sslpasswordcallback,sslpassword
-   * @throws PSQLException if security error appears when initializing factory
-   * @deprecated prefer {@link org.postgresql.ssl.LibPQFactory}
-   */
-  @Deprecated
-  public LibPQFactory(Properties info) throws PSQLException {
-    super(info);
+    /**
+     * @param info the connection parameters The following parameters are used:
+     *             sslmode,sslcert,sslkey,sslrootcert,sslhostnameverifier,sslpasswordcallback,sslpassword
+     * @throws PSQLException if security error appears when initializing factory
+     * @deprecated prefer {@link org.postgresql.ssl.LibPQFactory}
+     */
+    @Deprecated
+    public LibPQFactory(Properties info) throws PSQLException {
+        super(info);
 
-    sslMode = SslMode.of(info);
-  }
-
-  /**
-   * Verifies if given hostname matches pattern.
-   *
-   * @param hostname input hostname
-   * @param pattern domain name pattern
-   * @return true when domain matches pattern
-   * @deprecated use {@link PGjdbcHostnameVerifier}
-   */
-  @Deprecated
-  public static boolean verifyHostName(String hostname, String pattern) {
-    String canonicalHostname;
-    if (hostname.startsWith("[") && hostname.endsWith("]")) {
-      // IPv6 address like [2001:db8:0:1:1:1:1:1]
-      canonicalHostname = hostname.substring(1, hostname.length() - 1);
-    } else {
-      // This converts unicode domain name to ASCII
-      try {
-        canonicalHostname = IDN.toASCII(hostname);
-      } catch (IllegalArgumentException e) {
-        // e.g. hostname is invalid
-        return false;
-      }
+        sslMode = SslMode.of(info);
     }
-    return PGjdbcHostnameVerifier.INSTANCE.verifyHostName(canonicalHostname, pattern);
-  }
 
-  /**
-   * Verifies the server certificate according to the libpq rules. The cn attribute of the
-   * certificate is matched against the hostname. If the cn attribute starts with an asterisk (*),
-   * it will be treated as a wildcard, and will match all characters except a dot (.). This means
-   * the certificate will not match subdomains. If the connection is made using an IP address
-   * instead of a hostname, the IP address will be matched (without doing any DNS lookups).
-   *
-   * @param hostname Hostname or IP address of the server.
-   * @param session The SSL session.
-   * @return true if the certificate belongs to the server, false otherwise.
-   * @see PGjdbcHostnameVerifier
-   * @deprecated use PgjdbcHostnameVerifier
-   */
-  @Deprecated
-  @Override
-  public boolean verify(String hostname, SSLSession session) {
-    if (!sslMode.verifyPeerName()) {
-      return true;
+    /**
+     * Verifies if given hostname matches pattern.
+     *
+     * @param hostname input hostname
+     * @param pattern  domain name pattern
+     * @return true when domain matches pattern
+     * @deprecated use {@link PGjdbcHostnameVerifier}
+     */
+    @Deprecated
+    public static boolean verifyHostName(String hostname, String pattern) {
+        String canonicalHostname;
+        if (hostname.startsWith("[") && hostname.endsWith("]")) {
+            // IPv6 address like [2001:db8:0:1:1:1:1:1]
+            canonicalHostname = hostname.substring(1, hostname.length() - 1);
+        } else {
+            // This converts unicode domain name to ASCII
+            try {
+                canonicalHostname = IDN.toASCII(hostname);
+            } catch (IllegalArgumentException e) {
+                // e.g. hostname is invalid
+                return false;
+            }
+        }
+        return PGjdbcHostnameVerifier.INSTANCE.verifyHostName(canonicalHostname, pattern);
+    }
+
+    /**
+     * Verifies the server certificate according to the libpq rules. The cn attribute of the
+     * certificate is matched against the hostname. If the cn attribute starts with an asterisk (*),
+     * it will be treated as a wildcard, and will match all characters except a dot (.). This means
+     * the certificate will not match subdomains. If the connection is made using an IP address
+     * instead of a hostname, the IP address will be matched (without doing any DNS lookups).
+     *
+     * @param hostname Hostname or IP address of the server.
+     * @param session  The SSL session.
+     * @return true if the certificate belongs to the server, false otherwise.
+     * @see PGjdbcHostnameVerifier
+     * @deprecated use {@link PGjdbcHostnameVerifier}
+     */
+    @Deprecated
+    @Override
+    public boolean verify(String hostname, SSLSession session) {
+        if (!sslMode.verifyPeerName()) {
+            return true;
+        }
+        return PGjdbcHostnameVerifier.INSTANCE.verify(hostname, session);
     }
-    return PGjdbcHostnameVerifier.INSTANCE.verify(hostname, session);
-  }
 
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/translation/messages_bg.java b/pgjdbc/src/main/java/org/postgresql/translation/messages_bg.java
index 5c70c93..7443ed3 100644
--- a/pgjdbc/src/main/java/org/postgresql/translation/messages_bg.java
+++ b/pgjdbc/src/main/java/org/postgresql/translation/messages_bg.java
@@ -5,454 +5,461 @@ import java.util.MissingResourceException;
 import java.util.ResourceBundle;
 
 public class messages_bg extends ResourceBundle {
-  private static final String[] table;
-  static {
-    String[] t = new String[890];
-    t[0] = "";
-    t[1] = "Project-Id-Version: JDBC Driver for PostgreSQL 8.x\nReport-Msgid-Bugs-To: \nPO-Revision-Date: 2009-12-28 00:01+0100\nLast-Translator: <usun0v@mail.bg>\nLanguage-Team: <bg@li.org>\nLanguage: \nMIME-Version: 1.0\nContent-Type: text/plain; charset=UTF-8\nContent-Transfer-Encoding: 8bit\nX-Poedit-Language: Bulgarian\nX-Poedit-Country: BULGARIA\n";
-    t[2] = "A CallableStatement function was executed and the out parameter {0} was of type {1} however type {2} was registered.";
-    t[3] = "CallableStatement функция бе обработена и изходния параметър {0} бе от тип {1}, обаче тип {2} бе използван.";
-    t[6] = "Too many update results were returned.";
-    t[7] = "Твърде много резултати бяха получени при актуализацията.";
-    t[10] = "There are no rows in this ResultSet.";
-    t[11] = "В този ResultSet няма редове.";
-    t[14] = "Detail: {0}";
-    t[15] = "Подробност: {0}";
-    t[20] = "Invalid fetch direction constant: {0}.";
-    t[21] = "Невалидна константа за fetch посоката: {0}.";
-    t[22] = "No function outputs were registered.";
-    t[23] = "Резултати от функцията не бяха регистрирани.";
-    t[24] = "The array index is out of range: {0}";
-    t[25] = "Индексът на масив е извън обхвата: {0}";
-    t[26] = "The authentication type {0} is not supported. Check that you have configured the pg_hba.conf file to include the client''s IP address or subnet, and that it is using an authentication scheme supported by the driver.";
-    t[27] = "Тип на удостоверяване {0} не се поддържа. Проверете дали сте конфигурирали pg_hba.conf файла, да включва IP адреса на клиента или подмрежата, и че се използва схема за удостоверяване, поддържана от драйвъра.";
-    t[28] = "The server requested password-based authentication, but no password was provided.";
-    t[29] = "Сървърът изисква идентифициране с парола, но парола не бе въведена.";
-    t[40] = "Large Objects may not be used in auto-commit mode.";
-    t[41] = "Големи обекти LOB не могат да се използват в auto-commit модус.";
-    t[46] = "Operation requires a scrollable ResultSet, but this ResultSet is FORWARD_ONLY.";
-    t[47] = "Операцията изисква резултатите да са scrollable, но този ResultSet е FORWARD_ONLY.";
-    t[48] = "Zero bytes may not occur in string parameters.";
-    t[49] = "Не може да има нула байта в низ параметрите.";
-    t[50] = "The JVM claims not to support the encoding: {0}";
-    t[51] = "JVM не поддържа тази кодова таблица за момента: {0}";
-    t[54] = "Your security policy has prevented the connection from being attempted.  You probably need to grant the connect java.net.SocketPermission to the database server host and port that you wish to connect to.";
-    t[55] = "Връзката не бе осъществена, поради вашите настройки за сигурност. Може би трябва да предоставите java.net.SocketPermission права на сървъра и порта с базата данни, към който искате да се свържете.";
-    t[62] = "Database connection failed when canceling copy operation";
-    t[63] = "Неосъществена връзка към базата данни при прекъсване на копирането";
-    t[78] = "Error loading default settings from driverconfig.properties";
-    t[79] = "Грешка при зареждане на настройките по подразбиране от файла driverconfig.properties";
-    t[82] = "Returning autogenerated keys is not supported.";
-    t[83] = "Автоматично генерирани ключове не се поддържат.";
-    t[92] = "Unable to find name datatype in the system catalogs.";
-    t[93] = "Не може да се намери името на типа данни в системните каталози.";
-    t[94] = "Tried to read from inactive copy";
-    t[95] = "Опит за четене при неактивно копиране";
-    t[96] = "ResultSet is not updateable.  The query that generated this result set must select only one table, and must select all primary keys from that table. See the JDBC 2.1 API Specification, section 5.6 for more details.";
-    t[97] = "ResultSet не може да се обновява. Заявката генерираща този резултат трябва да селектира само една таблица, както и всички първични ключове в нея. За повече информация, вижте раздел 5.6 на JDBC 2.1 API Specification.";
-    t[98] = "Cannot cast an instance of {0} to type {1}";
-    t[99] = "Не може да преобразува инстанция на {0} към тип {1}";
-    t[102] = "Requested CopyOut but got {0}";
-    t[103] = "Зададено CopyOut но получено {0}";
-    t[106] = "Not implemented: Prepare must be issued using the same connection that started the transaction. currentXid={0}, prepare xid={1}";
-    t[107] = "Невъзможна комбинация: Prepare трябва да бъде издадено чрез използване на същата връзка, при която е започната транзакцията. currentXid={0}, prepare xid={1}";
-    t[108] = "Can''t use query methods that take a query string on a PreparedStatement.";
-    t[109] = "Не може да се употребяват методи за заявка, които ползват низове на PreparedStatement.";
-    t[114] = "Conversion of money failed.";
-    t[115] = "Неуспешно валутно преобразуване.";
-    t[118] = "Tried to obtain lock while already holding it";
-    t[119] = "Опит за получаване на заключване/резервация докато вече е получено";
-    t[120] = "This SQLXML object has not been initialized, so you cannot retrieve data from it.";
-    t[121] = "Този SQLXML обект не е инициализиран, така че не могат да се извличат данни от него.";
-    t[122] = "This SQLXML object has already been freed.";
-    t[123] = "Този SQLXML обект вече е освободен.";
-    t[124] = "Invalid stream length {0}.";
-    t[125] = "Невалидна дължина {0} на потока данни.";
-    t[130] = "Position: {0}";
-    t[131] = "Позиция: {0}";
-    t[132] = "The server does not support SSL.";
-    t[133] = "Сървърът не поддържа SSL.";
-    t[134] = "Got {0} error responses to single copy cancel request";
-    t[135] = "Получени {0} отговори за грешка при единствено искане да се прекъсне копирането";
-    t[136] = "DataSource has been closed.";
-    t[137] = "Източникът на данни е прекъснат.";
-    t[138] = "Unable to convert DOMResult SQLXML data to a string.";
-    t[139] = "Не може да преобразува DOMResult SQLXML данни в низ.";
-    t[144] = "Invalid UUID data.";
-    t[145] = "Невалидни UUID данни.";
-    t[148] = "The fastpath function {0} is unknown.";
-    t[149] = "Функцията {0} е неизвестна.";
-    t[154] = "Connection has been closed.";
-    t[155] = "Връзката бе прекъсната.";
-    t[156] = "This statement does not declare an OUT parameter.  Use '{' ?= call ... '}' to declare one.";
-    t[157] = "Тази заявка не декларира изходен параметър. Ползвайте '{' ?= call ... '}' за да декларирате такъв.";
-    t[158] = "A connection could not be made using the requested protocol {0}.";
-    t[159] = "Не може да осъществи връзка, ползвайки искания протокол {0}.";
-    t[162] = "The maximum field size must be a value greater than or equal to 0.";
-    t[163] = "Максималният размер на полето трябва да бъде стойност по-голяма или равна на 0.";
-    t[166] = "GSS Authentication failed";
-    t[167] = "GSS удостоверяването бе неуспешно";
-    t[176] = "Unknown XML Result class: {0}";
-    t[177] = "Неизвестен XML изходящ клас: {0}";
-    t[180] = "Server SQLState: {0}";
-    t[181] = "SQL статус на сървъра: {0}";
-    t[182] = "Unknown Response Type {0}.";
-    t[183] = "Неизвестен тип на отговор {0}.";
-    t[186] = "Tried to cancel an inactive copy operation";
-    t[187] = "Опит за прекъсване на неактивно копиране";
-    t[190] = "This PooledConnection has already been closed.";
-    t[191] = "Тази PooledConnection връзка бе вече прекъсната.";
-    t[200] = "Multiple ResultSets were returned by the query.";
-    t[201] = "Заявката върна няколко ResultSets.";
-    t[202] = "Finalizing a Connection that was never closed:";
-    t[203] = "Приключване на връзка, която не бе прекъсната:";
-    t[204] = "Unsupported Types value: {0}";
-    t[205] = "Неподдържана стойност за тип: {0}";
-    t[206] = "A CallableStatement was declared, but no call to registerOutParameter(1, <some type>) was made.";
-    t[207] = "CallableStatement функция бе декларирана, но обработена като registerOutParameter(1, <some type>) ";
-    t[208] = "Cannot retrieve the name of an unnamed savepoint.";
-    t[209] = "Не може да определи името на неупомената savepoint.";
-    t[220] = "Cannot change transaction read-only property in the middle of a transaction.";
-    t[221] = "Не може да променяте правата на транзакцията по време на нейното извършване.";
-    t[222] = "Bind message length {0} too long.  This can be caused by very large or incorrect length specifications on InputStream parameters.";
-    t[223] = "Прекалено голяма дължина {0} на съобщението. Това може да е причинено от прекалено голяма или неправилно зададена дължина на InputStream параметри.";
-    t[224] = "The parameter index is out of range: {0}, number of parameters: {1}.";
-    t[225] = "Параметърният индекс е извън обхват: {0}, брой параметри: {1}.";
-    t[226] = "Transaction isolation level {0} not supported.";
-    t[227] = "Изолационно ниво на транзакциите {0} не се поддържа.";
-    t[234] = "Cannot update the ResultSet because it is either before the start or after the end of the results.";
-    t[235] = "Не може да се обнови ResultSet, когато се намираме преди началото или след края на резултатите.";
-    t[238] = "tried to call end without corresponding start call. state={0}, start xid={1}, currentXid={2}, preparedXid={3}";
-    t[239] = "опита да извика end без съответстващо извикване на start. state={0}, start xid={1}, currentXid={2}, preparedXid={3}";
-    t[242] = "This SQLXML object has already been initialized, so you cannot manipulate it further.";
-    t[243] = "Този SQLXML обект вече е инициализиран и не може да бъде променен.";
-    t[250] = "Conversion to type {0} failed: {1}.";
-    t[251] = "Неуспешно преобразуване към тип {0}: {1}.";
-    t[252] = "The SSLSocketFactory class provided {0} could not be instantiated.";
-    t[253] = "Класът SSLSocketFactory връща {0} и не може да бъде инстанцииран.";
-    t[254] = "Unable to create SAXResult for SQLXML.";
-    t[255] = "Не може да се създаде SAXResult за SQLXML.";
-    t[256] = "Interrupted while attempting to connect.";
-    t[257] = "Опита за осъществяване на връзка бе своевременно прекъснат. ";
-    t[260] = "Protocol error.  Session setup failed.";
-    t[261] = "Грешка в протокола. Неуспешна настройка на сесията.";
-    t[264] = "Database connection failed when starting copy";
-    t[265] = "Неосъществена връзка към базата данни при започване на копирането";
-    t[272] = "Cannot call cancelRowUpdates() when on the insert row.";
-    t[273] = "Не може да се изпълни cancelRowUpdates() метода, когато се намираме при редицата на въвеждане.";
-    t[274] = "Unable to bind parameter values for statement.";
-    t[275] = "Не може да подготви параметрите на командата.";
-    t[280] = "A result was returned when none was expected.";
-    t[281] = "Бе получен резултат, когато такъв не бе очакван.";
-    t[282] = "The server''s standard_conforming_strings parameter was reported as {0}. The JDBC driver expected on or off.";
-    t[283] = "Параметърът standard_conforming_strings при сървъра бе докладван като {0}. JDBC драйвъра очаква този параметър да бъде on или off.";
-    t[284] = "Unable to translate data into the desired encoding.";
-    t[285] = "Невъзможно преобразуване на данни в желаното кодиране.";
-    t[292] = "PostgreSQL LOBs can only index to: {0}";
-    t[293] = "PostgreSQL индексира големи обекти LOB само до: {0}";
-    t[294] = "Provided InputStream failed.";
-    t[295] = "Зададения InputStream поток е неуспешен.";
-    t[296] = "Invalid protocol state requested. Attempted transaction interleaving is not supported. xid={0}, currentXid={1}, state={2}, flags={3}";
-    t[297] = "Транзакция в транзакция не се поддържа за момента. xid={0}, currentXid={1}, state={2}, flags={3}";
-    t[304] = "{0} function takes four and only four argument.";
-    t[305] = "Функцията {0} може да приеме четири и само четири аргумента.";
-    t[306] = "{0} function doesn''t take any argument.";
-    t[307] = "Функцията {0} не може да приема аргументи.";
-    t[310] = "Got CopyOutResponse from server during an active {0}";
-    t[311] = "Получен CopyOutResponse отговор от сървъра при активно {0}";
-    t[322] = "No value specified for parameter {0}.";
-    t[323] = "Няма стойност, определена за параметър {0}.";
-    t[324] = "Illegal UTF-8 sequence: initial byte is {0}: {1}";
-    t[325] = "Невалидна UTF-8 последователност: първоначален байт е {0}: {1}";
-    t[326] = "Error disabling autocommit";
-    t[327] = "Грешка при изключване на autocommit";
-    t[328] = "Illegal UTF-8 sequence: byte {0} of {1} byte sequence is not 10xxxxxx: {2}";
-    t[329] = "Невалидна UTF-8 последователност: байта {0} от байтова последователност {1} не е 10xxxxxx: {2}";
-    t[330] = "Received CommandComplete ''{0}'' without an active copy operation";
-    t[331] = "Получено командно допълнение ''{0}'' без активна команда за копиране";
-    t[332] = "Illegal UTF-8 sequence: final value is out of range: {0}";
-    t[333] = "Невалидна UTF-8 последователност: крайната стойност е извън стойностните граници: {0}";
-    t[336] = "Cannot change transaction isolation level in the middle of a transaction.";
-    t[337] = "Не може да променяте изолационното ниво на транзакцията по време на нейното извършване.";
-    t[340] = "An unexpected result was returned by a query.";
-    t[341] = "Заявката върна неочакван резултат.";
-    t[346] = "Conversion of interval failed";
-    t[347] = "Неуспешно преобразуване на интервал";
-    t[350] = "This ResultSet is closed.";
-    t[351] = "Операциите по този ResultSet са били прекратени.";
-    t[352] = "Read from copy failed.";
-    t[353] = "Четене от копието неуспешно.";
-    t[354] = "Unable to load the class {0} responsible for the datatype {1}";
-    t[355] = "Невъзможно е зареждането на клас {0}, отговарящ за типа данни {1}";
-    t[356] = "Failed to convert binary xml data to encoding: {0}.";
-    t[357] = "Неуспешно преобразуване на двоични XML данни за кодиране съгласно: {0}.";
-    t[362] = "Connection attempt timed out.";
-    t[363] = "Времето за осъществяване на връзката изтече (таймаут).";
-    t[364] = "Expected command status BEGIN, got {0}.";
-    t[365] = "Очаквана команда BEGIN, получена {0}.";
-    t[372] = "This copy stream is closed.";
-    t[373] = "Потока за копиране на данните е затворен.";
-    t[376] = "Can''t infer the SQL type to use for an instance of {0}. Use setObject() with an explicit Types value to specify the type to use.";
-    t[377] = "Не може да се определи SQL тип, който да се използва за инстанцията на {0}. Ползвайте метода setObject() с точни стойности, за да определите типа.";
-    t[378] = "Can''t refresh the insert row.";
-    t[379] = "Не може да обнови въведения ред.";
-    t[382] = "You must specify at least one column value to insert a row.";
-    t[383] = "Трябва да посочите поне една стойност за колона, за да вмъкнете ред.";
-    t[388] = "Connection is busy with another transaction";
-    t[389] = "Връзката е заета с друга транзакция";
-    t[392] = "Bad value for type {0} : {1}";
-    t[393] = "Невалидна стойност за тип {0} : {1}";
-    t[396] = "This statement has been closed.";
-    t[397] = "Командата е извършена.";
-    t[404] = "No primary key found for table {0}.";
-    t[405] = "Няма първичен ключ за таблица {0}.";
-    t[406] = "Currently positioned after the end of the ResultSet.  You cannot call deleteRow() here.";
-    t[407] = "В момента се намираме преди края на ResultSet. Тук не може да се изпълни deleteRow() метода.";
-    t[414] = "{0} function takes two or three arguments.";
-    t[415] = "Функцията {0} може да приеме два или три аргумента.";
-    t[416] = "{0} function takes three and only three arguments.";
-    t[417] = "Функцията {0} може да приеме три и само три аргумента.";
-    t[418] = "Unable to find server array type for provided name {0}.";
-    t[419] = "Не може да се намери типа на сървърен масив за зададеното име {0}.";
-    t[420] = "Fastpath call {0} - No result was returned and we expected an integer.";
-    t[421] = "Извикване на {0} - няма резултати и а бе очаквано цяло число.";
-    t[426] = "Database connection failed when ending copy";
-    t[427] = "Неосъществена връзка към базата данни при завършване на копирането";
-    t[428] = "Cannot write to copy a byte of value {0}";
-    t[429] = "Няма пишещи права, за да копира байтова стойност {0}";
-    t[430] = "Results cannot be retrieved from a CallableStatement before it is executed.";
-    t[431] = "Резултати от CallableStatement функция не могат да бъдат получени, преди тя да бъде обработена.";
-    t[432] = "Cannot reference a savepoint after it has been released.";
-    t[433] = "Не може да референцира savepoint, след като е била освободена.";
-    t[434] = "Failed to create object for: {0}.";
-    t[435] = "Неуспешно създаване на обект за: {0}.";
-    t[438] = "Unexpected packet type during copy: {0}";
-    t[439] = "Неочакван тип пакет при копиране: {0}";
-    t[442] = "Unable to determine a value for MaxIndexKeys due to missing system catalog data.";
-    t[443] = "Невъзможно е да се определи стойността за MaxIndexKeys поради липса на системния каталог с данни.";
-    t[444] = "Tried to end inactive copy";
-    t[445] = "Опит за прекъсване на неактивно копиране";
-    t[450] = "Unexpected copydata from server for {0}";
-    t[451] = "Неочаквано CopyData от сървъра за {0}";
-    t[460] = "Zero bytes may not occur in identifiers.";
-    t[461] = "Не може да има нула байта в идентификаторите.";
-    t[462] = "Error during one-phase commit. commit xid={0}";
-    t[463] = "Грешка при едно-фазов commit. commit xid={0}";
-    t[464] = "Ran out of memory retrieving query results.";
-    t[465] = "Недостатъчна памет при представяна на резултатите от заявката.";
-    t[468] = "Unable to create StAXResult for SQLXML";
-    t[469] = "Не може да се създаде StAXResult за SQLXML.";
-    t[470] = "Location: File: {0}, Routine: {1}, Line: {2}";
-    t[471] = "Местоположение: Файл: {0}, Функция: {1}, Ред: {2}";
-    t[482] = "A CallableStatement was executed with an invalid number of parameters";
-    t[483] = "CallableStatement функция бе обработена, но с непозволен брой параметри.";
-    t[486] = "Illegal UTF-8 sequence: {0} bytes used to encode a {1} byte value: {2}";
-    t[487] = "Невалидна UTF-8 последователност: {0} байта използвани за кодирането на {1} байтова стойност: {2}";
-    t[496] = "Interrupted while waiting to obtain lock on database connection";
-    t[497] = "Прекъсване при чакане да получи заключване/резервация при връзка към базата данни";
-    t[502] = "LOB positioning offsets start at 1.";
-    t[503] = "Позиционалният офсет при големи обекти LOB започва от 1.";
-    t[506] = "Returning autogenerated keys by column index is not supported.";
-    t[507] = "Автоматично генерирани ключове спрямо индекс на колона не се поддържат.";
-    t[510] = "Currently positioned before the start of the ResultSet.  You cannot call deleteRow() here.";
-    t[511] = "В момента се намираме в началото на ResultSet. Тук не може да се изпълни deleteRow() метода.";
-    t[524] = "Truncation of large objects is only implemented in 8.3 and later servers.";
-    t[525] = "Скъсяване на големи обекти LOB е осъществено само във версии след 8.3.";
-    t[526] = "Statement has been closed.";
-    t[527] = "Командата е завършена.";
-    t[540] = "Database connection failed when writing to copy";
-    t[541] = "Неосъществена връзка към базата данни при опит за копиране";
-    t[544] = "The server''s DateStyle parameter was changed to {0}. The JDBC driver requires DateStyle to begin with ISO for correct operation.";
-    t[545] = "Параметърът DateStyle при сървъра бе променен на {0}. JDBC драйвъра изисква DateStyle започва с ISO за да функционира правилно.";
-    t[546] = "Provided Reader failed.";
-    t[547] = "Грешка с ползвания четец.";
-    t[550] = "Not on the insert row.";
-    t[551] = "Не сме в редицата на въвеждане.";
-    t[566] = "Unable to decode xml data.";
-    t[567] = "Не може да декодира XML данните.";
-    t[570] = "Not implemented: 2nd phase commit must be issued using an idle connection. commit xid={0}, currentXid={1}, state={2}, transactionState={3}";
-    t[571] = "Невъзможна комбинация: втората фаза на commit задължително трябва да бъде издадена при свободна връзка. commit xid={0}, currentXid={1}, state={2}, transactionState={3}";
-    t[596] = "Tried to write to an inactive copy operation";
-    t[597] = "Опит за писане при неактивна операция за копиране";
-    t[606] = "An error occurred while setting up the SSL connection.";
-    t[607] = "Възникна грешка при осъществяване на SSL връзката.";
-    t[614] = "Something unusual has occurred to cause the driver to fail. Please report this exception.";
-    t[615] = "Възникна неочаквана грешка с драйвъра. Моля докадвайте това изключение. ";
-    t[618] = "No results were returned by the query.";
-    t[619] = "Няма намерени резултати за заявката.";
-    t[620] = "ClientInfo property not supported.";
-    t[621] = "Информацията за ClientInfo не се поддържа.";
-    t[622] = "Unexpected error writing large object to database.";
-    t[623] = "Неочаквана грешка при записване на голям обект LOB в базата данни.";
-    t[628] = "The JVM claims not to support the {0} encoding.";
-    t[629] = "JVM не поддържа за момента {0} кодовата таблица.";
-    t[630] = "Unknown XML Source class: {0}";
-    t[631] = "Неизвестен XML входящ клас: {0}";
-    t[632] = "Interval {0} not yet implemented";
-    t[633] = "Интервалът {0} не е валиден все още.";
-    t[636] = "commit called before end. commit xid={0}, state={1}";
-    t[637] = "commit извикан преди end. commit xid={0}, state={1}";
-    t[638] = "Tried to break lock on database connection";
-    t[639] = "Опит за премахване на заключването/резервацията при връзка към базата данни";
-    t[642] = "Missing expected error response to copy cancel request";
-    t[643] = "Липсва очакван отговор при грешка да прекъсне копирането";
-    t[644] = "Maximum number of rows must be a value grater than or equal to 0.";
-    t[645] = "Максималният брой редове трябва да бъде стойност по-голяма или равна на 0.";
-    t[652] = "Requested CopyIn but got {0}";
-    t[653] = "Зададено CopyIn но получено {0}";
-    t[656] = "Parameter of type {0} was registered, but call to get{1} (sqltype={2}) was made.";
-    t[657] = "Отчетен параметър от тип {0}, но обработено като get{1} (sqltype={2}). ";
-    t[662] = "Unsupported value for stringtype parameter: {0}";
-    t[663] = "Непозволена стойност за StringType параметър: {0}";
-    t[664] = "Fetch size must be a value greater to or equal to 0.";
-    t[665] = "Размера за fetch size трябва да бъде по-голям или равен на 0.";
-    t[670] = "Cannot tell if path is open or closed: {0}.";
-    t[671] = "Не може да определи дали адреса е отворен или затворен: {0}.";
-    t[672] = "Expected an EOF from server, got: {0}";
-    t[673] = "Очакван край на файла от сървъра, но получено: {0}";
-    t[680] = "Copying from database failed: {0}";
-    t[681] = "Копирането от базата данни бе неуспешно: {0}";
-    t[682] = "Connection has been closed automatically because a new connection was opened for the same PooledConnection or the PooledConnection has been closed.";
-    t[683] = "Връзката бе автоматично прекъсната, защото нова връзка за същата беше осъществена или PooledConnection връзката е вече прекъсната.";
-    t[698] = "Custom type maps are not supported.";
-    t[699] = "Специфични типови съответствия не се поддържат.";
-    t[700] = "xid must not be null";
-    t[701] = "xid не може да бъде null";
-    t[706] = "Internal Position: {0}";
-    t[707] = "Вътрешна позиция: {0}";
-    t[708] = "Error during recover";
-    t[709] = "Грешка при възстановяване";
-    t[712] = "Method {0} is not yet implemented.";
-    t[713] = "Методът {0} все още не е функционален.";
-    t[714] = "Unexpected command status: {0}.";
-    t[715] = "Неочакван статус на команда: {0}.";
-    t[718] = "The column index is out of range: {0}, number of columns: {1}.";
-    t[719] = "Индексът на колоната е извън стойностен обхват: {0}, брой колони: {1}.";
-    t[730] = "Unknown ResultSet holdability setting: {0}.";
-    t[731] = "Неизвестна ResultSet holdability настройка: {0}.";
-    t[734] = "Cannot call deleteRow() when on the insert row.";
-    t[735] = "Не може да се изпълни deleteRow() метода, когато се намираме при редицата на въвеждане.";
-    t[740] = "ResultSet not positioned properly, perhaps you need to call next.";
-    t[741] = "ResultSet не е референциран правилно. Вероятно трябва да придвижите курсора посредством next.";
-    t[742] = "wasNull cannot be call before fetching a result.";
-    t[743] = "wasNull не може да бьде изпълнен, преди наличието на резултата.";
-    t[746] = "{0} function takes two and only two arguments.";
-    t[747] = "Функцията {0} може да приеме два и само два аргумента.";
-    t[750] = "Malformed function or procedure escape syntax at offset {0}.";
-    t[751] = "Непозволен синтаксис на функция или процедура при офсет {0}.";
-    t[752] = "Premature end of input stream, expected {0} bytes, but only read {1}.";
-    t[753] = "Преждевременен край на входящ поток на данни, очаквани {0} байта, но прочетени само {1}.";
-    t[756] = "Got CopyData without an active copy operation";
-    t[757] = "Получено CopyData без наличие на активна операция за копиране";
-    t[758] = "Cannot retrieve the id of a named savepoint.";
-    t[759] = "Не може да определи ID на спомената savepoint.";
-    t[770] = "Where: {0}";
-    t[771] = "Където: {0}";
-    t[778] = "Got CopyInResponse from server during an active {0}";
-    t[779] = "Получен CopyInResponse отговор от сървъра при активно {0}";
-    t[780] = "Cannot convert an instance of {0} to type {1}";
-    t[781] = "Не може да преобразува инстанцията на {0} във вида {1}";
-    t[784] = "Not implemented: one-phase commit must be issued using the same connection that was used to start it";
-    t[785] = "Невъзможна комбинация: едно-фазов commit трябва да бъде издаден чрез използване на същата връзка, при която е започнал";
-    t[790] = "Invalid flags {0}";
-    t[791] = "Невалидни флагове {0}";
-    t[798] = "Query timeout must be a value greater than or equals to 0.";
-    t[799] = "Времето за изпълнение на заявката трябва да бъде стойност по-голяма или равна на 0.";
-    t[802] = "Hint: {0}";
-    t[803] = "Забележка: {0}";
-    t[810] = "The array index is out of range: {0}, number of elements: {1}.";
-    t[811] = "Индексът на масив е извън обхвата: {0}, брой елементи: {1}.";
-    t[812] = "Internal Query: {0}";
-    t[813] = "Вътрешна заявка: {0}";
-    t[816] = "CommandComplete expected COPY but got: ";
-    t[817] = "Очаквано командно допълнение COPY но получено: ";
-    t[824] = "Illegal UTF-8 sequence: final value is a surrogate value: {0}";
-    t[825] = "Невалидна UTF-8 последователност: крайната стойност е заместителна стойност: {0}";
-    t[826] = "Unknown type {0}.";
-    t[827] = "Неизвестен тип {0}.";
-    t[828] = "ResultSets with concurrency CONCUR_READ_ONLY cannot be updated.";
-    t[829] = "ResultSets с concurrency CONCUR_READ_ONLY не могат да бъдат актуализирани.";
-    t[830] = "The connection attempt failed.";
-    t[831] = "Опита за връзка бе неуспешен.";
-    t[834] = "{0} function takes one and only one argument.";
-    t[835] = "Функцията {0} може да приеме само един единствен аргумент.";
-    t[838] = "suspend/resume not implemented";
-    t[839] = "спиране / започване не се поддържа за момента";
-    t[840] = "Error preparing transaction. prepare xid={0}";
-    t[841] = "Грешка при подготвяне на транзакция. prepare xid={0}";
-    t[842] = "The driver currently does not support COPY operations.";
-    t[843] = "За момента драйвъра не поддържа COPY команди.";
-    t[852] = "Heuristic commit/rollback not supported. forget xid={0}";
-    t[853] = "Евристичен commit или rollback не се поддържа. forget xid={0}";
-    t[856] = "Invalid character data was found.  This is most likely caused by stored data containing characters that are invalid for the character set the database was created in.  The most common example of this is storing 8bit data in a SQL_ASCII database.";
-    t[857] = "Бяха намерени невалидни данни. Това най-вероятно се дължи на съхранявани данни, съдържащи символи, които са невалидни за набора от знаци при създаване на базата данни. Чест пример за това е съхраняване на 8bit данни в SQL_ASCII бази данни.";
-    t[858] = "Cannot establish a savepoint in auto-commit mode.";
-    t[859] = "Не може да се установи savepoint в auto-commit модус.";
-    t[862] = "The column name {0} was not found in this ResultSet.";
-    t[863] = "Името на колоната {0} не бе намерено в този ResultSet.";
-    t[864] = "Prepare called before end. prepare xid={0}, state={1}";
-    t[865] = "Prepare извикано преди края. prepare xid={0}, state={1}";
-    t[866] = "Unknown Types value.";
-    t[867] = "Стойност от неизвестен тип.";
-    t[870] = "Cannot call updateRow() when on the insert row.";
-    t[871] = "Не може да се изпълни updateRow() метода, когато се намираме при редицата на въвеждане.";
-    t[876] = "Database connection failed when reading from copy";
-    t[877] = "Неосъществена връзка към базата данни при четене от копие";
-    t[880] = "Error rolling back prepared transaction. rollback xid={0}, preparedXid={1}, currentXid={2}";
-    t[881] = "Грешка при възстановяване на състоянието преди подготвена транзакция. rollback xid={0}, preparedXid={1}, currentXid={2}";
-    t[882] = "Can''t use relative move methods while on the insert row.";
-    t[883] = "Не може да се използват относителни методи за движение, когато се намираме при редицата на въвеждане.";
-    t[884] = "free() was called on this LOB previously";
-    t[885] = "Функцията free() бе вече извикана за този голям обект LOB";
-    t[888] = "A CallableStatement was executed with nothing returned.";
-    t[889] = "CallableStatement функция бе обработена, но няма резултати.";
-    table = t;
-  }
+    private static final String[] table;
 
-  @Override
-  public Object handleGetObject (String msgid) throws MissingResourceException {
-    int hash_val = msgid.hashCode() & 0x7fffffff;
-    int idx = (hash_val % 445) << 1;
-    {
-      Object found = table[idx];
-      if (found == null)
-        return null;
-      if (msgid.equals(found))
-        return table[idx + 1];
+    static {
+        String[] t = new String[890];
+        t[0] = "";
+        t[1] = "Project-Id-Version: JDBC Driver for PostgreSQL 8.x\nReport-Msgid-Bugs-To: \nPO-Revision-Date: 2009-12-28 00:01+0100\nLast-Translator: <usun0v@mail.bg>\nLanguage-Team: <bg@li.org>\nLanguage: \nMIME-Version: 1.0\nContent-Type: text/plain; charset=UTF-8\nContent-Transfer-Encoding: 8bit\nX-Poedit-Language: Bulgarian\nX-Poedit-Country: BULGARIA\n";
+        t[2] = "A CallableStatement function was executed and the out parameter {0} was of type {1} however type {2} was registered.";
+        t[3] = "CallableStatement функция бе обработена и изходния параметър {0} бе от тип {1}, обаче тип {2} бе използван.";
+        t[6] = "Too many update results were returned.";
+        t[7] = "Твърде много резултати бяха получени при актуализацията.";
+        t[10] = "There are no rows in this ResultSet.";
+        t[11] = "В този ResultSet няма редове.";
+        t[14] = "Detail: {0}";
+        t[15] = "Подробност: {0}";
+        t[20] = "Invalid fetch direction constant: {0}.";
+        t[21] = "Невалидна константа за fetch посоката: {0}.";
+        t[22] = "No function outputs were registered.";
+        t[23] = "Резултати от функцията не бяха регистрирани.";
+        t[24] = "The array index is out of range: {0}";
+        t[25] = "Индексът на масив е извън обхвата: {0}";
+        t[26] = "The authentication type {0} is not supported. Check that you have configured the pg_hba.conf file to include the client''s IP address or subnet, and that it is using an authentication scheme supported by the driver.";
+        t[27] = "Тип на удостоверяване {0} не се поддържа. Проверете дали сте конфигурирали pg_hba.conf файла, да включва IP адреса на клиента или подмрежата, и че се използва схема за удостоверяване, поддържана от драйвъра.";
+        t[28] = "The server requested password-based authentication, but no password was provided.";
+        t[29] = "Сървърът изисква идентифициране с парола, но парола не бе въведена.";
+        t[40] = "Large Objects may not be used in auto-commit mode.";
+        t[41] = "Големи обекти LOB не могат да се използват в auto-commit модус.";
+        t[46] = "Operation requires a scrollable ResultSet, but this ResultSet is FORWARD_ONLY.";
+        t[47] = "Операцията изисква резултатите да са scrollable, но този ResultSet е FORWARD_ONLY.";
+        t[48] = "Zero bytes may not occur in string parameters.";
+        t[49] = "Не може да има нула байта в низ параметрите.";
+        t[50] = "The JVM claims not to support the encoding: {0}";
+        t[51] = "JVM не поддържа тази кодова таблица за момента: {0}";
+        t[54] = "Your security policy has prevented the connection from being attempted.  You probably need to grant the connect java.net.SocketPermission to the database server host and port that you wish to connect to.";
+        t[55] = "Връзката не бе осъществена, поради вашите настройки за сигурност. Може би трябва да предоставите java.net.SocketPermission права на сървъра и порта с базата данни, към който искате да се свържете.";
+        t[62] = "Database connection failed when canceling copy operation";
+        t[63] = "Неосъществена връзка към базата данни при прекъсване на копирането";
+        t[78] = "Error loading default settings from driverconfig.properties";
+        t[79] = "Грешка при зареждане на настройките по подразбиране от файла driverconfig.properties";
+        t[82] = "Returning autogenerated keys is not supported.";
+        t[83] = "Автоматично генерирани ключове не се поддържат.";
+        t[92] = "Unable to find name datatype in the system catalogs.";
+        t[93] = "Не може да се намери името на типа данни в системните каталози.";
+        t[94] = "Tried to read from inactive copy";
+        t[95] = "Опит за четене при неактивно копиране";
+        t[96] = "ResultSet is not updateable.  The query that generated this result set must select only one table, and must select all primary keys from that table. See the JDBC 2.1 API Specification, section 5.6 for more details.";
+        t[97] = "ResultSet не може да се обновява. Заявката генерираща този резултат трябва да селектира само една таблица, както и всички първични ключове в нея. За повече информация, вижте раздел 5.6 на JDBC 2.1 API Specification.";
+        t[98] = "Cannot cast an instance of {0} to type {1}";
+        t[99] = "Не може да преобразува инстанция на {0} към тип {1}";
+        t[102] = "Requested CopyOut but got {0}";
+        t[103] = "Зададено CopyOut но получено {0}";
+        t[106] = "Not implemented: Prepare must be issued using the same connection that started the transaction. currentXid={0}, prepare xid={1}";
+        t[107] = "Невъзможна комбинация: Prepare трябва да бъде издадено чрез използване на същата връзка, при която е започната транзакцията. currentXid={0}, prepare xid={1}";
+        t[108] = "Can''t use query methods that take a query string on a PreparedStatement.";
+        t[109] = "Не може да се употребяват методи за заявка, които ползват низове на PreparedStatement.";
+        t[114] = "Conversion of money failed.";
+        t[115] = "Неуспешно валутно преобразуване.";
+        t[118] = "Tried to obtain lock while already holding it";
+        t[119] = "Опит за получаване на заключване/резервация докато вече е получено";
+        t[120] = "This SQLXML object has not been initialized, so you cannot retrieve data from it.";
+        t[121] = "Този SQLXML обект не е инициализиран, така че не могат да се извличат данни от него.";
+        t[122] = "This SQLXML object has already been freed.";
+        t[123] = "Този SQLXML обект вече е освободен.";
+        t[124] = "Invalid stream length {0}.";
+        t[125] = "Невалидна дължина {0} на потока данни.";
+        t[130] = "Position: {0}";
+        t[131] = "Позиция: {0}";
+        t[132] = "The server does not support SSL.";
+        t[133] = "Сървърът не поддържа SSL.";
+        t[134] = "Got {0} error responses to single copy cancel request";
+        t[135] = "Получени {0} отговори за грешка при единствено искане да се прекъсне копирането";
+        t[136] = "DataSource has been closed.";
+        t[137] = "Източникът на данни е прекъснат.";
+        t[138] = "Unable to convert DOMResult SQLXML data to a string.";
+        t[139] = "Не може да преобразува DOMResult SQLXML данни в низ.";
+        t[144] = "Invalid UUID data.";
+        t[145] = "Невалидни UUID данни.";
+        t[148] = "The fastpath function {0} is unknown.";
+        t[149] = "Функцията {0} е неизвестна.";
+        t[154] = "Connection has been closed.";
+        t[155] = "Връзката бе прекъсната.";
+        t[156] = "This statement does not declare an OUT parameter.  Use '{' ?= call ... '}' to declare one.";
+        t[157] = "Тази заявка не декларира изходен параметър. Ползвайте '{' ?= call ... '}' за да декларирате такъв.";
+        t[158] = "A connection could not be made using the requested protocol {0}.";
+        t[159] = "Не може да осъществи връзка, ползвайки искания протокол {0}.";
+        t[162] = "The maximum field size must be a value greater than or equal to 0.";
+        t[163] = "Максималният размер на полето трябва да бъде стойност по-голяма или равна на 0.";
+        t[166] = "GSS Authentication failed";
+        t[167] = "GSS удостоверяването бе неуспешно";
+        t[176] = "Unknown XML Result class: {0}";
+        t[177] = "Неизвестен XML изходящ клас: {0}";
+        t[180] = "Server SQLState: {0}";
+        t[181] = "SQL статус на сървъра: {0}";
+        t[182] = "Unknown Response Type {0}.";
+        t[183] = "Неизвестен тип на отговор {0}.";
+        t[186] = "Tried to cancel an inactive copy operation";
+        t[187] = "Опит за прекъсване на неактивно копиране";
+        t[190] = "This PooledConnection has already been closed.";
+        t[191] = "Тази PooledConnection връзка бе вече прекъсната.";
+        t[200] = "Multiple ResultSets were returned by the query.";
+        t[201] = "Заявката върна няколко ResultSets.";
+        t[202] = "Finalizing a Connection that was never closed:";
+        t[203] = "Приключване на връзка, която не бе прекъсната:";
+        t[204] = "Unsupported Types value: {0}";
+        t[205] = "Неподдържана стойност за тип: {0}";
+        t[206] = "A CallableStatement was declared, but no call to registerOutParameter(1, <some type>) was made.";
+        t[207] = "CallableStatement функция бе декларирана, но обработена като registerOutParameter(1, <some type>) ";
+        t[208] = "Cannot retrieve the name of an unnamed savepoint.";
+        t[209] = "Не може да определи името на неупомената savepoint.";
+        t[220] = "Cannot change transaction read-only property in the middle of a transaction.";
+        t[221] = "Не може да променяте правата на транзакцията по време на нейното извършване.";
+        t[222] = "Bind message length {0} too long.  This can be caused by very large or incorrect length specifications on InputStream parameters.";
+        t[223] = "Прекалено голяма дължина {0} на съобщението. Това може да е причинено от прекалено голяма или неправилно зададена дължина на InputStream параметри.";
+        t[224] = "The parameter index is out of range: {0}, number of parameters: {1}.";
+        t[225] = "Параметърният индекс е извън обхват: {0}, брой параметри: {1}.";
+        t[226] = "Transaction isolation level {0} not supported.";
+        t[227] = "Изолационно ниво на транзакциите {0} не се поддържа.";
+        t[234] = "Cannot update the ResultSet because it is either before the start or after the end of the results.";
+        t[235] = "Не може да се обнови ResultSet, когато се намираме преди началото или след края на резултатите.";
+        t[238] = "tried to call end without corresponding start call. state={0}, start xid={1}, currentXid={2}, preparedXid={3}";
+        t[239] = "опита да извика end без съответстващо извикване на start. state={0}, start xid={1}, currentXid={2}, preparedXid={3}";
+        t[242] = "This SQLXML object has already been initialized, so you cannot manipulate it further.";
+        t[243] = "Този SQLXML обект вече е инициализиран и не може да бъде променен.";
+        t[250] = "Conversion to type {0} failed: {1}.";
+        t[251] = "Неуспешно преобразуване към тип {0}: {1}.";
+        t[252] = "The SSLSocketFactory class provided {0} could not be instantiated.";
+        t[253] = "Класът SSLSocketFactory връща {0} и не може да бъде инстанцииран.";
+        t[254] = "Unable to create SAXResult for SQLXML.";
+        t[255] = "Не може да се създаде SAXResult за SQLXML.";
+        t[256] = "Interrupted while attempting to connect.";
+        t[257] = "Опита за осъществяване на връзка бе своевременно прекъснат. ";
+        t[260] = "Protocol error.  Session setup failed.";
+        t[261] = "Грешка в протокола. Неуспешна настройка на сесията.";
+        t[264] = "Database connection failed when starting copy";
+        t[265] = "Неосъществена връзка към базата данни при започване на копирането";
+        t[272] = "Cannot call cancelRowUpdates() when on the insert row.";
+        t[273] = "Не може да се изпълни cancelRowUpdates() метода, когато се намираме при редицата на въвеждане.";
+        t[274] = "Unable to bind parameter values for statement.";
+        t[275] = "Не може да подготви параметрите на командата.";
+        t[280] = "A result was returned when none was expected.";
+        t[281] = "Бе получен резултат, когато такъв не бе очакван.";
+        t[282] = "The server''s standard_conforming_strings parameter was reported as {0}. The JDBC driver expected on or off.";
+        t[283] = "Параметърът standard_conforming_strings при сървъра бе докладван като {0}. JDBC драйвъра очаква този параметър да бъде on или off.";
+        t[284] = "Unable to translate data into the desired encoding.";
+        t[285] = "Невъзможно преобразуване на данни в желаното кодиране.";
+        t[292] = "PostgreSQL LOBs can only index to: {0}";
+        t[293] = "PostgreSQL индексира големи обекти LOB само до: {0}";
+        t[294] = "Provided InputStream failed.";
+        t[295] = "Зададения InputStream поток е неуспешен.";
+        t[296] = "Invalid protocol state requested. Attempted transaction interleaving is not supported. xid={0}, currentXid={1}, state={2}, flags={3}";
+        t[297] = "Транзакция в транзакция не се поддържа за момента. xid={0}, currentXid={1}, state={2}, flags={3}";
+        t[304] = "{0} function takes four and only four argument.";
+        t[305] = "Функцията {0} може да приеме четири и само четири аргумента.";
+        t[306] = "{0} function doesn''t take any argument.";
+        t[307] = "Функцията {0} не може да приема аргументи.";
+        t[310] = "Got CopyOutResponse from server during an active {0}";
+        t[311] = "Получен CopyOutResponse отговор от сървъра при активно {0}";
+        t[322] = "No value specified for parameter {0}.";
+        t[323] = "Няма стойност, определена за параметър {0}.";
+        t[324] = "Illegal UTF-8 sequence: initial byte is {0}: {1}";
+        t[325] = "Невалидна UTF-8 последователност: първоначален байт е {0}: {1}";
+        t[326] = "Error disabling autocommit";
+        t[327] = "Грешка при изключване на autocommit";
+        t[328] = "Illegal UTF-8 sequence: byte {0} of {1} byte sequence is not 10xxxxxx: {2}";
+        t[329] = "Невалидна UTF-8 последователност: байта {0} от байтова последователност {1} не е 10xxxxxx: {2}";
+        t[330] = "Received CommandComplete ''{0}'' without an active copy operation";
+        t[331] = "Получено командно допълнение ''{0}'' без активна команда за копиране";
+        t[332] = "Illegal UTF-8 sequence: final value is out of range: {0}";
+        t[333] = "Невалидна UTF-8 последователност: крайната стойност е извън стойностните граници: {0}";
+        t[336] = "Cannot change transaction isolation level in the middle of a transaction.";
+        t[337] = "Не може да променяте изолационното ниво на транзакцията по време на нейното извършване.";
+        t[340] = "An unexpected result was returned by a query.";
+        t[341] = "Заявката върна неочакван резултат.";
+        t[346] = "Conversion of interval failed";
+        t[347] = "Неуспешно преобразуване на интервал";
+        t[350] = "This ResultSet is closed.";
+        t[351] = "Операциите по този ResultSet са били прекратени.";
+        t[352] = "Read from copy failed.";
+        t[353] = "Четене от копието неуспешно.";
+        t[354] = "Unable to load the class {0} responsible for the datatype {1}";
+        t[355] = "Невъзможно е зареждането на клас {0}, отговарящ за типа данни {1}";
+        t[356] = "Failed to convert binary xml data to encoding: {0}.";
+        t[357] = "Неуспешно преобразуване на двоични XML данни за кодиране съгласно: {0}.";
+        t[362] = "Connection attempt timed out.";
+        t[363] = "Времето за осъществяване на връзката изтече (таймаут).";
+        t[364] = "Expected command status BEGIN, got {0}.";
+        t[365] = "Очаквана команда BEGIN, получена {0}.";
+        t[372] = "This copy stream is closed.";
+        t[373] = "Потока за копиране на данните е затворен.";
+        t[376] = "Can''t infer the SQL type to use for an instance of {0}. Use setObject() with an explicit Types value to specify the type to use.";
+        t[377] = "Не може да се определи SQL тип, който да се използва за инстанцията на {0}. Ползвайте метода setObject() с точни стойности, за да определите типа.";
+        t[378] = "Can''t refresh the insert row.";
+        t[379] = "Не може да обнови въведения ред.";
+        t[382] = "You must specify at least one column value to insert a row.";
+        t[383] = "Трябва да посочите поне една стойност за колона, за да вмъкнете ред.";
+        t[388] = "Connection is busy with another transaction";
+        t[389] = "Връзката е заета с друга транзакция";
+        t[392] = "Bad value for type {0} : {1}";
+        t[393] = "Невалидна стойност за тип {0} : {1}";
+        t[396] = "This statement has been closed.";
+        t[397] = "Командата е извършена.";
+        t[404] = "No primary key found for table {0}.";
+        t[405] = "Няма първичен ключ за таблица {0}.";
+        t[406] = "Currently positioned after the end of the ResultSet.  You cannot call deleteRow() here.";
+        t[407] = "В момента се намираме преди края на ResultSet. Тук не може да се изпълни deleteRow() метода.";
+        t[414] = "{0} function takes two or three arguments.";
+        t[415] = "Функцията {0} може да приеме два или три аргумента.";
+        t[416] = "{0} function takes three and only three arguments.";
+        t[417] = "Функцията {0} може да приеме три и само три аргумента.";
+        t[418] = "Unable to find server array type for provided name {0}.";
+        t[419] = "Не може да се намери типа на сървърен масив за зададеното име {0}.";
+        t[420] = "Fastpath call {0} - No result was returned and we expected an integer.";
+        t[421] = "Извикване на {0} - няма резултати и а бе очаквано цяло число.";
+        t[426] = "Database connection failed when ending copy";
+        t[427] = "Неосъществена връзка към базата данни при завършване на копирането";
+        t[428] = "Cannot write to copy a byte of value {0}";
+        t[429] = "Няма пишещи права, за да копира байтова стойност {0}";
+        t[430] = "Results cannot be retrieved from a CallableStatement before it is executed.";
+        t[431] = "Резултати от CallableStatement функция не могат да бъдат получени, преди тя да бъде обработена.";
+        t[432] = "Cannot reference a savepoint after it has been released.";
+        t[433] = "Не може да референцира savepoint, след като е била освободена.";
+        t[434] = "Failed to create object for: {0}.";
+        t[435] = "Неуспешно създаване на обект за: {0}.";
+        t[438] = "Unexpected packet type during copy: {0}";
+        t[439] = "Неочакван тип пакет при копиране: {0}";
+        t[442] = "Unable to determine a value for MaxIndexKeys due to missing system catalog data.";
+        t[443] = "Невъзможно е да се определи стойността за MaxIndexKeys поради липса на системния каталог с данни.";
+        t[444] = "Tried to end inactive copy";
+        t[445] = "Опит за прекъсване на неактивно копиране";
+        t[450] = "Unexpected copydata from server for {0}";
+        t[451] = "Неочаквано CopyData от сървъра за {0}";
+        t[460] = "Zero bytes may not occur in identifiers.";
+        t[461] = "Не може да има нула байта в идентификаторите.";
+        t[462] = "Error during one-phase commit. commit xid={0}";
+        t[463] = "Грешка при едно-фазов commit. commit xid={0}";
+        t[464] = "Ran out of memory retrieving query results.";
+        t[465] = "Недостатъчна памет при представяна на резултатите от заявката.";
+        t[468] = "Unable to create StAXResult for SQLXML";
+        t[469] = "Не може да се създаде StAXResult за SQLXML.";
+        t[470] = "Location: File: {0}, Routine: {1}, Line: {2}";
+        t[471] = "Местоположение: Файл: {0}, Функция: {1}, Ред: {2}";
+        t[482] = "A CallableStatement was executed with an invalid number of parameters";
+        t[483] = "CallableStatement функция бе обработена, но с непозволен брой параметри.";
+        t[486] = "Illegal UTF-8 sequence: {0} bytes used to encode a {1} byte value: {2}";
+        t[487] = "Невалидна UTF-8 последователност: {0} байта използвани за кодирането на {1} байтова стойност: {2}";
+        t[496] = "Interrupted while waiting to obtain lock on database connection";
+        t[497] = "Прекъсване при чакане да получи заключване/резервация при връзка към базата данни";
+        t[502] = "LOB positioning offsets start at 1.";
+        t[503] = "Позиционалният офсет при големи обекти LOB започва от 1.";
+        t[506] = "Returning autogenerated keys by column index is not supported.";
+        t[507] = "Автоматично генерирани ключове спрямо индекс на колона не се поддържат.";
+        t[510] = "Currently positioned before the start of the ResultSet.  You cannot call deleteRow() here.";
+        t[511] = "В момента се намираме в началото на ResultSet. Тук не може да се изпълни deleteRow() метода.";
+        t[524] = "Truncation of large objects is only implemented in 8.3 and later servers.";
+        t[525] = "Скъсяване на големи обекти LOB е осъществено само във версии след 8.3.";
+        t[526] = "Statement has been closed.";
+        t[527] = "Командата е завършена.";
+        t[540] = "Database connection failed when writing to copy";
+        t[541] = "Неосъществена връзка към базата данни при опит за копиране";
+        t[544] = "The server''s DateStyle parameter was changed to {0}. The JDBC driver requires DateStyle to begin with ISO for correct operation.";
+        t[545] = "Параметърът DateStyle при сървъра бе променен на {0}. JDBC драйвъра изисква DateStyle започва с ISO за да функционира правилно.";
+        t[546] = "Provided Reader failed.";
+        t[547] = "Грешка с ползвания четец.";
+        t[550] = "Not on the insert row.";
+        t[551] = "Не сме в редицата на въвеждане.";
+        t[566] = "Unable to decode xml data.";
+        t[567] = "Не може да декодира XML данните.";
+        t[570] = "Not implemented: 2nd phase commit must be issued using an idle connection. commit xid={0}, currentXid={1}, state={2}, transactionState={3}";
+        t[571] = "Невъзможна комбинация: втората фаза на commit задължително трябва да бъде издадена при свободна връзка. commit xid={0}, currentXid={1}, state={2}, transactionState={3}";
+        t[596] = "Tried to write to an inactive copy operation";
+        t[597] = "Опит за писане при неактивна операция за копиране";
+        t[606] = "An error occurred while setting up the SSL connection.";
+        t[607] = "Възникна грешка при осъществяване на SSL връзката.";
+        t[614] = "Something unusual has occurred to cause the driver to fail. Please report this exception.";
+        t[615] = "Възникна неочаквана грешка с драйвъра. Моля докадвайте това изключение. ";
+        t[618] = "No results were returned by the query.";
+        t[619] = "Няма намерени резултати за заявката.";
+        t[620] = "ClientInfo property not supported.";
+        t[621] = "Информацията за ClientInfo не се поддържа.";
+        t[622] = "Unexpected error writing large object to database.";
+        t[623] = "Неочаквана грешка при записване на голям обект LOB в базата данни.";
+        t[628] = "The JVM claims not to support the {0} encoding.";
+        t[629] = "JVM не поддържа за момента {0} кодовата таблица.";
+        t[630] = "Unknown XML Source class: {0}";
+        t[631] = "Неизвестен XML входящ клас: {0}";
+        t[632] = "Interval {0} not yet implemented";
+        t[633] = "Интервалът {0} не е валиден все още.";
+        t[636] = "commit called before end. commit xid={0}, state={1}";
+        t[637] = "commit извикан преди end. commit xid={0}, state={1}";
+        t[638] = "Tried to break lock on database connection";
+        t[639] = "Опит за премахване на заключването/резервацията при връзка към базата данни";
+        t[642] = "Missing expected error response to copy cancel request";
+        t[643] = "Липсва очакван отговор при грешка да прекъсне копирането";
+        t[644] = "Maximum number of rows must be a value grater than or equal to 0.";
+        t[645] = "Максималният брой редове трябва да бъде стойност по-голяма или равна на 0.";
+        t[652] = "Requested CopyIn but got {0}";
+        t[653] = "Зададено CopyIn но получено {0}";
+        t[656] = "Parameter of type {0} was registered, but call to get{1} (sqltype={2}) was made.";
+        t[657] = "Отчетен параметър от тип {0}, но обработено като get{1} (sqltype={2}). ";
+        t[662] = "Unsupported value for stringtype parameter: {0}";
+        t[663] = "Непозволена стойност за StringType параметър: {0}";
+        t[664] = "Fetch size must be a value greater to or equal to 0.";
+        t[665] = "Размера за fetch size трябва да бъде по-голям или равен на 0.";
+        t[670] = "Cannot tell if path is open or closed: {0}.";
+        t[671] = "Не може да определи дали адреса е отворен или затворен: {0}.";
+        t[672] = "Expected an EOF from server, got: {0}";
+        t[673] = "Очакван край на файла от сървъра, но получено: {0}";
+        t[680] = "Copying from database failed: {0}";
+        t[681] = "Копирането от базата данни бе неуспешно: {0}";
+        t[682] = "Connection has been closed automatically because a new connection was opened for the same PooledConnection or the PooledConnection has been closed.";
+        t[683] = "Връзката бе автоматично прекъсната, защото нова връзка за същата беше осъществена или PooledConnection връзката е вече прекъсната.";
+        t[698] = "Custom type maps are not supported.";
+        t[699] = "Специфични типови съответствия не се поддържат.";
+        t[700] = "xid must not be null";
+        t[701] = "xid не може да бъде null";
+        t[706] = "Internal Position: {0}";
+        t[707] = "Вътрешна позиция: {0}";
+        t[708] = "Error during recover";
+        t[709] = "Грешка при възстановяване";
+        t[712] = "Method {0} is not yet implemented.";
+        t[713] = "Методът {0} все още не е функционален.";
+        t[714] = "Unexpected command status: {0}.";
+        t[715] = "Неочакван статус на команда: {0}.";
+        t[718] = "The column index is out of range: {0}, number of columns: {1}.";
+        t[719] = "Индексът на колоната е извън стойностен обхват: {0}, брой колони: {1}.";
+        t[730] = "Unknown ResultSet holdability setting: {0}.";
+        t[731] = "Неизвестна ResultSet holdability настройка: {0}.";
+        t[734] = "Cannot call deleteRow() when on the insert row.";
+        t[735] = "Не може да се изпълни deleteRow() метода, когато се намираме при редицата на въвеждане.";
+        t[740] = "ResultSet not positioned properly, perhaps you need to call next.";
+        t[741] = "ResultSet не е референциран правилно. Вероятно трябва да придвижите курсора посредством next.";
+        t[742] = "wasNull cannot be call before fetching a result.";
+        t[743] = "wasNull не може да бьде изпълнен, преди наличието на резултата.";
+        t[746] = "{0} function takes two and only two arguments.";
+        t[747] = "Функцията {0} може да приеме два и само два аргумента.";
+        t[750] = "Malformed function or procedure escape syntax at offset {0}.";
+        t[751] = "Непозволен синтаксис на функция или процедура при офсет {0}.";
+        t[752] = "Premature end of input stream, expected {0} bytes, but only read {1}.";
+        t[753] = "Преждевременен край на входящ поток на данни, очаквани {0} байта, но прочетени само {1}.";
+        t[756] = "Got CopyData without an active copy operation";
+        t[757] = "Получено CopyData без наличие на активна операция за копиране";
+        t[758] = "Cannot retrieve the id of a named savepoint.";
+        t[759] = "Не може да определи ID на спомената savepoint.";
+        t[770] = "Where: {0}";
+        t[771] = "Където: {0}";
+        t[778] = "Got CopyInResponse from server during an active {0}";
+        t[779] = "Получен CopyInResponse отговор от сървъра при активно {0}";
+        t[780] = "Cannot convert an instance of {0} to type {1}";
+        t[781] = "Не може да преобразува инстанцията на {0} във вида {1}";
+        t[784] = "Not implemented: one-phase commit must be issued using the same connection that was used to start it";
+        t[785] = "Невъзможна комбинация: едно-фазов commit трябва да бъде издаден чрез използване на същата връзка, при която е започнал";
+        t[790] = "Invalid flags {0}";
+        t[791] = "Невалидни флагове {0}";
+        t[798] = "Query timeout must be a value greater than or equals to 0.";
+        t[799] = "Времето за изпълнение на заявката трябва да бъде стойност по-голяма или равна на 0.";
+        t[802] = "Hint: {0}";
+        t[803] = "Забележка: {0}";
+        t[810] = "The array index is out of range: {0}, number of elements: {1}.";
+        t[811] = "Индексът на масив е извън обхвата: {0}, брой елементи: {1}.";
+        t[812] = "Internal Query: {0}";
+        t[813] = "Вътрешна заявка: {0}";
+        t[816] = "CommandComplete expected COPY but got: ";
+        t[817] = "Очаквано командно допълнение COPY но получено: ";
+        t[824] = "Illegal UTF-8 sequence: final value is a surrogate value: {0}";
+        t[825] = "Невалидна UTF-8 последователност: крайната стойност е заместителна стойност: {0}";
+        t[826] = "Unknown type {0}.";
+        t[827] = "Неизвестен тип {0}.";
+        t[828] = "ResultSets with concurrency CONCUR_READ_ONLY cannot be updated.";
+        t[829] = "ResultSets с concurrency CONCUR_READ_ONLY не могат да бъдат актуализирани.";
+        t[830] = "The connection attempt failed.";
+        t[831] = "Опита за връзка бе неуспешен.";
+        t[834] = "{0} function takes one and only one argument.";
+        t[835] = "Функцията {0} може да приеме само един единствен аргумент.";
+        t[838] = "suspend/resume not implemented";
+        t[839] = "спиране / започване не се поддържа за момента";
+        t[840] = "Error preparing transaction. prepare xid={0}";
+        t[841] = "Грешка при подготвяне на транзакция. prepare xid={0}";
+        t[842] = "The driver currently does not support COPY operations.";
+        t[843] = "За момента драйвъра не поддържа COPY команди.";
+        t[852] = "Heuristic commit/rollback not supported. forget xid={0}";
+        t[853] = "Евристичен commit или rollback не се поддържа. forget xid={0}";
+        t[856] = "Invalid character data was found.  This is most likely caused by stored data containing characters that are invalid for the character set the database was created in.  The most common example of this is storing 8bit data in a SQL_ASCII database.";
+        t[857] = "Бяха намерени невалидни данни. Това най-вероятно се дължи на съхранявани данни, съдържащи символи, които са невалидни за набора от знаци при създаване на базата данни. Чест пример за това е съхраняване на 8bit данни в SQL_ASCII бази данни.";
+        t[858] = "Cannot establish a savepoint in auto-commit mode.";
+        t[859] = "Не може да се установи savepoint в auto-commit модус.";
+        t[862] = "The column name {0} was not found in this ResultSet.";
+        t[863] = "Името на колоната {0} не бе намерено в този ResultSet.";
+        t[864] = "Prepare called before end. prepare xid={0}, state={1}";
+        t[865] = "Prepare извикано преди края. prepare xid={0}, state={1}";
+        t[866] = "Unknown Types value.";
+        t[867] = "Стойност от неизвестен тип.";
+        t[870] = "Cannot call updateRow() when on the insert row.";
+        t[871] = "Не може да се изпълни updateRow() метода, когато се намираме при редицата на въвеждане.";
+        t[876] = "Database connection failed when reading from copy";
+        t[877] = "Неосъществена връзка към базата данни при четене от копие";
+        t[880] = "Error rolling back prepared transaction. rollback xid={0}, preparedXid={1}, currentXid={2}";
+        t[881] = "Грешка при възстановяване на състоянието преди подготвена транзакция. rollback xid={0}, preparedXid={1}, currentXid={2}";
+        t[882] = "Can''t use relative move methods while on the insert row.";
+        t[883] = "Не може да се използват относителни методи за движение, когато се намираме при редицата на въвеждане.";
+        t[884] = "free() was called on this LOB previously";
+        t[885] = "Функцията free() бе вече извикана за този голям обект LOB";
+        t[888] = "A CallableStatement was executed with nothing returned.";
+        t[889] = "CallableStatement функция бе обработена, но няма резултати.";
+        table = t;
     }
-    int incr = ((hash_val % 443) + 1) << 1;
-    for (;;) {
-      idx += incr;
-      if (idx >= 890)
-        idx -= 890;
-      Object found = table[idx];
-      if (found == null)
-        return null;
-      if (msgid.equals(found))
-        return table[idx + 1];
-    }
-  }
-  @Override
-  public Enumeration<String> getKeys () {
-    return
-      new Enumeration<>() {
-        private int idx = 0;
-        { while (idx < 890 && table[idx] == null) idx += 2; }
-        @Override
-        public boolean hasMoreElements () {
-          return (idx < 890);
-        }
-        @Override
-        public String nextElement () {
-          Object key = table[idx];
-          do idx += 2; 
-          while (idx < 890 && table[idx] == null);
-          return key.toString();
-        }
-      };
-  }
 
-  public ResourceBundle getParent () {
-    return parent;
-  }
+    @Override
+    public Object handleGetObject(String msgid) throws MissingResourceException {
+        int hash_val = msgid.hashCode() & 0x7fffffff;
+        int idx = (hash_val % 445) << 1;
+        {
+            Object found = table[idx];
+            if (found == null)
+                return null;
+            if (msgid.equals(found))
+                return table[idx + 1];
+        }
+        int incr = ((hash_val % 443) + 1) << 1;
+        for (; ; ) {
+            idx += incr;
+            if (idx >= 890)
+                idx -= 890;
+            Object found = table[idx];
+            if (found == null)
+                return null;
+            if (msgid.equals(found))
+                return table[idx + 1];
+        }
+    }
+
+    @Override
+    public Enumeration<String> getKeys() {
+        return
+                new Enumeration<>() {
+                    private int idx = 0;
+
+                    {
+                        while (idx < 890 && table[idx] == null) idx += 2;
+                    }
+
+                    @Override
+                    public boolean hasMoreElements() {
+                        return (idx < 890);
+                    }
+
+                    @Override
+                    public String nextElement() {
+                        Object key = table[idx];
+                        do idx += 2;
+                        while (idx < 890 && table[idx] == null);
+                        return key.toString();
+                    }
+                };
+    }
+
+    public ResourceBundle getParent() {
+        return parent;
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/translation/messages_cs.java b/pgjdbc/src/main/java/org/postgresql/translation/messages_cs.java
index f617acf..1d30473 100644
--- a/pgjdbc/src/main/java/org/postgresql/translation/messages_cs.java
+++ b/pgjdbc/src/main/java/org/postgresql/translation/messages_cs.java
@@ -5,232 +5,238 @@ import java.util.MissingResourceException;
 import java.util.ResourceBundle;
 
 public class messages_cs extends ResourceBundle {
-  private static final String[] table;
-  static {
-    String[] t = new String[314];
-    t[0] = "";
-    t[1] = "Project-Id-Version: PostgreSQL JDBC Driver 8.0\nReport-Msgid-Bugs-To: \nPO-Revision-Date: 2005-08-21 20:00+0200\nLast-Translator: Petr Dittrich <bodyn@medoro.org>\nLanguage-Team: \nLanguage: \nMIME-Version: 1.0\nContent-Type: text/plain; charset=UTF-8\nContent-Transfer-Encoding: 8bit\n";
-    t[2] = "A connection could not be made using the requested protocol {0}.";
-    t[3] = "Spojení nelze vytvořit s použitím žádaného protokolu {0}.";
-    t[4] = "Malformed function or procedure escape syntax at offset {0}.";
-    t[5] = "Poškozená funkce nebo opuštění procedury na pozici {0}.";
-    t[8] = "Cannot cast an instance of {0} to type {1}";
-    t[9] = "Nemohu přetypovat instanci {0} na typ {1}";
-    t[12] = "ResultSet is not updateable.  The query that generated this result set must select only one table, and must select all primary keys from that table. See the JDBC 2.1 API Specification, section 5.6 for more details.";
-    t[13] = "ResultSet není aktualizavatelný. Dotaz musí vybírat pouze z jedné tabulky a musí obsahovat všechny primární klíče tabulky. Koukni do JDBC 2.1 API Specifikace, sekce 5.6 pro více podrobností.";
-    t[14] = "The JVM claims not to support the {0} encoding.";
-    t[15] = "JVM tvrdí, že nepodporuje kodování {0}.";
-    t[16] = "An I/O error occurred while sending to the backend.";
-    t[17] = "Vystupně/výstupní chyba při odesílání k backend.";
-    t[18] = "Statement has been closed.";
-    t[19] = "Statement byl uzavřen.";
-    t[20] = "Unknown Types value.";
-    t[21] = "Neznámá hodnota typu.";
-    t[22] = "ResultSets with concurrency CONCUR_READ_ONLY cannot be updated.";
-    t[23] = "ResultSets se souběžností CONCUR_READ_ONLY nemůže být aktualizováno";
-    t[26] = "You must specify at least one column value to insert a row.";
-    t[27] = "Musíte vyplnit alespoň jeden sloupec pro vložení řádku.";
-    t[32] = "No primary key found for table {0}.";
-    t[33] = "Nenalezen primární klíč pro tabulku {0}.";
-    t[34] = "Cannot establish a savepoint in auto-commit mode.";
-    t[35] = "Nemohu vytvořit savepoint v auto-commit modu.";
-    t[38] = "Can''t use relative move methods while on the insert row.";
-    t[39] = "Nemůžete používat relativní přesuny při vkládání řádku.";
-    t[44] = "The column name {0} was not found in this ResultSet.";
-    t[45] = "Sloupec pojmenovaný {0} nebyl nalezen v ResultSet.";
-    t[46] = "This statement has been closed.";
-    t[47] = "Příkaz byl uzavřen.";
-    t[48] = "The SSLSocketFactory class provided {0} could not be instantiated.";
-    t[49] = "Třída SSLSocketFactory poskytla {0} což nemůže být instancionizováno.";
-    t[50] = "Multiple ResultSets were returned by the query.";
-    t[51] = "Vícenásobný ResultSet byl vrácen dotazem.";
-    t[52] = "DataSource has been closed.";
-    t[53] = "DataSource byl uzavřen.";
-    t[56] = "Error loading default settings from driverconfig.properties";
-    t[57] = "Chyba načítání standardního nastavení z driverconfig.properties";
-    t[62] = "Bad value for type {0} : {1}";
-    t[63] = "Špatná hodnota pro typ {0} : {1}";
-    t[66] = "Method {0} is not yet implemented.";
-    t[67] = "Metoda {0} není implementována.";
-    t[68] = "The array index is out of range: {0}";
-    t[69] = "Index pole mimo rozsah: {0}";
-    t[70] = "Unexpected command status: {0}.";
-    t[71] = "Neočekávaný stav příkazu: {0}.";
-    t[74] = "Expected command status BEGIN, got {0}.";
-    t[75] = "Očekáván příkaz BEGIN, obdržen {0}.";
-    t[76] = "Cannot retrieve the id of a named savepoint.";
-    t[77] = "Nemohu získat id nepojmenovaného savepointu.";
-    t[78] = "Unexpected error writing large object to database.";
-    t[79] = "Neočekávaná chyba při zapisování velkého objektu do databáze.";
-    t[84] = "Not on the insert row.";
-    t[85] = "Ne na vkládaném řádku.";
-    t[86] = "Returning autogenerated keys is not supported.";
-    t[87] = "Vrácení automaticky generovaných klíčů není podporováno.";
-    t[88] = "The server requested password-based authentication, but no password was provided.";
-    t[89] = "Server vyžaduje ověření heslem, ale žádné nebylo posláno.";
-    t[98] = "Unable to load the class {0} responsible for the datatype {1}";
-    t[99] = "Nemohu načíst třídu {0} odpovědnou za typ {1}";
-    t[100] = "Invalid fetch direction constant: {0}.";
-    t[101] = "Špatný směr čtení: {0}.";
-    t[102] = "Conversion of money failed.";
-    t[103] = "Převod peněz selhal.";
-    t[104] = "Connection has been closed.";
-    t[105] = "Spojeni bylo uzavřeno.";
-    t[106] = "Cannot retrieve the name of an unnamed savepoint.";
-    t[107] = "Nemohu získat název nepojmenovaného savepointu.";
-    t[108] = "Large Objects may not be used in auto-commit mode.";
-    t[109] = "Velké objecky nemohou být použity v auto-commit modu.";
-    t[110] = "This ResultSet is closed.";
-    t[111] = "Tento ResultSet je uzavřený.";
-    t[116] = "Something unusual has occurred to cause the driver to fail. Please report this exception.";
-    t[117] = "Něco neobvyklého přinutilo ovladač selhat. Prosím nahlaste tuto vyjímku.";
-    t[118] = "The server does not support SSL.";
-    t[119] = "Server nepodporuje SSL.";
-    t[120] = "Invalid stream length {0}.";
-    t[121] = "Vadná délka proudu {0}.";
-    t[126] = "The maximum field size must be a value greater than or equal to 0.";
-    t[127] = "Maximální velikost pole musí být nezáporné číslo.";
-    t[130] = "Cannot call updateRow() when on the insert row.";
-    t[131] = "Nemohu volat updateRow() na vlkádaném řádku.";
-    t[132] = "A CallableStatement was executed with nothing returned.";
-    t[133] = "CallableStatement byl spuštěn, leč nic nebylo vráceno.";
-    t[134] = "Provided Reader failed.";
-    t[135] = "Selhal poskytnutý Reader.";
-    t[146] = "Cannot call deleteRow() when on the insert row.";
-    t[147] = "Nemůžete volat deleteRow() při vkládání řádku.";
-    t[156] = "Where: {0}";
-    t[157] = "Kde: {0}";
-    t[158] = "An unexpected result was returned by a query.";
-    t[159] = "Obdržen neočekávaný výsledek dotazu.";
-    t[160] = "The connection attempt failed.";
-    t[161] = "Pokus o připojení selhal.";
-    t[162] = "Too many update results were returned.";
-    t[163] = "Bylo vráceno příliš mnoho výsledků aktualizací.";
-    t[164] = "Unknown type {0}.";
-    t[165] = "Neznámý typ {0}.";
-    t[166] = "{0} function takes two and only two arguments.";
-    t[167] = "Funkce {0} bere právě dva argumenty.";
-    t[168] = "{0} function doesn''t take any argument.";
-    t[169] = "Funkce {0} nebere žádný argument.";
-    t[172] = "Unable to find name datatype in the system catalogs.";
-    t[173] = "Nemohu najít název typu v systémovém katalogu.";
-    t[174] = "Protocol error.  Session setup failed.";
-    t[175] = "Chyba protokolu. Nastavení relace selhalo.";
-    t[176] = "{0} function takes one and only one argument.";
-    t[177] = "Funkce {0} bere jeden argument.";
-    t[186] = "The driver currently does not support COPY operations.";
-    t[187] = "Ovladač nyní nepodporuje příkaz COPY.";
-    t[190] = "Invalid character data was found.  This is most likely caused by stored data containing characters that are invalid for the character set the database was created in.  The most common example of this is storing 8bit data in a SQL_ASCII database.";
-    t[191] = "Nalezena vada ve znakových datech. Toto může být způsobeno uloženými daty obsahujícími znaky, které jsou závadné pro znakovou sadu nastavenou při zakládání databáze. Nejznámejší příklad je ukládání 8bitových dat vSQL_ASCII databázi.";
-    t[196] = "Fetch size must be a value greater to or equal to 0.";
-    t[197] = "Nabraná velikost musí být nezáporná.";
-    t[204] = "Unsupported Types value: {0}";
-    t[205] = "Nepodporovaná hodnota typu: {0}";
-    t[206] = "Can''t refresh the insert row.";
-    t[207] = "Nemohu obnovit vkládaný řádek.";
-    t[210] = "Maximum number of rows must be a value grater than or equal to 0.";
-    t[211] = "Maximální počet řádek musí být nezáporné číslo.";
-    t[216] = "No value specified for parameter {0}.";
-    t[217] = "Nespecifikována hodnota parametru {0}.";
-    t[218] = "The array index is out of range: {0}, number of elements: {1}.";
-    t[219] = "Index pole mimo rozsah: {0}, počet prvků: {1}.";
-    t[220] = "Provided InputStream failed.";
-    t[221] = "Selhal poskytnutý InputStream.";
-    t[228] = "Cannot reference a savepoint after it has been released.";
-    t[229] = "Nemohu získat odkaz na savepoint, když byl uvolněn.";
-    t[232] = "An error occurred while setting up the SSL connection.";
-    t[233] = "Nastala chyba při nastavení SSL spojení.";
-    t[246] = "Detail: {0}";
-    t[247] = "Detail: {0}";
-    t[248] = "This PooledConnection has already been closed.";
-    t[249] = "Tento PooledConnection byl uzavřen.";
-    t[250] = "A result was returned when none was expected.";
-    t[251] = "Obdržen výsledek, ikdyž žádný nebyl očekáván.";
-    t[254] = "The JVM claims not to support the encoding: {0}";
-    t[255] = "JVM tvrdí, že nepodporuje kodování: {0}";
-    t[256] = "The parameter index is out of range: {0}, number of parameters: {1}.";
-    t[257] = "Index parametru mimo rozsah: {0}, počet parametrů {1}.";
-    t[258] = "LOB positioning offsets start at 1.";
-    t[259] = "Začátek pozicování LOB začína na 1.";
-    t[260] = "{0} function takes two or three arguments.";
-    t[261] = "Funkce {0} bere dva nebo tři argumenty.";
-    t[262] = "Currently positioned after the end of the ResultSet.  You cannot call deleteRow() here.";
-    t[263] = "Právě jste za pozicí konce ResultSetu. Zde nemůžete volat deleteRow().s";
-    t[266] = "Server SQLState: {0}";
-    t[267] = "Server SQLState: {0}";
-    t[270] = "{0} function takes four and only four argument.";
-    t[271] = "Funkce {0} bere přesně čtyři argumenty.";
-    t[272] = "Failed to create object for: {0}.";
-    t[273] = "Selhalo vytvoření objektu: {0}.";
-    t[274] = "No results were returned by the query.";
-    t[275] = "Neobdržen žádný výsledek dotazu.";
-    t[276] = "Position: {0}";
-    t[277] = "Pozice: {0}";
-    t[278] = "The column index is out of range: {0}, number of columns: {1}.";
-    t[279] = "Index sloupece je mimo rozsah: {0}, počet sloupců: {1}.";
-    t[280] = "Unknown Response Type {0}.";
-    t[281] = "Neznámý typ odpovědi {0}.";
-    t[284] = "Hint: {0}";
-    t[285] = "Rada: {0}";
-    t[286] = "Location: File: {0}, Routine: {1}, Line: {2}";
-    t[287] = "Poloha: Soubor: {0}, Rutina: {1}, Řádek: {2}";
-    t[288] = "Query timeout must be a value greater than or equals to 0.";
-    t[289] = "Časový limit dotazu musí být nezáporné číslo.";
-    t[292] = "Unable to translate data into the desired encoding.";
-    t[293] = "Nemohu přeložit data do požadovaného kódování.";
-    t[296] = "Cannot call cancelRowUpdates() when on the insert row.";
-    t[297] = "Nemůžete volat cancelRowUpdates() při vkládání řádku.";
-    t[298] = "The authentication type {0} is not supported. Check that you have configured the pg_hba.conf file to include the client''s IP address or subnet, and that it is using an authentication scheme supported by the driver.";
-    t[299] = "Ověření typu {0} není podporováno. Zkontrolujte zda konfigurační soubor pg_hba.conf obsahuje klientskou IP adresu či podsíť a zda je použité ověřenovací schéma podporováno ovladačem.";
-    t[308] = "There are no rows in this ResultSet.";
-    t[309] = "Žádný řádek v ResultSet.";
-    table = t;
-  }
+    private static final String[] table;
 
-  @Override
-  public Object handleGetObject (String msgid) throws MissingResourceException {
-    int hash_val = msgid.hashCode() & 0x7fffffff;
-    int idx = (hash_val % 157) << 1;
-    {
-      Object found = table[idx];
-      if (found == null)
-        return null;
-      if (msgid.equals(found))
-        return table[idx + 1];
+    static {
+        String[] t = new String[314];
+        t[0] = "";
+        t[1] = "Project-Id-Version: PostgreSQL JDBC Driver 8.0\nReport-Msgid-Bugs-To: \nPO-Revision-Date: 2005-08-21 20:00+0200\nLast-Translator: Petr Dittrich <bodyn@medoro.org>\nLanguage-Team: \nLanguage: \nMIME-Version: 1.0\nContent-Type: text/plain; charset=UTF-8\nContent-Transfer-Encoding: 8bit\n";
+        t[2] = "A connection could not be made using the requested protocol {0}.";
+        t[3] = "Spojení nelze vytvořit s použitím žádaného protokolu {0}.";
+        t[4] = "Malformed function or procedure escape syntax at offset {0}.";
+        t[5] = "Poškozená funkce nebo opuštění procedury na pozici {0}.";
+        t[8] = "Cannot cast an instance of {0} to type {1}";
+        t[9] = "Nemohu přetypovat instanci {0} na typ {1}";
+        t[12] = "ResultSet is not updateable.  The query that generated this result set must select only one table, and must select all primary keys from that table. See the JDBC 2.1 API Specification, section 5.6 for more details.";
+        t[13] = "ResultSet není aktualizavatelný. Dotaz musí vybírat pouze z jedné tabulky a musí obsahovat všechny primární klíče tabulky. Koukni do JDBC 2.1 API Specifikace, sekce 5.6 pro více podrobností.";
+        t[14] = "The JVM claims not to support the {0} encoding.";
+        t[15] = "JVM tvrdí, že nepodporuje kodování {0}.";
+        t[16] = "An I/O error occurred while sending to the backend.";
+        t[17] = "Vystupně/výstupní chyba při odesílání k backend.";
+        t[18] = "Statement has been closed.";
+        t[19] = "Statement byl uzavřen.";
+        t[20] = "Unknown Types value.";
+        t[21] = "Neznámá hodnota typu.";
+        t[22] = "ResultSets with concurrency CONCUR_READ_ONLY cannot be updated.";
+        t[23] = "ResultSets se souběžností CONCUR_READ_ONLY nemůže být aktualizováno";
+        t[26] = "You must specify at least one column value to insert a row.";
+        t[27] = "Musíte vyplnit alespoň jeden sloupec pro vložení řádku.";
+        t[32] = "No primary key found for table {0}.";
+        t[33] = "Nenalezen primární klíč pro tabulku {0}.";
+        t[34] = "Cannot establish a savepoint in auto-commit mode.";
+        t[35] = "Nemohu vytvořit savepoint v auto-commit modu.";
+        t[38] = "Can''t use relative move methods while on the insert row.";
+        t[39] = "Nemůžete používat relativní přesuny při vkládání řádku.";
+        t[44] = "The column name {0} was not found in this ResultSet.";
+        t[45] = "Sloupec pojmenovaný {0} nebyl nalezen v ResultSet.";
+        t[46] = "This statement has been closed.";
+        t[47] = "Příkaz byl uzavřen.";
+        t[48] = "The SSLSocketFactory class provided {0} could not be instantiated.";
+        t[49] = "Třída SSLSocketFactory poskytla {0} což nemůže být instancionizováno.";
+        t[50] = "Multiple ResultSets were returned by the query.";
+        t[51] = "Vícenásobný ResultSet byl vrácen dotazem.";
+        t[52] = "DataSource has been closed.";
+        t[53] = "DataSource byl uzavřen.";
+        t[56] = "Error loading default settings from driverconfig.properties";
+        t[57] = "Chyba načítání standardního nastavení z driverconfig.properties";
+        t[62] = "Bad value for type {0} : {1}";
+        t[63] = "Špatná hodnota pro typ {0} : {1}";
+        t[66] = "Method {0} is not yet implemented.";
+        t[67] = "Metoda {0} není implementována.";
+        t[68] = "The array index is out of range: {0}";
+        t[69] = "Index pole mimo rozsah: {0}";
+        t[70] = "Unexpected command status: {0}.";
+        t[71] = "Neočekávaný stav příkazu: {0}.";
+        t[74] = "Expected command status BEGIN, got {0}.";
+        t[75] = "Očekáván příkaz BEGIN, obdržen {0}.";
+        t[76] = "Cannot retrieve the id of a named savepoint.";
+        t[77] = "Nemohu získat id nepojmenovaného savepointu.";
+        t[78] = "Unexpected error writing large object to database.";
+        t[79] = "Neočekávaná chyba při zapisování velkého objektu do databáze.";
+        t[84] = "Not on the insert row.";
+        t[85] = "Ne na vkládaném řádku.";
+        t[86] = "Returning autogenerated keys is not supported.";
+        t[87] = "Vrácení automaticky generovaných klíčů není podporováno.";
+        t[88] = "The server requested password-based authentication, but no password was provided.";
+        t[89] = "Server vyžaduje ověření heslem, ale žádné nebylo posláno.";
+        t[98] = "Unable to load the class {0} responsible for the datatype {1}";
+        t[99] = "Nemohu načíst třídu {0} odpovědnou za typ {1}";
+        t[100] = "Invalid fetch direction constant: {0}.";
+        t[101] = "Špatný směr čtení: {0}.";
+        t[102] = "Conversion of money failed.";
+        t[103] = "Převod peněz selhal.";
+        t[104] = "Connection has been closed.";
+        t[105] = "Spojeni bylo uzavřeno.";
+        t[106] = "Cannot retrieve the name of an unnamed savepoint.";
+        t[107] = "Nemohu získat název nepojmenovaného savepointu.";
+        t[108] = "Large Objects may not be used in auto-commit mode.";
+        t[109] = "Velké objecky nemohou být použity v auto-commit modu.";
+        t[110] = "This ResultSet is closed.";
+        t[111] = "Tento ResultSet je uzavřený.";
+        t[116] = "Something unusual has occurred to cause the driver to fail. Please report this exception.";
+        t[117] = "Něco neobvyklého přinutilo ovladač selhat. Prosím nahlaste tuto vyjímku.";
+        t[118] = "The server does not support SSL.";
+        t[119] = "Server nepodporuje SSL.";
+        t[120] = "Invalid stream length {0}.";
+        t[121] = "Vadná délka proudu {0}.";
+        t[126] = "The maximum field size must be a value greater than or equal to 0.";
+        t[127] = "Maximální velikost pole musí být nezáporné číslo.";
+        t[130] = "Cannot call updateRow() when on the insert row.";
+        t[131] = "Nemohu volat updateRow() na vlkádaném řádku.";
+        t[132] = "A CallableStatement was executed with nothing returned.";
+        t[133] = "CallableStatement byl spuštěn, leč nic nebylo vráceno.";
+        t[134] = "Provided Reader failed.";
+        t[135] = "Selhal poskytnutý Reader.";
+        t[146] = "Cannot call deleteRow() when on the insert row.";
+        t[147] = "Nemůžete volat deleteRow() při vkládání řádku.";
+        t[156] = "Where: {0}";
+        t[157] = "Kde: {0}";
+        t[158] = "An unexpected result was returned by a query.";
+        t[159] = "Obdržen neočekávaný výsledek dotazu.";
+        t[160] = "The connection attempt failed.";
+        t[161] = "Pokus o připojení selhal.";
+        t[162] = "Too many update results were returned.";
+        t[163] = "Bylo vráceno příliš mnoho výsledků aktualizací.";
+        t[164] = "Unknown type {0}.";
+        t[165] = "Neznámý typ {0}.";
+        t[166] = "{0} function takes two and only two arguments.";
+        t[167] = "Funkce {0} bere právě dva argumenty.";
+        t[168] = "{0} function doesn''t take any argument.";
+        t[169] = "Funkce {0} nebere žádný argument.";
+        t[172] = "Unable to find name datatype in the system catalogs.";
+        t[173] = "Nemohu najít název typu v systémovém katalogu.";
+        t[174] = "Protocol error.  Session setup failed.";
+        t[175] = "Chyba protokolu. Nastavení relace selhalo.";
+        t[176] = "{0} function takes one and only one argument.";
+        t[177] = "Funkce {0} bere jeden argument.";
+        t[186] = "The driver currently does not support COPY operations.";
+        t[187] = "Ovladač nyní nepodporuje příkaz COPY.";
+        t[190] = "Invalid character data was found.  This is most likely caused by stored data containing characters that are invalid for the character set the database was created in.  The most common example of this is storing 8bit data in a SQL_ASCII database.";
+        t[191] = "Nalezena vada ve znakových datech. Toto může být způsobeno uloženými daty obsahujícími znaky, které jsou závadné pro znakovou sadu nastavenou při zakládání databáze. Nejznámejší příklad je ukládání 8bitových dat vSQL_ASCII databázi.";
+        t[196] = "Fetch size must be a value greater to or equal to 0.";
+        t[197] = "Nabraná velikost musí být nezáporná.";
+        t[204] = "Unsupported Types value: {0}";
+        t[205] = "Nepodporovaná hodnota typu: {0}";
+        t[206] = "Can''t refresh the insert row.";
+        t[207] = "Nemohu obnovit vkládaný řádek.";
+        t[210] = "Maximum number of rows must be a value grater than or equal to 0.";
+        t[211] = "Maximální počet řádek musí být nezáporné číslo.";
+        t[216] = "No value specified for parameter {0}.";
+        t[217] = "Nespecifikována hodnota parametru {0}.";
+        t[218] = "The array index is out of range: {0}, number of elements: {1}.";
+        t[219] = "Index pole mimo rozsah: {0}, počet prvků: {1}.";
+        t[220] = "Provided InputStream failed.";
+        t[221] = "Selhal poskytnutý InputStream.";
+        t[228] = "Cannot reference a savepoint after it has been released.";
+        t[229] = "Nemohu získat odkaz na savepoint, když byl uvolněn.";
+        t[232] = "An error occurred while setting up the SSL connection.";
+        t[233] = "Nastala chyba při nastavení SSL spojení.";
+        t[246] = "Detail: {0}";
+        t[247] = "Detail: {0}";
+        t[248] = "This PooledConnection has already been closed.";
+        t[249] = "Tento PooledConnection byl uzavřen.";
+        t[250] = "A result was returned when none was expected.";
+        t[251] = "Obdržen výsledek, ikdyž žádný nebyl očekáván.";
+        t[254] = "The JVM claims not to support the encoding: {0}";
+        t[255] = "JVM tvrdí, že nepodporuje kodování: {0}";
+        t[256] = "The parameter index is out of range: {0}, number of parameters: {1}.";
+        t[257] = "Index parametru mimo rozsah: {0}, počet parametrů {1}.";
+        t[258] = "LOB positioning offsets start at 1.";
+        t[259] = "Začátek pozicování LOB začína na 1.";
+        t[260] = "{0} function takes two or three arguments.";
+        t[261] = "Funkce {0} bere dva nebo tři argumenty.";
+        t[262] = "Currently positioned after the end of the ResultSet.  You cannot call deleteRow() here.";
+        t[263] = "Právě jste za pozicí konce ResultSetu. Zde nemůžete volat deleteRow().s";
+        t[266] = "Server SQLState: {0}";
+        t[267] = "Server SQLState: {0}";
+        t[270] = "{0} function takes four and only four argument.";
+        t[271] = "Funkce {0} bere přesně čtyři argumenty.";
+        t[272] = "Failed to create object for: {0}.";
+        t[273] = "Selhalo vytvoření objektu: {0}.";
+        t[274] = "No results were returned by the query.";
+        t[275] = "Neobdržen žádný výsledek dotazu.";
+        t[276] = "Position: {0}";
+        t[277] = "Pozice: {0}";
+        t[278] = "The column index is out of range: {0}, number of columns: {1}.";
+        t[279] = "Index sloupece je mimo rozsah: {0}, počet sloupců: {1}.";
+        t[280] = "Unknown Response Type {0}.";
+        t[281] = "Neznámý typ odpovědi {0}.";
+        t[284] = "Hint: {0}";
+        t[285] = "Rada: {0}";
+        t[286] = "Location: File: {0}, Routine: {1}, Line: {2}";
+        t[287] = "Poloha: Soubor: {0}, Rutina: {1}, Řádek: {2}";
+        t[288] = "Query timeout must be a value greater than or equals to 0.";
+        t[289] = "Časový limit dotazu musí být nezáporné číslo.";
+        t[292] = "Unable to translate data into the desired encoding.";
+        t[293] = "Nemohu přeložit data do požadovaného kódování.";
+        t[296] = "Cannot call cancelRowUpdates() when on the insert row.";
+        t[297] = "Nemůžete volat cancelRowUpdates() při vkládání řádku.";
+        t[298] = "The authentication type {0} is not supported. Check that you have configured the pg_hba.conf file to include the client''s IP address or subnet, and that it is using an authentication scheme supported by the driver.";
+        t[299] = "Ověření typu {0} není podporováno. Zkontrolujte zda konfigurační soubor pg_hba.conf obsahuje klientskou IP adresu či podsíť a zda je použité ověřenovací schéma podporováno ovladačem.";
+        t[308] = "There are no rows in this ResultSet.";
+        t[309] = "Žádný řádek v ResultSet.";
+        table = t;
     }
-    int incr = ((hash_val % 155) + 1) << 1;
-    for (;;) {
-      idx += incr;
-      if (idx >= 314)
-        idx -= 314;
-      Object found = table[idx];
-      if (found == null)
-        return null;
-      if (msgid.equals(found))
-        return table[idx + 1];
+
+    @Override
+    public Object handleGetObject(String msgid) throws MissingResourceException {
+        int hash_val = msgid.hashCode() & 0x7fffffff;
+        int idx = (hash_val % 157) << 1;
+        {
+            Object found = table[idx];
+            if (found == null)
+                return null;
+            if (msgid.equals(found))
+                return table[idx + 1];
+        }
+        int incr = ((hash_val % 155) + 1) << 1;
+        for (; ; ) {
+            idx += incr;
+            if (idx >= 314)
+                idx -= 314;
+            Object found = table[idx];
+            if (found == null)
+                return null;
+            if (msgid.equals(found))
+                return table[idx + 1];
+        }
     }
-  }
 
-  @Override
-  public Enumeration<String> getKeys () {
-    return
-      new Enumeration<>() {
-        private int idx = 0;
-        { while (idx < 314 && table[idx] == null) idx += 2; }
-        @Override
-        public boolean hasMoreElements () {
-          return (idx < 314);
-        }
-        @Override
-        public String nextElement () {
-          Object key = table[idx];
-          do idx += 2; while (idx < 314 && table[idx] == null);
-          return key.toString();
-        }
-      };
-  }
+    @Override
+    public Enumeration<String> getKeys() {
+        return
+                new Enumeration<>() {
+                    private int idx = 0;
 
-  public ResourceBundle getParent () {
-    return parent;
-  }
+                    {
+                        while (idx < 314 && table[idx] == null) idx += 2;
+                    }
+
+                    @Override
+                    public boolean hasMoreElements() {
+                        return (idx < 314);
+                    }
+
+                    @Override
+                    public String nextElement() {
+                        Object key = table[idx];
+                        do idx += 2; while (idx < 314 && table[idx] == null);
+                        return key.toString();
+                    }
+                };
+    }
+
+    public ResourceBundle getParent() {
+        return parent;
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/translation/messages_de.java b/pgjdbc/src/main/java/org/postgresql/translation/messages_de.java
index c268e85..3bb0687 100644
--- a/pgjdbc/src/main/java/org/postgresql/translation/messages_de.java
+++ b/pgjdbc/src/main/java/org/postgresql/translation/messages_de.java
@@ -5,339 +5,347 @@ import java.util.MissingResourceException;
 import java.util.ResourceBundle;
 
 public class messages_de extends ResourceBundle {
-  private static final String[] table;
-  static {
-    String[] t = new String[794];
-    t[0] = "";
-    t[1] = "Project-Id-Version: head-de\nReport-Msgid-Bugs-To: \nPO-Revision-Date: 2008-09-12 14:22+0200\nLast-Translator: Andre Bialojahn <ab.spamnews@freenet.de>\nLanguage-Team: Deutsch\nLanguage: \nMIME-Version: 1.0\nContent-Type: text/plain; charset=UTF-8\nContent-Transfer-Encoding: 8bit\nX-Generator: KBabel 1.0.2\nX-Poedit-Language: German\nX-Poedit-Country: GERMANY\n";
-    t[4] = "DataSource has been closed.";
-    t[5] = "Die Datenquelle wurde geschlossen.";
-    t[18] = "Where: {0}";
-    t[19] = "Wobei: {0}";
-    t[26] = "The connection attempt failed.";
-    t[27] = "Der Verbindungsversuch schlug fehl.";
-    t[28] = "Currently positioned after the end of the ResultSet.  You cannot call deleteRow() here.";
-    t[29] = "Die augenblickliche Position ist hinter dem Ende des ResultSets.  Dort kann ''deleteRow()'' nicht aufgerufen werden.";
-    t[36] = "Multiple ResultSets were returned by the query.";
-    t[37] = "Die Abfrage ergab mehrere ResultSets.";
-    t[50] = "Too many update results were returned.";
-    t[51] = "Zu viele Updateergebnisse wurden zurückgegeben.";
-    t[58] = "Illegal UTF-8 sequence: initial byte is {0}: {1}";
-    t[59] = "Ungültige UTF-8-Sequenz: das erste Byte ist {0}: {1}";
-    t[66] = "The column name {0} was not found in this ResultSet.";
-    t[67] = "Der Spaltenname {0} wurde in diesem ResultSet nicht gefunden.";
-    t[70] = "Fastpath call {0} - No result was returned and we expected an integer.";
-    t[71] = "Der Fastpath-Aufruf {0} gab kein Ergebnis zurück, jedoch wurde ein Integer erwartet.";
-    t[74] = "Protocol error.  Session setup failed.";
-    t[75] = "Protokollfehler.  Die Sitzung konnte nicht gestartet werden.";
-    t[76] = "A CallableStatement was declared, but no call to registerOutParameter(1, <some type>) was made.";
-    t[77] = "Ein CallableStatement wurde deklariert, aber kein Aufruf von ''registerOutParameter(1, <some type>)'' erfolgte.";
-    t[78] = "ResultSets with concurrency CONCUR_READ_ONLY cannot be updated.";
-    t[79] = "ResultSets, deren Zugriffsart CONCUR_READ_ONLY ist, können nicht aktualisiert werden.";
-    t[90] = "LOB positioning offsets start at 1.";
-    t[91] = "Positionsoffsets für LOBs beginnen bei 1.";
-    t[92] = "Internal Position: {0}";
-    t[93] = "Interne Position: {0}";
-    t[96] = "free() was called on this LOB previously";
-    t[97] = "free() wurde bereits für dieses LOB aufgerufen.";
-    t[100] = "Cannot change transaction read-only property in the middle of a transaction.";
-    t[101] = "Die Nur-Lesen-Eigenschaft einer Transaktion kann nicht während der Transaktion verändert werden.";
-    t[102] = "The JVM claims not to support the {0} encoding.";
-    t[103] = "Die JVM behauptet, die Zeichenkodierung {0} nicht zu unterstützen.";
-    t[108] = "{0} function doesn''t take any argument.";
-    t[109] = "Die {0}-Funktion akzeptiert kein Argument.";
-    t[112] = "xid must not be null";
-    t[113] = "Die xid darf nicht null sein.";
-    t[114] = "Connection has been closed.";
-    t[115] = "Die Verbindung wurde geschlossen.";
-    t[122] = "The server does not support SSL.";
-    t[123] = "Der Server unterstützt SSL nicht.";
-    t[140] = "Illegal UTF-8 sequence: byte {0} of {1} byte sequence is not 10xxxxxx: {2}";
-    t[141] = "Ungültige UTF-8-Sequenz: Byte {0} der {1} Bytesequenz ist nicht 10xxxxxx: {2}";
-    t[148] = "Hint: {0}";
-    t[149] = "Hinweis: {0}";
-    t[152] = "Unable to find name datatype in the system catalogs.";
-    t[153] = "In den Systemkatalogen konnte der Namensdatentyp nicht gefunden werden.";
-    t[156] = "Unsupported Types value: {0}";
-    t[157] = "Unbekannter Typ: {0}.";
-    t[158] = "Unknown type {0}.";
-    t[159] = "Unbekannter Typ {0}.";
-    t[166] = "{0} function takes two and only two arguments.";
-    t[167] = "Die {0}-Funktion erwartet genau zwei Argumente.";
-    t[170] = "Finalizing a Connection that was never closed:";
-    t[171] = "Eine Connection wurde finalisiert, die nie geschlossen wurde:";
-    t[180] = "The maximum field size must be a value greater than or equal to 0.";
-    t[181] = "Die maximale Feldgröße muss ein Wert größer oder gleich Null sein.";
-    t[186] = "PostgreSQL LOBs can only index to: {0}";
-    t[187] = "LOBs in PostgreSQL können nur auf {0} verweisen.";
-    t[194] = "Method {0} is not yet implemented.";
-    t[195] = "Die Methode {0} ist noch nicht implementiert.";
-    t[198] = "Error loading default settings from driverconfig.properties";
-    t[199] = "Fehler beim Laden der Voreinstellungen aus driverconfig.properties";
-    t[200] = "Results cannot be retrieved from a CallableStatement before it is executed.";
-    t[201] = "Ergebnisse können nicht von einem CallableStatement abgerufen werden, bevor es ausgeführt wurde.";
-    t[202] = "Large Objects may not be used in auto-commit mode.";
-    t[203] = "LargeObjects (LOB) dürfen im Modus ''auto-commit'' nicht verwendet werden.";
-    t[208] = "Expected command status BEGIN, got {0}.";
-    t[209] = "Statt des erwarteten Befehlsstatus BEGIN, wurde {0} empfangen.";
-    t[218] = "Invalid fetch direction constant: {0}.";
-    t[219] = "Unzulässige Richtungskonstante bei fetch: {0}.";
-    t[222] = "{0} function takes three and only three arguments.";
-    t[223] = "Die {0}-Funktion erwartet genau drei Argumente.";
-    t[226] = "Error during recover";
-    t[227] = "Beim Wiederherstellen trat ein Fehler auf.";
-    t[228] = "Cannot update the ResultSet because it is either before the start or after the end of the results.";
-    t[229] = "Das ResultSet kann nicht aktualisiert werden, da es entweder vor oder nach dem Ende der Ergebnisse ist.";
-    t[230] = "The JVM claims not to support the encoding: {0}";
-    t[231] = "Die JVM behauptet, die Zeichenkodierung {0} nicht zu unterstützen.";
-    t[232] = "Parameter of type {0} was registered, but call to get{1} (sqltype={2}) was made.";
-    t[233] = "Ein Parameter des Typs {0} wurde registriert, jedoch erfolgte ein Aufruf get{1} (sqltype={2}).";
-    t[240] = "Cannot establish a savepoint in auto-commit mode.";
-    t[241] = "Ein Rettungspunkt kann im Modus ''auto-commit'' nicht erstellt werden.";
-    t[242] = "Cannot retrieve the id of a named savepoint.";
-    t[243] = "Die ID eines benamten Rettungspunktes kann nicht ermittelt werden.";
-    t[244] = "The column index is out of range: {0}, number of columns: {1}.";
-    t[245] = "Der Spaltenindex {0} ist außerhalb des gültigen Bereichs. Anzahl Spalten: {1}.";
-    t[250] = "Something unusual has occurred to cause the driver to fail. Please report this exception.";
-    t[251] = "Etwas Ungewöhnliches ist passiert, das den Treiber fehlschlagen ließ. Bitte teilen Sie diesen Fehler mit.";
-    t[260] = "Cannot cast an instance of {0} to type {1}";
-    t[261] = "Die Typwandlung für eine Instanz von {0} nach {1} ist nicht möglich.";
-    t[264] = "Unknown Types value.";
-    t[265] = "Unbekannter Typ.";
-    t[266] = "Invalid stream length {0}.";
-    t[267] = "Ungültige Länge des Datenstroms: {0}.";
-    t[272] = "Cannot retrieve the name of an unnamed savepoint.";
-    t[273] = "Der Name eines namenlosen Rettungpunktes kann nicht ermittelt werden.";
-    t[274] = "Unable to translate data into the desired encoding.";
-    t[275] = "Die Daten konnten nicht in die gewünschte Kodierung gewandelt werden.";
-    t[276] = "Expected an EOF from server, got: {0}";
-    t[277] = "Vom Server wurde ein EOF erwartet, jedoch {0} gelesen.";
-    t[278] = "Bad value for type {0} : {1}";
-    t[279] = "Unzulässiger Wert für den Typ {0} : {1}.";
-    t[280] = "The server requested password-based authentication, but no password was provided.";
-    t[281] = "Der Server verlangt passwortbasierte Authentifizierung, jedoch wurde kein Passwort angegeben.";
-    t[296] = "Truncation of large objects is only implemented in 8.3 and later servers.";
-    t[297] = "Das Abschneiden großer Objekte ist nur in Versionen nach 8.3 implementiert.";
-    t[298] = "This PooledConnection has already been closed.";
-    t[299] = "Diese PooledConnection ist bereits geschlossen worden.";
-    t[302] = "ClientInfo property not supported.";
-    t[303] = "Die ClientInfo-Eigenschaft ist nicht unterstützt.";
-    t[306] = "Fetch size must be a value greater to or equal to 0.";
-    t[307] = "Die Fetch-Größe muss ein Wert größer oder gleich Null sein.";
-    t[312] = "A connection could not be made using the requested protocol {0}.";
-    t[313] = "Es konnte keine Verbindung unter Verwendung des Protokolls {0} hergestellt werden.";
-    t[322] = "There are no rows in this ResultSet.";
-    t[323] = "Es gibt keine Zeilen in diesem ResultSet.";
-    t[324] = "Unexpected command status: {0}.";
-    t[325] = "Unerwarteter Befehlsstatus: {0}.";
-    t[334] = "Not on the insert row.";
-    t[335] = "Nicht in der Einfügezeile.";
-    t[344] = "Server SQLState: {0}";
-    t[345] = "Server SQLState: {0}";
-    t[348] = "The server''s standard_conforming_strings parameter was reported as {0}. The JDBC driver expected on or off.";
-    t[349] = "Der standard_conforming_strings Parameter des Servers steht auf {0}. Der JDBC-Treiber erwartete on oder off.";
-    t[360] = "The driver currently does not support COPY operations.";
-    t[361] = "Der Treiber unterstützt derzeit keine COPY-Operationen.";
-    t[364] = "The array index is out of range: {0}, number of elements: {1}.";
-    t[365] = "Der Arrayindex {0} ist außerhalb des gültigen Bereichs. Vorhandene Elemente: {1}.";
-    t[374] = "suspend/resume not implemented";
-    t[375] = "Anhalten/Fortsetzen ist nicht implementiert.";
-    t[378] = "Not implemented: one-phase commit must be issued using the same connection that was used to start it";
-    t[379] = "Nicht implementiert: Die einphasige Bestätigung muss über die selbe Verbindung abgewickelt werden, die verwendet wurde, um sie zu beginnen.";
-    t[398] = "Cannot call cancelRowUpdates() when on the insert row.";
-    t[399] = "''cancelRowUpdates()'' kann in der Einfügezeile nicht aufgerufen werden.";
-    t[400] = "Cannot reference a savepoint after it has been released.";
-    t[401] = "Ein Rettungspunkt kann nicht angesprochen werden, nach dem er entfernt wurde.";
-    t[402] = "You must specify at least one column value to insert a row.";
-    t[403] = "Sie müssen mindestens einen Spaltenwert angeben, um eine Zeile einzufügen.";
-    t[404] = "Unable to determine a value for MaxIndexKeys due to missing system catalog data.";
-    t[405] = "Es konnte kein Wert für MaxIndexKeys gefunden werden, da die Systemkatalogdaten fehlen.";
-    t[412] = "Illegal UTF-8 sequence: final value is out of range: {0}";
-    t[413] = "Ungültige UTF-8-Sequenz: Der letzte Wert ist außerhalb des zulässigen Bereichs: {0}";
-    t[414] = "{0} function takes two or three arguments.";
-    t[415] = "Die {0}-Funktion erwartet zwei oder drei Argumente.";
-    t[440] = "Unexpected error writing large object to database.";
-    t[441] = "Beim Schreiben eines LargeObjects (LOB) in die Datenbank trat ein unerwarteter Fehler auf.";
-    t[442] = "Zero bytes may not occur in string parameters.";
-    t[443] = "Stringparameter dürfen keine Nullbytes enthalten.";
-    t[444] = "A result was returned when none was expected.";
-    t[445] = "Die Anweisung lieferte ein Ergebnis obwohl keines erwartet wurde.";
-    t[450] = "ResultSet is not updateable.  The query that generated this result set must select only one table, and must select all primary keys from that table. See the JDBC 2.1 API Specification, section 5.6 for more details.";
-    t[451] = "Das ResultSet kann nicht aktualisiert werden.  Die Abfrage, die es erzeugte, darf nur eine Tabelle und muss darin alle Primärschlüssel auswählen. Siehe JDBC 2.1 API-Spezifikation, Abschnitt 5.6 für mehr Details.";
-    t[454] = "Bind message length {0} too long.  This can be caused by very large or incorrect length specifications on InputStream parameters.";
-    t[455] = "Die Nachrichtenlänge {0} ist zu groß. Das kann von sehr großen oder inkorrekten Längenangaben eines InputStream-Parameters herrühren.";
-    t[460] = "Statement has been closed.";
-    t[461] = "Die Anweisung wurde geschlossen.";
-    t[462] = "No value specified for parameter {0}.";
-    t[463] = "Für den Parameter {0} wurde kein Wert angegeben.";
-    t[468] = "The array index is out of range: {0}";
-    t[469] = "Der Arrayindex ist außerhalb des gültigen Bereichs: {0}.";
-    t[474] = "Unable to bind parameter values for statement.";
-    t[475] = "Der Anweisung konnten keine Parameterwerte zugewiesen werden.";
-    t[476] = "Can''t refresh the insert row.";
-    t[477] = "Die Einfügezeile kann nicht aufgefrischt werden.";
-    t[480] = "No primary key found for table {0}.";
-    t[481] = "Für die Tabelle {0} konnte kein Primärschlüssel gefunden werden.";
-    t[482] = "Cannot change transaction isolation level in the middle of a transaction.";
-    t[483] = "Die Transaktions-Trennungsstufe kann nicht während einer Transaktion verändert werden.";
-    t[498] = "Provided InputStream failed.";
-    t[499] = "Der bereitgestellte InputStream scheiterte.";
-    t[500] = "The parameter index is out of range: {0}, number of parameters: {1}.";
-    t[501] = "Der Parameterindex {0} ist außerhalb des gültigen Bereichs. Es gibt {1} Parameter.";
-    t[502] = "The server''s DateStyle parameter was changed to {0}. The JDBC driver requires DateStyle to begin with ISO for correct operation.";
-    t[503] = "Der Parameter ''Date Style'' wurde auf dem Server auf {0} verändert. Der JDBC-Treiber setzt für korrekte Funktion voraus, dass ''Date Style'' mit ''ISO'' beginnt.";
-    t[508] = "Connection attempt timed out.";
-    t[509] = "Keine Verbindung innerhalb des Zeitintervalls möglich.";
-    t[512] = "Internal Query: {0}";
-    t[513] = "Interne Abfrage: {0}";
-    t[518] = "The authentication type {0} is not supported. Check that you have configured the pg_hba.conf file to include the client''s IP address or subnet, and that it is using an authentication scheme supported by the driver.";
-    t[519] = "Der Authentifizierungstyp {0} wird nicht unterstützt. Stellen Sie sicher, dass die Datei ''pg_hba.conf'' die IP-Adresse oder das Subnetz des Clients enthält und dass der Client ein Authentifizierungsschema nutzt, das vom Treiber unterstützt wird.";
-    t[526] = "Interval {0} not yet implemented";
-    t[527] = "Intervall {0} ist noch nicht implementiert.";
-    t[532] = "Conversion of interval failed";
-    t[533] = "Die Umwandlung eines Intervalls schlug fehl.";
-    t[540] = "Query timeout must be a value greater than or equals to 0.";
-    t[541] = "Das Abfragetimeout muss ein Wert größer oder gleich Null sein.";
-    t[542] = "Connection has been closed automatically because a new connection was opened for the same PooledConnection or the PooledConnection has been closed.";
-    t[543] = "Die Verbindung wurde automatisch geschlossen, da entweder eine neue Verbindung für die gleiche PooledConnection geöffnet wurde, oder die PooledConnection geschlossen worden ist..";
-    t[544] = "ResultSet not positioned properly, perhaps you need to call next.";
-    t[545] = "Das ResultSet ist nicht richtig positioniert. Eventuell muss ''next'' aufgerufen werden.";
-    t[550] = "This statement has been closed.";
-    t[551] = "Die Anweisung wurde geschlossen.";
-    t[552] = "Can''t infer the SQL type to use for an instance of {0}. Use setObject() with an explicit Types value to specify the type to use.";
-    t[553] = "Der in SQL für eine Instanz von {0} zu verwendende Datentyp kann nicht abgeleitet werden. Benutzen Sie ''setObject()'' mit einem expliziten Typ, um ihn festzulegen.";
-    t[554] = "Cannot call updateRow() when on the insert row.";
-    t[555] = "''updateRow()'' kann in der Einfügezeile nicht aufgerufen werden.";
-    t[562] = "Detail: {0}";
-    t[563] = "Detail: {0}";
-    t[566] = "Cannot call deleteRow() when on the insert row.";
-    t[567] = "''deleteRow()'' kann in der Einfügezeile nicht aufgerufen werden.";
-    t[568] = "Currently positioned before the start of the ResultSet.  You cannot call deleteRow() here.";
-    t[569] = "Die augenblickliche Position ist vor dem Beginn des ResultSets.  Dort kann ''deleteRow()'' nicht aufgerufen werden.";
-    t[576] = "Illegal UTF-8 sequence: final value is a surrogate value: {0}";
-    t[577] = "Ungültige UTF-8-Sequenz: der letzte Wert ist ein Ersatzwert: {0}";
-    t[578] = "Unknown Response Type {0}.";
-    t[579] = "Die Antwort weist einen unbekannten Typ auf: {0}.";
-    t[582] = "Unsupported value for stringtype parameter: {0}";
-    t[583] = "Nichtunterstützter Wert für den Stringparameter: {0}";
-    t[584] = "Conversion to type {0} failed: {1}.";
-    t[585] = "Die Umwandlung in den Typ {0} schlug fehl: {1}.";
-    t[586] = "Conversion of money failed.";
-    t[587] = "Die Umwandlung eines Währungsbetrags schlug fehl.";
-    t[600] = "Unable to load the class {0} responsible for the datatype {1}";
-    t[601] = "Die für den Datentyp {1} verantwortliche Klasse {0} konnte nicht geladen werden.";
-    t[604] = "The fastpath function {0} is unknown.";
-    t[605] = "Die Fastpath-Funktion {0} ist unbekannt.";
-    t[608] = "Malformed function or procedure escape syntax at offset {0}.";
-    t[609] = "Unzulässige Syntax für ein Funktions- oder Prozedur-Escape an Offset {0}.";
-    t[612] = "Provided Reader failed.";
-    t[613] = "Der bereitgestellte Reader scheiterte.";
-    t[614] = "Maximum number of rows must be a value grater than or equal to 0.";
-    t[615] = "Die maximale Zeilenzahl muss ein Wert größer oder gleich Null sein.";
-    t[616] = "Failed to create object for: {0}.";
-    t[617] = "Erstellung des Objektes schlug fehl für: {0}.";
-    t[622] = "Premature end of input stream, expected {0} bytes, but only read {1}.";
-    t[623] = "Vorzeitiges Ende des Eingabedatenstroms. Es wurden {0} Bytes erwartet, jedoch nur {1} gelesen.";
-    t[626] = "An unexpected result was returned by a query.";
-    t[627] = "Eine Abfrage lieferte ein unerwartetes Resultat.";
-    t[646] = "An error occurred while setting up the SSL connection.";
-    t[647] = "Beim Aufbau der SSL-Verbindung trat ein Fehler auf.";
-    t[654] = "Illegal UTF-8 sequence: {0} bytes used to encode a {1} byte value: {2}";
-    t[655] = "Ungültige UTF-8-Sequenz: {0} Bytes wurden verwendet um einen {1} Bytewert zu kodieren: {2}";
-    t[658] = "The SSLSocketFactory class provided {0} could not be instantiated.";
-    t[659] = "Die von {0} bereitgestellte SSLSocketFactory-Klasse konnte nicht instanziiert werden.";
-    t[670] = "Position: {0}";
-    t[671] = "Position: {0}";
-    t[676] = "Location: File: {0}, Routine: {1}, Line: {2}";
-    t[677] = "Ort: Datei: {0}, Routine: {1}, Zeile: {2}.";
-    t[684] = "Cannot tell if path is open or closed: {0}.";
-    t[685] = "Es konnte nicht ermittelt werden, ob der Pfad offen oder geschlossen ist: {0}.";
-    t[700] = "Cannot convert an instance of {0} to type {1}";
-    t[701] = "Die Typwandlung für eine Instanz von {0} nach {1} ist nicht möglich.";
-    t[710] = "{0} function takes four and only four argument.";
-    t[711] = "Die {0}-Funktion erwartet genau vier Argumente.";
-    t[718] = "Interrupted while attempting to connect.";
-    t[719] = "Beim Verbindungsversuch trat eine Unterbrechung auf.";
-    t[722] = "Your security policy has prevented the connection from being attempted.  You probably need to grant the connect java.net.SocketPermission to the database server host and port that you wish to connect to.";
-    t[723] = "Ihre Sicherheitsrichtlinie hat den Versuch des Verbindungsaufbaus verhindert. Sie müssen wahrscheinlich der Verbindung zum Datenbankrechner java.net.SocketPermission gewähren, um den Rechner auf dem gewählten Port zu erreichen.";
-    t[736] = "{0} function takes one and only one argument.";
-    t[737] = "Die {0}-Funktion erwartet nur genau ein Argument.";
-    t[744] = "This ResultSet is closed.";
-    t[745] = "Dieses ResultSet ist geschlossen.";
-    t[746] = "Invalid character data was found.  This is most likely caused by stored data containing characters that are invalid for the character set the database was created in.  The most common example of this is storing 8bit data in a SQL_ASCII database.";
-    t[747] = "Ungültige Zeichendaten.  Das ist höchstwahrscheinlich von in der Datenbank gespeicherten Zeichen hervorgerufen, die in einer anderen Kodierung vorliegen, als die, in der die Datenbank erstellt wurde.  Das häufigste Beispiel dafür ist es, 8Bit-Daten in SQL_ASCII-Datenbanken abzulegen.";
-    t[752] = "Error disabling autocommit";
-    t[753] = "Fehler beim Abschalten von Autocommit.";
-    t[754] = "Ran out of memory retrieving query results.";
-    t[755] = "Nicht genügend Speicher beim Abholen der Abfrageergebnisse.";
-    t[756] = "Returning autogenerated keys is not supported.";
-    t[757] = "Die Rückgabe automatisch generierter Schlüssel wird nicht unterstützt,";
-    t[760] = "Operation requires a scrollable ResultSet, but this ResultSet is FORWARD_ONLY.";
-    t[761] = "Die Operation erfordert ein scrollbares ResultSet, dieses jedoch ist FORWARD_ONLY.";
-    t[762] = "A CallableStatement function was executed and the out parameter {0} was of type {1} however type {2} was registered.";
-    t[763] = "Eine CallableStatement-Funktion wurde ausgeführt und der Rückgabewert {0} war vom Typ {1}. Jedoch wurde der Typ {2} dafür registriert.";
-    t[768] = "Unknown ResultSet holdability setting: {0}.";
-    t[769] = "Unbekannte Einstellung für die Haltbarkeit des ResultSets: {0}.";
-    t[772] = "Transaction isolation level {0} not supported.";
-    t[773] = "Die Transaktions-Trennungsstufe {0} ist nicht unterstützt.";
-    t[774] = "Zero bytes may not occur in identifiers.";
-    t[775] = "Nullbytes dürfen in Bezeichnern nicht vorkommen.";
-    t[776] = "No results were returned by the query.";
-    t[777] = "Die Abfrage lieferte kein Ergebnis.";
-    t[778] = "A CallableStatement was executed with nothing returned.";
-    t[779] = "Ein CallableStatement wurde ausgeführt ohne etwas zurückzugeben.";
-    t[780] = "wasNull cannot be call before fetching a result.";
-    t[781] = "wasNull kann nicht aufgerufen werden, bevor ein Ergebnis abgefragt wurde.";
-    t[786] = "This statement does not declare an OUT parameter.  Use '{' ?= call ... '}' to declare one.";
-    t[787] = "Diese Anweisung deklariert keinen OUT-Parameter. Benutzen Sie '{' ?= call ... '}' um das zu tun.";
-    t[788] = "Can''t use relative move methods while on the insert row.";
-    t[789] = "Relative Bewegungen können in der Einfügezeile nicht durchgeführt werden.";
-    t[790] = "A CallableStatement was executed with an invalid number of parameters";
-    t[791] = "Ein CallableStatement wurde mit einer falschen Anzahl Parameter ausgeführt.";
-    t[792] = "Connection is busy with another transaction";
-    t[793] = "Die Verbindung ist derzeit mit einer anderen Transaktion beschäftigt.";
-    table = t;
-  }
-  public Object handleGetObject (String msgid) throws MissingResourceException {
-    int hash_val = msgid.hashCode() & 0x7fffffff;
-    int idx = (hash_val % 397) << 1;
-    {
-      Object found = table[idx];
-      if (found == null)
-        return null;
-      if (msgid.equals(found))
-        return table[idx + 1];
-    }
-    int incr = ((hash_val % 395) + 1) << 1;
-    for (;;) {
-      idx += incr;
-      if (idx >= 794)
-        idx -= 794;
-      Object found = table[idx];
-      if (found == null)
-        return null;
-      if (msgid.equals(found))
-        return table[idx + 1];
-    }
-  }
-  public Enumeration<String> getKeys () {
-    return
-      new Enumeration<>() {
-        private int idx = 0;
-        { while (idx < 794 && table[idx] == null) idx += 2; }
+    private static final String[] table;
 
-        public boolean hasMoreElements () {
-          return (idx < 794);
+    static {
+        String[] t = new String[794];
+        t[0] = "";
+        t[1] = "Project-Id-Version: head-de\nReport-Msgid-Bugs-To: \nPO-Revision-Date: 2008-09-12 14:22+0200\nLast-Translator: Andre Bialojahn <ab.spamnews@freenet.de>\nLanguage-Team: Deutsch\nLanguage: \nMIME-Version: 1.0\nContent-Type: text/plain; charset=UTF-8\nContent-Transfer-Encoding: 8bit\nX-Generator: KBabel 1.0.2\nX-Poedit-Language: German\nX-Poedit-Country: GERMANY\n";
+        t[4] = "DataSource has been closed.";
+        t[5] = "Die Datenquelle wurde geschlossen.";
+        t[18] = "Where: {0}";
+        t[19] = "Wobei: {0}";
+        t[26] = "The connection attempt failed.";
+        t[27] = "Der Verbindungsversuch schlug fehl.";
+        t[28] = "Currently positioned after the end of the ResultSet.  You cannot call deleteRow() here.";
+        t[29] = "Die augenblickliche Position ist hinter dem Ende des ResultSets.  Dort kann ''deleteRow()'' nicht aufgerufen werden.";
+        t[36] = "Multiple ResultSets were returned by the query.";
+        t[37] = "Die Abfrage ergab mehrere ResultSets.";
+        t[50] = "Too many update results were returned.";
+        t[51] = "Zu viele Updateergebnisse wurden zurückgegeben.";
+        t[58] = "Illegal UTF-8 sequence: initial byte is {0}: {1}";
+        t[59] = "Ungültige UTF-8-Sequenz: das erste Byte ist {0}: {1}";
+        t[66] = "The column name {0} was not found in this ResultSet.";
+        t[67] = "Der Spaltenname {0} wurde in diesem ResultSet nicht gefunden.";
+        t[70] = "Fastpath call {0} - No result was returned and we expected an integer.";
+        t[71] = "Der Fastpath-Aufruf {0} gab kein Ergebnis zurück, jedoch wurde ein Integer erwartet.";
+        t[74] = "Protocol error.  Session setup failed.";
+        t[75] = "Protokollfehler.  Die Sitzung konnte nicht gestartet werden.";
+        t[76] = "A CallableStatement was declared, but no call to registerOutParameter(1, <some type>) was made.";
+        t[77] = "Ein CallableStatement wurde deklariert, aber kein Aufruf von ''registerOutParameter(1, <some type>)'' erfolgte.";
+        t[78] = "ResultSets with concurrency CONCUR_READ_ONLY cannot be updated.";
+        t[79] = "ResultSets, deren Zugriffsart CONCUR_READ_ONLY ist, können nicht aktualisiert werden.";
+        t[90] = "LOB positioning offsets start at 1.";
+        t[91] = "Positionsoffsets für LOBs beginnen bei 1.";
+        t[92] = "Internal Position: {0}";
+        t[93] = "Interne Position: {0}";
+        t[96] = "free() was called on this LOB previously";
+        t[97] = "free() wurde bereits für dieses LOB aufgerufen.";
+        t[100] = "Cannot change transaction read-only property in the middle of a transaction.";
+        t[101] = "Die Nur-Lesen-Eigenschaft einer Transaktion kann nicht während der Transaktion verändert werden.";
+        t[102] = "The JVM claims not to support the {0} encoding.";
+        t[103] = "Die JVM behauptet, die Zeichenkodierung {0} nicht zu unterstützen.";
+        t[108] = "{0} function doesn''t take any argument.";
+        t[109] = "Die {0}-Funktion akzeptiert kein Argument.";
+        t[112] = "xid must not be null";
+        t[113] = "Die xid darf nicht null sein.";
+        t[114] = "Connection has been closed.";
+        t[115] = "Die Verbindung wurde geschlossen.";
+        t[122] = "The server does not support SSL.";
+        t[123] = "Der Server unterstützt SSL nicht.";
+        t[140] = "Illegal UTF-8 sequence: byte {0} of {1} byte sequence is not 10xxxxxx: {2}";
+        t[141] = "Ungültige UTF-8-Sequenz: Byte {0} der {1} Bytesequenz ist nicht 10xxxxxx: {2}";
+        t[148] = "Hint: {0}";
+        t[149] = "Hinweis: {0}";
+        t[152] = "Unable to find name datatype in the system catalogs.";
+        t[153] = "In den Systemkatalogen konnte der Namensdatentyp nicht gefunden werden.";
+        t[156] = "Unsupported Types value: {0}";
+        t[157] = "Unbekannter Typ: {0}.";
+        t[158] = "Unknown type {0}.";
+        t[159] = "Unbekannter Typ {0}.";
+        t[166] = "{0} function takes two and only two arguments.";
+        t[167] = "Die {0}-Funktion erwartet genau zwei Argumente.";
+        t[170] = "Finalizing a Connection that was never closed:";
+        t[171] = "Eine Connection wurde finalisiert, die nie geschlossen wurde:";
+        t[180] = "The maximum field size must be a value greater than or equal to 0.";
+        t[181] = "Die maximale Feldgröße muss ein Wert größer oder gleich Null sein.";
+        t[186] = "PostgreSQL LOBs can only index to: {0}";
+        t[187] = "LOBs in PostgreSQL können nur auf {0} verweisen.";
+        t[194] = "Method {0} is not yet implemented.";
+        t[195] = "Die Methode {0} ist noch nicht implementiert.";
+        t[198] = "Error loading default settings from driverconfig.properties";
+        t[199] = "Fehler beim Laden der Voreinstellungen aus driverconfig.properties";
+        t[200] = "Results cannot be retrieved from a CallableStatement before it is executed.";
+        t[201] = "Ergebnisse können nicht von einem CallableStatement abgerufen werden, bevor es ausgeführt wurde.";
+        t[202] = "Large Objects may not be used in auto-commit mode.";
+        t[203] = "LargeObjects (LOB) dürfen im Modus ''auto-commit'' nicht verwendet werden.";
+        t[208] = "Expected command status BEGIN, got {0}.";
+        t[209] = "Statt des erwarteten Befehlsstatus BEGIN, wurde {0} empfangen.";
+        t[218] = "Invalid fetch direction constant: {0}.";
+        t[219] = "Unzulässige Richtungskonstante bei fetch: {0}.";
+        t[222] = "{0} function takes three and only three arguments.";
+        t[223] = "Die {0}-Funktion erwartet genau drei Argumente.";
+        t[226] = "Error during recover";
+        t[227] = "Beim Wiederherstellen trat ein Fehler auf.";
+        t[228] = "Cannot update the ResultSet because it is either before the start or after the end of the results.";
+        t[229] = "Das ResultSet kann nicht aktualisiert werden, da es entweder vor oder nach dem Ende der Ergebnisse ist.";
+        t[230] = "The JVM claims not to support the encoding: {0}";
+        t[231] = "Die JVM behauptet, die Zeichenkodierung {0} nicht zu unterstützen.";
+        t[232] = "Parameter of type {0} was registered, but call to get{1} (sqltype={2}) was made.";
+        t[233] = "Ein Parameter des Typs {0} wurde registriert, jedoch erfolgte ein Aufruf get{1} (sqltype={2}).";
+        t[240] = "Cannot establish a savepoint in auto-commit mode.";
+        t[241] = "Ein Rettungspunkt kann im Modus ''auto-commit'' nicht erstellt werden.";
+        t[242] = "Cannot retrieve the id of a named savepoint.";
+        t[243] = "Die ID eines benamten Rettungspunktes kann nicht ermittelt werden.";
+        t[244] = "The column index is out of range: {0}, number of columns: {1}.";
+        t[245] = "Der Spaltenindex {0} ist außerhalb des gültigen Bereichs. Anzahl Spalten: {1}.";
+        t[250] = "Something unusual has occurred to cause the driver to fail. Please report this exception.";
+        t[251] = "Etwas Ungewöhnliches ist passiert, das den Treiber fehlschlagen ließ. Bitte teilen Sie diesen Fehler mit.";
+        t[260] = "Cannot cast an instance of {0} to type {1}";
+        t[261] = "Die Typwandlung für eine Instanz von {0} nach {1} ist nicht möglich.";
+        t[264] = "Unknown Types value.";
+        t[265] = "Unbekannter Typ.";
+        t[266] = "Invalid stream length {0}.";
+        t[267] = "Ungültige Länge des Datenstroms: {0}.";
+        t[272] = "Cannot retrieve the name of an unnamed savepoint.";
+        t[273] = "Der Name eines namenlosen Rettungpunktes kann nicht ermittelt werden.";
+        t[274] = "Unable to translate data into the desired encoding.";
+        t[275] = "Die Daten konnten nicht in die gewünschte Kodierung gewandelt werden.";
+        t[276] = "Expected an EOF from server, got: {0}";
+        t[277] = "Vom Server wurde ein EOF erwartet, jedoch {0} gelesen.";
+        t[278] = "Bad value for type {0} : {1}";
+        t[279] = "Unzulässiger Wert für den Typ {0} : {1}.";
+        t[280] = "The server requested password-based authentication, but no password was provided.";
+        t[281] = "Der Server verlangt passwortbasierte Authentifizierung, jedoch wurde kein Passwort angegeben.";
+        t[296] = "Truncation of large objects is only implemented in 8.3 and later servers.";
+        t[297] = "Das Abschneiden großer Objekte ist nur in Versionen nach 8.3 implementiert.";
+        t[298] = "This PooledConnection has already been closed.";
+        t[299] = "Diese PooledConnection ist bereits geschlossen worden.";
+        t[302] = "ClientInfo property not supported.";
+        t[303] = "Die ClientInfo-Eigenschaft ist nicht unterstützt.";
+        t[306] = "Fetch size must be a value greater to or equal to 0.";
+        t[307] = "Die Fetch-Größe muss ein Wert größer oder gleich Null sein.";
+        t[312] = "A connection could not be made using the requested protocol {0}.";
+        t[313] = "Es konnte keine Verbindung unter Verwendung des Protokolls {0} hergestellt werden.";
+        t[322] = "There are no rows in this ResultSet.";
+        t[323] = "Es gibt keine Zeilen in diesem ResultSet.";
+        t[324] = "Unexpected command status: {0}.";
+        t[325] = "Unerwarteter Befehlsstatus: {0}.";
+        t[334] = "Not on the insert row.";
+        t[335] = "Nicht in der Einfügezeile.";
+        t[344] = "Server SQLState: {0}";
+        t[345] = "Server SQLState: {0}";
+        t[348] = "The server''s standard_conforming_strings parameter was reported as {0}. The JDBC driver expected on or off.";
+        t[349] = "Der standard_conforming_strings Parameter des Servers steht auf {0}. Der JDBC-Treiber erwartete on oder off.";
+        t[360] = "The driver currently does not support COPY operations.";
+        t[361] = "Der Treiber unterstützt derzeit keine COPY-Operationen.";
+        t[364] = "The array index is out of range: {0}, number of elements: {1}.";
+        t[365] = "Der Arrayindex {0} ist außerhalb des gültigen Bereichs. Vorhandene Elemente: {1}.";
+        t[374] = "suspend/resume not implemented";
+        t[375] = "Anhalten/Fortsetzen ist nicht implementiert.";
+        t[378] = "Not implemented: one-phase commit must be issued using the same connection that was used to start it";
+        t[379] = "Nicht implementiert: Die einphasige Bestätigung muss über die selbe Verbindung abgewickelt werden, die verwendet wurde, um sie zu beginnen.";
+        t[398] = "Cannot call cancelRowUpdates() when on the insert row.";
+        t[399] = "''cancelRowUpdates()'' kann in der Einfügezeile nicht aufgerufen werden.";
+        t[400] = "Cannot reference a savepoint after it has been released.";
+        t[401] = "Ein Rettungspunkt kann nicht angesprochen werden, nach dem er entfernt wurde.";
+        t[402] = "You must specify at least one column value to insert a row.";
+        t[403] = "Sie müssen mindestens einen Spaltenwert angeben, um eine Zeile einzufügen.";
+        t[404] = "Unable to determine a value for MaxIndexKeys due to missing system catalog data.";
+        t[405] = "Es konnte kein Wert für MaxIndexKeys gefunden werden, da die Systemkatalogdaten fehlen.";
+        t[412] = "Illegal UTF-8 sequence: final value is out of range: {0}";
+        t[413] = "Ungültige UTF-8-Sequenz: Der letzte Wert ist außerhalb des zulässigen Bereichs: {0}";
+        t[414] = "{0} function takes two or three arguments.";
+        t[415] = "Die {0}-Funktion erwartet zwei oder drei Argumente.";
+        t[440] = "Unexpected error writing large object to database.";
+        t[441] = "Beim Schreiben eines LargeObjects (LOB) in die Datenbank trat ein unerwarteter Fehler auf.";
+        t[442] = "Zero bytes may not occur in string parameters.";
+        t[443] = "Stringparameter dürfen keine Nullbytes enthalten.";
+        t[444] = "A result was returned when none was expected.";
+        t[445] = "Die Anweisung lieferte ein Ergebnis obwohl keines erwartet wurde.";
+        t[450] = "ResultSet is not updateable.  The query that generated this result set must select only one table, and must select all primary keys from that table. See the JDBC 2.1 API Specification, section 5.6 for more details.";
+        t[451] = "Das ResultSet kann nicht aktualisiert werden.  Die Abfrage, die es erzeugte, darf nur eine Tabelle und muss darin alle Primärschlüssel auswählen. Siehe JDBC 2.1 API-Spezifikation, Abschnitt 5.6 für mehr Details.";
+        t[454] = "Bind message length {0} too long.  This can be caused by very large or incorrect length specifications on InputStream parameters.";
+        t[455] = "Die Nachrichtenlänge {0} ist zu groß. Das kann von sehr großen oder inkorrekten Längenangaben eines InputStream-Parameters herrühren.";
+        t[460] = "Statement has been closed.";
+        t[461] = "Die Anweisung wurde geschlossen.";
+        t[462] = "No value specified for parameter {0}.";
+        t[463] = "Für den Parameter {0} wurde kein Wert angegeben.";
+        t[468] = "The array index is out of range: {0}";
+        t[469] = "Der Arrayindex ist außerhalb des gültigen Bereichs: {0}.";
+        t[474] = "Unable to bind parameter values for statement.";
+        t[475] = "Der Anweisung konnten keine Parameterwerte zugewiesen werden.";
+        t[476] = "Can''t refresh the insert row.";
+        t[477] = "Die Einfügezeile kann nicht aufgefrischt werden.";
+        t[480] = "No primary key found for table {0}.";
+        t[481] = "Für die Tabelle {0} konnte kein Primärschlüssel gefunden werden.";
+        t[482] = "Cannot change transaction isolation level in the middle of a transaction.";
+        t[483] = "Die Transaktions-Trennungsstufe kann nicht während einer Transaktion verändert werden.";
+        t[498] = "Provided InputStream failed.";
+        t[499] = "Der bereitgestellte InputStream scheiterte.";
+        t[500] = "The parameter index is out of range: {0}, number of parameters: {1}.";
+        t[501] = "Der Parameterindex {0} ist außerhalb des gültigen Bereichs. Es gibt {1} Parameter.";
+        t[502] = "The server''s DateStyle parameter was changed to {0}. The JDBC driver requires DateStyle to begin with ISO for correct operation.";
+        t[503] = "Der Parameter ''Date Style'' wurde auf dem Server auf {0} verändert. Der JDBC-Treiber setzt für korrekte Funktion voraus, dass ''Date Style'' mit ''ISO'' beginnt.";
+        t[508] = "Connection attempt timed out.";
+        t[509] = "Keine Verbindung innerhalb des Zeitintervalls möglich.";
+        t[512] = "Internal Query: {0}";
+        t[513] = "Interne Abfrage: {0}";
+        t[518] = "The authentication type {0} is not supported. Check that you have configured the pg_hba.conf file to include the client''s IP address or subnet, and that it is using an authentication scheme supported by the driver.";
+        t[519] = "Der Authentifizierungstyp {0} wird nicht unterstützt. Stellen Sie sicher, dass die Datei ''pg_hba.conf'' die IP-Adresse oder das Subnetz des Clients enthält und dass der Client ein Authentifizierungsschema nutzt, das vom Treiber unterstützt wird.";
+        t[526] = "Interval {0} not yet implemented";
+        t[527] = "Intervall {0} ist noch nicht implementiert.";
+        t[532] = "Conversion of interval failed";
+        t[533] = "Die Umwandlung eines Intervalls schlug fehl.";
+        t[540] = "Query timeout must be a value greater than or equals to 0.";
+        t[541] = "Das Abfragetimeout muss ein Wert größer oder gleich Null sein.";
+        t[542] = "Connection has been closed automatically because a new connection was opened for the same PooledConnection or the PooledConnection has been closed.";
+        t[543] = "Die Verbindung wurde automatisch geschlossen, da entweder eine neue Verbindung für die gleiche PooledConnection geöffnet wurde, oder die PooledConnection geschlossen worden ist..";
+        t[544] = "ResultSet not positioned properly, perhaps you need to call next.";
+        t[545] = "Das ResultSet ist nicht richtig positioniert. Eventuell muss ''next'' aufgerufen werden.";
+        t[550] = "This statement has been closed.";
+        t[551] = "Die Anweisung wurde geschlossen.";
+        t[552] = "Can''t infer the SQL type to use for an instance of {0}. Use setObject() with an explicit Types value to specify the type to use.";
+        t[553] = "Der in SQL für eine Instanz von {0} zu verwendende Datentyp kann nicht abgeleitet werden. Benutzen Sie ''setObject()'' mit einem expliziten Typ, um ihn festzulegen.";
+        t[554] = "Cannot call updateRow() when on the insert row.";
+        t[555] = "''updateRow()'' kann in der Einfügezeile nicht aufgerufen werden.";
+        t[562] = "Detail: {0}";
+        t[563] = "Detail: {0}";
+        t[566] = "Cannot call deleteRow() when on the insert row.";
+        t[567] = "''deleteRow()'' kann in der Einfügezeile nicht aufgerufen werden.";
+        t[568] = "Currently positioned before the start of the ResultSet.  You cannot call deleteRow() here.";
+        t[569] = "Die augenblickliche Position ist vor dem Beginn des ResultSets.  Dort kann ''deleteRow()'' nicht aufgerufen werden.";
+        t[576] = "Illegal UTF-8 sequence: final value is a surrogate value: {0}";
+        t[577] = "Ungültige UTF-8-Sequenz: der letzte Wert ist ein Ersatzwert: {0}";
+        t[578] = "Unknown Response Type {0}.";
+        t[579] = "Die Antwort weist einen unbekannten Typ auf: {0}.";
+        t[582] = "Unsupported value for stringtype parameter: {0}";
+        t[583] = "Nichtunterstützter Wert für den Stringparameter: {0}";
+        t[584] = "Conversion to type {0} failed: {1}.";
+        t[585] = "Die Umwandlung in den Typ {0} schlug fehl: {1}.";
+        t[586] = "Conversion of money failed.";
+        t[587] = "Die Umwandlung eines Währungsbetrags schlug fehl.";
+        t[600] = "Unable to load the class {0} responsible for the datatype {1}";
+        t[601] = "Die für den Datentyp {1} verantwortliche Klasse {0} konnte nicht geladen werden.";
+        t[604] = "The fastpath function {0} is unknown.";
+        t[605] = "Die Fastpath-Funktion {0} ist unbekannt.";
+        t[608] = "Malformed function or procedure escape syntax at offset {0}.";
+        t[609] = "Unzulässige Syntax für ein Funktions- oder Prozedur-Escape an Offset {0}.";
+        t[612] = "Provided Reader failed.";
+        t[613] = "Der bereitgestellte Reader scheiterte.";
+        t[614] = "Maximum number of rows must be a value grater than or equal to 0.";
+        t[615] = "Die maximale Zeilenzahl muss ein Wert größer oder gleich Null sein.";
+        t[616] = "Failed to create object for: {0}.";
+        t[617] = "Erstellung des Objektes schlug fehl für: {0}.";
+        t[622] = "Premature end of input stream, expected {0} bytes, but only read {1}.";
+        t[623] = "Vorzeitiges Ende des Eingabedatenstroms. Es wurden {0} Bytes erwartet, jedoch nur {1} gelesen.";
+        t[626] = "An unexpected result was returned by a query.";
+        t[627] = "Eine Abfrage lieferte ein unerwartetes Resultat.";
+        t[646] = "An error occurred while setting up the SSL connection.";
+        t[647] = "Beim Aufbau der SSL-Verbindung trat ein Fehler auf.";
+        t[654] = "Illegal UTF-8 sequence: {0} bytes used to encode a {1} byte value: {2}";
+        t[655] = "Ungültige UTF-8-Sequenz: {0} Bytes wurden verwendet um einen {1} Bytewert zu kodieren: {2}";
+        t[658] = "The SSLSocketFactory class provided {0} could not be instantiated.";
+        t[659] = "Die von {0} bereitgestellte SSLSocketFactory-Klasse konnte nicht instanziiert werden.";
+        t[670] = "Position: {0}";
+        t[671] = "Position: {0}";
+        t[676] = "Location: File: {0}, Routine: {1}, Line: {2}";
+        t[677] = "Ort: Datei: {0}, Routine: {1}, Zeile: {2}.";
+        t[684] = "Cannot tell if path is open or closed: {0}.";
+        t[685] = "Es konnte nicht ermittelt werden, ob der Pfad offen oder geschlossen ist: {0}.";
+        t[700] = "Cannot convert an instance of {0} to type {1}";
+        t[701] = "Die Typwandlung für eine Instanz von {0} nach {1} ist nicht möglich.";
+        t[710] = "{0} function takes four and only four argument.";
+        t[711] = "Die {0}-Funktion erwartet genau vier Argumente.";
+        t[718] = "Interrupted while attempting to connect.";
+        t[719] = "Beim Verbindungsversuch trat eine Unterbrechung auf.";
+        t[722] = "Your security policy has prevented the connection from being attempted.  You probably need to grant the connect java.net.SocketPermission to the database server host and port that you wish to connect to.";
+        t[723] = "Ihre Sicherheitsrichtlinie hat den Versuch des Verbindungsaufbaus verhindert. Sie müssen wahrscheinlich der Verbindung zum Datenbankrechner java.net.SocketPermission gewähren, um den Rechner auf dem gewählten Port zu erreichen.";
+        t[736] = "{0} function takes one and only one argument.";
+        t[737] = "Die {0}-Funktion erwartet nur genau ein Argument.";
+        t[744] = "This ResultSet is closed.";
+        t[745] = "Dieses ResultSet ist geschlossen.";
+        t[746] = "Invalid character data was found.  This is most likely caused by stored data containing characters that are invalid for the character set the database was created in.  The most common example of this is storing 8bit data in a SQL_ASCII database.";
+        t[747] = "Ungültige Zeichendaten.  Das ist höchstwahrscheinlich von in der Datenbank gespeicherten Zeichen hervorgerufen, die in einer anderen Kodierung vorliegen, als die, in der die Datenbank erstellt wurde.  Das häufigste Beispiel dafür ist es, 8Bit-Daten in SQL_ASCII-Datenbanken abzulegen.";
+        t[752] = "Error disabling autocommit";
+        t[753] = "Fehler beim Abschalten von Autocommit.";
+        t[754] = "Ran out of memory retrieving query results.";
+        t[755] = "Nicht genügend Speicher beim Abholen der Abfrageergebnisse.";
+        t[756] = "Returning autogenerated keys is not supported.";
+        t[757] = "Die Rückgabe automatisch generierter Schlüssel wird nicht unterstützt,";
+        t[760] = "Operation requires a scrollable ResultSet, but this ResultSet is FORWARD_ONLY.";
+        t[761] = "Die Operation erfordert ein scrollbares ResultSet, dieses jedoch ist FORWARD_ONLY.";
+        t[762] = "A CallableStatement function was executed and the out parameter {0} was of type {1} however type {2} was registered.";
+        t[763] = "Eine CallableStatement-Funktion wurde ausgeführt und der Rückgabewert {0} war vom Typ {1}. Jedoch wurde der Typ {2} dafür registriert.";
+        t[768] = "Unknown ResultSet holdability setting: {0}.";
+        t[769] = "Unbekannte Einstellung für die Haltbarkeit des ResultSets: {0}.";
+        t[772] = "Transaction isolation level {0} not supported.";
+        t[773] = "Die Transaktions-Trennungsstufe {0} ist nicht unterstützt.";
+        t[774] = "Zero bytes may not occur in identifiers.";
+        t[775] = "Nullbytes dürfen in Bezeichnern nicht vorkommen.";
+        t[776] = "No results were returned by the query.";
+        t[777] = "Die Abfrage lieferte kein Ergebnis.";
+        t[778] = "A CallableStatement was executed with nothing returned.";
+        t[779] = "Ein CallableStatement wurde ausgeführt ohne etwas zurückzugeben.";
+        t[780] = "wasNull cannot be call before fetching a result.";
+        t[781] = "wasNull kann nicht aufgerufen werden, bevor ein Ergebnis abgefragt wurde.";
+        t[786] = "This statement does not declare an OUT parameter.  Use '{' ?= call ... '}' to declare one.";
+        t[787] = "Diese Anweisung deklariert keinen OUT-Parameter. Benutzen Sie '{' ?= call ... '}' um das zu tun.";
+        t[788] = "Can''t use relative move methods while on the insert row.";
+        t[789] = "Relative Bewegungen können in der Einfügezeile nicht durchgeführt werden.";
+        t[790] = "A CallableStatement was executed with an invalid number of parameters";
+        t[791] = "Ein CallableStatement wurde mit einer falschen Anzahl Parameter ausgeführt.";
+        t[792] = "Connection is busy with another transaction";
+        t[793] = "Die Verbindung ist derzeit mit einer anderen Transaktion beschäftigt.";
+        table = t;
+    }
+
+    public Object handleGetObject(String msgid) throws MissingResourceException {
+        int hash_val = msgid.hashCode() & 0x7fffffff;
+        int idx = (hash_val % 397) << 1;
+        {
+            Object found = table[idx];
+            if (found == null)
+                return null;
+            if (msgid.equals(found))
+                return table[idx + 1];
         }
-        @Override
-        public String nextElement () {
-          Object key = table[idx];
-          do idx += 2; while (idx < 794 && table[idx] == null);
-          return key.toString();
+        int incr = ((hash_val % 395) + 1) << 1;
+        for (; ; ) {
+            idx += incr;
+            if (idx >= 794)
+                idx -= 794;
+            Object found = table[idx];
+            if (found == null)
+                return null;
+            if (msgid.equals(found))
+                return table[idx + 1];
         }
-      };
-  }
-  public java.util.ResourceBundle getParent () {
-    return parent;
-  }
+    }
+
+    public Enumeration<String> getKeys() {
+        return
+                new Enumeration<>() {
+                    private int idx = 0;
+
+                    {
+                        while (idx < 794 && table[idx] == null) idx += 2;
+                    }
+
+                    public boolean hasMoreElements() {
+                        return (idx < 794);
+                    }
+
+                    @Override
+                    public String nextElement() {
+                        Object key = table[idx];
+                        do idx += 2; while (idx < 794 && table[idx] == null);
+                        return key.toString();
+                    }
+                };
+    }
+
+    public java.util.ResourceBundle getParent() {
+        return parent;
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/translation/messages_es.java b/pgjdbc/src/main/java/org/postgresql/translation/messages_es.java
index eb28e05..d955824 100644
--- a/pgjdbc/src/main/java/org/postgresql/translation/messages_es.java
+++ b/pgjdbc/src/main/java/org/postgresql/translation/messages_es.java
@@ -5,89 +5,95 @@ import java.util.MissingResourceException;
 import java.util.ResourceBundle;
 
 public class messages_es extends ResourceBundle {
-  private static final String[] table;
-  static {
-    String[] t = new String[74];
-    t[0] = "";
-    t[1] = "Project-Id-Version: JDBC PostgreSQL Driver\nReport-Msgid-Bugs-To: \nPO-Revision-Date: 2004-10-22 16:51-0300\nLast-Translator: Diego Gil <diego@adminsa.com>\nLanguage-Team: \nLanguage: \nMIME-Version: 1.0\nContent-Type: text/plain; charset=UTF-8\nContent-Transfer-Encoding: 8bit\nX-Poedit-Language: Spanish\n";
-    t[4] = "The column index is out of range: {0}, number of columns: {1}.";
-    t[5] = "El índice de la columna está fuera de rango: {0}, número de columnas: {1}.";
-    t[12] = "Unknown Response Type {0}.";
-    t[13] = "Tipo de respuesta desconocida {0}.";
-    t[16] = "Protocol error.  Session setup failed.";
-    t[17] = "Error de protocolo. Falló el inicio de la sesión.";
-    t[20] = "The server requested password-based authentication, but no password was provided.";
-    t[21] = "El servidor requiere autenticación basada en contraseña, pero no se ha provisto ninguna contraseña.";
-    t[26] = "A result was returned when none was expected.";
-    t[27] = "Se retornó un resultado cuando no se esperaba ninguno.";
-    t[28] = "Server SQLState: {0}";
-    t[29] = "SQLState del servidor: {0}.";
-    t[30] = "The array index is out of range: {0}, number of elements: {1}.";
-    t[31] = "El índice del arreglo esta fuera de rango: {0}, número de elementos: {1}.";
-    t[32] = "Premature end of input stream, expected {0} bytes, but only read {1}.";
-    t[33] = "Final prematuro del flujo de entrada, se esperaban {0} bytes, pero solo se leyeron {1}.";
-    t[36] = "The connection attempt failed.";
-    t[37] = "El intento de conexión falló.";
-    t[38] = "Failed to create object for: {0}.";
-    t[39] = "Fallo al crear objeto: {0}.";
-    t[42] = "An error occurred while setting up the SSL connection.";
-    t[43] = "Ha ocorrido un error mientras se establecía la conexión SSL.";
-    t[48] = "No value specified for parameter {0}.";
-    t[49] = "No se ha especificado un valor para el parámetro {0}.";
-    t[50] = "The server does not support SSL.";
-    t[51] = "Este servidor no soporta SSL.";
-    t[52] = "An unexpected result was returned by a query.";
-    t[53] = "Una consulta retornó un resultado inesperado.";
-    t[60] = "Something unusual has occurred to cause the driver to fail. Please report this exception.";
-    t[61] = "Algo inusual ha ocurrido que provocó un fallo en el controlador. Por favor reporte esta excepción.";
-    t[64] = "No results were returned by the query.";
-    t[65] = "La consulta no retornó ningún resultado.";
-    table = t;
-  }
+    private static final String[] table;
 
-  @Override
-  public Object handleGetObject (String msgid) throws MissingResourceException {
-    int hash_val = msgid.hashCode() & 0x7fffffff;
-    int idx = (hash_val % 37) << 1;
-    {
-      Object found = table[idx];
-      if (found == null)
-        return null;
-      if (msgid.equals(found))
-        return table[idx + 1];
+    static {
+        String[] t = new String[74];
+        t[0] = "";
+        t[1] = "Project-Id-Version: JDBC PostgreSQL Driver\nReport-Msgid-Bugs-To: \nPO-Revision-Date: 2004-10-22 16:51-0300\nLast-Translator: Diego Gil <diego@adminsa.com>\nLanguage-Team: \nLanguage: \nMIME-Version: 1.0\nContent-Type: text/plain; charset=UTF-8\nContent-Transfer-Encoding: 8bit\nX-Poedit-Language: Spanish\n";
+        t[4] = "The column index is out of range: {0}, number of columns: {1}.";
+        t[5] = "El índice de la columna está fuera de rango: {0}, número de columnas: {1}.";
+        t[12] = "Unknown Response Type {0}.";
+        t[13] = "Tipo de respuesta desconocida {0}.";
+        t[16] = "Protocol error.  Session setup failed.";
+        t[17] = "Error de protocolo. Falló el inicio de la sesión.";
+        t[20] = "The server requested password-based authentication, but no password was provided.";
+        t[21] = "El servidor requiere autenticación basada en contraseña, pero no se ha provisto ninguna contraseña.";
+        t[26] = "A result was returned when none was expected.";
+        t[27] = "Se retornó un resultado cuando no se esperaba ninguno.";
+        t[28] = "Server SQLState: {0}";
+        t[29] = "SQLState del servidor: {0}.";
+        t[30] = "The array index is out of range: {0}, number of elements: {1}.";
+        t[31] = "El índice del arreglo esta fuera de rango: {0}, número de elementos: {1}.";
+        t[32] = "Premature end of input stream, expected {0} bytes, but only read {1}.";
+        t[33] = "Final prematuro del flujo de entrada, se esperaban {0} bytes, pero solo se leyeron {1}.";
+        t[36] = "The connection attempt failed.";
+        t[37] = "El intento de conexión falló.";
+        t[38] = "Failed to create object for: {0}.";
+        t[39] = "Fallo al crear objeto: {0}.";
+        t[42] = "An error occurred while setting up the SSL connection.";
+        t[43] = "Ha ocorrido un error mientras se establecía la conexión SSL.";
+        t[48] = "No value specified for parameter {0}.";
+        t[49] = "No se ha especificado un valor para el parámetro {0}.";
+        t[50] = "The server does not support SSL.";
+        t[51] = "Este servidor no soporta SSL.";
+        t[52] = "An unexpected result was returned by a query.";
+        t[53] = "Una consulta retornó un resultado inesperado.";
+        t[60] = "Something unusual has occurred to cause the driver to fail. Please report this exception.";
+        t[61] = "Algo inusual ha ocurrido que provocó un fallo en el controlador. Por favor reporte esta excepción.";
+        t[64] = "No results were returned by the query.";
+        t[65] = "La consulta no retornó ningún resultado.";
+        table = t;
     }
-    int incr = ((hash_val % 35) + 1) << 1;
-    for (;;) {
-      idx += incr;
-      if (idx >= 74)
-        idx -= 74;
-      Object found = table[idx];
-      if (found == null)
-        return null;
-      if (msgid.equals(found))
-        return table[idx + 1];
+
+    @Override
+    public Object handleGetObject(String msgid) throws MissingResourceException {
+        int hash_val = msgid.hashCode() & 0x7fffffff;
+        int idx = (hash_val % 37) << 1;
+        {
+            Object found = table[idx];
+            if (found == null)
+                return null;
+            if (msgid.equals(found))
+                return table[idx + 1];
+        }
+        int incr = ((hash_val % 35) + 1) << 1;
+        for (; ; ) {
+            idx += incr;
+            if (idx >= 74)
+                idx -= 74;
+            Object found = table[idx];
+            if (found == null)
+                return null;
+            if (msgid.equals(found))
+                return table[idx + 1];
+        }
     }
-  }
 
-  @Override
-  public Enumeration<String> getKeys () {
-    return new Enumeration<>() {
-        private int idx = 0;
-        { while (idx < 74 && table[idx] == null) idx += 2; }
-      @Override
-        public boolean hasMoreElements () {
-          return (idx < 74);
-        }
-      @Override
-        public String nextElement () {
-          Object key = table[idx];
-          do idx += 2; while (idx < 74 && table[idx] == null);
-          return key.toString();
-        }
-      };
-  }
+    @Override
+    public Enumeration<String> getKeys() {
+        return new Enumeration<>() {
+            private int idx = 0;
 
-  public ResourceBundle getParent () {
-    return parent;
-  }
+            {
+                while (idx < 74 && table[idx] == null) idx += 2;
+            }
+
+            @Override
+            public boolean hasMoreElements() {
+                return (idx < 74);
+            }
+
+            @Override
+            public String nextElement() {
+                Object key = table[idx];
+                do idx += 2; while (idx < 74 && table[idx] == null);
+                return key.toString();
+            }
+        };
+    }
+
+    public ResourceBundle getParent() {
+        return parent;
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/translation/messages_fr.java b/pgjdbc/src/main/java/org/postgresql/translation/messages_fr.java
index a9e5e63..7e44d49 100644
--- a/pgjdbc/src/main/java/org/postgresql/translation/messages_fr.java
+++ b/pgjdbc/src/main/java/org/postgresql/translation/messages_fr.java
@@ -5,343 +5,349 @@ import java.util.MissingResourceException;
 import java.util.ResourceBundle;
 
 public class messages_fr extends ResourceBundle {
-  private static final String[] table;
-  static {
-    String[] t = new String[794];
-    t[0] = "";
-    t[1] = "Project-Id-Version: head-fr\nReport-Msgid-Bugs-To: \nPO-Revision-Date: 2007-07-27 12:27+0200\nLast-Translator: \nLanguage-Team:  <en@li.org>\nLanguage: \nMIME-Version: 1.0\nContent-Type: text/plain; charset=UTF-8\nContent-Transfer-Encoding: 8bit\nX-Generator: KBabel 1.11.4\nPlural-Forms:  nplurals=2; plural=(n > 1);\n";
-    t[4] = "DataSource has been closed.";
-    t[5] = "DataSource a été fermée.";
-    t[18] = "Where: {0}";
-    t[19] = "Où : {0}";
-    t[26] = "The connection attempt failed.";
-    t[27] = "La tentative de connexion a échoué.";
-    t[28] = "Currently positioned after the end of the ResultSet.  You cannot call deleteRow() here.";
-    t[29] = "Actuellement positionné après la fin du ResultSet. Vous ne pouvez pas appeler deleteRow() ici.";
-    t[32] = "Can''t use query methods that take a query string on a PreparedStatement.";
-    t[33] = "Impossible d''utiliser les fonctions de requête qui utilisent une chaîne de caractères sur un PreparedStatement.";
-    t[36] = "Multiple ResultSets were returned by the query.";
-    t[37] = "Plusieurs ResultSets ont été retournés par la requête.";
-    t[50] = "Too many update results were returned.";
-    t[51] = "Trop de résultats de mise à jour ont été retournés.";
-    t[58] = "Illegal UTF-8 sequence: initial byte is {0}: {1}";
-    t[59] = "Séquence UTF-8 illégale: le premier octet est {0}: {1}";
-    t[66] = "The column name {0} was not found in this ResultSet.";
-    t[67] = "Le nom de colonne {0} n''a pas été trouvé dans ce ResultSet.";
-    t[70] = "Fastpath call {0} - No result was returned and we expected an integer.";
-    t[71] = "Appel Fastpath {0} - Aucun résultat n''a été retourné et nous attendions un entier.";
-    t[74] = "Protocol error.  Session setup failed.";
-    t[75] = "Erreur de protocole. Ouverture de la session en échec.";
-    t[76] = "A CallableStatement was declared, but no call to registerOutParameter(1, <some type>) was made.";
-    t[77] = "Un CallableStatement a été déclaré, mais aucun appel à registerOutParameter(1, <un type>) n''a été fait.";
-    t[78] = "ResultSets with concurrency CONCUR_READ_ONLY cannot be updated.";
-    t[79] = "Les ResultSets avec la concurrence CONCUR_READ_ONLY ne peuvent être mis à jour.";
-    t[90] = "LOB positioning offsets start at 1.";
-    t[91] = "Les décalages de position des LOB commencent à 1.";
-    t[92] = "Internal Position: {0}";
-    t[93] = "Position interne : {0}";
-    t[96] = "free() was called on this LOB previously";
-    t[97] = "free() a été appelée auparavant sur ce LOB";
-    t[100] = "Cannot change transaction read-only property in the middle of a transaction.";
-    t[101] = "Impossible de changer la propriété read-only d''une transaction au milieu d''une transaction.";
-    t[102] = "The JVM claims not to support the {0} encoding.";
-    t[103] = "La JVM prétend ne pas supporter l''encodage {0}.";
-    t[108] = "{0} function doesn''t take any argument.";
-    t[109] = "La fonction {0} n''accepte aucun argument.";
-    t[112] = "xid must not be null";
-    t[113] = "xid ne doit pas être nul";
-    t[114] = "Connection has been closed.";
-    t[115] = "La connexion a été fermée.";
-    t[122] = "The server does not support SSL.";
-    t[123] = "Le serveur ne supporte pas SSL.";
-    t[140] = "Illegal UTF-8 sequence: byte {0} of {1} byte sequence is not 10xxxxxx: {2}";
-    t[141] = "Séquence UTF-8 illégale: l''octet {0} de la séquence d''octet {1} n''est pas 10xxxxxx: {2}";
-    t[148] = "Hint: {0}";
-    t[149] = "Indice : {0}";
-    t[152] = "Unable to find name datatype in the system catalogs.";
-    t[153] = "Incapable de trouver le type de donnée name dans les catalogues systèmes.";
-    t[156] = "Unsupported Types value: {0}";
-    t[157] = "Valeur de type non supportée : {0}";
-    t[158] = "Unknown type {0}.";
-    t[159] = "Type inconnu : {0}.";
-    t[166] = "{0} function takes two and only two arguments.";
-    t[167] = "La fonction {0} n''accepte que deux et seulement deux arguments.";
-    t[170] = "Finalizing a Connection that was never closed:";
-    t[171] = "Destruction d''une connection qui n''a jamais été fermée:";
-    t[180] = "The maximum field size must be a value greater than or equal to 0.";
-    t[181] = "La taille maximum des champs doit être une valeur supérieure ou égale à 0.";
-    t[186] = "PostgreSQL LOBs can only index to: {0}";
-    t[187] = "Les LOB PostgreSQL peuvent seulement s''indicer à: {0}";
-    t[194] = "Method {0} is not yet implemented.";
-    t[195] = "La fonction {0} n''est pas encore implémentée.";
-    t[198] = "Error loading default settings from driverconfig.properties";
-    t[199] = "Erreur de chargement des valeurs par défaut depuis driverconfig.properties";
-    t[200] = "Results cannot be retrieved from a CallableStatement before it is executed.";
-    t[201] = "Les résultats ne peuvent être récupérés à partir d''un CallableStatement avant qu''il ne soit exécuté.";
-    t[202] = "Large Objects may not be used in auto-commit mode.";
-    t[203] = "Les Large Objects ne devraient pas être utilisés en mode auto-commit.";
-    t[208] = "Expected command status BEGIN, got {0}.";
-    t[209] = "Attendait le statut de commande BEGIN, obtenu {0}.";
-    t[218] = "Invalid fetch direction constant: {0}.";
-    t[219] = "Constante de direction pour la récupération invalide : {0}.";
-    t[222] = "{0} function takes three and only three arguments.";
-    t[223] = "La fonction {0} n''accepte que trois et seulement trois arguments.";
-    t[226] = "Error during recover";
-    t[227] = "Erreur durant la restauration";
-    t[228] = "Cannot update the ResultSet because it is either before the start or after the end of the results.";
-    t[229] = "Impossible de mettre à jour le ResultSet car c''est soit avant le début ou après la fin des résultats.";
-    t[232] = "Parameter of type {0} was registered, but call to get{1} (sqltype={2}) was made.";
-    t[233] = "Un paramètre de type {0} a été enregistré, mais un appel à get{1} (sqltype={2}) a été fait.";
-    t[240] = "Cannot establish a savepoint in auto-commit mode.";
-    t[241] = "Impossible d''établir un savepoint en mode auto-commit.";
-    t[242] = "Cannot retrieve the id of a named savepoint.";
-    t[243] = "Impossible de retrouver l''identifiant d''un savepoint nommé.";
-    t[244] = "The column index is out of range: {0}, number of columns: {1}.";
-    t[245] = "L''indice de la colonne est hors limite : {0}, nombre de colonnes : {1}.";
-    t[250] = "Something unusual has occurred to cause the driver to fail. Please report this exception.";
-    t[251] = "Quelque chose d''inhabituel a provoqué l''échec du pilote. Veuillez faire un rapport sur cette erreur.";
-    t[260] = "Cannot cast an instance of {0} to type {1}";
-    t[261] = "Impossible de convertir une instance de {0} vers le type {1}";
-    t[264] = "Unknown Types value.";
-    t[265] = "Valeur de Types inconnue.";
-    t[266] = "Invalid stream length {0}.";
-    t[267] = "Longueur de flux invalide {0}.";
-    t[272] = "Cannot retrieve the name of an unnamed savepoint.";
-    t[273] = "Impossible de retrouver le nom d''un savepoint sans nom.";
-    t[274] = "Unable to translate data into the desired encoding.";
-    t[275] = "Impossible de traduire les données dans l''encodage désiré.";
-    t[276] = "Expected an EOF from server, got: {0}";
-    t[277] = "Attendait une fin de fichier du serveur, reçu: {0}";
-    t[278] = "Bad value for type {0} : {1}";
-    t[279] = "Mauvaise valeur pour le type {0} : {1}";
-    t[280] = "The server requested password-based authentication, but no password was provided.";
-    t[281] = "Le serveur a demandé une authentification par mots de passe, mais aucun mot de passe n''a été fourni.";
-    t[296] = "Truncation of large objects is only implemented in 8.3 and later servers.";
-    t[297] = "Le troncage des large objects n''est implémenté que dans les serveurs 8.3 et supérieurs.";
-    t[298] = "This PooledConnection has already been closed.";
-    t[299] = "Cette PooledConnection a déjà été fermée.";
-    t[306] = "Fetch size must be a value greater to or equal to 0.";
-    t[307] = "Fetch size doit être une valeur supérieur ou égal à 0.";
-    t[312] = "A connection could not be made using the requested protocol {0}.";
-    t[313] = "Aucune connexion n''a pu être établie en utilisant le protocole demandé {0}. ";
-    t[322] = "There are no rows in this ResultSet.";
-    t[323] = "Il n''y pas pas de lignes dans ce ResultSet.";
-    t[324] = "Unexpected command status: {0}.";
-    t[325] = "Statut de commande inattendu : {0}.";
-    t[334] = "Not on the insert row.";
-    t[335] = "Pas sur la ligne en insertion.";
-    t[344] = "Server SQLState: {0}";
-    t[345] = "SQLState serveur : {0}";
-    t[348] = "The server''s standard_conforming_strings parameter was reported as {0}. The JDBC driver expected on or off.";
-    t[349] = "Le paramètre serveur standard_conforming_strings a pour valeur {0}. Le driver JDBC attend on ou off.";
-    t[360] = "The driver currently does not support COPY operations.";
-    t[361] = "Le pilote ne supporte pas actuellement les opérations COPY.";
-    t[364] = "The array index is out of range: {0}, number of elements: {1}.";
-    t[365] = "L''indice du tableau est hors limites : {0}, nombre d''éléments : {1}.";
-    t[374] = "suspend/resume not implemented";
-    t[375] = "suspend/resume pas implémenté";
-    t[378] = "Not implemented: one-phase commit must be issued using the same connection that was used to start it";
-    t[379] = "Pas implémenté: le commit à une phase doit avoir lieu en utilisant la même connection que celle où il a commencé";
-    t[398] = "Cannot call cancelRowUpdates() when on the insert row.";
-    t[399] = "Impossible d''appeler cancelRowUpdates() pendant l''insertion d''une ligne.";
-    t[400] = "Cannot reference a savepoint after it has been released.";
-    t[401] = "Impossible de référencer un savepoint après qu''il ait été libéré.";
-    t[402] = "You must specify at least one column value to insert a row.";
-    t[403] = "Vous devez spécifier au moins une valeur de colonne pour insérer une ligne.";
-    t[404] = "Unable to determine a value for MaxIndexKeys due to missing system catalog data.";
-    t[405] = "Incapable de déterminer la valeur de MaxIndexKeys en raison de données manquante dans lecatalogue système.";
-    t[412] = "The JVM claims not to support the encoding: {0}";
-    t[413] = "La JVM prétend ne pas supporter l''encodage: {0}";
-    t[414] = "{0} function takes two or three arguments.";
-    t[415] = "La fonction {0} n''accepte que deux ou trois arguments.";
-    t[440] = "Unexpected error writing large object to database.";
-    t[441] = "Erreur inattendue pendant l''écriture de large object dans la base.";
-    t[442] = "Zero bytes may not occur in string parameters.";
-    t[443] = "Zéro octets ne devrait pas se produire dans les paramètres de type chaîne de caractères.";
-    t[444] = "A result was returned when none was expected.";
-    t[445] = "Un résultat a été retourné alors qu''aucun n''était attendu.";
-    t[450] = "ResultSet is not updateable.  The query that generated this result set must select only one table, and must select all primary keys from that table. See the JDBC 2.1 API Specification, section 5.6 for more details.";
-    t[451] = "Le ResultSet n''est pas modifiable. La requête qui a généré ce résultat doit sélectionner seulement une table, et doit sélectionner toutes les clés primaires de cette table. Voir la spécification de l''API JDBC 2.1, section 5.6 pour plus de détails.";
-    t[454] = "Bind message length {0} too long.  This can be caused by very large or incorrect length specifications on InputStream parameters.";
-    t[455] = "La longueur du message de liaison {0} est trop grande. Cela peut être causé par des spécification de longueur très grandes ou incorrectes pour les paramètres de type InputStream.";
-    t[460] = "Statement has been closed.";
-    t[461] = "Statement a été fermé.";
-    t[462] = "No value specified for parameter {0}.";
-    t[463] = "Pas de valeur spécifiée pour le paramètre {0}.";
-    t[468] = "The array index is out of range: {0}";
-    t[469] = "L''indice du tableau est hors limites : {0}";
-    t[474] = "Unable to bind parameter values for statement.";
-    t[475] = "Incapable de lier les valeurs des paramètres pour la commande.";
-    t[476] = "Can''t refresh the insert row.";
-    t[477] = "Impossible de rafraîchir la ligne insérée.";
-    t[480] = "No primary key found for table {0}.";
-    t[481] = "Pas de clé primaire trouvée pour la table {0}.";
-    t[482] = "Cannot change transaction isolation level in the middle of a transaction.";
-    t[483] = "Impossible de changer le niveau d''isolation des transactions au milieu d''une transaction.";
-    t[498] = "Provided InputStream failed.";
-    t[499] = "L''InputStream fourni a échoué.";
-    t[500] = "The parameter index is out of range: {0}, number of parameters: {1}.";
-    t[501] = "L''indice du paramètre est hors limites : {0}, nombre de paramètres : {1}.";
-    t[502] = "The server''s DateStyle parameter was changed to {0}. The JDBC driver requires DateStyle to begin with ISO for correct operation.";
-    t[503] = "Le paramètre DateStyle du serveur a été changé pour {0}. Le pilote JDBC nécessite que DateStyle commence par ISO pour un fonctionnement correct.";
-    t[508] = "Connection attempt timed out.";
-    t[509] = "La tentative de connexion a échoué dans le délai imparti.";
-    t[512] = "Internal Query: {0}";
-    t[513] = "Requête interne: {0}";
-    t[518] = "The authentication type {0} is not supported. Check that you have configured the pg_hba.conf file to include the client''s IP address or subnet, and that it is using an authentication scheme supported by the driver.";
-    t[519] = "Le type d''authentification {0} n''est pas supporté. Vérifiez que vous avez configuré le fichier pg_hba.conf pour inclure l''adresse IP du client ou le sous-réseau et qu''il utilise un schéma d''authentification supporté par le pilote.";
-    t[526] = "Interval {0} not yet implemented";
-    t[527] = "L''interval {0} n''est pas encore implémenté";
-    t[532] = "Conversion of interval failed";
-    t[533] = "La conversion de l''intervalle a échoué";
-    t[540] = "Query timeout must be a value greater than or equals to 0.";
-    t[541] = "Query timeout doit être une valeur supérieure ou égale à 0.";
-    t[542] = "Connection has been closed automatically because a new connection was opened for the same PooledConnection or the PooledConnection has been closed.";
-    t[543] = "La connexion a été fermée automatiquement car une nouvelle connexion a été ouverte pour la même PooledConnection ou la PooledConnection a été fermée.";
-    t[544] = "ResultSet not positioned properly, perhaps you need to call next.";
-    t[545] = "Le ResultSet n''est pas positionné correctement, vous devez peut-être appeler next().";
-    t[550] = "This statement has been closed.";
-    t[551] = "Ce statement a été fermé.";
-    t[552] = "Can''t infer the SQL type to use for an instance of {0}. Use setObject() with an explicit Types value to specify the type to use.";
-    t[553] = "Impossible de déduire le type SQL à utiliser pour une instance de {0}. Utilisez setObject() avec une valeur de type explicite pour spécifier le type à utiliser.";
-    t[554] = "Cannot call updateRow() when on the insert row.";
-    t[555] = "Impossible d''appeler updateRow() tant que l''on est sur la ligne insérée.";
-    t[562] = "Detail: {0}";
-    t[563] = "Détail : {0}";
-    t[566] = "Cannot call deleteRow() when on the insert row.";
-    t[567] = "Impossible d''appeler deleteRow() pendant l''insertion d''une ligne.";
-    t[568] = "Currently positioned before the start of the ResultSet.  You cannot call deleteRow() here.";
-    t[569] = "Actuellement positionné avant le début du ResultSet. Vous ne pouvez pas appeler deleteRow() ici.";
-    t[576] = "Illegal UTF-8 sequence: final value is a surrogate value: {0}";
-    t[577] = "Séquence UTF-8 illégale: la valeur finale est une valeur de remplacement: {0}";
-    t[578] = "Unknown Response Type {0}.";
-    t[579] = "Type de réponse inconnu {0}.";
-    t[582] = "Unsupported value for stringtype parameter: {0}";
-    t[583] = "Valeur non supportée pour les paramètre de type chaîne de caractères : {0}";
-    t[584] = "Conversion to type {0} failed: {1}.";
-    t[585] = "La conversion vers le type {0} a échoué : {1}.";
-    t[586] = "Conversion of money failed.";
-    t[587] = "La conversion de money a échoué.";
-    t[600] = "Unable to load the class {0} responsible for the datatype {1}";
-    t[601] = "Incapable de charger la classe {0} responsable du type de données {1}";
-    t[604] = "The fastpath function {0} is unknown.";
-    t[605] = "La fonction fastpath {0} est inconnue.";
-    t[608] = "Malformed function or procedure escape syntax at offset {0}.";
-    t[609] = "Syntaxe de fonction ou d''échappement de procédure malformée à l''indice {0}.";
-    t[612] = "Provided Reader failed.";
-    t[613] = "Le Reader fourni a échoué.";
-    t[614] = "Maximum number of rows must be a value grater than or equal to 0.";
-    t[615] = "Le nombre maximum de lignes doit être une valeur supérieure ou égale à 0.";
-    t[616] = "Failed to create object for: {0}.";
-    t[617] = "Échec à la création de l''objet pour : {0}.";
-    t[622] = "Premature end of input stream, expected {0} bytes, but only read {1}.";
-    t[623] = "Fin prématurée du flux en entrée, {0} octets attendus, mais seulement {1} lus.";
-    t[626] = "An unexpected result was returned by a query.";
-    t[627] = "Un résultat inattendu a été retourné par une requête.";
-    t[646] = "An error occurred while setting up the SSL connection.";
-    t[647] = "Une erreur s''est produite pendant l''établissement de la connexion SSL.";
-    t[654] = "Illegal UTF-8 sequence: {0} bytes used to encode a {1} byte value: {2}";
-    t[655] = "Séquence UTF-8 illégale: {0} octets utilisé pour encoder une valeur à {1} octets: {2}";
-    t[658] = "The SSLSocketFactory class provided {0} could not be instantiated.";
-    t[659] = "La classe SSLSocketFactory fournie {0} n''a pas pu être instanciée.";
-    t[670] = "Position: {0}";
-    t[671] = "Position : {0}";
-    t[676] = "Location: File: {0}, Routine: {1}, Line: {2}";
-    t[677] = "Localisation : Fichier : {0}, Routine : {1}, Ligne : {2}";
-    t[684] = "Cannot tell if path is open or closed: {0}.";
-    t[685] = "Impossible de dire si path est fermé ou ouvert : {0}.";
-    t[700] = "Cannot convert an instance of {0} to type {1}";
-    t[701] = "Impossible de convertir une instance de type {0} vers le type {1}";
-    t[710] = "{0} function takes four and only four argument.";
-    t[711] = "La fonction {0} n''accepte que quatre et seulement quatre arguments.";
-    t[718] = "Interrupted while attempting to connect.";
-    t[719] = "Interrompu pendant l''établissement de la connexion.";
-    t[722] = "Illegal UTF-8 sequence: final value is out of range: {0}";
-    t[723] = "Séquence UTF-8 illégale: la valeur finale est en dehors des limites: {0}";
-    t[734] = "No function outputs were registered.";
-    t[735] = "Aucune fonction outputs n''a été enregistrée.";
-    t[736] = "{0} function takes one and only one argument.";
-    t[737] = "La fonction {0} n''accepte qu''un et un seul argument.";
-    t[744] = "This ResultSet is closed.";
-    t[745] = "Ce ResultSet est fermé.";
-    t[746] = "Invalid character data was found.  This is most likely caused by stored data containing characters that are invalid for the character set the database was created in.  The most common example of this is storing 8bit data in a SQL_ASCII database.";
-    t[747] = "Des données de caractères invalides ont été trouvées. C''est probablement causé par le stockage de caractères invalides pour le jeu de caractères de création de la base. L''exemple le plus courant est le stockage de données 8bit dans une base SQL_ASCII.";
-    t[750] = "An I/O error occurred while sending to the backend.";
-    t[751] = "Une erreur d''entrée/sortie a eu lieu lors d''envoi vers le serveur.";
-    t[752] = "Error disabling autocommit";
-    t[753] = "Erreur en désactivant autocommit";
-    t[754] = "Ran out of memory retrieving query results.";
-    t[755] = "Ai manqué de mémoire en récupérant les résultats de la requête.";
-    t[756] = "Returning autogenerated keys is not supported.";
-    t[757] = "Le renvoi des clés automatiquement générées n''est pas supporté.";
-    t[760] = "Operation requires a scrollable ResultSet, but this ResultSet is FORWARD_ONLY.";
-    t[761] = "L''opération nécessite un scrollable ResultSet, mais ce ResultSet est FORWARD_ONLY.";
-    t[762] = "A CallableStatement function was executed and the out parameter {0} was of type {1} however type {2} was registered.";
-    t[763] = "Une fonction CallableStatement a été exécutée et le paramètre en sortie {0} était du type {1} alors que le type {2} était prévu.";
-    t[768] = "Unknown ResultSet holdability setting: {0}.";
-    t[769] = "Paramètre holdability du ResultSet inconnu : {0}.";
-    t[772] = "Transaction isolation level {0} not supported.";
-    t[773] = "Le niveau d''isolation de transaction {0} n''est pas supporté.";
-    t[774] = "Zero bytes may not occur in identifiers.";
-    t[775] = "Des octects à 0 ne devraient pas apparaître dans les identifiants.";
-    t[776] = "No results were returned by the query.";
-    t[777] = "Aucun résultat retourné par la requête.";
-    t[778] = "A CallableStatement was executed with nothing returned.";
-    t[779] = "Un CallableStatement a été exécuté mais n''a rien retourné.";
-    t[780] = "wasNull cannot be call before fetching a result.";
-    t[781] = "wasNull ne peut pas être appelé avant la récupération d''un résultat.";
-    t[786] = "This statement does not declare an OUT parameter.  Use '{' ?= call ... '}' to declare one.";
-    t[787] = "Cette requête ne déclare pas de paramètre OUT. Utilisez '{' ?= call ... '}' pour en déclarer un.";
-    t[788] = "Can''t use relative move methods while on the insert row.";
-    t[789] = "Impossible d''utiliser les fonctions de déplacement relatif pendant l''insertion d''une ligne.";
-    t[792] = "Connection is busy with another transaction";
-    t[793] = "La connection est occupée avec une autre transaction";
-    table = t;
-  }
+    private static final String[] table;
 
-  public Object handleGetObject (String msgid) throws MissingResourceException {
-    int hash_val = msgid.hashCode() & 0x7fffffff;
-    int idx = (hash_val % 397) << 1;
-    {
-      Object found = table[idx];
-      if (found == null)
-        return null;
-      if (msgid.equals(found))
-        return table[idx + 1];
+    static {
+        String[] t = new String[794];
+        t[0] = "";
+        t[1] = "Project-Id-Version: head-fr\nReport-Msgid-Bugs-To: \nPO-Revision-Date: 2007-07-27 12:27+0200\nLast-Translator: \nLanguage-Team:  <en@li.org>\nLanguage: \nMIME-Version: 1.0\nContent-Type: text/plain; charset=UTF-8\nContent-Transfer-Encoding: 8bit\nX-Generator: KBabel 1.11.4\nPlural-Forms:  nplurals=2; plural=(n > 1);\n";
+        t[4] = "DataSource has been closed.";
+        t[5] = "DataSource a été fermée.";
+        t[18] = "Where: {0}";
+        t[19] = "Où : {0}";
+        t[26] = "The connection attempt failed.";
+        t[27] = "La tentative de connexion a échoué.";
+        t[28] = "Currently positioned after the end of the ResultSet.  You cannot call deleteRow() here.";
+        t[29] = "Actuellement positionné après la fin du ResultSet. Vous ne pouvez pas appeler deleteRow() ici.";
+        t[32] = "Can''t use query methods that take a query string on a PreparedStatement.";
+        t[33] = "Impossible d''utiliser les fonctions de requête qui utilisent une chaîne de caractères sur un PreparedStatement.";
+        t[36] = "Multiple ResultSets were returned by the query.";
+        t[37] = "Plusieurs ResultSets ont été retournés par la requête.";
+        t[50] = "Too many update results were returned.";
+        t[51] = "Trop de résultats de mise à jour ont été retournés.";
+        t[58] = "Illegal UTF-8 sequence: initial byte is {0}: {1}";
+        t[59] = "Séquence UTF-8 illégale: le premier octet est {0}: {1}";
+        t[66] = "The column name {0} was not found in this ResultSet.";
+        t[67] = "Le nom de colonne {0} n''a pas été trouvé dans ce ResultSet.";
+        t[70] = "Fastpath call {0} - No result was returned and we expected an integer.";
+        t[71] = "Appel Fastpath {0} - Aucun résultat n''a été retourné et nous attendions un entier.";
+        t[74] = "Protocol error.  Session setup failed.";
+        t[75] = "Erreur de protocole. Ouverture de la session en échec.";
+        t[76] = "A CallableStatement was declared, but no call to registerOutParameter(1, <some type>) was made.";
+        t[77] = "Un CallableStatement a été déclaré, mais aucun appel à registerOutParameter(1, <un type>) n''a été fait.";
+        t[78] = "ResultSets with concurrency CONCUR_READ_ONLY cannot be updated.";
+        t[79] = "Les ResultSets avec la concurrence CONCUR_READ_ONLY ne peuvent être mis à jour.";
+        t[90] = "LOB positioning offsets start at 1.";
+        t[91] = "Les décalages de position des LOB commencent à 1.";
+        t[92] = "Internal Position: {0}";
+        t[93] = "Position interne : {0}";
+        t[96] = "free() was called on this LOB previously";
+        t[97] = "free() a été appelée auparavant sur ce LOB";
+        t[100] = "Cannot change transaction read-only property in the middle of a transaction.";
+        t[101] = "Impossible de changer la propriété read-only d''une transaction au milieu d''une transaction.";
+        t[102] = "The JVM claims not to support the {0} encoding.";
+        t[103] = "La JVM prétend ne pas supporter l''encodage {0}.";
+        t[108] = "{0} function doesn''t take any argument.";
+        t[109] = "La fonction {0} n''accepte aucun argument.";
+        t[112] = "xid must not be null";
+        t[113] = "xid ne doit pas être nul";
+        t[114] = "Connection has been closed.";
+        t[115] = "La connexion a été fermée.";
+        t[122] = "The server does not support SSL.";
+        t[123] = "Le serveur ne supporte pas SSL.";
+        t[140] = "Illegal UTF-8 sequence: byte {0} of {1} byte sequence is not 10xxxxxx: {2}";
+        t[141] = "Séquence UTF-8 illégale: l''octet {0} de la séquence d''octet {1} n''est pas 10xxxxxx: {2}";
+        t[148] = "Hint: {0}";
+        t[149] = "Indice : {0}";
+        t[152] = "Unable to find name datatype in the system catalogs.";
+        t[153] = "Incapable de trouver le type de donnée name dans les catalogues systèmes.";
+        t[156] = "Unsupported Types value: {0}";
+        t[157] = "Valeur de type non supportée : {0}";
+        t[158] = "Unknown type {0}.";
+        t[159] = "Type inconnu : {0}.";
+        t[166] = "{0} function takes two and only two arguments.";
+        t[167] = "La fonction {0} n''accepte que deux et seulement deux arguments.";
+        t[170] = "Finalizing a Connection that was never closed:";
+        t[171] = "Destruction d''une connection qui n''a jamais été fermée:";
+        t[180] = "The maximum field size must be a value greater than or equal to 0.";
+        t[181] = "La taille maximum des champs doit être une valeur supérieure ou égale à 0.";
+        t[186] = "PostgreSQL LOBs can only index to: {0}";
+        t[187] = "Les LOB PostgreSQL peuvent seulement s''indicer à: {0}";
+        t[194] = "Method {0} is not yet implemented.";
+        t[195] = "La fonction {0} n''est pas encore implémentée.";
+        t[198] = "Error loading default settings from driverconfig.properties";
+        t[199] = "Erreur de chargement des valeurs par défaut depuis driverconfig.properties";
+        t[200] = "Results cannot be retrieved from a CallableStatement before it is executed.";
+        t[201] = "Les résultats ne peuvent être récupérés à partir d''un CallableStatement avant qu''il ne soit exécuté.";
+        t[202] = "Large Objects may not be used in auto-commit mode.";
+        t[203] = "Les Large Objects ne devraient pas être utilisés en mode auto-commit.";
+        t[208] = "Expected command status BEGIN, got {0}.";
+        t[209] = "Attendait le statut de commande BEGIN, obtenu {0}.";
+        t[218] = "Invalid fetch direction constant: {0}.";
+        t[219] = "Constante de direction pour la récupération invalide : {0}.";
+        t[222] = "{0} function takes three and only three arguments.";
+        t[223] = "La fonction {0} n''accepte que trois et seulement trois arguments.";
+        t[226] = "Error during recover";
+        t[227] = "Erreur durant la restauration";
+        t[228] = "Cannot update the ResultSet because it is either before the start or after the end of the results.";
+        t[229] = "Impossible de mettre à jour le ResultSet car c''est soit avant le début ou après la fin des résultats.";
+        t[232] = "Parameter of type {0} was registered, but call to get{1} (sqltype={2}) was made.";
+        t[233] = "Un paramètre de type {0} a été enregistré, mais un appel à get{1} (sqltype={2}) a été fait.";
+        t[240] = "Cannot establish a savepoint in auto-commit mode.";
+        t[241] = "Impossible d''établir un savepoint en mode auto-commit.";
+        t[242] = "Cannot retrieve the id of a named savepoint.";
+        t[243] = "Impossible de retrouver l''identifiant d''un savepoint nommé.";
+        t[244] = "The column index is out of range: {0}, number of columns: {1}.";
+        t[245] = "L''indice de la colonne est hors limite : {0}, nombre de colonnes : {1}.";
+        t[250] = "Something unusual has occurred to cause the driver to fail. Please report this exception.";
+        t[251] = "Quelque chose d''inhabituel a provoqué l''échec du pilote. Veuillez faire un rapport sur cette erreur.";
+        t[260] = "Cannot cast an instance of {0} to type {1}";
+        t[261] = "Impossible de convertir une instance de {0} vers le type {1}";
+        t[264] = "Unknown Types value.";
+        t[265] = "Valeur de Types inconnue.";
+        t[266] = "Invalid stream length {0}.";
+        t[267] = "Longueur de flux invalide {0}.";
+        t[272] = "Cannot retrieve the name of an unnamed savepoint.";
+        t[273] = "Impossible de retrouver le nom d''un savepoint sans nom.";
+        t[274] = "Unable to translate data into the desired encoding.";
+        t[275] = "Impossible de traduire les données dans l''encodage désiré.";
+        t[276] = "Expected an EOF from server, got: {0}";
+        t[277] = "Attendait une fin de fichier du serveur, reçu: {0}";
+        t[278] = "Bad value for type {0} : {1}";
+        t[279] = "Mauvaise valeur pour le type {0} : {1}";
+        t[280] = "The server requested password-based authentication, but no password was provided.";
+        t[281] = "Le serveur a demandé une authentification par mots de passe, mais aucun mot de passe n''a été fourni.";
+        t[296] = "Truncation of large objects is only implemented in 8.3 and later servers.";
+        t[297] = "Le troncage des large objects n''est implémenté que dans les serveurs 8.3 et supérieurs.";
+        t[298] = "This PooledConnection has already been closed.";
+        t[299] = "Cette PooledConnection a déjà été fermée.";
+        t[306] = "Fetch size must be a value greater to or equal to 0.";
+        t[307] = "Fetch size doit être une valeur supérieur ou égal à 0.";
+        t[312] = "A connection could not be made using the requested protocol {0}.";
+        t[313] = "Aucune connexion n''a pu être établie en utilisant le protocole demandé {0}. ";
+        t[322] = "There are no rows in this ResultSet.";
+        t[323] = "Il n''y pas pas de lignes dans ce ResultSet.";
+        t[324] = "Unexpected command status: {0}.";
+        t[325] = "Statut de commande inattendu : {0}.";
+        t[334] = "Not on the insert row.";
+        t[335] = "Pas sur la ligne en insertion.";
+        t[344] = "Server SQLState: {0}";
+        t[345] = "SQLState serveur : {0}";
+        t[348] = "The server''s standard_conforming_strings parameter was reported as {0}. The JDBC driver expected on or off.";
+        t[349] = "Le paramètre serveur standard_conforming_strings a pour valeur {0}. Le driver JDBC attend on ou off.";
+        t[360] = "The driver currently does not support COPY operations.";
+        t[361] = "Le pilote ne supporte pas actuellement les opérations COPY.";
+        t[364] = "The array index is out of range: {0}, number of elements: {1}.";
+        t[365] = "L''indice du tableau est hors limites : {0}, nombre d''éléments : {1}.";
+        t[374] = "suspend/resume not implemented";
+        t[375] = "suspend/resume pas implémenté";
+        t[378] = "Not implemented: one-phase commit must be issued using the same connection that was used to start it";
+        t[379] = "Pas implémenté: le commit à une phase doit avoir lieu en utilisant la même connection que celle où il a commencé";
+        t[398] = "Cannot call cancelRowUpdates() when on the insert row.";
+        t[399] = "Impossible d''appeler cancelRowUpdates() pendant l''insertion d''une ligne.";
+        t[400] = "Cannot reference a savepoint after it has been released.";
+        t[401] = "Impossible de référencer un savepoint après qu''il ait été libéré.";
+        t[402] = "You must specify at least one column value to insert a row.";
+        t[403] = "Vous devez spécifier au moins une valeur de colonne pour insérer une ligne.";
+        t[404] = "Unable to determine a value for MaxIndexKeys due to missing system catalog data.";
+        t[405] = "Incapable de déterminer la valeur de MaxIndexKeys en raison de données manquante dans lecatalogue système.";
+        t[412] = "The JVM claims not to support the encoding: {0}";
+        t[413] = "La JVM prétend ne pas supporter l''encodage: {0}";
+        t[414] = "{0} function takes two or three arguments.";
+        t[415] = "La fonction {0} n''accepte que deux ou trois arguments.";
+        t[440] = "Unexpected error writing large object to database.";
+        t[441] = "Erreur inattendue pendant l''écriture de large object dans la base.";
+        t[442] = "Zero bytes may not occur in string parameters.";
+        t[443] = "Zéro octets ne devrait pas se produire dans les paramètres de type chaîne de caractères.";
+        t[444] = "A result was returned when none was expected.";
+        t[445] = "Un résultat a été retourné alors qu''aucun n''était attendu.";
+        t[450] = "ResultSet is not updateable.  The query that generated this result set must select only one table, and must select all primary keys from that table. See the JDBC 2.1 API Specification, section 5.6 for more details.";
+        t[451] = "Le ResultSet n''est pas modifiable. La requête qui a généré ce résultat doit sélectionner seulement une table, et doit sélectionner toutes les clés primaires de cette table. Voir la spécification de l''API JDBC 2.1, section 5.6 pour plus de détails.";
+        t[454] = "Bind message length {0} too long.  This can be caused by very large or incorrect length specifications on InputStream parameters.";
+        t[455] = "La longueur du message de liaison {0} est trop grande. Cela peut être causé par des spécification de longueur très grandes ou incorrectes pour les paramètres de type InputStream.";
+        t[460] = "Statement has been closed.";
+        t[461] = "Statement a été fermé.";
+        t[462] = "No value specified for parameter {0}.";
+        t[463] = "Pas de valeur spécifiée pour le paramètre {0}.";
+        t[468] = "The array index is out of range: {0}";
+        t[469] = "L''indice du tableau est hors limites : {0}";
+        t[474] = "Unable to bind parameter values for statement.";
+        t[475] = "Incapable de lier les valeurs des paramètres pour la commande.";
+        t[476] = "Can''t refresh the insert row.";
+        t[477] = "Impossible de rafraîchir la ligne insérée.";
+        t[480] = "No primary key found for table {0}.";
+        t[481] = "Pas de clé primaire trouvée pour la table {0}.";
+        t[482] = "Cannot change transaction isolation level in the middle of a transaction.";
+        t[483] = "Impossible de changer le niveau d''isolation des transactions au milieu d''une transaction.";
+        t[498] = "Provided InputStream failed.";
+        t[499] = "L''InputStream fourni a échoué.";
+        t[500] = "The parameter index is out of range: {0}, number of parameters: {1}.";
+        t[501] = "L''indice du paramètre est hors limites : {0}, nombre de paramètres : {1}.";
+        t[502] = "The server''s DateStyle parameter was changed to {0}. The JDBC driver requires DateStyle to begin with ISO for correct operation.";
+        t[503] = "Le paramètre DateStyle du serveur a été changé pour {0}. Le pilote JDBC nécessite que DateStyle commence par ISO pour un fonctionnement correct.";
+        t[508] = "Connection attempt timed out.";
+        t[509] = "La tentative de connexion a échoué dans le délai imparti.";
+        t[512] = "Internal Query: {0}";
+        t[513] = "Requête interne: {0}";
+        t[518] = "The authentication type {0} is not supported. Check that you have configured the pg_hba.conf file to include the client''s IP address or subnet, and that it is using an authentication scheme supported by the driver.";
+        t[519] = "Le type d''authentification {0} n''est pas supporté. Vérifiez que vous avez configuré le fichier pg_hba.conf pour inclure l''adresse IP du client ou le sous-réseau et qu''il utilise un schéma d''authentification supporté par le pilote.";
+        t[526] = "Interval {0} not yet implemented";
+        t[527] = "L''interval {0} n''est pas encore implémenté";
+        t[532] = "Conversion of interval failed";
+        t[533] = "La conversion de l''intervalle a échoué";
+        t[540] = "Query timeout must be a value greater than or equals to 0.";
+        t[541] = "Query timeout doit être une valeur supérieure ou égale à 0.";
+        t[542] = "Connection has been closed automatically because a new connection was opened for the same PooledConnection or the PooledConnection has been closed.";
+        t[543] = "La connexion a été fermée automatiquement car une nouvelle connexion a été ouverte pour la même PooledConnection ou la PooledConnection a été fermée.";
+        t[544] = "ResultSet not positioned properly, perhaps you need to call next.";
+        t[545] = "Le ResultSet n''est pas positionné correctement, vous devez peut-être appeler next().";
+        t[550] = "This statement has been closed.";
+        t[551] = "Ce statement a été fermé.";
+        t[552] = "Can''t infer the SQL type to use for an instance of {0}. Use setObject() with an explicit Types value to specify the type to use.";
+        t[553] = "Impossible de déduire le type SQL à utiliser pour une instance de {0}. Utilisez setObject() avec une valeur de type explicite pour spécifier le type à utiliser.";
+        t[554] = "Cannot call updateRow() when on the insert row.";
+        t[555] = "Impossible d''appeler updateRow() tant que l''on est sur la ligne insérée.";
+        t[562] = "Detail: {0}";
+        t[563] = "Détail : {0}";
+        t[566] = "Cannot call deleteRow() when on the insert row.";
+        t[567] = "Impossible d''appeler deleteRow() pendant l''insertion d''une ligne.";
+        t[568] = "Currently positioned before the start of the ResultSet.  You cannot call deleteRow() here.";
+        t[569] = "Actuellement positionné avant le début du ResultSet. Vous ne pouvez pas appeler deleteRow() ici.";
+        t[576] = "Illegal UTF-8 sequence: final value is a surrogate value: {0}";
+        t[577] = "Séquence UTF-8 illégale: la valeur finale est une valeur de remplacement: {0}";
+        t[578] = "Unknown Response Type {0}.";
+        t[579] = "Type de réponse inconnu {0}.";
+        t[582] = "Unsupported value for stringtype parameter: {0}";
+        t[583] = "Valeur non supportée pour les paramètre de type chaîne de caractères : {0}";
+        t[584] = "Conversion to type {0} failed: {1}.";
+        t[585] = "La conversion vers le type {0} a échoué : {1}.";
+        t[586] = "Conversion of money failed.";
+        t[587] = "La conversion de money a échoué.";
+        t[600] = "Unable to load the class {0} responsible for the datatype {1}";
+        t[601] = "Incapable de charger la classe {0} responsable du type de données {1}";
+        t[604] = "The fastpath function {0} is unknown.";
+        t[605] = "La fonction fastpath {0} est inconnue.";
+        t[608] = "Malformed function or procedure escape syntax at offset {0}.";
+        t[609] = "Syntaxe de fonction ou d''échappement de procédure malformée à l''indice {0}.";
+        t[612] = "Provided Reader failed.";
+        t[613] = "Le Reader fourni a échoué.";
+        t[614] = "Maximum number of rows must be a value grater than or equal to 0.";
+        t[615] = "Le nombre maximum de lignes doit être une valeur supérieure ou égale à 0.";
+        t[616] = "Failed to create object for: {0}.";
+        t[617] = "Échec à la création de l''objet pour : {0}.";
+        t[622] = "Premature end of input stream, expected {0} bytes, but only read {1}.";
+        t[623] = "Fin prématurée du flux en entrée, {0} octets attendus, mais seulement {1} lus.";
+        t[626] = "An unexpected result was returned by a query.";
+        t[627] = "Un résultat inattendu a été retourné par une requête.";
+        t[646] = "An error occurred while setting up the SSL connection.";
+        t[647] = "Une erreur s''est produite pendant l''établissement de la connexion SSL.";
+        t[654] = "Illegal UTF-8 sequence: {0} bytes used to encode a {1} byte value: {2}";
+        t[655] = "Séquence UTF-8 illégale: {0} octets utilisé pour encoder une valeur à {1} octets: {2}";
+        t[658] = "The SSLSocketFactory class provided {0} could not be instantiated.";
+        t[659] = "La classe SSLSocketFactory fournie {0} n''a pas pu être instanciée.";
+        t[670] = "Position: {0}";
+        t[671] = "Position : {0}";
+        t[676] = "Location: File: {0}, Routine: {1}, Line: {2}";
+        t[677] = "Localisation : Fichier : {0}, Routine : {1}, Ligne : {2}";
+        t[684] = "Cannot tell if path is open or closed: {0}.";
+        t[685] = "Impossible de dire si path est fermé ou ouvert : {0}.";
+        t[700] = "Cannot convert an instance of {0} to type {1}";
+        t[701] = "Impossible de convertir une instance de type {0} vers le type {1}";
+        t[710] = "{0} function takes four and only four argument.";
+        t[711] = "La fonction {0} n''accepte que quatre et seulement quatre arguments.";
+        t[718] = "Interrupted while attempting to connect.";
+        t[719] = "Interrompu pendant l''établissement de la connexion.";
+        t[722] = "Illegal UTF-8 sequence: final value is out of range: {0}";
+        t[723] = "Séquence UTF-8 illégale: la valeur finale est en dehors des limites: {0}";
+        t[734] = "No function outputs were registered.";
+        t[735] = "Aucune fonction outputs n''a été enregistrée.";
+        t[736] = "{0} function takes one and only one argument.";
+        t[737] = "La fonction {0} n''accepte qu''un et un seul argument.";
+        t[744] = "This ResultSet is closed.";
+        t[745] = "Ce ResultSet est fermé.";
+        t[746] = "Invalid character data was found.  This is most likely caused by stored data containing characters that are invalid for the character set the database was created in.  The most common example of this is storing 8bit data in a SQL_ASCII database.";
+        t[747] = "Des données de caractères invalides ont été trouvées. C''est probablement causé par le stockage de caractères invalides pour le jeu de caractères de création de la base. L''exemple le plus courant est le stockage de données 8bit dans une base SQL_ASCII.";
+        t[750] = "An I/O error occurred while sending to the backend.";
+        t[751] = "Une erreur d''entrée/sortie a eu lieu lors d''envoi vers le serveur.";
+        t[752] = "Error disabling autocommit";
+        t[753] = "Erreur en désactivant autocommit";
+        t[754] = "Ran out of memory retrieving query results.";
+        t[755] = "Ai manqué de mémoire en récupérant les résultats de la requête.";
+        t[756] = "Returning autogenerated keys is not supported.";
+        t[757] = "Le renvoi des clés automatiquement générées n''est pas supporté.";
+        t[760] = "Operation requires a scrollable ResultSet, but this ResultSet is FORWARD_ONLY.";
+        t[761] = "L''opération nécessite un scrollable ResultSet, mais ce ResultSet est FORWARD_ONLY.";
+        t[762] = "A CallableStatement function was executed and the out parameter {0} was of type {1} however type {2} was registered.";
+        t[763] = "Une fonction CallableStatement a été exécutée et le paramètre en sortie {0} était du type {1} alors que le type {2} était prévu.";
+        t[768] = "Unknown ResultSet holdability setting: {0}.";
+        t[769] = "Paramètre holdability du ResultSet inconnu : {0}.";
+        t[772] = "Transaction isolation level {0} not supported.";
+        t[773] = "Le niveau d''isolation de transaction {0} n''est pas supporté.";
+        t[774] = "Zero bytes may not occur in identifiers.";
+        t[775] = "Des octects à 0 ne devraient pas apparaître dans les identifiants.";
+        t[776] = "No results were returned by the query.";
+        t[777] = "Aucun résultat retourné par la requête.";
+        t[778] = "A CallableStatement was executed with nothing returned.";
+        t[779] = "Un CallableStatement a été exécuté mais n''a rien retourné.";
+        t[780] = "wasNull cannot be call before fetching a result.";
+        t[781] = "wasNull ne peut pas être appelé avant la récupération d''un résultat.";
+        t[786] = "This statement does not declare an OUT parameter.  Use '{' ?= call ... '}' to declare one.";
+        t[787] = "Cette requête ne déclare pas de paramètre OUT. Utilisez '{' ?= call ... '}' pour en déclarer un.";
+        t[788] = "Can''t use relative move methods while on the insert row.";
+        t[789] = "Impossible d''utiliser les fonctions de déplacement relatif pendant l''insertion d''une ligne.";
+        t[792] = "Connection is busy with another transaction";
+        t[793] = "La connection est occupée avec une autre transaction";
+        table = t;
     }
-    int incr = ((hash_val % 395) + 1) << 1;
-    for (;;) {
-      idx += incr;
-      if (idx >= 794)
-        idx -= 794;
-      Object found = table[idx];
-      if (found == null)
-        return null;
-      if (msgid.equals(found))
-        return table[idx + 1];
+
+    public Object handleGetObject(String msgid) throws MissingResourceException {
+        int hash_val = msgid.hashCode() & 0x7fffffff;
+        int idx = (hash_val % 397) << 1;
+        {
+            Object found = table[idx];
+            if (found == null)
+                return null;
+            if (msgid.equals(found))
+                return table[idx + 1];
+        }
+        int incr = ((hash_val % 395) + 1) << 1;
+        for (; ; ) {
+            idx += incr;
+            if (idx >= 794)
+                idx -= 794;
+            Object found = table[idx];
+            if (found == null)
+                return null;
+            if (msgid.equals(found))
+                return table[idx + 1];
+        }
     }
-  }
 
-  @Override
-  public Enumeration<String> getKeys () {
-    return
-      new Enumeration<>() {
-        private int idx = 0;
-        { while (idx < 794 && table[idx] == null) idx += 2; }
-        @Override
-        public boolean hasMoreElements () {
-          return (idx < 794);
-        }
-        @Override
-        public String nextElement () {
-          Object key = table[idx];
-          do idx += 2; while (idx < 794 && table[idx] == null);
-          return key.toString();
-        }
-      };
-  }
+    @Override
+    public Enumeration<String> getKeys() {
+        return
+                new Enumeration<>() {
+                    private int idx = 0;
 
-  public ResourceBundle getParent () {
-    return parent;
-  }
+                    {
+                        while (idx < 794 && table[idx] == null) idx += 2;
+                    }
+
+                    @Override
+                    public boolean hasMoreElements() {
+                        return (idx < 794);
+                    }
+
+                    @Override
+                    public String nextElement() {
+                        Object key = table[idx];
+                        do idx += 2; while (idx < 794 && table[idx] == null);
+                        return key.toString();
+                    }
+                };
+    }
+
+    public ResourceBundle getParent() {
+        return parent;
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/translation/messages_it.java b/pgjdbc/src/main/java/org/postgresql/translation/messages_it.java
index 4498eb8..f27e887 100644
--- a/pgjdbc/src/main/java/org/postgresql/translation/messages_it.java
+++ b/pgjdbc/src/main/java/org/postgresql/translation/messages_it.java
@@ -5,329 +5,335 @@ import java.util.MissingResourceException;
 import java.util.ResourceBundle;
 
 public class messages_it extends ResourceBundle {
-  private static final String[] table;
-  static {
-    java.lang.String[] t = new java.lang.String[794];
-    t[0] = "";
-    t[1] = "Project-Id-Version: PostgreSQL JDBC Driver 8.2\nReport-Msgid-Bugs-To: \nPO-Revision-Date: 2006-06-23 17:25+0200\nLast-Translator: Giuseppe Sacco <eppesuig@debian.org>\nLanguage-Team: Italian <tp@lists.linux.it>\nLanguage: it\nMIME-Version: 1.0\nContent-Type: text/plain; charset=UTF-8\nContent-Transfer-Encoding: 8bit\n";
-    t[4] = "DataSource has been closed.";
-    t[5] = "Questo «DataSource» è stato chiuso.";
-    t[18] = "Where: {0}";
-    t[19] = "Dove: {0}";
-    t[26] = "The connection attempt failed.";
-    t[27] = "Il tentativo di connessione è fallito.";
-    t[28] = "Currently positioned after the end of the ResultSet.  You cannot call deleteRow() here.";
-    t[29] = "La posizione attuale è successiva alla fine del ResultSet. Non è possibile invocare «deleteRow()» qui.";
-    t[32] = "Can''t use query methods that take a query string on a PreparedStatement.";
-    t[33] = "Non si possono utilizzare i metodi \"query\" che hanno come argomento una stringa nel caso di «PreparedStatement».";
-    t[36] = "Multiple ResultSets were returned by the query.";
-    t[37] = "La query ha restituito «ResultSet» multipli.";
-    t[50] = "Too many update results were returned.";
-    t[51] = "Sono stati restituiti troppi aggiornamenti.";
-    t[58] = "Illegal UTF-8 sequence: initial byte is {0}: {1}";
-    t[59] = "Sequenza UTF-8 illegale: il byte iniziale è {0}: {1}";
-    t[66] = "The column name {0} was not found in this ResultSet.";
-    t[67] = "Colonna denominata «{0}» non è presente in questo «ResultSet».";
-    t[70] = "Fastpath call {0} - No result was returned and we expected an integer.";
-    t[71] = "Chiamata Fastpath «{0}»: Nessun risultato restituito mentre ci si aspettava un intero.";
-    t[74] = "Protocol error.  Session setup failed.";
-    t[75] = "Errore di protocollo. Impostazione della sessione fallita.";
-    t[76] = "A CallableStatement was declared, but no call to registerOutParameter(1, <some type>) was made.";
-    t[77] = "È stato definito un «CallableStatement» ma non è stato invocato il metodo «registerOutParameter(1, <tipo>)».";
-    t[78] = "ResultSets with concurrency CONCUR_READ_ONLY cannot be updated.";
-    t[79] = "I «ResultSet» in modalità CONCUR_READ_ONLY non possono essere aggiornati.";
-    t[90] = "LOB positioning offsets start at 1.";
-    t[91] = "L''offset per la posizione dei LOB comincia da 1.";
-    t[92] = "Internal Position: {0}";
-    t[93] = "Posizione interna: {0}";
-    t[100] = "Cannot change transaction read-only property in the middle of a transaction.";
-    t[101] = "Non è possibile modificare la proprietà «read-only» delle transazioni nel mezzo di una transazione.";
-    t[102] = "The JVM claims not to support the {0} encoding.";
-    t[103] = "La JVM sostiene di non supportare la codifica {0}.";
-    t[108] = "{0} function doesn''t take any argument.";
-    t[109] = "Il metodo «{0}» non accetta argomenti.";
-    t[112] = "xid must not be null";
-    t[113] = "xid non può essere NULL";
-    t[114] = "Connection has been closed.";
-    t[115] = "Questo «Connection» è stato chiuso.";
-    t[122] = "The server does not support SSL.";
-    t[123] = "Il server non supporta SSL.";
-    t[140] = "Illegal UTF-8 sequence: byte {0} of {1} byte sequence is not 10xxxxxx: {2}";
-    t[141] = "Sequenza UTF-8 illegale: il byte {0} di una sequenza di {1} byte non è 10xxxxxx: {2}";
-    t[148] = "Hint: {0}";
-    t[149] = "Suggerimento: {0}";
-    t[152] = "Unable to find name datatype in the system catalogs.";
-    t[153] = "Non è possibile trovare il datatype «name» nel catalogo di sistema.";
-    t[156] = "Unsupported Types value: {0}";
-    t[157] = "Valore di tipo «{0}» non supportato.";
-    t[158] = "Unknown type {0}.";
-    t[159] = "Tipo sconosciuto {0}.";
-    t[166] = "{0} function takes two and only two arguments.";
-    t[167] = "Il metodo «{0}» accetta due e solo due argomenti.";
-    t[170] = "Finalizing a Connection that was never closed:";
-    t[171] = "Finalizzazione di una «Connection» che non è stata chiusa.";
-    t[186] = "PostgreSQL LOBs can only index to: {0}";
-    t[187] = "Il massimo valore per l''indice dei LOB di PostgreSQL è {0}. ";
-    t[194] = "Method {0} is not yet implemented.";
-    t[195] = "Il metodo «{0}» non è stato ancora implementato.";
-    t[198] = "Error loading default settings from driverconfig.properties";
-    t[199] = "Si è verificato un errore caricando le impostazioni predefinite da «driverconfig.properties».";
-    t[202] = "Large Objects may not be used in auto-commit mode.";
-    t[203] = "Non è possibile impostare i «Large Object» in modalità «auto-commit».";
-    t[208] = "Expected command status BEGIN, got {0}.";
-    t[209] = "Lo stato del comando avrebbe dovuto essere BEGIN, mentre invece è {0}.";
-    t[218] = "Invalid fetch direction constant: {0}.";
-    t[219] = "Costante per la direzione dell''estrazione non valida: {0}.";
-    t[222] = "{0} function takes three and only three arguments.";
-    t[223] = "Il metodo «{0}» accetta tre e solo tre argomenti.";
-    t[226] = "Error during recover";
-    t[227] = "Errore durante il ripristino";
-    t[228] = "Cannot update the ResultSet because it is either before the start or after the end of the results.";
-    t[229] = "Non è possibile aggiornare il «ResultSet» perché la posizione attuale è precedente all''inizio o successiva alla file dei risultati.";
-    t[232] = "Parameter of type {0} was registered, but call to get{1} (sqltype={2}) was made.";
-    t[233] = "È stato definito il parametro di tipo «{0}», ma poi è stato invocato il metodo «get{1}()» (sqltype={2}).";
-    t[240] = "Cannot establish a savepoint in auto-commit mode.";
-    t[241] = "Non è possibile impostare i punti di ripristino in modalità «auto-commit».";
-    t[242] = "Cannot retrieve the id of a named savepoint.";
-    t[243] = "Non è possibile trovare l''id del punto di ripristino indicato.";
-    t[244] = "The column index is out of range: {0}, number of columns: {1}.";
-    t[245] = "Indice di colonna, {0}, è maggiore del numero di colonne {1}.";
-    t[250] = "Something unusual has occurred to cause the driver to fail. Please report this exception.";
-    t[251] = "Qualcosa di insolito si è verificato causando il fallimento del driver. Per favore riferire all''autore del driver questa eccezione.";
-    t[260] = "Cannot cast an instance of {0} to type {1}";
-    t[261] = "Non è possibile fare il cast di una istanza di «{0}» al tipo «{1}».";
-    t[264] = "Unknown Types value.";
-    t[265] = "Valore di tipo sconosciuto.";
-    t[266] = "Invalid stream length {0}.";
-    t[267] = "La dimensione specificata, {0}, per lo «stream» non è valida.";
-    t[272] = "Cannot retrieve the name of an unnamed savepoint.";
-    t[273] = "Non è possibile trovare il nome di un punto di ripristino anonimo.";
-    t[274] = "Unable to translate data into the desired encoding.";
-    t[275] = "Impossibile tradurre i dati nella codifica richiesta.";
-    t[276] = "Expected an EOF from server, got: {0}";
-    t[277] = "Ricevuto dal server «{0}» mentre era atteso un EOF";
-    t[278] = "Bad value for type {0} : {1}";
-    t[279] = "Il valore «{1}» non è adeguato al tipo «{0}».";
-    t[280] = "The server requested password-based authentication, but no password was provided.";
-    t[281] = "Il server ha richiesto l''autenticazione con password, ma tale password non è stata fornita.";
-    t[298] = "This PooledConnection has already been closed.";
-    t[299] = "Questo «PooledConnection» è stato chiuso.";
-    t[306] = "Fetch size must be a value greater to or equal to 0.";
-    t[307] = "La dimensione dell''area di «fetch» deve essere maggiore o eguale a 0.";
-    t[312] = "A connection could not be made using the requested protocol {0}.";
-    t[313] = "Non è stato possibile attivare la connessione utilizzando il protocollo richiesto {0}.";
-    t[322] = "There are no rows in this ResultSet.";
-    t[323] = "Non ci sono righe in questo «ResultSet».";
-    t[324] = "Unexpected command status: {0}.";
-    t[325] = "Stato del comando non previsto: {0}.";
-    t[334] = "Not on the insert row.";
-    t[335] = "Non si è in una nuova riga.";
-    t[344] = "Server SQLState: {0}";
-    t[345] = "SQLState del server: {0}";
-    t[360] = "The driver currently does not support COPY operations.";
-    t[361] = "Il driver non supporta al momento l''operazione «COPY».";
-    t[364] = "The array index is out of range: {0}, number of elements: {1}.";
-    t[365] = "L''indice dell''array è fuori intervallo: {0}, numero di elementi: {1}.";
-    t[374] = "suspend/resume not implemented";
-    t[375] = "«suspend»/«resume» non implementato";
-    t[378] = "Not implemented: one-phase commit must be issued using the same connection that was used to start it";
-    t[379] = "Non implementato: il commit \"one-phase\" deve essere invocato sulla stessa connessione che ha iniziato la transazione.";
-    t[398] = "Cannot call cancelRowUpdates() when on the insert row.";
-    t[399] = "Non è possibile invocare «cancelRowUpdates()» durante l''inserimento di una riga.";
-    t[400] = "Cannot reference a savepoint after it has been released.";
-    t[401] = "Non è possibile utilizzare un punto di ripristino successivamente al suo rilascio.";
-    t[402] = "You must specify at least one column value to insert a row.";
-    t[403] = "Per inserire un record si deve specificare almeno il valore di una colonna.";
-    t[404] = "Unable to determine a value for MaxIndexKeys due to missing system catalog data.";
-    t[405] = "Non è possibile trovare il valore di «MaxIndexKeys» nel catalogo si sistema.";
-    t[412] = "The JVM claims not to support the encoding: {0}";
-    t[413] = "La JVM sostiene di non supportare la codifica: {0}.";
-    t[414] = "{0} function takes two or three arguments.";
-    t[415] = "Il metodo «{0}» accetta due o tre argomenti.";
-    t[440] = "Unexpected error writing large object to database.";
-    t[441] = "Errore inatteso inviando un «large object» al database.";
-    t[442] = "Zero bytes may not occur in string parameters.";
-    t[443] = "Byte con valore zero non possono essere contenuti nei parametri stringa.";
-    t[444] = "A result was returned when none was expected.";
-    t[445] = "È stato restituito un valore nonostante non ne fosse atteso nessuno.";
-    t[450] = "ResultSet is not updateable.  The query that generated this result set must select only one table, and must select all primary keys from that table. See the JDBC 2.1 API Specification, section 5.6 for more details.";
-    t[451] = "Il «ResultSet» non è aggiornabile. La query che lo genera deve selezionare una sola tabella e deve selezionarne tutti i campi che ne compongono la chiave primaria. Si vedano le specifiche dell''API JDBC 2.1, sezione 5.6, per ulteriori dettagli.";
-    t[454] = "Bind message length {0} too long.  This can be caused by very large or incorrect length specifications on InputStream parameters.";
-    t[455] = "Il messaggio di «bind» è troppo lungo ({0}). Questo può essere causato da una dimensione eccessiva o non corretta dei parametri dell''«InputStream».";
-    t[460] = "Statement has been closed.";
-    t[461] = "Questo «Statement» è stato chiuso.";
-    t[462] = "No value specified for parameter {0}.";
-    t[463] = "Nessun valore specificato come parametro {0}.";
-    t[468] = "The array index is out of range: {0}";
-    t[469] = "Indice di colonna fuori dall''intervallo ammissibile: {0}";
-    t[474] = "Unable to bind parameter values for statement.";
-    t[475] = "Impossibile fare il «bind» dei valori passati come parametri per lo statement.";
-    t[476] = "Can''t refresh the insert row.";
-    t[477] = "Non è possibile aggiornare la riga in inserimento.";
-    t[480] = "No primary key found for table {0}.";
-    t[481] = "Non è stata trovata la chiave primaria della tabella «{0}».";
-    t[482] = "Cannot change transaction isolation level in the middle of a transaction.";
-    t[483] = "Non è possibile cambiare il livello di isolamento delle transazioni nel mezzo di una transazione.";
-    t[498] = "Provided InputStream failed.";
-    t[499] = "L''«InputStream» fornito è fallito.";
-    t[500] = "The parameter index is out of range: {0}, number of parameters: {1}.";
-    t[501] = "Il parametro indice è fuori intervallo: {0}, numero di elementi: {1}.";
-    t[502] = "The server''s DateStyle parameter was changed to {0}. The JDBC driver requires DateStyle to begin with ISO for correct operation.";
-    t[503] = "Il parametro del server «DateStyle» è stato cambiato in {0}. Il driver JDBC richiede che «DateStyle» cominci con «ISO» per un corretto funzionamento.";
-    t[508] = "Connection attempt timed out.";
-    t[509] = "Il tentativo di connessione è scaduto.";
-    t[512] = "Internal Query: {0}";
-    t[513] = "Query interna: {0}";
-    t[518] = "The authentication type {0} is not supported. Check that you have configured the pg_hba.conf file to include the client''s IP address or subnet, and that it is using an authentication scheme supported by the driver.";
-    t[519] = "L''autenticazione di tipo {0} non è supportata. Verificare che nel file di configurazione pg_hba.conf sia presente l''indirizzo IP o la sottorete del client, e che lo schema di autenticazione utilizzato sia supportato dal driver.";
-    t[526] = "Interval {0} not yet implemented";
-    t[527] = "L''intervallo «{0}» non è stato ancora implementato.";
-    t[532] = "Conversion of interval failed";
-    t[533] = "Fallita la conversione di un «interval».";
-    t[540] = "Query timeout must be a value greater than or equals to 0.";
-    t[541] = "Il timeout relativo alle query deve essere maggiore o eguale a 0.";
-    t[542] = "Connection has been closed automatically because a new connection was opened for the same PooledConnection or the PooledConnection has been closed.";
-    t[543] = "La «Connection» è stata chiusa automaticamente perché una nuova l''ha sostituita nello stesso «PooledConnection», oppure il «PooledConnection» è stato chiuso.";
-    t[544] = "ResultSet not positioned properly, perhaps you need to call next.";
-    t[545] = "Il «ResultSet» non è correttamente posizionato; forse è necessario invocare «next()».";
-    t[550] = "This statement has been closed.";
-    t[551] = "Questo statement è stato chiuso.";
-    t[552] = "Can''t infer the SQL type to use for an instance of {0}. Use setObject() with an explicit Types value to specify the type to use.";
-    t[553] = "Non è possibile identificare il tipo SQL da usare per l''istanza di tipo «{0}». Usare «setObject()» specificando esplicitamente il tipo da usare per questo valore.";
-    t[554] = "Cannot call updateRow() when on the insert row.";
-    t[555] = "Non è possibile invocare «updateRow()» durante l''inserimento di una riga.";
-    t[562] = "Detail: {0}";
-    t[563] = "Dettaglio: {0}";
-    t[566] = "Cannot call deleteRow() when on the insert row.";
-    t[567] = "Non è possibile invocare «deleteRow()» durante l''inserimento di una riga.";
-    t[568] = "Currently positioned before the start of the ResultSet.  You cannot call deleteRow() here.";
-    t[569] = "La posizione attuale è precedente all''inizio del ResultSet. Non è possibile invocare «deleteRow()» qui.";
-    t[576] = "Illegal UTF-8 sequence: final value is a surrogate value: {0}";
-    t[577] = "Sequenza UTF-8 illegale: il valore è finale è un surrogato: {0}";
-    t[578] = "Unknown Response Type {0}.";
-    t[579] = "Risposta di tipo sconosciuto {0}.";
-    t[582] = "Unsupported value for stringtype parameter: {0}";
-    t[583] = "Il valore per il parametro di tipo string «{0}» non è supportato.";
-    t[584] = "Conversion to type {0} failed: {1}.";
-    t[585] = "Conversione al tipo {0} fallita: {1}.";
-    t[586] = "Conversion of money failed.";
-    t[587] = "Fallita la conversione di un «money».";
-    t[600] = "Unable to load the class {0} responsible for the datatype {1}";
-    t[601] = "Non è possibile caricare la class «{0}» per gestire il tipo «{1}».";
-    t[604] = "The fastpath function {0} is unknown.";
-    t[605] = "La funzione fastpath «{0}» è sconosciuta.";
-    t[608] = "Malformed function or procedure escape syntax at offset {0}.";
-    t[609] = "Sequenza di escape definita erroneamente nella funzione o procedura all''offset {0}.";
-    t[612] = "Provided Reader failed.";
-    t[613] = "Il «Reader» fornito è fallito.";
-    t[614] = "Maximum number of rows must be a value grater than or equal to 0.";
-    t[615] = "Il numero massimo di righe deve essere maggiore o eguale a 0.";
-    t[616] = "Failed to create object for: {0}.";
-    t[617] = "Fallita la creazione dell''oggetto per: {0}.";
-    t[622] = "Premature end of input stream, expected {0} bytes, but only read {1}.";
-    t[623] = "Il flusso di input è stato interrotto, sono arrivati {1} byte al posto dei {0} attesi.";
-    t[626] = "An unexpected result was returned by a query.";
-    t[627] = "Un risultato inaspettato è stato ricevuto dalla query.";
-    t[646] = "An error occurred while setting up the SSL connection.";
-    t[647] = "Si è verificato un errore impostando la connessione SSL.";
-    t[654] = "Illegal UTF-8 sequence: {0} bytes used to encode a {1} byte value: {2}";
-    t[655] = "Sequenza UTF-8 illegale: {0} byte utilizzati per codificare un valore di {1} byte: {2}";
-    t[658] = "The SSLSocketFactory class provided {0} could not be instantiated.";
-    t[659] = "La classe «SSLSocketFactory» specificata, «{0}», non può essere istanziata.";
-    t[670] = "Position: {0}";
-    t[671] = "Posizione: {0}";
-    t[676] = "Location: File: {0}, Routine: {1}, Line: {2}";
-    t[677] = "Individuazione: file: \"{0}\", routine: {1}, linea: {2}";
-    t[684] = "Cannot tell if path is open or closed: {0}.";
-    t[685] = "Impossibile stabilire se il percorso è aperto o chiuso: {0}.";
-    t[700] = "Cannot convert an instance of {0} to type {1}";
-    t[701] = "Non è possibile convertire una istanza di «{0}» nel tipo «{1}»";
-    t[710] = "{0} function takes four and only four argument.";
-    t[711] = "Il metodo «{0}» accetta quattro e solo quattro argomenti.";
-    t[718] = "Interrupted while attempting to connect.";
-    t[719] = "Si è verificata una interruzione durante il tentativo di connessione.";
-    t[722] = "Illegal UTF-8 sequence: final value is out of range: {0}";
-    t[723] = "Sequenza UTF-8 illegale: il valore finale è fuori dall''intervallo permesso: {0}";
-    t[736] = "{0} function takes one and only one argument.";
-    t[737] = "Il metodo «{0}» accetta un ed un solo argomento.";
-    t[744] = "This ResultSet is closed.";
-    t[745] = "Questo «ResultSet» è chiuso.";
-    t[746] = "Invalid character data was found.  This is most likely caused by stored data containing characters that are invalid for the character set the database was created in.  The most common example of this is storing 8bit data in a SQL_ASCII database.";
-    t[747] = "Sono stati trovati caratteri non validi tra i dati. Molto probabilmente sono stati memorizzati dei caratteri che non sono validi per la codifica dei caratteri impostata alla creazione del database. Il caso più diffuso è quello nel quale si memorizzano caratteri a 8bit in un database con codifica SQL_ASCII.";
-    t[750] = "An I/O error occurred while sending to the backend.";
-    t[751] = "Si è verificato un errore di I/O nella spedizione di dati al server.";
-    t[754] = "Ran out of memory retrieving query results.";
-    t[755] = "Fine memoria scaricando i risultati della query.";
-    t[756] = "Returning autogenerated keys is not supported.";
-    t[757] = "La restituzione di chiavi autogenerate non è supportata.";
-    t[760] = "Operation requires a scrollable ResultSet, but this ResultSet is FORWARD_ONLY.";
-    t[761] = "L''operazione richiete un «ResultSet» scorribile mentre questo è «FORWARD_ONLY».";
-    t[762] = "A CallableStatement function was executed and the out parameter {0} was of type {1} however type {2} was registered.";
-    t[763] = "È stato eseguito un «CallableStatement» ma il parametro in uscita «{0}» era di tipo «{1}» al posto di «{2}», che era stato dichiarato.";
-    t[768] = "Unknown ResultSet holdability setting: {0}.";
-    t[769] = "Il parametro «holdability» per il «ResultSet» è sconosciuto: {0}.";
-    t[772] = "Transaction isolation level {0} not supported.";
-    t[773] = "Il livello di isolamento delle transazioni «{0}» non è supportato.";
-    t[776] = "No results were returned by the query.";
-    t[777] = "Nessun risultato è stato restituito dalla query.";
-    t[778] = "A CallableStatement was executed with nothing returned.";
-    t[779] = "Un «CallableStatement» è stato eseguito senza produrre alcun risultato. ";
-    t[780] = "The maximum field size must be a value greater than or equal to 0.";
-    t[781] = "La dimensione massima del campo deve essere maggiore o eguale a 0.";
-    t[786] = "This statement does not declare an OUT parameter.  Use '{' ?= call ... '}' to declare one.";
-    t[787] = "Questo statement non dichiara il parametro in uscita. Usare «{ ?= call ... }» per farlo.";
-    t[788] = "Can''t use relative move methods while on the insert row.";
-    t[789] = "Non è possibile utilizzare gli spostamenti relativi durante l''inserimento di una riga.";
-    t[792] = "Connection is busy with another transaction";
-    t[793] = "La connessione è utilizzata da un''altra transazione";
-    table = t;
-  }
+    private static final String[] table;
 
-  @Override
-  public Object handleGetObject (String msgid) throws MissingResourceException {
-    int hash_val = msgid.hashCode() & 0x7fffffff;
-    int idx = (hash_val % 397) << 1;
-    {
-      Object found = table[idx];
-      if (found == null)
-        return null;
-      if (msgid.equals(found))
-        return table[idx + 1];
+    static {
+        java.lang.String[] t = new java.lang.String[794];
+        t[0] = "";
+        t[1] = "Project-Id-Version: PostgreSQL JDBC Driver 8.2\nReport-Msgid-Bugs-To: \nPO-Revision-Date: 2006-06-23 17:25+0200\nLast-Translator: Giuseppe Sacco <eppesuig@debian.org>\nLanguage-Team: Italian <tp@lists.linux.it>\nLanguage: it\nMIME-Version: 1.0\nContent-Type: text/plain; charset=UTF-8\nContent-Transfer-Encoding: 8bit\n";
+        t[4] = "DataSource has been closed.";
+        t[5] = "Questo «DataSource» è stato chiuso.";
+        t[18] = "Where: {0}";
+        t[19] = "Dove: {0}";
+        t[26] = "The connection attempt failed.";
+        t[27] = "Il tentativo di connessione è fallito.";
+        t[28] = "Currently positioned after the end of the ResultSet.  You cannot call deleteRow() here.";
+        t[29] = "La posizione attuale è successiva alla fine del ResultSet. Non è possibile invocare «deleteRow()» qui.";
+        t[32] = "Can''t use query methods that take a query string on a PreparedStatement.";
+        t[33] = "Non si possono utilizzare i metodi \"query\" che hanno come argomento una stringa nel caso di «PreparedStatement».";
+        t[36] = "Multiple ResultSets were returned by the query.";
+        t[37] = "La query ha restituito «ResultSet» multipli.";
+        t[50] = "Too many update results were returned.";
+        t[51] = "Sono stati restituiti troppi aggiornamenti.";
+        t[58] = "Illegal UTF-8 sequence: initial byte is {0}: {1}";
+        t[59] = "Sequenza UTF-8 illegale: il byte iniziale è {0}: {1}";
+        t[66] = "The column name {0} was not found in this ResultSet.";
+        t[67] = "Colonna denominata «{0}» non è presente in questo «ResultSet».";
+        t[70] = "Fastpath call {0} - No result was returned and we expected an integer.";
+        t[71] = "Chiamata Fastpath «{0}»: Nessun risultato restituito mentre ci si aspettava un intero.";
+        t[74] = "Protocol error.  Session setup failed.";
+        t[75] = "Errore di protocollo. Impostazione della sessione fallita.";
+        t[76] = "A CallableStatement was declared, but no call to registerOutParameter(1, <some type>) was made.";
+        t[77] = "È stato definito un «CallableStatement» ma non è stato invocato il metodo «registerOutParameter(1, <tipo>)».";
+        t[78] = "ResultSets with concurrency CONCUR_READ_ONLY cannot be updated.";
+        t[79] = "I «ResultSet» in modalità CONCUR_READ_ONLY non possono essere aggiornati.";
+        t[90] = "LOB positioning offsets start at 1.";
+        t[91] = "L''offset per la posizione dei LOB comincia da 1.";
+        t[92] = "Internal Position: {0}";
+        t[93] = "Posizione interna: {0}";
+        t[100] = "Cannot change transaction read-only property in the middle of a transaction.";
+        t[101] = "Non è possibile modificare la proprietà «read-only» delle transazioni nel mezzo di una transazione.";
+        t[102] = "The JVM claims not to support the {0} encoding.";
+        t[103] = "La JVM sostiene di non supportare la codifica {0}.";
+        t[108] = "{0} function doesn''t take any argument.";
+        t[109] = "Il metodo «{0}» non accetta argomenti.";
+        t[112] = "xid must not be null";
+        t[113] = "xid non può essere NULL";
+        t[114] = "Connection has been closed.";
+        t[115] = "Questo «Connection» è stato chiuso.";
+        t[122] = "The server does not support SSL.";
+        t[123] = "Il server non supporta SSL.";
+        t[140] = "Illegal UTF-8 sequence: byte {0} of {1} byte sequence is not 10xxxxxx: {2}";
+        t[141] = "Sequenza UTF-8 illegale: il byte {0} di una sequenza di {1} byte non è 10xxxxxx: {2}";
+        t[148] = "Hint: {0}";
+        t[149] = "Suggerimento: {0}";
+        t[152] = "Unable to find name datatype in the system catalogs.";
+        t[153] = "Non è possibile trovare il datatype «name» nel catalogo di sistema.";
+        t[156] = "Unsupported Types value: {0}";
+        t[157] = "Valore di tipo «{0}» non supportato.";
+        t[158] = "Unknown type {0}.";
+        t[159] = "Tipo sconosciuto {0}.";
+        t[166] = "{0} function takes two and only two arguments.";
+        t[167] = "Il metodo «{0}» accetta due e solo due argomenti.";
+        t[170] = "Finalizing a Connection that was never closed:";
+        t[171] = "Finalizzazione di una «Connection» che non è stata chiusa.";
+        t[186] = "PostgreSQL LOBs can only index to: {0}";
+        t[187] = "Il massimo valore per l''indice dei LOB di PostgreSQL è {0}. ";
+        t[194] = "Method {0} is not yet implemented.";
+        t[195] = "Il metodo «{0}» non è stato ancora implementato.";
+        t[198] = "Error loading default settings from driverconfig.properties";
+        t[199] = "Si è verificato un errore caricando le impostazioni predefinite da «driverconfig.properties».";
+        t[202] = "Large Objects may not be used in auto-commit mode.";
+        t[203] = "Non è possibile impostare i «Large Object» in modalità «auto-commit».";
+        t[208] = "Expected command status BEGIN, got {0}.";
+        t[209] = "Lo stato del comando avrebbe dovuto essere BEGIN, mentre invece è {0}.";
+        t[218] = "Invalid fetch direction constant: {0}.";
+        t[219] = "Costante per la direzione dell''estrazione non valida: {0}.";
+        t[222] = "{0} function takes three and only three arguments.";
+        t[223] = "Il metodo «{0}» accetta tre e solo tre argomenti.";
+        t[226] = "Error during recover";
+        t[227] = "Errore durante il ripristino";
+        t[228] = "Cannot update the ResultSet because it is either before the start or after the end of the results.";
+        t[229] = "Non è possibile aggiornare il «ResultSet» perché la posizione attuale è precedente all''inizio o successiva alla file dei risultati.";
+        t[232] = "Parameter of type {0} was registered, but call to get{1} (sqltype={2}) was made.";
+        t[233] = "È stato definito il parametro di tipo «{0}», ma poi è stato invocato il metodo «get{1}()» (sqltype={2}).";
+        t[240] = "Cannot establish a savepoint in auto-commit mode.";
+        t[241] = "Non è possibile impostare i punti di ripristino in modalità «auto-commit».";
+        t[242] = "Cannot retrieve the id of a named savepoint.";
+        t[243] = "Non è possibile trovare l''id del punto di ripristino indicato.";
+        t[244] = "The column index is out of range: {0}, number of columns: {1}.";
+        t[245] = "Indice di colonna, {0}, è maggiore del numero di colonne {1}.";
+        t[250] = "Something unusual has occurred to cause the driver to fail. Please report this exception.";
+        t[251] = "Qualcosa di insolito si è verificato causando il fallimento del driver. Per favore riferire all''autore del driver questa eccezione.";
+        t[260] = "Cannot cast an instance of {0} to type {1}";
+        t[261] = "Non è possibile fare il cast di una istanza di «{0}» al tipo «{1}».";
+        t[264] = "Unknown Types value.";
+        t[265] = "Valore di tipo sconosciuto.";
+        t[266] = "Invalid stream length {0}.";
+        t[267] = "La dimensione specificata, {0}, per lo «stream» non è valida.";
+        t[272] = "Cannot retrieve the name of an unnamed savepoint.";
+        t[273] = "Non è possibile trovare il nome di un punto di ripristino anonimo.";
+        t[274] = "Unable to translate data into the desired encoding.";
+        t[275] = "Impossibile tradurre i dati nella codifica richiesta.";
+        t[276] = "Expected an EOF from server, got: {0}";
+        t[277] = "Ricevuto dal server «{0}» mentre era atteso un EOF";
+        t[278] = "Bad value for type {0} : {1}";
+        t[279] = "Il valore «{1}» non è adeguato al tipo «{0}».";
+        t[280] = "The server requested password-based authentication, but no password was provided.";
+        t[281] = "Il server ha richiesto l''autenticazione con password, ma tale password non è stata fornita.";
+        t[298] = "This PooledConnection has already been closed.";
+        t[299] = "Questo «PooledConnection» è stato chiuso.";
+        t[306] = "Fetch size must be a value greater to or equal to 0.";
+        t[307] = "La dimensione dell''area di «fetch» deve essere maggiore o eguale a 0.";
+        t[312] = "A connection could not be made using the requested protocol {0}.";
+        t[313] = "Non è stato possibile attivare la connessione utilizzando il protocollo richiesto {0}.";
+        t[322] = "There are no rows in this ResultSet.";
+        t[323] = "Non ci sono righe in questo «ResultSet».";
+        t[324] = "Unexpected command status: {0}.";
+        t[325] = "Stato del comando non previsto: {0}.";
+        t[334] = "Not on the insert row.";
+        t[335] = "Non si è in una nuova riga.";
+        t[344] = "Server SQLState: {0}";
+        t[345] = "SQLState del server: {0}";
+        t[360] = "The driver currently does not support COPY operations.";
+        t[361] = "Il driver non supporta al momento l''operazione «COPY».";
+        t[364] = "The array index is out of range: {0}, number of elements: {1}.";
+        t[365] = "L''indice dell''array è fuori intervallo: {0}, numero di elementi: {1}.";
+        t[374] = "suspend/resume not implemented";
+        t[375] = "«suspend»/«resume» non implementato";
+        t[378] = "Not implemented: one-phase commit must be issued using the same connection that was used to start it";
+        t[379] = "Non implementato: il commit \"one-phase\" deve essere invocato sulla stessa connessione che ha iniziato la transazione.";
+        t[398] = "Cannot call cancelRowUpdates() when on the insert row.";
+        t[399] = "Non è possibile invocare «cancelRowUpdates()» durante l''inserimento di una riga.";
+        t[400] = "Cannot reference a savepoint after it has been released.";
+        t[401] = "Non è possibile utilizzare un punto di ripristino successivamente al suo rilascio.";
+        t[402] = "You must specify at least one column value to insert a row.";
+        t[403] = "Per inserire un record si deve specificare almeno il valore di una colonna.";
+        t[404] = "Unable to determine a value for MaxIndexKeys due to missing system catalog data.";
+        t[405] = "Non è possibile trovare il valore di «MaxIndexKeys» nel catalogo si sistema.";
+        t[412] = "The JVM claims not to support the encoding: {0}";
+        t[413] = "La JVM sostiene di non supportare la codifica: {0}.";
+        t[414] = "{0} function takes two or three arguments.";
+        t[415] = "Il metodo «{0}» accetta due o tre argomenti.";
+        t[440] = "Unexpected error writing large object to database.";
+        t[441] = "Errore inatteso inviando un «large object» al database.";
+        t[442] = "Zero bytes may not occur in string parameters.";
+        t[443] = "Byte con valore zero non possono essere contenuti nei parametri stringa.";
+        t[444] = "A result was returned when none was expected.";
+        t[445] = "È stato restituito un valore nonostante non ne fosse atteso nessuno.";
+        t[450] = "ResultSet is not updateable.  The query that generated this result set must select only one table, and must select all primary keys from that table. See the JDBC 2.1 API Specification, section 5.6 for more details.";
+        t[451] = "Il «ResultSet» non è aggiornabile. La query che lo genera deve selezionare una sola tabella e deve selezionarne tutti i campi che ne compongono la chiave primaria. Si vedano le specifiche dell''API JDBC 2.1, sezione 5.6, per ulteriori dettagli.";
+        t[454] = "Bind message length {0} too long.  This can be caused by very large or incorrect length specifications on InputStream parameters.";
+        t[455] = "Il messaggio di «bind» è troppo lungo ({0}). Questo può essere causato da una dimensione eccessiva o non corretta dei parametri dell''«InputStream».";
+        t[460] = "Statement has been closed.";
+        t[461] = "Questo «Statement» è stato chiuso.";
+        t[462] = "No value specified for parameter {0}.";
+        t[463] = "Nessun valore specificato come parametro {0}.";
+        t[468] = "The array index is out of range: {0}";
+        t[469] = "Indice di colonna fuori dall''intervallo ammissibile: {0}";
+        t[474] = "Unable to bind parameter values for statement.";
+        t[475] = "Impossibile fare il «bind» dei valori passati come parametri per lo statement.";
+        t[476] = "Can''t refresh the insert row.";
+        t[477] = "Non è possibile aggiornare la riga in inserimento.";
+        t[480] = "No primary key found for table {0}.";
+        t[481] = "Non è stata trovata la chiave primaria della tabella «{0}».";
+        t[482] = "Cannot change transaction isolation level in the middle of a transaction.";
+        t[483] = "Non è possibile cambiare il livello di isolamento delle transazioni nel mezzo di una transazione.";
+        t[498] = "Provided InputStream failed.";
+        t[499] = "L''«InputStream» fornito è fallito.";
+        t[500] = "The parameter index is out of range: {0}, number of parameters: {1}.";
+        t[501] = "Il parametro indice è fuori intervallo: {0}, numero di elementi: {1}.";
+        t[502] = "The server''s DateStyle parameter was changed to {0}. The JDBC driver requires DateStyle to begin with ISO for correct operation.";
+        t[503] = "Il parametro del server «DateStyle» è stato cambiato in {0}. Il driver JDBC richiede che «DateStyle» cominci con «ISO» per un corretto funzionamento.";
+        t[508] = "Connection attempt timed out.";
+        t[509] = "Il tentativo di connessione è scaduto.";
+        t[512] = "Internal Query: {0}";
+        t[513] = "Query interna: {0}";
+        t[518] = "The authentication type {0} is not supported. Check that you have configured the pg_hba.conf file to include the client''s IP address or subnet, and that it is using an authentication scheme supported by the driver.";
+        t[519] = "L''autenticazione di tipo {0} non è supportata. Verificare che nel file di configurazione pg_hba.conf sia presente l''indirizzo IP o la sottorete del client, e che lo schema di autenticazione utilizzato sia supportato dal driver.";
+        t[526] = "Interval {0} not yet implemented";
+        t[527] = "L''intervallo «{0}» non è stato ancora implementato.";
+        t[532] = "Conversion of interval failed";
+        t[533] = "Fallita la conversione di un «interval».";
+        t[540] = "Query timeout must be a value greater than or equals to 0.";
+        t[541] = "Il timeout relativo alle query deve essere maggiore o eguale a 0.";
+        t[542] = "Connection has been closed automatically because a new connection was opened for the same PooledConnection or the PooledConnection has been closed.";
+        t[543] = "La «Connection» è stata chiusa automaticamente perché una nuova l''ha sostituita nello stesso «PooledConnection», oppure il «PooledConnection» è stato chiuso.";
+        t[544] = "ResultSet not positioned properly, perhaps you need to call next.";
+        t[545] = "Il «ResultSet» non è correttamente posizionato; forse è necessario invocare «next()».";
+        t[550] = "This statement has been closed.";
+        t[551] = "Questo statement è stato chiuso.";
+        t[552] = "Can''t infer the SQL type to use for an instance of {0}. Use setObject() with an explicit Types value to specify the type to use.";
+        t[553] = "Non è possibile identificare il tipo SQL da usare per l''istanza di tipo «{0}». Usare «setObject()» specificando esplicitamente il tipo da usare per questo valore.";
+        t[554] = "Cannot call updateRow() when on the insert row.";
+        t[555] = "Non è possibile invocare «updateRow()» durante l''inserimento di una riga.";
+        t[562] = "Detail: {0}";
+        t[563] = "Dettaglio: {0}";
+        t[566] = "Cannot call deleteRow() when on the insert row.";
+        t[567] = "Non è possibile invocare «deleteRow()» durante l''inserimento di una riga.";
+        t[568] = "Currently positioned before the start of the ResultSet.  You cannot call deleteRow() here.";
+        t[569] = "La posizione attuale è precedente all''inizio del ResultSet. Non è possibile invocare «deleteRow()» qui.";
+        t[576] = "Illegal UTF-8 sequence: final value is a surrogate value: {0}";
+        t[577] = "Sequenza UTF-8 illegale: il valore è finale è un surrogato: {0}";
+        t[578] = "Unknown Response Type {0}.";
+        t[579] = "Risposta di tipo sconosciuto {0}.";
+        t[582] = "Unsupported value for stringtype parameter: {0}";
+        t[583] = "Il valore per il parametro di tipo string «{0}» non è supportato.";
+        t[584] = "Conversion to type {0} failed: {1}.";
+        t[585] = "Conversione al tipo {0} fallita: {1}.";
+        t[586] = "Conversion of money failed.";
+        t[587] = "Fallita la conversione di un «money».";
+        t[600] = "Unable to load the class {0} responsible for the datatype {1}";
+        t[601] = "Non è possibile caricare la class «{0}» per gestire il tipo «{1}».";
+        t[604] = "The fastpath function {0} is unknown.";
+        t[605] = "La funzione fastpath «{0}» è sconosciuta.";
+        t[608] = "Malformed function or procedure escape syntax at offset {0}.";
+        t[609] = "Sequenza di escape definita erroneamente nella funzione o procedura all''offset {0}.";
+        t[612] = "Provided Reader failed.";
+        t[613] = "Il «Reader» fornito è fallito.";
+        t[614] = "Maximum number of rows must be a value grater than or equal to 0.";
+        t[615] = "Il numero massimo di righe deve essere maggiore o eguale a 0.";
+        t[616] = "Failed to create object for: {0}.";
+        t[617] = "Fallita la creazione dell''oggetto per: {0}.";
+        t[622] = "Premature end of input stream, expected {0} bytes, but only read {1}.";
+        t[623] = "Il flusso di input è stato interrotto, sono arrivati {1} byte al posto dei {0} attesi.";
+        t[626] = "An unexpected result was returned by a query.";
+        t[627] = "Un risultato inaspettato è stato ricevuto dalla query.";
+        t[646] = "An error occurred while setting up the SSL connection.";
+        t[647] = "Si è verificato un errore impostando la connessione SSL.";
+        t[654] = "Illegal UTF-8 sequence: {0} bytes used to encode a {1} byte value: {2}";
+        t[655] = "Sequenza UTF-8 illegale: {0} byte utilizzati per codificare un valore di {1} byte: {2}";
+        t[658] = "The SSLSocketFactory class provided {0} could not be instantiated.";
+        t[659] = "La classe «SSLSocketFactory» specificata, «{0}», non può essere istanziata.";
+        t[670] = "Position: {0}";
+        t[671] = "Posizione: {0}";
+        t[676] = "Location: File: {0}, Routine: {1}, Line: {2}";
+        t[677] = "Individuazione: file: \"{0}\", routine: {1}, linea: {2}";
+        t[684] = "Cannot tell if path is open or closed: {0}.";
+        t[685] = "Impossibile stabilire se il percorso è aperto o chiuso: {0}.";
+        t[700] = "Cannot convert an instance of {0} to type {1}";
+        t[701] = "Non è possibile convertire una istanza di «{0}» nel tipo «{1}»";
+        t[710] = "{0} function takes four and only four argument.";
+        t[711] = "Il metodo «{0}» accetta quattro e solo quattro argomenti.";
+        t[718] = "Interrupted while attempting to connect.";
+        t[719] = "Si è verificata una interruzione durante il tentativo di connessione.";
+        t[722] = "Illegal UTF-8 sequence: final value is out of range: {0}";
+        t[723] = "Sequenza UTF-8 illegale: il valore finale è fuori dall''intervallo permesso: {0}";
+        t[736] = "{0} function takes one and only one argument.";
+        t[737] = "Il metodo «{0}» accetta un ed un solo argomento.";
+        t[744] = "This ResultSet is closed.";
+        t[745] = "Questo «ResultSet» è chiuso.";
+        t[746] = "Invalid character data was found.  This is most likely caused by stored data containing characters that are invalid for the character set the database was created in.  The most common example of this is storing 8bit data in a SQL_ASCII database.";
+        t[747] = "Sono stati trovati caratteri non validi tra i dati. Molto probabilmente sono stati memorizzati dei caratteri che non sono validi per la codifica dei caratteri impostata alla creazione del database. Il caso più diffuso è quello nel quale si memorizzano caratteri a 8bit in un database con codifica SQL_ASCII.";
+        t[750] = "An I/O error occurred while sending to the backend.";
+        t[751] = "Si è verificato un errore di I/O nella spedizione di dati al server.";
+        t[754] = "Ran out of memory retrieving query results.";
+        t[755] = "Fine memoria scaricando i risultati della query.";
+        t[756] = "Returning autogenerated keys is not supported.";
+        t[757] = "La restituzione di chiavi autogenerate non è supportata.";
+        t[760] = "Operation requires a scrollable ResultSet, but this ResultSet is FORWARD_ONLY.";
+        t[761] = "L''operazione richiete un «ResultSet» scorribile mentre questo è «FORWARD_ONLY».";
+        t[762] = "A CallableStatement function was executed and the out parameter {0} was of type {1} however type {2} was registered.";
+        t[763] = "È stato eseguito un «CallableStatement» ma il parametro in uscita «{0}» era di tipo «{1}» al posto di «{2}», che era stato dichiarato.";
+        t[768] = "Unknown ResultSet holdability setting: {0}.";
+        t[769] = "Il parametro «holdability» per il «ResultSet» è sconosciuto: {0}.";
+        t[772] = "Transaction isolation level {0} not supported.";
+        t[773] = "Il livello di isolamento delle transazioni «{0}» non è supportato.";
+        t[776] = "No results were returned by the query.";
+        t[777] = "Nessun risultato è stato restituito dalla query.";
+        t[778] = "A CallableStatement was executed with nothing returned.";
+        t[779] = "Un «CallableStatement» è stato eseguito senza produrre alcun risultato. ";
+        t[780] = "The maximum field size must be a value greater than or equal to 0.";
+        t[781] = "La dimensione massima del campo deve essere maggiore o eguale a 0.";
+        t[786] = "This statement does not declare an OUT parameter.  Use '{' ?= call ... '}' to declare one.";
+        t[787] = "Questo statement non dichiara il parametro in uscita. Usare «{ ?= call ... }» per farlo.";
+        t[788] = "Can''t use relative move methods while on the insert row.";
+        t[789] = "Non è possibile utilizzare gli spostamenti relativi durante l''inserimento di una riga.";
+        t[792] = "Connection is busy with another transaction";
+        t[793] = "La connessione è utilizzata da un''altra transazione";
+        table = t;
     }
-    int incr = ((hash_val % 395) + 1) << 1;
-    for (;;) {
-      idx += incr;
-      if (idx >= 794)
-        idx -= 794;
-      java.lang.Object found = table[idx];
-      if (found == null)
-        return null;
-      if (msgid.equals(found))
-        return table[idx + 1];
+
+    @Override
+    public Object handleGetObject(String msgid) throws MissingResourceException {
+        int hash_val = msgid.hashCode() & 0x7fffffff;
+        int idx = (hash_val % 397) << 1;
+        {
+            Object found = table[idx];
+            if (found == null)
+                return null;
+            if (msgid.equals(found))
+                return table[idx + 1];
+        }
+        int incr = ((hash_val % 395) + 1) << 1;
+        for (; ; ) {
+            idx += incr;
+            if (idx >= 794)
+                idx -= 794;
+            java.lang.Object found = table[idx];
+            if (found == null)
+                return null;
+            if (msgid.equals(found))
+                return table[idx + 1];
+        }
     }
-  }
 
-  @Override
-  public Enumeration<String> getKeys () {
-    return
-      new Enumeration<>() {
-        private int idx = 0;
-        { while (idx < 794 && table[idx] == null) idx += 2; }
-        @Override
-        public boolean hasMoreElements () {
-          return (idx < 794);
-        }
-        @Override
-        public String nextElement () {
-          Object key = table[idx];
-          do idx += 2;
-          while (idx < 794 && table[idx] == null);
-          return key.toString();
-        }
-      };
-  }
+    @Override
+    public Enumeration<String> getKeys() {
+        return
+                new Enumeration<>() {
+                    private int idx = 0;
 
-  public ResourceBundle getParent () {
-    return parent;
-  }
+                    {
+                        while (idx < 794 && table[idx] == null) idx += 2;
+                    }
+
+                    @Override
+                    public boolean hasMoreElements() {
+                        return (idx < 794);
+                    }
+
+                    @Override
+                    public String nextElement() {
+                        Object key = table[idx];
+                        do idx += 2;
+                        while (idx < 794 && table[idx] == null);
+                        return key.toString();
+                    }
+                };
+    }
+
+    public ResourceBundle getParent() {
+        return parent;
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/translation/messages_ja.java b/pgjdbc/src/main/java/org/postgresql/translation/messages_ja.java
index 6346a35..9743f04 100644
--- a/pgjdbc/src/main/java/org/postgresql/translation/messages_ja.java
+++ b/pgjdbc/src/main/java/org/postgresql/translation/messages_ja.java
@@ -5,627 +5,633 @@ import java.util.MissingResourceException;
 import java.util.ResourceBundle;
 
 public class messages_ja extends ResourceBundle {
-  private static final String[] table;
-  static {
-    String[] t = new String[1426];
-    t[0] = "";
-    t[1] = "Project-Id-Version: head-ja\nReport-Msgid-Bugs-To: \nPO-Revision-Date: 2018-07-23 11:10+0900\nLast-Translator: Kyotaro Horiguchi <horiguchi.kyotaro@lab.ntt.co.jp>\nLanguage-Team: PostgreSQL <z-saito@guitar.ocn.ne.jp>\nLanguage: ja_JP\nMIME-Version: 1.0\nContent-Type: text/plain; charset=UTF-8\nContent-Transfer-Encoding: 8bit\nX-Generator: Poedit 1.5.4\n";
-    t[2] = "Method {0} is not yet implemented.";
-    t[3] = "{0} メソッドはまだ実装されていません。";
-    t[10] = "Got {0} error responses to single copy cancel request";
-    t[11] = "一つのコピー中断要求にたいして {0} 個のエラー応答が返されました";
-    t[20] = "The array index is out of range: {0}, number of elements: {1}.";
-    t[21] = "配列インデックスが範囲外です: {0} 、要素の数: {1}";
-    t[26] = "Tried to obtain lock while already holding it";
-    t[27] = "すでに取得中のロックを取得しようとしました";
-    t[28] = "Invalid protocol state requested. Attempted transaction interleaving is not supported. xid={0}, currentXid={1}, state={2}, flags={3}";
-    t[29] = "不正なプロトコル状態が要求されました。Transaction interleaving を試みましたが実装されていません。xid={0}, currentXid={1}, state={2}, flags={3}";
-    t[34] = "Unsupported property name: {0}";
-    t[35] = "サポートされていないプロパティ名: {0}";
-    t[36] = "Unsupported Types value: {0}";
-    t[37] = "サポートされない Types の値: {0}.";
-    t[44] = "The hostname {0} could not be verified by hostnameverifier {1}.";
-    t[45] = "ホスト名 {0} は、hostnameverifier {1} で検証できませんでした。";
-    t[52] = "Invalid UUID data.";
-    t[53] = "不正なUUIDデータです。";
-    t[54] = "{0} parameter value must be an integer but was: {1}";
-    t[55] = "パラメータ {0} の値は整数でなければなりませんが指定された値は {1} でした";
-    t[56] = "Copying from database failed: {0}";
-    t[57] = "データベースからのコピーに失敗しました: {0}";
-    t[58] = "Requested CopyDual but got {0}";
-    t[59] = "CopyDualを要求しましたが {0} が返却されました。";
-    t[64] = "Multiple ResultSets were returned by the query.";
-    t[65] = "クエリの実行により、複数のResultSetが返されました。";
-    t[76] = "Too many update results were returned.";
-    t[77] = "返却された更新結果が多すぎます。";
-    t[84] = "Unable to determine a value for MaxIndexKeys due to missing system catalog data.";
-    t[85] = "システムカタログにデータがないため MaxIndexKeys の値を決定できません。";
-    t[90] = "Database connection failed when starting copy";
-    t[91] = "コピー開始時のデータベース接続に失敗しました";
-    t[94] = "Unknown XML Result class: {0}";
-    t[95] = "未知のXML結果クラス: {0}";
-    t[100] = "The server''s standard_conforming_strings parameter was reported as {0}. The JDBC driver expected on or off.";
-    t[101] = "サーバのstandard_conforming_stringsパラメータは、{0}であると報告されました。JDBCドライバは、on または off を想定しています。";
-    t[102] = "Batch entry {0} {1} was aborted: {2}  Call getNextException to see other errors in the batch.";
-    t[103] = "バッチ {0} {1} はアボートしました: {2} このバッチの他のエラーは getNextException を呼び出すことで確認できます。";
-    t[104] = "Protocol error.  Session setup failed.";
-    t[105] = "プロトコルエラー。セッションは準備できませんでした。";
-    t[106] = "This SQLXML object has not been initialized, so you cannot retrieve data from it.";
-    t[107] = "このSQLXMLオブジェクトは初期化されてなかったため、そこからデータを取得できません。";
-    t[116] = "Bad value for type {0} : {1}";
-    t[117] = "型 {0} に対する不正な値 : {1}";
-    t[120] = "A CallableStatement was executed with an invalid number of parameters";
-    t[121] = "CallableStatement は不正な数のパラメータで実行されました。";
-    t[124] = "Error preparing transaction. prepare xid={0}";
-    t[125] = "トランザクションの準備エラー。prepare xid={0}";
-    t[126] = "Can''t use relative move methods while on the insert row.";
-    t[127] = "行挿入中に相対移動メソッドは使えません。";
-    t[130] = "Failed to create object for: {0}.";
-    t[131] = "{0} のオブジェクトの生成に失敗しました。";
-    t[138] = "Cannot change transaction read-only property in the middle of a transaction.";
-    t[139] = "トランザクションの中で read-only プロパティは変更できません。";
-    t[154] = "{0} function takes three and only three arguments.";
-    t[155] = "{0} 関数はちょうど3個の引数を取ります。";
-    t[158] = "One-phase commit called for xid {0} but connection was prepared with xid {1}";
-    t[159] = "単相コミットが xid {0} に対してよびだされましたが、コネクションは xid {1} と関連付けられています";
-    t[160] = "Validating connection.";
-    t[161] = "コネクションを検証しています";
-    t[166] = "This replication stream has been closed.";
-    t[167] = "このレプリケーション接続は既にクローズされています。";
-    t[168] = "An error occurred while trying to get the socket timeout.";
-    t[169] = "ソケットタイムアウト取得中にエラーが発生しました。";
-    t[170] = "Conversion of money failed.";
-    t[171] = "貨幣金額の変換に失敗しました。";
-    t[172] = "Provided Reader failed.";
-    t[173] = "渡された Reader で異常が発生しました。";
-    t[174] = "tried to call end without corresponding start call. state={0}, start xid={1}, currentXid={2}, preparedXid={3}";
-    t[175] = "対応する start の呼び出しなしで、end を呼び出しました。state={0}, start xid={1}, currentXid={2}, preparedXid={3}";
-    t[178] = "Got CopyBothResponse from server during an active {0}";
-    t[179] = "{0} を実行中のサーバから CopyOutResponse を受け取りました";
-    t[186] = "Unknown ResultSet holdability setting: {0}.";
-    t[187] = "ResultSet の holdability に対する未知の設定値です: {0}";
-    t[188] = "Not implemented: 2nd phase commit must be issued using an idle connection. commit xid={0}, currentXid={1}, state={2}, transactionState={3}";
-    t[189] = "実装されていません: 第二フェーズの COMMIT は、待機接続で使わなくてはなりません。xid={0}, currentXid={1}, state={2}, transactionState={3}";
-    t[190] = "Invalid server SCRAM signature";
-    t[191] = "不正なサーバSCRAM署名です";
-    t[192] = "The server''s client_encoding parameter was changed to {0}. The JDBC driver requires client_encoding to be UTF8 for correct operation.";
-    t[193] = "サーバの client_encoding パラメータが {0} に変わりました。JDBCドライバが正しく動作するためには、 client_encoding は UTF8 である必要があります。";
-    t[198] = "Detail: {0}";
-    t[199] = "詳細: {0}";
-    t[200] = "Unexpected packet type during copy: {0}";
-    t[201] = "コピー中の想定外のパケット型です: {0}";
-    t[206] = "Transaction isolation level {0} not supported.";
-    t[207] = "トランザクション分離レベル{0} はサポートされていません。";
-    t[210] = "The server requested password-based authentication, but no password was provided.";
-    t[211] = "サーバはパスワード・ベースの認証を要求しましたが、パスワードが渡されませんでした。";
-    t[214] = "Interrupted while attempting to connect.";
-    t[215] = "接続試行中に割り込みがありました。";
-    t[216] = "Fetch size must be a value greater to or equal to 0.";
-    t[217] = "フェッチサイズは、0または、より大きな値でなくてはなりません。";
-    t[228] = "Added parameters index out of range: {0}, number of columns: {1}.";
-    t[229] = "パラメータ・インデックスは範囲外です: {0} , カラム数: {1}";
-    t[230] = "Could not decrypt SSL key file {0}.";
-    t[231] = "SSL keyファイル {0} を復号できませんでした。";
-    t[242] = "Could not initialize SSL context.";
-    t[243] = "SSLコンテクストを初期化できませんでした。";
-    t[244] = "{0} function takes one and only one argument.";
-    t[245] = "{0} 関数はちょうど1個の引数を取ります。";
-    t[248] = "Parameter of type {0} was registered, but call to get{1} (sqltype={2}) was made.";
-    t[249] = "{0} 型のパラメータが登録されましたが、get{1} (sqltype={2}) が呼び出されました。";
-    t[258] = "Conversion of interval failed";
-    t[259] = "時間間隔の変換に失敗しました。";
-    t[262] = "xid must not be null";
-    t[263] = "xidはnullではいけません。";
-    t[264] = "Your security policy has prevented the connection from being attempted.  You probably need to grant the connect java.net.SocketPermission to the database server host and port that you wish to connect to.";
-    t[265] = "セキュリティ・ポリシーにより、接続が妨げられました。おそらく、接続先のデータベースサーバのホストとポートに対して java.net.SocketPermission の connect 権限を許可する必要があります。";
-    t[270] = "ClientInfo property not supported.";
-    t[271] = "ClientInfo プロパティはサポートされていません。";
-    t[272] = "LOB positioning offsets start at 1.";
-    t[273] = "LOB 位置指定のオフセット値は 1 以上です。";
-    t[276] = "Tried to write to an inactive copy operation";
-    t[277] = "実行中ではないコピー操作に書き込もうとしました";
-    t[278] = "suspend/resume not implemented";
-    t[279] = "停止/再開 は実装されていません。";
-    t[290] = "Transaction control methods setAutoCommit(true), commit, rollback and setSavePoint not allowed while an XA transaction is active.";
-    t[291] = "トランザクション制御メソッド setAutoCommit(true), commit, rollback, setSavePoint は、XAトランザクションが有効である間は利用できません。";
-    t[292] = "Unable to find server array type for provided name {0}.";
-    t[293] = "指定された名前 {0} のサーバ配列型はありません。";
-    t[300] = "Statement has been closed.";
-    t[301] = "ステートメントはクローズされました。";
-    t[302] = "The fastpath function {0} is unknown.";
-    t[303] = "{0} は未知の fastpath 関数です。";
-    t[306] = "The server''s DateStyle parameter was changed to {0}. The JDBC driver requires DateStyle to begin with ISO for correct operation.";
-    t[307] = "サーバのDateStyleパラメータは、{0} に変わりました。JDBCドライバが正しく動作するためには、DateStyle が ISO で始まる値である必要があります。";
-    t[308] = "Invalid flags {0}";
-    t[309] = "不正なフラグ {0}";
-    t[324] = "A CallableStatement was declared, but no call to registerOutParameter(1, <some type>) was made.";
-    t[325] = "CallableStatementは宣言されましたが、registerOutParameter(1, <some type>) は呼び出されませんでした。";
-    t[328] = "Cannot commit when autoCommit is enabled.";
-    t[329] = "autoCommit有効時に、明示的なコミットはできません。";
-    t[330] = "Database connection failed when writing to copy";
-    t[331] = "コピーへの書き込み中にデータベース接続で異常が発生しました";
-    t[334] = "Hint: {0}";
-    t[335] = "ヒント: {0}";
-    t[336] = "Interval {0} not yet implemented";
-    t[337] = "時間間隔 {0} は実装されていません";
-    t[338] = "No X509TrustManager found";
-    t[339] = "X509TrustManager が見つかりません";
-    t[346] = "No results were returned by the query.";
-    t[347] = "クエリは結果を返却しませんでした。";
-    t[354] = "Heuristic commit/rollback not supported. forget xid={0}";
-    t[355] = "ヒューリスティック commit/rollback はサポートされません。forget xid={0}";
-    t[362] = "Fastpath call {0} - No result was returned or wrong size while expecting an integer.";
-    t[363] = "Fastpath 呼び出し {0} - integer を想定していましたが、結果は返却されないかまたは間違った大きさでした。";
-    t[364] = "Cannot cast an instance of {0} to type {1}";
-    t[365] = "{0} のインスタンスは {1} 型へキャストできません";
-    t[366] = "ResultSet not positioned properly, perhaps you need to call next.";
-    t[367] = "適切な位置にいない ResultSetです。おそらく、nextを呼ぶ必要があります。";
-    t[372] = "Cannot establish a savepoint in auto-commit mode.";
-    t[373] = "自動コミットモードでsavepointを作成できません。";
-    t[374] = "Prepare called before end. prepare xid={0}, state={1}";
-    t[375] = "end より前に prepare が呼ばれました prepare xid={0}, state={1}";
-    t[382] = "You must specify at least one column value to insert a row.";
-    t[383] = "行挿入には、最低でも1つの列の値が必要です。";
-    t[388] = "Query timeout must be a value greater than or equals to 0.";
-    t[389] = "クエリタイムアウトは、0またはより大きな値でなくてはなりません。";
-    t[394] = "The SSLSocketFactory class provided {0} could not be instantiated.";
-    t[395] = "渡された SSLSocketFactoryクラス {0} はインスタンス化できませんでした。";
-    t[396] = "The parameter index is out of range: {0}, number of parameters: {1}.";
-    t[397] = "パラメータのインデックスが範囲外です: {0} , パラメータ数: {1}";
-    t[400] = "This ResultSet is closed.";
-    t[401] = "この ResultSet はクローズされています。";
-    t[402] = "Cannot update the ResultSet because it is either before the start or after the end of the results.";
-    t[403] = "開始位置より前もしくは終了位置より後ろであるため、ResultSetを更新することができません。";
-    t[404] = "SSL error: {0}";
-    t[405] = "SSL エラー: {0}";
-    t[408] = "The column name {0} was not found in this ResultSet.";
-    t[409] = "この ResultSet に列名 {0} ありません。";
-    t[412] = "The authentication type {0} is not supported. Check that you have configured the pg_hba.conf file to include the client''s IP address or subnet, and that it is using an authentication scheme supported by the driver.";
-    t[413] = "認証タイプ {0} はサポートされません。pg_hba.confでクライアントのIPアドレスまたはサブネットの指定があり、そのエントリでこのドライバがサポートする認証機構を使うように設定されていることを確認してください。";
-    t[440] = "The driver currently does not support COPY operations.";
-    t[441] = "ドライバはコピー操作をサポートしていません。";
-    t[442] = "This statement has been closed.";
-    t[443] = "このステートメントはクローズされています。";
-    t[444] = "Object is too large to send over the protocol.";
-    t[445] = "オブジェクトが大きすぎてこのプロトコルでは送信できません。";
-    t[448] = "oid type {0} not known and not a number";
-    t[449] = "OID型 {0} は未知でかつ数値でもありません";
-    t[452] = "No hstore extension installed.";
-    t[453] = "hstore 拡張がインストールされてません。";
-    t[454] = "Currently positioned after the end of the ResultSet.  You cannot call deleteRow() here.";
-    t[455] = "ResultSet の最後尾より後ろにいるため、deleteRow() を呼ぶことはできません。";
-    t[462] = "The column index is out of range: {0}, number of columns: {1}.";
-    t[463] = "列インデックスは範囲外です: {0} , 列の数: {1}";
-    t[468] = "Got CopyInResponse from server during an active {0}";
-    t[469] = "{0} を実行中のサーバから CopyInResponse を受け取りました";
-    t[474] = "Fastpath call {0} - No result was returned and we expected a numeric.";
-    t[475] = "Fastpath 呼び出し {0} - numeric を想定していましたが、結果は返却されませんでした。";
-    t[482] = "An error occurred while setting up the SSL connection.";
-    t[483] = "SSL接続のセットアップ中に、エラーが起こりました。";
-    t[484] = "Could not open SSL certificate file {0}.";
-    t[485] = "SSL証明書ファイル {0} を開けませんでした。";
-    t[490] = "free() was called on this LOB previously";
-    t[491] = "このLOBに対して free() はすでに呼び出し済みです";
-    t[492] = "Finalizing a Connection that was never closed:";
-    t[493] = "クローズされていないコネクションの終了処理を行います: ";
-    t[494] = "Unsupported properties: {0}";
-    t[495] = "サポートされないプロパティ: {0}";
-    t[498] = "Interrupted while waiting to obtain lock on database connection";
-    t[499] = "データベース接続のロック待ちの最中に割り込みがありました";
-    t[504] = "The HostnameVerifier class provided {0} could not be instantiated.";
-    t[505] = "与えれた HostnameVerifier クラス {0} はインスタンス化できませんした。";
-    t[506] = "Unable to create SAXResult for SQLXML.";
-    t[507] = "SQLXMLに対するSAXResultを生成できません。";
-    t[510] = "The server does not support SSL.";
-    t[511] = "サーバはSSLをサポートしていません。";
-    t[516] = "Got CopyData without an active copy operation";
-    t[517] = "実行中のコピー操作がないにもかかわらず CopyData を受け取りました";
-    t[518] = "Error during one-phase commit. commit xid={0}";
-    t[519] = "単一フェーズのCOMMITの処理中のエラー commit xid={0}";
-    t[522] = "Network timeout must be a value greater than or equal to 0.";
-    t[523] = "ネットワークタイムアウトは、0またはより大きな値でなくてはなりません。";
-    t[532] = "Unsupported type conversion to {1}.";
-    t[533] = "{1} への型変換はサポートされていません。";
-    t[534] = "Premature end of input stream, expected {0} bytes, but only read {1}.";
-    t[535] = "入力ストリームが途中で終了しました、{0} バイトを読み込もうとしましたが、 {1} バイトしかありませんでした。";
-    t[536] = "Zero bytes may not occur in string parameters.";
-    t[537] = "バイト値0を文字列ラメータに含めることはできません。";
-    t[538] = "This connection has been closed.";
-    t[539] = "このコネクションは既にクローズされています。";
-    t[540] = "Cannot call deleteRow() when on the insert row.";
-    t[541] = "行挿入時に deleteRow() を呼び出せません。";
-    t[544] = "Unable to bind parameter values for statement.";
-    t[545] = "ステートメントのパラメータ値をバインドできませんでした。";
-    t[552] = "Cannot convert an instance of {0} to type {1}";
-    t[553] = "{0} のインスタンスは {1} 型に変換できません";
-    t[554] = "Conversion to type {0} failed: {1}.";
-    t[555] = "{0} への型変換に失敗しました: {1}";
-    t[556] = "Error loading default settings from driverconfig.properties";
-    t[557] = "driverconfig.properties からの初期設定ロード中のエラー";
-    t[558] = "Expected command status BEGIN, got {0}.";
-    t[559] = "BEGINコマンドステータスを想定しましたが、{0} が返却されました。";
-    t[564] = "An unexpected result was returned by a query.";
-    t[565] = "クエリが想定外の結果を返却しました。";
-    t[568] = "Something unusual has occurred to cause the driver to fail. Please report this exception.";
-    t[569] = "何らかの異常によりドライバが動作できません。この例外を報告して下さい。";
-    t[576] = "One or more ClientInfo failed.";
-    t[577] = "1つ以上の ClinentInfo で問題が発生しました。";
-    t[578] = "Location: File: {0}, Routine: {1}, Line: {2}";
-    t[579] = "場所: ファイル: {0}, ルーチン: {1},行: {2}";
-    t[582] = "Unknown type {0}.";
-    t[583] = "未知の型 {0}.";
-    t[590] = "This SQLXML object has already been freed.";
-    t[591] = "このSQLXMLオブジェクトはすでに解放されています。";
-    t[594] = "Unexpected copydata from server for {0}";
-    t[595] = "{0} を実行中のサーバからのあり得ない CopyData";
-    t[596] = "{0} function takes two or three arguments.";
-    t[597] = "{0} 関数は2個、または3個の引数を取ります。";
-    t[602] = "Connection to {0} refused. Check that the hostname and port are correct and that the postmaster is accepting TCP/IP connections.";
-    t[603] = "{0} への接続が拒絶されました。ホスト名とポート番号が正しいことと、postmaster がTCP/IP接続を受け付けていることを確認してください。";
-    t[612] = "Unsupported binary encoding of {0}.";
-    t[613] = "{0} 型に対するサポートされないバイナリエンコーディング。";
-    t[616] = "Returning autogenerated keys is not supported.";
-    t[617] = "自動生成キーを返すことはサポートされていません。";
-    t[620] = "Provided InputStream failed.";
-    t[621] = "渡された InputStream で異常が発生しました。";
-    t[626] = "No IOException expected from StringBuffer or StringBuilder";
-    t[627] = "StringBuffer または StringBuilder からの IOException は想定されていません";
-    t[638] = "Not implemented: one-phase commit must be issued using the same connection that was used to start it";
-    t[639] = "実装されていません: 単一フェーズのCOMMITは、開始時と同じ接続で発行されなければなりません。";
-    t[640] = "Cannot reference a savepoint after it has been released.";
-    t[641] = "解放された savepoint は参照できません。";
-    t[642] = "Ran out of memory retrieving query results.";
-    t[643] = "クエリの結果取得中にメモリ不足が起きました。";
-    t[654] = "No primary key found for table {0}.";
-    t[655] = "テーブル {0} には主キーがありません。";
-    t[658] = "Error during recover";
-    t[659] = "recover 処理中のエラー";
-    t[666] = "This copy stream is closed.";
-    t[667] = "このコピーストリームはクローズされています。";
-    t[668] = "Could not open SSL root certificate file {0}.";
-    t[669] = "SSLルート証明書ファイル {0} をオープンできませんでした。";
-    t[676] = "Invalid sslmode value: {0}";
-    t[677] = "不正な sslmode 値: {0}";
-    t[678] = "Cannot tell if path is open or closed: {0}.";
-    t[679] = "経路が開いているか、閉じているか判別できません: {0}";
-    t[682] = "Illegal UTF-8 sequence: {0} bytes used to encode a {1} byte value: {2}";
-    t[683] = "不正なUTF-8シーケンス: {1} バイトの値のエンコードに{0} バイト使用しています: {2}";
-    t[684] = "Unknown XML Source class: {0}";
-    t[685] = "未知のXMLソースクラス: {0}";
-    t[686] = "Internal Query: {0}";
-    t[687] = "内部クエリ: {0}";
-    t[702] = "Could not find a java cryptographic algorithm: {0}.";
-    t[703] = "javaの暗号化アルゴリズム {0} を見つけることができませんでした。";
-    t[706] = "Connection has been closed automatically because a new connection was opened for the same PooledConnection or the PooledConnection has been closed.";
-    t[707] = "同じ PooledConnection に対して新しい接続をオープンしたか、この PooledConnection がクローズされたため、接続が自動的にクローズされました。";
-    t[708] = "Invalid fetch direction constant: {0}.";
-    t[709] = "不正なフェッチ方向の定数です: {0}";
-    t[714] = "Can''t use query methods that take a query string on a PreparedStatement.";
-    t[715] = "PreparedStatement でクエリ文字列を取るクエリメソッドは使えません。";
-    t[716] = "SCRAM authentication failed, server returned error: {0}";
-    t[717] = "スクラム認証が失敗しました、サーバはエラーを返却しました:  {0}";
-    t[722] = "Invalid elements {0}";
-    t[723] = "不正な要素です: {0}";
-    t[738] = "Not on the insert row.";
-    t[739] = "挿入行上にいません。";
-    t[740] = "Unable to load the class {0} responsible for the datatype {1}";
-    t[741] = "データ型 {1} に対応するクラス{0} をロードできません。";
-    t[752] = "Could not find a java cryptographic algorithm: X.509 CertificateFactory not available.";
-    t[753] = "javaの暗号化アルゴリズムを見つけることができませんでした。X.509 CertificateFactory は利用できません。";
-    t[756] = "Can''t infer the SQL type to use for an instance of {0}. Use setObject() with an explicit Types value to specify the type to use.";
-    t[757] = "{0} のインスタンスに対して使うべきSQL型を推測できません。明示的な Types 引数をとる setObject() で使うべき型を指定してください。";
-    t[760] = "Invalid server-first-message: {0}";
-    t[761] = "不正な server-first-message: {0}";
-    t[762] = "No value specified for parameter {0}.";
-    t[763] = "パラメータ {0} に値が設定されてません。";
-    t[766] = "Fastpath call {0} - No result was returned and we expected an integer.";
-    t[767] = "Fastpath 呼び出し {0} - integer を想定していましたが、結果は返却されませんでした。";
-    t[774] = "Unable to create StAXResult for SQLXML";
-    t[775] = "SQLXMLに対するStAXResultを生成できません。";
-    t[798] = "CommandComplete expected COPY but got: ";
-    t[799] = "CommandComplete はCOPYを想定しましたが、次の結果が返却されました:";
-    t[800] = "Enter SSL password: ";
-    t[801] = "SSLパスワード入力: ";
-    t[802] = "Failed to convert binary xml data to encoding: {0}.";
-    t[803] = "バイナリxmlデータのエンコード: {0} への変換に失敗しました。";
-    t[804] = "No SCRAM mechanism(s) advertised by the server";
-    t[805] = "サーバは SCRAM認証機構を広告していません";
-    t[818] = "Custom type maps are not supported.";
-    t[819] = "カスタム型マップはサポートされません。";
-    t[822] = "Illegal UTF-8 sequence: final value is a surrogate value: {0}";
-    t[823] = "不正なUTF-8シーケンス: 変換後の値がサロゲート値です: {0}";
-    t[824] = "The SocketFactory class provided {0} could not be instantiated.";
-    t[825] = "渡された SocketFactoryクラス {0} はインスタンス化できませんでした。";
-    t[832] = "Large Objects may not be used in auto-commit mode.";
-    t[833] = "ラージオブジェクトは、自動コミットモードで使うことができません。";
-    t[834] = "Fastpath call {0} - No result was returned or wrong size while expecting a long.";
-    t[835] = "Fastpath 呼び出し {0} - long を想定していましたが、結果は返却されないかまたは間違った大きさでした。";
-    t[844] = "Invalid stream length {0}.";
-    t[845] = "不正なストリーム長 {0}。";
-    t[850] = "The sslfactoryarg property must start with the prefix file:, classpath:, env:, sys:, or -----BEGIN CERTIFICATE-----.";
-    t[851] = "プロパティ sslfactoryarg の先頭はプリフィクス file:, classpath:, env:, sys: もしくは -----BEGIN CERTIFICATE----- のいずれかでなければなりません。";
-    t[852] = "Can''t use executeWithFlags(int) on a Statement.";
-    t[853] = "executeWithFlags(int) は Statement インスタンスでは使えません。";
-    t[856] = "Cannot retrieve the id of a named savepoint.";
-    t[857] = "名前付き savepoint の id は取得できません。";
-    t[860] = "Could not read password for SSL key file by callbackhandler {0}.";
-    t[861] = "callbackhandler {0} で、SSL keyファイルを読めませんでした。";
-    t[874] = "Tried to break lock on database connection";
-    t[875] = "データベース接続のロックを破壊しようとしました";
-    t[878] = "Unexpected error writing large object to database.";
-    t[879] = "データベースへのラージオブジェクト書き込み中に想定外のエラーが起きました。";
-    t[880] = "Expected an EOF from server, got: {0}";
-    t[881] = "サーバからの EOF を期待していましたが、{0} が送られてきました";
-    t[886] = "Could not read SSL root certificate file {0}.";
-    t[887] = "SSLルート証明書ファイル {0} を読めませんでした。";
-    t[888] = "This SQLXML object has already been initialized, so you cannot manipulate it further.";
-    t[889] = "このSQLXMLオブジェクトは既に初期化済みであるため、これ以上操作できません。";
-    t[896] = "The array index is out of range: {0}";
-    t[897] = "配列インデックスが範囲外です: {0}";
-    t[898] = "Unable to set network timeout.";
-    t[899] = "ネットワークタイムアウトが設定できません。";
-    t[900] = "{0} function takes four and only four argument.";
-    t[901] = "{0} 関数はちょうど4個の引数を取ります。";
-    t[904] = "Unable to decode xml data.";
-    t[905] = "xmlデータをデコードできません。";
-    t[916] = "Bad value for type timestamp/date/time: {1}";
-    t[917] = "timestamp/date/time 型に対する不正な値: {1}";
-    t[928] = "Illegal UTF-8 sequence: final value is out of range: {0}";
-    t[929] = "不正なUTF-8シーケンス: 変換後の値が範囲外です: {0}";
-    t[932] = "Unable to parse the count in command completion tag: {0}.";
-    t[933] = "コマンド完了タグのカウントをパースできません: {0}";
-    t[942] = "Read from copy failed.";
-    t[943] = "コピーストリームからの読み取りに失敗しました。";
-    t[944] = "Maximum number of rows must be a value grater than or equal to 0.";
-    t[945] = "行数の制限値は 0またはより大きな値でなくてはなりません。";
-    t[958] = "The password callback class provided {0} could not be instantiated.";
-    t[959] = "渡されたパスワードコールバッククラス {0} はインスタンス化できませんでした。";
-    t[960] = "Returning autogenerated keys by column index is not supported.";
-    t[961] = "列インデックスで自動生成キーを返すことはサポートされていません。";
-    t[966] = "Properties for the driver contains a non-string value for the key ";
-    t[967] = "このドライバのプロパティでは以下のキーに対して文字列ではない値が設定されています: ";
-    t[974] = "Database connection failed when canceling copy operation";
-    t[975] = "コピー操作中断のためのデータベース接続に失敗しました";
-    t[976] = "DataSource has been closed.";
-    t[977] = "データソースはクローズされました。";
-    t[996] = "Unable to get network timeout.";
-    t[997] = "ネットワークタイムアウトが取得できません。";
-    t[1000] = "A CallableStatement was executed with nothing returned.";
-    t[1001] = "CallableStatement が実行されましたがなにも返却されませんでした。";
-    t[1002] = "Can''t refresh the insert row.";
-    t[1003] = "挿入行を再フェッチすることはできません。";
-    t[1004] = "Could not find a server with specified targetServerType: {0}";
-    t[1005] = "指定された targetServerType のサーバーが見つかりません: {0}";
-    t[1006] = "This PooledConnection has already been closed.";
-    t[1007] = "この PooledConnectionは、すでに閉じられています。";
-    t[1010] = "Cannot call cancelRowUpdates() when on the insert row.";
-    t[1011] = "行挿入時に cancelRowUpdates() を呼び出せません。";
-    t[1012] = "Preparing already prepared transaction, the prepared xid {0}, prepare xid={1}";
-    t[1013] = "すでにプリペアされているトランザクションをプリペアしようとしました、プリペアされている xid={0}, プリペアしようとした xid={1}";
-    t[1018] = "CopyIn copy direction can't receive data";
-    t[1019] = "コピー方向 CopyIn はデータを受信できません";
-    t[1024] = "conversion to {0} from {1} not supported";
-    t[1025] = "{1} から {0} への変換はサポートされていません。";
-    t[1030] = "An error occurred reading the certificate";
-    t[1031] = "証明書の読み込み中にエラーが起きました";
-    t[1032] = "Invalid or unsupported by client SCRAM mechanisms";
-    t[1033] = "不正であるかクライアントのSCRAM機構でサポートされていません";
-    t[1034] = "Malformed function or procedure escape syntax at offset {0}.";
-    t[1035] = "関数またはプロシージャの間違ったエスケープ構文が位置{0}で見つかりました。";
-    t[1038] = "Bind message length {0} too long.  This can be caused by very large or incorrect length specifications on InputStream parameters.";
-    t[1039] = "バインドメッセージ長 {0} は長すぎます。InputStreamのパラメータにとても大きな長さ、あるいは不正確な長さが設定されている可能性があります。";
-    t[1050] = "Cannot change transaction isolation level in the middle of a transaction.";
-    t[1051] = "トランザクションの中でトランザクション分離レベルは変更できません。";
-    t[1058] = "Internal Position: {0}";
-    t[1059] = "内部位置: {0}";
-    t[1062] = "No function outputs were registered.";
-    t[1063] = "関数出力は登録されていません。";
-    t[1072] = "Unexpected packet type during replication: {0}";
-    t[1073] = "レプリケーション中に想定外のパケット型: {0}";
-    t[1076] = "Error disabling autocommit";
-    t[1077] = "自動コミットの無効化処理中のエラー";
-    t[1080] = "Requested CopyOut but got {0}";
-    t[1081] = "CopyOut を要求しましたが {0} が返却されました";
-    t[1084] = "Error rolling back prepared transaction. rollback xid={0}, preparedXid={1}, currentXid={2}";
-    t[1085] = "プリペアドトランザクションのロールバック中のエラー rollback xid={0}, preparedXid={1}, currentXid={2}";
-    t[1086] = "Database connection failed when ending copy";
-    t[1087] = "コピー操作の終了中にデータベース接続で異常が発生しました";
-    t[1090] = "Unsupported value for stringtype parameter: {0}";
-    t[1091] = "サポートされないstringtypeパラメータ値です: {0}";
-    t[1094] = "The sslfactoryarg property may not be empty.";
-    t[1095] = "プロパティ sslfactoryarg は空であってはなりません。";
-    t[1102] = "Loading the SSL root certificate {0} into a TrustManager failed.";
-    t[1103] = "SSLルート証明書 {0} をTrustManagerへ読み込めませんでした。";
-    t[1104] = "Illegal UTF-8 sequence: initial byte is {0}: {1}";
-    t[1105] = "不正なUTF-8シーケンス: 先頭バイトが {0}: {1}";
-    t[1116] = "The environment variable containing the server's SSL certificate must not be empty.";
-    t[1117] = "サーバのSSL証明書を指定する環境変数は空であってはなりません。";
-    t[1118] = "Connection attempt timed out.";
-    t[1119] = "接続試行がタイムアウトしました。";
-    t[1130] = "Cannot write to copy a byte of value {0}";
-    t[1131] = "バイト値{0}はコピーストリームへの書き込みはできません";
-    t[1132] = "Connection has been closed.";
-    t[1133] = "接続はクローズされました。";
-    t[1136] = "Could not read password for SSL key file, console is not available.";
-    t[1137] = "SSL keyファイルのパスワードを読めませんでした。コンソールは利用できません。";
-    t[1140] = "The JVM claims not to support the encoding: {0}";
-    t[1141] = "JVMでサポートされないエンコーディングです: {0}";
-    t[1146] = "Unexpected command status: {0}.";
-    t[1147] = "想定外のコマンドステータス: {0}。";
-    t[1154] = "Cannot rollback when autoCommit is enabled.";
-    t[1155] = "autoCommit有効時に、明示的なロールバックはできません。";
-    t[1158] = "Not implemented: Prepare must be issued using the same connection that started the transaction. currentXid={0}, prepare xid={1}";
-    t[1159] = "実装されていません: Prepareは、トランザクションを開始したものと同じコネクションで発行しなくてはなりません。currentXid={0}, prepare xid={1}";
-    t[1162] = "The connection attempt failed.";
-    t[1163] = "接続試行は失敗しました。";
-    t[1166] = "Illegal UTF-8 sequence: byte {0} of {1} byte sequence is not 10xxxxxx: {2}";
-    t[1167] = "不正なUTF-8シーケンス: {1} バイトのシーケンス中 {0} バイト目が、10xxxxxx ではありません: {2}";
-    t[1178] = "A connection could not be made using the requested protocol {0}.";
-    t[1179] = "要求されたプロトコル {0} で接続することができませんでした。";
-    t[1182] = "The system property containing the server's SSL certificate must not be empty.";
-    t[1183] = "サーバーのSSL証明書を指定するシステムプロパティは空であってはなりません。";
-    t[1188] = "Cannot call updateRow() when on the insert row.";
-    t[1189] = "挿入行上では updateRow() を呼び出すことができません。";
-    t[1192] = "Fastpath call {0} - No result was returned and we expected a long.";
-    t[1193] = "Fastpath 呼び出し {0} - long を想定していましたが、結果は返却されませんでした。";
-    t[1198] = "Truncation of large objects is only implemented in 8.3 and later servers.";
-    t[1199] = "ラージオブジェクトの切り詰めは、バージョン8.3 以降のサーバでのみ実装されています。";
-    t[1200] = "Cannot convert the column of type {0} to requested type {1}.";
-    t[1201] = "{0}型のカラムの値を指定の型 {1} に変換できませんでした。";
-    t[1204] = "Requested CopyIn but got {0}";
-    t[1205] = "CopyIn を要求しましたが {0} が返却されました";
-    t[1206] = "Cannot cast to boolean: \"{0}\"";
-    t[1207] = "boolean へのキャストはできません: \"{0}\"";
-    t[1212] = "Invalid server-final-message: {0}";
-    t[1213] = "不正な server-final-message: {0}.";
-    t[1214] = "This statement does not declare an OUT parameter.  Use '{' ?= call ... '}' to declare one.";
-    t[1215] = "このステートメントは、OUTパラメータを宣言していません。'{' ?= call ... '}' を使って宣言して下さい。";
-    t[1218] = "Cannot truncate LOB to a negative length.";
-    t[1219] = "LOBを負の長さに切り詰めることはできません。";
-    t[1220] = "Zero bytes may not occur in identifiers.";
-    t[1221] = "バイト値0を識別子に含めることはできません。";
-    t[1222] = "Unable to convert DOMResult SQLXML data to a string.";
-    t[1223] = "DOMResult SQLXMLデータを文字列に変換することができません。";
-    t[1224] = "Missing expected error response to copy cancel request";
-    t[1225] = "予期していたコピーの中断要求へのエラー応答がありませんでした";
-    t[1234] = "SCRAM authentication is not supported by this driver. You need JDK >= 8 and pgjdbc >= 42.2.0 (not \".jre\" versions)";
-    t[1235] = "SCRAM認証はこのドライバではサポートされません。JDK8 以降かつ pgjdbc 42.2.0 以降(\".jre\"のバージョンではありません)が必要です。";
-    t[1240] = "Tried to end inactive copy";
-    t[1241] = "実行中ではないコピー操作を終了しようとしました";
-    t[1246] = "A CallableStatement function was executed and the out parameter {0} was of type {1} however type {2} was registered.";
-    t[1247] = "CallableStatement 関数が実行され、出力パラメータ {0} は {1} 型 でした。しかし、{2} 型 が登録されました。";
-    t[1250] = "Failed to setup DataSource.";
-    t[1251] = "データソースのセットアップに失敗しました。";
-    t[1252] = "Loading the SSL certificate {0} into a KeyManager failed.";
-    t[1253] = "SSL証明書 {0} をKeyManagerへ読み込めませんでした。";
-    t[1254] = "Could not read SSL key file {0}.";
-    t[1255] = "SSL keyファイル {0} を読めませんでした。";
-    t[1258] = "Tried to read from inactive copy";
-    t[1259] = "実行中ではないコピーから読み取ろうとしました";
-    t[1260] = "ResultSet is not updateable.  The query that generated this result set must select only one table, and must select all primary keys from that table. See the JDBC 2.1 API Specification, section 5.6 for more details.";
-    t[1261] = "ResultSetは更新不可です。この結果セットを生成したクエリは、ただ一つのテーブルを選択して、そのテーブルの全ての主キーを選択する必要があります。詳細に関しては JDBC 2.1 API仕様、章 5.6 を参照して下さい。";
-    t[1264] = "A result was returned when none was expected.";
-    t[1265] = "ないはずの結果が返却されました。";
-    t[1266] = "Tried to cancel an inactive copy operation";
-    t[1267] = "実行中ではないコピー操作の中断を試みました";
-    t[1268] = "Server SQLState: {0}";
-    t[1269] = "サーバ SQLState: {0}";
-    t[1272] = "Unable to find keywords in the system catalogs.";
-    t[1273] = "キーワードはシステムカタログにありません。";
-    t[1276] = "Connection is busy with another transaction";
-    t[1277] = "接続は、別のトランザクションを処理中です";
-    t[1280] = "ResultSets with concurrency CONCUR_READ_ONLY cannot be updated.";
-    t[1281] = "CONCUR_READ_ONLYに設定されている ResultSet は更新できません。";
-    t[1296] = "commit called before end. commit xid={0}, state={1}";
-    t[1297] = "end の前に COMMIT を呼びました commit xid={0}, state={1}";
-    t[1308] = "PostgreSQL LOBs can only index to: {0}";
-    t[1309] = "PostgreSQL LOB 上の位置指定は最大 {0} までです";
-    t[1310] = "Where: {0}";
-    t[1311] = "場所: {0}";
-    t[1312] = "Unable to find name datatype in the system catalogs.";
-    t[1313] = "name データ型がシステムカタログにありません。";
-    t[1314] = "Invalid targetServerType value: {0}";
-    t[1315] = "不正な  targetServerType 値です。{0}.";
-    t[1318] = "Cannot retrieve the name of an unnamed savepoint.";
-    t[1319] = "無名 savepoint の名前は取得できません。";
-    t[1320] = "Error committing prepared transaction. commit xid={0}, preparedXid={1}, currentXid={2}";
-    t[1321] = "プリペアドトランザクションの COMMIT 処理中のエラー。commit xid={0}, preparedXid={1}, currentXid={2}";
-    t[1324] = "Invalid timeout ({0}<0).";
-    t[1325] = "不正なタイムアウト値 ({0}<0)。";
-    t[1328] = "Operation requires a scrollable ResultSet, but this ResultSet is FORWARD_ONLY.";
-    t[1329] = "操作は、スクロール可能なResultSetを必要としますが、このResultSetは、 FORWARD_ONLYです。";
-    t[1330] = "Results cannot be retrieved from a CallableStatement before it is executed.";
-    t[1331] = "実行前の CallableStatement から結果の取得はできません。";
-    t[1332] = "wasNull cannot be call before fetching a result.";
-    t[1333] = "wasNullは、結果フェッチ前に呼び出せません。";
-    t[1336] = "{0} function doesn''t take any argument.";
-    t[1337] = "{0} 関数は引数を取りません。";
-    t[1344] = "Unknown Response Type {0}.";
-    t[1345] = "未知の応答タイプ {0} です。";
-    t[1346] = "The JVM claims not to support the {0} encoding.";
-    t[1347] = "JVMは、エンコーディング {0} をサポートしません。";
-    t[1348] = "{0} function takes two and only two arguments.";
-    t[1349] = "{0} 関数はちょうど2個の引数を取ります。";
-    t[1350] = "The maximum field size must be a value greater than or equal to 0.";
-    t[1351] = "最大の項目サイズは、0またはより大きな値でなくてはなりません。";
-    t[1352] = "Received CommandComplete ''{0}'' without an active copy operation";
-    t[1353] = "実行中のコピー操作がないにもかかわらず CommandComplete ''{0}'' を受信しました";
-    t[1354] = "Unable to translate data into the desired encoding.";
-    t[1355] = "データを指定されたエンコーディングに変換することができません。";
-    t[1368] = "Got CopyOutResponse from server during an active {0}";
-    t[1369] = "{0} を実行中のサーバから CopyOutResponse を受け取りました";
-    t[1370] = "Failed to set ClientInfo property: {0}";
-    t[1371] = "ClientInfo のプロパティの設定に失敗しました: {0}";
-    t[1372] = "Invalid character data was found.  This is most likely caused by stored data containing characters that are invalid for the character set the database was created in.  The most common example of this is storing 8bit data in a SQL_ASCII database.";
-    t[1373] = "不正な文字データが見つかりました。これはデータベース作成時の文字セットに対して不正な文字を含むデータが格納されているために起きている可能性が高いです。最も一般的な例は、SQL_ASCIIデータベースに8bitデータが保存されている場合です。";
-    t[1374] = "Unknown Types value.";
-    t[1375] = "未知の Types の値です。";
-    t[1376] = " (pgjdbc: autodetected server-encoding to be {0}, if the message is not readable, please check database logs and/or host, port, dbname, user, password, pg_hba.conf)";
-    t[1377] = "(pgjdbc: server-encoding として {0}  を自動検出しました、メッセージが読めない場合はデータベースログおよび host, port, dbname, user, password, pg_dba.conf を確認してください)";
-    t[1386] = "GSS Authentication failed";
-    t[1387] = "GSS認証は失敗しました。";
-    t[1390] = "An error occurred while trying to reset the socket timeout.";
-    t[1391] = "ソケットタイムアウトのリセット中にエラーが発生しました。";
-    t[1392] = "Currently positioned before the start of the ResultSet.  You cannot call deleteRow() here.";
-    t[1393] = "RsultSet の開始点より前にいるため、deleteRow() を呼ぶことはできません。";
-    t[1394] = "Current connection does not have an associated xid. prepare xid={0}";
-    t[1395] = "この接続は xid と関連付けられていません。プリペア xid={0}";
-    t[1408] = "An I/O error occurred while sending to the backend.";
-    t[1409] = "バックエンドへの送信中に、入出力エラーが起こりました。";
-    t[1416] = "One-phase commit with unknown xid. commit xid={0}, currentXid={1}";
-    t[1417] = "未知の xid の単相コミット。 コミットxid={0}, 現在のxid={1}";
-    t[1420] = "Position: {0}";
-    t[1421] = "位置: {0}";
-    t[1422] = "There are no rows in this ResultSet.";
-    t[1423] = "このResultSetに行がありません。";
-    t[1424] = "Database connection failed when reading from copy";
-    t[1425] = "コピーからの読み取り中にデータベース接続で異常が発生しました";
-    table = t;
-  }
+    private static final String[] table;
 
-  @Override
-  public Object handleGetObject (String msgid) throws MissingResourceException {
-    int hash_val = msgid.hashCode() & 0x7fffffff;
-    int idx = (hash_val % 713) << 1;
-    {
-      Object found = table[idx];
-      if (found == null)
-        return null;
-      if (msgid.equals(found))
-        return table[idx + 1];
+    static {
+        String[] t = new String[1426];
+        t[0] = "";
+        t[1] = "Project-Id-Version: head-ja\nReport-Msgid-Bugs-To: \nPO-Revision-Date: 2018-07-23 11:10+0900\nLast-Translator: Kyotaro Horiguchi <horiguchi.kyotaro@lab.ntt.co.jp>\nLanguage-Team: PostgreSQL <z-saito@guitar.ocn.ne.jp>\nLanguage: ja_JP\nMIME-Version: 1.0\nContent-Type: text/plain; charset=UTF-8\nContent-Transfer-Encoding: 8bit\nX-Generator: Poedit 1.5.4\n";
+        t[2] = "Method {0} is not yet implemented.";
+        t[3] = "{0} メソッドはまだ実装されていません。";
+        t[10] = "Got {0} error responses to single copy cancel request";
+        t[11] = "一つのコピー中断要求にたいして {0} 個のエラー応答が返されました";
+        t[20] = "The array index is out of range: {0}, number of elements: {1}.";
+        t[21] = "配列インデックスが範囲外です: {0} 、要素の数: {1}";
+        t[26] = "Tried to obtain lock while already holding it";
+        t[27] = "すでに取得中のロックを取得しようとしました";
+        t[28] = "Invalid protocol state requested. Attempted transaction interleaving is not supported. xid={0}, currentXid={1}, state={2}, flags={3}";
+        t[29] = "不正なプロトコル状態が要求されました。Transaction interleaving を試みましたが実装されていません。xid={0}, currentXid={1}, state={2}, flags={3}";
+        t[34] = "Unsupported property name: {0}";
+        t[35] = "サポートされていないプロパティ名: {0}";
+        t[36] = "Unsupported Types value: {0}";
+        t[37] = "サポートされない Types の値: {0}.";
+        t[44] = "The hostname {0} could not be verified by hostnameverifier {1}.";
+        t[45] = "ホスト名 {0} は、hostnameverifier {1} で検証できませんでした。";
+        t[52] = "Invalid UUID data.";
+        t[53] = "不正なUUIDデータです。";
+        t[54] = "{0} parameter value must be an integer but was: {1}";
+        t[55] = "パラメータ {0} の値は整数でなければなりませんが指定された値は {1} でした";
+        t[56] = "Copying from database failed: {0}";
+        t[57] = "データベースからのコピーに失敗しました: {0}";
+        t[58] = "Requested CopyDual but got {0}";
+        t[59] = "CopyDualを要求しましたが {0} が返却されました。";
+        t[64] = "Multiple ResultSets were returned by the query.";
+        t[65] = "クエリの実行により、複数のResultSetが返されました。";
+        t[76] = "Too many update results were returned.";
+        t[77] = "返却された更新結果が多すぎます。";
+        t[84] = "Unable to determine a value for MaxIndexKeys due to missing system catalog data.";
+        t[85] = "システムカタログにデータがないため MaxIndexKeys の値を決定できません。";
+        t[90] = "Database connection failed when starting copy";
+        t[91] = "コピー開始時のデータベース接続に失敗しました";
+        t[94] = "Unknown XML Result class: {0}";
+        t[95] = "未知のXML結果クラス: {0}";
+        t[100] = "The server''s standard_conforming_strings parameter was reported as {0}. The JDBC driver expected on or off.";
+        t[101] = "サーバのstandard_conforming_stringsパラメータは、{0}であると報告されました。JDBCドライバは、on または off を想定しています。";
+        t[102] = "Batch entry {0} {1} was aborted: {2}  Call getNextException to see other errors in the batch.";
+        t[103] = "バッチ {0} {1} はアボートしました: {2} このバッチの他のエラーは getNextException を呼び出すことで確認できます。";
+        t[104] = "Protocol error.  Session setup failed.";
+        t[105] = "プロトコルエラー。セッションは準備できませんでした。";
+        t[106] = "This SQLXML object has not been initialized, so you cannot retrieve data from it.";
+        t[107] = "このSQLXMLオブジェクトは初期化されてなかったため、そこからデータを取得できません。";
+        t[116] = "Bad value for type {0} : {1}";
+        t[117] = "型 {0} に対する不正な値 : {1}";
+        t[120] = "A CallableStatement was executed with an invalid number of parameters";
+        t[121] = "CallableStatement は不正な数のパラメータで実行されました。";
+        t[124] = "Error preparing transaction. prepare xid={0}";
+        t[125] = "トランザクションの準備エラー。prepare xid={0}";
+        t[126] = "Can''t use relative move methods while on the insert row.";
+        t[127] = "行挿入中に相対移動メソッドは使えません。";
+        t[130] = "Failed to create object for: {0}.";
+        t[131] = "{0} のオブジェクトの生成に失敗しました。";
+        t[138] = "Cannot change transaction read-only property in the middle of a transaction.";
+        t[139] = "トランザクションの中で read-only プロパティは変更できません。";
+        t[154] = "{0} function takes three and only three arguments.";
+        t[155] = "{0} 関数はちょうど3個の引数を取ります。";
+        t[158] = "One-phase commit called for xid {0} but connection was prepared with xid {1}";
+        t[159] = "単相コミットが xid {0} に対してよびだされましたが、コネクションは xid {1} と関連付けられています";
+        t[160] = "Validating connection.";
+        t[161] = "コネクションを検証しています";
+        t[166] = "This replication stream has been closed.";
+        t[167] = "このレプリケーション接続は既にクローズされています。";
+        t[168] = "An error occurred while trying to get the socket timeout.";
+        t[169] = "ソケットタイムアウト取得中にエラーが発生しました。";
+        t[170] = "Conversion of money failed.";
+        t[171] = "貨幣金額の変換に失敗しました。";
+        t[172] = "Provided Reader failed.";
+        t[173] = "渡された Reader で異常が発生しました。";
+        t[174] = "tried to call end without corresponding start call. state={0}, start xid={1}, currentXid={2}, preparedXid={3}";
+        t[175] = "対応する start の呼び出しなしで、end を呼び出しました。state={0}, start xid={1}, currentXid={2}, preparedXid={3}";
+        t[178] = "Got CopyBothResponse from server during an active {0}";
+        t[179] = "{0} を実行中のサーバから CopyOutResponse を受け取りました";
+        t[186] = "Unknown ResultSet holdability setting: {0}.";
+        t[187] = "ResultSet の holdability に対する未知の設定値です: {0}";
+        t[188] = "Not implemented: 2nd phase commit must be issued using an idle connection. commit xid={0}, currentXid={1}, state={2}, transactionState={3}";
+        t[189] = "実装されていません: 第二フェーズの COMMIT は、待機接続で使わなくてはなりません。xid={0}, currentXid={1}, state={2}, transactionState={3}";
+        t[190] = "Invalid server SCRAM signature";
+        t[191] = "不正なサーバSCRAM署名です";
+        t[192] = "The server''s client_encoding parameter was changed to {0}. The JDBC driver requires client_encoding to be UTF8 for correct operation.";
+        t[193] = "サーバの client_encoding パラメータが {0} に変わりました。JDBCドライバが正しく動作するためには、 client_encoding は UTF8 である必要があります。";
+        t[198] = "Detail: {0}";
+        t[199] = "詳細: {0}";
+        t[200] = "Unexpected packet type during copy: {0}";
+        t[201] = "コピー中の想定外のパケット型です: {0}";
+        t[206] = "Transaction isolation level {0} not supported.";
+        t[207] = "トランザクション分離レベル{0} はサポートされていません。";
+        t[210] = "The server requested password-based authentication, but no password was provided.";
+        t[211] = "サーバはパスワード・ベースの認証を要求しましたが、パスワードが渡されませんでした。";
+        t[214] = "Interrupted while attempting to connect.";
+        t[215] = "接続試行中に割り込みがありました。";
+        t[216] = "Fetch size must be a value greater to or equal to 0.";
+        t[217] = "フェッチサイズは、0または、より大きな値でなくてはなりません。";
+        t[228] = "Added parameters index out of range: {0}, number of columns: {1}.";
+        t[229] = "パラメータ・インデックスは範囲外です: {0} , カラム数: {1}";
+        t[230] = "Could not decrypt SSL key file {0}.";
+        t[231] = "SSL keyファイル {0} を復号できませんでした。";
+        t[242] = "Could not initialize SSL context.";
+        t[243] = "SSLコンテクストを初期化できませんでした。";
+        t[244] = "{0} function takes one and only one argument.";
+        t[245] = "{0} 関数はちょうど1個の引数を取ります。";
+        t[248] = "Parameter of type {0} was registered, but call to get{1} (sqltype={2}) was made.";
+        t[249] = "{0} 型のパラメータが登録されましたが、get{1} (sqltype={2}) が呼び出されました。";
+        t[258] = "Conversion of interval failed";
+        t[259] = "時間間隔の変換に失敗しました。";
+        t[262] = "xid must not be null";
+        t[263] = "xidはnullではいけません。";
+        t[264] = "Your security policy has prevented the connection from being attempted.  You probably need to grant the connect java.net.SocketPermission to the database server host and port that you wish to connect to.";
+        t[265] = "セキュリティ・ポリシーにより、接続が妨げられました。おそらく、接続先のデータベースサーバのホストとポートに対して java.net.SocketPermission の connect 権限を許可する必要があります。";
+        t[270] = "ClientInfo property not supported.";
+        t[271] = "ClientInfo プロパティはサポートされていません。";
+        t[272] = "LOB positioning offsets start at 1.";
+        t[273] = "LOB 位置指定のオフセット値は 1 以上です。";
+        t[276] = "Tried to write to an inactive copy operation";
+        t[277] = "実行中ではないコピー操作に書き込もうとしました";
+        t[278] = "suspend/resume not implemented";
+        t[279] = "停止/再開 は実装されていません。";
+        t[290] = "Transaction control methods setAutoCommit(true), commit, rollback and setSavePoint not allowed while an XA transaction is active.";
+        t[291] = "トランザクション制御メソッド setAutoCommit(true), commit, rollback, setSavePoint は、XAトランザクションが有効である間は利用できません。";
+        t[292] = "Unable to find server array type for provided name {0}.";
+        t[293] = "指定された名前 {0} のサーバ配列型はありません。";
+        t[300] = "Statement has been closed.";
+        t[301] = "ステートメントはクローズされました。";
+        t[302] = "The fastpath function {0} is unknown.";
+        t[303] = "{0} は未知の fastpath 関数です。";
+        t[306] = "The server''s DateStyle parameter was changed to {0}. The JDBC driver requires DateStyle to begin with ISO for correct operation.";
+        t[307] = "サーバのDateStyleパラメータは、{0} に変わりました。JDBCドライバが正しく動作するためには、DateStyle が ISO で始まる値である必要があります。";
+        t[308] = "Invalid flags {0}";
+        t[309] = "不正なフラグ {0}";
+        t[324] = "A CallableStatement was declared, but no call to registerOutParameter(1, <some type>) was made.";
+        t[325] = "CallableStatementは宣言されましたが、registerOutParameter(1, <some type>) は呼び出されませんでした。";
+        t[328] = "Cannot commit when autoCommit is enabled.";
+        t[329] = "autoCommit有効時に、明示的なコミットはできません。";
+        t[330] = "Database connection failed when writing to copy";
+        t[331] = "コピーへの書き込み中にデータベース接続で異常が発生しました";
+        t[334] = "Hint: {0}";
+        t[335] = "ヒント: {0}";
+        t[336] = "Interval {0} not yet implemented";
+        t[337] = "時間間隔 {0} は実装されていません";
+        t[338] = "No X509TrustManager found";
+        t[339] = "X509TrustManager が見つかりません";
+        t[346] = "No results were returned by the query.";
+        t[347] = "クエリは結果を返却しませんでした。";
+        t[354] = "Heuristic commit/rollback not supported. forget xid={0}";
+        t[355] = "ヒューリスティック commit/rollback はサポートされません。forget xid={0}";
+        t[362] = "Fastpath call {0} - No result was returned or wrong size while expecting an integer.";
+        t[363] = "Fastpath 呼び出し {0} - integer を想定していましたが、結果は返却されないかまたは間違った大きさでした。";
+        t[364] = "Cannot cast an instance of {0} to type {1}";
+        t[365] = "{0} のインスタンスは {1} 型へキャストできません";
+        t[366] = "ResultSet not positioned properly, perhaps you need to call next.";
+        t[367] = "適切な位置にいない ResultSetです。おそらく、nextを呼ぶ必要があります。";
+        t[372] = "Cannot establish a savepoint in auto-commit mode.";
+        t[373] = "自動コミットモードでsavepointを作成できません。";
+        t[374] = "Prepare called before end. prepare xid={0}, state={1}";
+        t[375] = "end より前に prepare が呼ばれました prepare xid={0}, state={1}";
+        t[382] = "You must specify at least one column value to insert a row.";
+        t[383] = "行挿入には、最低でも1つの列の値が必要です。";
+        t[388] = "Query timeout must be a value greater than or equals to 0.";
+        t[389] = "クエリタイムアウトは、0またはより大きな値でなくてはなりません。";
+        t[394] = "The SSLSocketFactory class provided {0} could not be instantiated.";
+        t[395] = "渡された SSLSocketFactoryクラス {0} はインスタンス化できませんでした。";
+        t[396] = "The parameter index is out of range: {0}, number of parameters: {1}.";
+        t[397] = "パラメータのインデックスが範囲外です: {0} , パラメータ数: {1}";
+        t[400] = "This ResultSet is closed.";
+        t[401] = "この ResultSet はクローズされています。";
+        t[402] = "Cannot update the ResultSet because it is either before the start or after the end of the results.";
+        t[403] = "開始位置より前もしくは終了位置より後ろであるため、ResultSetを更新することができません。";
+        t[404] = "SSL error: {0}";
+        t[405] = "SSL エラー: {0}";
+        t[408] = "The column name {0} was not found in this ResultSet.";
+        t[409] = "この ResultSet に列名 {0} ありません。";
+        t[412] = "The authentication type {0} is not supported. Check that you have configured the pg_hba.conf file to include the client''s IP address or subnet, and that it is using an authentication scheme supported by the driver.";
+        t[413] = "認証タイプ {0} はサポートされません。pg_hba.confでクライアントのIPアドレスまたはサブネットの指定があり、そのエントリでこのドライバがサポートする認証機構を使うように設定されていることを確認してください。";
+        t[440] = "The driver currently does not support COPY operations.";
+        t[441] = "ドライバはコピー操作をサポートしていません。";
+        t[442] = "This statement has been closed.";
+        t[443] = "このステートメントはクローズされています。";
+        t[444] = "Object is too large to send over the protocol.";
+        t[445] = "オブジェクトが大きすぎてこのプロトコルでは送信できません。";
+        t[448] = "oid type {0} not known and not a number";
+        t[449] = "OID型 {0} は未知でかつ数値でもありません";
+        t[452] = "No hstore extension installed.";
+        t[453] = "hstore 拡張がインストールされてません。";
+        t[454] = "Currently positioned after the end of the ResultSet.  You cannot call deleteRow() here.";
+        t[455] = "ResultSet の最後尾より後ろにいるため、deleteRow() を呼ぶことはできません。";
+        t[462] = "The column index is out of range: {0}, number of columns: {1}.";
+        t[463] = "列インデックスは範囲外です: {0} , 列の数: {1}";
+        t[468] = "Got CopyInResponse from server during an active {0}";
+        t[469] = "{0} を実行中のサーバから CopyInResponse を受け取りました";
+        t[474] = "Fastpath call {0} - No result was returned and we expected a numeric.";
+        t[475] = "Fastpath 呼び出し {0} - numeric を想定していましたが、結果は返却されませんでした。";
+        t[482] = "An error occurred while setting up the SSL connection.";
+        t[483] = "SSL接続のセットアップ中に、エラーが起こりました。";
+        t[484] = "Could not open SSL certificate file {0}.";
+        t[485] = "SSL証明書ファイル {0} を開けませんでした。";
+        t[490] = "free() was called on this LOB previously";
+        t[491] = "このLOBに対して free() はすでに呼び出し済みです";
+        t[492] = "Finalizing a Connection that was never closed:";
+        t[493] = "クローズされていないコネクションの終了処理を行います: ";
+        t[494] = "Unsupported properties: {0}";
+        t[495] = "サポートされないプロパティ: {0}";
+        t[498] = "Interrupted while waiting to obtain lock on database connection";
+        t[499] = "データベース接続のロック待ちの最中に割り込みがありました";
+        t[504] = "The HostnameVerifier class provided {0} could not be instantiated.";
+        t[505] = "与えれた HostnameVerifier クラス {0} はインスタンス化できませんした。";
+        t[506] = "Unable to create SAXResult for SQLXML.";
+        t[507] = "SQLXMLに対するSAXResultを生成できません。";
+        t[510] = "The server does not support SSL.";
+        t[511] = "サーバはSSLをサポートしていません。";
+        t[516] = "Got CopyData without an active copy operation";
+        t[517] = "実行中のコピー操作がないにもかかわらず CopyData を受け取りました";
+        t[518] = "Error during one-phase commit. commit xid={0}";
+        t[519] = "単一フェーズのCOMMITの処理中のエラー commit xid={0}";
+        t[522] = "Network timeout must be a value greater than or equal to 0.";
+        t[523] = "ネットワークタイムアウトは、0またはより大きな値でなくてはなりません。";
+        t[532] = "Unsupported type conversion to {1}.";
+        t[533] = "{1} への型変換はサポートされていません。";
+        t[534] = "Premature end of input stream, expected {0} bytes, but only read {1}.";
+        t[535] = "入力ストリームが途中で終了しました、{0} バイトを読み込もうとしましたが、 {1} バイトしかありませんでした。";
+        t[536] = "Zero bytes may not occur in string parameters.";
+        t[537] = "バイト値0を文字列ラメータに含めることはできません。";
+        t[538] = "This connection has been closed.";
+        t[539] = "このコネクションは既にクローズされています。";
+        t[540] = "Cannot call deleteRow() when on the insert row.";
+        t[541] = "行挿入時に deleteRow() を呼び出せません。";
+        t[544] = "Unable to bind parameter values for statement.";
+        t[545] = "ステートメントのパラメータ値をバインドできませんでした。";
+        t[552] = "Cannot convert an instance of {0} to type {1}";
+        t[553] = "{0} のインスタンスは {1} 型に変換できません";
+        t[554] = "Conversion to type {0} failed: {1}.";
+        t[555] = "{0} への型変換に失敗しました: {1}";
+        t[556] = "Error loading default settings from driverconfig.properties";
+        t[557] = "driverconfig.properties からの初期設定ロード中のエラー";
+        t[558] = "Expected command status BEGIN, got {0}.";
+        t[559] = "BEGINコマンドステータスを想定しましたが、{0} が返却されました。";
+        t[564] = "An unexpected result was returned by a query.";
+        t[565] = "クエリが想定外の結果を返却しました。";
+        t[568] = "Something unusual has occurred to cause the driver to fail. Please report this exception.";
+        t[569] = "何らかの異常によりドライバが動作できません。この例外を報告して下さい。";
+        t[576] = "One or more ClientInfo failed.";
+        t[577] = "1つ以上の ClinentInfo で問題が発生しました。";
+        t[578] = "Location: File: {0}, Routine: {1}, Line: {2}";
+        t[579] = "場所: ファイル: {0}, ルーチン: {1},行: {2}";
+        t[582] = "Unknown type {0}.";
+        t[583] = "未知の型 {0}.";
+        t[590] = "This SQLXML object has already been freed.";
+        t[591] = "このSQLXMLオブジェクトはすでに解放されています。";
+        t[594] = "Unexpected copydata from server for {0}";
+        t[595] = "{0} を実行中のサーバからのあり得ない CopyData";
+        t[596] = "{0} function takes two or three arguments.";
+        t[597] = "{0} 関数は2個、または3個の引数を取ります。";
+        t[602] = "Connection to {0} refused. Check that the hostname and port are correct and that the postmaster is accepting TCP/IP connections.";
+        t[603] = "{0} への接続が拒絶されました。ホスト名とポート番号が正しいことと、postmaster がTCP/IP接続を受け付けていることを確認してください。";
+        t[612] = "Unsupported binary encoding of {0}.";
+        t[613] = "{0} 型に対するサポートされないバイナリエンコーディング。";
+        t[616] = "Returning autogenerated keys is not supported.";
+        t[617] = "自動生成キーを返すことはサポートされていません。";
+        t[620] = "Provided InputStream failed.";
+        t[621] = "渡された InputStream で異常が発生しました。";
+        t[626] = "No IOException expected from StringBuffer or StringBuilder";
+        t[627] = "StringBuffer または StringBuilder からの IOException は想定されていません";
+        t[638] = "Not implemented: one-phase commit must be issued using the same connection that was used to start it";
+        t[639] = "実装されていません: 単一フェーズのCOMMITは、開始時と同じ接続で発行されなければなりません。";
+        t[640] = "Cannot reference a savepoint after it has been released.";
+        t[641] = "解放された savepoint は参照できません。";
+        t[642] = "Ran out of memory retrieving query results.";
+        t[643] = "クエリの結果取得中にメモリ不足が起きました。";
+        t[654] = "No primary key found for table {0}.";
+        t[655] = "テーブル {0} には主キーがありません。";
+        t[658] = "Error during recover";
+        t[659] = "recover 処理中のエラー";
+        t[666] = "This copy stream is closed.";
+        t[667] = "このコピーストリームはクローズされています。";
+        t[668] = "Could not open SSL root certificate file {0}.";
+        t[669] = "SSLルート証明書ファイル {0} をオープンできませんでした。";
+        t[676] = "Invalid sslmode value: {0}";
+        t[677] = "不正な sslmode 値: {0}";
+        t[678] = "Cannot tell if path is open or closed: {0}.";
+        t[679] = "経路が開いているか、閉じているか判別できません: {0}";
+        t[682] = "Illegal UTF-8 sequence: {0} bytes used to encode a {1} byte value: {2}";
+        t[683] = "不正なUTF-8シーケンス: {1} バイトの値のエンコードに{0} バイト使用しています: {2}";
+        t[684] = "Unknown XML Source class: {0}";
+        t[685] = "未知のXMLソースクラス: {0}";
+        t[686] = "Internal Query: {0}";
+        t[687] = "内部クエリ: {0}";
+        t[702] = "Could not find a java cryptographic algorithm: {0}.";
+        t[703] = "javaの暗号化アルゴリズム {0} を見つけることができませんでした。";
+        t[706] = "Connection has been closed automatically because a new connection was opened for the same PooledConnection or the PooledConnection has been closed.";
+        t[707] = "同じ PooledConnection に対して新しい接続をオープンしたか、この PooledConnection がクローズされたため、接続が自動的にクローズされました。";
+        t[708] = "Invalid fetch direction constant: {0}.";
+        t[709] = "不正なフェッチ方向の定数です: {0}";
+        t[714] = "Can''t use query methods that take a query string on a PreparedStatement.";
+        t[715] = "PreparedStatement でクエリ文字列を取るクエリメソッドは使えません。";
+        t[716] = "SCRAM authentication failed, server returned error: {0}";
+        t[717] = "スクラム認証が失敗しました、サーバはエラーを返却しました:  {0}";
+        t[722] = "Invalid elements {0}";
+        t[723] = "不正な要素です: {0}";
+        t[738] = "Not on the insert row.";
+        t[739] = "挿入行上にいません。";
+        t[740] = "Unable to load the class {0} responsible for the datatype {1}";
+        t[741] = "データ型 {1} に対応するクラス{0} をロードできません。";
+        t[752] = "Could not find a java cryptographic algorithm: X.509 CertificateFactory not available.";
+        t[753] = "javaの暗号化アルゴリズムを見つけることができませんでした。X.509 CertificateFactory は利用できません。";
+        t[756] = "Can''t infer the SQL type to use for an instance of {0}. Use setObject() with an explicit Types value to specify the type to use.";
+        t[757] = "{0} のインスタンスに対して使うべきSQL型を推測できません。明示的な Types 引数をとる setObject() で使うべき型を指定してください。";
+        t[760] = "Invalid server-first-message: {0}";
+        t[761] = "不正な server-first-message: {0}";
+        t[762] = "No value specified for parameter {0}.";
+        t[763] = "パラメータ {0} に値が設定されてません。";
+        t[766] = "Fastpath call {0} - No result was returned and we expected an integer.";
+        t[767] = "Fastpath 呼び出し {0} - integer を想定していましたが、結果は返却されませんでした。";
+        t[774] = "Unable to create StAXResult for SQLXML";
+        t[775] = "SQLXMLに対するStAXResultを生成できません。";
+        t[798] = "CommandComplete expected COPY but got: ";
+        t[799] = "CommandComplete はCOPYを想定しましたが、次の結果が返却されました:";
+        t[800] = "Enter SSL password: ";
+        t[801] = "SSLパスワード入力: ";
+        t[802] = "Failed to convert binary xml data to encoding: {0}.";
+        t[803] = "バイナリxmlデータのエンコード: {0} への変換に失敗しました。";
+        t[804] = "No SCRAM mechanism(s) advertised by the server";
+        t[805] = "サーバは SCRAM認証機構を広告していません";
+        t[818] = "Custom type maps are not supported.";
+        t[819] = "カスタム型マップはサポートされません。";
+        t[822] = "Illegal UTF-8 sequence: final value is a surrogate value: {0}";
+        t[823] = "不正なUTF-8シーケンス: 変換後の値がサロゲート値です: {0}";
+        t[824] = "The SocketFactory class provided {0} could not be instantiated.";
+        t[825] = "渡された SocketFactoryクラス {0} はインスタンス化できませんでした。";
+        t[832] = "Large Objects may not be used in auto-commit mode.";
+        t[833] = "ラージオブジェクトは、自動コミットモードで使うことができません。";
+        t[834] = "Fastpath call {0} - No result was returned or wrong size while expecting a long.";
+        t[835] = "Fastpath 呼び出し {0} - long を想定していましたが、結果は返却されないかまたは間違った大きさでした。";
+        t[844] = "Invalid stream length {0}.";
+        t[845] = "不正なストリーム長 {0}。";
+        t[850] = "The sslfactoryarg property must start with the prefix file:, classpath:, env:, sys:, or -----BEGIN CERTIFICATE-----.";
+        t[851] = "プロパティ sslfactoryarg の先頭はプリフィクス file:, classpath:, env:, sys: もしくは -----BEGIN CERTIFICATE----- のいずれかでなければなりません。";
+        t[852] = "Can''t use executeWithFlags(int) on a Statement.";
+        t[853] = "executeWithFlags(int) は Statement インスタンスでは使えません。";
+        t[856] = "Cannot retrieve the id of a named savepoint.";
+        t[857] = "名前付き savepoint の id は取得できません。";
+        t[860] = "Could not read password for SSL key file by callbackhandler {0}.";
+        t[861] = "callbackhandler {0} で、SSL keyファイルを読めませんでした。";
+        t[874] = "Tried to break lock on database connection";
+        t[875] = "データベース接続のロックを破壊しようとしました";
+        t[878] = "Unexpected error writing large object to database.";
+        t[879] = "データベースへのラージオブジェクト書き込み中に想定外のエラーが起きました。";
+        t[880] = "Expected an EOF from server, got: {0}";
+        t[881] = "サーバからの EOF を期待していましたが、{0} が送られてきました";
+        t[886] = "Could not read SSL root certificate file {0}.";
+        t[887] = "SSLルート証明書ファイル {0} を読めませんでした。";
+        t[888] = "This SQLXML object has already been initialized, so you cannot manipulate it further.";
+        t[889] = "このSQLXMLオブジェクトは既に初期化済みであるため、これ以上操作できません。";
+        t[896] = "The array index is out of range: {0}";
+        t[897] = "配列インデックスが範囲外です: {0}";
+        t[898] = "Unable to set network timeout.";
+        t[899] = "ネットワークタイムアウトが設定できません。";
+        t[900] = "{0} function takes four and only four argument.";
+        t[901] = "{0} 関数はちょうど4個の引数を取ります。";
+        t[904] = "Unable to decode xml data.";
+        t[905] = "xmlデータをデコードできません。";
+        t[916] = "Bad value for type timestamp/date/time: {1}";
+        t[917] = "timestamp/date/time 型に対する不正な値: {1}";
+        t[928] = "Illegal UTF-8 sequence: final value is out of range: {0}";
+        t[929] = "不正なUTF-8シーケンス: 変換後の値が範囲外です: {0}";
+        t[932] = "Unable to parse the count in command completion tag: {0}.";
+        t[933] = "コマンド完了タグのカウントをパースできません: {0}";
+        t[942] = "Read from copy failed.";
+        t[943] = "コピーストリームからの読み取りに失敗しました。";
+        t[944] = "Maximum number of rows must be a value grater than or equal to 0.";
+        t[945] = "行数の制限値は 0またはより大きな値でなくてはなりません。";
+        t[958] = "The password callback class provided {0} could not be instantiated.";
+        t[959] = "渡されたパスワードコールバッククラス {0} はインスタンス化できませんでした。";
+        t[960] = "Returning autogenerated keys by column index is not supported.";
+        t[961] = "列インデックスで自動生成キーを返すことはサポートされていません。";
+        t[966] = "Properties for the driver contains a non-string value for the key ";
+        t[967] = "このドライバのプロパティでは以下のキーに対して文字列ではない値が設定されています: ";
+        t[974] = "Database connection failed when canceling copy operation";
+        t[975] = "コピー操作中断のためのデータベース接続に失敗しました";
+        t[976] = "DataSource has been closed.";
+        t[977] = "データソースはクローズされました。";
+        t[996] = "Unable to get network timeout.";
+        t[997] = "ネットワークタイムアウトが取得できません。";
+        t[1000] = "A CallableStatement was executed with nothing returned.";
+        t[1001] = "CallableStatement が実行されましたがなにも返却されませんでした。";
+        t[1002] = "Can''t refresh the insert row.";
+        t[1003] = "挿入行を再フェッチすることはできません。";
+        t[1004] = "Could not find a server with specified targetServerType: {0}";
+        t[1005] = "指定された targetServerType のサーバーが見つかりません: {0}";
+        t[1006] = "This PooledConnection has already been closed.";
+        t[1007] = "この PooledConnectionは、すでに閉じられています。";
+        t[1010] = "Cannot call cancelRowUpdates() when on the insert row.";
+        t[1011] = "行挿入時に cancelRowUpdates() を呼び出せません。";
+        t[1012] = "Preparing already prepared transaction, the prepared xid {0}, prepare xid={1}";
+        t[1013] = "すでにプリペアされているトランザクションをプリペアしようとしました、プリペアされている xid={0}, プリペアしようとした xid={1}";
+        t[1018] = "CopyIn copy direction can't receive data";
+        t[1019] = "コピー方向 CopyIn はデータを受信できません";
+        t[1024] = "conversion to {0} from {1} not supported";
+        t[1025] = "{1} から {0} への変換はサポートされていません。";
+        t[1030] = "An error occurred reading the certificate";
+        t[1031] = "証明書の読み込み中にエラーが起きました";
+        t[1032] = "Invalid or unsupported by client SCRAM mechanisms";
+        t[1033] = "不正であるかクライアントのSCRAM機構でサポートされていません";
+        t[1034] = "Malformed function or procedure escape syntax at offset {0}.";
+        t[1035] = "関数またはプロシージャの間違ったエスケープ構文が位置{0}で見つかりました。";
+        t[1038] = "Bind message length {0} too long.  This can be caused by very large or incorrect length specifications on InputStream parameters.";
+        t[1039] = "バインドメッセージ長 {0} は長すぎます。InputStreamのパラメータにとても大きな長さ、あるいは不正確な長さが設定されている可能性があります。";
+        t[1050] = "Cannot change transaction isolation level in the middle of a transaction.";
+        t[1051] = "トランザクションの中でトランザクション分離レベルは変更できません。";
+        t[1058] = "Internal Position: {0}";
+        t[1059] = "内部位置: {0}";
+        t[1062] = "No function outputs were registered.";
+        t[1063] = "関数出力は登録されていません。";
+        t[1072] = "Unexpected packet type during replication: {0}";
+        t[1073] = "レプリケーション中に想定外のパケット型: {0}";
+        t[1076] = "Error disabling autocommit";
+        t[1077] = "自動コミットの無効化処理中のエラー";
+        t[1080] = "Requested CopyOut but got {0}";
+        t[1081] = "CopyOut を要求しましたが {0} が返却されました";
+        t[1084] = "Error rolling back prepared transaction. rollback xid={0}, preparedXid={1}, currentXid={2}";
+        t[1085] = "プリペアドトランザクションのロールバック中のエラー rollback xid={0}, preparedXid={1}, currentXid={2}";
+        t[1086] = "Database connection failed when ending copy";
+        t[1087] = "コピー操作の終了中にデータベース接続で異常が発生しました";
+        t[1090] = "Unsupported value for stringtype parameter: {0}";
+        t[1091] = "サポートされないstringtypeパラメータ値です: {0}";
+        t[1094] = "The sslfactoryarg property may not be empty.";
+        t[1095] = "プロパティ sslfactoryarg は空であってはなりません。";
+        t[1102] = "Loading the SSL root certificate {0} into a TrustManager failed.";
+        t[1103] = "SSLルート証明書 {0} をTrustManagerへ読み込めませんでした。";
+        t[1104] = "Illegal UTF-8 sequence: initial byte is {0}: {1}";
+        t[1105] = "不正なUTF-8シーケンス: 先頭バイトが {0}: {1}";
+        t[1116] = "The environment variable containing the server's SSL certificate must not be empty.";
+        t[1117] = "サーバのSSL証明書を指定する環境変数は空であってはなりません。";
+        t[1118] = "Connection attempt timed out.";
+        t[1119] = "接続試行がタイムアウトしました。";
+        t[1130] = "Cannot write to copy a byte of value {0}";
+        t[1131] = "バイト値{0}はコピーストリームへの書き込みはできません";
+        t[1132] = "Connection has been closed.";
+        t[1133] = "接続はクローズされました。";
+        t[1136] = "Could not read password for SSL key file, console is not available.";
+        t[1137] = "SSL keyファイルのパスワードを読めませんでした。コンソールは利用できません。";
+        t[1140] = "The JVM claims not to support the encoding: {0}";
+        t[1141] = "JVMでサポートされないエンコーディングです: {0}";
+        t[1146] = "Unexpected command status: {0}.";
+        t[1147] = "想定外のコマンドステータス: {0}。";
+        t[1154] = "Cannot rollback when autoCommit is enabled.";
+        t[1155] = "autoCommit有効時に、明示的なロールバックはできません。";
+        t[1158] = "Not implemented: Prepare must be issued using the same connection that started the transaction. currentXid={0}, prepare xid={1}";
+        t[1159] = "実装されていません: Prepareは、トランザクションを開始したものと同じコネクションで発行しなくてはなりません。currentXid={0}, prepare xid={1}";
+        t[1162] = "The connection attempt failed.";
+        t[1163] = "接続試行は失敗しました。";
+        t[1166] = "Illegal UTF-8 sequence: byte {0} of {1} byte sequence is not 10xxxxxx: {2}";
+        t[1167] = "不正なUTF-8シーケンス: {1} バイトのシーケンス中 {0} バイト目が、10xxxxxx ではありません: {2}";
+        t[1178] = "A connection could not be made using the requested protocol {0}.";
+        t[1179] = "要求されたプロトコル {0} で接続することができませんでした。";
+        t[1182] = "The system property containing the server's SSL certificate must not be empty.";
+        t[1183] = "サーバーのSSL証明書を指定するシステムプロパティは空であってはなりません。";
+        t[1188] = "Cannot call updateRow() when on the insert row.";
+        t[1189] = "挿入行上では updateRow() を呼び出すことができません。";
+        t[1192] = "Fastpath call {0} - No result was returned and we expected a long.";
+        t[1193] = "Fastpath 呼び出し {0} - long を想定していましたが、結果は返却されませんでした。";
+        t[1198] = "Truncation of large objects is only implemented in 8.3 and later servers.";
+        t[1199] = "ラージオブジェクトの切り詰めは、バージョン8.3 以降のサーバでのみ実装されています。";
+        t[1200] = "Cannot convert the column of type {0} to requested type {1}.";
+        t[1201] = "{0}型のカラムの値を指定の型 {1} に変換できませんでした。";
+        t[1204] = "Requested CopyIn but got {0}";
+        t[1205] = "CopyIn を要求しましたが {0} が返却されました";
+        t[1206] = "Cannot cast to boolean: \"{0}\"";
+        t[1207] = "boolean へのキャストはできません: \"{0}\"";
+        t[1212] = "Invalid server-final-message: {0}";
+        t[1213] = "不正な server-final-message: {0}";
+        t[1214] = "This statement does not declare an OUT parameter.  Use '{' ?= call ... '}' to declare one.";
+        t[1215] = "このステートメントは、OUTパラメータを宣言していません。'{' ?= call ... '}' を使って宣言して下さい。";
+        t[1218] = "Cannot truncate LOB to a negative length.";
+        t[1219] = "LOBを負の長さに切り詰めることはできません。";
+        t[1220] = "Zero bytes may not occur in identifiers.";
+        t[1221] = "バイト値0を識別子に含めることはできません。";
+        t[1222] = "Unable to convert DOMResult SQLXML data to a string.";
+        t[1223] = "DOMResult SQLXMLデータを文字列に変換することができません。";
+        t[1224] = "Missing expected error response to copy cancel request";
+        t[1225] = "予期していたコピーの中断要求へのエラー応答がありませんでした";
+        t[1234] = "SCRAM authentication is not supported by this driver. You need JDK >= 8 and pgjdbc >= 42.2.0 (not \".jre\" versions)";
+        t[1235] = "SCRAM認証はこのドライバではサポートされません。JDK8 以降かつ pgjdbc 42.2.0 以降(\".jre\"のバージョンではありません)が必要です。";
+        t[1240] = "Tried to end inactive copy";
+        t[1241] = "実行中ではないコピー操作を終了しようとしました";
+        t[1246] = "A CallableStatement function was executed and the out parameter {0} was of type {1} however type {2} was registered.";
+        t[1247] = "CallableStatement 関数が実行され、出力パラメータ {0} は {1} 型 でした。しかし、{2} 型 が登録されました。";
+        t[1250] = "Failed to setup DataSource.";
+        t[1251] = "データソースのセットアップに失敗しました。";
+        t[1252] = "Loading the SSL certificate {0} into a KeyManager failed.";
+        t[1253] = "SSL証明書 {0} をKeyManagerへ読み込めませんでした。";
+        t[1254] = "Could not read SSL key file {0}.";
+        t[1255] = "SSL keyファイル {0} を読めませんでした。";
+        t[1258] = "Tried to read from inactive copy";
+        t[1259] = "実行中ではないコピーから読み取ろうとしました";
+        t[1260] = "ResultSet is not updateable.  The query that generated this result set must select only one table, and must select all primary keys from that table. See the JDBC 2.1 API Specification, section 5.6 for more details.";
+        t[1261] = "ResultSetは更新不可です。この結果セットを生成したクエリは、ただ一つのテーブルを選択して、そのテーブルの全ての主キーを選択する必要があります。詳細に関しては JDBC 2.1 API仕様、章 5.6 を参照して下さい。";
+        t[1264] = "A result was returned when none was expected.";
+        t[1265] = "ないはずの結果が返却されました。";
+        t[1266] = "Tried to cancel an inactive copy operation";
+        t[1267] = "実行中ではないコピー操作の中断を試みました";
+        t[1268] = "Server SQLState: {0}";
+        t[1269] = "サーバ SQLState: {0}";
+        t[1272] = "Unable to find keywords in the system catalogs.";
+        t[1273] = "キーワードはシステムカタログにありません。";
+        t[1276] = "Connection is busy with another transaction";
+        t[1277] = "接続は、別のトランザクションを処理中です";
+        t[1280] = "ResultSets with concurrency CONCUR_READ_ONLY cannot be updated.";
+        t[1281] = "CONCUR_READ_ONLYに設定されている ResultSet は更新できません。";
+        t[1296] = "commit called before end. commit xid={0}, state={1}";
+        t[1297] = "end の前に COMMIT を呼びました commit xid={0}, state={1}";
+        t[1308] = "PostgreSQL LOBs can only index to: {0}";
+        t[1309] = "PostgreSQL LOB 上の位置指定は最大 {0} までです";
+        t[1310] = "Where: {0}";
+        t[1311] = "場所: {0}";
+        t[1312] = "Unable to find name datatype in the system catalogs.";
+        t[1313] = "name データ型がシステムカタログにありません。";
+        t[1314] = "Invalid targetServerType value: {0}";
+        t[1315] = "不正な targetServerType 値です: {0}";
+        t[1318] = "Cannot retrieve the name of an unnamed savepoint.";
+        t[1319] = "無名 savepoint の名前は取得できません。";
+        t[1320] = "Error committing prepared transaction. commit xid={0}, preparedXid={1}, currentXid={2}";
+        t[1321] = "プリペアドトランザクションの COMMIT 処理中のエラー。commit xid={0}, preparedXid={1}, currentXid={2}";
+        t[1324] = "Invalid timeout ({0}<0).";
+        t[1325] = "不正なタイムアウト値 ({0}<0)。";
+        t[1328] = "Operation requires a scrollable ResultSet, but this ResultSet is FORWARD_ONLY.";
+        t[1329] = "操作は、スクロール可能なResultSetを必要としますが、このResultSetは、 FORWARD_ONLYです。";
+        t[1330] = "Results cannot be retrieved from a CallableStatement before it is executed.";
+        t[1331] = "実行前の CallableStatement から結果の取得はできません。";
+        t[1332] = "wasNull cannot be call before fetching a result.";
+        t[1333] = "wasNullは、結果フェッチ前に呼び出せません。";
+        t[1336] = "{0} function doesn''t take any argument.";
+        t[1337] = "{0} 関数は引数を取りません。";
+        t[1344] = "Unknown Response Type {0}.";
+        t[1345] = "未知の応答タイプ {0} です。";
+        t[1346] = "The JVM claims not to support the {0} encoding.";
+        t[1347] = "JVMは、エンコーディング {0} をサポートしません。";
+        t[1348] = "{0} function takes two and only two arguments.";
+        t[1349] = "{0} 関数はちょうど2個の引数を取ります。";
+        t[1350] = "The maximum field size must be a value greater than or equal to 0.";
+        t[1351] = "最大の項目サイズは、0またはより大きな値でなくてはなりません。";
+        t[1352] = "Received CommandComplete ''{0}'' without an active copy operation";
+        t[1353] = "実行中のコピー操作がないにもかかわらず CommandComplete ''{0}'' を受信しました";
+        t[1354] = "Unable to translate data into the desired encoding.";
+        t[1355] = "データを指定されたエンコーディングに変換することができません。";
+        t[1368] = "Got CopyOutResponse from server during an active {0}";
+        t[1369] = "{0} を実行中のサーバから CopyOutResponse を受け取りました";
+        t[1370] = "Failed to set ClientInfo property: {0}";
+        t[1371] = "ClientInfo のプロパティの設定に失敗しました: {0}";
+        t[1372] = "Invalid character data was found.  This is most likely caused by stored data containing characters that are invalid for the character set the database was created in.  The most common example of this is storing 8bit data in a SQL_ASCII database.";
+        t[1373] = "不正な文字データが見つかりました。これはデータベース作成時の文字セットに対して不正な文字を含むデータが格納されているために起きている可能性が高いです。最も一般的な例は、SQL_ASCIIデータベースに8bitデータが保存されている場合です。";
+        t[1374] = "Unknown Types value.";
+        t[1375] = "未知の Types の値です。";
+        t[1376] = " (pgjdbc: autodetected server-encoding to be {0}, if the message is not readable, please check database logs and/or host, port, dbname, user, password, pg_hba.conf)";
+        t[1377] = "(pgjdbc: server-encoding として {0} を自動検出しました、メッセージが読めない場合はデータベースログおよび host, port, dbname, user, password, pg_hba.conf を確認してください)";
+        t[1386] = "GSS Authentication failed";
+        t[1387] = "GSS認証は失敗しました。";
+        t[1390] = "An error occurred while trying to reset the socket timeout.";
+        t[1391] = "ソケットタイムアウトのリセット中にエラーが発生しました。";
+        t[1392] = "Currently positioned before the start of the ResultSet.  You cannot call deleteRow() here.";
+        t[1393] = "RsultSet の開始点より前にいるため、deleteRow() を呼ぶことはできません。";
+        t[1394] = "Current connection does not have an associated xid. prepare xid={0}";
+        t[1395] = "この接続は xid と関連付けられていません。プリペア xid={0}";
+        t[1408] = "An I/O error occurred while sending to the backend.";
+        t[1409] = "バックエンドへの送信中に、入出力エラーが起こりました。";
+        t[1416] = "One-phase commit with unknown xid. commit xid={0}, currentXid={1}";
+        t[1417] = "未知の xid の単相コミット。 コミットxid={0}, 現在のxid={1}";
+        t[1420] = "Position: {0}";
+        t[1421] = "位置: {0}";
+        t[1422] = "There are no rows in this ResultSet.";
+        t[1423] = "このResultSetに行がありません。";
+        t[1424] = "Database connection failed when reading from copy";
+        t[1425] = "コピーからの読み取り中にデータベース接続で異常が発生しました";
+        table = t;
     }
-    int incr = ((hash_val % 711) + 1) << 1;
-    for (;;) {
-      idx += incr;
-      if (idx >= 1426)
-        idx -= 1426;
-      Object found = table[idx];
-      if (found == null)
-        return null;
-      if (msgid.equals(found))
-        return table[idx + 1];
+
+    @Override
+    public Object handleGetObject(String msgid) throws MissingResourceException {
+        int hash_val = msgid.hashCode() & 0x7fffffff;
+        int idx = (hash_val % 713) << 1;
+        {
+            Object found = table[idx];
+            if (found == null)
+                return null;
+            if (msgid.equals(found))
+                return table[idx + 1];
+        }
+        int incr = ((hash_val % 711) + 1) << 1;
+        for (; ; ) {
+            idx += incr;
+            if (idx >= 1426)
+                idx -= 1426;
+            Object found = table[idx];
+            if (found == null)
+                return null;
+            if (msgid.equals(found))
+                return table[idx + 1];
+        }
     }
-  }
 
-  @Override
-  public Enumeration<String> getKeys () {
-    return new Enumeration<>() {
-        private int idx = 0;
-        { while (idx < 1426 && table[idx] == null) idx += 2; }
-      @Override
-        public boolean hasMoreElements () {
-          return (idx < 1426);
-        }
-      @Override
-        public String nextElement () {
-          Object key = table[idx];
-          do idx += 2; while (idx < 1426 && table[idx] == null);
-          return key.toString();
-        }
-      };
-  }
+    @Override
+    public Enumeration<String> getKeys() {
+        return new Enumeration<>() {
+            private int idx = 0;
 
-  public ResourceBundle getParent () {
-    return parent;
-  }
+            {
+                while (idx < 1426 && table[idx] == null) idx += 2;
+            }
+
+            @Override
+            public boolean hasMoreElements() {
+                return (idx < 1426);
+            }
+
+            @Override
+            public String nextElement() {
+                Object key = table[idx];
+                do idx += 2; while (idx < 1426 && table[idx] == null);
+                return key.toString();
+            }
+        };
+    }
+
+    public ResourceBundle getParent() {
+        return parent;
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/translation/messages_nl.java b/pgjdbc/src/main/java/org/postgresql/translation/messages_nl.java
index 59fdbf4..9b8661a 100644
--- a/pgjdbc/src/main/java/org/postgresql/translation/messages_nl.java
+++ b/pgjdbc/src/main/java/org/postgresql/translation/messages_nl.java
@@ -5,55 +5,61 @@ import java.util.MissingResourceException;
 import java.util.ResourceBundle;
 
 public class messages_nl extends ResourceBundle {
-  private static final String[] table;
-  static {
-    String[] t = new String[36];
-    t[0] = "";
-    t[1] = "Project-Id-Version: PostgreSQL JDBC Driver 8.0\nReport-Msgid-Bugs-To: \nPO-Revision-Date: 2004-10-11 23:55-0700\nLast-Translator: Arnout Kuiper <ajkuiper@wxs.nl>\nLanguage-Team: Dutch <ajkuiper@wxs.nl>\nLanguage: nl\nMIME-Version: 1.0\nContent-Type: text/plain; charset=UTF-8\nContent-Transfer-Encoding: 8bit\n";
-    t[2] = "Something unusual has occurred to cause the driver to fail. Please report this exception.";
-    t[3] = "Iets ongewoons is opgetreden, wat deze driver doet falen. Rapporteer deze fout AUB: {0}";
-    t[8] = "Unknown Types value.";
-    t[9] = "Onbekende Types waarde.";
-    t[12] = "Fastpath call {0} - No result was returned and we expected an integer.";
-    t[13] = "Fastpath aanroep {0} - Geen resultaat werd teruggegeven, terwijl we een integer verwacht hadden.";
-    t[20] = "The fastpath function {0} is unknown.";
-    t[21] = "De fastpath functie {0} is onbekend.";
-    t[22] = "No results were returned by the query.";
-    t[23] = "Geen resultaten werden teruggegeven door de query.";
-    t[26] = "An unexpected result was returned by a query.";
-    t[27] = "Een onverwacht resultaat werd teruggegeven door een query";
-    table = t;
-  }
+    private static final String[] table;
 
-  @Override
-  public Object handleGetObject (String msgid) throws MissingResourceException {
-    int hash_val = msgid.hashCode() & 0x7fffffff;
-    int idx = (hash_val % 18) << 1;
-    Object found = table[idx];
-    if (found != null && msgid.equals(found))
-      return table[idx + 1];
-    return null;
-  }
+    static {
+        String[] t = new String[36];
+        t[0] = "";
+        t[1] = "Project-Id-Version: PostgreSQL JDBC Driver 8.0\nReport-Msgid-Bugs-To: \nPO-Revision-Date: 2004-10-11 23:55-0700\nLast-Translator: Arnout Kuiper <ajkuiper@wxs.nl>\nLanguage-Team: Dutch <ajkuiper@wxs.nl>\nLanguage: nl\nMIME-Version: 1.0\nContent-Type: text/plain; charset=UTF-8\nContent-Transfer-Encoding: 8bit\n";
+        t[2] = "Something unusual has occurred to cause the driver to fail. Please report this exception.";
+        t[3] = "Iets ongewoons is opgetreden, wat deze driver doet falen. Rapporteer deze fout AUB: {0}";
+        t[8] = "Unknown Types value.";
+        t[9] = "Onbekende Types waarde.";
+        t[12] = "Fastpath call {0} - No result was returned and we expected an integer.";
+        t[13] = "Fastpath aanroep {0} - Geen resultaat werd teruggegeven, terwijl we een integer verwacht hadden.";
+        t[20] = "The fastpath function {0} is unknown.";
+        t[21] = "De fastpath functie {0} is onbekend.";
+        t[22] = "No results were returned by the query.";
+        t[23] = "Geen resultaten werden teruggegeven door de query.";
+        t[26] = "An unexpected result was returned by a query.";
+        t[27] = "Een onverwacht resultaat werd teruggegeven door een query";
+        table = t;
+    }
 
-  @Override
-  public Enumeration<String> getKeys () {
-    return new Enumeration<>() {
-        private int idx = 0;
-        { while (idx < 36 && table[idx] == null) idx += 2; }
-      @Override
-        public boolean hasMoreElements () {
-          return (idx < 36);
-        }
-        @Override
-        public String nextElement () {
-          Object key = table[idx];
-          do idx += 2; while (idx < 36 && table[idx] == null);
-          return key.toString();
-        }
-      };
-  }
+    @Override
+    public Object handleGetObject(String msgid) throws MissingResourceException {
+        int hash_val = msgid.hashCode() & 0x7fffffff;
+        int idx = (hash_val % 18) << 1;
+        Object found = table[idx];
+        if (found != null && msgid.equals(found))
+            return table[idx + 1];
+        return null;
+    }
 
-  public ResourceBundle getParent () {
-    return parent;
-  }
+    @Override
+    public Enumeration<String> getKeys() {
+        return new Enumeration<>() {
+            private int idx = 0;
+
+            {
+                while (idx < 36 && table[idx] == null) idx += 2;
+            }
+
+            @Override
+            public boolean hasMoreElements() {
+                return (idx < 36);
+            }
+
+            @Override
+            public String nextElement() {
+                Object key = table[idx];
+                do idx += 2; while (idx < 36 && table[idx] == null);
+                return key.toString();
+            }
+        };
+    }
+
+    public ResourceBundle getParent() {
+        return parent;
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/translation/messages_pl.java b/pgjdbc/src/main/java/org/postgresql/translation/messages_pl.java
index 097627c..a4a2b90 100644
--- a/pgjdbc/src/main/java/org/postgresql/translation/messages_pl.java
+++ b/pgjdbc/src/main/java/org/postgresql/translation/messages_pl.java
@@ -5,191 +5,195 @@ import java.util.MissingResourceException;
 import java.util.ResourceBundle;
 
 public class messages_pl extends ResourceBundle {
-  private static final String[] table;
-  static {
-    String[] t = new String[346];
-    t[0] = "";
-    t[1] = "Project-Id-Version: head-pl\nReport-Msgid-Bugs-To: \nPO-Revision-Date: 2005-05-22 03:01+0200\nLast-Translator: Jarosław Jan Pyszny <jarek@pyszny.net>\nLanguage-Team:  <pl@li.org>\nLanguage: \nMIME-Version: 1.0\nContent-Type: text/plain; charset=UTF-8\nContent-Transfer-Encoding: 8bit\nX-Generator: KBabel 1.10\nPlural-Forms:  nplurals=3; plural=(n==1 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);\n";
-    t[2] = "The driver currently does not support COPY operations.";
-    t[3] = "Sterownik nie obsługuje aktualnie operacji COPY.";
-    t[4] = "Internal Query: {0}";
-    t[5] = "Wewnętrzne Zapytanie: {0}";
-    t[6] = "There are no rows in this ResultSet.";
-    t[7] = "Nie ma żadnych wierszy w tym ResultSet.";
-    t[8] = "Invalid character data was found.  This is most likely caused by stored data containing characters that are invalid for the character set the database was created in.  The most common example of this is storing 8bit data in a SQL_ASCII database.";
-    t[9] = "Znaleziono nieprawidłowy znak. Najprawdopodobniej jest to spowodowane przechowywaniem w bazie znaków, które nie pasują do zestawu znaków wybranego podczas tworzenia bazy danych. Najczęstszy przykład to przechowywanie 8-bitowych znaków w bazie o kodowaniu SQL_ASCII.";
-    t[12] = "Fastpath call {0} - No result was returned and we expected an integer.";
-    t[13] = "Wywołanie fastpath {0} - Nie otrzymano żadnego wyniku, a oczekiwano liczby całkowitej.";
-    t[14] = "An error occurred while setting up the SSL connection.";
-    t[15] = "Wystąpił błąd podczas ustanawiania połączenia SSL.";
-    t[20] = "A CallableStatement was declared, but no call to registerOutParameter(1, <some type>) was made.";
-    t[21] = "Funkcja CallableStatement została zadeklarowana, ale nie wywołano registerOutParameter (1, <jakiś typ>).";
-    t[24] = "Unexpected command status: {0}.";
-    t[25] = "Nieoczekiwany status komendy: {0}.";
-    t[32] = "A connection could not be made using the requested protocol {0}.";
-    t[33] = "Nie można było nawiązać połączenia stosując żądany protokołu {0}.";
-    t[38] = "Bad value for type {0} : {1}";
-    t[39] = "Zła wartość dla typu {0}: {1}";
-    t[40] = "Not on the insert row.";
-    t[41] = "Nie na wstawianym rekordzie.";
-    t[42] = "Premature end of input stream, expected {0} bytes, but only read {1}.";
-    t[43] = "Przedwczesny koniec strumienia wejściowego, oczekiwano {0} bajtów, odczytano tylko {1}.";
-    t[48] = "Unknown type {0}.";
-    t[49] = "Nieznany typ {0}.";
-    t[52] = "The server does not support SSL.";
-    t[53] = "Serwer nie obsługuje SSL.";
-    t[60] = "Cannot call updateRow() when on the insert row.";
-    t[61] = "Nie można wywołać updateRow() na wstawianym rekordzie.";
-    t[62] = "Where: {0}";
-    t[63] = "Gdzie: {0}";
-    t[72] = "Cannot call cancelRowUpdates() when on the insert row.";
-    t[73] = "Nie można wywołać cancelRowUpdates() na wstawianym rekordzie.";
-    t[82] = "Server SQLState: {0}";
-    t[83] = "Serwer SQLState: {0}";
-    t[92] = "ResultSet is not updateable.  The query that generated this result set must select only one table, and must select all primary keys from that table. See the JDBC 2.1 API Specification, section 5.6 for more details.";
-    t[93] = "ResultSet nie jest modyfikowalny (not updateable). Zapytanie, które zwróciło ten wynik musi dotyczyć tylko jednej tabeli oraz musi pobierać wszystkie klucze główne tej tabeli. Zobacz Specyfikację JDBC 2.1 API, rozdział 5.6, by uzyskać więcej szczegółów.";
-    t[102] = "Cannot tell if path is open or closed: {0}.";
-    t[103] = "Nie można stwierdzić, czy ścieżka jest otwarta czy zamknięta: {0}.";
-    t[108] = "The parameter index is out of range: {0}, number of parameters: {1}.";
-    t[109] = "Indeks parametru jest poza zakresem: {0}, liczba parametrów: {1}.";
-    t[110] = "Unsupported Types value: {0}";
-    t[111] = "Nieznana wartość Types: {0}";
-    t[112] = "Currently positioned after the end of the ResultSet.  You cannot call deleteRow() here.";
-    t[113] = "Aktualna pozycja za końcem ResultSet. Nie można wywołać deleteRow().";
-    t[114] = "This ResultSet is closed.";
-    t[115] = "Ten ResultSet jest zamknięty.";
-    t[120] = "Conversion of interval failed";
-    t[121] = "Konwersja typu interval nie powiodła się";
-    t[122] = "Unable to load the class {0} responsible for the datatype {1}";
-    t[123] = "Nie jest możliwe załadowanie klasy {0} odpowiedzialnej za typ danych {1}";
-    t[138] = "Error loading default settings from driverconfig.properties";
-    t[139] = "Błąd podczas wczytywania ustawień domyślnych z driverconfig.properties";
-    t[142] = "The array index is out of range: {0}";
-    t[143] = "Indeks tablicy jest poza zakresem: {0}";
-    t[146] = "Unknown Types value.";
-    t[147] = "Nieznana wartość Types.";
-    t[154] = "The maximum field size must be a value greater than or equal to 0.";
-    t[155] = "Maksymalny rozmiar pola musi być wartością dodatnią lub 0.";
-    t[168] = "Detail: {0}";
-    t[169] = "Szczegóły: {0}";
-    t[170] = "Unknown Response Type {0}.";
-    t[171] = "Nieznany typ odpowiedzi {0}.";
-    t[172] = "Maximum number of rows must be a value grater than or equal to 0.";
-    t[173] = "Maksymalna liczba rekordów musi być wartością dodatnią lub 0.";
-    t[184] = "Query timeout must be a value greater than or equals to 0.";
-    t[185] = "Timeout zapytania musi być wartością dodatnią lub 0.";
-    t[186] = "Too many update results were returned.";
-    t[187] = "Zapytanie nie zwróciło żadnych wyników.";
-    t[190] = "The connection attempt failed.";
-    t[191] = "Próba nawiązania połączenia nie powiodła się.";
-    t[198] = "Connection has been closed automatically because a new connection was opened for the same PooledConnection or the PooledConnection has been closed.";
-    t[199] = "Połączenie zostało zamknięte automatycznie, ponieważ nowe połączenie zostało otwarte dla tego samego PooledConnection lub PooledConnection zostało zamknięte.";
-    t[204] = "Protocol error.  Session setup failed.";
-    t[205] = "Błąd protokołu. Nie udało się utworzyć sesji.";
-    t[206] = "This PooledConnection has already been closed.";
-    t[207] = "To PooledConnection zostało już zamknięte.";
-    t[208] = "DataSource has been closed.";
-    t[209] = "DataSource zostało zamknięte.";
-    t[212] = "Method {0} is not yet implemented.";
-    t[213] = "Metoda {0}nie jest jeszcze obsługiwana.";
-    t[216] = "Hint: {0}";
-    t[217] = "Wskazówka: {0}";
-    t[218] = "No value specified for parameter {0}.";
-    t[219] = "Nie podano wartości dla parametru {0}.";
-    t[222] = "Position: {0}";
-    t[223] = "Pozycja: {0}";
-    t[226] = "Cannot call deleteRow() when on the insert row.";
-    t[227] = "Nie można wywołać deleteRow() na wstawianym rekordzie.";
-    t[240] = "Conversion of money failed.";
-    t[241] = "Konwersja typu money nie powiodła się.";
-    t[244] = "Internal Position: {0}";
-    t[245] = "Wewnętrzna Pozycja: {0}";
-    t[248] = "Connection has been closed.";
-    t[249] = "Połączenie zostało zamknięte.";
-    t[254] = "Currently positioned before the start of the ResultSet.  You cannot call deleteRow() here.";
-    t[255] = "Aktualna pozycja przed początkiem ResultSet. Nie można wywołać deleteRow().";
-    t[258] = "Failed to create object for: {0}.";
-    t[259] = "Nie powiodło się utworzenie obiektu dla: {0}.";
-    t[262] = "Fetch size must be a value greater to or equal to 0.";
-    t[263] = "Rozmiar pobierania musi być wartością dodatnią lub 0.";
-    t[270] = "No results were returned by the query.";
-    t[271] = "Zapytanie nie zwróciło żadnych wyników.";
-    t[276] = "The authentication type {0} is not supported. Check that you have configured the pg_hba.conf file to include the client''s IP address or subnet, and that it is using an authentication scheme supported by the driver.";
-    t[277] = "Uwierzytelnienie typu {0} nie jest obsługiwane. Upewnij się, że skonfigurowałeś plik pg_hba.conf tak, że zawiera on adres IP lub podsieć klienta oraz że użyta metoda uwierzytelnienia jest wspierana przez ten sterownik.";
-    t[280] = "Conversion to type {0} failed: {1}.";
-    t[281] = "Konwersja do typu {0} nie powiodła się: {1}.";
-    t[282] = "A result was returned when none was expected.";
-    t[283] = "Zwrócono wynik zapytania, choć nie był on oczekiwany.";
-    t[292] = "Transaction isolation level {0} not supported.";
-    t[293] = "Poziom izolacji transakcji {0} nie jest obsługiwany.";
-    t[306] = "ResultSet not positioned properly, perhaps you need to call next.";
-    t[307] = "Zła pozycja w ResultSet, może musisz wywołać next.";
-    t[308] = "Location: File: {0}, Routine: {1}, Line: {2}";
-    t[309] = "Lokalizacja: Plik: {0}, Procedura: {1}, Linia: {2}";
-    t[314] = "An unexpected result was returned by a query.";
-    t[315] = "Zapytanie zwróciło nieoczekiwany wynik.";
-    t[316] = "The column index is out of range: {0}, number of columns: {1}.";
-    t[317] = "Indeks kolumny jest poza zakresem: {0}, liczba kolumn: {1}.";
-    t[318] = "Expected command status BEGIN, got {0}.";
-    t[319] = "Spodziewano się statusu komendy BEGIN, otrzymano {0}.";
-    t[320] = "The fastpath function {0} is unknown.";
-    t[321] = "Funkcja fastpath {0} jest nieznana.";
-    t[324] = "The server requested password-based authentication, but no password was provided.";
-    t[325] = "Serwer zażądał uwierzytelnienia opartego na haśle, ale żadne hasło nie zostało dostarczone.";
-    t[332] = "The array index is out of range: {0}, number of elements: {1}.";
-    t[333] = "Indeks tablicy jest poza zakresem: {0}, liczba elementów: {1}.";
-    t[338] = "Something unusual has occurred to cause the driver to fail. Please report this exception.";
-    t[339] = "Coś niezwykłego spowodowało pad sterownika. Proszę, zgłoś ten wyjątek.";
-    t[342] = "Zero bytes may not occur in string parameters.";
-    t[343] = "Zerowe bajty nie mogą pojawiać się w parametrach typu łańcuch znakowy.";
-    table = t;
-  }
+    private static final String[] table;
 
-  @Override
-  public Object handleGetObject (String msgid) throws MissingResourceException {
-    int hash_val = msgid.hashCode() & 0x7fffffff;
-    int idx = (hash_val % 173) << 1;
-    {
-      Object found = table[idx];
-      if (found == null)
-        return null;
-      if (msgid.equals(found))
-        return table[idx + 1];
+    static {
+        String[] t = new String[346];
+        t[0] = "";
+        t[1] = "Project-Id-Version: head-pl\nReport-Msgid-Bugs-To: \nPO-Revision-Date: 2005-05-22 03:01+0200\nLast-Translator: Jarosław Jan Pyszny <jarek@pyszny.net>\nLanguage-Team:  <pl@li.org>\nLanguage: \nMIME-Version: 1.0\nContent-Type: text/plain; charset=UTF-8\nContent-Transfer-Encoding: 8bit\nX-Generator: KBabel 1.10\nPlural-Forms:  nplurals=3; plural=(n==1 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);\n";
+        t[2] = "The driver currently does not support COPY operations.";
+        t[3] = "Sterownik nie obsługuje aktualnie operacji COPY.";
+        t[4] = "Internal Query: {0}";
+        t[5] = "Wewnętrzne Zapytanie: {0}";
+        t[6] = "There are no rows in this ResultSet.";
+        t[7] = "Nie ma żadnych wierszy w tym ResultSet.";
+        t[8] = "Invalid character data was found.  This is most likely caused by stored data containing characters that are invalid for the character set the database was created in.  The most common example of this is storing 8bit data in a SQL_ASCII database.";
+        t[9] = "Znaleziono nieprawidłowy znak. Najprawdopodobniej jest to spowodowane przechowywaniem w bazie znaków, które nie pasują do zestawu znaków wybranego podczas tworzenia bazy danych. Najczęstszy przykład to przechowywanie 8-bitowych znaków w bazie o kodowaniu SQL_ASCII.";
+        t[12] = "Fastpath call {0} - No result was returned and we expected an integer.";
+        t[13] = "Wywołanie fastpath {0} - Nie otrzymano żadnego wyniku, a oczekiwano liczby całkowitej.";
+        t[14] = "An error occurred while setting up the SSL connection.";
+        t[15] = "Wystąpił błąd podczas ustanawiania połączenia SSL.";
+        t[20] = "A CallableStatement was declared, but no call to registerOutParameter(1, <some type>) was made.";
+        t[21] = "Funkcja CallableStatement została zadeklarowana, ale nie wywołano registerOutParameter (1, <jakiś typ>).";
+        t[24] = "Unexpected command status: {0}.";
+        t[25] = "Nieoczekiwany status komendy: {0}.";
+        t[32] = "A connection could not be made using the requested protocol {0}.";
+        t[33] = "Nie można było nawiązać połączenia stosując żądany protokołu {0}.";
+        t[38] = "Bad value for type {0} : {1}";
+        t[39] = "Zła wartość dla typu {0}: {1}";
+        t[40] = "Not on the insert row.";
+        t[41] = "Nie na wstawianym rekordzie.";
+        t[42] = "Premature end of input stream, expected {0} bytes, but only read {1}.";
+        t[43] = "Przedwczesny koniec strumienia wejściowego, oczekiwano {0} bajtów, odczytano tylko {1}.";
+        t[48] = "Unknown type {0}.";
+        t[49] = "Nieznany typ {0}.";
+        t[52] = "The server does not support SSL.";
+        t[53] = "Serwer nie obsługuje SSL.";
+        t[60] = "Cannot call updateRow() when on the insert row.";
+        t[61] = "Nie można wywołać updateRow() na wstawianym rekordzie.";
+        t[62] = "Where: {0}";
+        t[63] = "Gdzie: {0}";
+        t[72] = "Cannot call cancelRowUpdates() when on the insert row.";
+        t[73] = "Nie można wywołać cancelRowUpdates() na wstawianym rekordzie.";
+        t[82] = "Server SQLState: {0}";
+        t[83] = "SQLState serwera: {0}";
+        t[92] = "ResultSet is not updateable.  The query that generated this result set must select only one table, and must select all primary keys from that table. See the JDBC 2.1 API Specification, section 5.6 for more details.";
+        t[93] = "ResultSet nie jest modyfikowalny (not updateable). Zapytanie, które zwróciło ten wynik musi dotyczyć tylko jednej tabeli oraz musi pobierać wszystkie klucze główne tej tabeli. Zobacz Specyfikację JDBC 2.1 API, rozdział 5.6, by uzyskać więcej szczegółów.";
+        t[102] = "Cannot tell if path is open or closed: {0}.";
+        t[103] = "Nie można stwierdzić, czy ścieżka jest otwarta czy zamknięta: {0}.";
+        t[108] = "The parameter index is out of range: {0}, number of parameters: {1}.";
+        t[109] = "Indeks parametru jest poza zakresem: {0}, liczba parametrów: {1}.";
+        t[110] = "Unsupported Types value: {0}";
+        t[111] = "Nieznana wartość Types: {0}";
+        t[112] = "Currently positioned after the end of the ResultSet.  You cannot call deleteRow() here.";
+        t[113] = "Aktualna pozycja za końcem ResultSet. Nie można wywołać deleteRow().";
+        t[114] = "This ResultSet is closed.";
+        t[115] = "Ten ResultSet jest zamknięty.";
+        t[120] = "Conversion of interval failed";
+        t[121] = "Konwersja typu interval nie powiodła się";
+        t[122] = "Unable to load the class {0} responsible for the datatype {1}";
+        t[123] = "Nie jest możliwe załadowanie klasy {0} odpowiedzialnej za typ danych {1}";
+        t[138] = "Error loading default settings from driverconfig.properties";
+        t[139] = "Błąd podczas wczytywania ustawień domyślnych z driverconfig.properties";
+        t[142] = "The array index is out of range: {0}";
+        t[143] = "Indeks tablicy jest poza zakresem: {0}";
+        t[146] = "Unknown Types value.";
+        t[147] = "Nieznana wartość Types.";
+        t[154] = "The maximum field size must be a value greater than or equal to 0.";
+        t[155] = "Maksymalny rozmiar pola musi być wartością dodatnią lub 0.";
+        t[168] = "Detail: {0}";
+        t[169] = "Szczegóły: {0}";
+        t[170] = "Unknown Response Type {0}.";
+        t[171] = "Nieznany typ odpowiedzi {0}.";
+        t[172] = "Maximum number of rows must be a value grater than or equal to 0.";
+        t[173] = "Maksymalna liczba rekordów musi być wartością dodatnią lub 0.";
+        t[184] = "Query timeout must be a value greater than or equals to 0.";
+        t[185] = "Timeout zapytania musi być wartością dodatnią lub 0.";
+        t[186] = "Too many update results were returned.";
+        t[187] = "Zwrócono zbyt wiele wyników aktualizacji.";
+        t[190] = "The connection attempt failed.";
+        t[191] = "Próba nawiązania połączenia nie powiodła się.";
+        t[198] = "Connection has been closed automatically because a new connection was opened for the same PooledConnection or the PooledConnection has been closed.";
+        t[199] = "Połączenie zostało zamknięte automatycznie, ponieważ nowe połączenie zostało otwarte dla tego samego PooledConnection lub PooledConnection zostało zamknięte.";
+        t[204] = "Protocol error.  Session setup failed.";
+        t[205] = "Błąd protokołu. Nie udało się utworzyć sesji.";
+        t[206] = "This PooledConnection has already been closed.";
+        t[207] = "To PooledConnection zostało już zamknięte.";
+        t[208] = "DataSource has been closed.";
+        t[209] = "DataSource zostało zamknięte.";
+        t[212] = "Method {0} is not yet implemented.";
+        t[213] = "Metoda {0} nie jest jeszcze obsługiwana.";
+        t[216] = "Hint: {0}";
+        t[217] = "Wskazówka: {0}";
+        t[218] = "No value specified for parameter {0}.";
+        t[219] = "Nie podano wartości dla parametru {0}.";
+        t[222] = "Position: {0}";
+        t[223] = "Pozycja: {0}";
+        t[226] = "Cannot call deleteRow() when on the insert row.";
+        t[227] = "Nie można wywołać deleteRow() na wstawianym rekordzie.";
+        t[240] = "Conversion of money failed.";
+        t[241] = "Konwersja typu money nie powiodła się.";
+        t[244] = "Internal Position: {0}";
+        t[245] = "Wewnętrzna Pozycja: {0}";
+        t[248] = "Connection has been closed.";
+        t[249] = "Połączenie zostało zamknięte.";
+        t[254] = "Currently positioned before the start of the ResultSet.  You cannot call deleteRow() here.";
+        t[255] = "Aktualna pozycja przed początkiem ResultSet. Nie można wywołać deleteRow().";
+        t[258] = "Failed to create object for: {0}.";
+        t[259] = "Nie powiodło się utworzenie obiektu dla: {0}.";
+        t[262] = "Fetch size must be a value greater to or equal to 0.";
+        t[263] = "Rozmiar pobierania musi być wartością dodatnią lub 0.";
+        t[270] = "No results were returned by the query.";
+        t[271] = "Zapytanie nie zwróciło żadnych wyników.";
+        t[276] = "The authentication type {0} is not supported. Check that you have configured the pg_hba.conf file to include the client''s IP address or subnet, and that it is using an authentication scheme supported by the driver.";
+        t[277] = "Uwierzytelnienie typu {0} nie jest obsługiwane. Upewnij się, że skonfigurowałeś plik pg_hba.conf tak, że zawiera on adres IP lub podsieć klienta oraz że użyta metoda uwierzytelnienia jest wspierana przez ten sterownik.";
+        t[280] = "Conversion to type {0} failed: {1}.";
+        t[281] = "Konwersja do typu {0} nie powiodła się: {1}.";
+        t[282] = "A result was returned when none was expected.";
+        t[283] = "Zwrócono wynik zapytania, choć nie był on oczekiwany.";
+        t[292] = "Transaction isolation level {0} not supported.";
+        t[293] = "Poziom izolacji transakcji {0} nie jest obsługiwany.";
+        t[306] = "ResultSet not positioned properly, perhaps you need to call next.";
+        t[307] = "Zła pozycja w ResultSet, może musisz wywołać next.";
+        t[308] = "Location: File: {0}, Routine: {1}, Line: {2}";
+        t[309] = "Lokalizacja: Plik: {0}, Procedura: {1}, Linia: {2}";
+        t[314] = "An unexpected result was returned by a query.";
+        t[315] = "Zapytanie zwróciło nieoczekiwany wynik.";
+        t[316] = "The column index is out of range: {0}, number of columns: {1}.";
+        t[317] = "Indeks kolumny jest poza zakresem: {0}, liczba kolumn: {1}.";
+        t[318] = "Expected command status BEGIN, got {0}.";
+        t[319] = "Spodziewano się statusu komendy BEGIN, otrzymano {0}.";
+        t[320] = "The fastpath function {0} is unknown.";
+        t[321] = "Funkcja fastpath {0} jest nieznana.";
+        t[324] = "The server requested password-based authentication, but no password was provided.";
+        t[325] = "Serwer zażądał uwierzytelnienia opartego na haśle, ale żadne hasło nie zostało dostarczone.";
+        t[332] = "The array index is out of range: {0}, number of elements: {1}.";
+        t[333] = "Indeks tablicy jest poza zakresem: {0}, liczba elementów: {1}.";
+        t[338] = "Something unusual has occurred to cause the driver to fail. Please report this exception.";
+        t[339] = "Coś niezwykłego spowodowało awarię sterownika. Proszę, zgłoś ten wyjątek.";
+        t[342] = "Zero bytes may not occur in string parameters.";
+        t[343] = "Zerowe bajty nie mogą pojawiać się w parametrach typu łańcuch znakowy.";
+        table = t;
     }
-    int incr = ((hash_val % 171) + 1) << 1;
-    for (;;) {
-      idx += incr;
-      if (idx >= 346)
-        idx -= 346;
-      Object found = table[idx];
-      if (found == null)
-        return null;
-      if (msgid.equals(found))
-        return table[idx + 1];
+
+    @Override
+    public Object handleGetObject(String msgid) throws MissingResourceException {
+        int hash_val = msgid.hashCode() & 0x7fffffff;
+        int idx = (hash_val % 173) << 1;
+        {
+            Object found = table[idx];
+            if (found == null)
+                return null;
+            if (msgid.equals(found))
+                return table[idx + 1];
+        }
+        int incr = ((hash_val % 171) + 1) << 1;
+        for (; ; ) {
+            idx += incr;
+            if (idx >= 346)
+                idx -= 346;
+            Object found = table[idx];
+            if (found == null)
+                return null;
+            if (msgid.equals(found))
+                return table[idx + 1];
+        }
     }
-  }
 
-  @Override
-  public Enumeration<String> getKeys () {
-    return new Enumeration<>() {
-        private int idx = 0;
-        { while (idx < 346 && table[idx] == null) idx += 2; }
+    @Override
+    public Enumeration<String> getKeys() {
+        return new Enumeration<>() {
+            private int idx = 0;
 
-      @Override
-        public boolean hasMoreElements () {
-          return (idx < 346);
-        }
+            {
+                while (idx < 346 && table[idx] == null) idx += 2;
+            }
 
-      @Override
-        public String nextElement () {
-          Object key = table[idx];
-          do idx += 2; while (idx < 346 && table[idx] == null);
-          return key.toString();
-        }
-      };
-  }
+            @Override
+            public boolean hasMoreElements() {
+                return (idx < 346);
+            }
 
-  public ResourceBundle getParent () {
-    return parent;
-  }
+            @Override
+            public String nextElement() {
+                Object key = table[idx];
+                do idx += 2; while (idx < 346 && table[idx] == null);
+                return key.toString();
+            }
+        };
+    }
+
+    public ResourceBundle getParent() {
+        return parent;
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/translation/messages_pt_BR.java b/pgjdbc/src/main/java/org/postgresql/translation/messages_pt_BR.java
index 1ee4680..87a74bd 100644
--- a/pgjdbc/src/main/java/org/postgresql/translation/messages_pt_BR.java
+++ b/pgjdbc/src/main/java/org/postgresql/translation/messages_pt_BR.java
@@ -5,395 +5,401 @@ import java.util.MissingResourceException;
 import java.util.ResourceBundle;
 
 public class messages_pt_BR extends ResourceBundle {
-  private static final String[] table;
-  static {
-    String[] t = new String[794];
-    t[0] = "";
-    t[1] = "Project-Id-Version: PostgreSQL 8.4\nReport-Msgid-Bugs-To: \nPO-Revision-Date: 2004-10-31 20:48-0300\nLast-Translator: Euler Taveira de Oliveira <euler@timbira.com>\nLanguage-Team: Brazilian Portuguese <pgbr-dev@listas.postgresql.org.br>\nLanguage: pt_BR\nMIME-Version: 1.0\nContent-Type: text/plain; charset=UTF-8\nContent-Transfer-Encoding: 8bit\n";
-    t[2] = "Not implemented: 2nd phase commit must be issued using an idle connection. commit xid={0}, currentXid={1}, state={2}, transactionState={3}";
-    t[3] = "Não está implementado: efetivação da segunda fase deve ser executada utilizado uma conexão ociosa. commit xid={0}, currentXid={1}, state={2}, transactionState={3}";
-    t[4] = "DataSource has been closed.";
-    t[5] = "DataSource foi fechado.";
-    t[8] = "Invalid flags {0}";
-    t[9] = "Marcadores={0} inválidos";
-    t[18] = "Where: {0}";
-    t[19] = "Onde: {0}";
-    t[24] = "Unknown XML Source class: {0}";
-    t[25] = "Classe XML Source desconhecida: {0}";
-    t[26] = "The connection attempt failed.";
-    t[27] = "A tentativa de conexão falhou.";
-    t[28] = "Currently positioned after the end of the ResultSet.  You cannot call deleteRow() here.";
-    t[29] = "Posicionado depois do fim do ResultSet.  Você não pode chamar deleteRow() aqui.";
-    t[32] = "Can''t use query methods that take a query string on a PreparedStatement.";
-    t[33] = "Não pode utilizar métodos de consulta que pegam uma consulta de um comando preparado.";
-    t[36] = "Multiple ResultSets were returned by the query.";
-    t[37] = "ResultSets múltiplos foram retornados pela consulta.";
-    t[50] = "Too many update results were returned.";
-    t[51] = "Muitos resultados de atualização foram retornados.";
-    t[58] = "Illegal UTF-8 sequence: initial byte is {0}: {1}";
-    t[59] = "Sequência UTF-8 ilegal: byte inicial é {0}: {1}";
-    t[66] = "The column name {0} was not found in this ResultSet.";
-    t[67] = "A nome da coluna {0} não foi encontrado neste ResultSet.";
-    t[70] = "Fastpath call {0} - No result was returned and we expected an integer.";
-    t[71] = "Chamada ao Fastpath {0} - Nenhum resultado foi retornado e nós esperávamos um inteiro.";
-    t[74] = "Protocol error.  Session setup failed.";
-    t[75] = "Erro de Protocolo. Configuração da sessão falhou.";
-    t[76] = "A CallableStatement was declared, but no call to registerOutParameter(1, <some type>) was made.";
-    t[77] = "Uma função foi declarada mas nenhuma chamada a registerOutParameter (1, <algum_tipo>) foi feita.";
-    t[78] = "ResultSets with concurrency CONCUR_READ_ONLY cannot be updated.";
-    t[79] = "ResultSets com CONCUR_READ_ONLY concorrentes não podem ser atualizados.";
-    t[90] = "LOB positioning offsets start at 1.";
-    t[91] = "Deslocamentos da posição de LOB começam em 1.";
-    t[92] = "Internal Position: {0}";
-    t[93] = "Posição Interna: {0}";
-    t[96] = "free() was called on this LOB previously";
-    t[97] = "free() já foi chamado neste LOB";
-    t[100] = "Cannot change transaction read-only property in the middle of a transaction.";
-    t[101] = "Não pode mudar propriedade somente-leitura da transação no meio de uma transação.";
-    t[102] = "The JVM claims not to support the {0} encoding.";
-    t[103] = "A JVM reclamou que não suporta a codificação {0}.";
-    t[108] = "{0} function doesn''t take any argument.";
-    t[109] = "função {0} não recebe nenhum argumento.";
-    t[112] = "xid must not be null";
-    t[113] = "xid não deve ser nulo";
-    t[114] = "Connection has been closed.";
-    t[115] = "Conexão foi fechada.";
-    t[122] = "The server does not support SSL.";
-    t[123] = "O servidor não suporta SSL.";
-    t[124] = "Custom type maps are not supported.";
-    t[125] = "Mapeamento de tipos personalizados não são suportados.";
-    t[140] = "Illegal UTF-8 sequence: byte {0} of {1} byte sequence is not 10xxxxxx: {2}";
-    t[141] = "Sequência UTF-8 ilegal: byte {0} da sequência de bytes {1} não é 10xxxxxx: {2}";
-    t[148] = "Hint: {0}";
-    t[149] = "Dica: {0}";
-    t[152] = "Unable to find name datatype in the system catalogs.";
-    t[153] = "Não foi possível encontrar tipo de dado name nos catálogos do sistema.";
-    t[156] = "Unsupported Types value: {0}";
-    t[157] = "Valor de Types não é suportado: {0}";
-    t[158] = "Unknown type {0}.";
-    t[159] = "Tipo desconhecido {0}.";
-    t[166] = "{0} function takes two and only two arguments.";
-    t[167] = "função {0} recebe somente dois argumentos.";
-    t[170] = "Finalizing a Connection that was never closed:";
-    t[171] = "Fechando uma Conexão que não foi fechada:";
-    t[180] = "The maximum field size must be a value greater than or equal to 0.";
-    t[181] = "O tamanho máximo de um campo deve ser um valor maior ou igual a 0.";
-    t[186] = "PostgreSQL LOBs can only index to: {0}";
-    t[187] = "LOBs do PostgreSQL só podem indexar até: {0}";
-    t[194] = "Method {0} is not yet implemented.";
-    t[195] = "Método {0} ainda não foi implementado.";
-    t[198] = "Error loading default settings from driverconfig.properties";
-    t[199] = "Erro ao carregar configurações padrão do driverconfig.properties";
-    t[200] = "Results cannot be retrieved from a CallableStatement before it is executed.";
-    t[201] = "Resultados não podem ser recuperados de uma função antes dela ser executada.";
-    t[202] = "Large Objects may not be used in auto-commit mode.";
-    t[203] = "Objetos Grandes não podem ser usados no modo de efetivação automática (auto-commit).";
-    t[208] = "Expected command status BEGIN, got {0}.";
-    t[209] = "Status do comando BEGIN esperado, recebeu {0}.";
-    t[218] = "Invalid fetch direction constant: {0}.";
-    t[219] = "Constante de direção da busca é inválida: {0}.";
-    t[222] = "{0} function takes three and only three arguments.";
-    t[223] = "função {0} recebe três e somente três argumentos.";
-    t[226] = "This SQLXML object has already been freed.";
-    t[227] = "Este objeto SQLXML já foi liberado.";
-    t[228] = "Cannot update the ResultSet because it is either before the start or after the end of the results.";
-    t[229] = "Não pode atualizar o ResultSet porque ele está antes do início ou depois do fim dos resultados.";
-    t[230] = "The JVM claims not to support the encoding: {0}";
-    t[231] = "A JVM reclamou que não suporta a codificação: {0}";
-    t[232] = "Parameter of type {0} was registered, but call to get{1} (sqltype={2}) was made.";
-    t[233] = "Parâmetro do tipo {0} foi registrado, mas uma chamada a get{1} (tiposql={2}) foi feita.";
-    t[240] = "Cannot establish a savepoint in auto-commit mode.";
-    t[241] = "Não pode estabelecer um savepoint no modo de efetivação automática (auto-commit).";
-    t[242] = "Cannot retrieve the id of a named savepoint.";
-    t[243] = "Não pode recuperar o id de um savepoint com nome.";
-    t[244] = "The column index is out of range: {0}, number of columns: {1}.";
-    t[245] = "O índice da coluna está fora do intervalo: {0}, número de colunas: {1}.";
-    t[250] = "Something unusual has occurred to cause the driver to fail. Please report this exception.";
-    t[251] = "Alguma coisa não usual ocorreu para causar a falha do driver. Por favor reporte esta exceção.";
-    t[260] = "Cannot cast an instance of {0} to type {1}";
-    t[261] = "Não pode converter uma instância de {0} para tipo {1}";
-    t[264] = "Unknown Types value.";
-    t[265] = "Valor de Types desconhecido.";
-    t[266] = "Invalid stream length {0}.";
-    t[267] = "Tamanho de dado {0} é inválido.";
-    t[272] = "Cannot retrieve the name of an unnamed savepoint.";
-    t[273] = "Não pode recuperar o nome de um savepoint sem nome.";
-    t[274] = "Unable to translate data into the desired encoding.";
-    t[275] = "Não foi possível traduzir dado para codificação desejada.";
-    t[276] = "Expected an EOF from server, got: {0}";
-    t[277] = "Esperado um EOF do servidor, recebido: {0}";
-    t[278] = "Bad value for type {0} : {1}";
-    t[279] = "Valor inválido para tipo {0} : {1}";
-    t[280] = "The server requested password-based authentication, but no password was provided.";
-    t[281] = "O servidor pediu autenticação baseada em senha, mas nenhuma senha foi fornecida.";
-    t[286] = "Unable to create SAXResult for SQLXML.";
-    t[287] = "Não foi possível criar SAXResult para SQLXML.";
-    t[292] = "Error during recover";
-    t[293] = "Erro durante recuperação";
-    t[294] = "tried to call end without corresponding start call. state={0}, start xid={1}, currentXid={2}, preparedXid={3}";
-    t[295] = "tentou executar end sem a chamada ao start correspondente. state={0}, start xid={1}, currentXid={2}, preparedXid={3}";
-    t[296] = "Truncation of large objects is only implemented in 8.3 and later servers.";
-    t[297] = "Truncar objetos grandes só é implementado por servidores 8.3 ou superiores.";
-    t[298] = "This PooledConnection has already been closed.";
-    t[299] = "Este PooledConnection já foi fechado.";
-    t[302] = "ClientInfo property not supported.";
-    t[303] = "propriedade ClientInfo não é suportada.";
-    t[306] = "Fetch size must be a value greater to or equal to 0.";
-    t[307] = "Tamanho da busca deve ser um valor maior ou igual a 0.";
-    t[312] = "A connection could not be made using the requested protocol {0}.";
-    t[313] = "A conexão não pode ser feita usando protocolo informado {0}.";
-    t[318] = "Unknown XML Result class: {0}";
-    t[319] = "Classe XML Result desconhecida: {0}";
-    t[322] = "There are no rows in this ResultSet.";
-    t[323] = "Não há nenhum registro neste ResultSet.";
-    t[324] = "Unexpected command status: {0}.";
-    t[325] = "Status do comando inesperado: {0}.";
-    t[330] = "Heuristic commit/rollback not supported. forget xid={0}";
-    t[331] = "Efetivação/Cancelamento heurístico não é suportado. forget xid={0}";
-    t[334] = "Not on the insert row.";
-    t[335] = "Não está inserindo um registro.";
-    t[336] = "This SQLXML object has already been initialized, so you cannot manipulate it further.";
-    t[337] = "Este objeto SQLXML já foi inicializado, então você não pode manipulá-lo depois.";
-    t[344] = "Server SQLState: {0}";
-    t[345] = "SQLState: {0}";
-    t[348] = "The server''s standard_conforming_strings parameter was reported as {0}. The JDBC driver expected on or off.";
-    t[349] = "O parâmetro do servidor standard_conforming_strings foi definido como {0}. O driver JDBC espera que seja on ou off.";
-    t[360] = "The driver currently does not support COPY operations.";
-    t[361] = "O driver atualmente não suporta operações COPY.";
-    t[364] = "The array index is out of range: {0}, number of elements: {1}.";
-    t[365] = "O índice da matriz está fora do intervalo: {0}, número de elementos: {1}.";
-    t[374] = "suspend/resume not implemented";
-    t[375] = "suspender/recomeçar não está implementado";
-    t[378] = "Not implemented: one-phase commit must be issued using the same connection that was used to start it";
-    t[379] = "Não está implementado: efetivada da primeira fase deve ser executada utilizando a mesma conexão que foi utilizada para iniciá-la";
-    t[380] = "Error during one-phase commit. commit xid={0}";
-    t[381] = "Erro durante efetivação de uma fase. commit xid={0}";
-    t[398] = "Cannot call cancelRowUpdates() when on the insert row.";
-    t[399] = "Não pode chamar cancelRowUpdates() quando estiver inserindo registro.";
-    t[400] = "Cannot reference a savepoint after it has been released.";
-    t[401] = "Não pode referenciar um savepoint após ele ser descartado.";
-    t[402] = "You must specify at least one column value to insert a row.";
-    t[403] = "Você deve especificar pelo menos uma coluna para inserir um registro.";
-    t[404] = "Unable to determine a value for MaxIndexKeys due to missing system catalog data.";
-    t[405] = "Não foi possível determinar um valor para MaxIndexKeys por causa de falta de dados no catálogo do sistema.";
-    t[410] = "commit called before end. commit xid={0}, state={1}";
-    t[411] = "commit executado antes do end. commit xid={0}, state={1}";
-    t[412] = "Illegal UTF-8 sequence: final value is out of range: {0}";
-    t[413] = "Sequência UTF-8 ilegal: valor final está fora do intervalo: {0}";
-    t[414] = "{0} function takes two or three arguments.";
-    t[415] = "função {0} recebe dois ou três argumentos.";
-    t[428] = "Unable to convert DOMResult SQLXML data to a string.";
-    t[429] = "Não foi possível converter dado SQLXML do DOMResult para uma cadeia de caracteres.";
-    t[434] = "Unable to decode xml data.";
-    t[435] = "Não foi possível decodificar dado xml.";
-    t[440] = "Unexpected error writing large object to database.";
-    t[441] = "Erro inesperado ao escrever objeto grande no banco de dados.";
-    t[442] = "Zero bytes may not occur in string parameters.";
-    t[443] = "Zero bytes não podem ocorrer em parâmetros de cadeia de caracteres.";
-    t[444] = "A result was returned when none was expected.";
-    t[445] = "Um resultado foi retornado quando nenhum era esperado.";
-    t[450] = "ResultSet is not updateable.  The query that generated this result set must select only one table, and must select all primary keys from that table. See the JDBC 2.1 API Specification, section 5.6 for more details.";
-    t[451] = "ResultSet não é atualizável. A consulta que gerou esse conjunto de resultados deve selecionar somente uma tabela, e deve selecionar todas as chaves primárias daquela tabela. Veja a especificação na API do JDBC 2.1, seção 5.6 para obter mais detalhes.";
-    t[454] = "Bind message length {0} too long.  This can be caused by very large or incorrect length specifications on InputStream parameters.";
-    t[455] = "Tamanho de mensagem de ligação {0} é muito longo. Isso pode ser causado por especificações de tamanho incorretas ou muito grandes nos parâmetros do InputStream.";
-    t[460] = "Statement has been closed.";
-    t[461] = "Comando foi fechado.";
-    t[462] = "No value specified for parameter {0}.";
-    t[463] = "Nenhum valor especificado para parâmetro {0}.";
-    t[468] = "The array index is out of range: {0}";
-    t[469] = "O índice da matriz está fora do intervalo: {0}";
-    t[474] = "Unable to bind parameter values for statement.";
-    t[475] = "Não foi possível ligar valores de parâmetro ao comando.";
-    t[476] = "Can''t refresh the insert row.";
-    t[477] = "Não pode renovar um registro inserido.";
-    t[480] = "No primary key found for table {0}.";
-    t[481] = "Nenhuma chave primária foi encontrada para tabela {0}.";
-    t[482] = "Cannot change transaction isolation level in the middle of a transaction.";
-    t[483] = "Não pode mudar nível de isolamento da transação no meio de uma transação.";
-    t[498] = "Provided InputStream failed.";
-    t[499] = "InputStream fornecido falhou.";
-    t[500] = "The parameter index is out of range: {0}, number of parameters: {1}.";
-    t[501] = "O índice de parâmetro está fora do intervalo: {0}, número de parâmetros: {1}.";
-    t[502] = "The server''s DateStyle parameter was changed to {0}. The JDBC driver requires DateStyle to begin with ISO for correct operation.";
-    t[503] = "O parâmetro do servidor DateStyle foi alterado para {0}. O driver JDBC requer que o DateStyle começe com ISO para operação normal.";
-    t[508] = "Connection attempt timed out.";
-    t[509] = "Tentativa de conexão falhou.";
-    t[512] = "Internal Query: {0}";
-    t[513] = "Consulta Interna: {0}";
-    t[514] = "Error preparing transaction. prepare xid={0}";
-    t[515] = "Erro ao preparar transação. prepare xid={0}";
-    t[518] = "The authentication type {0} is not supported. Check that you have configured the pg_hba.conf file to include the client''s IP address or subnet, and that it is using an authentication scheme supported by the driver.";
-    t[519] = "O tipo de autenticação {0} não é suportado. Verifique se você configurou o arquivo pg_hba.conf incluindo a subrede ou endereço IP do cliente, e se está utilizando o esquema de autenticação suportado pelo driver.";
-    t[526] = "Interval {0} not yet implemented";
-    t[527] = "Intervalo {0} ainda não foi implementado";
-    t[532] = "Conversion of interval failed";
-    t[533] = "Conversão de interval falhou";
-    t[540] = "Query timeout must be a value greater than or equals to 0.";
-    t[541] = "Tempo de espera da consulta deve ser um valor maior ou igual a 0.";
-    t[542] = "Connection has been closed automatically because a new connection was opened for the same PooledConnection or the PooledConnection has been closed.";
-    t[543] = "Conexão foi fechada automaticamente porque uma nova conexão foi aberta pelo mesmo PooledConnection ou o PooledConnection foi fechado.";
-    t[544] = "ResultSet not positioned properly, perhaps you need to call next.";
-    t[545] = "ResultSet não está posicionado corretamente, talvez você precise chamar next.";
-    t[546] = "Prepare called before end. prepare xid={0}, state={1}";
-    t[547] = "Prepare executado antes do end. prepare xid={0}, state={1}";
-    t[548] = "Invalid UUID data.";
-    t[549] = "dado UUID é inválido.";
-    t[550] = "This statement has been closed.";
-    t[551] = "Este comando foi fechado.";
-    t[552] = "Can''t infer the SQL type to use for an instance of {0}. Use setObject() with an explicit Types value to specify the type to use.";
-    t[553] = "Não pode inferir um tipo SQL a ser usado para uma instância de {0}. Use setObject() com um valor de Types explícito para especificar o tipo a ser usado.";
-    t[554] = "Cannot call updateRow() when on the insert row.";
-    t[555] = "Não pode chamar updateRow() quando estiver inserindo registro.";
-    t[562] = "Detail: {0}";
-    t[563] = "Detalhe: {0}";
-    t[566] = "Cannot call deleteRow() when on the insert row.";
-    t[567] = "Não pode chamar deleteRow() quando estiver inserindo registro.";
-    t[568] = "Currently positioned before the start of the ResultSet.  You cannot call deleteRow() here.";
-    t[569] = "Posicionado antes do início do ResultSet.  Você não pode chamar deleteRow() aqui.";
-    t[576] = "Illegal UTF-8 sequence: final value is a surrogate value: {0}";
-    t[577] = "Sequência UTF-8 ilegal: valor final é um valor suplementar: {0}";
-    t[578] = "Unknown Response Type {0}.";
-    t[579] = "Tipo de Resposta Desconhecido {0}.";
-    t[582] = "Unsupported value for stringtype parameter: {0}";
-    t[583] = "Valor do parâmetro stringtype não é suportado: {0}";
-    t[584] = "Conversion to type {0} failed: {1}.";
-    t[585] = "Conversão para tipo {0} falhou: {1}.";
-    t[586] = "This SQLXML object has not been initialized, so you cannot retrieve data from it.";
-    t[587] = "Este objeto SQLXML não foi inicializado, então você não pode recuperar dados dele.";
-    t[600] = "Unable to load the class {0} responsible for the datatype {1}";
-    t[601] = "Não foi possível carregar a classe {0} responsável pelo tipo de dado {1}";
-    t[604] = "The fastpath function {0} is unknown.";
-    t[605] = "A função do fastpath {0} é desconhecida.";
-    t[608] = "Malformed function or procedure escape syntax at offset {0}.";
-    t[609] = "Sintaxe de escape mal formada da função ou do procedimento no deslocamento {0}.";
-    t[612] = "Provided Reader failed.";
-    t[613] = "Reader fornecido falhou.";
-    t[614] = "Maximum number of rows must be a value grater than or equal to 0.";
-    t[615] = "Número máximo de registros deve ser um valor maior ou igual a 0.";
-    t[616] = "Failed to create object for: {0}.";
-    t[617] = "Falhou ao criar objeto para: {0}.";
-    t[620] = "Conversion of money failed.";
-    t[621] = "Conversão de money falhou.";
-    t[622] = "Premature end of input stream, expected {0} bytes, but only read {1}.";
-    t[623] = "Fim de entrada prematuro, eram esperados {0} bytes, mas somente {1} foram lidos.";
-    t[626] = "An unexpected result was returned by a query.";
-    t[627] = "Um resultado inesperado foi retornado pela consulta.";
-    t[644] = "Invalid protocol state requested. Attempted transaction interleaving is not supported. xid={0}, currentXid={1}, state={2}, flags={3}";
-    t[645] = "Intercalação de transação não está implementado. xid={0}, currentXid={1}, state={2}, flags={3}";
-    t[646] = "An error occurred while setting up the SSL connection.";
-    t[647] = "Um erro ocorreu ao estabelecer uma conexão SSL.";
-    t[654] = "Illegal UTF-8 sequence: {0} bytes used to encode a {1} byte value: {2}";
-    t[655] = "Sequência UTF-8 ilegal: {0} bytes utilizados para codificar um valor de {1} bytes: {2}";
-    t[656] = "Not implemented: Prepare must be issued using the same connection that started the transaction. currentXid={0}, prepare xid={1}";
-    t[657] = "Não está implementado: Prepare deve ser executado utilizando a mesma conexão que iniciou a transação. currentXid={0}, prepare xid={1}";
-    t[658] = "The SSLSocketFactory class provided {0} could not be instantiated.";
-    t[659] = "A classe SSLSocketFactory forneceu {0} que não pôde ser instanciado.";
-    t[662] = "Failed to convert binary xml data to encoding: {0}.";
-    t[663] = "Falhou ao converter dados xml binários para codificação: {0}.";
-    t[670] = "Position: {0}";
-    t[671] = "Posição: {0}";
-    t[676] = "Location: File: {0}, Routine: {1}, Line: {2}";
-    t[677] = "Local: Arquivo: {0}, Rotina: {1}, Linha: {2}";
-    t[684] = "Cannot tell if path is open or closed: {0}.";
-    t[685] = "Não pode dizer se caminho está aberto ou fechado: {0}.";
-    t[690] = "Unable to create StAXResult for SQLXML";
-    t[691] = "Não foi possível criar StAXResult para SQLXML";
-    t[700] = "Cannot convert an instance of {0} to type {1}";
-    t[701] = "Não pode converter uma instância de {0} para tipo {1}";
-    t[710] = "{0} function takes four and only four argument.";
-    t[711] = "função {0} recebe somente quatro argumentos.";
-    t[716] = "Error disabling autocommit";
-    t[717] = "Erro ao desabilitar autocommit";
-    t[718] = "Interrupted while attempting to connect.";
-    t[719] = "Interrompido ao tentar se conectar.";
-    t[722] = "Your security policy has prevented the connection from being attempted.  You probably need to grant the connect java.net.SocketPermission to the database server host and port that you wish to connect to.";
-    t[723] = "Sua política de segurança impediu que a conexão pudesse ser estabelecida. Você provavelmente precisa conceder permissão em java.net.SocketPermission para a máquina e a porta do servidor de banco de dados que você deseja se conectar.";
-    t[734] = "No function outputs were registered.";
-    t[735] = "Nenhum saída de função foi registrada.";
-    t[736] = "{0} function takes one and only one argument.";
-    t[737] = "função {0} recebe somente um argumento.";
-    t[744] = "This ResultSet is closed.";
-    t[745] = "Este ResultSet está fechado.";
-    t[746] = "Invalid character data was found.  This is most likely caused by stored data containing characters that are invalid for the character set the database was created in.  The most common example of this is storing 8bit data in a SQL_ASCII database.";
-    t[747] = "Caracter inválido foi encontrado. Isso é mais comumente causado por dado armazenado que contém caracteres que são inválidos para a codificação que foi criado o banco de dados. O exemplo mais comum disso é armazenar dados de 8 bits em um banco de dados SQL_ASCII.";
-    t[752] = "GSS Authentication failed";
-    t[753] = "Autenticação GSS falhou";
-    t[754] = "Ran out of memory retrieving query results.";
-    t[755] = "Memória insuficiente ao recuperar resultados da consulta.";
-    t[756] = "Returning autogenerated keys is not supported.";
-    t[757] = "Retorno de chaves geradas automaticamente não é suportado.";
-    t[760] = "Operation requires a scrollable ResultSet, but this ResultSet is FORWARD_ONLY.";
-    t[761] = "Operação requer um ResultSet rolável, mas este ResultSet é FORWARD_ONLY (somente para frente).";
-    t[762] = "A CallableStatement function was executed and the out parameter {0} was of type {1} however type {2} was registered.";
-    t[763] = "Uma função foi executada e o parâmetro de retorno {0} era do tipo {1} contudo tipo {2} foi registrado.";
-    t[764] = "Unable to find server array type for provided name {0}.";
-    t[765] = "Não foi possível encontrar tipo matriz para nome fornecido {0}.";
-    t[768] = "Unknown ResultSet holdability setting: {0}.";
-    t[769] = "Definição de durabilidade do ResultSet desconhecida: {0}.";
-    t[772] = "Transaction isolation level {0} not supported.";
-    t[773] = "Nível de isolamento da transação {0} não é suportado.";
-    t[774] = "Zero bytes may not occur in identifiers.";
-    t[775] = "Zero bytes não podem ocorrer em identificadores.";
-    t[776] = "No results were returned by the query.";
-    t[777] = "Nenhum resultado foi retornado pela consulta.";
-    t[778] = "A CallableStatement was executed with nothing returned.";
-    t[779] = "Uma função foi executada e nada foi retornado.";
-    t[780] = "wasNull cannot be call before fetching a result.";
-    t[781] = "wasNull não pode ser chamado antes de obter um resultado.";
-    t[784] = "Returning autogenerated keys by column index is not supported.";
-    t[785] = "Retorno de chaves geradas automaticamente por índice de coluna não é suportado.";
-    t[786] = "This statement does not declare an OUT parameter.  Use '{' ?= call ... '}' to declare one.";
-    t[787] = "Este comando não declara um parâmetro de saída. Utilize '{' ?= chamada ... '}' para declarar um)";
-    t[788] = "Can''t use relative move methods while on the insert row.";
-    t[789] = "Não pode utilizar métodos de movimentação relativos enquanto estiver inserindo registro.";
-    t[790] = "A CallableStatement was executed with an invalid number of parameters";
-    t[791] = "Uma função foi executada com um número inválido de parâmetros";
-    t[792] = "Connection is busy with another transaction";
-    t[793] = "Conexão está ocupada com outra transação";
-    table = t;
-  }
+    private static final String[] table;
 
-  @Override
-  public Object handleGetObject (String msgid) throws MissingResourceException {
-    int hash_val = msgid.hashCode() & 0x7fffffff;
-    int idx = (hash_val % 397) << 1;
-    {
-      Object found = table[idx];
-      if (found == null)
-        return null;
-      if (msgid.equals(found))
-        return table[idx + 1];
+    static {
+        String[] t = new String[794];
+        t[0] = "";
+        t[1] = "Project-Id-Version: PostgreSQL 8.4\nReport-Msgid-Bugs-To: \nPO-Revision-Date: 2004-10-31 20:48-0300\nLast-Translator: Euler Taveira de Oliveira <euler@timbira.com>\nLanguage-Team: Brazilian Portuguese <pgbr-dev@listas.postgresql.org.br>\nLanguage: pt_BR\nMIME-Version: 1.0\nContent-Type: text/plain; charset=UTF-8\nContent-Transfer-Encoding: 8bit\n";
+        t[2] = "Not implemented: 2nd phase commit must be issued using an idle connection. commit xid={0}, currentXid={1}, state={2}, transactionState={3}";
+        t[3] = "Não está implementado: efetivação da segunda fase deve ser executada utilizado uma conexão ociosa. commit xid={0}, currentXid={1}, state={2}, transactionState={3}";
+        t[4] = "DataSource has been closed.";
+        t[5] = "DataSource foi fechado.";
+        t[8] = "Invalid flags {0}";
+        t[9] = "Marcadores={0} inválidos";
+        t[18] = "Where: {0}";
+        t[19] = "Onde: {0}";
+        t[24] = "Unknown XML Source class: {0}";
+        t[25] = "Classe XML Source desconhecida: {0}";
+        t[26] = "The connection attempt failed.";
+        t[27] = "A tentativa de conexão falhou.";
+        t[28] = "Currently positioned after the end of the ResultSet.  You cannot call deleteRow() here.";
+        t[29] = "Posicionado depois do fim do ResultSet.  Você não pode chamar deleteRow() aqui.";
+        t[32] = "Can''t use query methods that take a query string on a PreparedStatement.";
+        t[33] = "Não pode utilizar métodos de consulta que pegam uma consulta de um comando preparado.";
+        t[36] = "Multiple ResultSets were returned by the query.";
+        t[37] = "ResultSets múltiplos foram retornados pela consulta.";
+        t[50] = "Too many update results were returned.";
+        t[51] = "Muitos resultados de atualização foram retornados.";
+        t[58] = "Illegal UTF-8 sequence: initial byte is {0}: {1}";
+        t[59] = "Sequência UTF-8 ilegal: byte inicial é {0}: {1}";
+        t[66] = "The column name {0} was not found in this ResultSet.";
+        t[67] = "A nome da coluna {0} não foi encontrado neste ResultSet.";
+        t[70] = "Fastpath call {0} - No result was returned and we expected an integer.";
+        t[71] = "Chamada ao Fastpath {0} - Nenhum resultado foi retornado e nós esperávamos um inteiro.";
+        t[74] = "Protocol error.  Session setup failed.";
+        t[75] = "Erro de Protocolo. Configuração da sessão falhou.";
+        t[76] = "A CallableStatement was declared, but no call to registerOutParameter(1, <some type>) was made.";
+        t[77] = "Uma função foi declarada mas nenhuma chamada a registerOutParameter (1, <algum_tipo>) foi feita.";
+        t[78] = "ResultSets with concurrency CONCUR_READ_ONLY cannot be updated.";
+        t[79] = "ResultSets com CONCUR_READ_ONLY concorrentes não podem ser atualizados.";
+        t[90] = "LOB positioning offsets start at 1.";
+        t[91] = "Deslocamentos da posição de LOB começam em 1.";
+        t[92] = "Internal Position: {0}";
+        t[93] = "Posição Interna: {0}";
+        t[96] = "free() was called on this LOB previously";
+        t[97] = "free() já foi chamado neste LOB";
+        t[100] = "Cannot change transaction read-only property in the middle of a transaction.";
+        t[101] = "Não pode mudar propriedade somente-leitura da transação no meio de uma transação.";
+        t[102] = "The JVM claims not to support the {0} encoding.";
+        t[103] = "A JVM reclamou que não suporta a codificação {0}.";
+        t[108] = "{0} function doesn''t take any argument.";
+        t[109] = "função {0} não recebe nenhum argumento.";
+        t[112] = "xid must not be null";
+        t[113] = "xid não deve ser nulo";
+        t[114] = "Connection has been closed.";
+        t[115] = "Conexão foi fechada.";
+        t[122] = "The server does not support SSL.";
+        t[123] = "O servidor não suporta SSL.";
+        t[124] = "Custom type maps are not supported.";
+        t[125] = "Mapeamento de tipos personalizados não são suportados.";
+        t[140] = "Illegal UTF-8 sequence: byte {0} of {1} byte sequence is not 10xxxxxx: {2}";
+        t[141] = "Sequência UTF-8 ilegal: byte {0} da sequência de bytes {1} não é 10xxxxxx: {2}";
+        t[148] = "Hint: {0}";
+        t[149] = "Dica: {0}";
+        t[152] = "Unable to find name datatype in the system catalogs.";
+        t[153] = "Não foi possível encontrar tipo de dado name nos catálogos do sistema.";
+        t[156] = "Unsupported Types value: {0}";
+        t[157] = "Valor de Types não é suportado: {0}";
+        t[158] = "Unknown type {0}.";
+        t[159] = "Tipo desconhecido {0}.";
+        t[166] = "{0} function takes two and only two arguments.";
+        t[167] = "função {0} recebe somente dois argumentos.";
+        t[170] = "Finalizing a Connection that was never closed:";
+        t[171] = "Fechando uma Conexão que não foi fechada:";
+        t[180] = "The maximum field size must be a value greater than or equal to 0.";
+        t[181] = "O tamanho máximo de um campo deve ser um valor maior ou igual a 0.";
+        t[186] = "PostgreSQL LOBs can only index to: {0}";
+        t[187] = "LOBs do PostgreSQL só podem indexar até: {0}";
+        t[194] = "Method {0} is not yet implemented.";
+        t[195] = "Método {0} ainda não foi implementado.";
+        t[198] = "Error loading default settings from driverconfig.properties";
+        t[199] = "Erro ao carregar configurações padrão do driverconfig.properties";
+        t[200] = "Results cannot be retrieved from a CallableStatement before it is executed.";
+        t[201] = "Resultados não podem ser recuperados de uma função antes dela ser executada.";
+        t[202] = "Large Objects may not be used in auto-commit mode.";
+        t[203] = "Objetos Grandes não podem ser usados no modo de efetivação automática (auto-commit).";
+        t[208] = "Expected command status BEGIN, got {0}.";
+        t[209] = "Status do comando BEGIN esperado, recebeu {0}.";
+        t[218] = "Invalid fetch direction constant: {0}.";
+        t[219] = "Constante de direção da busca é inválida: {0}.";
+        t[222] = "{0} function takes three and only three arguments.";
+        t[223] = "função {0} recebe três e somente três argumentos.";
+        t[226] = "This SQLXML object has already been freed.";
+        t[227] = "Este objeto SQLXML já foi liberado.";
+        t[228] = "Cannot update the ResultSet because it is either before the start or after the end of the results.";
+        t[229] = "Não pode atualizar o ResultSet porque ele está antes do início ou depois do fim dos resultados.";
+        t[230] = "The JVM claims not to support the encoding: {0}";
+        t[231] = "A JVM reclamou que não suporta a codificação: {0}";
+        t[232] = "Parameter of type {0} was registered, but call to get{1} (sqltype={2}) was made.";
+        t[233] = "Parâmetro do tipo {0} foi registrado, mas uma chamada a get{1} (tiposql={2}) foi feita.";
+        t[240] = "Cannot establish a savepoint in auto-commit mode.";
+        t[241] = "Não pode estabelecer um savepoint no modo de efetivação automática (auto-commit).";
+        t[242] = "Cannot retrieve the id of a named savepoint.";
+        t[243] = "Não pode recuperar o id de um savepoint com nome.";
+        t[244] = "The column index is out of range: {0}, number of columns: {1}.";
+        t[245] = "O índice da coluna está fora do intervalo: {0}, número de colunas: {1}.";
+        t[250] = "Something unusual has occurred to cause the driver to fail. Please report this exception.";
+        t[251] = "Alguma coisa não usual ocorreu para causar a falha do driver. Por favor reporte esta exceção.";
+        t[260] = "Cannot cast an instance of {0} to type {1}";
+        t[261] = "Não pode converter uma instância de {0} para tipo {1}";
+        t[264] = "Unknown Types value.";
+        t[265] = "Valor de Types desconhecido.";
+        t[266] = "Invalid stream length {0}.";
+        t[267] = "Tamanho de dado {0} é inválido.";
+        t[272] = "Cannot retrieve the name of an unnamed savepoint.";
+        t[273] = "Não pode recuperar o nome de um savepoint sem nome.";
+        t[274] = "Unable to translate data into the desired encoding.";
+        t[275] = "Não foi possível traduzir dado para codificação desejada.";
+        t[276] = "Expected an EOF from server, got: {0}";
+        t[277] = "Esperado um EOF do servidor, recebido: {0}";
+        t[278] = "Bad value for type {0} : {1}";
+        t[279] = "Valor inválido para tipo {0} : {1}";
+        t[280] = "The server requested password-based authentication, but no password was provided.";
+        t[281] = "O servidor pediu autenticação baseada em senha, mas nenhuma senha foi fornecida.";
+        t[286] = "Unable to create SAXResult for SQLXML.";
+        t[287] = "Não foi possível criar SAXResult para SQLXML.";
+        t[292] = "Error during recover";
+        t[293] = "Erro durante recuperação";
+        t[294] = "tried to call end without corresponding start call. state={0}, start xid={1}, currentXid={2}, preparedXid={3}";
+        t[295] = "tentou executar end sem a chamada ao start correspondente. state={0}, start xid={1}, currentXid={2}, preparedXid={3}";
+        t[296] = "Truncation of large objects is only implemented in 8.3 and later servers.";
+        t[297] = "Truncar objetos grandes só é implementado por servidores 8.3 ou superiores.";
+        t[298] = "This PooledConnection has already been closed.";
+        t[299] = "Este PooledConnection já foi fechado.";
+        t[302] = "ClientInfo property not supported.";
+        t[303] = "propriedade ClientInfo não é suportada.";
+        t[306] = "Fetch size must be a value greater to or equal to 0.";
+        t[307] = "Tamanho da busca deve ser um valor maior ou igual a 0.";
+        t[312] = "A connection could not be made using the requested protocol {0}.";
+        t[313] = "A conexão não pode ser feita usando protocolo informado {0}.";
+        t[318] = "Unknown XML Result class: {0}";
+        t[319] = "Classe XML Result desconhecida: {0}";
+        t[322] = "There are no rows in this ResultSet.";
+        t[323] = "Não há nenhum registro neste ResultSet.";
+        t[324] = "Unexpected command status: {0}.";
+        t[325] = "Status do comando inesperado: {0}.";
+        t[330] = "Heuristic commit/rollback not supported. forget xid={0}";
+        t[331] = "Efetivação/Cancelamento heurístico não é suportado. forget xid={0}";
+        t[334] = "Not on the insert row.";
+        t[335] = "Não está inserindo um registro.";
+        t[336] = "This SQLXML object has already been initialized, so you cannot manipulate it further.";
+        t[337] = "Este objeto SQLXML já foi inicializado, então você não pode manipulá-lo depois.";
+        t[344] = "Server SQLState: {0}";
+        t[345] = "SQLState: {0}";
+        t[348] = "The server''s standard_conforming_strings parameter was reported as {0}. The JDBC driver expected on or off.";
+        t[349] = "O parâmetro do servidor standard_conforming_strings foi definido como {0}. O driver JDBC espera que seja on ou off.";
+        t[360] = "The driver currently does not support COPY operations.";
+        t[361] = "O driver atualmente não suporta operações COPY.";
+        t[364] = "The array index is out of range: {0}, number of elements: {1}.";
+        t[365] = "O índice da matriz está fora do intervalo: {0}, número de elementos: {1}.";
+        t[374] = "suspend/resume not implemented";
+        t[375] = "suspender/recomeçar não está implementado";
+        t[378] = "Not implemented: one-phase commit must be issued using the same connection that was used to start it";
+        t[379] = "Não está implementado: efetivada da primeira fase deve ser executada utilizando a mesma conexão que foi utilizada para iniciá-la";
+        t[380] = "Error during one-phase commit. commit xid={0}";
+        t[381] = "Erro durante efetivação de uma fase. commit xid={0}";
+        t[398] = "Cannot call cancelRowUpdates() when on the insert row.";
+        t[399] = "Não pode chamar cancelRowUpdates() quando estiver inserindo registro.";
+        t[400] = "Cannot reference a savepoint after it has been released.";
+        t[401] = "Não pode referenciar um savepoint após ele ser descartado.";
+        t[402] = "You must specify at least one column value to insert a row.";
+        t[403] = "Você deve especificar pelo menos uma coluna para inserir um registro.";
+        t[404] = "Unable to determine a value for MaxIndexKeys due to missing system catalog data.";
+        t[405] = "Não foi possível determinar um valor para MaxIndexKeys por causa de falta de dados no catálogo do sistema.";
+        t[410] = "commit called before end. commit xid={0}, state={1}";
+        t[411] = "commit executado antes do end. commit xid={0}, state={1}";
+        t[412] = "Illegal UTF-8 sequence: final value is out of range: {0}";
+        t[413] = "Sequência UTF-8 ilegal: valor final está fora do intervalo: {0}";
+        t[414] = "{0} function takes two or three arguments.";
+        t[415] = "função {0} recebe dois ou três argumentos.";
+        t[428] = "Unable to convert DOMResult SQLXML data to a string.";
+        t[429] = "Não foi possível converter dado SQLXML do DOMResult para uma cadeia de caracteres.";
+        t[434] = "Unable to decode xml data.";
+        t[435] = "Não foi possível decodificar dado xml.";
+        t[440] = "Unexpected error writing large object to database.";
+        t[441] = "Erro inesperado ao escrever objeto grande no banco de dados.";
+        t[442] = "Zero bytes may not occur in string parameters.";
+        t[443] = "Zero bytes não podem ocorrer em parâmetros de cadeia de caracteres.";
+        t[444] = "A result was returned when none was expected.";
+        t[445] = "Um resultado foi retornado quando nenhum era esperado.";
+        t[450] = "ResultSet is not updateable.  The query that generated this result set must select only one table, and must select all primary keys from that table. See the JDBC 2.1 API Specification, section 5.6 for more details.";
+        t[451] = "ResultSet não é atualizável. A consulta que gerou esse conjunto de resultados deve selecionar somente uma tabela, e deve selecionar todas as chaves primárias daquela tabela. Veja a especificação na API do JDBC 2.1, seção 5.6 para obter mais detalhes.";
+        t[454] = "Bind message length {0} too long.  This can be caused by very large or incorrect length specifications on InputStream parameters.";
+        t[455] = "Tamanho de mensagem de ligação {0} é muito longo. Isso pode ser causado por especificações de tamanho incorretas ou muito grandes nos parâmetros do InputStream.";
+        t[460] = "Statement has been closed.";
+        t[461] = "Comando foi fechado.";
+        t[462] = "No value specified for parameter {0}.";
+        t[463] = "Nenhum valor especificado para parâmetro {0}.";
+        t[468] = "The array index is out of range: {0}";
+        t[469] = "O índice da matriz está fora do intervalo: {0}";
+        t[474] = "Unable to bind parameter values for statement.";
+        t[475] = "Não foi possível ligar valores de parâmetro ao comando.";
+        t[476] = "Can''t refresh the insert row.";
+        t[477] = "Não pode renovar um registro inserido.";
+        t[480] = "No primary key found for table {0}.";
+        t[481] = "Nenhuma chave primária foi encontrada para tabela {0}.";
+        t[482] = "Cannot change transaction isolation level in the middle of a transaction.";
+        t[483] = "Não pode mudar nível de isolamento da transação no meio de uma transação.";
+        t[498] = "Provided InputStream failed.";
+        t[499] = "InputStream fornecido falhou.";
+        t[500] = "The parameter index is out of range: {0}, number of parameters: {1}.";
+        t[501] = "O índice de parâmetro está fora do intervalo: {0}, número de parâmetros: {1}.";
+        t[502] = "The server''s DateStyle parameter was changed to {0}. The JDBC driver requires DateStyle to begin with ISO for correct operation.";
+        t[503] = "O parâmetro do servidor DateStyle foi alterado para {0}. O driver JDBC requer que o DateStyle começe com ISO para operação normal.";
+        t[508] = "Connection attempt timed out.";
+        t[509] = "Tentativa de conexão falhou.";
+        t[512] = "Internal Query: {0}";
+        t[513] = "Consulta Interna: {0}";
+        t[514] = "Error preparing transaction. prepare xid={0}";
+        t[515] = "Erro ao preparar transação. prepare xid={0}";
+        t[518] = "The authentication type {0} is not supported. Check that you have configured the pg_hba.conf file to include the client''s IP address or subnet, and that it is using an authentication scheme supported by the driver.";
+        t[519] = "O tipo de autenticação {0} não é suportado. Verifique se você configurou o arquivo pg_hba.conf incluindo a subrede ou endereço IP do cliente, e se está utilizando o esquema de autenticação suportado pelo driver.";
+        t[526] = "Interval {0} not yet implemented";
+        t[527] = "Intervalo {0} ainda não foi implementado";
+        t[532] = "Conversion of interval failed";
+        t[533] = "Conversão de interval falhou";
+        t[540] = "Query timeout must be a value greater than or equals to 0.";
+        t[541] = "Tempo de espera da consulta deve ser um valor maior ou igual a 0.";
+        t[542] = "Connection has been closed automatically because a new connection was opened for the same PooledConnection or the PooledConnection has been closed.";
+        t[543] = "Conexão foi fechada automaticamente porque uma nova conexão foi aberta pelo mesmo PooledConnection ou o PooledConnection foi fechado.";
+        t[544] = "ResultSet not positioned properly, perhaps you need to call next.";
+        t[545] = "ResultSet não está posicionado corretamente, talvez você precise chamar next.";
+        t[546] = "Prepare called before end. prepare xid={0}, state={1}";
+        t[547] = "Prepare executado antes do end. prepare xid={0}, state={1}";
+        t[548] = "Invalid UUID data.";
+        t[549] = "dado UUID é inválido.";
+        t[550] = "This statement has been closed.";
+        t[551] = "Este comando foi fechado.";
+        t[552] = "Can''t infer the SQL type to use for an instance of {0}. Use setObject() with an explicit Types value to specify the type to use.";
+        t[553] = "Não pode inferir um tipo SQL a ser usado para uma instância de {0}. Use setObject() com um valor de Types explícito para especificar o tipo a ser usado.";
+        t[554] = "Cannot call updateRow() when on the insert row.";
+        t[555] = "Não pode chamar updateRow() quando estiver inserindo registro.";
+        t[562] = "Detail: {0}";
+        t[563] = "Detalhe: {0}";
+        t[566] = "Cannot call deleteRow() when on the insert row.";
+        t[567] = "Não pode chamar deleteRow() quando estiver inserindo registro.";
+        t[568] = "Currently positioned before the start of the ResultSet.  You cannot call deleteRow() here.";
+        t[569] = "Posicionado antes do início do ResultSet.  Você não pode chamar deleteRow() aqui.";
+        t[576] = "Illegal UTF-8 sequence: final value is a surrogate value: {0}";
+        t[577] = "Sequência UTF-8 ilegal: valor final é um valor suplementar: {0}";
+        t[578] = "Unknown Response Type {0}.";
+        t[579] = "Tipo de Resposta Desconhecido {0}.";
+        t[582] = "Unsupported value for stringtype parameter: {0}";
+        t[583] = "Valor do parâmetro stringtype não é suportado: {0}";
+        t[584] = "Conversion to type {0} failed: {1}.";
+        t[585] = "Conversão para tipo {0} falhou: {1}.";
+        t[586] = "This SQLXML object has not been initialized, so you cannot retrieve data from it.";
+        t[587] = "Este objeto SQLXML não foi inicializado, então você não pode recuperar dados dele.";
+        t[600] = "Unable to load the class {0} responsible for the datatype {1}";
+        t[601] = "Não foi possível carregar a classe {0} responsável pelo tipo de dado {1}";
+        t[604] = "The fastpath function {0} is unknown.";
+        t[605] = "A função do fastpath {0} é desconhecida.";
+        t[608] = "Malformed function or procedure escape syntax at offset {0}.";
+        t[609] = "Sintaxe de escape mal formada da função ou do procedimento no deslocamento {0}.";
+        t[612] = "Provided Reader failed.";
+        t[613] = "Reader fornecido falhou.";
+        t[614] = "Maximum number of rows must be a value grater than or equal to 0.";
+        t[615] = "Número máximo de registros deve ser um valor maior ou igual a 0.";
+        t[616] = "Failed to create object for: {0}.";
+        t[617] = "Falhou ao criar objeto para: {0}.";
+        t[620] = "Conversion of money failed.";
+        t[621] = "Conversão de money falhou.";
+        t[622] = "Premature end of input stream, expected {0} bytes, but only read {1}.";
+        t[623] = "Fim de entrada prematuro, eram esperados {0} bytes, mas somente {1} foram lidos.";
+        t[626] = "An unexpected result was returned by a query.";
+        t[627] = "Um resultado inesperado foi retornado pela consulta.";
+        t[644] = "Invalid protocol state requested. Attempted transaction interleaving is not supported. xid={0}, currentXid={1}, state={2}, flags={3}";
+        t[645] = "Intercalação de transação não está implementado. xid={0}, currentXid={1}, state={2}, flags={3}";
+        t[646] = "An error occurred while setting up the SSL connection.";
+        t[647] = "Um erro ocorreu ao estabelecer uma conexão SSL.";
+        t[654] = "Illegal UTF-8 sequence: {0} bytes used to encode a {1} byte value: {2}";
+        t[655] = "Sequência UTF-8 ilegal: {0} bytes utilizados para codificar um valor de {1} bytes: {2}";
+        t[656] = "Not implemented: Prepare must be issued using the same connection that started the transaction. currentXid={0}, prepare xid={1}";
+        t[657] = "Não está implementado: Prepare deve ser executado utilizando a mesma conexão que iniciou a transação. currentXid={0}, prepare xid={1}";
+        t[658] = "The SSLSocketFactory class provided {0} could not be instantiated.";
+        t[659] = "A classe SSLSocketFactory forneceu {0} que não pôde ser instanciado.";
+        t[662] = "Failed to convert binary xml data to encoding: {0}.";
+        t[663] = "Falhou ao converter dados xml binários para codificação: {0}.";
+        t[670] = "Position: {0}";
+        t[671] = "Posição: {0}";
+        t[676] = "Location: File: {0}, Routine: {1}, Line: {2}";
+        t[677] = "Local: Arquivo: {0}, Rotina: {1}, Linha: {2}";
+        t[684] = "Cannot tell if path is open or closed: {0}.";
+        t[685] = "Não pode dizer se caminho está aberto ou fechado: {0}.";
+        t[690] = "Unable to create StAXResult for SQLXML";
+        t[691] = "Não foi possível criar StAXResult para SQLXML";
+        t[700] = "Cannot convert an instance of {0} to type {1}";
+        t[701] = "Não pode converter uma instância de {0} para tipo {1}";
+        t[710] = "{0} function takes four and only four argument.";
+        t[711] = "função {0} recebe somente quatro argumentos.";
+        t[716] = "Error disabling autocommit";
+        t[717] = "Erro ao desabilitar autocommit";
+        t[718] = "Interrupted while attempting to connect.";
+        t[719] = "Interrompido ao tentar se conectar.";
+        t[722] = "Your security policy has prevented the connection from being attempted.  You probably need to grant the connect java.net.SocketPermission to the database server host and port that you wish to connect to.";
+        t[723] = "Sua política de segurança impediu que a conexão pudesse ser estabelecida. Você provavelmente precisa conceder permissão em java.net.SocketPermission para a máquina e a porta do servidor de banco de dados que você deseja se conectar.";
+        t[734] = "No function outputs were registered.";
+        t[735] = "Nenhuma saída de função foi registrada.";
+        t[736] = "{0} function takes one and only one argument.";
+        t[737] = "função {0} recebe somente um argumento.";
+        t[744] = "This ResultSet is closed.";
+        t[745] = "Este ResultSet está fechado.";
+        t[746] = "Invalid character data was found.  This is most likely caused by stored data containing characters that are invalid for the character set the database was created in.  The most common example of this is storing 8bit data in a SQL_ASCII database.";
+        t[747] = "Caractere inválido foi encontrado. Isso é mais comumente causado por dado armazenado que contém caracteres que são inválidos para a codificação que foi criado o banco de dados. O exemplo mais comum disso é armazenar dados de 8 bits em um banco de dados SQL_ASCII.";
+        t[752] = "GSS Authentication failed";
+        t[753] = "Autenticação GSS falhou";
+        t[754] = "Ran out of memory retrieving query results.";
+        t[755] = "Memória insuficiente ao recuperar resultados da consulta.";
+        t[756] = "Returning autogenerated keys is not supported.";
+        t[757] = "Retorno de chaves geradas automaticamente não é suportado.";
+        t[760] = "Operation requires a scrollable ResultSet, but this ResultSet is FORWARD_ONLY.";
+        t[761] = "Operação requer um ResultSet rolável, mas este ResultSet é FORWARD_ONLY (somente para frente).";
+        t[762] = "A CallableStatement function was executed and the out parameter {0} was of type {1} however type {2} was registered.";
+        t[763] = "Uma função foi executada e o parâmetro de retorno {0} era do tipo {1} contudo tipo {2} foi registrado.";
+        t[764] = "Unable to find server array type for provided name {0}.";
+        t[765] = "Não foi possível encontrar tipo matriz para nome fornecido {0}.";
+        t[768] = "Unknown ResultSet holdability setting: {0}.";
+        t[769] = "Definição de durabilidade do ResultSet desconhecida: {0}.";
+        t[772] = "Transaction isolation level {0} not supported.";
+        t[773] = "Nível de isolamento da transação {0} não é suportado.";
+        t[774] = "Zero bytes may not occur in identifiers.";
+        t[775] = "Zero bytes não podem ocorrer em identificadores.";
+        t[776] = "No results were returned by the query.";
+        t[777] = "Nenhum resultado foi retornado pela consulta.";
+        t[778] = "A CallableStatement was executed with nothing returned.";
+        t[779] = "Uma função foi executada e nada foi retornado.";
+        t[780] = "wasNull cannot be call before fetching a result.";
+        t[781] = "wasNull não pode ser chamado antes de obter um resultado.";
+        t[784] = "Returning autogenerated keys by column index is not supported.";
+        t[785] = "Retorno de chaves geradas automaticamente por índice de coluna não é suportado.";
+        t[786] = "This statement does not declare an OUT parameter.  Use '{' ?= call ... '}' to declare one.";
+        t[787] = "Este comando não declara um parâmetro de saída. Utilize '{' ?= chamada ... '}' para declarar um.";
+        t[788] = "Can''t use relative move methods while on the insert row.";
+        t[789] = "Não pode utilizar métodos de movimentação relativos enquanto estiver inserindo registro.";
+        t[790] = "A CallableStatement was executed with an invalid number of parameters";
+        t[791] = "Uma função foi executada com um número inválido de parâmetros";
+        t[792] = "Connection is busy with another transaction";
+        t[793] = "Conexão está ocupada com outra transação";
+        table = t;
     }
-    int incr = ((hash_val % 395) + 1) << 1;
-    for (;;) {
-      idx += incr;
-      if (idx >= 794)
-        idx -= 794;
-      Object found = table[idx];
-      if (found == null)
-        return null;
-      if (msgid.equals(found))
-        return table[idx + 1];
+
+    @Override
+    public Object handleGetObject(String msgid) throws MissingResourceException {
+        int hash_val = msgid.hashCode() & 0x7fffffff;
+        int idx = (hash_val % 397) << 1;
+        {
+            Object found = table[idx];
+            if (found == null)
+                return null;
+            if (msgid.equals(found))
+                return table[idx + 1];
+        }
+        int incr = ((hash_val % 395) + 1) << 1;
+        for (; ; ) {
+            idx += incr;
+            if (idx >= 794)
+                idx -= 794;
+            Object found = table[idx];
+            if (found == null)
+                return null;
+            if (msgid.equals(found))
+                return table[idx + 1];
+        }
     }
-  }
 
-  @Override
-  public Enumeration<String> getKeys () {
-    return new Enumeration<>() {
-        private int idx = 0;
-        { while (idx < 794 && table[idx] == null) idx += 2; }
-        public boolean hasMoreElements () {
-          return (idx < 794);
-        }
-        public String nextElement () {
-          Object key = table[idx];
-          do idx += 2; while (idx < 794 && table[idx] == null);
-          return key.toString();
-        }
-      };
-  }
+    @Override
+    public Enumeration<String> getKeys() {
+        return new Enumeration<>() {
+            private int idx = 0;
 
-  public ResourceBundle getParent () {
-    return parent;
-  }
+            {
+                while (idx < 794 && table[idx] == null) idx += 2;
+            }
+
+            public boolean hasMoreElements() {
+                return (idx < 794);
+            }
+
+            public String nextElement() {
+                Object key = table[idx];
+                do idx += 2; while (idx < 794 && table[idx] == null);
+                return key.toString();
+            }
+        };
+    }
+
+    public ResourceBundle getParent() {
+        return parent;
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/translation/messages_ru.java b/pgjdbc/src/main/java/org/postgresql/translation/messages_ru.java
index f70975a..c0ffd31 100644
--- a/pgjdbc/src/main/java/org/postgresql/translation/messages_ru.java
+++ b/pgjdbc/src/main/java/org/postgresql/translation/messages_ru.java
@@ -5,267 +5,271 @@ import java.util.MissingResourceException;
 import java.util.ResourceBundle;
 
 public class messages_ru extends ResourceBundle {
-  private static final String[] table;
-  static {
-    String[] t = new String[538];
-    t[0] = "";
-    t[1] = "Project-Id-Version: JDBC Driver for PostgreSQL 8.x.x\nReport-Msgid-Bugs-To: \nPO-Revision-Date: 2016-01-07 15:09+0300\nLast-Translator: Vladimir Sitnikov <sitnikov.vladimir@gmail.com>\nLanguage-Team: pgsql-rus <pgsql-rus@yahoogroups.com>\nLanguage: ru_RU\nMIME-Version: 1.0\nContent-Type: text/plain; charset=UTF-8\nContent-Transfer-Encoding: 8bit\nX-Generator: Poedit 1.5.7\n";
-    t[4] = "Server SQLState: {0}";
-    t[5] = "SQLState сервера: {0}";
-    t[14] = "suspend/resume not implemented";
-    t[15] = "Операции XA suspend/resume не реализованы";
-    t[18] = "The array index is out of range: {0}";
-    t[19] = "Индекс массива вне диапазона: {0}";
-    t[28] = "This PooledConnection has already been closed.";
-    t[29] = "Это соединение уже было закрыто";
-    t[30] = "Malformed function or procedure escape syntax at offset {0}.";
-    t[31] = "Невозможно разобрать SQL команду. Ошибка на позиции {0}";
-    t[32] = "The column index is out of range: {0}, number of columns: {1}.";
-    t[33] = "Индекс колонки вне диапазона: {0}. Допустимые значения: 1..{1}";
-    t[34] = "Premature end of input stream, expected {0} bytes, but only read {1}.";
-    t[35] = "Раннее завершение входного потока, ожидалось байт: {0}, но считано только {1}";
-    t[44] = "An I/O error occurred while sending to the backend.";
-    t[45] = "Ошибка ввода/вывода при отправке бэкенду";
-    t[46] = "Prepare called before end. prepare xid={0}, state={1}";
-    t[47] = "Вызов prepare должен происходить только после вызова end. prepare xid={0}, state={1}";
-    t[48] = "Transaction isolation level {0} not supported.";
-    t[49] = "Уровень изоляции транзакций {0} не поддерживается.";
-    t[50] = "Could not find a server with specified targetServerType: {0}";
-    t[51] = "Не удалось найти сервер с указанным значением targetServerType: {0}";
-    t[52] = "Conversion of interval failed";
-    t[53] = "Невозможно обработать PGInterval: {0}";
-    t[54] = "The array index is out of range: {0}, number of elements: {1}.";
-    t[55] = "Индекс массива вне диапазона: {0}. Допустимые значения: 1..{1}";
-    t[62] = "Unsupported value for stringtype parameter: {0}";
-    t[63] = "Неподдерживаемое значение для параметра stringtype: {0}";
-    t[72] = "Invalid stream length {0}.";
-    t[73] = "Неверная длина потока {0}.";
-    t[80] = "Error rolling back prepared transaction. rollback xid={0}, preparedXid={1}, currentXid={2}";
-    t[81] = "Ошибка при откате подготовленной транзакции. rollback xid={0}, preparedXid={1}, currentXid={2}";
-    t[84] = "The driver currently does not support COPY operations.";
-    t[85] = "Драйвер в данный момент не поддерживате операции COPY.";
-    t[94] = "DataSource has been closed.";
-    t[95] = "DataSource закрыт.";
-    t[96] = "Cannot write to copy a byte of value {0}";
-    t[97] = "Значение byte должно быть в диапазоне 0..255, переданное значение: {0}";
-    t[98] = "Fastpath call {0} - No result was returned and we expected a long.";
-    t[99] = "Вызов fastpath {0} ничего не вернул, а ожидалось long";
-    t[100] = "Connection attempt timed out.";
-    t[101] = "Закончилось время ожидания";
-    t[102] = "Detail: {0}";
-    t[103] = "Подробности: {0}";
-    t[104] = "Connection to {0} refused. Check that the hostname and port are correct and that the postmaster is accepting TCP/IP connections.";
-    t[105] = "Подсоединение по адресу {0} отклонено. Проверьте что хост и порт указаны правильно и что postmaster принимает TCP/IP-подсоединения.";
-    t[108] = "This statement has been closed.";
-    t[109] = "Этот statement был закрыт.";
-    t[110] = "Error committing prepared transaction. commit xid={0}, preparedXid={1}, currentXid={2}";
-    t[111] = "Ошибка при фиксации подготовленной транзакции. commit xid={0}, preparedXid={1}, currentXid={2}";
-    t[114] = "Position: {0}";
-    t[115] = "Позиция: {0}";
-    t[116] = "Not implemented: Prepare must be issued using the same connection that started the transaction. currentXid={0}, prepare xid={1}";
-    t[117] = "В каком соединении транзакцию начинали, в таком и вызывайте prepare. По-другому не работает. currentXid={0}, prepare xid={1}";
-    t[118] = "The connection attempt failed.";
-    t[119] = "Ошибка при попытке подсоединения.";
-    t[120] = "Unexpected copydata from server for {0}";
-    t[121] = "Неожиданный статус команды COPY: {0}";
-    t[124] = "Illegal UTF-8 sequence: initial byte is {0}: {1}";
-    t[125] = "Неверная последовательность UTF-8: начальное значеие {0}: {1}";
-    t[128] = "This ResultSet is closed.";
-    t[129] = "ResultSet закрыт.";
-    t[142] = "Not implemented: 2nd phase commit must be issued using an idle connection. commit xid={0}, currentXid={1}, state={2}, transactionState={3}";
-    t[143] = "Духфазная фиксация работает только, если соединение неактивно (state=idle и транзакцция отсутствует). commit xid={0}, currentXid={1}, state={2}, transactionState={3}";
-    t[146] = "Too many update results were returned.";
-    t[147] = "Возвращено слишком много результатов обновления.";
-    t[148] = "An error occurred reading the certificate";
-    t[149] = "Ошибка при чтении сертификата";
-    t[160] = "Unknown type {0}.";
-    t[161] = "Неизвестный тип {0}.";
-    t[172] = "Illegal UTF-8 sequence: {0} bytes used to encode a {1} byte value: {2}";
-    t[173] = "Неверная последовательность UTF-8: {0} bytes used to encode a {1} byte value: {2}";
-    t[182] = "Protocol error.  Session setup failed.";
-    t[183] = "Ошибка протокола.  Установление сессии не удалось.";
-    t[184] = "Connection has been closed.";
-    t[185] = "Это соединение уже было закрыто";
-    t[188] = "This copy stream is closed.";
-    t[189] = "Поток уже был закрыт";
-    t[196] = "Statement has been closed.";
-    t[197] = "Statement закрыт.";
-    t[200] = "Failed to set ClientInfo property: {0}";
-    t[201] = "Невозможно установить свойство ClientInfo: {0}";
-    t[204] = "Where: {0}";
-    t[205] = "Где: {0}";
-    t[212] = "Expected command status BEGIN, got {0}.";
-    t[213] = "Ожидался статус команды BEGIN, но получен {0}";
-    t[216] = "The HostnameVerifier class provided {0} could not be instantiated.";
-    t[217] = "Невозможно создать HostnameVerifier с помощью указанного класса {0}";
-    t[220] = "Unsupported properties: {0}";
-    t[221] = "Указанные свойства не поддерживаются: {0}";
-    t[222] = "Failed to create object for: {0}.";
-    t[223] = "Ошибка при создании объект для: {0}.";
-    t[230] = "Something unusual has occurred to cause the driver to fail. Please report this exception.";
-    t[231] = "Случилось что-то необычное, что заставило драйвер произвести ошибку. Пожалуйста сообщите это исключение.";
-    t[236] = "Finalizing a Connection that was never closed:";
-    t[237] = "Соединение «утекло». Проверьте, что в коде приложения вызывается connection.close(). Далее следует стектрейс того места, где создавалось проблемное соединение";
-    t[238] = "Invalid character data was found.  This is most likely caused by stored data containing characters that are invalid for the character set the database was created in.  The most common example of this is storing 8bit data in a SQL_ASCII database.";
-    t[239] = "Найдены неверные символьные данные.  Причиной этого скорее всего являются хранимые данные содержащие символы не соответствующие набору символов базы.  Типичным примером этого является хранение 8-битных данных в базе SQL_ASCII.";
-    t[252] = "Unable to create SAXResult for SQLXML.";
-    t[253] = "Невозможно создать SAXResult для SQLXML";
-    t[260] = "The SSLSocketFactory class provided {0} could not be instantiated.";
-    t[261] = "Невозможно создать SSLSocketFactory с помощью указанного класса {0}";
-    t[266] = "No IOException expected from StringBuffer or StringBuilder";
-    t[267] = "Что-то пошло не так: из классов StringBuffer и StringBuilder исключений не ожидалось";
-    t[280] = "Interrupted while waiting to obtain lock on database connection";
-    t[281] = "Ожидание COPY блокировки прервано получением interrupt";
-    t[284] = "Zero bytes may not occur in identifiers.";
-    t[285] = "Символ с кодом 0 в идентификаторах не допустим";
-    t[286] = "There are no rows in this ResultSet.";
-    t[287] = "Невозможно удалить строку, т.к. в текущем ResultSet’е строк вообще нет";
-    t[288] = "Expected an EOF from server, got: {0}";
-    t[289] = "Неожиданный ответ от сервера. Ожидалось окончание потока, получен байт {0}";
-    t[304] = "No results were returned by the query.";
-    t[305] = "Запрос не вернул результатов.";
-    t[306] = "Invalid targetServerType value: {0}";
-    t[307] = "Неверное значение targetServerType: {0}";
-    t[310] = "Requested CopyOut but got {0}";
-    t[311] = "Ожидался ответ CopyOut, а получен {0}";
-    t[318] = "Invalid flags {0}";
-    t[319] = "Неверные флаги {0}";
-    t[324] = "Unsupported Types value: {0}";
-    t[325] = "Неподдерживаемый java.sql.Types тип: {0}";
-    t[326] = "Invalid timeout ({0}<0).";
-    t[327] = "Значение таймаута должно быть неотрицательным: {0}";
-    t[328] = "tried to call end without corresponding start call. state={0}, start xid={1}, currentXid={2}, preparedXid={3}";
-    t[329] = "Невозможно завершить транзакцию, т.к. транзакция не была начата. state={0}, start xid={1}, currentXid={2}, preparedXid={3}";
-    t[350] = "A result was returned when none was expected.";
-    t[351] = "Результат возвращён когда его не ожидалось.";
-    t[352] = "Unsupported binary encoding of {0}.";
-    t[353] = "Бинарная передача не поддерживается для типа  {0}";
-    t[354] = "Zero bytes may not occur in string parameters.";
-    t[355] = "Байт с кодом 0 не может втречаться в строковых параметрах";
-    t[360] = "Requested CopyIn but got {0}";
-    t[361] = "Ожидался ответ CopyIn, а получен {0}";
-    t[364] = "Error during one-phase commit. commit xid={0}";
-    t[365] = "Ошибка при однофазной фиксации транзакции. commit xid={0}";
-    t[372] = "Unable to bind parameter values for statement.";
-    t[373] = "Не в состоянии ассоциировать значения параметров для команды (PGBindException)";
-    t[374] = "Interrupted while attempting to connect.";
-    t[375] = "Подключение прервано получаением interrupt";
-    t[380] = "An unexpected result was returned by a query.";
-    t[381] = "Запрос вернул неожиданный результат.";
-    t[384] = "Method {0} is not yet implemented.";
-    t[385] = "Метод {0} ещё не реализован";
-    t[386] = "Location: File: {0}, Routine: {1}, Line: {2}";
-    t[387] = "Местонахождение: Файл {0}, Процедура: {1}, Строка: {2}";
-    t[388] = "The server does not support SSL.";
-    t[389] = "Сервер не поддерживает SSL.";
-    t[392] = "The password callback class provided {0} could not be instantiated.";
-    t[393] = "Невозможно создать password callback с помощью указанного класса {0}";
-    t[396] = "Unknown Types value.";
-    t[397] = "Неизвестное значение Types.";
-    t[400] = "Unknown Response Type {0}.";
-    t[401] = "Неизвестный тип ответа {0}.";
-    t[406] = "commit called before end. commit xid={0}, state={1}";
-    t[407] = "Операция commit должна вызываться только после операции end. commit xid={0}, state={1}";
-    t[420] = "An error occurred while setting up the SSL connection.";
-    t[421] = "Ошибка при установке SSL-подсоединения.";
-    t[424] = "Invalid sslmode value: {0}";
-    t[425] = "Неверное значение sslmode: {0}";
-    t[436] = "Copying from database failed: {0}";
-    t[437] = "Ошибка при обработке ответа команды COPY: {0}";
-    t[438] = "Illegal UTF-8 sequence: final value is out of range: {0}";
-    t[439] = "Неверная последовательность UTF-8: финальное значение вне области допустимых: {0}";
-    t[442] = "Error preparing transaction. prepare xid={0}";
-    t[443] = "Ошибка при выполнении prepare для транзакции {0}";
-    t[450] = "A connection could not be made using the requested protocol {0}.";
-    t[451] = "Невозможно установить соединение с помощью протокола {0}";
-    t[460] = "Invalid protocol state requested. Attempted transaction interleaving is not supported. xid={0}, currentXid={1}, state={2}, flags={3}";
-    t[461] = "Чередование транзакций в одном соединении не поддерживается. Предыдущую транзакцию нужно завершить xid={0}, currentXid={1}, state={2}, flags={3}";
-    t[462] = "Illegal UTF-8 sequence: final value is a surrogate value: {0}";
-    t[463] = "Неверная последовательность UTF-8: финальное значение является surrogate значением: {0}";
-    t[466] = "The column name {0} was not found in this ResultSet.";
-    t[467] = "Колонки {0} не найдено в этом ResultSet’’е.";
-    t[468] = "oid type {0} not known and not a number";
-    t[469] = "Oid {0} не известен или не является числом";
-    t[476] = "Hint: {0}";
-    t[477] = "Подсказка: {0}";
-    t[478] = "Unsupported property name: {0}";
-    t[479] = "Свойство {0} не поддерживается";
-    t[480] = "Ran out of memory retrieving query results.";
-    t[481] = "Недостаточно памяти для обработки результатов запроса. Попробуйте увеличить -Xmx или проверьте размеры обрабатываемых данных";
-    t[484] = "Interval {0} not yet implemented";
-    t[485] = "Интеврвал {0} ещё не реализован";
-    t[486] = "This connection has been closed.";
-    t[487] = "Соединение уже было закрыто";
-    t[488] = "The SocketFactory class provided {0} could not be instantiated.";
-    t[489] = "Невозможно создать SSLSocketFactory с помощью указанного класса {0}";
-    t[490] = "This SQLXML object has already been freed.";
-    t[491] = "Этот объект SQLXML уже был закрыт";
-    t[494] = "Unexpected command status: {0}.";
-    t[495] = "Неожиданный статус команды: {0}.";
-    t[502] = "Large Objects may not be used in auto-commit mode.";
-    t[503] = "Большие объекты не могут использоваться в режиме авто-подтверждения (auto-commit).";
-    t[504] = "Conversion of money failed.";
-    t[505] = "Ошибка при преобразовании типа money.";
-    t[512] = "No value specified for parameter {0}.";
-    t[513] = "Не указано значение для параметра {0}.";
-    t[514] = "The server requested password-based authentication, but no password was provided.";
-    t[515] = "Сервер запросил парольную аутентификацию, но пароль не был указан.";
-    t[518] = "Illegal UTF-8 sequence: byte {0} of {1} byte sequence is not 10xxxxxx: {2}";
-    t[519] = "Неверная последовательность UTF-8: байт {0} из {1} не подходит к маске 10xxxxxx: {2}";
-    t[522] = "Conversion to type {0} failed: {1}.";
-    t[523] = "Ошибка при преобразовании к типу {0}: {1}";
-    t[528] = "The authentication type {0} is not supported. Check that you have configured the pg_hba.conf file to include the client''s IP address or subnet, and that it is using an authentication scheme supported by the driver.";
-    t[529] = "Тип аутентификации {0} не поддерживается. Проверьте если вы сконфигурировали файл pg_hba.conf чтобы включить IP-адреса клиентов или подсеть. Также удостовертесь что он использует схему аутентификации поддерживаемую драйвером.";
-    t[534] = "The parameter index is out of range: {0}, number of parameters: {1}.";
-    t[535] = "Индекс параметра вне диапазона: {0}. Допустимые значения: 1..{1}";
-    table = t;
-  }
+    private static final String[] table;
 
-  @Override
-  public Object handleGetObject (String msgid) throws MissingResourceException {
-    int hash_val = msgid.hashCode() & 0x7fffffff;
-    int idx = (hash_val % 269) << 1;
-    {
-      Object found = table[idx];
-      if (found == null)
-        return null;
-      if (msgid.equals(found))
-        return table[idx + 1];
+    static {
+        String[] t = new String[538];
+        t[0] = "";
+        t[1] = "Project-Id-Version: JDBC Driver for PostgreSQL 8.x.x\nReport-Msgid-Bugs-To: \nPO-Revision-Date: 2016-01-07 15:09+0300\nLast-Translator: Vladimir Sitnikov <sitnikov.vladimir@gmail.com>\nLanguage-Team: pgsql-rus <pgsql-rus@yahoogroups.com>\nLanguage: ru_RU\nMIME-Version: 1.0\nContent-Type: text/plain; charset=UTF-8\nContent-Transfer-Encoding: 8bit\nX-Generator: Poedit 1.5.7\n";
+        t[4] = "Server SQLState: {0}";
+        t[5] = "SQLState сервера: {0}";
+        t[14] = "suspend/resume not implemented";
+        t[15] = "Операции XA suspend/resume не реализованы";
+        t[18] = "The array index is out of range: {0}";
+        t[19] = "Индекс массива вне диапазона: {0}";
+        t[28] = "This PooledConnection has already been closed.";
+        t[29] = "Это соединение уже было закрыто";
+        t[30] = "Malformed function or procedure escape syntax at offset {0}.";
+        t[31] = "Невозможно разобрать SQL команду. Ошибка на позиции {0}";
+        t[32] = "The column index is out of range: {0}, number of columns: {1}.";
+        t[33] = "Индекс колонки вне диапазона: {0}. Допустимые значения: 1..{1}";
+        t[34] = "Premature end of input stream, expected {0} bytes, but only read {1}.";
+        t[35] = "Раннее завершение входного потока, ожидалось байт: {0}, но считано только {1}";
+        t[44] = "An I/O error occurred while sending to the backend.";
+        t[45] = "Ошибка ввода/вывода при отправке бэкенду";
+        t[46] = "Prepare called before end. prepare xid={0}, state={1}";
+        t[47] = "Вызов prepare должен происходить только после вызова end. prepare xid={0}, state={1}";
+        t[48] = "Transaction isolation level {0} not supported.";
+        t[49] = "Уровень изоляции транзакций {0} не поддерживается.";
+        t[50] = "Could not find a server with specified targetServerType: {0}";
+        t[51] = "Не удалось найти сервер с указанным значением targetServerType: {0}";
+        t[52] = "Conversion of interval failed";
+        t[53] = "Невозможно обработать PGInterval: {0}";
+        t[54] = "The array index is out of range: {0}, number of elements: {1}.";
+        t[55] = "Индекс массива вне диапазона: {0}. Допустимые значения: 1..{1}";
+        t[62] = "Unsupported value for stringtype parameter: {0}";
+        t[63] = "Неподдерживаемое значение для параметра stringtype: {0}";
+        t[72] = "Invalid stream length {0}.";
+        t[73] = "Неверная длина потока {0}.";
+        t[80] = "Error rolling back prepared transaction. rollback xid={0}, preparedXid={1}, currentXid={2}";
+        t[81] = "Ошибка при откате подготовленной транзакции. rollback xid={0}, preparedXid={1}, currentXid={2}";
+        t[84] = "The driver currently does not support COPY operations.";
+        t[85] = "Драйвер в данный момент не поддерживает операции COPY.";
+        t[94] = "DataSource has been closed.";
+        t[95] = "DataSource закрыт.";
+        t[96] = "Cannot write to copy a byte of value {0}";
+        t[97] = "Значение byte должно быть в диапазоне 0..255, переданное значение: {0}";
+        t[98] = "Fastpath call {0} - No result was returned and we expected a long.";
+        t[99] = "Вызов fastpath {0} ничего не вернул, а ожидалось long";
+        t[100] = "Connection attempt timed out.";
+        t[101] = "Закончилось время ожидания";
+        t[102] = "Detail: {0}";
+        t[103] = "Подробности: {0}";
+        t[104] = "Connection to {0} refused. Check that the hostname and port are correct and that the postmaster is accepting TCP/IP connections.";
+        t[105] = "Подсоединение по адресу {0} отклонено. Проверьте что хост и порт указаны правильно и что postmaster принимает TCP/IP-подсоединения.";
+        t[108] = "This statement has been closed.";
+        t[109] = "Этот statement был закрыт.";
+        t[110] = "Error committing prepared transaction. commit xid={0}, preparedXid={1}, currentXid={2}";
+        t[111] = "Ошибка при фиксации подготовленной транзакции. commit xid={0}, preparedXid={1}, currentXid={2}";
+        t[114] = "Position: {0}";
+        t[115] = "Позиция: {0}";
+        t[116] = "Not implemented: Prepare must be issued using the same connection that started the transaction. currentXid={0}, prepare xid={1}";
+        t[117] = "В каком соединении транзакцию начинали, в таком и вызывайте prepare. По-другому не работает. currentXid={0}, prepare xid={1}";
+        t[118] = "The connection attempt failed.";
+        t[119] = "Ошибка при попытке подсоединения.";
+        t[120] = "Unexpected copydata from server for {0}";
+        t[121] = "Неожиданный статус команды COPY: {0}";
+        t[124] = "Illegal UTF-8 sequence: initial byte is {0}: {1}";
+        t[125] = "Неверная последовательность UTF-8: начальное значение {0}: {1}";
+        t[128] = "This ResultSet is closed.";
+        t[129] = "ResultSet закрыт.";
+        t[142] = "Not implemented: 2nd phase commit must be issued using an idle connection. commit xid={0}, currentXid={1}, state={2}, transactionState={3}";
+        t[143] = "Двухфазная фиксация работает только, если соединение неактивно (state=idle и транзакция отсутствует). commit xid={0}, currentXid={1}, state={2}, transactionState={3}";
+        t[146] = "Too many update results were returned.";
+        t[147] = "Возвращено слишком много результатов обновления.";
+        t[148] = "An error occurred reading the certificate";
+        t[149] = "Ошибка при чтении сертификата";
+        t[160] = "Unknown type {0}.";
+        t[161] = "Неизвестный тип {0}.";
+        t[172] = "Illegal UTF-8 sequence: {0} bytes used to encode a {1} byte value: {2}";
+        t[173] = "Неверная последовательность UTF-8: {0} bytes used to encode a {1} byte value: {2}";
+        t[182] = "Protocol error.  Session setup failed.";
+        t[183] = "Ошибка протокола.  Установление сессии не удалось.";
+        t[184] = "Connection has been closed.";
+        t[185] = "Это соединение уже было закрыто";
+        t[188] = "This copy stream is closed.";
+        t[189] = "Поток уже был закрыт";
+        t[196] = "Statement has been closed.";
+        t[197] = "Statement закрыт.";
+        t[200] = "Failed to set ClientInfo property: {0}";
+        t[201] = "Невозможно установить свойство ClientInfo: {0}";
+        t[204] = "Where: {0}";
+        t[205] = "Где: {0}";
+        t[212] = "Expected command status BEGIN, got {0}.";
+        t[213] = "Ожидался статус команды BEGIN, но получен {0}";
+        t[216] = "The HostnameVerifier class provided {0} could not be instantiated.";
+        t[217] = "Невозможно создать HostnameVerifier с помощью указанного класса {0}";
+        t[220] = "Unsupported properties: {0}";
+        t[221] = "Указанные свойства не поддерживаются: {0}";
+        t[222] = "Failed to create object for: {0}.";
+        t[223] = "Ошибка при создании объект для: {0}.";
+        t[230] = "Something unusual has occurred to cause the driver to fail. Please report this exception.";
+        t[231] = "Случилось что-то необычное, что заставило драйвер произвести ошибку. Пожалуйста сообщите это исключение.";
+        t[236] = "Finalizing a Connection that was never closed:";
+        t[237] = "Соединение «утекло». Проверьте, что в коде приложения вызывается connection.close(). Далее следует стектрейс того места, где создавалось проблемное соединение";
+        t[238] = "Invalid character data was found.  This is most likely caused by stored data containing characters that are invalid for the character set the database was created in.  The most common example of this is storing 8bit data in a SQL_ASCII database.";
+        t[239] = "Найдены неверные символьные данные.  Причиной этого скорее всего являются хранимые данные содержащие символы не соответствующие набору символов базы.  Типичным примером этого является хранение 8-битных данных в базе SQL_ASCII.";
+        t[252] = "Unable to create SAXResult for SQLXML.";
+        t[253] = "Невозможно создать SAXResult для SQLXML";
+        t[260] = "The SSLSocketFactory class provided {0} could not be instantiated.";
+        t[261] = "Невозможно создать SSLSocketFactory с помощью указанного класса {0}";
+        t[266] = "No IOException expected from StringBuffer or StringBuilder";
+        t[267] = "Что-то пошло не так: из классов StringBuffer и StringBuilder исключений не ожидалось";
+        t[280] = "Interrupted while waiting to obtain lock on database connection";
+        t[281] = "Ожидание COPY блокировки прервано получением interrupt";
+        t[284] = "Zero bytes may not occur in identifiers.";
+        t[285] = "Символ с кодом 0 в идентификаторах не допустим";
+        t[286] = "There are no rows in this ResultSet.";
+        t[287] = "Невозможно удалить строку, т.к. в текущем ResultSet’е строк вообще нет";
+        t[288] = "Expected an EOF from server, got: {0}";
+        t[289] = "Неожиданный ответ от сервера. Ожидалось окончание потока, получен байт {0}";
+        t[304] = "No results were returned by the query.";
+        t[305] = "Запрос не вернул результатов.";
+        t[306] = "Invalid targetServerType value: {0}";
+        t[307] = "Неверное значение targetServerType: {0}";
+        t[310] = "Requested CopyOut but got {0}";
+        t[311] = "Ожидался ответ CopyOut, а получен {0}";
+        t[318] = "Invalid flags {0}";
+        t[319] = "Неверные флаги {0}";
+        t[324] = "Unsupported Types value: {0}";
+        t[325] = "Неподдерживаемый java.sql.Types тип: {0}";
+        t[326] = "Invalid timeout ({0}<0).";
+        t[327] = "Значение таймаута должно быть неотрицательным: {0}";
+        t[328] = "tried to call end without corresponding start call. state={0}, start xid={1}, currentXid={2}, preparedXid={3}";
+        t[329] = "Невозможно завершить транзакцию, т.к. транзакция не была начата. state={0}, start xid={1}, currentXid={2}, preparedXid={3}";
+        t[350] = "A result was returned when none was expected.";
+        t[351] = "Результат возвращён когда его не ожидалось.";
+        t[352] = "Unsupported binary encoding of {0}.";
+        t[353] = "Бинарная передача не поддерживается для типа  {0}";
+        t[354] = "Zero bytes may not occur in string parameters.";
+        t[355] = "Байт с кодом 0 не может встречаться в строковых параметрах";
+        t[360] = "Requested CopyIn but got {0}";
+        t[361] = "Ожидался ответ CopyIn, а получен {0}";
+        t[364] = "Error during one-phase commit. commit xid={0}";
+        t[365] = "Ошибка при однофазной фиксации транзакции. commit xid={0}";
+        t[372] = "Unable to bind parameter values for statement.";
+        t[373] = "Не в состоянии ассоциировать значения параметров для команды (PGBindException)";
+        t[374] = "Interrupted while attempting to connect.";
+        t[375] = "Подключение прервано получением interrupt";
+        t[380] = "An unexpected result was returned by a query.";
+        t[381] = "Запрос вернул неожиданный результат.";
+        t[384] = "Method {0} is not yet implemented.";
+        t[385] = "Метод {0} ещё не реализован";
+        t[386] = "Location: File: {0}, Routine: {1}, Line: {2}";
+        t[387] = "Местонахождение: Файл {0}, Процедура: {1}, Строка: {2}";
+        t[388] = "The server does not support SSL.";
+        t[389] = "Сервер не поддерживает SSL.";
+        t[392] = "The password callback class provided {0} could not be instantiated.";
+        t[393] = "Невозможно создать password callback с помощью указанного класса {0}";
+        t[396] = "Unknown Types value.";
+        t[397] = "Неизвестное значение Types.";
+        t[400] = "Unknown Response Type {0}.";
+        t[401] = "Неизвестный тип ответа {0}.";
+        t[406] = "commit called before end. commit xid={0}, state={1}";
+        t[407] = "Операция commit должна вызываться только после операции end. commit xid={0}, state={1}";
+        t[420] = "An error occurred while setting up the SSL connection.";
+        t[421] = "Ошибка при установке SSL-подсоединения.";
+        t[424] = "Invalid sslmode value: {0}";
+        t[425] = "Неверное значение sslmode: {0}";
+        t[436] = "Copying from database failed: {0}";
+        t[437] = "Ошибка при обработке ответа команды COPY: {0}";
+        t[438] = "Illegal UTF-8 sequence: final value is out of range: {0}";
+        t[439] = "Неверная последовательность UTF-8: финальное значение вне области допустимых: {0}";
+        t[442] = "Error preparing transaction. prepare xid={0}";
+        t[443] = "Ошибка при выполнении prepare для транзакции {0}";
+        t[450] = "A connection could not be made using the requested protocol {0}.";
+        t[451] = "Невозможно установить соединение с помощью протокола {0}";
+        t[460] = "Invalid protocol state requested. Attempted transaction interleaving is not supported. xid={0}, currentXid={1}, state={2}, flags={3}";
+        t[461] = "Чередование транзакций в одном соединении не поддерживается. Предыдущую транзакцию нужно завершить xid={0}, currentXid={1}, state={2}, flags={3}";
+        t[462] = "Illegal UTF-8 sequence: final value is a surrogate value: {0}";
+        t[463] = "Неверная последовательность UTF-8: финальное значение является surrogate значением: {0}";
+        t[466] = "The column name {0} was not found in this ResultSet.";
+        t[467] = "Колонки {0} не найдено в этом ResultSet’’е.";
+        t[468] = "oid type {0} not known and not a number";
+        t[469] = "Oid {0} не известен или не является числом";
+        t[476] = "Hint: {0}";
+        t[477] = "Подсказка: {0}";
+        t[478] = "Unsupported property name: {0}";
+        t[479] = "Свойство {0} не поддерживается";
+        t[480] = "Ran out of memory retrieving query results.";
+        t[481] = "Недостаточно памяти для обработки результатов запроса. Попробуйте увеличить -Xmx или проверьте размеры обрабатываемых данных";
+        t[484] = "Interval {0} not yet implemented";
+        t[485] = "Интервал {0} ещё не реализован";
+        t[486] = "This connection has been closed.";
+        t[487] = "Соединение уже было закрыто";
+        t[488] = "The SocketFactory class provided {0} could not be instantiated.";
+        t[489] = "Невозможно создать SSLSocketFactory с помощью указанного класса {0}";
+        t[490] = "This SQLXML object has already been freed.";
+        t[491] = "Этот объект SQLXML уже был закрыт";
+        t[494] = "Unexpected command status: {0}.";
+        t[495] = "Неожиданный статус команды: {0}.";
+        t[502] = "Large Objects may not be used in auto-commit mode.";
+        t[503] = "Большие объекты не могут использоваться в режиме авто-подтверждения (auto-commit).";
+        t[504] = "Conversion of money failed.";
+        t[505] = "Ошибка при преобразовании типа money.";
+        t[512] = "No value specified for parameter {0}.";
+        t[513] = "Не указано значение для параметра {0}.";
+        t[514] = "The server requested password-based authentication, but no password was provided.";
+        t[515] = "Сервер запросил парольную аутентификацию, но пароль не был указан.";
+        t[518] = "Illegal UTF-8 sequence: byte {0} of {1} byte sequence is not 10xxxxxx: {2}";
+        t[519] = "Неверная последовательность UTF-8: байт {0} из {1} не подходит к маске 10xxxxxx: {2}";
+        t[522] = "Conversion to type {0} failed: {1}.";
+        t[523] = "Ошибка при преобразовании к типу {0}: {1}";
+        t[528] = "The authentication type {0} is not supported. Check that you have configured the pg_hba.conf file to include the client''s IP address or subnet, and that it is using an authentication scheme supported by the driver.";
+        t[529] = "Тип аутентификации {0} не поддерживается. Проверьте если вы сконфигурировали файл pg_hba.conf чтобы включить IP-адреса клиентов или подсеть. Также удостоверьтесь что он использует схему аутентификации поддерживаемую драйвером.";
+        t[534] = "The parameter index is out of range: {0}, number of parameters: {1}.";
+        t[535] = "Индекс параметра вне диапазона: {0}. Допустимые значения: 1..{1}";
+        table = t;
     }
-    int incr = ((hash_val % 267) + 1) << 1;
-    for (;;) {
-      idx += incr;
-      if (idx >= 538)
-        idx -= 538;
-      Object found = table[idx];
-      if (found == null)
-        return null;
-      if (msgid.equals(found))
-        return table[idx + 1];
+
+    @Override
+    public Object handleGetObject(String msgid) throws MissingResourceException {
+        int hash_val = msgid.hashCode() & 0x7fffffff;
+        int idx = (hash_val % 269) << 1;
+        {
+            Object found = table[idx];
+            if (found == null)
+                return null;
+            if (msgid.equals(found))
+                return table[idx + 1];
+        }
+        int incr = ((hash_val % 267) + 1) << 1;
+        for (; ; ) {
+            idx += incr;
+            if (idx >= 538)
+                idx -= 538;
+            Object found = table[idx];
+            if (found == null)
+                return null;
+            if (msgid.equals(found))
+                return table[idx + 1];
+        }
     }
-  }
 
-  @Override
-  public Enumeration<String> getKeys () {
-    return new Enumeration<>() {
-        private int idx = 0;
-        { while (idx < 538 && table[idx] == null) idx += 2; }
+    @Override
+    public Enumeration<String> getKeys() {
+        return new Enumeration<>() {
+            private int idx = 0;
 
-      @Override
-        public boolean hasMoreElements () {
-          return (idx < 538);
-        }
+            {
+                while (idx < 538 && table[idx] == null) idx += 2;
+            }
 
-      @Override
-        public String nextElement () {
-          Object key = table[idx];
-          do idx += 2; while (idx < 538 && table[idx] == null);
-          return key.toString();
-        }
-      };
-  }
+            @Override
+            public boolean hasMoreElements() {
+                return (idx < 538);
+            }
 
-  public ResourceBundle getParent () {
-    return parent;
-  }
+            @Override
+            public String nextElement() {
+                Object key = table[idx];
+                do idx += 2; while (idx < 538 && table[idx] == null);
+                return key.toString();
+            }
+        };
+    }
+
+    public ResourceBundle getParent() {
+        return parent;
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/translation/messages_sr.java b/pgjdbc/src/main/java/org/postgresql/translation/messages_sr.java
index 2fcac2e..0136993 100644
--- a/pgjdbc/src/main/java/org/postgresql/translation/messages_sr.java
+++ b/pgjdbc/src/main/java/org/postgresql/translation/messages_sr.java
@@ -5,397 +5,401 @@ import java.util.MissingResourceException;
 import java.util.ResourceBundle;
 
 public class messages_sr extends ResourceBundle {
-  private static final String[] table;
-  static {
-    String[] t = new String[794];
-    t[0] = "";
-    t[1] = "Project-Id-Version: PostgreSQL 8.1\nReport-Msgid-Bugs-To: \nPO-Revision-Date: 2009-05-26 11:13+0100\nLast-Translator: Bojan Škaljac <skaljac (at) gmail.com>\nLanguage-Team: Srpski <skaljac@gmail.com>\nLanguage: \nMIME-Version: 1.0\nContent-Type: text/plain; charset=UTF-8\nContent-Transfer-Encoding: 8bit\nX-Poedit-Language: Serbian\nX-Poedit-Country: YUGOSLAVIA\n";
-    t[2] = "Not implemented: 2nd phase commit must be issued using an idle connection. commit xid={0}, currentXid={1}, state={2}, transactionState={3}";
-    t[3] = "Nije implementirano: Dvofazni commit mora biti izdat uz korištenje besposlene konekcije. commit xid={0}, currentXid={1}, state={2}, transactionState={3}";
-    t[4] = "DataSource has been closed.";
-    t[5] = "DataSource je zatvoren.";
-    t[8] = "Invalid flags {0}";
-    t[9] = "Nevažeće zastavice {0}";
-    t[18] = "Where: {0}";
-    t[19] = "Gde: {0}";
-    t[24] = "Unknown XML Source class: {0}";
-    t[25] = "Nepoznata XML ulazna klasa: {0}";
-    t[26] = "The connection attempt failed.";
-    t[27] = "Pokušaj konektovanja propao.";
-    t[28] = "Currently positioned after the end of the ResultSet.  You cannot call deleteRow() here.";
-    t[29] = "Trenutna pozicija posle kraja ResultSet-a.  Ne možete pozvati deleteRow() na toj poziciji.";
-    t[32] = "Can''t use query methods that take a query string on a PreparedStatement.";
-    t[33] = "Ne možete da koristite metode za upit koji uzimaju string iz upita u PreparedStatement-u.";
-    t[36] = "Multiple ResultSets were returned by the query.";
-    t[37] = "Višestruki ResultSet-vi su vraćeni od strane upita.";
-    t[50] = "Too many update results were returned.";
-    t[51] = "Previše rezultata za ažuriranje je vraćeno.";
-    t[58] = "Illegal UTF-8 sequence: initial byte is {0}: {1}";
-    t[59] = "Ilegalna UTF-8 sekvenca: inicijalni bajt je {0}: {1}";
-    t[66] = "The column name {0} was not found in this ResultSet.";
-    t[67] = "Ime kolone {0} nije pronadjeno u ResultSet.";
-    t[70] = "Fastpath call {0} - No result was returned and we expected an integer.";
-    t[71] = "Fastpath poziv {0} - Nikakav rezultat nije vraćen a očekivan je integer.";
-    t[74] = "Protocol error.  Session setup failed.";
-    t[75] = "Greška protokola.  Zakazivanje sesije propalo.";
-    t[76] = "A CallableStatement was declared, but no call to registerOutParameter(1, <some type>) was made.";
-    t[77] = "CallableStatement jedeklarisan ali nije bilo poziva registerOutParameter (1, <neki_tip>).";
-    t[78] = "ResultSets with concurrency CONCUR_READ_ONLY cannot be updated.";
-    t[79] = "ResultSets sa osobinom CONCUR_READ_ONLY ne moeže biti ažuriran.";
-    t[90] = "LOB positioning offsets start at 1.";
-    t[91] = "LOB pozicija ofset počinje kod 1.";
-    t[92] = "Internal Position: {0}";
-    t[93] = "Interna pozicija: {0}";
-    t[96] = "free() was called on this LOB previously";
-    t[97] = "free() je pozvan na ovom LOB-u prethodno";
-    t[100] = "Cannot change transaction read-only property in the middle of a transaction.";
-    t[101] = "Nije moguće izmeniti read-only osobinu transakcije u sred izvršavanja transakcije.";
-    t[102] = "The JVM claims not to support the {0} encoding.";
-    t[103] = "JVM tvrdi da ne podržava {0} encoding.";
-    t[108] = "{0} function doesn''t take any argument.";
-    t[109] = "Funkcija {0} nema parametara.";
-    t[112] = "xid must not be null";
-    t[113] = "xid ne sme biti null";
-    t[114] = "Connection has been closed.";
-    t[115] = "Konekcija je već zatvorena.";
-    t[122] = "The server does not support SSL.";
-    t[123] = "Server ne podržava SSL.";
-    t[124] = "Custom type maps are not supported.";
-    t[125] = "Mape sa korisnički definisanim tipovima nisu podržane.";
-    t[140] = "Illegal UTF-8 sequence: byte {0} of {1} byte sequence is not 10xxxxxx: {2}";
-    t[141] = "Ilegalna UTF-8 sekvenca: bajt {0} od {1} bajtova sekvence nije 10xxxxxx: {2}";
-    t[148] = "Hint: {0}";
-    t[149] = "Nagovest: {0}";
-    t[152] = "Unable to find name datatype in the system catalogs.";
-    t[153] = "Nije moguće pronaći ime tipa podatka u sistemskom katalogu.";
-    t[156] = "Unsupported Types value: {0}";
-    t[157] = "Za tip nije podržana vrednost: {0}";
-    t[158] = "Unknown type {0}.";
-    t[159] = "Nepoznat tip {0}.";
-    t[166] = "{0} function takes two and only two arguments.";
-    t[167] = "Funkcija {0} prima dva i samo dva parametra.";
-    t[170] = "Finalizing a Connection that was never closed:";
-    t[171] = "Dovršavanje konekcije koja nikada nije zatvorena:";
-    t[180] = "The maximum field size must be a value greater than or equal to 0.";
-    t[181] = "Maksimalna vrednost veličine polja mora biti vrednost veća ili jednaka 0.";
-    t[186] = "PostgreSQL LOBs can only index to: {0}";
-    t[187] = "PostgreSQL LOB mogu jedino da označavaju: {0}";
-    t[194] = "Method {0} is not yet implemented.";
-    t[195] = "Metod {0} nije još impelemtiran.";
-    t[198] = "Error loading default settings from driverconfig.properties";
-    t[199] = "Greška u čitanju standardnih podešavanja iz driverconfig.properties";
-    t[200] = "Results cannot be retrieved from a CallableStatement before it is executed.";
-    t[201] = "Razultat nemože da se primi iz CallableStatement pre nego što se on izvrši.";
-    t[202] = "Large Objects may not be used in auto-commit mode.";
-    t[203] = "Veliki objekti (Large Object) se nemogu koristiti u auto-commit modu.";
-    t[208] = "Expected command status BEGIN, got {0}.";
-    t[209] = "Očekivan status komande je BEGIN, a dobijeno je {0}.";
-    t[218] = "Invalid fetch direction constant: {0}.";
-    t[219] = "Pogrešna konstanta za direkciju donošenja: {0}.";
-    t[222] = "{0} function takes three and only three arguments.";
-    t[223] = "Funkcija {0} prima tri i samo tri parametra.";
-    t[226] = "This SQLXML object has already been freed.";
-    t[227] = "Ovaj SQLXML je već obrisan.";
-    t[228] = "Cannot update the ResultSet because it is either before the start or after the end of the results.";
-    t[229] = "Nije moguće ažurirati ResultSet zato što je ili početak ili kraj rezultata.";
-    t[230] = "The JVM claims not to support the encoding: {0}";
-    t[231] = "JVM tvrdi da ne podržava encoding: {0}";
-    t[232] = "Parameter of type {0} was registered, but call to get{1} (sqltype={2}) was made.";
-    t[233] = "Parametar tipa {0} je registrovan,ali poziv za get{1} (sql tip={2}) je izvršen.";
-    t[234] = "Error rolling back prepared transaction. rollback xid={0}, preparedXid={1}, currentXid={2}";
-    t[235] = "Greška prilikom povratka na prethodo pripremljenu transakciju. rollback xid={0}, preparedXid={1}, currentXid={2}";
-    t[240] = "Cannot establish a savepoint in auto-commit mode.";
-    t[241] = "U auto-commit modu nije moguće podešavanje tački snimanja.";
-    t[242] = "Cannot retrieve the id of a named savepoint.";
-    t[243] = "Nije moguće primiti id imena tačke snimanja.";
-    t[244] = "The column index is out of range: {0}, number of columns: {1}.";
-    t[245] = "Indeks kolone van osega: {0}, broj kolona: {1}.";
-    t[250] = "Something unusual has occurred to cause the driver to fail. Please report this exception.";
-    t[251] = "Nešto neobično se dogodilo i drajver je zakazao. Molim prijavite ovaj izuzetak.";
-    t[260] = "Cannot cast an instance of {0} to type {1}";
-    t[261] = "Nije moguće kastovati instancu {0} u tip {1}";
-    t[264] = "Unknown Types value.";
-    t[265] = "Nepoznata vrednost za Types.";
-    t[266] = "Invalid stream length {0}.";
-    t[267] = "Nevažeća dužina toka {0}.";
-    t[272] = "Cannot retrieve the name of an unnamed savepoint.";
-    t[273] = "Nije moguće izvaditi ime tačke snimanja koja nema ime.";
-    t[274] = "Unable to translate data into the desired encoding.";
-    t[275] = "Nije moguće prevesti podatke u odabrani encoding format.";
-    t[276] = "Expected an EOF from server, got: {0}";
-    t[277] = "Očekivan EOF od servera, a dobijeno: {0}";
-    t[278] = "Bad value for type {0} : {1}";
-    t[279] = "Pogrešna vrednost za tip {0} : {1}";
-    t[280] = "The server requested password-based authentication, but no password was provided.";
-    t[281] = "Server zahteva autentifikaciju baziranu na šifri, ali šifra nije prosleđena.";
-    t[286] = "Unable to create SAXResult for SQLXML.";
-    t[287] = "Nije moguće kreirati SAXResult za SQLXML.";
-    t[292] = "Error during recover";
-    t[293] = "Greška prilikom oporavljanja.";
-    t[294] = "tried to call end without corresponding start call. state={0}, start xid={1}, currentXid={2}, preparedXid={3}";
-    t[295] = "Pokušaj pozivanja kraja pre odgovarajućeg početka. state={0}, start xid={1}, currentXid={2}, preparedXid={3}";
-    t[296] = "Truncation of large objects is only implemented in 8.3 and later servers.";
-    t[297] = "Skraćivanje velikih objekata je implementirano samo u 8.3 i novijim serverima.";
-    t[298] = "This PooledConnection has already been closed.";
-    t[299] = "PooledConnection je već zatvoren.";
-    t[302] = "ClientInfo property not supported.";
-    t[303] = "ClientInfo property nije podržan.";
-    t[306] = "Fetch size must be a value greater to or equal to 0.";
-    t[307] = "Doneta veličina mora biti vrednost veća ili jednaka 0.";
-    t[312] = "A connection could not be made using the requested protocol {0}.";
-    t[313] = "Konekciju nije moguće kreirati uz pomoć protokola {0}.";
-    t[318] = "Unknown XML Result class: {0}";
-    t[319] = "nepoznata XML klasa rezultata: {0}";
-    t[322] = "There are no rows in this ResultSet.";
-    t[323] = "U ResultSet-u nema redova.";
-    t[324] = "Unexpected command status: {0}.";
-    t[325] = "Neočekivan komandni status: {0}.";
-    t[330] = "Heuristic commit/rollback not supported. forget xid={0}";
-    t[331] = "Heuristički commit/rollback nije podržan. forget xid={0}";
-    t[334] = "Not on the insert row.";
-    t[335] = "Nije mod ubacivanja redova.";
-    t[336] = "This SQLXML object has already been initialized, so you cannot manipulate it further.";
-    t[337] = "SQLXML objekat je već inicijalizovan, tako da ga nije moguće dodatno menjati.";
-    t[344] = "Server SQLState: {0}";
-    t[345] = "SQLState servera: {0}";
-    t[348] = "The server''s standard_conforming_strings parameter was reported as {0}. The JDBC driver expected on or off.";
-    t[349] = "Serverov standard_conforming_strings parametar javlja {0}. JDBC drajver ocekuje on ili off.";
-    t[360] = "The driver currently does not support COPY operations.";
-    t[361] = "Drajver trenutno ne podržava COPY operacije.";
-    t[364] = "The array index is out of range: {0}, number of elements: {1}.";
-    t[365] = "Indeks niza je van opsega: {0}, broj elemenata: {1}.";
-    t[374] = "suspend/resume not implemented";
-    t[375] = "obustavljanje/nastavljanje nije implementirano.";
-    t[378] = "Not implemented: one-phase commit must be issued using the same connection that was used to start it";
-    t[379] = "Nije implementirano: Commit iz jedne faze mora biti izdat uz korištenje iste konekcije koja je korištena za startovanje.";
-    t[380] = "Error during one-phase commit. commit xid={0}";
-    t[381] = "Kreška prilikom commit-a iz jedne faze. commit xid={0}";
-    t[398] = "Cannot call cancelRowUpdates() when on the insert row.";
-    t[399] = "Nije moguće pozvati cancelRowUpdates() prilikom ubacivanja redova.";
-    t[400] = "Cannot reference a savepoint after it has been released.";
-    t[401] = "Nije moguće referenciranje tačke snimanja nakon njenog oslobađanja.";
-    t[402] = "You must specify at least one column value to insert a row.";
-    t[403] = "Morate specificirati barem jednu vrednost za kolonu da bi ste ubacili red.";
-    t[404] = "Unable to determine a value for MaxIndexKeys due to missing system catalog data.";
-    t[405] = "Nije moguće odrediti vrednost za MaxIndexKezs zbog nedostatka podataka u sistemskom katalogu.";
-    t[412] = "Illegal UTF-8 sequence: final value is out of range: {0}";
-    t[413] = "Ilegalna UTF-8 sekvenca: finalna vrednost je van opsega: {0}";
-    t[414] = "{0} function takes two or three arguments.";
-    t[415] = "Funkcija {0} prima dva ili tri parametra.";
-    t[428] = "Unable to convert DOMResult SQLXML data to a string.";
-    t[429] = "Nije moguće konvertovati DOMResult SQLXML podatke u string.";
-    t[434] = "Unable to decode xml data.";
-    t[435] = "Neuspešno dekodiranje XML podataka.";
-    t[440] = "Unexpected error writing large object to database.";
-    t[441] = "Neočekivana greška prilikom upisa velikog objekta u bazu podataka.";
-    t[442] = "Zero bytes may not occur in string parameters.";
-    t[443] = "Nula bajtovji se ne smeju pojavljivati u string parametrima.";
-    t[444] = "A result was returned when none was expected.";
-    t[445] = "Rezultat vraćen ali nikakav rezultat nije očekivan.";
-    t[450] = "ResultSet is not updateable.  The query that generated this result set must select only one table, and must select all primary keys from that table. See the JDBC 2.1 API Specification, section 5.6 for more details.";
-    t[451] = "ResultSet nije moguće ažurirati. Upit koji je generisao ovaj razultat mora selektoati jedino tabelu,i mora selektovati sve primrne ključeve iz te tabele. Pogledajte API specifikaciju za JDBC 2.1, sekciju 5.6 za više detalja.";
-    t[454] = "Bind message length {0} too long.  This can be caused by very large or incorrect length specifications on InputStream parameters.";
-    t[455] = "Dužina vezivne poruke {0} prevelika.  Ovo je možda rezultat veoma velike ili pogrešne dužine specifikacije za InputStream parametre.";
-    t[460] = "Statement has been closed.";
-    t[461] = "Statemen je već zatvoren.";
-    t[462] = "No value specified for parameter {0}.";
-    t[463] = "Nije zadata vrednost za parametar {0}.";
-    t[468] = "The array index is out of range: {0}";
-    t[469] = "Indeks niza je van opsega: {0}";
-    t[474] = "Unable to bind parameter values for statement.";
-    t[475] = "Nije moguće naći vrednost vezivnog parametra za izjavu (statement).";
-    t[476] = "Can''t refresh the insert row.";
-    t[477] = "Nije moguće osvežiti ubačeni red.";
-    t[480] = "No primary key found for table {0}.";
-    t[481] = "Nije pronađen ključ za tabelu {0}.";
-    t[482] = "Cannot change transaction isolation level in the middle of a transaction.";
-    t[483] = "Nije moguće izmeniti nivo izolacije transakcije u sred izvršavanja transakcije.";
-    t[498] = "Provided InputStream failed.";
-    t[499] = "Pribaljeni InputStream zakazao.";
-    t[500] = "The parameter index is out of range: {0}, number of parameters: {1}.";
-    t[501] = "Index parametra je van opsega: {0}, broj parametara je: {1}.";
-    t[502] = "The server''s DateStyle parameter was changed to {0}. The JDBC driver requires DateStyle to begin with ISO for correct operation.";
-    t[503] = "Serverov DataStyle parametar promenjen u {0}. JDBC zahteva da DateStyle počinje sa ISO za uspešno završavanje operacije.";
-    t[508] = "Connection attempt timed out.";
-    t[509] = "Isteklo je vreme za pokušaj konektovanja.";
-    t[512] = "Internal Query: {0}";
-    t[513] = "Interni upit: {0}";
-    t[514] = "Error preparing transaction. prepare xid={0}";
-    t[515] = "Greška u pripremanju transakcije. prepare xid={0}";
-    t[518] = "The authentication type {0} is not supported. Check that you have configured the pg_hba.conf file to include the client''s IP address or subnet, and that it is using an authentication scheme supported by the driver.";
-    t[519] = "Tip autentifikacije {0} nije podržan. Proverite dali imate podešen pg_hba.conf fajl koji uključuje klijentovu IP adresu ili podmrežu, i da ta mreža koristi šemu autentifikacije koja je podržana od strane ovog drajvera.";
-    t[526] = "Interval {0} not yet implemented";
-    t[527] = "Interval {0} još nije implementiran.";
-    t[532] = "Conversion of interval failed";
-    t[533] = "Konverzija intervala propala.";
-    t[540] = "Query timeout must be a value greater than or equals to 0.";
-    t[541] = "Tajm-aut mora biti vrednost veća ili jednaka 0.";
-    t[542] = "Connection has been closed automatically because a new connection was opened for the same PooledConnection or the PooledConnection has been closed.";
-    t[543] = "Konekcija je zatvorena automatski zato što je nova konekcija otvorena za isti PooledConnection ili je PooledConnection zatvoren.";
-    t[544] = "ResultSet not positioned properly, perhaps you need to call next.";
-    t[545] = "ResultSet nije pravilno pozicioniran, možda je potrebno da pozovete next.";
-    t[546] = "Prepare called before end. prepare xid={0}, state={1}";
-    t[547] = "Pripremanje poziva pre kraja. prepare xid={0}, state={1}";
-    t[548] = "Invalid UUID data.";
-    t[549] = "Nevažeća UUID podatak.";
-    t[550] = "This statement has been closed.";
-    t[551] = "Statement je zatvoren.";
-    t[552] = "Can''t infer the SQL type to use for an instance of {0}. Use setObject() with an explicit Types value to specify the type to use.";
-    t[553] = "Nije moguće zaključiti SQL tip koji bi se koristio sa instancom {0}. Koristite setObject() sa zadatim eksplicitnim tipom vrednosti.";
-    t[554] = "Cannot call updateRow() when on the insert row.";
-    t[555] = "Nije moguće pozvati updateRow() prilikom ubacivanja redova.";
-    t[562] = "Detail: {0}";
-    t[563] = "Detalji: {0}";
-    t[566] = "Cannot call deleteRow() when on the insert row.";
-    t[567] = "Nije moguće pozvati deleteRow() prilikom ubacivanja redova.";
-    t[568] = "Currently positioned before the start of the ResultSet.  You cannot call deleteRow() here.";
-    t[569] = "Trenutna pozicija pre početka ResultSet-a.  Ne možete pozvati deleteRow() na toj poziciji.";
-    t[576] = "Illegal UTF-8 sequence: final value is a surrogate value: {0}";
-    t[577] = "Ilegalna UTF-8 sekvenca: finalna vrednost je zamena vrednosti: {0}";
-    t[578] = "Unknown Response Type {0}.";
-    t[579] = "Nepoznat tip odziva {0}.";
-    t[582] = "Unsupported value for stringtype parameter: {0}";
-    t[583] = "Vrednost za parametar tipa string nije podržana: {0}";
-    t[584] = "Conversion to type {0} failed: {1}.";
-    t[585] = "Konverzija u tip {0} propala: {1}.";
-    t[586] = "This SQLXML object has not been initialized, so you cannot retrieve data from it.";
-    t[587] = "SQLXML objekat nije inicijalizovan tako da nije moguće preuzimati podatke iz njega.";
-    t[600] = "Unable to load the class {0} responsible for the datatype {1}";
-    t[601] = "Nije moguće učitati kalsu {0} odgovornu za tip podataka {1}";
-    t[604] = "The fastpath function {0} is unknown.";
-    t[605] = "Fastpath funkcija {0} je nepoznata.";
-    t[608] = "Malformed function or procedure escape syntax at offset {0}.";
-    t[609] = "Pogrešna sintaksa u funkciji ili proceduri na poziciji {0}.";
-    t[612] = "Provided Reader failed.";
-    t[613] = "Pribavljeni čitač (Reader) zakazao.";
-    t[614] = "Maximum number of rows must be a value grater than or equal to 0.";
-    t[615] = "Maksimalni broj redova mora biti vrednosti veće ili jednake 0.";
-    t[616] = "Failed to create object for: {0}.";
-    t[617] = "Propao pokušaj kreiranja objekta za: {0}.";
-    t[620] = "Conversion of money failed.";
-    t[621] = "Konverzija novca (money) propala.";
-    t[622] = "Premature end of input stream, expected {0} bytes, but only read {1}.";
-    t[623] = "Prevremen završetak ulaznog toka podataka,očekivano {0} bajtova, a pročitano samo {1}.";
-    t[626] = "An unexpected result was returned by a query.";
-    t[627] = "Nepredviđen rezultat je vraćen od strane upita.";
-    t[644] = "Invalid protocol state requested. Attempted transaction interleaving is not supported. xid={0}, currentXid={1}, state={2}, flags={3}";
-    t[645] = "Preplitanje transakcija nije implementirano. xid={0}, currentXid={1}, state={2}, flags={3}";
-    t[646] = "An error occurred while setting up the SSL connection.";
-    t[647] = "Greška se dogodila prilikom podešavanja SSL konekcije.";
-    t[654] = "Illegal UTF-8 sequence: {0} bytes used to encode a {1} byte value: {2}";
-    t[655] = "Ilegalna UTF-8 sekvenca: {0} bytes used to encode a {1} byte value: {2}";
-    t[656] = "Not implemented: Prepare must be issued using the same connection that started the transaction. currentXid={0}, prepare xid={1}";
-    t[657] = "Nije implementirano: Spremanje mora biti pozvano uz korišćenje iste konekcije koja se koristi za startovanje transakcije. currentXid={0}, prepare xid={1}";
-    t[658] = "The SSLSocketFactory class provided {0} could not be instantiated.";
-    t[659] = "SSLSocketFactory klasa koju pruža {0} se nemože instancirati.";
-    t[662] = "Failed to convert binary xml data to encoding: {0}.";
-    t[663] = "Neuspešno konvertovanje binarnih XML podataka u kodnu stranu: {0}.";
-    t[670] = "Position: {0}";
-    t[671] = "Pozicija: {0}";
-    t[676] = "Location: File: {0}, Routine: {1}, Line: {2}";
-    t[677] = "Lokacija: Fajl: {0}, Rutina: {1}, Linija: {2}";
-    t[684] = "Cannot tell if path is open or closed: {0}.";
-    t[685] = "Nije moguće utvrditi dali je putanja otvorena ili zatvorena: {0}.";
-    t[690] = "Unable to create StAXResult for SQLXML";
-    t[691] = "Nije moguće kreirati StAXResult za SQLXML";
-    t[700] = "Cannot convert an instance of {0} to type {1}";
-    t[701] = "Nije moguće konvertovati instancu {0} u tip {1}";
-    t[710] = "{0} function takes four and only four argument.";
-    t[711] = "Funkcija {0} prima četiri i samo četiri parametra.";
-    t[718] = "Interrupted while attempting to connect.";
-    t[719] = "Prekinut pokušaj konektovanja.";
-    t[722] = "Your security policy has prevented the connection from being attempted.  You probably need to grant the connect java.net.SocketPermission to the database server host and port that you wish to connect to.";
-    t[723] = "Sigurnosna podešavanja su sprečila konekciju. Verovatno je potrebno da dozvolite konekciju klasi java.net.SocketPermission na bazu na serveru.";
-    t[734] = "No function outputs were registered.";
-    t[735] = "Nije registrovan nikakv izlaz iz funkcije.";
-    t[736] = "{0} function takes one and only one argument.";
-    t[737] = "Funkcija {0} prima jedan i samo jedan parametar.";
-    t[744] = "This ResultSet is closed.";
-    t[745] = "ResultSet je zatvoren.";
-    t[746] = "Invalid character data was found.  This is most likely caused by stored data containing characters that are invalid for the character set the database was created in.  The most common example of this is storing 8bit data in a SQL_ASCII database.";
-    t[747] = "Pronađeni su nevažeći karakter podaci. Uzrok je najverovatnije to što pohranjeni podaci sadrže karaktere koji su nevažeći u setu karaktera sa kojima je baza kreirana.  Npr. Čuvanje 8bit podataka u SQL_ASCII bazi podataka.";
-    t[752] = "Error disabling autocommit";
-    t[753] = "Greška u isključivanju autokomita";
-    t[754] = "Ran out of memory retrieving query results.";
-    t[755] = "Nestalo je memorije prilikom preuzimanja rezultata upita.";
-    t[756] = "Returning autogenerated keys is not supported.";
-    t[757] = "Vraćanje autogenerisanih ključeva nije podržano.";
-    t[760] = "Operation requires a scrollable ResultSet, but this ResultSet is FORWARD_ONLY.";
-    t[761] = "Operacija zahteva skrolabilan ResultSet,ali ovaj ResultSet je FORWARD_ONLY.";
-    t[762] = "A CallableStatement function was executed and the out parameter {0} was of type {1} however type {2} was registered.";
-    t[763] = "CallableStatement funkcija je izvršena dok je izlazni parametar {0} tipa {1} a tip {2} je registrovan kao izlazni parametar.";
-    t[764] = "Unable to find server array type for provided name {0}.";
-    t[765] = "Neuspešno nalaženje liste servera za zadato ime {0}.";
-    t[768] = "Unknown ResultSet holdability setting: {0}.";
-    t[769] = "Nepoznata ResultSet podešavanja za mogućnost držanja (holdability): {0}.";
-    t[772] = "Transaction isolation level {0} not supported.";
-    t[773] = "Nivo izolacije transakcije {0} nije podržan.";
-    t[774] = "Zero bytes may not occur in identifiers.";
-    t[775] = "Nula bajtovji se ne smeju pojavljivati u identifikatorima.";
-    t[776] = "No results were returned by the query.";
-    t[777] = "Nikakav rezultat nije vraćen od strane upita.";
-    t[778] = "A CallableStatement was executed with nothing returned.";
-    t[779] = "CallableStatement je izvršen ali ništa nije vrećeno kao rezultat.";
-    t[780] = "wasNull cannot be call before fetching a result.";
-    t[781] = "wasNull nemože biti pozvan pre zahvatanja rezultata.";
-    t[784] = "Returning autogenerated keys by column index is not supported.";
-    t[785] = "Vraćanje autogenerisanih ključeva po kloloni nije podržano.";
-    t[786] = "This statement does not declare an OUT parameter.  Use '{' ?= call ... '}' to declare one.";
-    t[787] = "Izraz ne deklariše izlazni parametar. Koristite '{' ?= poziv ... '}' za deklarisanje.";
-    t[788] = "Can''t use relative move methods while on the insert row.";
-    t[789] = "Ne može se koristiti metod relativnog pomeranja prilikom ubacivanja redova.";
-    t[790] = "A CallableStatement was executed with an invalid number of parameters";
-    t[791] = "CallableStatement je izvršen sa nevažećim brojem parametara";
-    t[792] = "Connection is busy with another transaction";
-    t[793] = "Konekcija je zauzeta sa drugom transakciom.";
-    table = t;
-  }
+    private static final String[] table;
 
-  @Override
-  public Object handleGetObject (String msgid) throws MissingResourceException {
-    int hash_val = msgid.hashCode() & 0x7fffffff;
-    int idx = (hash_val % 397) << 1;
-    {
-      Object found = table[idx];
-      if (found == null)
-        return null;
-      if (msgid.equals(found))
-        return table[idx + 1];
+    static {
+        String[] t = new String[794];
+        t[0] = "";
+        t[1] = "Project-Id-Version: PostgreSQL 8.1\nReport-Msgid-Bugs-To: \nPO-Revision-Date: 2009-05-26 11:13+0100\nLast-Translator: Bojan Škaljac <skaljac (at) gmail.com>\nLanguage-Team: Srpski <skaljac@gmail.com>\nLanguage: \nMIME-Version: 1.0\nContent-Type: text/plain; charset=UTF-8\nContent-Transfer-Encoding: 8bit\nX-Poedit-Language: Serbian\nX-Poedit-Country: YUGOSLAVIA\n";
+        t[2] = "Not implemented: 2nd phase commit must be issued using an idle connection. commit xid={0}, currentXid={1}, state={2}, transactionState={3}";
+        t[3] = "Nije implementirano: Dvofazni commit mora biti izdat uz korištenje besposlene konekcije. commit xid={0}, currentXid={1}, state={2}, transactionState={3}";
+        t[4] = "DataSource has been closed.";
+        t[5] = "DataSource je zatvoren.";
+        t[8] = "Invalid flags {0}";
+        t[9] = "Nevažeće zastavice {0}";
+        t[18] = "Where: {0}";
+        t[19] = "Gde: {0}";
+        t[24] = "Unknown XML Source class: {0}";
+        t[25] = "Nepoznata XML ulazna klasa: {0}";
+        t[26] = "The connection attempt failed.";
+        t[27] = "Pokušaj konektovanja propao.";
+        t[28] = "Currently positioned after the end of the ResultSet.  You cannot call deleteRow() here.";
+        t[29] = "Trenutna pozicija posle kraja ResultSet-a.  Ne možete pozvati deleteRow() na toj poziciji.";
+        t[32] = "Can''t use query methods that take a query string on a PreparedStatement.";
+        t[33] = "Ne možete da koristite metode za upit koji uzimaju string iz upita u PreparedStatement-u.";
+        t[36] = "Multiple ResultSets were returned by the query.";
+        t[37] = "Višestruki ResultSet-vi su vraćeni od strane upita.";
+        t[50] = "Too many update results were returned.";
+        t[51] = "Previše rezultata za ažuriranje je vraćeno.";
+        t[58] = "Illegal UTF-8 sequence: initial byte is {0}: {1}";
+        t[59] = "Ilegalna UTF-8 sekvenca: inicijalni bajt je {0}: {1}";
+        t[66] = "The column name {0} was not found in this ResultSet.";
+        t[67] = "Ime kolone {0} nije pronadjeno u ResultSet.";
+        t[70] = "Fastpath call {0} - No result was returned and we expected an integer.";
+        t[71] = "Fastpath poziv {0} - Nikakav rezultat nije vraćen a očekivan je integer.";
+        t[74] = "Protocol error.  Session setup failed.";
+        t[75] = "Greška protokola.  Zakazivanje sesije propalo.";
+        t[76] = "A CallableStatement was declared, but no call to registerOutParameter(1, <some type>) was made.";
+        t[77] = "CallableStatement jedeklarisan ali nije bilo poziva registerOutParameter (1, <neki_tip>).";
+        t[78] = "ResultSets with concurrency CONCUR_READ_ONLY cannot be updated.";
+        t[79] = "ResultSets sa osobinom CONCUR_READ_ONLY ne moeže biti ažuriran.";
+        t[90] = "LOB positioning offsets start at 1.";
+        t[91] = "LOB pozicija ofset počinje kod 1.";
+        t[92] = "Internal Position: {0}";
+        t[93] = "Interna pozicija: {0}";
+        t[96] = "free() was called on this LOB previously";
+        t[97] = "free() je pozvan na ovom LOB-u prethodno";
+        t[100] = "Cannot change transaction read-only property in the middle of a transaction.";
+        t[101] = "Nije moguće izmeniti read-only osobinu transakcije u sred izvršavanja transakcije.";
+        t[102] = "The JVM claims not to support the {0} encoding.";
+        t[103] = "JVM tvrdi da ne podržava {0} encoding.";
+        t[108] = "{0} function doesn''t take any argument.";
+        t[109] = "Funkcija {0} nema parametara.";
+        t[112] = "xid must not be null";
+        t[113] = "xid ne sme biti null";
+        t[114] = "Connection has been closed.";
+        t[115] = "Konekcija je već zatvorena.";
+        t[122] = "The server does not support SSL.";
+        t[123] = "Server ne podržava SSL.";
+        t[124] = "Custom type maps are not supported.";
+        t[125] = "Mape sa korisnički definisanim tipovima nisu podržane.";
+        t[140] = "Illegal UTF-8 sequence: byte {0} of {1} byte sequence is not 10xxxxxx: {2}";
+        t[141] = "Ilegalna UTF-8 sekvenca: bajt {0} od {1} bajtova sekvence nije 10xxxxxx: {2}";
+        t[148] = "Hint: {0}";
+        t[149] = "Nagovest: {0}";
+        t[152] = "Unable to find name datatype in the system catalogs.";
+        t[153] = "Nije moguće pronaći ime tipa podatka u sistemskom katalogu.";
+        t[156] = "Unsupported Types value: {0}";
+        t[157] = "Za tip nije podržana vrednost: {0}";
+        t[158] = "Unknown type {0}.";
+        t[159] = "Nepoznat tip {0}.";
+        t[166] = "{0} function takes two and only two arguments.";
+        t[167] = "Funkcija {0} prima dva i samo dva parametra.";
+        t[170] = "Finalizing a Connection that was never closed:";
+        t[171] = "Dovršavanje konekcije koja nikada nije zatvorena:";
+        t[180] = "The maximum field size must be a value greater than or equal to 0.";
+        t[181] = "Maksimalna vrednost veličine polja mora biti vrednost veća ili jednaka 0.";
+        t[186] = "PostgreSQL LOBs can only index to: {0}";
+        t[187] = "PostgreSQL LOB mogu jedino da označavaju: {0}";
+        t[194] = "Method {0} is not yet implemented.";
+        t[195] = "Metod {0} nije još impelemtiran.";
+        t[198] = "Error loading default settings from driverconfig.properties";
+        t[199] = "Greška u čitanju standardnih podešavanja iz driverconfig.properties";
+        t[200] = "Results cannot be retrieved from a CallableStatement before it is executed.";
+        t[201] = "Razultat nemože da se primi iz CallableStatement pre nego što se on izvrši.";
+        t[202] = "Large Objects may not be used in auto-commit mode.";
+        t[203] = "Veliki objekti (Large Object) se nemogu koristiti u auto-commit modu.";
+        t[208] = "Expected command status BEGIN, got {0}.";
+        t[209] = "Očekivan status komande je BEGIN, a dobijeno je {0}.";
+        t[218] = "Invalid fetch direction constant: {0}.";
+        t[219] = "Pogrešna konstanta za direkciju donošenja: {0}.";
+        t[222] = "{0} function takes three and only three arguments.";
+        t[223] = "Funkcija {0} prima tri i samo tri parametra.";
+        t[226] = "This SQLXML object has already been freed.";
+        t[227] = "Ovaj SQLXML je već obrisan.";
+        t[228] = "Cannot update the ResultSet because it is either before the start or after the end of the results.";
+        t[229] = "Nije moguće ažurirati ResultSet zato što je ili početak ili kraj rezultata.";
+        t[230] = "The JVM claims not to support the encoding: {0}";
+        t[231] = "JVM tvrdi da ne podržava encoding: {0}";
+        t[232] = "Parameter of type {0} was registered, but call to get{1} (sqltype={2}) was made.";
+        t[233] = "Parametar tipa {0} je registrovan,ali poziv za get{1} (sql tip={2}) je izvršen.";
+        t[234] = "Error rolling back prepared transaction. rollback xid={0}, preparedXid={1}, currentXid={2}";
+        t[235] = "Greška prilikom povratka na prethodo pripremljenu transakciju. rollback xid={0}, preparedXid={1}, currentXid={2}";
+        t[240] = "Cannot establish a savepoint in auto-commit mode.";
+        t[241] = "U auto-commit modu nije moguće podešavanje tački snimanja.";
+        t[242] = "Cannot retrieve the id of a named savepoint.";
+        t[243] = "Nije moguće primiti id imena tačke snimanja.";
+        t[244] = "The column index is out of range: {0}, number of columns: {1}.";
+        t[245] = "Indeks kolone van osega: {0}, broj kolona: {1}.";
+        t[250] = "Something unusual has occurred to cause the driver to fail. Please report this exception.";
+        t[251] = "Nešto neobično se dogodilo i drajver je zakazao. Molim prijavite ovaj izuzetak.";
+        t[260] = "Cannot cast an instance of {0} to type {1}";
+        t[261] = "Nije moguće kastovati instancu {0} u tip {1}";
+        t[264] = "Unknown Types value.";
+        t[265] = "Nepoznata vrednost za Types.";
+        t[266] = "Invalid stream length {0}.";
+        t[267] = "Nevažeća dužina toka {0}.";
+        t[272] = "Cannot retrieve the name of an unnamed savepoint.";
+        t[273] = "Nije moguće izvaditi ime tačke snimanja koja nema ime.";
+        t[274] = "Unable to translate data into the desired encoding.";
+        t[275] = "Nije moguće prevesti podatke u odabrani encoding format.";
+        t[276] = "Expected an EOF from server, got: {0}";
+        t[277] = "Očekivan EOF od servera, a dobijeno: {0}";
+        t[278] = "Bad value for type {0} : {1}";
+        t[279] = "Pogrešna vrednost za tip {0} : {1}";
+        t[280] = "The server requested password-based authentication, but no password was provided.";
+        t[281] = "Server zahteva autentifikaciju baziranu na šifri, ali šifra nije prosleđena.";
+        t[286] = "Unable to create SAXResult for SQLXML.";
+        t[287] = "Nije moguće kreirati SAXResult za SQLXML.";
+        t[292] = "Error during recover";
+        t[293] = "Greška prilikom oporavljanja.";
+        t[294] = "tried to call end without corresponding start call. state={0}, start xid={1}, currentXid={2}, preparedXid={3}";
+        t[295] = "Pokušaj pozivanja kraja pre odgovarajućeg početka. state={0}, start xid={1}, currentXid={2}, preparedXid={3}";
+        t[296] = "Truncation of large objects is only implemented in 8.3 and later servers.";
+        t[297] = "Skraćivanje velikih objekata je implementirano samo u 8.3 i novijim serverima.";
+        t[298] = "This PooledConnection has already been closed.";
+        t[299] = "PooledConnection je već zatvoren.";
+        t[302] = "ClientInfo property not supported.";
+        t[303] = "ClientInfo property nije podržan.";
+        t[306] = "Fetch size must be a value greater to or equal to 0.";
+        t[307] = "Doneta veličina mora biti vrednost veća ili jednaka 0.";
+        t[312] = "A connection could not be made using the requested protocol {0}.";
+        t[313] = "Konekciju nije moguće kreirati uz pomoć protokola {0}.";
+        t[318] = "Unknown XML Result class: {0}";
+        t[319] = "nepoznata XML klasa rezultata: {0}";
+        t[322] = "There are no rows in this ResultSet.";
+        t[323] = "U ResultSet-u nema redova.";
+        t[324] = "Unexpected command status: {0}.";
+        t[325] = "Neočekivan komandni status: {0}.";
+        t[330] = "Heuristic commit/rollback not supported. forget xid={0}";
+        t[331] = "Heuristički commit/rollback nije podržan. forget xid={0}";
+        t[334] = "Not on the insert row.";
+        t[335] = "Nije mod ubacivanja redova.";
+        t[336] = "This SQLXML object has already been initialized, so you cannot manipulate it further.";
+        t[337] = "SQLXML objekat je već inicijalizovan, tako da ga nije moguće dodatno menjati.";
+        t[344] = "Server SQLState: {0}";
+        t[345] = "SQLState servera: {0}";
+        t[348] = "The server''s standard_conforming_strings parameter was reported as {0}. The JDBC driver expected on or off.";
+        t[349] = "Serverov standard_conforming_strings parametar javlja {0}. JDBC drajver ocekuje on ili off.";
+        t[360] = "The driver currently does not support COPY operations.";
+        t[361] = "Drajver trenutno ne podržava COPY operacije.";
+        t[364] = "The array index is out of range: {0}, number of elements: {1}.";
+        t[365] = "Indeks niza je van opsega: {0}, broj elemenata: {1}.";
+        t[374] = "suspend/resume not implemented";
+        t[375] = "obustavljanje/nastavljanje nije implementirano.";
+        t[378] = "Not implemented: one-phase commit must be issued using the same connection that was used to start it";
+        t[379] = "Nije implementirano: Commit iz jedne faze mora biti izdat uz korištenje iste konekcije koja je korištena za startovanje.";
+        t[380] = "Error during one-phase commit. commit xid={0}";
+        t[381] = "Kreška prilikom commit-a iz jedne faze. commit xid={0}";
+        t[398] = "Cannot call cancelRowUpdates() when on the insert row.";
+        t[399] = "Nije moguće pozvati cancelRowUpdates() prilikom ubacivanja redova.";
+        t[400] = "Cannot reference a savepoint after it has been released.";
+        t[401] = "Nije moguće referenciranje tačke snimanja nakon njenog oslobađanja.";
+        t[402] = "You must specify at least one column value to insert a row.";
+        t[403] = "Morate specificirati barem jednu vrednost za kolonu da bi ste ubacili red.";
+        t[404] = "Unable to determine a value for MaxIndexKeys due to missing system catalog data.";
+        t[405] = "Nije moguće odrediti vrednost za MaxIndexKezs zbog nedostatka podataka u sistemskom katalogu.";
+        t[412] = "Illegal UTF-8 sequence: final value is out of range: {0}";
+        t[413] = "Ilegalna UTF-8 sekvenca: finalna vrednost je van opsega: {0}";
+        t[414] = "{0} function takes two or three arguments.";
+        t[415] = "Funkcija {0} prima dva ili tri parametra.";
+        t[428] = "Unable to convert DOMResult SQLXML data to a string.";
+        t[429] = "Nije moguće konvertovati DOMResult SQLXML podatke u string.";
+        t[434] = "Unable to decode xml data.";
+        t[435] = "Neuspešno dekodiranje XML podataka.";
+        t[440] = "Unexpected error writing large object to database.";
+        t[441] = "Neočekivana greška prilikom upisa velikog objekta u bazu podataka.";
+        t[442] = "Zero bytes may not occur in string parameters.";
+        t[443] = "Nula bajtovji se ne smeju pojavljivati u string parametrima.";
+        t[444] = "A result was returned when none was expected.";
+        t[445] = "Rezultat vraćen ali nikakav rezultat nije očekivan.";
+        t[450] = "ResultSet is not updateable.  The query that generated this result set must select only one table, and must select all primary keys from that table. See the JDBC 2.1 API Specification, section 5.6 for more details.";
+        t[451] = "ResultSet nije moguće ažurirati. Upit koji je generisao ovaj razultat mora selektoati jedino tabelu,i mora selektovati sve primrne ključeve iz te tabele. Pogledajte API specifikaciju za JDBC 2.1, sekciju 5.6 za više detalja.";
+        t[454] = "Bind message length {0} too long.  This can be caused by very large or incorrect length specifications on InputStream parameters.";
+        t[455] = "Dužina vezivne poruke {0} prevelika.  Ovo je možda rezultat veoma velike ili pogrešne dužine specifikacije za InputStream parametre.";
+        t[460] = "Statement has been closed.";
+        t[461] = "Statemen je već zatvoren.";
+        t[462] = "No value specified for parameter {0}.";
+        t[463] = "Nije zadata vrednost za parametar {0}.";
+        t[468] = "The array index is out of range: {0}";
+        t[469] = "Indeks niza je van opsega: {0}";
+        t[474] = "Unable to bind parameter values for statement.";
+        t[475] = "Nije moguće naći vrednost vezivnog parametra za izjavu (statement).";
+        t[476] = "Can''t refresh the insert row.";
+        t[477] = "Nije moguće osvežiti ubačeni red.";
+        t[480] = "No primary key found for table {0}.";
+        t[481] = "Nije pronađen ključ za tabelu {0}.";
+        t[482] = "Cannot change transaction isolation level in the middle of a transaction.";
+        t[483] = "Nije moguće izmeniti nivo izolacije transakcije u sred izvršavanja transakcije.";
+        t[498] = "Provided InputStream failed.";
+        t[499] = "Pribaljeni InputStream zakazao.";
+        t[500] = "The parameter index is out of range: {0}, number of parameters: {1}.";
+        t[501] = "Index parametra je van opsega: {0}, broj parametara je: {1}.";
+        t[502] = "The server''s DateStyle parameter was changed to {0}. The JDBC driver requires DateStyle to begin with ISO for correct operation.";
+        t[503] = "Serverov DataStyle parametar promenjen u {0}. JDBC zahteva da DateStyle počinje sa ISO za uspešno završavanje operacije.";
+        t[508] = "Connection attempt timed out.";
+        t[509] = "Isteklo je vreme za pokušaj konektovanja.";
+        t[512] = "Internal Query: {0}";
+        t[513] = "Interni upit: {0}";
+        t[514] = "Error preparing transaction. prepare xid={0}";
+        t[515] = "Greška u pripremanju transakcije. prepare xid={0}";
+        t[518] = "The authentication type {0} is not supported. Check that you have configured the pg_hba.conf file to include the client''s IP address or subnet, and that it is using an authentication scheme supported by the driver.";
+        t[519] = "Tip autentifikacije {0} nije podržan. Proverite dali imate podešen pg_hba.conf fajl koji uključuje klijentovu IP adresu ili podmrežu, i da ta mreža koristi šemu autentifikacije koja je podržana od strane ovog drajvera.";
+        t[526] = "Interval {0} not yet implemented";
+        t[527] = "Interval {0} još nije implementiran.";
+        t[532] = "Conversion of interval failed";
+        t[533] = "Konverzija intervala propala.";
+        t[540] = "Query timeout must be a value greater than or equals to 0.";
+        t[541] = "Tajm-aut mora biti vrednost veća ili jednaka 0.";
+        t[542] = "Connection has been closed automatically because a new connection was opened for the same PooledConnection or the PooledConnection has been closed.";
+        t[543] = "Konekcija je zatvorena automatski zato što je nova konekcija otvorena za isti PooledConnection ili je PooledConnection zatvoren.";
+        t[544] = "ResultSet not positioned properly, perhaps you need to call next.";
+        t[545] = "ResultSet nije pravilno pozicioniran, možda je potrebno da pozovete next.";
+        t[546] = "Prepare called before end. prepare xid={0}, state={1}";
+        t[547] = "Pripremanje poziva pre kraja. prepare xid={0}, state={1}";
+        t[548] = "Invalid UUID data.";
+        t[549] = "Nevažeća UUID podatak.";
+        t[550] = "This statement has been closed.";
+        t[551] = "Statement je zatvoren.";
+        t[552] = "Can''t infer the SQL type to use for an instance of {0}. Use setObject() with an explicit Types value to specify the type to use.";
+        t[553] = "Nije moguće zaključiti SQL tip koji bi se koristio sa instancom {0}. Koristite setObject() sa zadatim eksplicitnim tipom vrednosti.";
+        t[554] = "Cannot call updateRow() when on the insert row.";
+        t[555] = "Nije moguće pozvati updateRow() prilikom ubacivanja redova.";
+        t[562] = "Detail: {0}";
+        t[563] = "Detalji: {0}";
+        t[566] = "Cannot call deleteRow() when on the insert row.";
+        t[567] = "Nije moguće pozvati deleteRow() prilikom ubacivanja redova.";
+        t[568] = "Currently positioned before the start of the ResultSet.  You cannot call deleteRow() here.";
+        t[569] = "Trenutna pozicija pre početka ResultSet-a.  Ne možete pozvati deleteRow() na toj poziciji.";
+        t[576] = "Illegal UTF-8 sequence: final value is a surrogate value: {0}";
+        t[577] = "Ilegalna UTF-8 sekvenca: finalna vrednost je zamena vrednosti: {0}";
+        t[578] = "Unknown Response Type {0}.";
+        t[579] = "Nepoznat tip odziva {0}.";
+        t[582] = "Unsupported value for stringtype parameter: {0}";
+        t[583] = "Vrednost za parametar tipa string nije podržana: {0}";
+        t[584] = "Conversion to type {0} failed: {1}.";
+        t[585] = "Konverzija u tip {0} propala: {1}.";
+        t[586] = "This SQLXML object has not been initialized, so you cannot retrieve data from it.";
+        t[587] = "SQLXML objekat nije inicijalizovan tako da nije moguće preuzimati podatke iz njega.";
+        t[600] = "Unable to load the class {0} responsible for the datatype {1}";
+        t[601] = "Nije moguće učitati kalsu {0} odgovornu za tip podataka {1}";
+        t[604] = "The fastpath function {0} is unknown.";
+        t[605] = "Fastpath funkcija {0} je nepoznata.";
+        t[608] = "Malformed function or procedure escape syntax at offset {0}.";
+        t[609] = "Pogrešna sintaksa u funkciji ili proceduri na poziciji {0}.";
+        t[612] = "Provided Reader failed.";
+        t[613] = "Pribavljeni čitač (Reader) zakazao.";
+        t[614] = "Maximum number of rows must be a value grater than or equal to 0.";
+        t[615] = "Maksimalni broj redova mora biti vrednosti veće ili jednake 0.";
+        t[616] = "Failed to create object for: {0}.";
+        t[617] = "Propao pokušaj kreiranja objekta za: {0}.";
+        t[620] = "Conversion of money failed.";
+        t[621] = "Konverzija novca (money) propala.";
+        t[622] = "Premature end of input stream, expected {0} bytes, but only read {1}.";
+        t[623] = "Prevremen završetak ulaznog toka podataka,očekivano {0} bajtova, a pročitano samo {1}.";
+        t[626] = "An unexpected result was returned by a query.";
+        t[627] = "Nepredviđen rezultat je vraćen od strane upita.";
+        t[644] = "Invalid protocol state requested. Attempted transaction interleaving is not supported. xid={0}, currentXid={1}, state={2}, flags={3}";
+        t[645] = "Preplitanje transakcija nije implementirano. xid={0}, currentXid={1}, state={2}, flags={3}";
+        t[646] = "An error occurred while setting up the SSL connection.";
+        t[647] = "Greška se dogodila prilikom podešavanja SSL konekcije.";
+        t[654] = "Illegal UTF-8 sequence: {0} bytes used to encode a {1} byte value: {2}";
+        t[655] = "Ilegalna UTF-8 sekvenca: {0} bytes used to encode a {1} byte value: {2}";
+        t[656] = "Not implemented: Prepare must be issued using the same connection that started the transaction. currentXid={0}, prepare xid={1}";
+        t[657] = "Nije implementirano: Spremanje mora biti pozvano uz korišćenje iste konekcije koja se koristi za startovanje transakcije. currentXid={0}, prepare xid={1}";
+        t[658] = "The SSLSocketFactory class provided {0} could not be instantiated.";
+        t[659] = "SSLSocketFactory klasa koju pruža {0} se nemože instancirati.";
+        t[662] = "Failed to convert binary xml data to encoding: {0}.";
+        t[663] = "Neuspešno konvertovanje binarnih XML podataka u kodnu stranu: {0}.";
+        t[670] = "Position: {0}";
+        t[671] = "Pozicija: {0}";
+        t[676] = "Location: File: {0}, Routine: {1}, Line: {2}";
+        t[677] = "Lokacija: Fajl: {0}, Rutina: {1}, Linija: {2}";
+        t[684] = "Cannot tell if path is open or closed: {0}.";
+        t[685] = "Nije moguće utvrditi dali je putanja otvorena ili zatvorena: {0}.";
+        t[690] = "Unable to create StAXResult for SQLXML";
+        t[691] = "Nije moguće kreirati StAXResult za SQLXML";
+        t[700] = "Cannot convert an instance of {0} to type {1}";
+        t[701] = "Nije moguće konvertovati instancu {0} u tip {1}";
+        t[710] = "{0} function takes four and only four argument.";
+        t[711] = "Funkcija {0} prima četiri i samo četiri parametra.";
+        t[718] = "Interrupted while attempting to connect.";
+        t[719] = "Prekinut pokušaj konektovanja.";
+        t[722] = "Your security policy has prevented the connection from being attempted.  You probably need to grant the connect java.net.SocketPermission to the database server host and port that you wish to connect to.";
+        t[723] = "Sigurnosna podešavanja su sprečila konekciju. Verovatno je potrebno da dozvolite konekciju klasi java.net.SocketPermission na bazu na serveru.";
+        t[734] = "No function outputs were registered.";
+        t[735] = "Nije registrovan nikakv izlaz iz funkcije.";
+        t[736] = "{0} function takes one and only one argument.";
+        t[737] = "Funkcija {0} prima jedan i samo jedan parametar.";
+        t[744] = "This ResultSet is closed.";
+        t[745] = "ResultSet je zatvoren.";
+        t[746] = "Invalid character data was found.  This is most likely caused by stored data containing characters that are invalid for the character set the database was created in.  The most common example of this is storing 8bit data in a SQL_ASCII database.";
+        t[747] = "Pronađeni su nevažeći karakter podaci. Uzrok je najverovatnije to što pohranjeni podaci sadrže karaktere koji su nevažeći u setu karaktera sa kojima je baza kreirana.  Npr. Čuvanje 8bit podataka u SQL_ASCII bazi podataka.";
+        t[752] = "Error disabling autocommit";
+        t[753] = "Greška u isključivanju autokomita";
+        t[754] = "Ran out of memory retrieving query results.";
+        t[755] = "Nestalo je memorije prilikom preuzimanja rezultata upita.";
+        t[756] = "Returning autogenerated keys is not supported.";
+        t[757] = "Vraćanje autogenerisanih ključeva nije podržano.";
+        t[760] = "Operation requires a scrollable ResultSet, but this ResultSet is FORWARD_ONLY.";
+        t[761] = "Operacija zahteva skrolabilan ResultSet,ali ovaj ResultSet je FORWARD_ONLY.";
+        t[762] = "A CallableStatement function was executed and the out parameter {0} was of type {1} however type {2} was registered.";
+        t[763] = "CallableStatement funkcija je izvršena dok je izlazni parametar {0} tipa {1} a tip {2} je registrovan kao izlazni parametar.";
+        t[764] = "Unable to find server array type for provided name {0}.";
+        t[765] = "Neuspešno nalaženje liste servera za zadato ime {0}.";
+        t[768] = "Unknown ResultSet holdability setting: {0}.";
+        t[769] = "Nepoznata ResultSet podešavanja za mogućnost držanja (holdability): {0}.";
+        t[772] = "Transaction isolation level {0} not supported.";
+        t[773] = "Nivo izolacije transakcije {0} nije podržan.";
+        t[774] = "Zero bytes may not occur in identifiers.";
+        t[775] = "Nula bajtovji se ne smeju pojavljivati u identifikatorima.";
+        t[776] = "No results were returned by the query.";
+        t[777] = "Nikakav rezultat nije vraćen od strane upita.";
+        t[778] = "A CallableStatement was executed with nothing returned.";
+        t[779] = "CallableStatement je izvršen ali ništa nije vrećeno kao rezultat.";
+        t[780] = "wasNull cannot be call before fetching a result.";
+        t[781] = "wasNull nemože biti pozvan pre zahvatanja rezultata.";
+        t[784] = "Returning autogenerated keys by column index is not supported.";
+        t[785] = "Vraćanje autogenerisanih ključeva po kloloni nije podržano.";
+        t[786] = "This statement does not declare an OUT parameter.  Use '{' ?= call ... '}' to declare one.";
+        t[787] = "Izraz ne deklariše izlazni parametar. Koristite '{' ?= poziv ... '}' za deklarisanje.";
+        t[788] = "Can''t use relative move methods while on the insert row.";
+        t[789] = "Ne može se koristiti metod relativnog pomeranja prilikom ubacivanja redova.";
+        t[790] = "A CallableStatement was executed with an invalid number of parameters";
+        t[791] = "CallableStatement je izvršen sa nevažećim brojem parametara";
+        t[792] = "Connection is busy with another transaction";
+        t[793] = "Konekcija je zauzeta sa drugom transakciom.";
+        table = t;
     }
-    int incr = ((hash_val % 395) + 1) << 1;
-    for (;;) {
-      idx += incr;
-      if (idx >= 794)
-        idx -= 794;
-      Object found = table[idx];
-      if (found == null)
-        return null;
-      if (msgid.equals(found))
-        return table[idx + 1];
+
+    @Override
+    public Object handleGetObject(String msgid) throws MissingResourceException {
+        int hash_val = msgid.hashCode() & 0x7fffffff;
+        int idx = (hash_val % 397) << 1;
+        {
+            Object found = table[idx];
+            if (found == null)
+                return null;
+            if (msgid.equals(found))
+                return table[idx + 1];
+        }
+        int incr = ((hash_val % 395) + 1) << 1;
+        for (; ; ) {
+            idx += incr;
+            if (idx >= 794)
+                idx -= 794;
+            Object found = table[idx];
+            if (found == null)
+                return null;
+            if (msgid.equals(found))
+                return table[idx + 1];
+        }
     }
-  }
 
-  @Override
-  public Enumeration<String> getKeys () {
-    return new Enumeration<>() {
-        private int idx = 0;
-        { while (idx < 794 && table[idx] == null) idx += 2; }
+    @Override
+    public Enumeration<String> getKeys() {
+        return new Enumeration<>() {
+            private int idx = 0;
 
-      @Override
-        public boolean hasMoreElements () {
-          return (idx < 794);
-        }
+            {
+                while (idx < 794 && table[idx] == null) idx += 2;
+            }
 
-        @Override
-        public String nextElement () {
-          Object key = table[idx];
-          do idx += 2; while (idx < 794 && table[idx] == null);
-          return key.toString();
-        }
-      };
-  }
+            @Override
+            public boolean hasMoreElements() {
+                return (idx < 794);
+            }
 
-  public ResourceBundle getParent () {
-    return parent;
-  }
+            @Override
+            public String nextElement() {
+                Object key = table[idx];
+                do idx += 2; while (idx < 794 && table[idx] == null);
+                return key.toString();
+            }
+        };
+    }
+
+    public ResourceBundle getParent() {
+        return parent;
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/translation/messages_tr.java b/pgjdbc/src/main/java/org/postgresql/translation/messages_tr.java
index 02222b2..c8883c5 100644
--- a/pgjdbc/src/main/java/org/postgresql/translation/messages_tr.java
+++ b/pgjdbc/src/main/java/org/postgresql/translation/messages_tr.java
@@ -5,397 +5,403 @@ import java.util.MissingResourceException;
 import java.util.ResourceBundle;
 
 public class messages_tr extends ResourceBundle {
-  private static final String[] table;
-  static {
-    String[] t = new String[794];
-    t[0] = "";
-    t[1] = "Project-Id-Version: jdbc-tr\nReport-Msgid-Bugs-To: \nPO-Revision-Date: 2009-05-31 21:47+0200\nLast-Translator: Devrim GÜNDÜZ <devrim@gunduz.org>\nLanguage-Team: Turkish <pgsql-tr-genel@PostgreSQL.org>\nLanguage: tr\nMIME-Version: 1.0\nContent-Type: text/plain; charset=UTF-8\nContent-Transfer-Encoding: 8bit\nX-Generator: KBabel 1.3.1\nX-Poedit-Language: Turkish\nX-Poedit-Country: TURKEY\n";
-    t[2] = "Not implemented: 2nd phase commit must be issued using an idle connection. commit xid={0}, currentXid={1}, state={2}, transactionState={3}";
-    t[3] = "Desteklenmiyor: 2nd phase commit, atıl bir bağlantıdan başlatılmalıdır. commit xid={0}, currentXid={1}, state={2}, transactionState={3}";
-    t[4] = "DataSource has been closed.";
-    t[5] = "DataSource kapatıldı.";
-    t[8] = "Invalid flags {0}";
-    t[9] = "Geçersiz seçenekler {0}";
-    t[18] = "Where: {0}";
-    t[19] = "Where: {0}";
-    t[24] = "Unknown XML Source class: {0}";
-    t[25] = "Bilinmeyen XML Kaynak Sınıfı: {0}";
-    t[26] = "The connection attempt failed.";
-    t[27] = "Bağlantı denemesi başarısız oldu.";
-    t[28] = "Currently positioned after the end of the ResultSet.  You cannot call deleteRow() here.";
-    t[29] = "Şu an ResultSet sonucundan sonra konumlandı. deleteRow() burada çağırabilirsiniz.";
-    t[32] = "Can''t use query methods that take a query string on a PreparedStatement.";
-    t[33] = "PreparedStatement ile sorgu satırı alan sorgu yöntemleri kullanılamaz.";
-    t[36] = "Multiple ResultSets were returned by the query.";
-    t[37] = "Sorgu tarafından birden fazla ResultSet getirildi.";
-    t[50] = "Too many update results were returned.";
-    t[51] = "Çok fazla güncelleme sonucu döndürüldü.";
-    t[58] = "Illegal UTF-8 sequence: initial byte is {0}: {1}";
-    t[59] = "Geçersiz UTF-8 çoklu bayt karakteri: ilk bayt {0}: {1}";
-    t[66] = "The column name {0} was not found in this ResultSet.";
-    t[67] = "Bu ResultSet içinde {0} sütun adı bulunamadı.";
-    t[70] = "Fastpath call {0} - No result was returned and we expected an integer.";
-    t[71] = "Fastpath call {0} - Integer beklenirken hiçbir sonuç getirilmedi.";
-    t[74] = "Protocol error.  Session setup failed.";
-    t[75] = "Protokol hatası.  Oturum kurulumu başarısız oldu.";
-    t[76] = "A CallableStatement was declared, but no call to registerOutParameter(1, <some type>) was made.";
-    t[77] = "CallableStatement bildirildi ancak registerOutParameter(1, < bir tip>) tanıtımı yapılmadı.";
-    t[78] = "ResultSets with concurrency CONCUR_READ_ONLY cannot be updated.";
-    t[79] = "Eş zamanlama CONCUR_READ_ONLY olan ResultSet''ler değiştirilemez";
-    t[90] = "LOB positioning offsets start at 1.";
-    t[91] = "LOB bağlangıç adresi 1Den başlıyor";
-    t[92] = "Internal Position: {0}";
-    t[93] = "Internal Position: {0}";
-    t[96] = "free() was called on this LOB previously";
-    t[97] = "Bu LOB'da free() daha önce çağırıldı";
-    t[100] = "Cannot change transaction read-only property in the middle of a transaction.";
-    t[101] = "Transaction ortasında geçerli transactionun read-only özellği değiştirilemez.";
-    t[102] = "The JVM claims not to support the {0} encoding.";
-    t[103] = "JVM, {0} dil kodlamasını desteklememektedir.";
-    t[108] = "{0} function doesn''t take any argument.";
-    t[109] = "{0} fonksiyonu parametre almaz.";
-    t[112] = "xid must not be null";
-    t[113] = "xid null olamaz";
-    t[114] = "Connection has been closed.";
-    t[115] = "Bağlantı kapatıldı.";
-    t[122] = "The server does not support SSL.";
-    t[123] = "Sunucu SSL desteklemiyor.";
-    t[124] = "Custom type maps are not supported.";
-    t[125] = "Özel tip eşleştirmeleri desteklenmiyor.";
-    t[140] = "Illegal UTF-8 sequence: byte {0} of {1} byte sequence is not 10xxxxxx: {2}";
-    t[141] = "Geçersiz UTF-8 çoklu bayt karakteri: {0}/{1} baytı 10xxxxxx değildir: {2}";
-    t[148] = "Hint: {0}";
-    t[149] = "İpucu: {0}";
-    t[152] = "Unable to find name datatype in the system catalogs.";
-    t[153] = "Sistem kataloglarında name veri tipi bulunamıyor.";
-    t[156] = "Unsupported Types value: {0}";
-    t[157] = "Geçersiz Types değeri: {0}";
-    t[158] = "Unknown type {0}.";
-    t[159] = "Bilinmeyen tip {0}.";
-    t[166] = "{0} function takes two and only two arguments.";
-    t[167] = "{0} fonksiyonunu sadece iki parametre alabilir.";
-    t[170] = "Finalizing a Connection that was never closed:";
-    t[171] = "Kapatılmamış bağlantı sonlandırılıyor.";
-    t[180] = "The maximum field size must be a value greater than or equal to 0.";
-    t[181] = "En büyük alan boyutu sıfır ya da sıfırdan büyük bir değer olmalı.";
-    t[186] = "PostgreSQL LOBs can only index to: {0}";
-    t[187] = "PostgreSQL LOB göstergeleri sadece {0} referans edebilir";
-    t[194] = "Method {0} is not yet implemented.";
-    t[195] = "{0} yöntemi henüz kodlanmadı.";
-    t[198] = "Error loading default settings from driverconfig.properties";
-    t[199] = "driverconfig.properties dosyasından varsayılan ayarları yükleme hatası";
-    t[200] = "Results cannot be retrieved from a CallableStatement before it is executed.";
-    t[201] = "CallableStatement çalıştırılmadan sonuçlar ondan alınamaz.";
-    t[202] = "Large Objects may not be used in auto-commit mode.";
-    t[203] = "Auto-commit biçimde large object kullanılamaz.";
-    t[208] = "Expected command status BEGIN, got {0}.";
-    t[209] = "BEGIN komut durumunu beklenirken {0} alındı.";
-    t[218] = "Invalid fetch direction constant: {0}.";
-    t[219] = "Getirme yönü değişmezi geçersiz: {0}.";
-    t[222] = "{0} function takes three and only three arguments.";
-    t[223] = "{0} fonksiyonunu sadece üç parametre alabilir.";
-    t[226] = "This SQLXML object has already been freed.";
-    t[227] = "Bu SQLXML nesnesi zaten boşaltılmış.";
-    t[228] = "Cannot update the ResultSet because it is either before the start or after the end of the results.";
-    t[229] = "ResultSet, sonuçların ilk kaydından önce veya son kaydından sonra olduğu için güncelleme yapılamamaktadır.";
-    t[230] = "The JVM claims not to support the encoding: {0}";
-    t[231] = "JVM, {0} dil kodlamasını desteklememektedir.";
-    t[232] = "Parameter of type {0} was registered, but call to get{1} (sqltype={2}) was made.";
-    t[233] = "{0} tipinde parametre tanıtıldı, ancak {1} (sqltype={2}) tipinde geri getirmek için çağrı yapıldı.";
-    t[234] = "Error rolling back prepared transaction. rollback xid={0}, preparedXid={1}, currentXid={2}";
-    t[235] = "Hazırlanmış transaction rollback hatası. rollback xid={0}, preparedXid={1}, currentXid={2}";
-    t[240] = "Cannot establish a savepoint in auto-commit mode.";
-    t[241] = "Auto-commit biçimde savepoint oluşturulamıyor.";
-    t[242] = "Cannot retrieve the id of a named savepoint.";
-    t[243] = "Adlandırılmış savepointin id değerine erişilemiyor.";
-    t[244] = "The column index is out of range: {0}, number of columns: {1}.";
-    t[245] = "Sütun gçstergesi kapsam dışıdır: {0}, sütun sayısı: {1}.";
-    t[250] = "Something unusual has occurred to cause the driver to fail. Please report this exception.";
-    t[251] = "Sıradışı bir durum sürücünün hata vermesine sebep oldu. Lütfen bu durumu geliştiricilere bildirin.";
-    t[260] = "Cannot cast an instance of {0} to type {1}";
-    t[261] = "{0} tipi {1} tipine dönüştürülemiyor";
-    t[264] = "Unknown Types value.";
-    t[265] = "Geçersiz Types değeri.";
-    t[266] = "Invalid stream length {0}.";
-    t[267] = "Geçersiz akım uzunluğu {0}.";
-    t[272] = "Cannot retrieve the name of an unnamed savepoint.";
-    t[273] = "Adı verilmemiş savepointin id değerine erişilemiyor.";
-    t[274] = "Unable to translate data into the desired encoding.";
-    t[275] = "Veri, istenilen dil kodlamasına çevrilemiyor.";
-    t[276] = "Expected an EOF from server, got: {0}";
-    t[277] = "Sunucudan EOF beklendi; ama {0} alındı.";
-    t[278] = "Bad value for type {0} : {1}";
-    t[279] = "{0} veri tipi için geçersiz değer : {1}";
-    t[280] = "The server requested password-based authentication, but no password was provided.";
-    t[281] = "Sunucu şifre tabanlı yetkilendirme istedi; ancak bir şifre sağlanmadı.";
-    t[286] = "Unable to create SAXResult for SQLXML.";
-    t[287] = "SQLXML için SAXResult yaratılamadı.";
-    t[292] = "Error during recover";
-    t[293] = "Kurtarma sırasında hata";
-    t[294] = "tried to call end without corresponding start call. state={0}, start xid={1}, currentXid={2}, preparedXid={3}";
-    t[295] = "start çağırımı olmadan end çağırılmıştır. state={0}, start xid={1}, currentXid={2}, preparedXid={3}";
-    t[296] = "Truncation of large objects is only implemented in 8.3 and later servers.";
-    t[297] = "Large objectlerin temizlenmesi 8.3 ve sonraki sürümlerde kodlanmıştır.";
-    t[298] = "This PooledConnection has already been closed.";
-    t[299] = "Geçerli PooledConnection zaten önceden kapatıldı.";
-    t[302] = "ClientInfo property not supported.";
-    t[303] = "Clientinfo property'si desteklenememktedir.";
-    t[306] = "Fetch size must be a value greater to or equal to 0.";
-    t[307] = "Fetch boyutu sıfır veya daha büyük bir değer olmalıdır.";
-    t[312] = "A connection could not be made using the requested protocol {0}.";
-    t[313] = "İstenilen protokol ile bağlantı kurulamadı {0}";
-    t[318] = "Unknown XML Result class: {0}";
-    t[319] = "Bilinmeyen XML Sonuç sınıfı: {0}.";
-    t[322] = "There are no rows in this ResultSet.";
-    t[323] = "Bu ResultSet içinde kayıt bulunamadı.";
-    t[324] = "Unexpected command status: {0}.";
-    t[325] = "Beklenmeyen komut durumu: {0}.";
-    t[330] = "Heuristic commit/rollback not supported. forget xid={0}";
-    t[331] = "Heuristic commit/rollback desteklenmiyor. forget xid={0}";
-    t[334] = "Not on the insert row.";
-    t[335] = "Insert kaydı değil.";
-    t[336] = "This SQLXML object has already been initialized, so you cannot manipulate it further.";
-    t[337] = "Bu SQLXML nesnesi daha önceden ilklendirilmiştir; o yüzden daha fazla müdahale edilemez.";
-    t[344] = "Server SQLState: {0}";
-    t[345] = "Sunucu SQLState: {0}";
-    t[348] = "The server''s standard_conforming_strings parameter was reported as {0}. The JDBC driver expected on or off.";
-    t[349] = "İstemcinin client_standard_conforming_strings parametresi {0} olarak raporlandı. JDBC sürücüsü on ya da off olarak bekliyordu.";
-    t[360] = "The driver currently does not support COPY operations.";
-    t[361] = "Bu sunucu şu aşamada COPY işlemleri desteklememktedir.";
-    t[364] = "The array index is out of range: {0}, number of elements: {1}.";
-    t[365] = "Dizin göstergisi kapsam dışıdır: {0}, öğe sayısı: {1}.";
-    t[374] = "suspend/resume not implemented";
-    t[375] = "suspend/resume desteklenmiyor";
-    t[378] = "Not implemented: one-phase commit must be issued using the same connection that was used to start it";
-    t[379] = "Desteklenmiyor: one-phase commit, işlevinde başlatan ve bitiren bağlantı aynı olmalıdır";
-    t[380] = "Error during one-phase commit. commit xid={0}";
-    t[381] = "One-phase commit sırasında hata. commit xid={0}";
-    t[398] = "Cannot call cancelRowUpdates() when on the insert row.";
-    t[399] = "Insert edilmiş kaydın üzerindeyken cancelRowUpdates() çağırılamaz.";
-    t[400] = "Cannot reference a savepoint after it has been released.";
-    t[401] = "Bırakıldıktan sonra savepoint referans edilemez.";
-    t[402] = "You must specify at least one column value to insert a row.";
-    t[403] = "Bir satır eklemek için en az bir sütun değerini belirtmelisiniz.";
-    t[404] = "Unable to determine a value for MaxIndexKeys due to missing system catalog data.";
-    t[405] = "Sistem kataloğu olmadığından MaxIndexKeys değerini tespit edilememektedir.";
-    t[410] = "commit called before end. commit xid={0}, state={1}";
-    t[411] = "commit, sondan önce çağırıldı. commit xid={0}, state={1}";
-    t[412] = "Illegal UTF-8 sequence: final value is out of range: {0}";
-    t[413] = "Geçersiz UTF-8 çoklu bayt karakteri: son değer sıra dışıdır: {0}";
-    t[414] = "{0} function takes two or three arguments.";
-    t[415] = "{0} fonksiyonu yalnız iki veya üç argüman alabilir.";
-    t[428] = "Unable to convert DOMResult SQLXML data to a string.";
-    t[429] = "DOMResult SQLXML verisini diziye dönüştürülemedi.";
-    t[434] = "Unable to decode xml data.";
-    t[435] = "XML verisinin kodu çözülemedi.";
-    t[440] = "Unexpected error writing large object to database.";
-    t[441] = "Large object veritabanına yazılırken beklenmeyan hata.";
-    t[442] = "Zero bytes may not occur in string parameters.";
-    t[443] = "String parametrelerinde sıfır bayt olamaz.";
-    t[444] = "A result was returned when none was expected.";
-    t[445] = "Hiçbir sonuç kebklenimezken sonuç getirildi.";
-    t[450] = "ResultSet is not updateable.  The query that generated this result set must select only one table, and must select all primary keys from that table. See the JDBC 2.1 API Specification, section 5.6 for more details.";
-    t[451] = "ResultSet değiştirilemez. Bu sonucu üreten sorgu tek bir tablodan sorgulamalı ve tablonun tüm primary key alanları belirtmelidir. Daha fazla bilgi için bk. JDBC 2.1 API Specification, section 5.6.";
-    t[454] = "Bind message length {0} too long.  This can be caused by very large or incorrect length specifications on InputStream parameters.";
-    t[455] = "Bind mesaj uzunluğu ({0}) fazla uzun. Bu durum InputStream yalnış uzunluk belirtimlerden kaynaklanabilir.";
-    t[460] = "Statement has been closed.";
-    t[461] = "Komut kapatıldı.";
-    t[462] = "No value specified for parameter {0}.";
-    t[463] = "{0} parametresi için hiç bir değer belirtilmedi.";
-    t[468] = "The array index is out of range: {0}";
-    t[469] = "Dizi göstergesi kapsam dışıdır: {0}";
-    t[474] = "Unable to bind parameter values for statement.";
-    t[475] = "Komut için parametre değerlei bağlanamadı.";
-    t[476] = "Can''t refresh the insert row.";
-    t[477] = "Inser satırı yenilenemiyor.";
-    t[480] = "No primary key found for table {0}.";
-    t[481] = "{0} tablosunda primary key yok.";
-    t[482] = "Cannot change transaction isolation level in the middle of a transaction.";
-    t[483] = "Transaction ortasında geçerli transactionun transaction isolation level özellği değiştirilemez.";
-    t[498] = "Provided InputStream failed.";
-    t[499] = "Sağlanmış InputStream başarısız.";
-    t[500] = "The parameter index is out of range: {0}, number of parameters: {1}.";
-    t[501] = "Dizin göstergisi kapsam dışıdır: {0}, öğe sayısı: {1}.";
-    t[502] = "The server''s DateStyle parameter was changed to {0}. The JDBC driver requires DateStyle to begin with ISO for correct operation.";
-    t[503] = "Sunucunun DateStyle parametresi {0} olarak değiştirildi. JDBC sürücüsü doğru işlemesi için DateStyle tanımının ISO işle başlamasını gerekir.";
-    t[508] = "Connection attempt timed out.";
-    t[509] = "Bağlantı denemesi zaman aşımına uğradı.";
-    t[512] = "Internal Query: {0}";
-    t[513] = "Internal Query: {0}";
-    t[514] = "Error preparing transaction. prepare xid={0}";
-    t[515] = "Transaction hazırlama hatası. prepare xid={0}";
-    t[518] = "The authentication type {0} is not supported. Check that you have configured the pg_hba.conf file to include the client''s IP address or subnet, and that it is using an authentication scheme supported by the driver.";
-    t[519] = "{0} yetkinlendirme tipi desteklenmemektedir. pg_hba.conf dosyanızı istemcinin IP adresini ya da subnetini içerecek şekilde ayarlayıp ayarlamadığınızı ve sürücü tarafından desteklenen yetkilendirme yöntemlerinden birisini kullanıp kullanmadığını kontrol ediniz.";
-    t[526] = "Interval {0} not yet implemented";
-    t[527] = "{0} aralığı henüz kodlanmadı.";
-    t[532] = "Conversion of interval failed";
-    t[533] = "Interval dönüştürmesi başarısız.";
-    t[540] = "Query timeout must be a value greater than or equals to 0.";
-    t[541] = "Sorgu zaman aşımı değer sıfır veya sıfırdan büyük bir sayı olmalıdır.";
-    t[542] = "Connection has been closed automatically because a new connection was opened for the same PooledConnection or the PooledConnection has been closed.";
-    t[543] = "PooledConnection kapatıldığı için veya aynı PooledConnection için yeni bir bağlantı açıldığı için geçerli bağlantı otomatik kapatıldı.";
-    t[544] = "ResultSet not positioned properly, perhaps you need to call next.";
-    t[545] = "ResultSet doğru konumlanmamıştır, next işlemi çağırmanız gerekir.";
-    t[546] = "Prepare called before end. prepare xid={0}, state={1}";
-    t[547] = "Sondan önce prepare çağırılmış. prepare xid={0}, state={1}";
-    t[548] = "Invalid UUID data.";
-    t[549] = "Geçersiz UUID verisi.";
-    t[550] = "This statement has been closed.";
-    t[551] = "Bu komut kapatıldı.";
-    t[552] = "Can''t infer the SQL type to use for an instance of {0}. Use setObject() with an explicit Types value to specify the type to use.";
-    t[553] = "{0}''nin örneği ile kullanılacak SQL tip bulunamadı. Kullanılacak tip belirtmek için kesin Types değerleri ile setObject() kullanın.";
-    t[554] = "Cannot call updateRow() when on the insert row.";
-    t[555] = "Insert  kaydı üzerinde updateRow() çağırılamaz.";
-    t[562] = "Detail: {0}";
-    t[563] = "Ayrıntı: {0}";
-    t[566] = "Cannot call deleteRow() when on the insert row.";
-    t[567] = "Insert  kaydı üzerinde deleteRow() çağırılamaz.";
-    t[568] = "Currently positioned before the start of the ResultSet.  You cannot call deleteRow() here.";
-    t[569] = "Şu an ResultSet başlangcıından önce konumlandı. deleteRow() burada çağırabilirsiniz.";
-    t[576] = "Illegal UTF-8 sequence: final value is a surrogate value: {0}";
-    t[577] = "Geçersiz UTF-8 çoklu bayt karakteri: son değer yapay bir değerdir: {0}";
-    t[578] = "Unknown Response Type {0}.";
-    t[579] = "Bilinmeyen yanıt tipi {0}";
-    t[582] = "Unsupported value for stringtype parameter: {0}";
-    t[583] = "strinftype parametresi için destekleneyen değer: {0}";
-    t[584] = "Conversion to type {0} failed: {1}.";
-    t[585] = "{0} veri tipine dönüştürme hatası: {1}.";
-    t[586] = "This SQLXML object has not been initialized, so you cannot retrieve data from it.";
-    t[587] = "Bu SQLXML nesnesi ilklendirilmemiş; o yüzden ondan veri alamazsınız.";
-    t[600] = "Unable to load the class {0} responsible for the datatype {1}";
-    t[601] = "{1} veri tipinden sorumlu {0} sınıfı yüklenemedi";
-    t[604] = "The fastpath function {0} is unknown.";
-    t[605] = "{0} fastpath fonksiyonu bilinmemektedir.";
-    t[608] = "Malformed function or procedure escape syntax at offset {0}.";
-    t[609] = "{0} adresinde fonksiyon veya yordamda kaçış söz dizimi geçersiz.";
-    t[612] = "Provided Reader failed.";
-    t[613] = "Sağlanmış InputStream başarısız.";
-    t[614] = "Maximum number of rows must be a value grater than or equal to 0.";
-    t[615] = "En büyük getirilecek satır sayısı sıfırdan büyük olmalıdır.";
-    t[616] = "Failed to create object for: {0}.";
-    t[617] = "{0} için nesne oluşturma hatası.";
-    t[620] = "Conversion of money failed.";
-    t[621] = "Money dönüştürmesi başarısız.";
-    t[622] = "Premature end of input stream, expected {0} bytes, but only read {1}.";
-    t[623] = "Giriş akımında beklenmeyen dosya sonu, {0} bayt beklenirken sadece {1} bayt alındı.";
-    t[626] = "An unexpected result was returned by a query.";
-    t[627] = "Sorgu beklenmeyen bir sonuç döndürdü.";
-    t[644] = "Invalid protocol state requested. Attempted transaction interleaving is not supported. xid={0}, currentXid={1}, state={2}, flags={3}";
-    t[645] = "Transaction interleaving desteklenmiyor. xid={0}, currentXid={1}, state={2}, flags={3}";
-    t[646] = "An error occurred while setting up the SSL connection.";
-    t[647] = "SSL bağlantısı ayarlanırken bir hata oluştu.";
-    t[654] = "Illegal UTF-8 sequence: {0} bytes used to encode a {1} byte value: {2}";
-    t[655] = "Geçersiz UTF-8 çoklu bayt karakteri: {0} bayt, {1} bayt değeri kodlamak için kullanılmış: {2}";
-    t[656] = "Not implemented: Prepare must be issued using the same connection that started the transaction. currentXid={0}, prepare xid={1}";
-    t[657] = "Desteklenmiyor: Prepare, transaction başlatran bağlantı tarafından çağırmalıdır. currentXid={0}, prepare xid={1}";
-    t[658] = "The SSLSocketFactory class provided {0} could not be instantiated.";
-    t[659] = "SSLSocketFactory {0} ile örneklenmedi.";
-    t[662] = "Failed to convert binary xml data to encoding: {0}.";
-    t[663] = "xml verisinin şu dil kodlamasına çevirilmesi başarısız oldu: {0}";
-    t[670] = "Position: {0}";
-    t[671] = "Position: {0}";
-    t[676] = "Location: File: {0}, Routine: {1}, Line: {2}";
-    t[677] = "Yer: Dosya: {0}, Yordam: {1}, Satır: {2}";
-    t[684] = "Cannot tell if path is open or closed: {0}.";
-    t[685] = "Pathın açık mı kapalı olduğunu tespit edilemiyor: {0}.";
-    t[690] = "Unable to create StAXResult for SQLXML";
-    t[691] = "SQLXML için StAXResult yaratılamadı";
-    t[700] = "Cannot convert an instance of {0} to type {1}";
-    t[701] = "{0} instance, {1} tipine dönüştürülemiyor";
-    t[710] = "{0} function takes four and only four argument.";
-    t[711] = "{0} fonksiyonunu yalnız dört parametre alabilir.";
-    t[718] = "Interrupted while attempting to connect.";
-    t[719] = "Bağlanırken kesildi.";
-    t[722] = "Your security policy has prevented the connection from being attempted.  You probably need to grant the connect java.net.SocketPermission to the database server host and port that you wish to connect to.";
-    t[723] = "Güvenlik politikanız bağlantının kurulmasını engelledi. java.net.SocketPermission'a veritabanına ve de bağlanacağı porta bağlantı izni vermelisiniz.";
-    t[734] = "No function outputs were registered.";
-    t[735] = "Hiçbir fonksiyon çıktısı kaydedilmedi.";
-    t[736] = "{0} function takes one and only one argument.";
-    t[737] = "{0} fonksiyonunu yalnız tek bir parametre alabilir.";
-    t[744] = "This ResultSet is closed.";
-    t[745] = "ResultSet kapalıdır.";
-    t[746] = "Invalid character data was found.  This is most likely caused by stored data containing characters that are invalid for the character set the database was created in.  The most common example of this is storing 8bit data in a SQL_ASCII database.";
-    t[747] = "Geçersiz karakterler bulunmuştur. Bunun sebebi, verilerde veritabanın desteklediği dil kodlamadaki karakterlerin dışında bir karaktere rastlamasıdır. Bunun en yaygın örneği 8 bitlik veriyi SQL_ASCII veritabanında saklamasıdır.";
-    t[752] = "Error disabling autocommit";
-    t[753] = "autocommit'i devre dışı bırakma sırasında hata";
-    t[754] = "Ran out of memory retrieving query results.";
-    t[755] = "Sorgu sonuçları alınırken bellek yetersiz.";
-    t[756] = "Returning autogenerated keys is not supported.";
-    t[757] = "Otomatik üretilen değerlerin getirilmesi desteklenememktedir.";
-    t[760] = "Operation requires a scrollable ResultSet, but this ResultSet is FORWARD_ONLY.";
-    t[761] = "İşlem, kaydırılabilen ResultSet gerektirir, ancak bu ResultSet FORWARD_ONLYdir.";
-    t[762] = "A CallableStatement function was executed and the out parameter {0} was of type {1} however type {2} was registered.";
-    t[763] = "CallableStatement çalıştırıldı, ancak {2} tipi kaydedilmesine rağmen döndürme parametresi {0} ve tipi {1} idi.";
-    t[764] = "Unable to find server array type for provided name {0}.";
-    t[765] = "Belirtilen {0} adı için sunucu array tipi bulunamadı.";
-    t[768] = "Unknown ResultSet holdability setting: {0}.";
-    t[769] = "ResultSet tutabilme ayarı geçersiz: {0}.";
-    t[772] = "Transaction isolation level {0} not supported.";
-    t[773] = "Transaction isolation level {0} desteklenmiyor.";
-    t[774] = "Zero bytes may not occur in identifiers.";
-    t[775] = "Belirteçlerde sıfır bayt olamaz.";
-    t[776] = "No results were returned by the query.";
-    t[777] = "Sorgudan hiç bir sonuç dönmedi.";
-    t[778] = "A CallableStatement was executed with nothing returned.";
-    t[779] = "CallableStatement çalıştırma sonucunda veri getirilmedi.";
-    t[780] = "wasNull cannot be call before fetching a result.";
-    t[781] = "wasNull sonuç çekmeden önce çağırılamaz.";
-    t[784] = "Returning autogenerated keys by column index is not supported.";
-    t[785] = "Kolonların indexlenmesi ile otomatik olarak oluşturulan anahtarların döndürülmesi desteklenmiyor.";
-    t[786] = "This statement does not declare an OUT parameter.  Use '{' ?= call ... '}' to declare one.";
-    t[787] = "Bu komut OUT parametresi bildirmemektedir.  Bildirmek için '{' ?= call ... '}' kullanın.";
-    t[788] = "Can''t use relative move methods while on the insert row.";
-    t[789] = "Insert kaydı üzerinde relative move method kullanılamaz.";
-    t[790] = "A CallableStatement was executed with an invalid number of parameters";
-    t[791] = "CallableStatement geçersiz sayıda parametre ile çalıştırıldı.";
-    t[792] = "Connection is busy with another transaction";
-    t[793] = "Bağlantı, başka bir transaction tarafından meşgul ediliyor";
-    table = t;
-  }
+    private static final String[] table;
 
-  @Override
-  public Object handleGetObject (String msgid) throws MissingResourceException {
-    int hash_val = msgid.hashCode() & 0x7fffffff;
-    int idx = (hash_val % 397) << 1;
-    {
-      Object found = table[idx];
-      if (found == null)
-        return null;
-      if (msgid.equals(found))
-        return table[idx + 1];
+    static {
+        String[] t = new String[794];
+        t[0] = "";
+        t[1] = "Project-Id-Version: jdbc-tr\nReport-Msgid-Bugs-To: \nPO-Revision-Date: 2009-05-31 21:47+0200\nLast-Translator: Devrim GÜNDÜZ <devrim@gunduz.org>\nLanguage-Team: Turkish <pgsql-tr-genel@PostgreSQL.org>\nLanguage: tr\nMIME-Version: 1.0\nContent-Type: text/plain; charset=UTF-8\nContent-Transfer-Encoding: 8bit\nX-Generator: KBabel 1.3.1\nX-Poedit-Language: Turkish\nX-Poedit-Country: TURKEY\n";
+        t[2] = "Not implemented: 2nd phase commit must be issued using an idle connection. commit xid={0}, currentXid={1}, state={2}, transactionState={3}";
+        t[3] = "Desteklenmiyor: 2nd phase commit, atıl bir bağlantıdan başlatılmalıdır. commit xid={0}, currentXid={1}, state={2}, transactionState={3}";
+        t[4] = "DataSource has been closed.";
+        t[5] = "DataSource kapatıldı.";
+        t[8] = "Invalid flags {0}";
+        t[9] = "Geçersiz seçenekler {0}";
+        t[18] = "Where: {0}";
+        t[19] = "Where: {0}";
+        t[24] = "Unknown XML Source class: {0}";
+        t[25] = "Bilinmeyen XML Kaynak Sınıfı: {0}";
+        t[26] = "The connection attempt failed.";
+        t[27] = "Bağlantı denemesi başarısız oldu.";
+        t[28] = "Currently positioned after the end of the ResultSet.  You cannot call deleteRow() here.";
+        t[29] = "Şu an ResultSet sonucundan sonra konumlandı. deleteRow() burada çağırabilirsiniz.";
+        t[32] = "Can''t use query methods that take a query string on a PreparedStatement.";
+        t[33] = "PreparedStatement ile sorgu satırı alan sorgu yöntemleri kullanılamaz.";
+        t[36] = "Multiple ResultSets were returned by the query.";
+        t[37] = "Sorgu tarafından birden fazla ResultSet getirildi.";
+        t[50] = "Too many update results were returned.";
+        t[51] = "Çok fazla güncelleme sonucu döndürüldü.";
+        t[58] = "Illegal UTF-8 sequence: initial byte is {0}: {1}";
+        t[59] = "Geçersiz UTF-8 çoklu bayt karakteri: ilk bayt {0}: {1}";
+        t[66] = "The column name {0} was not found in this ResultSet.";
+        t[67] = "Bu ResultSet içinde {0} sütun adı bulunamadı.";
+        t[70] = "Fastpath call {0} - No result was returned and we expected an integer.";
+        t[71] = "Fastpath call {0} - Integer beklenirken hiçbir sonuç getirilmedi.";
+        t[74] = "Protocol error.  Session setup failed.";
+        t[75] = "Protokol hatası.  Oturum kurulumu başarısız oldu.";
+        t[76] = "A CallableStatement was declared, but no call to registerOutParameter(1, <some type>) was made.";
+        t[77] = "CallableStatement bildirildi ancak registerOutParameter(1, < bir tip>) tanıtımı yapılmadı.";
+        t[78] = "ResultSets with concurrency CONCUR_READ_ONLY cannot be updated.";
+        t[79] = "Eş zamanlama CONCUR_READ_ONLY olan ResultSet''ler değiştirilemez";
+        t[90] = "LOB positioning offsets start at 1.";
+        t[91] = "LOB bağlangıç adresi 1Den başlıyor";
+        t[92] = "Internal Position: {0}";
+        t[93] = "Internal Position: {0}";
+        t[96] = "free() was called on this LOB previously";
+        t[97] = "Bu LOB'da free() daha önce çağırıldı";
+        t[100] = "Cannot change transaction read-only property in the middle of a transaction.";
+        t[101] = "Transaction ortasında geçerli transactionun read-only özellği değiştirilemez.";
+        t[102] = "The JVM claims not to support the {0} encoding.";
+        t[103] = "JVM, {0} dil kodlamasını desteklememektedir.";
+        t[108] = "{0} function doesn''t take any argument.";
+        t[109] = "{0} fonksiyonu parametre almaz.";
+        t[112] = "xid must not be null";
+        t[113] = "xid null olamaz";
+        t[114] = "Connection has been closed.";
+        t[115] = "Bağlantı kapatıldı.";
+        t[122] = "The server does not support SSL.";
+        t[123] = "Sunucu SSL desteklemiyor.";
+        t[124] = "Custom type maps are not supported.";
+        t[125] = "Özel tip eşleştirmeleri desteklenmiyor.";
+        t[140] = "Illegal UTF-8 sequence: byte {0} of {1} byte sequence is not 10xxxxxx: {2}";
+        t[141] = "Geçersiz UTF-8 çoklu bayt karakteri: {0}/{1} baytı 10xxxxxx değildir: {2}";
+        t[148] = "Hint: {0}";
+        t[149] = "İpucu: {0}";
+        t[152] = "Unable to find name datatype in the system catalogs.";
+        t[153] = "Sistem kataloglarında name veri tipi bulunamıyor.";
+        t[156] = "Unsupported Types value: {0}";
+        t[157] = "Geçersiz Types değeri: {0}";
+        t[158] = "Unknown type {0}.";
+        t[159] = "Bilinmeyen tip {0}.";
+        t[166] = "{0} function takes two and only two arguments.";
+        t[167] = "{0} fonksiyonunu sadece iki parametre alabilir.";
+        t[170] = "Finalizing a Connection that was never closed:";
+        t[171] = "Kapatılmamış bağlantı sonlandırılıyor.";
+        t[180] = "The maximum field size must be a value greater than or equal to 0.";
+        t[181] = "En büyük alan boyutu sıfır ya da sıfırdan büyük bir değer olmalı.";
+        t[186] = "PostgreSQL LOBs can only index to: {0}";
+        t[187] = "PostgreSQL LOB göstergeleri sadece {0} referans edebilir";
+        t[194] = "Method {0} is not yet implemented.";
+        t[195] = "{0} yöntemi henüz kodlanmadı.";
+        t[198] = "Error loading default settings from driverconfig.properties";
+        t[199] = "driverconfig.properties dosyasından varsayılan ayarları yükleme hatası";
+        t[200] = "Results cannot be retrieved from a CallableStatement before it is executed.";
+        t[201] = "CallableStatement çalıştırılmadan sonuçlar ondan alınamaz.";
+        t[202] = "Large Objects may not be used in auto-commit mode.";
+        t[203] = "Auto-commit biçimde large object kullanılamaz.";
+        t[208] = "Expected command status BEGIN, got {0}.";
+        t[209] = "BEGIN komut durumunu beklenirken {0} alındı.";
+        t[218] = "Invalid fetch direction constant: {0}.";
+        t[219] = "Getirme yönü değişmezi geçersiz: {0}.";
+        t[222] = "{0} function takes three and only three arguments.";
+        t[223] = "{0} fonksiyonunu sadece üç parametre alabilir.";
+        t[226] = "This SQLXML object has already been freed.";
+        t[227] = "Bu SQLXML nesnesi zaten boşaltılmış.";
+        t[228] = "Cannot update the ResultSet because it is either before the start or after the end of the results.";
+        t[229] = "ResultSet, sonuçların ilk kaydından önce veya son kaydından sonra olduğu için güncelleme yapılamamaktadır.";
+        t[230] = "The JVM claims not to support the encoding: {0}";
+        t[231] = "JVM, {0} dil kodlamasını desteklememektedir.";
+        t[232] = "Parameter of type {0} was registered, but call to get{1} (sqltype={2}) was made.";
+        t[233] = "{0} tipinde parametre tanıtıldı, ancak {1} (sqltype={2}) tipinde geri getirmek için çağrı yapıldı.";
+        t[234] = "Error rolling back prepared transaction. rollback xid={0}, preparedXid={1}, currentXid={2}";
+        t[235] = "Hazırlanmış transaction rollback hatası. rollback xid={0}, preparedXid={1}, currentXid={2}";
+        t[240] = "Cannot establish a savepoint in auto-commit mode.";
+        t[241] = "Auto-commit biçimde savepoint oluşturulamıyor.";
+        t[242] = "Cannot retrieve the id of a named savepoint.";
+        t[243] = "Adlandırılmış savepointin id değerine erişilemiyor.";
+        t[244] = "The column index is out of range: {0}, number of columns: {1}.";
+        t[245] = "Sütun gçstergesi kapsam dışıdır: {0}, sütun sayısı: {1}.";
+        t[250] = "Something unusual has occurred to cause the driver to fail. Please report this exception.";
+        t[251] = "Sıradışı bir durum sürücünün hata vermesine sebep oldu. Lütfen bu durumu geliştiricilere bildirin.";
+        t[260] = "Cannot cast an instance of {0} to type {1}";
+        t[261] = "{0} tipi {1} tipine dönüştürülemiyor";
+        t[264] = "Unknown Types value.";
+        t[265] = "Geçersiz Types değeri.";
+        t[266] = "Invalid stream length {0}.";
+        t[267] = "Geçersiz akım uzunluğu {0}.";
+        t[272] = "Cannot retrieve the name of an unnamed savepoint.";
+        t[273] = "Adı verilmemiş savepointin id değerine erişilemiyor.";
+        t[274] = "Unable to translate data into the desired encoding.";
+        t[275] = "Veri, istenilen dil kodlamasına çevrilemiyor.";
+        t[276] = "Expected an EOF from server, got: {0}";
+        t[277] = "Sunucudan EOF beklendi; ama {0} alındı.";
+        t[278] = "Bad value for type {0} : {1}";
+        t[279] = "{0} veri tipi için geçersiz değer : {1}";
+        t[280] = "The server requested password-based authentication, but no password was provided.";
+        t[281] = "Sunucu şifre tabanlı yetkilendirme istedi; ancak bir şifre sağlanmadı.";
+        t[286] = "Unable to create SAXResult for SQLXML.";
+        t[287] = "SQLXML için SAXResult yaratılamadı.";
+        t[292] = "Error during recover";
+        t[293] = "Kurtarma sırasında hata";
+        t[294] = "tried to call end without corresponding start call. state={0}, start xid={1}, currentXid={2}, preparedXid={3}";
+        t[295] = "start çağırımı olmadan end çağırılmıştır. state={0}, start xid={1}, currentXid={2}, preparedXid={3}";
+        t[296] = "Truncation of large objects is only implemented in 8.3 and later servers.";
+        t[297] = "Large objectlerin temizlenmesi 8.3 ve sonraki sürümlerde kodlanmıştır.";
+        t[298] = "This PooledConnection has already been closed.";
+        t[299] = "Geçerli PooledConnection zaten önceden kapatıldı.";
+        t[302] = "ClientInfo property not supported.";
+        t[303] = "Clientinfo property'si desteklenememktedir.";
+        t[306] = "Fetch size must be a value greater to or equal to 0.";
+        t[307] = "Fetch boyutu sıfır veya daha büyük bir değer olmalıdır.";
+        t[312] = "A connection could not be made using the requested protocol {0}.";
+        t[313] = "İstenilen protokol ile bağlantı kurulamadı {0}";
+        t[318] = "Unknown XML Result class: {0}";
+        t[319] = "Bilinmeyen XML Sonuç sınıfı: {0}.";
+        t[322] = "There are no rows in this ResultSet.";
+        t[323] = "Bu ResultSet içinde kayıt bulunamadı.";
+        t[324] = "Unexpected command status: {0}.";
+        t[325] = "Beklenmeyen komut durumu: {0}.";
+        t[330] = "Heuristic commit/rollback not supported. forget xid={0}";
+        t[331] = "Heuristic commit/rollback desteklenmiyor. forget xid={0}";
+        t[334] = "Not on the insert row.";
+        t[335] = "Insert kaydı değil.";
+        t[336] = "This SQLXML object has already been initialized, so you cannot manipulate it further.";
+        t[337] = "Bu SQLXML nesnesi daha önceden ilklendirilmiştir; o yüzden daha fazla müdahale edilemez.";
+        t[344] = "Server SQLState: {0}";
+        t[345] = "Sunucu SQLState: {0}";
+        t[348] = "The server''s standard_conforming_strings parameter was reported as {0}. The JDBC driver expected on or off.";
+        t[349] = "İstemcinin client_standard_conforming_strings parametresi {0} olarak raporlandı. JDBC sürücüsü on ya da off olarak bekliyordu.";
+        t[360] = "The driver currently does not support COPY operations.";
+        t[361] = "Bu sunucu şu aşamada COPY işlemleri desteklememktedir.";
+        t[364] = "The array index is out of range: {0}, number of elements: {1}.";
+        t[365] = "Dizin göstergisi kapsam dışıdır: {0}, öğe sayısı: {1}.";
+        t[374] = "suspend/resume not implemented";
+        t[375] = "suspend/resume desteklenmiyor";
+        t[378] = "Not implemented: one-phase commit must be issued using the same connection that was used to start it";
+        t[379] = "Desteklenmiyor: one-phase commit, işlevinde başlatan ve bitiren bağlantı aynı olmalıdır";
+        t[380] = "Error during one-phase commit. commit xid={0}";
+        t[381] = "One-phase commit sırasında hata. commit xid={0}";
+        t[398] = "Cannot call cancelRowUpdates() when on the insert row.";
+        t[399] = "Insert edilmiş kaydın üzerindeyken cancelRowUpdates() çağırılamaz.";
+        t[400] = "Cannot reference a savepoint after it has been released.";
+        t[401] = "Bırakıldıktan sonra savepoint referans edilemez.";
+        t[402] = "You must specify at least one column value to insert a row.";
+        t[403] = "Bir satır eklemek için en az bir sütun değerini belirtmelisiniz.";
+        t[404] = "Unable to determine a value for MaxIndexKeys due to missing system catalog data.";
+        t[405] = "Sistem kataloğu olmadığından MaxIndexKeys değerini tespit edilememektedir.";
+        t[410] = "commit called before end. commit xid={0}, state={1}";
+        t[411] = "commit, sondan önce çağırıldı. commit xid={0}, state={1}";
+        t[412] = "Illegal UTF-8 sequence: final value is out of range: {0}";
+        t[413] = "Geçersiz UTF-8 çoklu bayt karakteri: son değer sıra dışıdır: {0}";
+        t[414] = "{0} function takes two or three arguments.";
+        t[415] = "{0} fonksiyonu yalnız iki veya üç argüman alabilir.";
+        t[428] = "Unable to convert DOMResult SQLXML data to a string.";
+        t[429] = "DOMResult SQLXML verisini diziye dönüştürülemedi.";
+        t[434] = "Unable to decode xml data.";
+        t[435] = "XML verisinin kodu çözülemedi.";
+        t[440] = "Unexpected error writing large object to database.";
+        t[441] = "Large object veritabanına yazılırken beklenmeyan hata.";
+        t[442] = "Zero bytes may not occur in string parameters.";
+        t[443] = "String parametrelerinde sıfır bayt olamaz.";
+        t[444] = "A result was returned when none was expected.";
+        t[445] = "Hiçbir sonuç kebklenimezken sonuç getirildi.";
+        t[450] = "ResultSet is not updateable.  The query that generated this result set must select only one table, and must select all primary keys from that table. See the JDBC 2.1 API Specification, section 5.6 for more details.";
+        t[451] = "ResultSet değiştirilemez. Bu sonucu üreten sorgu tek bir tablodan sorgulamalı ve tablonun tüm primary key alanları belirtmelidir. Daha fazla bilgi için bk. JDBC 2.1 API Specification, section 5.6.";
+        t[454] = "Bind message length {0} too long.  This can be caused by very large or incorrect length specifications on InputStream parameters.";
+        t[455] = "Bind mesaj uzunluğu ({0}) fazla uzun. Bu durum InputStream yalnış uzunluk belirtimlerden kaynaklanabilir.";
+        t[460] = "Statement has been closed.";
+        t[461] = "Komut kapatıldı.";
+        t[462] = "No value specified for parameter {0}.";
+        t[463] = "{0} parametresi için hiç bir değer belirtilmedi.";
+        t[468] = "The array index is out of range: {0}";
+        t[469] = "Dizi göstergesi kapsam dışıdır: {0}";
+        t[474] = "Unable to bind parameter values for statement.";
+        t[475] = "Komut için parametre değerlei bağlanamadı.";
+        t[476] = "Can''t refresh the insert row.";
+        t[477] = "Inser satırı yenilenemiyor.";
+        t[480] = "No primary key found for table {0}.";
+        t[481] = "{0} tablosunda primary key yok.";
+        t[482] = "Cannot change transaction isolation level in the middle of a transaction.";
+        t[483] = "Transaction ortasında geçerli transactionun transaction isolation level özellği değiştirilemez.";
+        t[498] = "Provided InputStream failed.";
+        t[499] = "Sağlanmış InputStream başarısız.";
+        t[500] = "The parameter index is out of range: {0}, number of parameters: {1}.";
+        t[501] = "Dizin göstergisi kapsam dışıdır: {0}, öğe sayısı: {1}.";
+        t[502] = "The server''s DateStyle parameter was changed to {0}. The JDBC driver requires DateStyle to begin with ISO for correct operation.";
+        t[503] = "Sunucunun DateStyle parametresi {0} olarak değiştirildi. JDBC sürücüsü doğru işlemesi için DateStyle tanımının ISO işle başlamasını gerekir.";
+        t[508] = "Connection attempt timed out.";
+        t[509] = "Bağlantı denemesi zaman aşımına uğradı.";
+        t[512] = "Internal Query: {0}";
+        t[513] = "Internal Query: {0}";
+        t[514] = "Error preparing transaction. prepare xid={0}";
+        t[515] = "Transaction hazırlama hatası. prepare xid={0}";
+        t[518] = "The authentication type {0} is not supported. Check that you have configured the pg_hba.conf file to include the client''s IP address or subnet, and that it is using an authentication scheme supported by the driver.";
+        t[519] = "{0} yetkinlendirme tipi desteklenmemektedir. pg_hba.conf dosyanızı istemcinin IP adresini ya da subnetini içerecek şekilde ayarlayıp ayarlamadığınızı ve sürücü tarafından desteklenen yetkilendirme yöntemlerinden birisini kullanıp kullanmadığını kontrol ediniz.";
+        t[526] = "Interval {0} not yet implemented";
+        t[527] = "{0} aralığı henüz kodlanmadı.";
+        t[532] = "Conversion of interval failed";
+        t[533] = "Interval dönüştürmesi başarısız.";
+        t[540] = "Query timeout must be a value greater than or equals to 0.";
+        t[541] = "Sorgu zaman aşımı değer sıfır veya sıfırdan büyük bir sayı olmalıdır.";
+        t[542] = "Connection has been closed automatically because a new connection was opened for the same PooledConnection or the PooledConnection has been closed.";
+        t[543] = "PooledConnection kapatıldığı için veya aynı PooledConnection için yeni bir bağlantı açıldığı için geçerli bağlantı otomatik kapatıldı.";
+        t[544] = "ResultSet not positioned properly, perhaps you need to call next.";
+        t[545] = "ResultSet doğru konumlanmamıştır, next işlemi çağırmanız gerekir.";
+        t[546] = "Prepare called before end. prepare xid={0}, state={1}";
+        t[547] = "Sondan önce prepare çağırılmış. prepare xid={0}, state={1}";
+        t[548] = "Invalid UUID data.";
+        t[549] = "Geçersiz UUID verisi.";
+        t[550] = "This statement has been closed.";
+        t[551] = "Bu komut kapatıldı.";
+        t[552] = "Can''t infer the SQL type to use for an instance of {0}. Use setObject() with an explicit Types value to specify the type to use.";
+        t[553] = "{0}''nin örneği ile kullanılacak SQL tip bulunamadı. Kullanılacak tip belirtmek için kesin Types değerleri ile setObject() kullanın.";
+        t[554] = "Cannot call updateRow() when on the insert row.";
+        t[555] = "Insert  kaydı üzerinde updateRow() çağırılamaz.";
+        t[562] = "Detail: {0}";
+        t[563] = "Ayrıntı: {0}";
+        t[566] = "Cannot call deleteRow() when on the insert row.";
+        t[567] = "Insert  kaydı üzerinde deleteRow() çağırılamaz.";
+        t[568] = "Currently positioned before the start of the ResultSet.  You cannot call deleteRow() here.";
+        t[569] = "Şu an ResultSet başlangcıından önce konumlandı. deleteRow() burada çağırabilirsiniz.";
+        t[576] = "Illegal UTF-8 sequence: final value is a surrogate value: {0}";
+        t[577] = "Geçersiz UTF-8 çoklu bayt karakteri: son değer yapay bir değerdir: {0}";
+        t[578] = "Unknown Response Type {0}.";
+        t[579] = "Bilinmeyen yanıt tipi {0}";
+        t[582] = "Unsupported value for stringtype parameter: {0}";
+        t[583] = "strinftype parametresi için destekleneyen değer: {0}";
+        t[584] = "Conversion to type {0} failed: {1}.";
+        t[585] = "{0} veri tipine dönüştürme hatası: {1}.";
+        t[586] = "This SQLXML object has not been initialized, so you cannot retrieve data from it.";
+        t[587] = "Bu SQLXML nesnesi ilklendirilmemiş; o yüzden ondan veri alamazsınız.";
+        t[600] = "Unable to load the class {0} responsible for the datatype {1}";
+        t[601] = "{1} veri tipinden sorumlu {0} sınıfı yüklenemedi";
+        t[604] = "The fastpath function {0} is unknown.";
+        t[605] = "{0} fastpath fonksiyonu bilinmemektedir.";
+        t[608] = "Malformed function or procedure escape syntax at offset {0}.";
+        t[609] = "{0} adresinde fonksiyon veya yordamda kaçış söz dizimi geçersiz.";
+        t[612] = "Provided Reader failed.";
+        t[613] = "Sağlanmış InputStream başarısız.";
+        t[614] = "Maximum number of rows must be a value grater than or equal to 0.";
+        t[615] = "En büyük getirilecek satır sayısı sıfırdan büyük olmalıdır.";
+        t[616] = "Failed to create object for: {0}.";
+        t[617] = "{0} için nesne oluşturma hatası.";
+        t[620] = "Conversion of money failed.";
+        t[621] = "Money dönüştürmesi başarısız.";
+        t[622] = "Premature end of input stream, expected {0} bytes, but only read {1}.";
+        t[623] = "Giriş akımında beklenmeyen dosya sonu, {0} bayt beklenirken sadece {1} bayt alındı.";
+        t[626] = "An unexpected result was returned by a query.";
+        t[627] = "Sorgu beklenmeyen bir sonuç döndürdü.";
+        t[644] = "Invalid protocol state requested. Attempted transaction interleaving is not supported. xid={0}, currentXid={1}, state={2}, flags={3}";
+        t[645] = "Transaction interleaving desteklenmiyor. xid={0}, currentXid={1}, state={2}, flags={3}";
+        t[646] = "An error occurred while setting up the SSL connection.";
+        t[647] = "SSL bağlantısı ayarlanırken bir hata oluştu.";
+        t[654] = "Illegal UTF-8 sequence: {0} bytes used to encode a {1} byte value: {2}";
+        t[655] = "Geçersiz UTF-8 çoklu bayt karakteri: {0} bayt, {1} bayt değeri kodlamak için kullanılmış: {2}";
+        t[656] = "Not implemented: Prepare must be issued using the same connection that started the transaction. currentXid={0}, prepare xid={1}";
+        t[657] = "Desteklenmiyor: Prepare, transaction başlatran bağlantı tarafından çağırmalıdır. currentXid={0}, prepare xid={1}";
+        t[658] = "The SSLSocketFactory class provided {0} could not be instantiated.";
+        t[659] = "SSLSocketFactory {0} ile örneklenmedi.";
+        t[662] = "Failed to convert binary xml data to encoding: {0}.";
+        t[663] = "xml verisinin şu dil kodlamasına çevirilmesi başarısız oldu: {0}";
+        t[670] = "Position: {0}";
+        t[671] = "Position: {0}";
+        t[676] = "Location: File: {0}, Routine: {1}, Line: {2}";
+        t[677] = "Yer: Dosya: {0}, Yordam: {1}, Satır: {2}";
+        t[684] = "Cannot tell if path is open or closed: {0}.";
+        t[685] = "Pathın açık mı kapalı olduğunu tespit edilemiyor: {0}.";
+        t[690] = "Unable to create StAXResult for SQLXML";
+        t[691] = "SQLXML için StAXResult yaratılamadı";
+        t[700] = "Cannot convert an instance of {0} to type {1}";
+        t[701] = "{0} instance, {1} tipine dönüştürülemiyor";
+        t[710] = "{0} function takes four and only four argument.";
+        t[711] = "{0} fonksiyonunu yalnız dört parametre alabilir.";
+        t[718] = "Interrupted while attempting to connect.";
+        t[719] = "Bağlanırken kesildi.";
+        t[722] = "Your security policy has prevented the connection from being attempted.  You probably need to grant the connect java.net.SocketPermission to the database server host and port that you wish to connect to.";
+        t[723] = "Güvenlik politikanız bağlantının kurulmasını engelledi. java.net.SocketPermission'a veritabanına ve de bağlanacağı porta bağlantı izni vermelisiniz.";
+        t[734] = "No function outputs were registered.";
+        t[735] = "Hiçbir fonksiyon çıktısı kaydedilmedi.";
+        t[736] = "{0} function takes one and only one argument.";
+        t[737] = "{0} fonksiyonunu yalnız tek bir parametre alabilir.";
+        t[744] = "This ResultSet is closed.";
+        t[745] = "ResultSet kapalıdır.";
+        t[746] = "Invalid character data was found.  This is most likely caused by stored data containing characters that are invalid for the character set the database was created in.  The most common example of this is storing 8bit data in a SQL_ASCII database.";
+        t[747] = "Geçersiz karakterler bulunmuştur. Bunun sebebi, verilerde veritabanın desteklediği dil kodlamadaki karakterlerin dışında bir karaktere rastlamasıdır. Bunun en yaygın örneği 8 bitlik veriyi SQL_ASCII veritabanında saklamasıdır.";
+        t[752] = "Error disabling autocommit";
+        t[753] = "autocommit'i devre dışı bırakma sırasında hata";
+        t[754] = "Ran out of memory retrieving query results.";
+        t[755] = "Sorgu sonuçları alınırken bellek yetersiz.";
+        t[756] = "Returning autogenerated keys is not supported.";
+        t[757] = "Otomatik üretilen değerlerin getirilmesi desteklenememktedir.";
+        t[760] = "Operation requires a scrollable ResultSet, but this ResultSet is FORWARD_ONLY.";
+        t[761] = "İşlem, kaydırılabilen ResultSet gerektirir, ancak bu ResultSet FORWARD_ONLYdir.";
+        t[762] = "A CallableStatement function was executed and the out parameter {0} was of type {1} however type {2} was registered.";
+        t[763] = "CallableStatement çalıştırıldı, ancak {2} tipi kaydedilmesine rağmen döndürme parametresi {0} ve tipi {1} idi.";
+        t[764] = "Unable to find server array type for provided name {0}.";
+        t[765] = "Belirtilen {0} adı için sunucu array tipi bulunamadı.";
+        t[768] = "Unknown ResultSet holdability setting: {0}.";
+        t[769] = "ResultSet tutabilme ayarı geçersiz: {0}.";
+        t[772] = "Transaction isolation level {0} not supported.";
+        t[773] = "Transaction isolation level {0} desteklenmiyor.";
+        t[774] = "Zero bytes may not occur in identifiers.";
+        t[775] = "Belirteçlerde sıfır bayt olamaz.";
+        t[776] = "No results were returned by the query.";
+        t[777] = "Sorgudan hiç bir sonuç dönmedi.";
+        t[778] = "A CallableStatement was executed with nothing returned.";
+        t[779] = "CallableStatement çalıştırma sonucunda veri getirilmedi.";
+        t[780] = "wasNull cannot be call before fetching a result.";
+        t[781] = "wasNull sonuç çekmeden önce çağırılamaz.";
+        t[784] = "Returning autogenerated keys by column index is not supported.";
+        t[785] = "Kolonların indexlenmesi ile otomatik olarak oluşturulan anahtarların döndürülmesi desteklenmiyor.";
+        t[786] = "This statement does not declare an OUT parameter.  Use '{' ?= call ... '}' to declare one.";
+        t[787] = "Bu komut OUT parametresi bildirmemektedir.  Bildirmek için '{' ?= call ... '}' kullanın.";
+        t[788] = "Can''t use relative move methods while on the insert row.";
+        t[789] = "Insert kaydı üzerinde relative move method kullanılamaz.";
+        t[790] = "A CallableStatement was executed with an invalid number of parameters";
+        t[791] = "CallableStatement geçersiz sayıda parametre ile çalıştırıldı.";
+        t[792] = "Connection is busy with another transaction";
+        t[793] = "Bağlantı, başka bir transaction tarafından meşgul ediliyor";
+        table = t;
     }
-    int incr = ((hash_val % 395) + 1) << 1;
-    for (;;) {
-      idx += incr;
-      if (idx >= 794)
-        idx -= 794;
-      Object found = table[idx];
-      if (found == null)
-        return null;
-      if (msgid.equals(found))
-        return table[idx + 1];
+
+    // Resolves msgid to its translation using the generated open-addressing
+    // hash table: `table` stores key/value pairs flat, with the msgid at an
+    // even slot and its translation in the following odd slot. Returns null
+    // when the key is absent so ResourceBundle falls back to the parent bundle.
+    // NOTE(review): layout and constants match GNU gettext `msgfmt --java`
+    // output — the table size (794 = 2 * 397 slots) and strides are fixed at
+    // generation time and must not be edited by hand.
+    @Override
+    public Object handleGetObject(String msgid) throws MissingResourceException {
+        int hash_val = msgid.hashCode() & 0x7fffffff;
+        // Primary probe: 397 buckets; index doubled because each entry
+        // occupies two consecutive slots (key, value).
+        int idx = (hash_val % 397) << 1;
+        {
+            Object found = table[idx];
+            if (found == null)
+                return null;
+            if (msgid.equals(found))
+                return table[idx + 1];
+        }
+        // Collision: double hashing with a second, hash-derived stride.
+        // The +1 keeps the stride non-zero; the <<1 keeps it on key slots.
+        int incr = ((hash_val % 395) + 1) << 1;
+        for (; ; ) {
+            idx += incr;
+            if (idx >= 794)
+                // Wrap around the end of the table.
+                idx -= 794;
+            Object found = table[idx];
+            if (found == null)
+                // Empty slot terminates the probe chain: key not present.
+                return null;
+            if (msgid.equals(found))
+                return table[idx + 1];
+        }
     }
-  }
 
-  @Override
-  public Enumeration<String> getKeys () {
-    return new Enumeration<>() {
-        private int idx = 0;
-        { while (idx < 794 && table[idx] == null) idx += 2; }
-      @Override
-        public boolean hasMoreElements () {
-          return (idx < 794);
-        }
-        @Override
-        public String nextElement () {
-          Object key = table[idx];
-          do idx += 2; while (idx < 794 && table[idx] == null);
-          return key.toString();
-        }
-      };
-  }
+    // Enumerates every msgid key in this bundle by walking the even (key)
+    // slots of the flat key/value hash table and skipping empty buckets.
+    @Override
+    public Enumeration<String> getKeys() {
+        return new Enumeration<>() {
+            // Index of the next occupied key slot; always even.
+            private int idx = 0;
 
-  public ResourceBundle getParent () {
-    return parent;
-  }
+
+            {
+                // Instance initializer: advance to the first occupied key slot.
+                while (idx < 794 && table[idx] == null) idx += 2;
+            }
+
+            @Override
+            public boolean hasMoreElements() {
+                return (idx < 794);
+            }
+
+            // NOTE(review): no NoSuchElementException guard — calling this
+            // past the end reads table[idx] out of bounds; callers must check
+            // hasMoreElements() first.
+            @Override
+            public String nextElement() {
+                Object key = table[idx];
+                // Step by 2 (key slots only) to the next occupied bucket.
+                do idx += 2; while (idx < 794 && table[idx] == null);
+                return key.toString();
+            }
+        };
+    }
+
+    /**
+     * Exposes the fallback bundle consulted when a key is absent here.
+     * NOTE(review): {@code parent} is declared outside this hunk (or is the
+     * inherited {@link ResourceBundle} field) — confirm against the full file.
+     *
+     * @return the parent resource bundle, possibly {@code null}
+     */
+    public ResourceBundle getParent() {
+        return this.parent;
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/translation/messages_zh_CN.java b/pgjdbc/src/main/java/org/postgresql/translation/messages_zh_CN.java
index 1694aaa..7e4bb87 100644
--- a/pgjdbc/src/main/java/org/postgresql/translation/messages_zh_CN.java
+++ b/pgjdbc/src/main/java/org/postgresql/translation/messages_zh_CN.java
@@ -5,283 +5,287 @@ import java.util.MissingResourceException;
 import java.util.ResourceBundle;
 
 public class messages_zh_CN extends ResourceBundle {
-  private static final String[] table;
-  static {
-    String[] t = new String[578];
-    t[0] = "";
-    t[1] = "Project-Id-Version: PostgreSQL JDBC Driver 8.3\nReport-Msgid-Bugs-To: \nPO-Revision-Date: 2008-01-31 14:34+0800\nLast-Translator: 郭朝益(ChaoYi, Kuo) <Kuo.ChaoYi@gmail.com>\nLanguage-Team: The PostgreSQL Development Team <Kuo.ChaoYi@gmail.com>\nLanguage: \nMIME-Version: 1.0\nContent-Type: text/plain; charset=UTF-8\nContent-Transfer-Encoding: 8bit\nX-Poedit-Language: Chinese\nX-Poedit-Country: CHINA\nX-Poedit-SourceCharset: utf-8\n";
-    t[6] = "Cannot call cancelRowUpdates() when on the insert row.";
-    t[7] = "不能在新增的数据列上呼叫 cancelRowUpdates()。";
-    t[8] = "The server requested password-based authentication, but no password was provided.";
-    t[9] = "服务器要求使用密码验证,但是密码并未提供。";
-    t[12] = "Detail: {0}";
-    t[13] = "详细:{0}";
-    t[16] = "Can''t refresh the insert row.";
-    t[17] = "无法重读新增的数据列。";
-    t[18] = "Connection has been closed.";
-    t[19] = "Connection 已经被关闭。";
-    t[24] = "Bad value for type {0} : {1}";
-    t[25] = "不良的类型值 {0} : {1}";
-    t[36] = "Truncation of large objects is only implemented in 8.3 and later servers.";
-    t[37] = "大型对象的截断(Truncation)仅被实作执行在 8.3 和后来的服务器。";
-    t[40] = "Cannot retrieve the name of an unnamed savepoint.";
-    t[41] = "无法取得未命名储存点(Savepoint)的名称。";
-    t[46] = "An error occurred while setting up the SSL connection.";
-    t[47] = "进行 SSL 连线时发生错误。";
-    t[50] = "suspend/resume not implemented";
-    t[51] = "暂停(suspend)/再继续(resume)尚未被实作。";
-    t[60] = "{0} function takes one and only one argument.";
-    t[61] = "{0} 函式取得一个且仅有一个引数。";
-    t[62] = "Conversion to type {0} failed: {1}.";
-    t[63] = "转换类型 {0} 失败:{1}。";
-    t[66] = "Conversion of money failed.";
-    t[67] = "money 转换失败。";
-    t[70] = "A result was returned when none was expected.";
-    t[71] = "传回预期之外的结果。";
-    t[80] = "This PooledConnection has already been closed.";
-    t[81] = "这个 PooledConnection 已经被关闭。";
-    t[84] = "Multiple ResultSets were returned by the query.";
-    t[85] = "查询传回多个 ResultSet。";
-    t[90] = "Not on the insert row.";
-    t[91] = "不在新增的数据列上。";
-    t[94] = "An unexpected result was returned by a query.";
-    t[95] = "传回非预期的查询结果。";
-    t[102] = "Internal Query: {0}";
-    t[103] = "内部查询:{0}";
-    t[106] = "The array index is out of range: {0}";
-    t[107] = "阵列索引超过许可范围:{0}";
-    t[112] = "Connection attempt timed out.";
-    t[113] = "Connection 尝试逾时。";
-    t[114] = "Unable to find name datatype in the system catalogs.";
-    t[115] = "在系统 catalog 中找不到名称数据类型(datatype)。";
-    t[116] = "Something unusual has occurred to cause the driver to fail. Please report this exception.";
-    t[117] = "不明的原因导致驱动程序造成失败,请回报这个例外。";
-    t[120] = "The array index is out of range: {0}, number of elements: {1}.";
-    t[121] = "阵列索引超过许可范围:{0},元素数量:{1}。";
-    t[138] = "Invalid flags {0}";
-    t[139] = "无效的旗标 flags {0}";
-    t[146] = "Unexpected error writing large object to database.";
-    t[147] = "将大型对象(large object)写入数据库时发生不明错误。";
-    t[162] = "Query timeout must be a value greater than or equals to 0.";
-    t[163] = "查询逾时等候时间必须大于或等于 0。";
-    t[170] = "Unknown type {0}.";
-    t[171] = "不明的类型 {0}";
-    t[174] = "The server''s standard_conforming_strings parameter was reported as {0}. The JDBC driver expected on or off.";
-    t[175] = "这服务器的 standard_conforming_strings 参数已回报为 {0},JDBC 驱动程序已预期开启或是关闭。";
-    t[176] = "Invalid character data was found.  This is most likely caused by stored data containing characters that are invalid for the character set the database was created in.  The most common example of this is storing 8bit data in a SQL_ASCII database.";
-    t[177] = "发现不合法的字元,可能的原因是欲储存的数据中包含数据库的字元集不支援的字码,其中最常见例子的就是将 8 位元数据存入使用 SQL_ASCII 编码的数据库中。";
-    t[178] = "The column index is out of range: {0}, number of columns: {1}.";
-    t[179] = "栏位索引超过许可范围:{0},栏位数:{1}。";
-    t[180] = "The connection attempt failed.";
-    t[181] = "尝试连线已失败。";
-    t[182] = "No value specified for parameter {0}.";
-    t[183] = "未设定参数值 {0} 的内容。";
-    t[190] = "Provided Reader failed.";
-    t[191] = "提供的 Reader 已失败。";
-    t[194] = "Unsupported value for stringtype parameter: {0}";
-    t[195] = "字符类型参数值未被支持:{0}";
-    t[198] = "A CallableStatement was declared, but no call to registerOutParameter(1, <some type>) was made.";
-    t[199] = "已经宣告 CallableStatement 函式,但是尚未呼叫 registerOutParameter (1, <some_type>) 。";
-    t[204] = "Currently positioned before the start of the ResultSet.  You cannot call deleteRow() here.";
-    t[205] = "不能在 ResultSet 的第一笔数据之前呼叫 deleteRow()。";
-    t[214] = "The maximum field size must be a value greater than or equal to 0.";
-    t[215] = "最大栏位容量必须大于或等于 0。";
-    t[216] = "Fetch size must be a value greater to or equal to 0.";
-    t[217] = "数据读取笔数(fetch size)必须大于或等于 0。";
-    t[220] = "PostgreSQL LOBs can only index to: {0}";
-    t[221] = "PostgreSQL LOBs 仅能索引到:{0}";
-    t[224] = "The JVM claims not to support the encoding: {0}";
-    t[225] = "JVM 声明并不支援编码:{0} 。";
-    t[226] = "Interval {0} not yet implemented";
-    t[227] = "隔绝 {0} 尚未被实作。";
-    t[238] = "Fastpath call {0} - No result was returned and we expected an integer.";
-    t[239] = "Fastpath 呼叫 {0} - 没有传回值,且应该传回一个整数。";
-    t[246] = "ResultSets with concurrency CONCUR_READ_ONLY cannot be updated.";
-    t[247] = "ResultSets 与并发同作(Concurrency) CONCUR_READ_ONLY 不能被更新。";
-    t[250] = "This statement does not declare an OUT parameter.  Use '{' ?= call ... '}' to declare one.";
-    t[251] = "这个 statement 未宣告 OUT 参数,使用 '{' ?= call ... '}' 宣告一个。";
-    t[256] = "Cannot reference a savepoint after it has been released.";
-    t[257] = "无法参照已经被释放的储存点。";
-    t[260] = "Unsupported Types value: {0}";
-    t[261] = "未被支持的类型值:{0}";
-    t[266] = "Protocol error.  Session setup failed.";
-    t[267] = "通讯协定错误,Session 初始化失败。";
-    t[274] = "Currently positioned after the end of the ResultSet.  You cannot call deleteRow() here.";
-    t[275] = "不能在 ResultSet 的最后一笔数据之后呼叫 deleteRow()。";
-    t[278] = "Internal Position: {0}";
-    t[279] = "内部位置:{0}";
-    t[280] = "Zero bytes may not occur in identifiers.";
-    t[281] = "在标识识别符中不存在零位元组。";
-    t[288] = "{0} function doesn''t take any argument.";
-    t[289] = "{0} 函式无法取得任何的引数。";
-    t[300] = "This statement has been closed.";
-    t[301] = "这个 statement 已经被关闭。";
-    t[318] = "Cannot establish a savepoint in auto-commit mode.";
-    t[319] = "在自动确认事物交易模式无法建立储存点(Savepoint)。";
-    t[320] = "Position: {0}";
-    t[321] = "位置:{0}";
-    t[322] = "ResultSet is not updateable.  The query that generated this result set must select only one table, and must select all primary keys from that table. See the JDBC 2.1 API Specification, section 5.6 for more details.";
-    t[323] = "不可更新的 ResultSet。用来产生这个 ResultSet 的 SQL 命令只能操作一个数据表,并且必需选择所有主键栏位,详细请参阅 JDBC 2.1 API 规格书 5.6 节。";
-    t[330] = "This ResultSet is closed.";
-    t[331] = "这个 ResultSet 已经被关闭。";
-    t[338] = "Parameter of type {0} was registered, but call to get{1} (sqltype={2}) was made.";
-    t[339] = "已注册参数类型 {0},但是又呼叫了get{1}(sqltype={2})。";
-    t[342] = "Transaction isolation level {0} not supported.";
-    t[343] = "不支援交易隔绝等级 {0} 。";
-    t[344] = "Statement has been closed.";
-    t[345] = "Sstatement 已经被关闭。";
-    t[352] = "Server SQLState: {0}";
-    t[353] = "服务器 SQLState:{0}";
-    t[354] = "No primary key found for table {0}.";
-    t[355] = "{0} 数据表中未找到主键(Primary key)。";
-    t[362] = "Cannot convert an instance of {0} to type {1}";
-    t[363] = "无法转换 {0} 到类型 {1} 的实例";
-    t[364] = "DataSource has been closed.";
-    t[365] = "DataSource 已经被关闭。";
-    t[368] = "The column name {0} was not found in this ResultSet.";
-    t[369] = "ResultSet 中找不到栏位名称 {0}。";
-    t[372] = "ResultSet not positioned properly, perhaps you need to call next.";
-    t[373] = "查询结果指标位置不正确,您也许需要呼叫 ResultSet 的 next() 方法。";
-    t[378] = "Cannot update the ResultSet because it is either before the start or after the end of the results.";
-    t[379] = "无法更新 ResultSet,可能在第一笔数据之前或最未笔数据之后。";
-    t[380] = "Method {0} is not yet implemented.";
-    t[381] = "这个 {0} 方法尚未被实作。";
-    t[382] = "{0} function takes two or three arguments.";
-    t[383] = "{0} 函式取得二个或三个引数。";
-    t[384] = "The JVM claims not to support the {0} encoding.";
-    t[385] = "JVM 声明并不支援 {0} 编码。";
-    t[396] = "Unknown Response Type {0}.";
-    t[397] = "不明的回应类型 {0}。";
-    t[398] = "The parameter index is out of range: {0}, number of parameters: {1}.";
-    t[399] = "参数索引超出许可范围:{0},参数总数:{1}。";
-    t[400] = "Where: {0}";
-    t[401] = "在位置:{0}";
-    t[406] = "Cannot call deleteRow() when on the insert row.";
-    t[407] = "不能在新增的数据上呼叫 deleteRow()。";
-    t[414] = "{0} function takes four and only four argument.";
-    t[415] = "{0} 函式取得四个且仅有四个引数。";
-    t[416] = "Unable to translate data into the desired encoding.";
-    t[417] = "无法将数据转成目标编码。";
-    t[424] = "Can''t use relative move methods while on the insert row.";
-    t[425] = "不能在新增的数据列上使用相对位置 move 方法。";
-    t[434] = "Invalid stream length {0}.";
-    t[435] = "无效的串流长度 {0}.";
-    t[436] = "The driver currently does not support COPY operations.";
-    t[437] = "驱动程序目前不支援 COPY 操作。";
-    t[440] = "Maximum number of rows must be a value grater than or equal to 0.";
-    t[441] = "最大数据读取笔数必须大于或等于 0。";
-    t[446] = "Failed to create object for: {0}.";
-    t[447] = "为 {0} 建立对象失败。";
-    t[448] = "{0} function takes three and only three arguments.";
-    t[449] = "{0} 函式取得三个且仅有三个引数。";
-    t[450] = "Conversion of interval failed";
-    t[451] = "隔绝(Interval)转换失败。";
-    t[452] = "Cannot tell if path is open or closed: {0}.";
-    t[453] = "无法得知 path 是开启或关闭:{0}。";
-    t[460] = "Provided InputStream failed.";
-    t[461] = "提供的 InputStream 已失败。";
-    t[462] = "Invalid fetch direction constant: {0}.";
-    t[463] = "无效的 fetch 方向常数:{0}。";
-    t[472] = "Invalid protocol state requested. Attempted transaction interleaving is not supported. xid={0}, currentXid={1}, state={2}, flags={3}";
-    t[473] = "事物交易隔绝(Transaction interleaving)未被实作。xid={0}, currentXid={1}, state={2}, flags={3}";
-    t[474] = "{0} function takes two and only two arguments.";
-    t[475] = "{0} 函式取得二个且仅有二个引数。";
-    t[476] = "There are no rows in this ResultSet.";
-    t[477] = "ResultSet 中找不到数据列。";
-    t[478] = "Zero bytes may not occur in string parameters.";
-    t[479] = "字符参数不能有 0 个位元组。";
-    t[480] = "Cannot call updateRow() when on the insert row.";
-    t[481] = "不能在新增的数据列上呼叫 deleteRow()。";
-    t[482] = "Connection has been closed automatically because a new connection was opened for the same PooledConnection or the PooledConnection has been closed.";
-    t[483] = "Connection 已自动结束,因为一个新的  PooledConnection 连线被开启或者或 PooledConnection 已被关闭。";
-    t[488] = "A CallableStatement function was executed and the out parameter {0} was of type {1} however type {2} was registered.";
-    t[489] = "一个 CallableStatement 执行函式后输出的参数类型为 {1} 值为 {0},但是已注册的类型是 {2}。";
-    t[494] = "Cannot cast an instance of {0} to type {1}";
-    t[495] = "不能转换一个 {0} 实例到类型 {1}";
-    t[498] = "Cannot retrieve the id of a named savepoint.";
-    t[499] = "无法取得已命名储存点的 id。";
-    t[500] = "Cannot change transaction read-only property in the middle of a transaction.";
-    t[501] = "不能在事物交易过程中改变事物交易唯读属性。";
-    t[502] = "The server does not support SSL.";
-    t[503] = "服务器不支援 SSL 连线。";
-    t[510] = "A connection could not be made using the requested protocol {0}.";
-    t[511] = "无法以要求的通讯协定 {0} 建立连线。";
-    t[512] = "The authentication type {0} is not supported. Check that you have configured the pg_hba.conf file to include the client''s IP address or subnet, and that it is using an authentication scheme supported by the driver.";
-    t[513] = "不支援 {0} 验证类型。请核对您已经组态 pg_hba.conf 文件包含客户端的IP位址或网路区段,以及驱动程序所支援的验证架构模式已被支援。";
-    t[514] = "Malformed function or procedure escape syntax at offset {0}.";
-    t[515] = "不正确的函式或程序 escape 语法于 {0}。";
-    t[516] = "The server''s DateStyle parameter was changed to {0}. The JDBC driver requires DateStyle to begin with ISO for correct operation.";
-    t[517] = "这服务器的 DateStyle 参数被更改成 {0},JDBC 驱动程序请求需要 DateStyle 以 ISO 开头以正确工作。";
-    t[518] = "No results were returned by the query.";
-    t[519] = "查询没有传回任何结果。";
-    t[520] = "Location: File: {0}, Routine: {1}, Line: {2}";
-    t[521] = "位置:文件:{0},常式:{1},行:{2}";
-    t[526] = "Hint: {0}";
-    t[527] = "建议:{0}";
-    t[528] = "A CallableStatement was executed with nothing returned.";
-    t[529] = "一个 CallableStatement 执行函式后没有传回值。";
-    t[530] = "Unknown ResultSet holdability setting: {0}.";
-    t[531] = "未知的 ResultSet 可适用的设置:{0}。";
-    t[540] = "Cannot change transaction isolation level in the middle of a transaction.";
-    t[541] = "不能在事务交易过程中改变事物交易隔绝等级。";
-    t[544] = "The fastpath function {0} is unknown.";
-    t[545] = "不明的 fastpath 函式 {0}。";
-    t[546] = "Can''t use query methods that take a query string on a PreparedStatement.";
-    t[547] = "在 PreparedStatement 上不能使用获取查询字符的查询方法。";
-    t[556] = "Operation requires a scrollable ResultSet, but this ResultSet is FORWARD_ONLY.";
-    t[557] = "操作要求可卷动的 ResultSet,但此 ResultSet 是 FORWARD_ONLY。";
-    t[564] = "Unknown Types value.";
-    t[565] = "不明的类型值。";
-    t[570] = "Large Objects may not be used in auto-commit mode.";
-    t[571] = "大型对象无法被使用在自动确认事物交易模式。";
-    table = t;
-  }
+    private static final String[] table;
 
-  @Override
-  public Object handleGetObject (String msgid) throws MissingResourceException {
-    int hash_val = msgid.hashCode() & 0x7fffffff;
-    int idx = (hash_val % 289) << 1;
-    {
-      Object found = table[idx];
-      if (found == null)
-        return null;
-      if (msgid.equals(found))
-        return table[idx + 1];
+    static {
+        String[] t = new String[578];
+        t[0] = "";
+        t[1] = "Project-Id-Version: PostgreSQL JDBC Driver 8.3\nReport-Msgid-Bugs-To: \nPO-Revision-Date: 2008-01-31 14:34+0800\nLast-Translator: 郭朝益(ChaoYi, Kuo) <Kuo.ChaoYi@gmail.com>\nLanguage-Team: The PostgreSQL Development Team <Kuo.ChaoYi@gmail.com>\nLanguage: \nMIME-Version: 1.0\nContent-Type: text/plain; charset=UTF-8\nContent-Transfer-Encoding: 8bit\nX-Poedit-Language: Chinese\nX-Poedit-Country: CHINA\nX-Poedit-SourceCharset: utf-8\n";
+        t[6] = "Cannot call cancelRowUpdates() when on the insert row.";
+        t[7] = "不能在新增的数据列上呼叫 cancelRowUpdates()。";
+        t[8] = "The server requested password-based authentication, but no password was provided.";
+        t[9] = "服务器要求使用密码验证,但是密码并未提供。";
+        t[12] = "Detail: {0}";
+        t[13] = "详细:{0}";
+        t[16] = "Can''t refresh the insert row.";
+        t[17] = "无法重读新增的数据列。";
+        t[18] = "Connection has been closed.";
+        t[19] = "Connection 已经被关闭。";
+        t[24] = "Bad value for type {0} : {1}";
+        t[25] = "不良的类型值 {0} : {1}";
+        t[36] = "Truncation of large objects is only implemented in 8.3 and later servers.";
+        t[37] = "大型对象的截断(Truncation)仅被实作执行在 8.3 和后来的服务器。";
+        t[40] = "Cannot retrieve the name of an unnamed savepoint.";
+        t[41] = "无法取得未命名储存点(Savepoint)的名称。";
+        t[46] = "An error occurred while setting up the SSL connection.";
+        t[47] = "进行 SSL 连线时发生错误。";
+        t[50] = "suspend/resume not implemented";
+        t[51] = "暂停(suspend)/再继续(resume)尚未被实作。";
+        t[60] = "{0} function takes one and only one argument.";
+        t[61] = "{0} 函式取得一个且仅有一个引数。";
+        t[62] = "Conversion to type {0} failed: {1}.";
+        t[63] = "转换类型 {0} 失败:{1}。";
+        t[66] = "Conversion of money failed.";
+        t[67] = "money 转换失败。";
+        t[70] = "A result was returned when none was expected.";
+        t[71] = "传回预期之外的结果。";
+        t[80] = "This PooledConnection has already been closed.";
+        t[81] = "这个 PooledConnection 已经被关闭。";
+        t[84] = "Multiple ResultSets were returned by the query.";
+        t[85] = "查询传回多个 ResultSet。";
+        t[90] = "Not on the insert row.";
+        t[91] = "不在新增的数据列上。";
+        t[94] = "An unexpected result was returned by a query.";
+        t[95] = "传回非预期的查询结果。";
+        t[102] = "Internal Query: {0}";
+        t[103] = "内部查询:{0}";
+        t[106] = "The array index is out of range: {0}";
+        t[107] = "阵列索引超过许可范围:{0}";
+        t[112] = "Connection attempt timed out.";
+        t[113] = "Connection 尝试逾时。";
+        t[114] = "Unable to find name datatype in the system catalogs.";
+        t[115] = "在系统 catalog 中找不到名称数据类型(datatype)。";
+        t[116] = "Something unusual has occurred to cause the driver to fail. Please report this exception.";
+        t[117] = "不明的原因导致驱动程序造成失败,请回报这个例外。";
+        t[120] = "The array index is out of range: {0}, number of elements: {1}.";
+        t[121] = "阵列索引超过许可范围:{0},元素数量:{1}。";
+        t[138] = "Invalid flags {0}";
+        t[139] = "无效的旗标 flags {0}";
+        t[146] = "Unexpected error writing large object to database.";
+        t[147] = "将大型对象(large object)写入数据库时发生不明错误。";
+        t[162] = "Query timeout must be a value greater than or equals to 0.";
+        t[163] = "查询逾时等候时间必须大于或等于 0。";
+        t[170] = "Unknown type {0}.";
+        t[171] = "不明的类型 {0}";
+        t[174] = "The server''s standard_conforming_strings parameter was reported as {0}. The JDBC driver expected on or off.";
+        t[175] = "这服务器的 standard_conforming_strings 参数已回报为 {0},JDBC 驱动程序已预期开启或是关闭。";
+        t[176] = "Invalid character data was found.  This is most likely caused by stored data containing characters that are invalid for the character set the database was created in.  The most common example of this is storing 8bit data in a SQL_ASCII database.";
+        t[177] = "发现不合法的字元,可能的原因是欲储存的数据中包含数据库的字元集不支援的字码,其中最常见例子的就是将 8 位元数据存入使用 SQL_ASCII 编码的数据库中。";
+        t[178] = "The column index is out of range: {0}, number of columns: {1}.";
+        t[179] = "栏位索引超过许可范围:{0},栏位数:{1}。";
+        t[180] = "The connection attempt failed.";
+        t[181] = "尝试连线已失败。";
+        t[182] = "No value specified for parameter {0}.";
+        t[183] = "未设定参数值 {0} 的内容。";
+        t[190] = "Provided Reader failed.";
+        t[191] = "提供的 Reader 已失败。";
+        t[194] = "Unsupported value for stringtype parameter: {0}";
+        t[195] = "字符类型参数值未被支持:{0}";
+        t[198] = "A CallableStatement was declared, but no call to registerOutParameter(1, <some type>) was made.";
+        t[199] = "已经宣告 CallableStatement 函式,但是尚未呼叫 registerOutParameter (1, <some_type>) 。";
+        t[204] = "Currently positioned before the start of the ResultSet.  You cannot call deleteRow() here.";
+        t[205] = "不能在 ResultSet 的第一笔数据之前呼叫 deleteRow()。";
+        t[214] = "The maximum field size must be a value greater than or equal to 0.";
+        t[215] = "最大栏位容量必须大于或等于 0。";
+        t[216] = "Fetch size must be a value greater to or equal to 0.";
+        t[217] = "数据读取笔数(fetch size)必须大于或等于 0。";
+        t[220] = "PostgreSQL LOBs can only index to: {0}";
+        t[221] = "PostgreSQL LOBs 仅能索引到:{0}";
+        t[224] = "The JVM claims not to support the encoding: {0}";
+        t[225] = "JVM 声明并不支援编码:{0} 。";
+        t[226] = "Interval {0} not yet implemented";
+        t[227] = "隔绝 {0} 尚未被实作。";
+        t[238] = "Fastpath call {0} - No result was returned and we expected an integer.";
+        t[239] = "Fastpath 呼叫 {0} - 没有传回值,且应该传回一个整数。";
+        t[246] = "ResultSets with concurrency CONCUR_READ_ONLY cannot be updated.";
+        t[247] = "ResultSets 与并发同作(Concurrency) CONCUR_READ_ONLY 不能被更新。";
+        t[250] = "This statement does not declare an OUT parameter.  Use '{' ?= call ... '}' to declare one.";
+        t[251] = "这个 statement 未宣告 OUT 参数,使用 '{' ?= call ... '}' 宣告一个。";
+        t[256] = "Cannot reference a savepoint after it has been released.";
+        t[257] = "无法参照已经被释放的储存点。";
+        t[260] = "Unsupported Types value: {0}";
+        t[261] = "未被支持的类型值:{0}";
+        t[266] = "Protocol error.  Session setup failed.";
+        t[267] = "通讯协定错误,Session 初始化失败。";
+        t[274] = "Currently positioned after the end of the ResultSet.  You cannot call deleteRow() here.";
+        t[275] = "不能在 ResultSet 的最后一笔数据之后呼叫 deleteRow()。";
+        t[278] = "Internal Position: {0}";
+        t[279] = "内部位置:{0}";
+        t[280] = "Zero bytes may not occur in identifiers.";
+        t[281] = "在标识识别符中不存在零位元组。";
+        t[288] = "{0} function doesn''t take any argument.";
+        t[289] = "{0} 函式无法取得任何的引数。";
+        t[300] = "This statement has been closed.";
+        t[301] = "这个 statement 已经被关闭。";
+        t[318] = "Cannot establish a savepoint in auto-commit mode.";
+        t[319] = "在自动确认事物交易模式无法建立储存点(Savepoint)。";
+        t[320] = "Position: {0}";
+        t[321] = "位置:{0}";
+        t[322] = "ResultSet is not updateable.  The query that generated this result set must select only one table, and must select all primary keys from that table. See the JDBC 2.1 API Specification, section 5.6 for more details.";
+        t[323] = "不可更新的 ResultSet。用来产生这个 ResultSet 的 SQL 命令只能操作一个数据表,并且必需选择所有主键栏位,详细请参阅 JDBC 2.1 API 规格书 5.6 节。";
+        t[330] = "This ResultSet is closed.";
+        t[331] = "这个 ResultSet 已经被关闭。";
+        t[338] = "Parameter of type {0} was registered, but call to get{1} (sqltype={2}) was made.";
+        t[339] = "已注册参数类型 {0},但是又呼叫了get{1}(sqltype={2})。";
+        t[342] = "Transaction isolation level {0} not supported.";
+        t[343] = "不支援交易隔绝等级 {0} 。";
+        t[344] = "Statement has been closed.";
+        t[345] = "Statement 已经被关闭。";
+        t[352] = "Server SQLState: {0}";
+        t[353] = "服务器 SQLState:{0}";
+        t[354] = "No primary key found for table {0}.";
+        t[355] = "{0} 数据表中未找到主键(Primary key)。";
+        t[362] = "Cannot convert an instance of {0} to type {1}";
+        t[363] = "无法转换 {0} 到类型 {1} 的实例";
+        t[364] = "DataSource has been closed.";
+        t[365] = "DataSource 已经被关闭。";
+        t[368] = "The column name {0} was not found in this ResultSet.";
+        t[369] = "ResultSet 中找不到栏位名称 {0}。";
+        t[372] = "ResultSet not positioned properly, perhaps you need to call next.";
+        t[373] = "查询结果指标位置不正确,您也许需要呼叫 ResultSet 的 next() 方法。";
+        t[378] = "Cannot update the ResultSet because it is either before the start or after the end of the results.";
+        t[379] = "无法更新 ResultSet,可能在第一笔数据之前或最未笔数据之后。";
+        t[380] = "Method {0} is not yet implemented.";
+        t[381] = "这个 {0} 方法尚未被实作。";
+        t[382] = "{0} function takes two or three arguments.";
+        t[383] = "{0} 函式取得二个或三个引数。";
+        t[384] = "The JVM claims not to support the {0} encoding.";
+        t[385] = "JVM 声明并不支援 {0} 编码。";
+        t[396] = "Unknown Response Type {0}.";
+        t[397] = "不明的回应类型 {0}。";
+        t[398] = "The parameter index is out of range: {0}, number of parameters: {1}.";
+        t[399] = "参数索引超出许可范围:{0},参数总数:{1}。";
+        t[400] = "Where: {0}";
+        t[401] = "在位置:{0}";
+        t[406] = "Cannot call deleteRow() when on the insert row.";
+        t[407] = "不能在新增的数据上呼叫 deleteRow()。";
+        t[414] = "{0} function takes four and only four argument.";
+        t[415] = "{0} 函式取得四个且仅有四个引数。";
+        t[416] = "Unable to translate data into the desired encoding.";
+        t[417] = "无法将数据转成目标编码。";
+        t[424] = "Can''t use relative move methods while on the insert row.";
+        t[425] = "不能在新增的数据列上使用相对位置 move 方法。";
+        t[434] = "Invalid stream length {0}.";
+        t[435] = "无效的串流长度 {0}.";
+        t[436] = "The driver currently does not support COPY operations.";
+        t[437] = "驱动程序目前不支援 COPY 操作。";
+        t[440] = "Maximum number of rows must be a value grater than or equal to 0.";
+        t[441] = "最大数据读取笔数必须大于或等于 0。";
+        t[446] = "Failed to create object for: {0}.";
+        t[447] = "为 {0} 建立对象失败。";
+        t[448] = "{0} function takes three and only three arguments.";
+        t[449] = "{0} 函式取得三个且仅有三个引数。";
+        t[450] = "Conversion of interval failed";
+        t[451] = "隔绝(Interval)转换失败。";
+        t[452] = "Cannot tell if path is open or closed: {0}.";
+        t[453] = "无法得知 path 是开启或关闭:{0}。";
+        t[460] = "Provided InputStream failed.";
+        t[461] = "提供的 InputStream 已失败。";
+        t[462] = "Invalid fetch direction constant: {0}.";
+        t[463] = "无效的 fetch 方向常数:{0}。";
+        t[472] = "Invalid protocol state requested. Attempted transaction interleaving is not supported. xid={0}, currentXid={1}, state={2}, flags={3}";
+        t[473] = "事物交易隔绝(Transaction interleaving)未被实作。xid={0}, currentXid={1}, state={2}, flags={3}";
+        t[474] = "{0} function takes two and only two arguments.";
+        t[475] = "{0} 函式取得二个且仅有二个引数。";
+        t[476] = "There are no rows in this ResultSet.";
+        t[477] = "ResultSet 中找不到数据列。";
+        t[478] = "Zero bytes may not occur in string parameters.";
+        t[479] = "字符参数不能有 0 个位元组。";
+        t[480] = "Cannot call updateRow() when on the insert row.";
+        t[481] = "不能在新增的数据列上呼叫 updateRow()。";
+        t[482] = "Connection has been closed automatically because a new connection was opened for the same PooledConnection or the PooledConnection has been closed.";
+        t[483] = "Connection 已自动结束,因为一个新的 PooledConnection 连线被开启或者 PooledConnection 已被关闭。";
+        t[488] = "A CallableStatement function was executed and the out parameter {0} was of type {1} however type {2} was registered.";
+        t[489] = "一个 CallableStatement 执行函式后输出的参数类型为 {1} 值为 {0},但是已注册的类型是 {2}。";
+        t[494] = "Cannot cast an instance of {0} to type {1}";
+        t[495] = "不能转换一个 {0} 实例到类型 {1}";
+        t[498] = "Cannot retrieve the id of a named savepoint.";
+        t[499] = "无法取得已命名储存点的 id。";
+        t[500] = "Cannot change transaction read-only property in the middle of a transaction.";
+        t[501] = "不能在事物交易过程中改变事物交易唯读属性。";
+        t[502] = "The server does not support SSL.";
+        t[503] = "服务器不支援 SSL 连线。";
+        t[510] = "A connection could not be made using the requested protocol {0}.";
+        t[511] = "无法以要求的通讯协定 {0} 建立连线。";
+        t[512] = "The authentication type {0} is not supported. Check that you have configured the pg_hba.conf file to include the client''s IP address or subnet, and that it is using an authentication scheme supported by the driver.";
+        t[513] = "不支援 {0} 验证类型。请核对您已经组态 pg_hba.conf 文件包含客户端的IP位址或网路区段,以及驱动程序所支援的验证架构模式已被支援。";
+        t[514] = "Malformed function or procedure escape syntax at offset {0}.";
+        t[515] = "不正确的函式或程序 escape 语法于 {0}。";
+        t[516] = "The server''s DateStyle parameter was changed to {0}. The JDBC driver requires DateStyle to begin with ISO for correct operation.";
+        t[517] = "这服务器的 DateStyle 参数被更改成 {0},JDBC 驱动程序请求需要 DateStyle 以 ISO 开头以正确工作。";
+        t[518] = "No results were returned by the query.";
+        t[519] = "查询没有传回任何结果。";
+        t[520] = "Location: File: {0}, Routine: {1}, Line: {2}";
+        t[521] = "位置:文件:{0},常式:{1},行:{2}";
+        t[526] = "Hint: {0}";
+        t[527] = "建议:{0}";
+        t[528] = "A CallableStatement was executed with nothing returned.";
+        t[529] = "一个 CallableStatement 执行函式后没有传回值。";
+        t[530] = "Unknown ResultSet holdability setting: {0}.";
+        t[531] = "未知的 ResultSet 可适用的设置:{0}。";
+        t[540] = "Cannot change transaction isolation level in the middle of a transaction.";
+        t[541] = "不能在事务交易过程中改变事物交易隔绝等级。";
+        t[544] = "The fastpath function {0} is unknown.";
+        t[545] = "不明的 fastpath 函式 {0}。";
+        t[546] = "Can''t use query methods that take a query string on a PreparedStatement.";
+        t[547] = "在 PreparedStatement 上不能使用获取查询字符的查询方法。";
+        t[556] = "Operation requires a scrollable ResultSet, but this ResultSet is FORWARD_ONLY.";
+        t[557] = "操作要求可卷动的 ResultSet,但此 ResultSet 是 FORWARD_ONLY。";
+        t[564] = "Unknown Types value.";
+        t[565] = "不明的类型值。";
+        t[570] = "Large Objects may not be used in auto-commit mode.";
+        t[571] = "大型对象无法被使用在自动确认事物交易模式。";
+        table = t;
     }
-    int incr = ((hash_val % 287) + 1) << 1;
-    for (;;) {
-      idx += incr;
-      if (idx >= 578)
-        idx -= 578;
-      Object found = table[idx];
-      if (found == null)
-        return null;
-      if (msgid.equals(found))
-        return table[idx + 1];
+
+    @Override
+    public Object handleGetObject(String msgid) throws MissingResourceException {
+        int hash_val = msgid.hashCode() & 0x7fffffff;
+        int idx = (hash_val % 289) << 1;
+        {
+            Object found = table[idx];
+            if (found == null)
+                return null;
+            if (msgid.equals(found))
+                return table[idx + 1];
+        }
+        int incr = ((hash_val % 287) + 1) << 1;
+        for (; ; ) {
+            idx += incr;
+            if (idx >= 578)
+                idx -= 578;
+            Object found = table[idx];
+            if (found == null)
+                return null;
+            if (msgid.equals(found))
+                return table[idx + 1];
+        }
     }
-  }
 
-  @Override
-  public Enumeration<String> getKeys () {
-    return new Enumeration<>() {
-        private int idx = 0;
-        { while (idx < 578 && table[idx] == null) idx += 2; }
+    @Override
+    public Enumeration<String> getKeys() {
+        return new Enumeration<>() {
+            private int idx = 0;
 
-      @Override
-        public boolean hasMoreElements () {
-          return (idx < 578);
-        }
+            {
+                while (idx < 578 && table[idx] == null) idx += 2;
+            }
 
-        @Override
-        public String nextElement () {
-          Object key = table[idx];
-          do idx += 2; while (idx < 578 && table[idx] == null);
-          return key.toString();
-        }
-      };
-  }
+            @Override
+            public boolean hasMoreElements() {
+                return (idx < 578);
+            }
 
-  public ResourceBundle getParent () {
-    return parent;
-  }
+            @Override
+            public String nextElement() {
+                Object key = table[idx];
+                do idx += 2; while (idx < 578 && table[idx] == null);
+                return key.toString();
+            }
+        };
+    }
+
+    public ResourceBundle getParent() {
+        return parent;
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/translation/messages_zh_TW.java b/pgjdbc/src/main/java/org/postgresql/translation/messages_zh_TW.java
index a010086..dc283e3 100644
--- a/pgjdbc/src/main/java/org/postgresql/translation/messages_zh_TW.java
+++ b/pgjdbc/src/main/java/org/postgresql/translation/messages_zh_TW.java
@@ -5,282 +5,286 @@ import java.util.MissingResourceException;
 import java.util.ResourceBundle;
 
 public class messages_zh_TW extends ResourceBundle {
-  private static final String[] table;
-  static {
-    String[] t = new String[578];
-    t[0] = "";
-    t[1] = "Project-Id-Version: PostgreSQL JDBC Driver 8.3\nReport-Msgid-Bugs-To: \nPO-Revision-Date: 2008-01-21 16:50+0800\nLast-Translator: 郭朝益(ChaoYi, Kuo) <Kuo.ChaoYi@gmail.com>\nLanguage-Team: The PostgreSQL Development Team <Kuo.ChaoYi@gmail.com>\nLanguage: \nMIME-Version: 1.0\nContent-Type: text/plain; charset=UTF-8\nContent-Transfer-Encoding: 8bit\nX-Poedit-Language: Chinese\nX-Poedit-Country: TAIWAN\nX-Poedit-SourceCharset: utf-8\n";
-    t[6] = "Cannot call cancelRowUpdates() when on the insert row.";
-    t[7] = "不能在新增的資料列上呼叫 cancelRowUpdates()。";
-    t[8] = "The server requested password-based authentication, but no password was provided.";
-    t[9] = "伺服器要求使用密碼驗證,但是密碼並未提供。";
-    t[12] = "Detail: {0}";
-    t[13] = "詳細:{0}";
-    t[16] = "Can''t refresh the insert row.";
-    t[17] = "無法重讀新增的資料列。";
-    t[18] = "Connection has been closed.";
-    t[19] = "Connection 已經被關閉。";
-    t[24] = "Bad value for type {0} : {1}";
-    t[25] = "不良的型別值 {0} : {1}";
-    t[36] = "Truncation of large objects is only implemented in 8.3 and later servers.";
-    t[37] = "大型物件的截斷(Truncation)僅被實作執行在 8.3 和後來的伺服器。";
-    t[40] = "Cannot retrieve the name of an unnamed savepoint.";
-    t[41] = "無法取得未命名儲存點(Savepoint)的名稱。";
-    t[46] = "An error occurred while setting up the SSL connection.";
-    t[47] = "進行 SSL 連線時發生錯誤。";
-    t[50] = "suspend/resume not implemented";
-    t[51] = "暫停(suspend)/再繼續(resume)尚未被實作。";
-    t[60] = "{0} function takes one and only one argument.";
-    t[61] = "{0} 函式取得一個且僅有一個引數。";
-    t[62] = "Conversion to type {0} failed: {1}.";
-    t[63] = "轉換型別 {0} 失敗:{1}。";
-    t[66] = "Conversion of money failed.";
-    t[67] = "money 轉換失敗。";
-    t[70] = "A result was returned when none was expected.";
-    t[71] = "傳回預期之外的結果。";
-    t[80] = "This PooledConnection has already been closed.";
-    t[81] = "這個 PooledConnection 已經被關閉。";
-    t[84] = "Multiple ResultSets were returned by the query.";
-    t[85] = "查詢傳回多個 ResultSet。";
-    t[90] = "Not on the insert row.";
-    t[91] = "不在新增的資料列上。";
-    t[94] = "An unexpected result was returned by a query.";
-    t[95] = "傳回非預期的查詢結果。";
-    t[102] = "Internal Query: {0}";
-    t[103] = "內部查詢:{0}";
-    t[106] = "The array index is out of range: {0}";
-    t[107] = "陣列索引超過許可範圍:{0}";
-    t[112] = "Connection attempt timed out.";
-    t[113] = "Connection 嘗試逾時。";
-    t[114] = "Unable to find name datatype in the system catalogs.";
-    t[115] = "在系統 catalog 中找不到名稱資料類型(datatype)。";
-    t[116] = "Something unusual has occurred to cause the driver to fail. Please report this exception.";
-    t[117] = "不明的原因導致驅動程式造成失敗,請回報這個例外。";
-    t[120] = "The array index is out of range: {0}, number of elements: {1}.";
-    t[121] = "陣列索引超過許可範圍:{0},元素數量:{1}。";
-    t[138] = "Invalid flags {0}";
-    t[139] = "無效的旗標 {0}";
-    t[146] = "Unexpected error writing large object to database.";
-    t[147] = "將大型物件(large object)寫入資料庫時發生不明錯誤。";
-    t[162] = "Query timeout must be a value greater than or equals to 0.";
-    t[163] = "查詢逾時等候時間必須大於或等於 0。";
-    t[170] = "Unknown type {0}.";
-    t[171] = "不明的型別 {0}";
-    t[174] = "The server''s standard_conforming_strings parameter was reported as {0}. The JDBC driver expected on or off.";
-    t[175] = "這伺服器的 standard_conforming_strings 參數已回報為 {0},JDBC 驅動程式已預期開啟或是關閉。";
-    t[176] = "Invalid character data was found.  This is most likely caused by stored data containing characters that are invalid for the character set the database was created in.  The most common example of this is storing 8bit data in a SQL_ASCII database.";
-    t[177] = "發現不合法的字元,可能的原因是欲儲存的資料中包含資料庫的字元集不支援的字碼,其中最常見例子的就是將 8 位元資料存入使用 SQL_ASCII 編碼的資料庫中。";
-    t[178] = "The column index is out of range: {0}, number of columns: {1}.";
-    t[179] = "欄位索引超過許可範圍:{0},欄位數:{1}。";
-    t[180] = "The connection attempt failed.";
-    t[181] = "嘗試連線已失敗。";
-    t[182] = "No value specified for parameter {0}.";
-    t[183] = "未設定參數值 {0} 的內容。";
-    t[190] = "Provided Reader failed.";
-    t[191] = "提供的 Reader 已失敗。";
-    t[194] = "Unsupported value for stringtype parameter: {0}";
-    t[195] = "字串型別參數值未被支持:{0}";
-    t[198] = "A CallableStatement was declared, but no call to registerOutParameter(1, <some type>) was made.";
-    t[199] = "已經宣告 CallableStatement 函式,但是尚未呼叫 registerOutParameter (1, <some_type>) 。";
-    t[204] = "Currently positioned before the start of the ResultSet.  You cannot call deleteRow() here.";
-    t[205] = "不能在 ResultSet 的第一筆資料之前呼叫 deleteRow()。";
-    t[214] = "The maximum field size must be a value greater than or equal to 0.";
-    t[215] = "最大欄位容量必須大於或等於 0。";
-    t[216] = "Fetch size must be a value greater to or equal to 0.";
-    t[217] = "資料讀取筆數(fetch size)必須大於或等於 0。";
-    t[220] = "PostgreSQL LOBs can only index to: {0}";
-    t[221] = "PostgreSQL LOBs 僅能索引到:{0}";
-    t[224] = "The JVM claims not to support the encoding: {0}";
-    t[225] = "JVM 聲明並不支援編碼:{0} 。";
-    t[226] = "Interval {0} not yet implemented";
-    t[227] = "隔絕 {0} 尚未被實作。";
-    t[238] = "Fastpath call {0} - No result was returned and we expected an integer.";
-    t[239] = "Fastpath 呼叫 {0} - 沒有傳回值,且應該傳回一個整數。";
-    t[246] = "ResultSets with concurrency CONCUR_READ_ONLY cannot be updated.";
-    t[247] = "ResultSets 與並發同作(Concurrency) CONCUR_READ_ONLY 不能被更新。";
-    t[250] = "This statement does not declare an OUT parameter.  Use '{' ?= call ... '}' to declare one.";
-    t[251] = "這個 statement 未宣告 OUT 參數,使用 '{' ?= call ... '}' 宣告一個。";
-    t[256] = "Cannot reference a savepoint after it has been released.";
-    t[257] = "無法參照已經被釋放的儲存點。";
-    t[260] = "Unsupported Types value: {0}";
-    t[261] = "未被支持的型別值:{0}";
-    t[266] = "Protocol error.  Session setup failed.";
-    t[267] = "通訊協定錯誤,Session 初始化失敗。";
-    t[274] = "Currently positioned after the end of the ResultSet.  You cannot call deleteRow() here.";
-    t[275] = "不能在 ResultSet 的最後一筆資料之後呼叫 deleteRow()。";
-    t[278] = "Internal Position: {0}";
-    t[279] = "內部位置:{0}";
-    t[280] = "Zero bytes may not occur in identifiers.";
-    t[281] = "在標識識別符中不存在零位元組。";
-    t[288] = "{0} function doesn''t take any argument.";
-    t[289] = "{0} 函式無法取得任何的引數。";
-    t[300] = "This statement has been closed.";
-    t[301] = "這個 statement 已經被關閉。";
-    t[318] = "Cannot establish a savepoint in auto-commit mode.";
-    t[319] = "在自動確認事物交易模式無法建立儲存點(Savepoint)。";
-    t[320] = "Position: {0}";
-    t[321] = "位置:{0}";
-    t[322] = "ResultSet is not updateable.  The query that generated this result set must select only one table, and must select all primary keys from that table. See the JDBC 2.1 API Specification, section 5.6 for more details.";
-    t[323] = "不可更新的 ResultSet。用來產生這個 ResultSet 的 SQL 命令只能操作一個資料表,並且必需選擇所有主鍵欄位,詳細請參閱 JDBC 2.1 API 規格書 5.6 節。";
-    t[330] = "This ResultSet is closed.";
-    t[331] = "這個 ResultSet 已經被關閉。";
-    t[338] = "Parameter of type {0} was registered, but call to get{1} (sqltype={2}) was made.";
-    t[339] = "已註冊參數型別 {0},但是又呼叫了get{1}(sqltype={2})。";
-    t[342] = "Transaction isolation level {0} not supported.";
-    t[343] = "不支援交易隔絕等級 {0} 。";
-    t[344] = "Statement has been closed.";
-    t[345] = "Sstatement 已經被關閉。";
-    t[352] = "Server SQLState: {0}";
-    t[353] = "伺服器 SQLState:{0}";
-    t[354] = "No primary key found for table {0}.";
-    t[355] = "{0} 資料表中未找到主鍵(Primary key)。";
-    t[362] = "Cannot convert an instance of {0} to type {1}";
-    t[363] = "無法轉換 {0} 到類型 {1} 的實例";
-    t[364] = "DataSource has been closed.";
-    t[365] = "DataSource 已經被關閉。";
-    t[368] = "The column name {0} was not found in this ResultSet.";
-    t[369] = "ResultSet 中找不到欄位名稱 {0}。";
-    t[372] = "ResultSet not positioned properly, perhaps you need to call next.";
-    t[373] = "查詢結果指標位置不正確,您也許需要呼叫 ResultSet 的 next() 方法。";
-    t[378] = "Cannot update the ResultSet because it is either before the start or after the end of the results.";
-    t[379] = "無法更新 ResultSet,可能在第一筆資料之前或最未筆資料之後。";
-    t[380] = "Method {0} is not yet implemented.";
-    t[381] = "這個 {0} 方法尚未被實作。";
-    t[382] = "{0} function takes two or three arguments.";
-    t[383] = "{0} 函式取得二個或三個引數。";
-    t[384] = "The JVM claims not to support the {0} encoding.";
-    t[385] = "JVM 聲明並不支援 {0} 編碼。";
-    t[396] = "Unknown Response Type {0}.";
-    t[397] = "不明的回應類型 {0}。";
-    t[398] = "The parameter index is out of range: {0}, number of parameters: {1}.";
-    t[399] = "參數索引超出許可範圍:{0},參數總數:{1}。";
-    t[400] = "Where: {0}";
-    t[401] = "在位置:{0}";
-    t[406] = "Cannot call deleteRow() when on the insert row.";
-    t[407] = "不能在新增的資料上呼叫 deleteRow()。";
-    t[414] = "{0} function takes four and only four argument.";
-    t[415] = "{0} 函式取得四個且僅有四個引數。";
-    t[416] = "Unable to translate data into the desired encoding.";
-    t[417] = "無法將資料轉成目標編碼。";
-    t[424] = "Can''t use relative move methods while on the insert row.";
-    t[425] = "不能在新增的資料列上使用相對位置 move 方法。";
-    t[434] = "Invalid stream length {0}.";
-    t[435] = "無效的串流長度 {0}.";
-    t[436] = "The driver currently does not support COPY operations.";
-    t[437] = "驅動程式目前不支援 COPY 操作。";
-    t[440] = "Maximum number of rows must be a value grater than or equal to 0.";
-    t[441] = "最大資料讀取筆數必須大於或等於 0。";
-    t[446] = "Failed to create object for: {0}.";
-    t[447] = "為 {0} 建立物件失敗。";
-    t[448] = "{0} function takes three and only three arguments.";
-    t[449] = "{0} 函式取得三個且僅有三個引數。";
-    t[450] = "Conversion of interval failed";
-    t[451] = "隔絕(Interval)轉換失敗。";
-    t[452] = "Cannot tell if path is open or closed: {0}.";
-    t[453] = "無法得知 path 是開啟或關閉:{0}。";
-    t[460] = "Provided InputStream failed.";
-    t[461] = "提供的 InputStream 已失敗。";
-    t[462] = "Invalid fetch direction constant: {0}.";
-    t[463] = "無效的 fetch 方向常數:{0}。";
-    t[472] = "Invalid protocol state requested. Attempted transaction interleaving is not supported. xid={0}, currentXid={1}, state={2}, flags={3}";
-    t[473] = "事物交易隔絕(Transaction interleaving)未被實作。xid={0}, currentXid={1}, state={2}, flags={3}";
-    t[474] = "{0} function takes two and only two arguments.";
-    t[475] = "{0} 函式取得二個且僅有二個引數。";
-    t[476] = "There are no rows in this ResultSet.";
-    t[477] = "ResultSet 中找不到資料列。";
-    t[478] = "Zero bytes may not occur in string parameters.";
-    t[479] = "字串參數不能有 0 個位元組。";
-    t[480] = "Cannot call updateRow() when on the insert row.";
-    t[481] = "不能在新增的資料列上呼叫 deleteRow()。";
-    t[482] = "Connection has been closed automatically because a new connection was opened for the same PooledConnection or the PooledConnection has been closed.";
-    t[483] = "Connection 已自動結束,因為一個新的  PooledConnection 連線被開啟或者或 PooledConnection 已被關閉。";
-    t[488] = "A CallableStatement function was executed and the out parameter {0} was of type {1} however type {2} was registered.";
-    t[489] = "一個 CallableStatement 執行函式後輸出的參數型別為 {1} 值為 {0},但是已註冊的型別是 {2}。";
-    t[494] = "Cannot cast an instance of {0} to type {1}";
-    t[495] = "不能轉換一個 {0} 實例到型別 {1}";
-    t[498] = "Cannot retrieve the id of a named savepoint.";
-    t[499] = "無法取得已命名儲存點的 id。";
-    t[500] = "Cannot change transaction read-only property in the middle of a transaction.";
-    t[501] = "不能在事物交易過程中改變事物交易唯讀屬性。";
-    t[502] = "The server does not support SSL.";
-    t[503] = "伺服器不支援 SSL 連線。";
-    t[510] = "A connection could not be made using the requested protocol {0}.";
-    t[511] = "無法以要求的通訊協定 {0} 建立連線。";
-    t[512] = "The authentication type {0} is not supported. Check that you have configured the pg_hba.conf file to include the client''s IP address or subnet, and that it is using an authentication scheme supported by the driver.";
-    t[513] = "不支援 {0} 驗證型別。請核對您已經組態 pg_hba.conf 檔案包含客戶端的IP位址或網路區段,以及驅動程式所支援的驗證架構模式已被支援。";
-    t[514] = "Malformed function or procedure escape syntax at offset {0}.";
-    t[515] = "不正確的函式或程序 escape 語法於 {0}。";
-    t[516] = "The server''s DateStyle parameter was changed to {0}. The JDBC driver requires DateStyle to begin with ISO for correct operation.";
-    t[517] = "這伺服器的 DateStyle 參數被更改成 {0},JDBC 驅動程式請求需要 DateStyle 以 ISO 開頭以正確工作。";
-    t[518] = "No results were returned by the query.";
-    t[519] = "查詢沒有傳回任何結果。";
-    t[520] = "Location: File: {0}, Routine: {1}, Line: {2}";
-    t[521] = "位置:檔案:{0},常式:{1},行:{2}";
-    t[526] = "Hint: {0}";
-    t[527] = "建議:{0}";
-    t[528] = "A CallableStatement was executed with nothing returned.";
-    t[529] = "一個 CallableStatement 執行函式後沒有傳回值。";
-    t[530] = "Unknown ResultSet holdability setting: {0}.";
-    t[531] = "未知的 ResultSet 可適用的設置:{0}。";
-    t[540] = "Cannot change transaction isolation level in the middle of a transaction.";
-    t[541] = "不能在事務交易過程中改變事物交易隔絕等級。";
-    t[544] = "The fastpath function {0} is unknown.";
-    t[545] = "不明的 fastpath 函式 {0}。";
-    t[546] = "Can''t use query methods that take a query string on a PreparedStatement.";
-    t[547] = "在 PreparedStatement 上不能使用獲取查詢字串的查詢方法。";
-    t[556] = "Operation requires a scrollable ResultSet, but this ResultSet is FORWARD_ONLY.";
-    t[557] = "操作要求可捲動的 ResultSet,但此 ResultSet 是 FORWARD_ONLY。";
-    t[564] = "Unknown Types value.";
-    t[565] = "不明的型別值。";
-    t[570] = "Large Objects may not be used in auto-commit mode.";
-    t[571] = "大型物件無法被使用在自動確認事物交易模式。";
-    table = t;
-  }
+    private static final String[] table;
 
-  @Override
-  public Object handleGetObject (String msgid) throws MissingResourceException {
-    int hash_val = msgid.hashCode() & 0x7fffffff;
-    int idx = (hash_val % 289) << 1;
-    {
-      Object found = table[idx];
-      if (found == null)
-        return null;
-      if (msgid.equals(found))
-        return table[idx + 1];
+    static {
+        String[] t = new String[578];
+        t[0] = "";
+        t[1] = "Project-Id-Version: PostgreSQL JDBC Driver 8.3\nReport-Msgid-Bugs-To: \nPO-Revision-Date: 2008-01-21 16:50+0800\nLast-Translator: 郭朝益(ChaoYi, Kuo) <Kuo.ChaoYi@gmail.com>\nLanguage-Team: The PostgreSQL Development Team <Kuo.ChaoYi@gmail.com>\nLanguage: \nMIME-Version: 1.0\nContent-Type: text/plain; charset=UTF-8\nContent-Transfer-Encoding: 8bit\nX-Poedit-Language: Chinese\nX-Poedit-Country: TAIWAN\nX-Poedit-SourceCharset: utf-8\n";
+        t[6] = "Cannot call cancelRowUpdates() when on the insert row.";
+        t[7] = "不能在新增的資料列上呼叫 cancelRowUpdates()。";
+        t[8] = "The server requested password-based authentication, but no password was provided.";
+        t[9] = "伺服器要求使用密碼驗證,但是密碼並未提供。";
+        t[12] = "Detail: {0}";
+        t[13] = "詳細:{0}";
+        t[16] = "Can''t refresh the insert row.";
+        t[17] = "無法重讀新增的資料列。";
+        t[18] = "Connection has been closed.";
+        t[19] = "Connection 已經被關閉。";
+        t[24] = "Bad value for type {0} : {1}";
+        t[25] = "不良的型別值 {0} : {1}";
+        t[36] = "Truncation of large objects is only implemented in 8.3 and later servers.";
+        t[37] = "大型物件的截斷(Truncation)僅被實作執行在 8.3 和後來的伺服器。";
+        t[40] = "Cannot retrieve the name of an unnamed savepoint.";
+        t[41] = "無法取得未命名儲存點(Savepoint)的名稱。";
+        t[46] = "An error occurred while setting up the SSL connection.";
+        t[47] = "進行 SSL 連線時發生錯誤。";
+        t[50] = "suspend/resume not implemented";
+        t[51] = "暫停(suspend)/再繼續(resume)尚未被實作。";
+        t[60] = "{0} function takes one and only one argument.";
+        t[61] = "{0} 函式取得一個且僅有一個引數。";
+        t[62] = "Conversion to type {0} failed: {1}.";
+        t[63] = "轉換型別 {0} 失敗:{1}。";
+        t[66] = "Conversion of money failed.";
+        t[67] = "money 轉換失敗。";
+        t[70] = "A result was returned when none was expected.";
+        t[71] = "傳回預期之外的結果。";
+        t[80] = "This PooledConnection has already been closed.";
+        t[81] = "這個 PooledConnection 已經被關閉。";
+        t[84] = "Multiple ResultSets were returned by the query.";
+        t[85] = "查詢傳回多個 ResultSet。";
+        t[90] = "Not on the insert row.";
+        t[91] = "不在新增的資料列上。";
+        t[94] = "An unexpected result was returned by a query.";
+        t[95] = "傳回非預期的查詢結果。";
+        t[102] = "Internal Query: {0}";
+        t[103] = "內部查詢:{0}";
+        t[106] = "The array index is out of range: {0}";
+        t[107] = "陣列索引超過許可範圍:{0}";
+        t[112] = "Connection attempt timed out.";
+        t[113] = "Connection 嘗試逾時。";
+        t[114] = "Unable to find name datatype in the system catalogs.";
+        t[115] = "在系統 catalog 中找不到名稱資料類型(datatype)。";
+        t[116] = "Something unusual has occurred to cause the driver to fail. Please report this exception.";
+        t[117] = "不明的原因導致驅動程式造成失敗,請回報這個例外。";
+        t[120] = "The array index is out of range: {0}, number of elements: {1}.";
+        t[121] = "陣列索引超過許可範圍:{0},元素數量:{1}。";
+        t[138] = "Invalid flags {0}";
+        t[139] = "無效的旗標 {0}";
+        t[146] = "Unexpected error writing large object to database.";
+        t[147] = "將大型物件(large object)寫入資料庫時發生不明錯誤。";
+        t[162] = "Query timeout must be a value greater than or equals to 0.";
+        t[163] = "查詢逾時等候時間必須大於或等於 0。";
+        t[170] = "Unknown type {0}.";
+        t[171] = "不明的型別 {0}";
+        t[174] = "The server''s standard_conforming_strings parameter was reported as {0}. The JDBC driver expected on or off.";
+        t[175] = "這伺服器的 standard_conforming_strings 參數已回報為 {0},JDBC 驅動程式已預期開啟或是關閉。";
+        t[176] = "Invalid character data was found.  This is most likely caused by stored data containing characters that are invalid for the character set the database was created in.  The most common example of this is storing 8bit data in a SQL_ASCII database.";
+        t[177] = "發現不合法的字元,可能的原因是欲儲存的資料中包含資料庫的字元集不支援的字碼,其中最常見例子的就是將 8 位元資料存入使用 SQL_ASCII 編碼的資料庫中。";
+        t[178] = "The column index is out of range: {0}, number of columns: {1}.";
+        t[179] = "欄位索引超過許可範圍:{0},欄位數:{1}。";
+        t[180] = "The connection attempt failed.";
+        t[181] = "嘗試連線已失敗。";
+        t[182] = "No value specified for parameter {0}.";
+        t[183] = "未設定參數值 {0} 的內容。";
+        t[190] = "Provided Reader failed.";
+        t[191] = "提供的 Reader 已失敗。";
+        t[194] = "Unsupported value for stringtype parameter: {0}";
+        t[195] = "字串型別參數值未被支持:{0}";
+        t[198] = "A CallableStatement was declared, but no call to registerOutParameter(1, <some type>) was made.";
+        t[199] = "已經宣告 CallableStatement 函式,但是尚未呼叫 registerOutParameter (1, <some_type>) 。";
+        t[204] = "Currently positioned before the start of the ResultSet.  You cannot call deleteRow() here.";
+        t[205] = "不能在 ResultSet 的第一筆資料之前呼叫 deleteRow()。";
+        t[214] = "The maximum field size must be a value greater than or equal to 0.";
+        t[215] = "最大欄位容量必須大於或等於 0。";
+        t[216] = "Fetch size must be a value greater to or equal to 0.";
+        t[217] = "資料讀取筆數(fetch size)必須大於或等於 0。";
+        t[220] = "PostgreSQL LOBs can only index to: {0}";
+        t[221] = "PostgreSQL LOBs 僅能索引到:{0}";
+        t[224] = "The JVM claims not to support the encoding: {0}";
+        t[225] = "JVM 聲明並不支援編碼:{0} 。";
+        t[226] = "Interval {0} not yet implemented";
+        t[227] = "時間間隔 {0} 尚未被實作。";
+        t[238] = "Fastpath call {0} - No result was returned and we expected an integer.";
+        t[239] = "Fastpath 呼叫 {0} - 沒有傳回值,且應該傳回一個整數。";
+        t[246] = "ResultSets with concurrency CONCUR_READ_ONLY cannot be updated.";
+        t[247] = "ResultSets 與並發同作(Concurrency) CONCUR_READ_ONLY 不能被更新。";
+        t[250] = "This statement does not declare an OUT parameter.  Use '{' ?= call ... '}' to declare one.";
+        t[251] = "這個 statement 未宣告 OUT 參數,使用 '{' ?= call ... '}' 宣告一個。";
+        t[256] = "Cannot reference a savepoint after it has been released.";
+        t[257] = "無法參照已經被釋放的儲存點。";
+        t[260] = "Unsupported Types value: {0}";
+        t[261] = "未被支持的型別值:{0}";
+        t[266] = "Protocol error.  Session setup failed.";
+        t[267] = "通訊協定錯誤,Session 初始化失敗。";
+        t[274] = "Currently positioned after the end of the ResultSet.  You cannot call deleteRow() here.";
+        t[275] = "不能在 ResultSet 的最後一筆資料之後呼叫 deleteRow()。";
+        t[278] = "Internal Position: {0}";
+        t[279] = "內部位置:{0}";
+        t[280] = "Zero bytes may not occur in identifiers.";
+        t[281] = "在標識識別符中不存在零位元組。";
+        t[288] = "{0} function doesn''t take any argument.";
+        t[289] = "{0} 函式無法取得任何的引數。";
+        t[300] = "This statement has been closed.";
+        t[301] = "這個 statement 已經被關閉。";
+        t[318] = "Cannot establish a savepoint in auto-commit mode.";
+        t[319] = "在自動確認事物交易模式無法建立儲存點(Savepoint)。";
+        t[320] = "Position: {0}";
+        t[321] = "位置:{0}";
+        t[322] = "ResultSet is not updateable.  The query that generated this result set must select only one table, and must select all primary keys from that table. See the JDBC 2.1 API Specification, section 5.6 for more details.";
+        t[323] = "不可更新的 ResultSet。用來產生這個 ResultSet 的 SQL 命令只能操作一個資料表,並且必需選擇所有主鍵欄位,詳細請參閱 JDBC 2.1 API 規格書 5.6 節。";
+        t[330] = "This ResultSet is closed.";
+        t[331] = "這個 ResultSet 已經被關閉。";
+        t[338] = "Parameter of type {0} was registered, but call to get{1} (sqltype={2}) was made.";
+        t[339] = "已註冊參數型別 {0},但是又呼叫了get{1}(sqltype={2})。";
+        t[342] = "Transaction isolation level {0} not supported.";
+        t[343] = "不支援交易隔絕等級 {0} 。";
+        t[344] = "Statement has been closed.";
+        t[345] = "Statement 已經被關閉。";
+        t[352] = "Server SQLState: {0}";
+        t[353] = "伺服器 SQLState:{0}";
+        t[354] = "No primary key found for table {0}.";
+        t[355] = "{0} 資料表中未找到主鍵(Primary key)。";
+        t[362] = "Cannot convert an instance of {0} to type {1}";
+        t[363] = "無法轉換 {0} 到類型 {1} 的實例";
+        t[364] = "DataSource has been closed.";
+        t[365] = "DataSource 已經被關閉。";
+        t[368] = "The column name {0} was not found in this ResultSet.";
+        t[369] = "ResultSet 中找不到欄位名稱 {0}。";
+        t[372] = "ResultSet not positioned properly, perhaps you need to call next.";
+        t[373] = "查詢結果指標位置不正確,您也許需要呼叫 ResultSet 的 next() 方法。";
+        t[378] = "Cannot update the ResultSet because it is either before the start or after the end of the results.";
+        t[379] = "無法更新 ResultSet,可能在第一筆資料之前或最未筆資料之後。";
+        t[380] = "Method {0} is not yet implemented.";
+        t[381] = "這個 {0} 方法尚未被實作。";
+        t[382] = "{0} function takes two or three arguments.";
+        t[383] = "{0} 函式取得二個或三個引數。";
+        t[384] = "The JVM claims not to support the {0} encoding.";
+        t[385] = "JVM 聲明並不支援 {0} 編碼。";
+        t[396] = "Unknown Response Type {0}.";
+        t[397] = "不明的回應類型 {0}。";
+        t[398] = "The parameter index is out of range: {0}, number of parameters: {1}.";
+        t[399] = "參數索引超出許可範圍:{0},參數總數:{1}。";
+        t[400] = "Where: {0}";
+        t[401] = "在位置:{0}";
+        t[406] = "Cannot call deleteRow() when on the insert row.";
+        t[407] = "不能在新增的資料上呼叫 deleteRow()。";
+        t[414] = "{0} function takes four and only four argument.";
+        t[415] = "{0} 函式取得四個且僅有四個引數。";
+        t[416] = "Unable to translate data into the desired encoding.";
+        t[417] = "無法將資料轉成目標編碼。";
+        t[424] = "Can''t use relative move methods while on the insert row.";
+        t[425] = "不能在新增的資料列上使用相對位置 move 方法。";
+        t[434] = "Invalid stream length {0}.";
+        t[435] = "無效的串流長度 {0}.";
+        t[436] = "The driver currently does not support COPY operations.";
+        t[437] = "驅動程式目前不支援 COPY 操作。";
+        t[440] = "Maximum number of rows must be a value grater than or equal to 0.";
+        t[441] = "最大資料讀取筆數必須大於或等於 0。";
+        t[446] = "Failed to create object for: {0}.";
+        t[447] = "為 {0} 建立物件失敗。";
+        t[448] = "{0} function takes three and only three arguments.";
+        t[449] = "{0} 函式取得三個且僅有三個引數。";
+        t[450] = "Conversion of interval failed";
+        t[451] = "時間間隔(Interval)轉換失敗。";
+        t[452] = "Cannot tell if path is open or closed: {0}.";
+        t[453] = "無法得知 path 是開啟或關閉:{0}。";
+        t[460] = "Provided InputStream failed.";
+        t[461] = "提供的 InputStream 已失敗。";
+        t[462] = "Invalid fetch direction constant: {0}.";
+        t[463] = "無效的 fetch 方向常數:{0}。";
+        t[472] = "Invalid protocol state requested. Attempted transaction interleaving is not supported. xid={0}, currentXid={1}, state={2}, flags={3}";
+        t[473] = "事物交易隔絕(Transaction interleaving)未被實作。xid={0}, currentXid={1}, state={2}, flags={3}";
+        t[474] = "{0} function takes two and only two arguments.";
+        t[475] = "{0} 函式取得二個且僅有二個引數。";
+        t[476] = "There are no rows in this ResultSet.";
+        t[477] = "ResultSet 中找不到資料列。";
+        t[478] = "Zero bytes may not occur in string parameters.";
+        t[479] = "字串參數不能有 0 個位元組。";
+        t[480] = "Cannot call updateRow() when on the insert row.";
+        t[481] = "不能在新增的資料列上呼叫 updateRow()。";
+        t[482] = "Connection has been closed automatically because a new connection was opened for the same PooledConnection or the PooledConnection has been closed.";
+        t[483] = "Connection 已自動結束,因為一個新的 PooledConnection 連線被開啟或者 PooledConnection 已被關閉。";
+        t[488] = "A CallableStatement function was executed and the out parameter {0} was of type {1} however type {2} was registered.";
+        t[489] = "一個 CallableStatement 執行函式後輸出的參數型別為 {1} 值為 {0},但是已註冊的型別是 {2}。";
+        t[494] = "Cannot cast an instance of {0} to type {1}";
+        t[495] = "不能轉換一個 {0} 實例到型別 {1}";
+        t[498] = "Cannot retrieve the id of a named savepoint.";
+        t[499] = "無法取得已命名儲存點的 id。";
+        t[500] = "Cannot change transaction read-only property in the middle of a transaction.";
+        t[501] = "不能在事物交易過程中改變事物交易唯讀屬性。";
+        t[502] = "The server does not support SSL.";
+        t[503] = "伺服器不支援 SSL 連線。";
+        t[510] = "A connection could not be made using the requested protocol {0}.";
+        t[511] = "無法以要求的通訊協定 {0} 建立連線。";
+        t[512] = "The authentication type {0} is not supported. Check that you have configured the pg_hba.conf file to include the client''s IP address or subnet, and that it is using an authentication scheme supported by the driver.";
+        t[513] = "不支援 {0} 驗證型別。請核對您已經組態 pg_hba.conf 檔案包含客戶端的IP位址或網路區段,以及驅動程式所支援的驗證架構模式已被支援。";
+        t[514] = "Malformed function or procedure escape syntax at offset {0}.";
+        t[515] = "不正確的函式或程序 escape 語法於 {0}。";
+        t[516] = "The server''s DateStyle parameter was changed to {0}. The JDBC driver requires DateStyle to begin with ISO for correct operation.";
+        t[517] = "這伺服器的 DateStyle 參數被更改成 {0},JDBC 驅動程式請求需要 DateStyle 以 ISO 開頭以正確工作。";
+        t[518] = "No results were returned by the query.";
+        t[519] = "查詢沒有傳回任何結果。";
+        t[520] = "Location: File: {0}, Routine: {1}, Line: {2}";
+        t[521] = "位置:檔案:{0},常式:{1},行:{2}";
+        t[526] = "Hint: {0}";
+        t[527] = "建議:{0}";
+        t[528] = "A CallableStatement was executed with nothing returned.";
+        t[529] = "一個 CallableStatement 執行函式後沒有傳回值。";
+        t[530] = "Unknown ResultSet holdability setting: {0}.";
+        t[531] = "未知的 ResultSet 可適用的設置:{0}。";
+        t[540] = "Cannot change transaction isolation level in the middle of a transaction.";
+        t[541] = "不能在事務交易過程中改變事物交易隔絕等級。";
+        t[544] = "The fastpath function {0} is unknown.";
+        t[545] = "不明的 fastpath 函式 {0}。";
+        t[546] = "Can''t use query methods that take a query string on a PreparedStatement.";
+        t[547] = "在 PreparedStatement 上不能使用獲取查詢字串的查詢方法。";
+        t[556] = "Operation requires a scrollable ResultSet, but this ResultSet is FORWARD_ONLY.";
+        t[557] = "操作要求可捲動的 ResultSet,但此 ResultSet 是 FORWARD_ONLY。";
+        t[564] = "Unknown Types value.";
+        t[565] = "不明的型別值。";
+        t[570] = "Large Objects may not be used in auto-commit mode.";
+        t[571] = "大型物件無法被使用在自動確認事物交易模式。";
+        table = t;
     }
-    int incr = ((hash_val % 287) + 1) << 1;
-    for (;;) {
-      idx += incr;
-      if (idx >= 578)
-        idx -= 578;
-      Object found = table[idx];
-      if (found == null)
-        return null;
-      if (msgid.equals(found))
-        return table[idx + 1];
+
+    @Override
+    public Object handleGetObject(String msgid) throws MissingResourceException {
+        int hash_val = msgid.hashCode() & 0x7fffffff;
+        int idx = (hash_val % 289) << 1;
+        {
+            Object found = table[idx];
+            if (found == null)
+                return null;
+            if (msgid.equals(found))
+                return table[idx + 1];
+        }
+        int incr = ((hash_val % 287) + 1) << 1;
+        for (; ; ) {
+            idx += incr;
+            if (idx >= 578)
+                idx -= 578;
+            Object found = table[idx];
+            if (found == null)
+                return null;
+            if (msgid.equals(found))
+                return table[idx + 1];
+        }
     }
-  }
 
-  public Enumeration<String> getKeys () {
-    return new Enumeration<>() {
-        private int idx = 0;
-        { while (idx < 578 && table[idx] == null) idx += 2; }
+    public Enumeration<String> getKeys() {
+        return new Enumeration<>() {
+            private int idx = 0;
 
-      @Override
-        public boolean hasMoreElements () {
-          return (idx < 578);
-        }
+            {
+                while (idx < 578 && table[idx] == null) idx += 2;
+            }
 
-      @Override
-        public String nextElement () {
-          Object key = table[idx];
-          do idx += 2; while (idx < 578 && table[idx] == null);
-          return key.toString();
-        }
-      };
-  }
+            @Override
+            public boolean hasMoreElements() {
+                return (idx < 578);
+            }
 
-  public ResourceBundle getParent () {
-    return parent;
-  }
+            @Override
+            public String nextElement() {
+                Object key = table[idx];
+                do idx += 2; while (idx < 578 && table[idx] == null);
+                return key.toString();
+            }
+        };
+    }
+
+    public ResourceBundle getParent() {
+        return parent;
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/util/ByteBufferByteStreamWriter.java b/pgjdbc/src/main/java/org/postgresql/util/ByteBufferByteStreamWriter.java
index f0a8aa0..45d5428 100644
--- a/pgjdbc/src/main/java/org/postgresql/util/ByteBufferByteStreamWriter.java
+++ b/pgjdbc/src/main/java/org/postgresql/util/ByteBufferByteStreamWriter.java
@@ -16,37 +16,37 @@ import java.nio.channels.WritableByteChannel;
  */
 public class ByteBufferByteStreamWriter implements ByteStreamWriter {
 
-  private final ByteBuffer buf;
-  private final int length;
+    private final ByteBuffer buf;
+    private final int length;
 
-  /**
-   * Construct the writer with the given {@link ByteBuffer}
-   *
-   * @param buf the buffer to use.
-   */
-  public ByteBufferByteStreamWriter(ByteBuffer buf) {
-    this.buf = buf;
-    this.length = buf.remaining();
-  }
-
-  @Override
-  public int getLength() {
-    return length;
-  }
-
-  @Override
-  public void writeTo(ByteStreamTarget target) throws IOException {
-    if (buf.hasArray()) {
-      // Avoid copying the array if possible
-      target.getOutputStream()
-          .write(buf.array(), buf.arrayOffset() + buf.position(), buf.remaining());
-      return;
+    /**
+     * Construct the writer with the given {@link ByteBuffer}
+     *
+     * @param buf the buffer to use.
+     */
+    public ByteBufferByteStreamWriter(ByteBuffer buf) {
+        this.buf = buf;
+        this.length = buf.remaining();
     }
 
-    // this _does_ involve some copying to a temporary buffer, but that's unavoidable
-    // as OutputStream itself only accepts single bytes or heap allocated byte arrays
-    try (WritableByteChannel c = Channels.newChannel(target.getOutputStream())) {
-      c.write(buf);
+    @Override
+    public int getLength() {
+        return length;
+    }
+
+    @Override
+    public void writeTo(ByteStreamTarget target) throws IOException {
+        if (buf.hasArray()) {
+            // Avoid copying the array if possible
+            target.getOutputStream()
+                    .write(buf.array(), buf.arrayOffset() + buf.position(), buf.remaining());
+            return;
+        }
+
+        // this _does_ involve some copying to a temporary buffer, but that's unavoidable
+        // as OutputStream itself only accepts single bytes or heap allocated byte arrays
+        try (WritableByteChannel c = Channels.newChannel(target.getOutputStream())) {
+            c.write(buf);
+        }
     }
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/util/ByteBuffersByteStreamWriter.java b/pgjdbc/src/main/java/org/postgresql/util/ByteBuffersByteStreamWriter.java
index 9edde9f..125b718 100644
--- a/pgjdbc/src/main/java/org/postgresql/util/ByteBuffersByteStreamWriter.java
+++ b/pgjdbc/src/main/java/org/postgresql/util/ByteBuffersByteStreamWriter.java
@@ -17,55 +17,55 @@ import java.nio.channels.WritableByteChannel;
  */
 class ByteBuffersByteStreamWriter implements ByteStreamWriter {
 
-  private final ByteBuffer[] buffers;
-  private final int length;
+    private final ByteBuffer[] buffers;
+    private final int length;
 
-  /**
-   * Construct the writer with the given {@link ByteBuffer}
-   *
-   * @param buffers the buffer to use.
-   */
-  ByteBuffersByteStreamWriter(ByteBuffer... buffers) {
-    this.buffers = buffers;
-    int length = 0;
-    for (ByteBuffer buffer : buffers) {
-      length += buffer.remaining();
-    }
-    this.length = length;
-  }
-
-  @Override
-  public int getLength() {
-    return length;
-  }
-
-  @Override
-  public void writeTo(ByteStreamTarget target) throws IOException {
-    boolean allArraysAreAccessible = true;
-    for (ByteBuffer buffer : buffers) {
-      if (!buffer.hasArray()) {
-        allArraysAreAccessible = false;
-        break;
-      }
-    }
-
-    OutputStream os = target.getOutputStream();
-    if (allArraysAreAccessible) {
-      for (ByteBuffer buffer : buffers) {
-        os.write(buffer.array(), buffer.arrayOffset() + buffer.position(), buffer.remaining());
-      }
-      return;
-    }
-    // Channels.newChannel does not buffer writes, so we can mix writes to the channel with writes
-    // to the OutputStream
-    try (WritableByteChannel c = Channels.newChannel(os)) {
-      for (ByteBuffer buffer : buffers) {
-        if (buffer.hasArray()) {
-          os.write(buffer.array(), buffer.arrayOffset() + buffer.position(), buffer.remaining());
-        } else {
-          c.write(buffer);
+    /**
+     * Construct the writer with the given {@link ByteBuffer} instances.
+     *
+     * @param buffers the buffers to write, in order.
+     */
+    ByteBuffersByteStreamWriter(ByteBuffer... buffers) {
+        this.buffers = buffers;
+        int length = 0;
+        for (ByteBuffer buffer : buffers) {
+            length += buffer.remaining();
+        }
+        this.length = length;
+    }
+
+    @Override
+    public int getLength() {
+        return length;
+    }
+
+    @Override
+    public void writeTo(ByteStreamTarget target) throws IOException {
+        boolean allArraysAreAccessible = true;
+        for (ByteBuffer buffer : buffers) {
+            if (!buffer.hasArray()) {
+                allArraysAreAccessible = false;
+                break;
+            }
+        }
+
+        OutputStream os = target.getOutputStream();
+        if (allArraysAreAccessible) {
+            for (ByteBuffer buffer : buffers) {
+                os.write(buffer.array(), buffer.arrayOffset() + buffer.position(), buffer.remaining());
+            }
+            return;
+        }
+        // Channels.newChannel does not buffer writes, so we can mix writes to the channel with writes
+        // to the OutputStream
+        try (WritableByteChannel c = Channels.newChannel(os)) {
+            for (ByteBuffer buffer : buffers) {
+                if (buffer.hasArray()) {
+                    os.write(buffer.array(), buffer.arrayOffset() + buffer.position(), buffer.remaining());
+                } else {
+                    c.write(buffer);
+                }
+            }
         }
-      }
     }
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/util/ByteConverter.java b/pgjdbc/src/main/java/org/postgresql/util/ByteConverter.java
index 1cddc45..57bba1e 100644
--- a/pgjdbc/src/main/java/org/postgresql/util/ByteConverter.java
+++ b/pgjdbc/src/main/java/org/postgresql/util/ByteConverter.java
@@ -17,641 +17,639 @@ import java.util.Arrays;
  */
 public class ByteConverter {
 
-  /**
-   * Simple stack structure for non-negative {@code short} values.
-   */
-  private static final class PositiveShorts {
-    private short[] shorts = new short[8];
-    private int idx;
+    private static final int NUMERIC_DSCALE_MASK = 0x00003FFF;
+    private static final short NUMERIC_POS = 0x0000;
+    private static final short NUMERIC_NEG = 0x4000;
+    private static final short NUMERIC_NAN = (short) 0xC000;
+    private static final int SHORT_BYTES = 2;
+    private static final int LONG_BYTES = 4;
+    private static final int[] INT_TEN_POWERS = new int[6];
+    private static final long[] LONG_TEN_POWERS = new long[19];
+    private static final BigInteger[] BI_TEN_POWERS = new BigInteger[32];
+    private static final BigInteger BI_TEN_THOUSAND = BigInteger.valueOf(10000);
+    private static final BigInteger BI_MAX_LONG = BigInteger.valueOf(Long.MAX_VALUE);
 
-    PositiveShorts() {
-    }
-
-    public void push(short s) {
-      if (s < 0) {
-        throw new IllegalArgumentException("only non-negative values accepted: " + s);
-      }
-      if (idx == shorts.length) {
-        grow();
-      }
-      shorts[idx++] = s;
-    }
-
-    public int size() {
-      return idx;
-    }
-
-    public boolean isEmpty() {
-      return idx == 0;
-    }
-
-    public short pop() {
-      return idx > 0 ? shorts[--idx] : -1;
-    }
-
-    private void grow() {
-      final int newSize = shorts.length <= 1024 ? shorts.length << 1 : (int) (shorts.length * 1.5);
-      shorts = Arrays.copyOf(shorts, newSize);
-    }
-  }
-
-  private static final int NUMERIC_DSCALE_MASK = 0x00003FFF;
-  private static final short NUMERIC_POS = 0x0000;
-  private static final short NUMERIC_NEG = 0x4000;
-  private static final short NUMERIC_NAN = (short) 0xC000;
-  private static final int SHORT_BYTES = 2;
-  private static final int LONG_BYTES = 4;
-  private static final int[] INT_TEN_POWERS = new int[6];
-  private static final long[] LONG_TEN_POWERS = new long[19];
-  private static final BigInteger[] BI_TEN_POWERS = new BigInteger[32];
-  private static final BigInteger BI_TEN_THOUSAND = BigInteger.valueOf(10000);
-  private static final BigInteger BI_MAX_LONG = BigInteger.valueOf(Long.MAX_VALUE);
-
-  static {
-    for (int i = 0; i < INT_TEN_POWERS.length; i++) {
-      INT_TEN_POWERS[i] = (int) Math.pow(10, i);
-    }
-    for (int i = 0; i < LONG_TEN_POWERS.length; i++) {
-      LONG_TEN_POWERS[i] = (long) Math.pow(10, i);
-    }
-    for (int i = 0; i < BI_TEN_POWERS.length; i++) {
-      BI_TEN_POWERS[i] = BigInteger.TEN.pow(i);
-    }
-  }
-
-  private ByteConverter() {
-    // prevent instantiation of static helper class
-  }
-
-  /**
-   * Convert a variable length array of bytes to an integer
-   * @param bytes array of bytes that can be decoded as an integer
-   * @return integer
-   */
-  public static int bytesToInt(byte []bytes) {
-    if ( bytes.length == 1 ) {
-      return (int) bytes[0];
-    }
-    if ( bytes.length == SHORT_BYTES ) {
-      return int2(bytes, 0);
-    }
-    if ( bytes.length == LONG_BYTES ) {
-      return int4(bytes, 0);
-    } else {
-      throw new IllegalArgumentException("Argument bytes is empty");
-    }
-  }
-
-  /**
-   * Convert a variable length array of bytes to an integer
-   * @param bytes array of bytes that can be decoded as an integer
-   * @return integer
-   */
-  public static Number numeric(byte [] bytes) {
-    return numeric(bytes, 0, bytes.length);
-  }
-
-  /**
-   * Convert a variable length array of bytes to a {@link Number}. The result will
-   * always be a {@link BigDecimal} or {@link Double#NaN}.
-   *
-   * @param bytes array of bytes to be decoded from binary numeric representation.
-   * @param pos index of the start position of the bytes array for number
-   * @param numBytes number of bytes to use, length is already encoded
-   *                in the binary format but this is used for double checking
-   * @return BigDecimal representation of numeric or {@link Double#NaN}.
-   */
-  public static Number numeric(byte [] bytes, int pos, int numBytes) {
-
-    if (numBytes < 8) {
-      throw new IllegalArgumentException("number of bytes should be at-least 8");
-    }
-
-    //number of 2-byte shorts representing 4 decimal digits - should be treated as unsigned
-    int len = ByteConverter.int2(bytes, pos) & 0xFFFF;
-    //0 based number of 4 decimal digits (i.e. 2-byte shorts) before the decimal
-    //a value <= 0 indicates an absolute value < 1.
-    short weight = ByteConverter.int2(bytes, pos + 2);
-    //indicates positive, negative or NaN
-    short sign = ByteConverter.int2(bytes, pos + 4);
-    //number of digits after the decimal. This must be >= 0.
-    //a value of 0 indicates a whole number (integer).
-    short scale = ByteConverter.int2(bytes, pos + 6);
-
-    //An integer should be built from the len number of 2 byte shorts, treating each
-    //as 4 digits.
-    //The weight, if > 0, indicates how many of those 4 digit chunks should be to the
-    //"left" of the decimal. If the weight is 0, then all 4 digit chunks start immediately
-    //to the "right" of the decimal. If the weight is < 0, the absolute distance from 0
-    //indicates 4 leading "0" digits to the immediate "right" of the decimal, prior to the
-    //digits from "len".
-    //A weight which is positive, can be a number larger than what len defines. This means
-    //there are trailing 0s after the "len" integer and before the decimal.
-    //The scale indicates how many significant digits there are to the right of the decimal.
-    //A value of 0 indicates a whole number (integer).
-    //The combination of weight, len, and scale can result in either trimming digits provided
-    //by len (only to the right of the decimal) or adding significant 0 values to the right
-    //of len (on either side of the decimal).
-
-    if (numBytes != (len * SHORT_BYTES + 8)) {
-      throw new IllegalArgumentException("invalid length of bytes \"numeric\" value");
-    }
-
-    if (!(sign == NUMERIC_POS
-        || sign == NUMERIC_NEG
-        || sign == NUMERIC_NAN)) {
-      throw new IllegalArgumentException("invalid sign in \"numeric\" value");
-    }
-
-    if (sign == NUMERIC_NAN) {
-      return Double.NaN;
-    }
-
-    if ((scale & NUMERIC_DSCALE_MASK) != scale) {
-      throw new IllegalArgumentException("invalid scale in \"numeric\" value");
-    }
-
-    if (len == 0) {
-      return new BigDecimal(BigInteger.ZERO, scale);
-    }
-
-    int idx = pos + 8;
-
-    short d = ByteConverter.int2(bytes, idx);
-
-    //if the absolute value is (0, 1), then leading '0' values
-    //do not matter for the unscaledInt, but trailing 0s do
-    if (weight < 0) {
-      assert scale > 0;
-      int effectiveScale = scale;
-      //adjust weight to determine how many leading 0s after the decimal
-      //before the provided values/digits actually begin
-      ++weight;
-      if (weight < 0) {
-        effectiveScale += 4 * weight;
-      }
-
-      int i = 1;
-      //typically there should not be leading 0 short values, as it is more
-      //efficient to represent that in the weight value
-      for (; i < len && d == 0; i++) {
-        //each leading 0 value removes 4 from the effective scale
-        effectiveScale -= 4;
-        idx += 2;
-        d = ByteConverter.int2(bytes, idx);
-      }
-
-      assert effectiveScale > 0;
-      if (effectiveScale >= 4) {
-        effectiveScale -= 4;
-      } else {
-        //an effective scale of less than four means that the value d
-        //has trailing 0s which are not significant
-        //so we divide by the appropriate power of 10 to reduce those
-        d = (short) (d / INT_TEN_POWERS[4 - effectiveScale]);
-        effectiveScale = 0;
-      }
-      //defer moving to BigInteger as long as possible
-      //operations on the long are much faster
-      BigInteger unscaledBI = null;
-      long unscaledInt = d;
-      for (; i < len; i++) {
-        if (i == 4 && effectiveScale > 2) {
-          unscaledBI = BigInteger.valueOf(unscaledInt);
+    static {
+        for (int i = 0; i < INT_TEN_POWERS.length; i++) {
+            INT_TEN_POWERS[i] = (int) Math.pow(10, i);
         }
-        idx += 2;
-        d = ByteConverter.int2(bytes, idx);
-        //if effective scale is at least 4, then all 4 digits should be used
-        //and the existing number needs to be shifted 4
-        if (effectiveScale >= 4) {
-          if (unscaledBI == null) {
-            unscaledInt *= 10000;
-          } else {
-            unscaledBI = unscaledBI.multiply(BI_TEN_THOUSAND);
-          }
-          effectiveScale -= 4;
+        for (int i = 0; i < LONG_TEN_POWERS.length; i++) {
+            LONG_TEN_POWERS[i] = (long) Math.pow(10, i);
+        }
+        for (int i = 0; i < BI_TEN_POWERS.length; i++) {
+            BI_TEN_POWERS[i] = BigInteger.TEN.pow(i);
+        }
+    }
+
+    private ByteConverter() {
+        // prevent instantiation of static helper class
+    }
+
+    /**
+     * Convert a variable length array of bytes to an integer
+     *
+     * @param bytes array of bytes that can be decoded as an integer
+     * @return integer
+     */
+    public static int bytesToInt(byte[] bytes) {
+        if (bytes.length == 1) {
+            return (int) bytes[0];
+        }
+        if (bytes.length == SHORT_BYTES) {
+            return int2(bytes, 0);
+        }
+        if (bytes.length == LONG_BYTES) {
+            return int4(bytes, 0);
         } else {
-          //if effective scale is less than 4, then only shift left based on remaining scale
-          if (unscaledBI == null) {
-            unscaledInt *= INT_TEN_POWERS[effectiveScale];
-          } else {
+            throw new IllegalArgumentException("Argument bytes is empty");
+        }
+    }
+
+    /**
+     * Convert an array of bytes in binary numeric representation to a {@link Number}.
+     *
+     * @param bytes array of bytes to be decoded from binary numeric representation
+     * @return BigDecimal representation of numeric, or {@link Double#NaN}
+     */
+    public static Number numeric(byte[] bytes) {
+        return numeric(bytes, 0, bytes.length);
+    }
+
+    /**
+     * Convert a variable length array of bytes to a {@link Number}. The result will
+     * always be a {@link BigDecimal} or {@link Double#NaN}.
+     *
+     * @param bytes    array of bytes to be decoded from binary numeric representation.
+     * @param pos      index of the start position of the bytes array for number
+     * @param numBytes number of bytes to use, length is already encoded
+     *                 in the binary format but this is used for double checking
+     * @return BigDecimal representation of numeric or {@link Double#NaN}.
+     */
+    public static Number numeric(byte[] bytes, int pos, int numBytes) {
+
+        if (numBytes < 8) {
+            throw new IllegalArgumentException("number of bytes should be at-least 8");
+        }
+
+        //number of 2-byte shorts representing 4 decimal digits - should be treated as unsigned
+        int len = ByteConverter.int2(bytes, pos) & 0xFFFF;
+        //0 based number of 4 decimal digits (i.e. 2-byte shorts) before the decimal
+        //a value <= 0 indicates an absolute value < 1.
+        short weight = ByteConverter.int2(bytes, pos + 2);
+        //indicates positive, negative or NaN
+        short sign = ByteConverter.int2(bytes, pos + 4);
+        //number of digits after the decimal. This must be >= 0.
+        //a value of 0 indicates a whole number (integer).
+        short scale = ByteConverter.int2(bytes, pos + 6);
+
+        //An integer should be built from the len number of 2 byte shorts, treating each
+        //as 4 digits.
+        //The weight, if > 0, indicates how many of those 4 digit chunks should be to the
+        //"left" of the decimal. If the weight is 0, then all 4 digit chunks start immediately
+        //to the "right" of the decimal. If the weight is < 0, the absolute distance from 0
+        //indicates 4 leading "0" digits to the immediate "right" of the decimal, prior to the
+        //digits from "len".
+        //A weight which is positive, can be a number larger than what len defines. This means
+        //there are trailing 0s after the "len" integer and before the decimal.
+        //The scale indicates how many significant digits there are to the right of the decimal.
+        //A value of 0 indicates a whole number (integer).
+        //The combination of weight, len, and scale can result in either trimming digits provided
+        //by len (only to the right of the decimal) or adding significant 0 values to the right
+        //of len (on either side of the decimal).
+
+        if (numBytes != (len * SHORT_BYTES + 8)) {
+            throw new IllegalArgumentException("invalid length of bytes \"numeric\" value");
+        }
+
+        if (!(sign == NUMERIC_POS
+                || sign == NUMERIC_NEG
+                || sign == NUMERIC_NAN)) {
+            throw new IllegalArgumentException("invalid sign in \"numeric\" value");
+        }
+
+        if (sign == NUMERIC_NAN) {
+            return Double.NaN;
+        }
+
+        if ((scale & NUMERIC_DSCALE_MASK) != scale) {
+            throw new IllegalArgumentException("invalid scale in \"numeric\" value");
+        }
+
+        if (len == 0) {
+            return new BigDecimal(BigInteger.ZERO, scale);
+        }
+
+        int idx = pos + 8;
+
+        short d = ByteConverter.int2(bytes, idx);
+
+        //if the absolute value is (0, 1), then leading '0' values
+        //do not matter for the unscaledInt, but trailing 0s do
+        if (weight < 0) {
+            assert scale > 0;
+            int effectiveScale = scale;
+            //adjust weight to determine how many leading 0s after the decimal
+            //before the provided values/digits actually begin
+            ++weight;
+            if (weight < 0) {
+                effectiveScale += 4 * weight;
+            }
+
+            int i = 1;
+            //typically there should not be leading 0 short values, as it is more
+            //efficient to represent that in the weight value
+            for (; i < len && d == 0; i++) {
+                //each leading 0 value removes 4 from the effective scale
+                effectiveScale -= 4;
+                idx += 2;
+                d = ByteConverter.int2(bytes, idx);
+            }
+
+            assert effectiveScale > 0;
+            if (effectiveScale >= 4) {
+                effectiveScale -= 4;
+            } else {
+                //an effective scale of less than four means that the value d
+                //has trailing 0s which are not significant
+                //so we divide by the appropriate power of 10 to reduce those
+                d = (short) (d / INT_TEN_POWERS[4 - effectiveScale]);
+                effectiveScale = 0;
+            }
+            //defer moving to BigInteger as long as possible
+            //operations on the long are much faster
+            BigInteger unscaledBI = null;
+            long unscaledInt = d;
+            for (; i < len; i++) {
+                if (i == 4 && effectiveScale > 2) {
+                    unscaledBI = BigInteger.valueOf(unscaledInt);
+                }
+                idx += 2;
+                d = ByteConverter.int2(bytes, idx);
+                //if effective scale is at least 4, then all 4 digits should be used
+                //and the existing number needs to be shifted 4
+                if (effectiveScale >= 4) {
+                    if (unscaledBI == null) {
+                        unscaledInt *= 10000;
+                    } else {
+                        unscaledBI = unscaledBI.multiply(BI_TEN_THOUSAND);
+                    }
+                    effectiveScale -= 4;
+                } else {
+                    //if effective scale is less than 4, then only shift left based on remaining scale
+                    if (unscaledBI == null) {
+                        unscaledInt *= INT_TEN_POWERS[effectiveScale];
+                    } else {
+                        unscaledBI = unscaledBI.multiply(tenPower(effectiveScale));
+                    }
+                    //and d needs to be shifted to the right to only get correct number of
+                    //significant digits
+                    d = (short) (d / INT_TEN_POWERS[4 - effectiveScale]);
+                    effectiveScale = 0;
+                }
+                if (unscaledBI == null) {
+                    unscaledInt += d;
+                } else {
+                    if (d != 0) {
+                        unscaledBI = unscaledBI.add(BigInteger.valueOf(d));
+                    }
+                }
+            }
+            //now we need BigInteger to create BigDecimal
+            if (unscaledBI == null) {
+                unscaledBI = BigInteger.valueOf(unscaledInt);
+            }
+            //if there is remaining effective scale, apply it here
+            if (effectiveScale > 0) {
+                unscaledBI = unscaledBI.multiply(tenPower(effectiveScale));
+            }
+            if (sign == NUMERIC_NEG) {
+                unscaledBI = unscaledBI.negate();
+            }
+
+            return new BigDecimal(unscaledBI, scale);
+        }
+
+        //if there is no scale, then shorts are the unscaled int
+        if (scale == 0) {
+            //defer moving to BigInteger as long as possible
+            //operations on the long are much faster
+            BigInteger unscaledBI = null;
+            long unscaledInt = d;
+            //loop over all of the len shorts to process as the unscaled int
+            for (int i = 1; i < len; i++) {
+                if (i == 4) {
+                    unscaledBI = BigInteger.valueOf(unscaledInt);
+                }
+                idx += 2;
+                d = ByteConverter.int2(bytes, idx);
+                if (unscaledBI == null) {
+                    unscaledInt *= 10000;
+                    unscaledInt += d;
+                } else {
+                    unscaledBI = unscaledBI.multiply(BI_TEN_THOUSAND);
+                    if (d != 0) {
+                        unscaledBI = unscaledBI.add(BigInteger.valueOf(d));
+                    }
+                }
+            }
+            //now we need BigInteger to create BigDecimal
+            if (unscaledBI == null) {
+                unscaledBI = BigInteger.valueOf(unscaledInt);
+            }
+            if (sign == NUMERIC_NEG) {
+                unscaledBI = unscaledBI.negate();
+            }
+            //the difference between len and weight (adjusted from 0 based) becomes the scale for BigDecimal
+            final int bigDecScale = (len - (weight + 1)) * 4;
+            //string representation always results in a BigDecimal with scale of 0
+            //the binary representation, where weight and len can infer trailing 0s, can result in a negative scale
+            //to produce a consistent BigDecimal, we return the equivalent object with scale set to 0
+            return bigDecScale == 0 ? new BigDecimal(unscaledBI) : new BigDecimal(unscaledBI, bigDecScale).setScale(0);
+        }
+
+        //defer moving to BigInteger as long as possible
+        //operations on the long are much faster
+        BigInteger unscaledBI = null;
+        long unscaledInt = d;
+        //weight and scale as defined by postgresql are a bit different than how BigDecimal treats scale
+        //maintain the effective values to massage as we process through values
+        int effectiveWeight = weight;
+        int effectiveScale = scale;
+        for (int i = 1; i < len; i++) {
+            if (i == 4) {
+                unscaledBI = BigInteger.valueOf(unscaledInt);
+            }
+            idx += 2;
+            d = ByteConverter.int2(bytes, idx);
+            //first process effective weight down to 0
+            if (effectiveWeight > 0) {
+                --effectiveWeight;
+                if (unscaledBI == null) {
+                    unscaledInt *= 10000;
+                } else {
+                    unscaledBI = unscaledBI.multiply(BI_TEN_THOUSAND);
+                }
+            } else if (effectiveScale >= 4) {
+                //if effective scale is at least 4, then all 4 digits should be used
+                //and the existing number needs to be shifted 4
+                effectiveScale -= 4;
+                if (unscaledBI == null) {
+                    unscaledInt *= 10000;
+                } else {
+                    unscaledBI = unscaledBI.multiply(BI_TEN_THOUSAND);
+                }
+            } else {
+                //if effective scale is less than 4, then only shift left based on remaining scale
+                if (unscaledBI == null) {
+                    unscaledInt *= INT_TEN_POWERS[effectiveScale];
+                } else {
+                    unscaledBI = unscaledBI.multiply(tenPower(effectiveScale));
+                }
+                //and d needs to be shifted to the right to only get correct number of
+                //significant digits
+                d = (short) (d / INT_TEN_POWERS[4 - effectiveScale]);
+                effectiveScale = 0;
+            }
+            if (unscaledBI == null) {
+                unscaledInt += d;
+            } else {
+                if (d != 0) {
+                    unscaledBI = unscaledBI.add(BigInteger.valueOf(d));
+                }
+            }
+        }
+
+        //now we need BigInteger to create BigDecimal
+        if (unscaledBI == null) {
+            unscaledBI = BigInteger.valueOf(unscaledInt);
+        }
+        //if there is remaining weight, apply it here
+        if (effectiveWeight > 0) {
+            unscaledBI = unscaledBI.multiply(tenPower(effectiveWeight * 4));
+        }
+        //if there is remaining effective scale, apply it here
+        if (effectiveScale > 0) {
             unscaledBI = unscaledBI.multiply(tenPower(effectiveScale));
-          }
-          //and d needs to be shifted to the right to only get correct number of
-          //significant digits
-          d = (short) (d / INT_TEN_POWERS[4 - effectiveScale]);
-          effectiveScale = 0;
         }
-        if (unscaledBI == null) {
-          unscaledInt += d;
-        } else {
-          if (d != 0) {
-            unscaledBI = unscaledBI.add(BigInteger.valueOf(d));
-          }
+        if (sign == NUMERIC_NEG) {
+            unscaledBI = unscaledBI.negate();
         }
-      }
-      //now we need BigInteger to create BigDecimal
-      if (unscaledBI == null) {
-        unscaledBI = BigInteger.valueOf(unscaledInt);
-      }
-      //if there is remaining effective scale, apply it here
-      if (effectiveScale > 0) {
-        unscaledBI = unscaledBI.multiply(tenPower(effectiveScale));
-      }
-      if (sign == NUMERIC_NEG) {
-        unscaledBI = unscaledBI.negate();
-      }
 
-      return new BigDecimal(unscaledBI, scale);
+        return new BigDecimal(unscaledBI, scale);
     }
 
-    //if there is no scale, then shorts are the unscaled int
-    if (scale == 0) {
-      //defer moving to BigInteger as long as possible
-      //operations on the long are much faster
-      BigInteger unscaledBI = null;
-      long unscaledInt = d;
-      //loop over all of the len shorts to process as the unscaled int
-      for (int i = 1; i < len; i++) {
-        if (i == 4) {
-          unscaledBI = BigInteger.valueOf(unscaledInt);
+    /**
+     * Converts a non-null {@link BigDecimal} to binary format for {@link org.postgresql.core.Oid#NUMERIC}.
+     *
+     * @param nbr The instance to represent in binary.
+     * @return The binary representation of <i>nbr</i>.
+     */
+    public static byte[] numeric(BigDecimal nbr) {
+        final PositiveShorts shorts = new PositiveShorts();
+        BigInteger unscaled = nbr.unscaledValue().abs();
+        int scale = nbr.scale();
+        if (unscaled.equals(BigInteger.ZERO)) {
+            final byte[] bytes = new byte[]{0, 0, -1, -1, 0, 0, 0, 0};
+            ByteConverter.int2(bytes, 6, Math.max(0, scale));
+            return bytes;
         }
+        int weight = -1;
+        if (scale <= 0) {
+            //this means we have an integer
+            //adjust unscaled and weight
+            if (scale < 0) {
+                scale = Math.abs(scale);
+                //weight value covers 4 digits
+                weight += scale / 4;
+                //whatever remains needs to be incorporated to the unscaled value
+                int mod = scale % 4;
+                unscaled = unscaled.multiply(tenPower(mod));
+                scale = 0;
+            }
+
+            while (unscaled.compareTo(BI_MAX_LONG) > 0) {
+                final BigInteger[] pair = unscaled.divideAndRemainder(BI_TEN_THOUSAND);
+                unscaled = pair[0];
+                final short shortValue = pair[1].shortValue();
+                if (shortValue != 0 || !shorts.isEmpty()) {
+                    shorts.push(shortValue);
+                }
+                ++weight;
+            }
+            long unscaledLong = unscaled.longValueExact();
+            do {
+                final short shortValue = (short) (unscaledLong % 10000);
+                if (shortValue != 0 || !shorts.isEmpty()) {
+                    shorts.push(shortValue);
+                }
+                unscaledLong = unscaledLong / 10000L;
+                ++weight;
+            } while (unscaledLong != 0);
+        } else {
+            final BigInteger[] split = unscaled.divideAndRemainder(tenPower(scale));
+            BigInteger decimal = split[1];
+            BigInteger wholes = split[0];
+            weight = -1;
+            if (!BigInteger.ZERO.equals(decimal)) {
+                int mod = scale % 4;
+                int segments = scale / 4;
+                if (mod != 0) {
+                    decimal = decimal.multiply(tenPower(4 - mod));
+                    ++segments;
+                }
+                do {
+                    final BigInteger[] pair = decimal.divideAndRemainder(BI_TEN_THOUSAND);
+                    decimal = pair[0];
+                    final short shortValue = pair[1].shortValue();
+                    if (shortValue != 0 || !shorts.isEmpty()) {
+                        shorts.push(shortValue);
+                    }
+                    --segments;
+                } while (!BigInteger.ZERO.equals(decimal));
+
+                //for the leading 0 shorts we either adjust weight (if no wholes)
+                // or push shorts
+                if (BigInteger.ZERO.equals(wholes)) {
+                    weight -= segments;
+                } else {
+                    //now add leading 0 shorts
+                    for (int i = 0; i < segments; i++) {
+                        shorts.push((short) 0);
+                    }
+                }
+            }
+
+            while (!BigInteger.ZERO.equals(wholes)) {
+                ++weight;
+                final BigInteger[] pair = wholes.divideAndRemainder(BI_TEN_THOUSAND);
+                wholes = pair[0];
+                final short shortValue = pair[1].shortValue();
+                if (shortValue != 0 || !shorts.isEmpty()) {
+                    shorts.push(shortValue);
+                }
+            }
+        }
+
+        //8 bytes for "header" and then 2 for each short
+        final byte[] bytes = new byte[8 + (2 * shorts.size())];
+        int idx = 0;
+
+        //number of 2-byte shorts representing 4 decimal digits
+        ByteConverter.int2(bytes, idx, shorts.size());
         idx += 2;
-        d = ByteConverter.int2(bytes, idx);
-        if (unscaledBI == null) {
-          unscaledInt *= 10000;
-          unscaledInt += d;
-        } else {
-          unscaledBI = unscaledBI.multiply(BI_TEN_THOUSAND);
-          if (d != 0) {
-            unscaledBI = unscaledBI.add(BigInteger.valueOf(d));
-          }
+        //0 based number of 4 decimal digits (i.e. 2-byte shorts) before the decimal
+        ByteConverter.int2(bytes, idx, weight);
+        idx += 2;
+        //indicates positive, negative or NaN
+        ByteConverter.int2(bytes, idx, nbr.signum() == -1 ? NUMERIC_NEG : NUMERIC_POS);
+        idx += 2;
+        //number of digits after the decimal
+        ByteConverter.int2(bytes, idx, Math.max(0, scale));
+        idx += 2;
+
+        short s;
+        while ((s = shorts.pop()) != -1) {
+            ByteConverter.int2(bytes, idx, s);
+            idx += 2;
         }
-      }
-      //now we need BigInteger to create BigDecimal
-      if (unscaledBI == null) {
-        unscaledBI = BigInteger.valueOf(unscaledInt);
-      }
-      if (sign == NUMERIC_NEG) {
-        unscaledBI = unscaledBI.negate();
-      }
-      //the difference between len and weight (adjusted from 0 based) becomes the scale for BigDecimal
-      final int bigDecScale = (len - (weight + 1)) * 4;
-      //string representation always results in a BigDecimal with scale of 0
-      //the binary representation, where weight and len can infer trailing 0s, can result in a negative scale
-      //to produce a consistent BigDecimal, we return the equivalent object with scale set to 0
-      return bigDecScale == 0 ? new BigDecimal(unscaledBI) : new BigDecimal(unscaledBI, bigDecScale).setScale(0);
+
+        return bytes;
     }
 
-    //defer moving to BigInteger as long as possible
-    //operations on the long are much faster
-    BigInteger unscaledBI = null;
-    long unscaledInt = d;
-    //weight and scale as defined by postgresql are a bit different than how BigDecimal treats scale
-    //maintain the effective values to massage as we process through values
-    int effectiveWeight = weight;
-    int effectiveScale = scale;
-    for (int i = 1; i < len; i++) {
-      if (i == 4) {
-        unscaledBI = BigInteger.valueOf(unscaledInt);
-      }
-      idx += 2;
-      d = ByteConverter.int2(bytes, idx);
-      //first process effective weight down to 0
-      if (effectiveWeight > 0) {
-        --effectiveWeight;
-        if (unscaledBI == null) {
-          unscaledInt *= 10000;
-        } else {
-          unscaledBI = unscaledBI.multiply(BI_TEN_THOUSAND);
-        }
-      } else if (effectiveScale >= 4) {
-        //if effective scale is at least 4, then all 4 digits should be used
-        //and the existing number needs to be shifted 4
-        effectiveScale -= 4;
-        if (unscaledBI == null) {
-          unscaledInt *= 10000;
-        } else {
-          unscaledBI = unscaledBI.multiply(BI_TEN_THOUSAND);
-        }
-      } else {
-        //if effective scale is less than 4, then only shift left based on remaining scale
-        if (unscaledBI == null) {
-          unscaledInt *= INT_TEN_POWERS[effectiveScale];
-        } else {
-          unscaledBI = unscaledBI.multiply(tenPower(effectiveScale));
-        }
-        //and d needs to be shifted to the right to only get correct number of
-        //significant digits
-        d = (short) (d / INT_TEN_POWERS[4 - effectiveScale]);
-        effectiveScale = 0;
-      }
-      if (unscaledBI == null) {
-        unscaledInt += d;
-      } else {
-        if (d != 0) {
-          unscaledBI = unscaledBI.add(BigInteger.valueOf(d));
-        }
-      }
+    private static BigInteger tenPower(int exponent) {
+        return BI_TEN_POWERS.length > exponent ? BI_TEN_POWERS[exponent] : BigInteger.TEN.pow(exponent);
     }
 
-    //now we need BigInteger to create BigDecimal
-    if (unscaledBI == null) {
-      unscaledBI = BigInteger.valueOf(unscaledInt);
-    }
-    //if there is remaining weight, apply it here
-    if (effectiveWeight > 0) {
-      unscaledBI = unscaledBI.multiply(tenPower(effectiveWeight * 4));
-    }
-    //if there is remaining effective scale, apply it here
-    if (effectiveScale > 0) {
-      unscaledBI = unscaledBI.multiply(tenPower(effectiveScale));
-    }
-    if (sign == NUMERIC_NEG) {
-      unscaledBI = unscaledBI.negate();
+    /**
+     * Parses a long value from the byte array.
+     *
+     * @param bytes The byte array to parse.
+     * @param idx   The starting index of the parse in the byte array.
+     * @return parsed long value.
+     */
+    public static long int8(byte[] bytes, int idx) {
+        return
+                ((long) (bytes[idx + 0] & 255) << 56)
+                        + ((long) (bytes[idx + 1] & 255) << 48)
+                        + ((long) (bytes[idx + 2] & 255) << 40)
+                        + ((long) (bytes[idx + 3] & 255) << 32)
+                        + ((long) (bytes[idx + 4] & 255) << 24)
+                        + ((long) (bytes[idx + 5] & 255) << 16)
+                        + ((long) (bytes[idx + 6] & 255) << 8)
+                        + (bytes[idx + 7] & 255);
     }
 
-    return new BigDecimal(unscaledBI, scale);
-  }
-
-  /**
-   * Converts a non-null {@link BigDecimal} to binary format for {@link org.postgresql.core.Oid#NUMERIC}.
-   * @param nbr The instance to represent in binary.
-   * @return The binary representation of <i>nbr</i>.
-   */
-  public static byte[] numeric(BigDecimal nbr) {
-    final PositiveShorts shorts = new PositiveShorts();
-    BigInteger unscaled = nbr.unscaledValue().abs();
-    int scale = nbr.scale();
-    if (unscaled.equals(BigInteger.ZERO)) {
-      final byte[] bytes = new byte[]{0, 0, -1, -1, 0, 0, 0, 0};
-      ByteConverter.int2(bytes, 6, Math.max(0, scale));
-      return bytes;
+    /**
+     * Parses an int value from the byte array.
+     *
+     * @param bytes The byte array to parse.
+     * @param idx   The starting index of the parse in the byte array.
+     * @return parsed int value.
+     */
+    public static int int4(byte[] bytes, int idx) {
+        return
+                ((bytes[idx] & 255) << 24)
+                        + ((bytes[idx + 1] & 255) << 16)
+                        + ((bytes[idx + 2] & 255) << 8)
+                        + (bytes[idx + 3] & 255);
     }
-    int weight = -1;
-    if (scale <= 0) {
-      //this means we have an integer
-      //adjust unscaled and weight
-      if (scale < 0) {
-        scale = Math.abs(scale);
-        //weight value covers 4 digits
-        weight += scale / 4;
-        //whatever remains needs to be incorporated to the unscaled value
-        int mod = scale % 4;
-        unscaled = unscaled.multiply(tenPower(mod));
-        scale = 0;
-      }
 
-      while (unscaled.compareTo(BI_MAX_LONG) > 0) {
-        final BigInteger[] pair = unscaled.divideAndRemainder(BI_TEN_THOUSAND);
-        unscaled = pair[0];
-        final short shortValue = pair[1].shortValue();
-        if (shortValue != 0 || !shorts.isEmpty()) {
-          shorts.push(shortValue);
+    /**
+     * Parses a short value from the byte array.
+     *
+     * @param bytes The byte array to parse.
+     * @param idx   The starting index of the parse in the byte array.
+     * @return parsed short value.
+     */
+    public static short int2(byte[] bytes, int idx) {
+        return (short) (((bytes[idx] & 255) << 8) + (bytes[idx + 1] & 255));
+    }
+
+    /**
+     * Parses a boolean value from the byte array.
+     *
+     * @param bytes The byte array to parse.
+     * @param idx   The starting index to read from bytes.
+     * @return parsed boolean value.
+     */
+    public static boolean bool(byte[] bytes, int idx) {
+        return bytes[idx] == 1;
+    }
+
+    /**
+     * Parses a float value from the byte array.
+     *
+     * @param bytes The byte array to parse.
+     * @param idx   The starting index of the parse in the byte array.
+     * @return parsed float value.
+     */
+    public static float float4(byte[] bytes, int idx) {
+        return Float.intBitsToFloat(int4(bytes, idx));
+    }
+
+    /**
+     * Parses a double value from the byte array.
+     *
+     * @param bytes The byte array to parse.
+     * @param idx   The starting index of the parse in the byte array.
+     * @return parsed double value.
+     */
+    public static double float8(byte[] bytes, int idx) {
+        return Double.longBitsToDouble(int8(bytes, idx));
+    }
+
+    /**
+     * Encodes a long value to the byte array.
+     *
+     * @param target The byte array to encode to.
+     * @param idx    The starting index in the byte array.
+     * @param value  The value to encode.
+     */
+    public static void int8(byte[] target, int idx, long value) {
+        target[idx + 0] = (byte) (value >>> 56);
+        target[idx + 1] = (byte) (value >>> 48);
+        target[idx + 2] = (byte) (value >>> 40);
+        target[idx + 3] = (byte) (value >>> 32);
+        target[idx + 4] = (byte) (value >>> 24);
+        target[idx + 5] = (byte) (value >>> 16);
+        target[idx + 6] = (byte) (value >>> 8);
+        target[idx + 7] = (byte) value;
+    }
+
+    /**
+     * Encodes an int value to the byte array.
+     *
+     * @param target The byte array to encode to.
+     * @param idx    The starting index in the byte array.
+     * @param value  The value to encode.
+     */
+    public static void int4(byte[] target, int idx, int value) {
+        target[idx + 0] = (byte) (value >>> 24);
+        target[idx + 1] = (byte) (value >>> 16);
+        target[idx + 2] = (byte) (value >>> 8);
+        target[idx + 3] = (byte) value;
+    }
+
+    /**
+     * Encodes an int value to the byte array as a 2-byte short.
+     *
+     * @param target The byte array to encode to.
+     * @param idx    The starting index in the byte array.
+     * @param value  The value to encode.
+     */
+    public static void int2(byte[] target, int idx, int value) {
+        target[idx + 0] = (byte) (value >>> 8);
+        target[idx + 1] = (byte) value;
+    }
+
+    /**
+     * Encodes a boolean value to the byte array.
+     *
+     * @param target The byte array to encode to.
+     * @param idx    The starting index in the byte array.
+     * @param value  The value to encode.
+     */
+    public static void bool(byte[] target, int idx, boolean value) {
+        target[idx] = value ? (byte) 1 : (byte) 0;
+    }
+
+    /**
+     * Encodes a float value to the byte array.
+     *
+     * @param target The byte array to encode to.
+     * @param idx    The starting index in the byte array.
+     * @param value  The value to encode.
+     */
+    public static void float4(byte[] target, int idx, float value) {
+        int4(target, idx, Float.floatToRawIntBits(value));
+    }
+
+    /**
+     * Encodes a double value to the byte array.
+     *
+     * @param target The byte array to encode to.
+     * @param idx    The starting index in the byte array.
+     * @param value  The value to encode.
+     */
+    public static void float8(byte[] target, int idx, double value) {
+        int8(target, idx, Double.doubleToRawLongBits(value));
+    }
+
+    /**
+     * Simple stack structure for non-negative {@code short} values.
+     */
+    private static final class PositiveShorts {
+        private short[] shorts = new short[8];
+        private int idx;
+
+        PositiveShorts() {
         }
-        ++weight;
-      }
-      long unscaledLong = unscaled.longValueExact();
-      do {
-        final short shortValue = (short) (unscaledLong % 10000);
-        if (shortValue != 0 || !shorts.isEmpty()) {
-          shorts.push(shortValue);
-        }
-        unscaledLong = unscaledLong / 10000L;
-        ++weight;
-      } while (unscaledLong != 0);
-    } else {
-      final BigInteger[] split = unscaled.divideAndRemainder(tenPower(scale));
-      BigInteger decimal = split[1];
-      BigInteger wholes = split[0];
-      weight = -1;
-      if (!BigInteger.ZERO.equals(decimal)) {
-        int mod = scale % 4;
-        int segments = scale / 4;
-        if (mod != 0) {
-          decimal = decimal.multiply(tenPower(4 - mod));
-          ++segments;
-        }
-        do {
-          final BigInteger[] pair = decimal.divideAndRemainder(BI_TEN_THOUSAND);
-          decimal = pair[0];
-          final short shortValue = pair[1].shortValue();
-          if (shortValue != 0 || !shorts.isEmpty()) {
-            shorts.push(shortValue);
-          }
-          --segments;
-        } while (!BigInteger.ZERO.equals(decimal));
 
-        //for the leading 0 shorts we either adjust weight (if no wholes)
-        // or push shorts
-        if (BigInteger.ZERO.equals(wholes)) {
-          weight -= segments;
-        } else {
-          //now add leading 0 shorts
-          for (int i = 0; i < segments; i++) {
-            shorts.push((short) 0);
-          }
+        public void push(short s) {
+            if (s < 0) {
+                throw new IllegalArgumentException("only non-negative values accepted: " + s);
+            }
+            if (idx == shorts.length) {
+                grow();
+            }
+            shorts[idx++] = s;
         }
-      }
 
-      while (!BigInteger.ZERO.equals(wholes)) {
-        ++weight;
-        final BigInteger[] pair = wholes.divideAndRemainder(BI_TEN_THOUSAND);
-        wholes = pair[0];
-        final short shortValue = pair[1].shortValue();
-        if (shortValue != 0 || !shorts.isEmpty()) {
-          shorts.push(shortValue);
+        public int size() {
+            return idx;
+        }
+
+        public boolean isEmpty() {
+            return idx == 0;
+        }
+
+        public short pop() {
+            return idx > 0 ? shorts[--idx] : -1;
+        }
+
+        private void grow() {
+            final int newSize = shorts.length <= 1024 ? shorts.length << 1 : (int) (shorts.length * 1.5);
+            shorts = Arrays.copyOf(shorts, newSize);
         }
-      }
     }
-
-    //8 bytes for "header" and then 2 for each short
-    final byte[] bytes = new byte[8 + (2 * shorts.size())];
-    int idx = 0;
-
-    //number of 2-byte shorts representing 4 decimal digits
-    ByteConverter.int2(bytes, idx, shorts.size());
-    idx += 2;
-    //0 based number of 4 decimal digits (i.e. 2-byte shorts) before the decimal
-    ByteConverter.int2(bytes, idx, weight);
-    idx += 2;
-    //indicates positive, negative or NaN
-    ByteConverter.int2(bytes, idx, nbr.signum() == -1 ? NUMERIC_NEG : NUMERIC_POS);
-    idx += 2;
-    //number of digits after the decimal
-    ByteConverter.int2(bytes, idx, Math.max(0, scale));
-    idx += 2;
-
-    short s;
-    while ((s = shorts.pop()) != -1) {
-      ByteConverter.int2(bytes, idx, s);
-      idx += 2;
-    }
-
-    return bytes;
-  }
-
-  private static BigInteger tenPower(int exponent) {
-    return BI_TEN_POWERS.length > exponent ? BI_TEN_POWERS[exponent] : BigInteger.TEN.pow(exponent);
-  }
-
-  /**
-   * Parses a long value from the byte array.
-   *
-   * @param bytes The byte array to parse.
-   * @param idx The starting index of the parse in the byte array.
-   * @return parsed long value.
-   */
-  public static long int8(byte[] bytes, int idx) {
-    return
-        ((long) (bytes[idx + 0] & 255) << 56)
-            + ((long) (bytes[idx + 1] & 255) << 48)
-            + ((long) (bytes[idx + 2] & 255) << 40)
-            + ((long) (bytes[idx + 3] & 255) << 32)
-            + ((long) (bytes[idx + 4] & 255) << 24)
-            + ((long) (bytes[idx + 5] & 255) << 16)
-            + ((long) (bytes[idx + 6] & 255) << 8)
-            + (bytes[idx + 7] & 255);
-  }
-
-  /**
-   * Parses an int value from the byte array.
-   *
-   * @param bytes The byte array to parse.
-   * @param idx The starting index of the parse in the byte array.
-   * @return parsed int value.
-   */
-  public static int int4(byte[] bytes, int idx) {
-    return
-        ((bytes[idx] & 255) << 24)
-            + ((bytes[idx + 1] & 255) << 16)
-            + ((bytes[idx + 2] & 255) << 8)
-            + (bytes[idx + 3] & 255);
-  }
-
-  /**
-   * Parses a short value from the byte array.
-   *
-   * @param bytes The byte array to parse.
-   * @param idx The starting index of the parse in the byte array.
-   * @return parsed short value.
-   */
-  public static short int2(byte[] bytes, int idx) {
-    return (short) (((bytes[idx] & 255) << 8) + (bytes[idx + 1] & 255));
-  }
-
-  /**
-   * Parses a boolean value from the byte array.
-   *
-   * @param bytes
-   *          The byte array to parse.
-   * @param idx
-   *          The starting index to read from bytes.
-   * @return parsed boolean value.
-   */
-  public static boolean bool(byte[] bytes, int idx) {
-    return bytes[idx] == 1;
-  }
-
-  /**
-   * Parses a float value from the byte array.
-   *
-   * @param bytes The byte array to parse.
-   * @param idx The starting index of the parse in the byte array.
-   * @return parsed float value.
-   */
-  public static float float4(byte[] bytes, int idx) {
-    return Float.intBitsToFloat(int4(bytes, idx));
-  }
-
-  /**
-   * Parses a double value from the byte array.
-   *
-   * @param bytes The byte array to parse.
-   * @param idx The starting index of the parse in the byte array.
-   * @return parsed double value.
-   */
-  public static double float8(byte[] bytes, int idx) {
-    return Double.longBitsToDouble(int8(bytes, idx));
-  }
-
-  /**
-   * Encodes a long value to the byte array.
-   *
-   * @param target The byte array to encode to.
-   * @param idx The starting index in the byte array.
-   * @param value The value to encode.
-   */
-  public static void int8(byte[] target, int idx, long value) {
-    target[idx + 0] = (byte) (value >>> 56);
-    target[idx + 1] = (byte) (value >>> 48);
-    target[idx + 2] = (byte) (value >>> 40);
-    target[idx + 3] = (byte) (value >>> 32);
-    target[idx + 4] = (byte) (value >>> 24);
-    target[idx + 5] = (byte) (value >>> 16);
-    target[idx + 6] = (byte) (value >>> 8);
-    target[idx + 7] = (byte) value;
-  }
-
-  /**
-   * Encodes a int value to the byte array.
-   *
-   * @param target The byte array to encode to.
-   * @param idx The starting index in the byte array.
-   * @param value The value to encode.
-   */
-  public static void int4(byte[] target, int idx, int value) {
-    target[idx + 0] = (byte) (value >>> 24);
-    target[idx + 1] = (byte) (value >>> 16);
-    target[idx + 2] = (byte) (value >>> 8);
-    target[idx + 3] = (byte) value;
-  }
-
-  /**
-   * Encodes a int value to the byte array.
-   *
-   * @param target The byte array to encode to.
-   * @param idx The starting index in the byte array.
-   * @param value The value to encode.
-   */
-  public static void int2(byte[] target, int idx, int value) {
-    target[idx + 0] = (byte) (value >>> 8);
-    target[idx + 1] = (byte) value;
-  }
-
-  /**
-   * Encodes a boolean value to the byte array.
-   *
-   * @param target
-   *          The byte array to encode to.
-   * @param idx
-   *          The starting index in the byte array.
-   * @param value
-   *          The value to encode.
-   */
-  public static void bool(byte[] target, int idx, boolean value) {
-    target[idx] = value ? (byte) 1 : (byte) 0;
-  }
-
-  /**
-   * Encodes a int value to the byte array.
-   *
-   * @param target The byte array to encode to.
-   * @param idx The starting index in the byte array.
-   * @param value The value to encode.
-   */
-  public static void float4(byte[] target, int idx, float value) {
-    int4(target, idx, Float.floatToRawIntBits(value));
-  }
-
-  /**
-   * Encodes a int value to the byte array.
-   *
-   * @param target The byte array to encode to.
-   * @param idx The starting index in the byte array.
-   * @param value The value to encode.
-   */
-  public static void float8(byte[] target, int idx, double value) {
-    int8(target, idx, Double.doubleToRawLongBits(value));
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/util/ByteStreamWriter.java b/pgjdbc/src/main/java/org/postgresql/util/ByteStreamWriter.java
index b1c723f..112ddaf 100644
--- a/pgjdbc/src/main/java/org/postgresql/util/ByteStreamWriter.java
+++ b/pgjdbc/src/main/java/org/postgresql/util/ByteStreamWriter.java
@@ -32,42 +32,42 @@ import java.nio.ByteBuffer;
  */
 public interface ByteStreamWriter {
 
-  /**
-   * Returns the length of the stream.
-   *
-   * <p> This must be known ahead of calling {@link #writeTo(ByteStreamTarget)}. </p>
-   *
-   * @return the number of bytes in the stream.
-   */
-  int getLength();
-
-  /**
-   * Write the data to the provided {@link OutputStream}.
-   *
-   * <p> Should not write more than {@link #getLength()} bytes. If attempted, the provided stream
-   * will throw an {@link java.io.IOException}. </p>
-   *
-   * @param target the stream to write the data to
-   * @throws IOException if the underlying stream throws or there is some other error.
-   */
-  void writeTo(ByteStreamTarget target) throws IOException;
-
-  static ByteStreamWriter of(ByteBuffer... buf) {
-    return buf.length == 1
-        ? new ByteBufferByteStreamWriter(buf[0])
-        : new ByteBuffersByteStreamWriter(buf);
-  }
-
-  /**
-   * Provides a target to write bytes to.
-   */
-  interface ByteStreamTarget {
+    static ByteStreamWriter of(ByteBuffer... buf) {
+        return buf.length == 1
+                ? new ByteBufferByteStreamWriter(buf[0])
+                : new ByteBuffersByteStreamWriter(buf);
+    }
 
     /**
-     * Provides an output stream to write bytes to.
+     * Returns the length of the stream.
      *
-     * @return an output stream
+     * <p> This must be known ahead of calling {@link #writeTo(ByteStreamTarget)}. </p>
+     *
+     * @return the number of bytes in the stream.
      */
-    OutputStream getOutputStream();
-  }
+    int getLength();
+
+    /**
+     * Write the data to the provided {@link OutputStream}.
+     *
+     * <p> Should not write more than {@link #getLength()} bytes. If attempted, the provided stream
+     * will throw an {@link java.io.IOException}. </p>
+     *
+     * @param target the stream to write the data to
+     * @throws IOException if the underlying stream throws or there is some other error.
+     */
+    void writeTo(ByteStreamTarget target) throws IOException;
+
+    /**
+     * Provides a target to write bytes to.
+     */
+    interface ByteStreamTarget {
+
+        /**
+         * Provides an output stream to write bytes to.
+         *
+         * @return an output stream
+         */
+        OutputStream getOutputStream();
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/util/CanEstimateSize.java b/pgjdbc/src/main/java/org/postgresql/util/CanEstimateSize.java
index 74cba88..572eaeb 100644
--- a/pgjdbc/src/main/java/org/postgresql/util/CanEstimateSize.java
+++ b/pgjdbc/src/main/java/org/postgresql/util/CanEstimateSize.java
@@ -6,5 +6,5 @@
 package org.postgresql.util;
 
 public interface CanEstimateSize {
-  long getSize();
+    long getSize();
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/util/DriverInfo.java b/pgjdbc/src/main/java/org/postgresql/util/DriverInfo.java
index 1c95166..3f3d00e 100644
--- a/pgjdbc/src/main/java/org/postgresql/util/DriverInfo.java
+++ b/pgjdbc/src/main/java/org/postgresql/util/DriverInfo.java
@@ -10,23 +10,20 @@ package org.postgresql.util;
  */
 public final class DriverInfo {
 
-  private DriverInfo() {
-  }
-
-  // Driver name
-  public static final String DRIVER_NAME = "PostgreSQL JDBC Driver";
-  public static final String DRIVER_SHORT_NAME = "PgJDBC";
-  public static final String DRIVER_VERSION = "/*$version$*/";
-  public static final String DRIVER_FULL_NAME = DRIVER_NAME + " " + DRIVER_VERSION;
-
-  // Driver version
-  public static final int MAJOR_VERSION = /*$version.major+";"$*//*-*/42;
-  public static final int MINOR_VERSION = /*$version.minor+";"$*//*-*/0;
-  public static final int PATCH_VERSION = /*$version.patch+";"$*//*-*/0;
-
-  // JDBC specification
-  public static final String JDBC_VERSION = "/*$jdbc.specification.version$*/";
-  public static final int JDBC_MAJOR_VERSION = JDBC_VERSION.charAt(0) - '0';
-  public static final int JDBC_MINOR_VERSION = JDBC_VERSION.charAt(2) - '0';
+    // Driver name
+    public static final String DRIVER_NAME = "PostgreSQL JDBC Driver";
+    public static final String DRIVER_SHORT_NAME = "PgJDBC";
+    public static final String DRIVER_VERSION = "/*$version$*/";
+    public static final String DRIVER_FULL_NAME = DRIVER_NAME + " " + DRIVER_VERSION;
+    // Driver version
+    public static final int MAJOR_VERSION = /*$version.major+";"$*//*-*/42;
+    public static final int MINOR_VERSION = /*$version.minor+";"$*//*-*/0;
+    public static final int PATCH_VERSION = /*$version.patch+";"$*//*-*/0;
+    // JDBC specification
+    public static final String JDBC_VERSION = "/*$jdbc.specification.version$*/";
+    public static final int JDBC_MAJOR_VERSION = JDBC_VERSION.charAt(0) - '0';
+    public static final int JDBC_MINOR_VERSION = JDBC_VERSION.charAt(2) - '0';
+    private DriverInfo() {
+    }
 
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/util/ExpressionProperties.java b/pgjdbc/src/main/java/org/postgresql/util/ExpressionProperties.java
index fa50f76..17fb723 100644
--- a/pgjdbc/src/main/java/org/postgresql/util/ExpressionProperties.java
+++ b/pgjdbc/src/main/java/org/postgresql/util/ExpressionProperties.java
@@ -12,85 +12,85 @@ import java.util.regex.Pattern;
 @SuppressWarnings("serial")
 public class ExpressionProperties extends Properties {
 
-  private static final Pattern EXPRESSION = Pattern.compile("\\$\\{([^}]+)\\}");
+    private static final Pattern EXPRESSION = Pattern.compile("\\$\\{([^}]+)\\}");
 
-  private final Properties[] defaults;
+    private final Properties[] defaults;
 
-  /**
-   * Creates an empty property list with the specified defaults.
-   *
-   * @param defaults java.util.Properties
-   */
-  public ExpressionProperties(Properties ...defaults) {
-    this.defaults = defaults;
-  }
+    /**
+     * Creates an empty property list with the specified defaults.
+     *
+     * @param defaults java.util.Properties
+     */
+    public ExpressionProperties(Properties... defaults) {
+        this.defaults = defaults;
+    }
 
-  /**
-   * <p>Returns property value with all {@code ${propKey}} like references replaced with the value of
-   * the relevant property with recursive resolution.</p>
-   *
-   * <p>The method returns <code>null</code> if the property is not found.</p>
-   *
-   * @param key the property key.
-   *
-   * @return the value in this property list with
-   *         the specified key value.
-   */
-  @Override
-  public String getProperty(String key) {
-    String value = getRawPropertyValue(key);
-    return replaceProperties(value);
-  }
+    /**
+     * <p>Returns property value with all {@code ${propKey}} like references replaced with the value of
+     * the relevant property with recursive resolution.</p>
+     *
+     * <p>The method returns <code>null</code> if the property is not found.</p>
+     *
+     * @param key the property key.
+     * @return the value in this property list with
+     * the specified key value.
+     */
+    @Override
+    public String getProperty(String key) {
+        String value = getRawPropertyValue(key);
+        return replaceProperties(value);
+    }
 
-  @Override
-  public String getProperty(String key, String defaultValue) {
-    String value = getRawPropertyValue(key);
-    if (value == null) {
-      value = defaultValue;
+    @Override
+    public String getProperty(String key, String defaultValue) {
+        String value = getRawPropertyValue(key);
+        if (value == null) {
+            value = defaultValue;
+        }
+        return replaceProperties(value);
     }
-    return replaceProperties(value);
-  }
 
-  /**
-   * Returns raw value of a property without any replacements.
-   * @param key property name
-   * @return raw property value
-   */
-  public String getRawPropertyValue(String key) {
-    String value = super.getProperty(key);
-    if (value != null) {
-      return value;
+    /**
+     * Returns raw value of a property without any replacements.
+     *
+     * @param key property name
+     * @return raw property value
+     */
+    public String getRawPropertyValue(String key) {
+        String value = super.getProperty(key);
+        if (value != null) {
+            return value;
+        }
+        for (Properties properties : defaults) {
+            value = properties.getProperty(key);
+            if (value != null) {
+                return value;
+            }
+        }
+        return null;
     }
-    for (Properties properties : defaults) {
-      value = properties.getProperty(key);
-      if (value != null) {
-        return value;
-      }
-    }
-    return null;
-  }
 
-  private String replaceProperties(String value) {
-    if (value == null) {
-      return null;
+    private String replaceProperties(String value) {
+        if (value == null) {
+            return null;
+        }
+        Matcher matcher = EXPRESSION.matcher(value);
+        StringBuffer sb = null;
+        while (matcher.find()) {
+            if (sb == null) {
+                sb = new StringBuffer();
+            }
+            String propValue = getProperty(matcher.group(1));
+            if (propValue == null) {
+                // Use original content like ${propKey} if property is not found
+                propValue = matcher.group();
+            }
+            matcher.appendReplacement(sb, Matcher.quoteReplacement(propValue));
+        }
+        if (sb == null) {
+            return value;
+        }
+        matcher.appendTail(sb);
+        return sb.toString();
     }
-    Matcher matcher = EXPRESSION.matcher(value);
-    StringBuffer sb = null;
-    while (matcher.find()) {
-      if (sb == null) {
-        sb = new StringBuffer();
-      }
-      String propValue = getProperty(matcher.group(1));
-      if (propValue == null) {
-        // Use original content like ${propKey} if property is not found
-        propValue = matcher.group();
-      }
-      matcher.appendReplacement(sb, Matcher.quoteReplacement(propValue));
-    }
-    if (sb == null) {
-      return value;
-    }
-    matcher.appendTail(sb);
-    return sb.toString();
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/util/GT.java b/pgjdbc/src/main/java/org/postgresql/util/GT.java
index c6e2f8a..70877a1 100644
--- a/pgjdbc/src/main/java/org/postgresql/util/GT.java
+++ b/pgjdbc/src/main/java/org/postgresql/util/GT.java
@@ -18,48 +18,47 @@ import java.util.ResourceBundle;
  */
 public class GT {
 
-  private static final GT _gt = new GT();
-  private static final Object[] noargs = new Object[0];
+    private static final GT _gt = new GT();
+    private static final Object[] noargs = new Object[0];
+    private ResourceBundle bundle;
 
-  public static String tr(String message, Object... args) {
-    return _gt.translate(message, args);
-  }
-
-  private ResourceBundle bundle;
-
-  private GT() {
-    try {
-      bundle = ResourceBundle.getBundle("org.postgresql.translation.messages", Locale.getDefault(Locale.Category.DISPLAY));
-    } catch (MissingResourceException mre) {
-      // translation files have not been installed
-      bundle = null;
-    }
-  }
-
-  private String translate(String message, Object[] args) {
-    if (bundle != null && message != null) {
-      try {
-        message = bundle.getString(message);
-      } catch (MissingResourceException mre) {
-        // If we can't find a translation, just
-        // use the untranslated message.
-      }
+    private GT() {
+        try {
+            bundle = ResourceBundle.getBundle("org.postgresql.translation.messages", Locale.getDefault(Locale.Category.DISPLAY));
+        } catch (MissingResourceException mre) {
+            // translation files have not been installed
+            bundle = null;
+        }
     }
 
-    // If we don't have any parameters we still need to run
-    // this through the MessageFormat(ter) to allow the same
-    // quoting and escaping rules to be used for all messages.
-    //
-    if (args == null) {
-      args = noargs;
+    public static String tr(String message, Object... args) {
+        return _gt.translate(message, args);
     }
 
-    // Replace placeholders with arguments
-    //
-    if (message != null) {
-      message = MessageFormat.format(message, args);
-    }
+    private String translate(String message, Object[] args) {
+        if (bundle != null && message != null) {
+            try {
+                message = bundle.getString(message);
+            } catch (MissingResourceException mre) {
+                // If we can't find a translation, just
+                // use the untranslated message.
+            }
+        }
 
-    return message;
-  }
+        // If we don't have any parameters we still need to run
+        // this through the MessageFormat(ter) to allow the same
+        // quoting and escaping rules to be used for all messages.
+        //
+        if (args == null) {
+            args = noargs;
+        }
+
+        // Replace placeholders with arguments
+        //
+        if (message != null) {
+            message = MessageFormat.format(message, args);
+        }
+
+        return message;
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/util/Gettable.java b/pgjdbc/src/main/java/org/postgresql/util/Gettable.java
index 7ff1c3d..1fdeca3 100644
--- a/pgjdbc/src/main/java/org/postgresql/util/Gettable.java
+++ b/pgjdbc/src/main/java/org/postgresql/util/Gettable.java
@@ -6,5 +6,5 @@
 package org.postgresql.util;
 
 public interface Gettable<K extends Object, V extends Object> {
-  V get(K key);
+    V get(K key);
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/util/GettableHashMap.java b/pgjdbc/src/main/java/org/postgresql/util/GettableHashMap.java
index 1f9eb16..36a58c7 100644
--- a/pgjdbc/src/main/java/org/postgresql/util/GettableHashMap.java
+++ b/pgjdbc/src/main/java/org/postgresql/util/GettableHashMap.java
@@ -9,8 +9,8 @@ import java.util.HashMap;
 
 @SuppressWarnings("serial")
 public class GettableHashMap<K extends Object, V extends Object>
-    extends HashMap<K,V>
-    implements Gettable<K,V> {
+        extends HashMap<K, V>
+        implements Gettable<K, V> {
 
     public GettableHashMap() {
     }
diff --git a/pgjdbc/src/main/java/org/postgresql/util/HStoreConverter.java b/pgjdbc/src/main/java/org/postgresql/util/HStoreConverter.java
index 98bee9c..897d9e7 100644
--- a/pgjdbc/src/main/java/org/postgresql/util/HStoreConverter.java
+++ b/pgjdbc/src/main/java/org/postgresql/util/HStoreConverter.java
@@ -16,148 +16,148 @@ import java.util.Map.Entry;
 
 public class HStoreConverter {
 
-  public HStoreConverter() {
-  }
+    public HStoreConverter() {
+    }
 
-  public static Map<String, String> fromBytes(byte[] b, Encoding encoding)
-      throws SQLException {
-    Map<String, String> m = new HashMap<String, String>();
-    int pos = 0;
-    int numElements = ByteConverter.int4(b, pos);
-    pos += 4;
-    try {
-      for (int i = 0; i < numElements; i++) {
-        int keyLen = ByteConverter.int4(b, pos);
+    public static Map<String, String> fromBytes(byte[] b, Encoding encoding)
+            throws SQLException {
+        Map<String, String> m = new HashMap<String, String>();
+        int pos = 0;
+        int numElements = ByteConverter.int4(b, pos);
         pos += 4;
-        String key = encoding.decode(b, pos, keyLen);
-        pos += keyLen;
-        int valLen = ByteConverter.int4(b, pos);
-        pos += 4;
-        String val;
-        if (valLen == -1) {
-          val = null;
+        try {
+            for (int i = 0; i < numElements; i++) {
+                int keyLen = ByteConverter.int4(b, pos);
+                pos += 4;
+                String key = encoding.decode(b, pos, keyLen);
+                pos += keyLen;
+                int valLen = ByteConverter.int4(b, pos);
+                pos += 4;
+                String val;
+                if (valLen == -1) {
+                    val = null;
+                } else {
+                    val = encoding.decode(b, pos, valLen);
+                    pos += valLen;
+                }
+                m.put(key, val);
+            }
+        } catch (IOException ioe) {
+            throw new PSQLException(
+                    GT.tr(
+                            "Invalid character data was found.  This is most likely caused by stored data containing characters that are invalid for the character set the database was created in.  The most common example of this is storing 8bit data in a SQL_ASCII database."),
+                    PSQLState.DATA_ERROR, ioe);
+        }
+        return m;
+    }
+
+    public static byte[] toBytes(Map<?, ?> m, Encoding encoding) throws SQLException {
+        ByteArrayOutputStream baos = new ByteArrayOutputStream(4 + 10 * m.size());
+        byte[] lenBuf = new byte[4];
+        try {
+            ByteConverter.int4(lenBuf, 0, m.size());
+            baos.write(lenBuf);
+            for (Entry<?, ?> e : m.entrySet()) {
+                Object mapKey = e.getKey();
+                if (mapKey == null) {
+                    throw new PSQLException(GT.tr("hstore key must not be null"),
+                            PSQLState.INVALID_PARAMETER_VALUE);
+                }
+                byte[] key = encoding.encode(mapKey.toString());
+                ByteConverter.int4(lenBuf, 0, key.length);
+                baos.write(lenBuf);
+                baos.write(key);
+
+                if (e.getValue() == null) {
+                    ByteConverter.int4(lenBuf, 0, -1);
+                    baos.write(lenBuf);
+                } else {
+                    byte[] val = encoding.encode(e.getValue().toString());
+                    ByteConverter.int4(lenBuf, 0, val.length);
+                    baos.write(lenBuf);
+                    baos.write(val);
+                }
+            }
+        } catch (IOException ioe) {
+            throw new PSQLException(
+                    GT.tr(
+                            "Invalid character data was found.  This is most likely caused by stored data containing characters that are invalid for the character set the database was created in.  The most common example of this is storing 8bit data in a SQL_ASCII database."),
+                    PSQLState.DATA_ERROR, ioe);
+        }
+        return baos.toByteArray();
+    }
+
+    public static String toString(Map<?, ?> map) {
+        if (map.isEmpty()) {
+            return "";
+        }
+        StringBuilder sb = new StringBuilder(map.size() * 8);
+        for (Entry<?, ?> e : map.entrySet()) {
+            appendEscaped(sb, e.getKey());
+            sb.append("=>");
+            appendEscaped(sb, e.getValue());
+            sb.append(", ");
+        }
+        sb.setLength(sb.length() - 2);
+        return sb.toString();
+    }
+
+    private static void appendEscaped(StringBuilder sb, Object val) {
+        if (val != null) {
+            sb.append('"');
+            String s = val.toString();
+            for (int pos = 0; pos < s.length(); pos++) {
+                char ch = s.charAt(pos);
+                if (ch == '"' || ch == '\\') {
+                    sb.append('\\');
+                }
+                sb.append(ch);
+            }
+            sb.append('"');
         } else {
-          val = encoding.decode(b, pos, valLen);
-          pos += valLen;
+            sb.append("NULL");
         }
-        m.put(key, val);
-      }
-    } catch (IOException ioe) {
-      throw new PSQLException(
-          GT.tr(
-              "Invalid character data was found.  This is most likely caused by stored data containing characters that are invalid for the character set the database was created in.  The most common example of this is storing 8bit data in a SQL_ASCII database."),
-          PSQLState.DATA_ERROR, ioe);
     }
-    return m;
-  }
 
-  public static byte[] toBytes(Map<?, ?> m, Encoding encoding) throws SQLException {
-    ByteArrayOutputStream baos = new ByteArrayOutputStream(4 + 10 * m.size());
-    byte[] lenBuf = new byte[4];
-    try {
-      ByteConverter.int4(lenBuf, 0, m.size());
-      baos.write(lenBuf);
-      for (Entry<?, ?> e : m.entrySet()) {
-        Object mapKey = e.getKey();
-        if (mapKey == null) {
-          throw new PSQLException(GT.tr("hstore key must not be null"),
-              PSQLState.INVALID_PARAMETER_VALUE);
+    public static Map<String, String> fromString(String s) {
+        Map<String, String> m = new HashMap<String, String>();
+        int pos = 0;
+        StringBuilder sb = new StringBuilder();
+        while (pos < s.length()) {
+            sb.setLength(0);
+            int start = s.indexOf('"', pos);
+            int end = appendUntilQuote(sb, s, start);
+            String key = sb.toString();
+            pos = end + 3;
+
+            String val;
+            if (s.charAt(pos) == 'N') {
+                val = null;
+                pos += 4;
+            } else {
+                sb.setLength(0);
+                end = appendUntilQuote(sb, s, pos);
+                val = sb.toString();
+                pos = end;
+            }
+            pos++;
+            m.put(key, val);
         }
-        byte[] key = encoding.encode(mapKey.toString());
-        ByteConverter.int4(lenBuf, 0, key.length);
-        baos.write(lenBuf);
-        baos.write(key);
+        return m;
+    }
 
-        if (e.getValue() == null) {
-          ByteConverter.int4(lenBuf, 0, -1);
-          baos.write(lenBuf);
-        } else {
-          byte[] val = encoding.encode(e.getValue().toString());
-          ByteConverter.int4(lenBuf, 0, val.length);
-          baos.write(lenBuf);
-          baos.write(val);
+    private static int appendUntilQuote(StringBuilder sb, String s, int pos) {
+        for (pos += 1; pos < s.length(); pos++) {
+            char ch = s.charAt(pos);
+            if (ch == '"') {
+                break;
+            }
+            if (ch == '\\') {
+                pos++;
+                ch = s.charAt(pos);
+            }
+            sb.append(ch);
         }
-      }
-    } catch (IOException ioe) {
-      throw new PSQLException(
-          GT.tr(
-              "Invalid character data was found.  This is most likely caused by stored data containing characters that are invalid for the character set the database was created in.  The most common example of this is storing 8bit data in a SQL_ASCII database."),
-          PSQLState.DATA_ERROR, ioe);
+        return pos;
     }
-    return baos.toByteArray();
-  }
-
-  public static String toString(Map<?, ?> map) {
-    if (map.isEmpty()) {
-      return "";
-    }
-    StringBuilder sb = new StringBuilder(map.size() * 8);
-    for (Entry<?, ?> e : map.entrySet()) {
-      appendEscaped(sb, e.getKey());
-      sb.append("=>");
-      appendEscaped(sb, e.getValue());
-      sb.append(", ");
-    }
-    sb.setLength(sb.length() - 2);
-    return sb.toString();
-  }
-
-  private static void appendEscaped(StringBuilder sb, Object val) {
-    if (val != null) {
-      sb.append('"');
-      String s = val.toString();
-      for (int pos = 0; pos < s.length(); pos++) {
-        char ch = s.charAt(pos);
-        if (ch == '"' || ch == '\\') {
-          sb.append('\\');
-        }
-        sb.append(ch);
-      }
-      sb.append('"');
-    } else {
-      sb.append("NULL");
-    }
-  }
-
-  public static Map<String, String> fromString(String s) {
-    Map<String, String> m = new HashMap<String, String>();
-    int pos = 0;
-    StringBuilder sb = new StringBuilder();
-    while (pos < s.length()) {
-      sb.setLength(0);
-      int start = s.indexOf('"', pos);
-      int end = appendUntilQuote(sb, s, start);
-      String key = sb.toString();
-      pos = end + 3;
-
-      String val;
-      if (s.charAt(pos) == 'N') {
-        val = null;
-        pos += 4;
-      } else {
-        sb.setLength(0);
-        end = appendUntilQuote(sb, s, pos);
-        val = sb.toString();
-        pos = end;
-      }
-      pos++;
-      m.put(key, val);
-    }
-    return m;
-  }
-
-  private static int appendUntilQuote(StringBuilder sb, String s, int pos) {
-    for (pos += 1; pos < s.length(); pos++) {
-      char ch = s.charAt(pos);
-      if (ch == '"') {
-        break;
-      }
-      if (ch == '\\') {
-        pos++;
-        ch = s.charAt(pos);
-      }
-      sb.append(ch);
-    }
-    return pos;
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/util/HostSpec.java b/pgjdbc/src/main/java/org/postgresql/util/HostSpec.java
index 08f7d6c..6e4dcc5 100644
--- a/pgjdbc/src/main/java/org/postgresql/util/HostSpec.java
+++ b/pgjdbc/src/main/java/org/postgresql/util/HostSpec.java
@@ -16,95 +16,95 @@ import java.util.regex.Pattern;
  * Simple container for host and port.
  */
 public class HostSpec {
-  public static final String DEFAULT_NON_PROXY_HOSTS = "localhost|127.*|[::1]|0.0.0.0|[::0]";
+    public static final String DEFAULT_NON_PROXY_HOSTS = "localhost|127.*|[::1]|0.0.0.0|[::0]";
 
-  protected final String localSocketAddress;
-  protected final String host;
-  protected final int port;
+    protected final String localSocketAddress;
+    protected final String host;
+    protected final int port;
 
-  public HostSpec(String host, int port) {
-    this(host, port, null);
-  }
-
-  public HostSpec(String host, int port, String localSocketAddress) {
-    this.host = host;
-    this.port = port;
-    this.localSocketAddress = localSocketAddress;
-  }
-
-  public String getHost() {
-    return host;
-  }
-
-  public int getPort() {
-    return port;
-  }
-
-  @Override
-  public String toString() {
-    return host + ":" + port;
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-    return obj instanceof HostSpec && port == ((HostSpec) obj).port
-        && host.equals(((HostSpec) obj).host) && Objects.equals(localSocketAddress, ((HostSpec) obj).localSocketAddress);
-  }
-
-  @Override
-  public int hashCode() {
-    return Objects.hash(localSocketAddress, host, port);
-  }
-
-  public String getLocalSocketAddress() {
-    return localSocketAddress;
-  }
-
-  public Boolean shouldResolve() {
-    String socksProxy = System.getProperty("socksProxyHost");
-    if (socksProxy == null || socksProxy.trim().isEmpty()) {
-      return true;
-    }
-    return matchesNonProxyHosts();
-  }
-
-  private Boolean matchesNonProxyHosts() {
-    String nonProxyHosts = System.getProperty("socksNonProxyHosts", DEFAULT_NON_PROXY_HOSTS);
-    if (nonProxyHosts == null || this.host.isEmpty()) {
-      return false;
+    public HostSpec(String host, int port) {
+        this(host, port, null);
     }
 
-    Pattern pattern = toPattern(nonProxyHosts);
-    Matcher matcher = pattern == null ? null : pattern.matcher(this.host);
-    return matcher != null && matcher.matches();
-  }
-
-  @SuppressWarnings("regex")
-  private Pattern toPattern(String mask) {
-    StringBuilder joiner = new StringBuilder();
-    String separator = "";
-    for (String disjunct : mask.split("\\|")) {
-      if (!disjunct.isEmpty()) {
-        String regex = disjunctToRegex(disjunct.toLowerCase(Locale.ROOT));
-        joiner.append(separator).append(regex);
-        separator = "|";
-      }
+    public HostSpec(String host, int port, String localSocketAddress) {
+        this.host = host;
+        this.port = port;
+        this.localSocketAddress = localSocketAddress;
     }
 
-    return joiner.length() == 0 ? null : compile(joiner.toString());
-  }
-
-  private String disjunctToRegex(String disjunct) {
-    String regex;
-
-    if (disjunct.startsWith("*")) {
-      regex = ".*" + Pattern.quote(disjunct.substring(1));
-    } else if (disjunct.endsWith("*")) {
-      regex = Pattern.quote(disjunct.substring(0, disjunct.length() - 1)) + ".*";
-    } else {
-      regex = Pattern.quote(disjunct);
+    public String getHost() {
+        return host;
     }
 
-    return regex;
-  }
+    public int getPort() {
+        return port;
+    }
+
+    @Override
+    public String toString() {
+        return host + ":" + port;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        return obj instanceof HostSpec && port == ((HostSpec) obj).port
+                && host.equals(((HostSpec) obj).host) && Objects.equals(localSocketAddress, ((HostSpec) obj).localSocketAddress);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(localSocketAddress, host, port);
+    }
+
+    public String getLocalSocketAddress() {
+        return localSocketAddress;
+    }
+
+    public Boolean shouldResolve() {
+        String socksProxy = System.getProperty("socksProxyHost");
+        if (socksProxy == null || socksProxy.trim().isEmpty()) {
+            return true;
+        }
+        return matchesNonProxyHosts();
+    }
+
+    private Boolean matchesNonProxyHosts() {
+        String nonProxyHosts = System.getProperty("socksNonProxyHosts", DEFAULT_NON_PROXY_HOSTS);
+        if (nonProxyHosts == null || this.host.isEmpty()) {
+            return false;
+        }
+
+        Pattern pattern = toPattern(nonProxyHosts);
+        Matcher matcher = pattern == null ? null : pattern.matcher(this.host);
+        return matcher != null && matcher.matches();
+    }
+
+    @SuppressWarnings("regex")
+    private Pattern toPattern(String mask) {
+        StringBuilder joiner = new StringBuilder();
+        String separator = "";
+        for (String disjunct : mask.split("\\|")) {
+            if (!disjunct.isEmpty()) {
+                String regex = disjunctToRegex(disjunct.toLowerCase(Locale.ROOT));
+                joiner.append(separator).append(regex);
+                separator = "|";
+            }
+        }
+
+        return joiner.length() == 0 ? null : compile(joiner.toString());
+    }
+
+    private String disjunctToRegex(String disjunct) {
+        String regex;
+
+        if (disjunct.startsWith("*")) {
+            regex = ".*" + Pattern.quote(disjunct.substring(1));
+        } else if (disjunct.endsWith("*")) {
+            regex = Pattern.quote(disjunct.substring(0, disjunct.length() - 1)) + ".*";
+        } else {
+            regex = Pattern.quote(disjunct);
+        }
+
+        return regex;
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/util/IntList.java b/pgjdbc/src/main/java/org/postgresql/util/IntList.java
index 52dd577..ae15e27 100644
--- a/pgjdbc/src/main/java/org/postgresql/util/IntList.java
+++ b/pgjdbc/src/main/java/org/postgresql/util/IntList.java
@@ -12,69 +12,69 @@ import java.util.Arrays;
  * this is a driver-internal class, and it is not intended to be used outside the driver.
  */
 public final class IntList {
-  private static final int[] EMPTY_INT_ARRAY = new int[0];
-  private int[] ints = EMPTY_INT_ARRAY;
-  private int size;
+    private static final int[] EMPTY_INT_ARRAY = new int[0];
+    private int[] ints = EMPTY_INT_ARRAY;
+    private int size;
 
-  public IntList() {
-  }
-
-  public void add(int i) {
-    int size = this.size;
-    ensureCapacity(size);
-    ints[size] = i;
-    this.size = size + 1;
-  }
-
-  private void ensureCapacity(int size) {
-    int length = ints.length;
-    if (size >= length) {
-      // double in size until 1024 in size, then grow by 1.5x
-      final int newLength = length == 0 ? 8 :
-          length < 1024 ? length << 1 :
-              (length + (length >> 1));
-      ints = Arrays.copyOf(ints, newLength);
+    public IntList() {
     }
-  }
 
-  public int size() {
-    return size;
-  }
-
-  public int get(int i) {
-    if (i < 0 || i >= size) {
-      throw new ArrayIndexOutOfBoundsException("Index: " + i + ", Size: " + size);
+    public void add(int i) {
+        int size = this.size;
+        ensureCapacity(size);
+        ints[size] = i;
+        this.size = size + 1;
     }
-    return ints[i];
-  }
 
-  public void clear() {
-    size = 0;
-  }
-
-  /**
-   * Returns an array containing all the elements in this list. The modifications of the returned
-   * array will not affect this list.
-   *
-   * @return an array containing all the elements in this list
-   */
-  public int[] toArray() {
-    if (size == 0) {
-      return EMPTY_INT_ARRAY;
+    private void ensureCapacity(int size) {
+        int length = ints.length;
+        if (size >= length) {
+            // double in size until 1024 in size, then grow by 1.5x
+            final int newLength = length == 0 ? 8 :
+                    length < 1024 ? length << 1 :
+                            (length + (length >> 1));
+            ints = Arrays.copyOf(ints, newLength);
+        }
     }
-    return Arrays.copyOf(ints, size);
-  }
 
-  @Override
-  public String toString() {
-    StringBuilder sb = new StringBuilder("[");
-    for (int i = 0; i < size; i++) {
-      if (i > 0) {
-        sb.append(", ");
-      }
-      sb.append(ints[i]);
+    public int size() {
+        return size;
+    }
+
+    public int get(int i) {
+        if (i < 0 || i >= size) {
+            throw new ArrayIndexOutOfBoundsException("Index: " + i + ", Size: " + size);
+        }
+        return ints[i];
+    }
+
+    public void clear() {
+        size = 0;
+    }
+
+    /**
+     * Returns an array containing all the elements in this list. The modifications of the returned
+     * array will not affect this list.
+     *
+     * @return an array containing all the elements in this list
+     */
+    public int[] toArray() {
+        if (size == 0) {
+            return EMPTY_INT_ARRAY;
+        }
+        return Arrays.copyOf(ints, size);
+    }
+
+    @Override
+    public String toString() {
+        StringBuilder sb = new StringBuilder("[");
+        for (int i = 0; i < size; i++) {
+            if (i > 0) {
+                sb.append(", ");
+            }
+            sb.append(ints[i]);
+        }
+        sb.append("]");
+        return sb.toString();
     }
-    sb.append("]");
-    return sb.toString();
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/util/JdbcBlackHole.java b/pgjdbc/src/main/java/org/postgresql/util/JdbcBlackHole.java
index 56f7d6a..f502c65 100644
--- a/pgjdbc/src/main/java/org/postgresql/util/JdbcBlackHole.java
+++ b/pgjdbc/src/main/java/org/postgresql/util/JdbcBlackHole.java
@@ -12,36 +12,36 @@ import java.sql.Statement;
 
 public class JdbcBlackHole {
 
-  public JdbcBlackHole() {
-  }
-
-  public static void close(Connection con) {
-    try {
-      if (con != null) {
-        con.close();
-      }
-    } catch (SQLException e) {
-      /* ignore for now */
+    public JdbcBlackHole() {
     }
-  }
 
-  public static void close(Statement s) {
-    try {
-      if (s != null) {
-        s.close();
-      }
-    } catch (SQLException e) {
-      /* ignore for now */
+    public static void close(Connection con) {
+        try {
+            if (con != null) {
+                con.close();
+            }
+        } catch (SQLException e) {
+            /* ignore for now */
+        }
     }
-  }
 
-  public static void close(ResultSet rs) {
-    try {
-      if (rs != null) {
-        rs.close();
-      }
-    } catch (SQLException e) {
-      /* ignore for now */
+    public static void close(Statement s) {
+        try {
+            if (s != null) {
+                s.close();
+            }
+        } catch (SQLException e) {
+            /* ignore for now */
+        }
+    }
+
+    public static void close(ResultSet rs) {
+        try {
+            if (rs != null) {
+                rs.close();
+            }
+        } catch (SQLException e) {
+            /* ignore for now */
+        }
     }
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/util/KerberosTicket.java b/pgjdbc/src/main/java/org/postgresql/util/KerberosTicket.java
index 5ec3801..971caf6 100644
--- a/pgjdbc/src/main/java/org/postgresql/util/KerberosTicket.java
+++ b/pgjdbc/src/main/java/org/postgresql/util/KerberosTicket.java
@@ -21,67 +21,67 @@ import javax.security.auth.login.LoginException;
 
 public class KerberosTicket {
 
-  private static final String CONFIG_ITEM_NAME = "ticketCache";
-  private static final String KRBLOGIN_MODULE = "com.sun.security.auth.module.Krb5LoginModule";
+    private static final String CONFIG_ITEM_NAME = "ticketCache";
+    private static final String KRBLOGIN_MODULE = "com.sun.security.auth.module.Krb5LoginModule";
 
-  static class CustomKrbConfig extends Configuration {
-
-    public CustomKrbConfig() {
+    public KerberosTicket() {
     }
 
-    @Override
-    public AppConfigurationEntry[] getAppConfigurationEntry(String name) {
-      if (CONFIG_ITEM_NAME.equals(name)) {
-        Map<String, String> options = new HashMap<>();
-        options.put("refreshKrb5Config", Boolean.FALSE.toString());
-        options.put("useTicketCache", Boolean.TRUE.toString());
-        options.put("doNotPrompt", Boolean.TRUE.toString());
-        options.put("useKeyTab", Boolean.TRUE.toString());
-        options.put("isInitiator", Boolean.FALSE.toString());
-        options.put("renewTGT", Boolean.FALSE.toString());
-        options.put("debug", Boolean.FALSE.toString());
-        return new AppConfigurationEntry[]{
-            new AppConfigurationEntry(KRBLOGIN_MODULE,
-                AppConfigurationEntry.LoginModuleControlFlag.REQUIRED, options)};
-      }
-      return null;
+    public static boolean credentialCacheExists(Properties info) {
+        LoginContext lc = null;
+
+        // in the event that the user has specified a jaas.conf file then we want to remember it
+        Configuration existingConfiguration = Configuration.getConfiguration();
+        Configuration.setConfiguration(new CustomKrbConfig());
+
+        try {
+            lc = new LoginContext(CONFIG_ITEM_NAME, new CallbackHandler() {
+
+                @Override
+                public void handle(Callback[] callbacks)
+                        throws IOException, UnsupportedCallbackException {
+                    // if the user has not configured jaasLogin correctly this can happen
+                    throw new RuntimeException("This is an error, you should set doNotPrompt to false in jaas.config");
+                }
+            });
+            lc.login();
+        } catch (LoginException e) {
+            // restore saved configuration
+            if (existingConfiguration != null) {
+                Configuration.setConfiguration(existingConfiguration);
+            }
+            return false;
+        }
+        // restore saved configuration
+        if (existingConfiguration != null) {
+            Configuration.setConfiguration(existingConfiguration);
+        }
+        Subject sub = lc.getSubject();
+        return sub != null;
     }
 
-  }
+    static class CustomKrbConfig extends Configuration {
 
-  public KerberosTicket() {
-  }
-
-  public static boolean credentialCacheExists(Properties info) {
-    LoginContext lc = null;
-
-    // in the event that the user has specified a jaas.conf file then we want to remember it
-    Configuration existingConfiguration = Configuration.getConfiguration();
-    Configuration.setConfiguration(new CustomKrbConfig());
-
-    try {
-      lc = new LoginContext(CONFIG_ITEM_NAME, new CallbackHandler() {
+        public CustomKrbConfig() {
+        }
 
         @Override
-        public void handle(Callback[] callbacks)
-            throws IOException, UnsupportedCallbackException {
-          // if the user has not configured jaasLogin correctly this can happen
-          throw new RuntimeException("This is an error, you should set doNotPrompt to false in jaas.config");
+        public AppConfigurationEntry[] getAppConfigurationEntry(String name) {
+            if (CONFIG_ITEM_NAME.equals(name)) {
+                Map<String, String> options = new HashMap<>();
+                options.put("refreshKrb5Config", Boolean.FALSE.toString());
+                options.put("useTicketCache", Boolean.TRUE.toString());
+                options.put("doNotPrompt", Boolean.TRUE.toString());
+                options.put("useKeyTab", Boolean.TRUE.toString());
+                options.put("isInitiator", Boolean.FALSE.toString());
+                options.put("renewTGT", Boolean.FALSE.toString());
+                options.put("debug", Boolean.FALSE.toString());
+                return new AppConfigurationEntry[]{
+                        new AppConfigurationEntry(KRBLOGIN_MODULE,
+                                AppConfigurationEntry.LoginModuleControlFlag.REQUIRED, options)};
+            }
+            return null;
         }
-      });
-      lc.login();
-    } catch (LoginException e) {
-      // restore saved configuration
-      if (existingConfiguration != null ) {
-        Configuration.setConfiguration(existingConfiguration);
-      }
-      return false;
+
     }
-    // restore saved configuration
-    if (existingConfiguration != null ) {
-      Configuration.setConfiguration(existingConfiguration);
-    }
-    Subject sub = lc.getSubject();
-    return sub != null;
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/util/LazyCleaner.java b/pgjdbc/src/main/java/org/postgresql/util/LazyCleaner.java
index 9ca4c68..01a70b0 100644
--- a/pgjdbc/src/main/java/org/postgresql/util/LazyCleaner.java
+++ b/pgjdbc/src/main/java/org/postgresql/util/LazyCleaner.java
@@ -34,188 +34,186 @@ import java.util.logging.Logger;
  * <p>Note: this is a driver-internal class</p>
  */
 public class LazyCleaner {
-  private static final Logger LOGGER = Logger.getLogger(LazyCleaner.class.getName());
-  private static final LazyCleaner instance =
-      new LazyCleaner(
-          Duration.ofMillis(Long.getLong("pgjdbc.config.cleanup.thread.ttl", 30000)),
-          "PostgreSQL-JDBC-Cleaner"
-      );
-
-  public interface Cleanable<T extends Throwable> {
-    void clean() throws T;
-  }
-
-  public interface CleaningAction<T extends Throwable> {
-    void onClean(boolean leak) throws T;
-  }
-
-  private final ReferenceQueue<Object> queue = new ReferenceQueue<>();
-  private final long threadTtl;
-  private final ThreadFactory threadFactory;
-  private boolean threadRunning;
-  private int watchedCount;
-  private Node<?> first;
-
-  /**
-   * Returns a default cleaner instance.
-   * <p>Note: this is driver-internal API.</p>
-   * @return the instance of LazyCleaner
-   */
-  public static LazyCleaner getInstance() {
-    return instance;
-  }
-
-  public LazyCleaner(Duration threadTtl, final String threadName) {
-    this(threadTtl, runnable -> {
-      Thread thread = new Thread(runnable, threadName);
-      thread.setDaemon(true);
-      return thread;
-    });
-  }
-
-  private LazyCleaner(Duration threadTtl, ThreadFactory threadFactory) {
-    this.threadTtl = threadTtl.toMillis();
-    this.threadFactory = threadFactory;
-  }
-
-  public <T extends Throwable> Cleanable<T> register(Object obj, CleaningAction<T> action) {
-    assert obj != action : "object handle should not be the same as cleaning action, otherwise"
-        + " the object will never become phantom reachable, so the action will never trigger";
-    return add(new Node<T>(obj, action));
-  }
-
-  public synchronized int getWatchedCount() {
-    return watchedCount;
-  }
-
-  public synchronized boolean isThreadRunning() {
-    return threadRunning;
-  }
-
-  private synchronized boolean checkEmpty() {
-    if (first == null) {
-      threadRunning = false;
-      return true;
+    private static final Logger LOGGER = Logger.getLogger(LazyCleaner.class.getName());
+    private static final LazyCleaner instance =
+            new LazyCleaner(
+                    Duration.ofMillis(Long.getLong("pgjdbc.config.cleanup.thread.ttl", 30000)),
+                    "PostgreSQL-JDBC-Cleaner"
+            );
+    private final ReferenceQueue<Object> queue = new ReferenceQueue<>();
+    private final long threadTtl;
+    private final ThreadFactory threadFactory;
+    private boolean threadRunning;
+    private int watchedCount;
+    private Node<?> first;
+    public LazyCleaner(Duration threadTtl, final String threadName) {
+        this(threadTtl, runnable -> {
+            Thread thread = new Thread(runnable, threadName);
+            thread.setDaemon(true);
+            return thread;
+        });
     }
-    return false;
-  }
-
-  private synchronized <T extends Throwable> Node<T> add(Node<T> node) {
-    if (first != null) {
-      node.next = first;
-      first.prev = node;
+    private LazyCleaner(Duration threadTtl, ThreadFactory threadFactory) {
+        this.threadTtl = threadTtl.toMillis();
+        this.threadFactory = threadFactory;
     }
-    first = node;
-    watchedCount++;
 
-    if (!threadRunning) {
-      threadRunning = startThread();
+    /**
+     * Returns a default cleaner instance.
+     * <p>Note: this is driver-internal API.</p>
+     *
+     * @return the instance of LazyCleaner
+     */
+    public static LazyCleaner getInstance() {
+        return instance;
     }
-    return node;
-  }
 
-  private boolean startThread() {
-    Thread thread = threadFactory.newThread(new Runnable() {
-      @Override
-      public void run() {
-        while (true) {
-          try {
-            // Clear setContextClassLoader to avoid leaking the classloader
-            Thread.currentThread().setContextClassLoader(null);
-            Thread.currentThread().setUncaughtExceptionHandler(null);
-            // Node extends PhantomReference, so this cast is safe
-            Node<?> ref = (Node<?>) queue.remove(threadTtl);
-            if (ref == null) {
-              if (checkEmpty()) {
-                break;
-              }
-              continue;
-            }
-            try {
-              ref.onClean(true);
-            } catch (Throwable e) {
-              if (e instanceof InterruptedException) {
-                // This could happen if onClean uses sneaky-throws
-                LOGGER.log(Level.WARNING, "Unexpected interrupt while executing onClean", e);
-                throw e;
-              }
-              // Should not happen if cleaners are well-behaved
-              LOGGER.log(Level.WARNING, "Unexpected exception while executing onClean", e);
-            }
-          } catch (InterruptedException e) {
-            if (LazyCleaner.this.checkEmpty()) {
-              LOGGER.log(
-                  Level.FINE,
-                  "Cleanup queue is empty, and got interrupt, will terminate the cleanup thread"
-              );
-              break;
-            }
-            LOGGER.log(Level.FINE, "Ignoring interrupt since the cleanup queue is non-empty");
-          } catch (Throwable e) {
-            // Ignore exceptions from the cleanup action
-            LOGGER.log(Level.WARNING, "Unexpected exception in cleaner thread main loop", e);
-          }
+    public <T extends Throwable> Cleanable<T> register(Object obj, CleaningAction<T> action) {
+        assert obj != action : "object handle should not be the same as cleaning action, otherwise"
+                + " the object will never become phantom reachable, so the action will never trigger";
+        return add(new Node<T>(obj, action));
+    }
+
+    public synchronized int getWatchedCount() {
+        return watchedCount;
+    }
+
+    public synchronized boolean isThreadRunning() {
+        return threadRunning;
+    }
+
+    private synchronized boolean checkEmpty() {
+        if (first == null) {
+            threadRunning = false;
+            return true;
         }
-      }
-    });
-    if (thread != null) {
-      thread.start();
-      return true;
-    }
-    LOGGER.log(Level.WARNING, "Unable to create cleanup thread");
-    return false;
-  }
-
-  private synchronized boolean remove(Node<?> node) {
-    // If already removed, do nothing
-    if (node.next == node) {
-      return false;
+        return false;
     }
 
-    // Update list
-    if (first == node) {
-      first = node.next;
-    }
-    if (node.next != null) {
-      node.next.prev = node.prev;
-    }
-    if (node.prev != null) {
-      node.prev.next = node.next;
+    private synchronized <T extends Throwable> Node<T> add(Node<T> node) {
+        if (first != null) {
+            node.next = first;
+            first.prev = node;
+        }
+        first = node;
+        watchedCount++;
+
+        if (!threadRunning) {
+            threadRunning = startThread();
+        }
+        return node;
     }
 
-    // Indicate removal by pointing the cleaner to itself
-    node.next = node;
-    node.prev = node;
-
-    watchedCount--;
-    return true;
-  }
-
-  private class Node<T extends Throwable> extends PhantomReference<Object> implements Cleanable<T>,
-      CleaningAction<T> {
-    private final CleaningAction<T> action;
-    private Node<?> prev;
-    private Node<?> next;
-
-    Node(Object referent, CleaningAction<T> action) {
-      super(referent, queue);
-      this.action = action;
-      //Objects.requireNonNull(referent); // poor man`s reachabilityFence
+    private boolean startThread() {
+        Thread thread = threadFactory.newThread(new Runnable() {
+            @Override
+            public void run() {
+                while (true) {
+                    try {
+                        // Clear setContextClassLoader to avoid leaking the classloader
+                        Thread.currentThread().setContextClassLoader(null);
+                        Thread.currentThread().setUncaughtExceptionHandler(null);
+                        // Node extends PhantomReference, so this cast is safe
+                        Node<?> ref = (Node<?>) queue.remove(threadTtl);
+                        if (ref == null) {
+                            if (checkEmpty()) {
+                                break;
+                            }
+                            continue;
+                        }
+                        try {
+                            ref.onClean(true);
+                        } catch (Throwable e) {
+                            if (e instanceof InterruptedException) {
+                                // This could happen if onClean uses sneaky-throws
+                                LOGGER.log(Level.WARNING, "Unexpected interrupt while executing onClean", e);
+                                throw e;
+                            }
+                            // Should not happen if cleaners are well-behaved
+                            LOGGER.log(Level.WARNING, "Unexpected exception while executing onClean", e);
+                        }
+                    } catch (InterruptedException e) {
+                        if (LazyCleaner.this.checkEmpty()) {
+                            LOGGER.log(
+                                    Level.FINE,
+                                    "Cleanup queue is empty, and got interrupt, will terminate the cleanup thread"
+                            );
+                            break;
+                        }
+                        LOGGER.log(Level.FINE, "Ignoring interrupt since the cleanup queue is non-empty");
+                    } catch (Throwable e) {
+                        // Ignore exceptions from the cleanup action
+                        LOGGER.log(Level.WARNING, "Unexpected exception in cleaner thread main loop", e);
+                    }
+                }
+            }
+        });
+        if (thread != null) {
+            thread.start();
+            return true;
+        }
+        LOGGER.log(Level.WARNING, "Unable to create cleanup thread");
+        return false;
     }
 
-    @Override
-    public void clean() throws T {
-      onClean(false);
+    private synchronized boolean remove(Node<?> node) {
+        // If already removed, do nothing
+        if (node.next == node) {
+            return false;
+        }
+
+        // Update list
+        if (first == node) {
+            first = node.next;
+        }
+        if (node.next != null) {
+            node.next.prev = node.prev;
+        }
+        if (node.prev != null) {
+            node.prev.next = node.next;
+        }
+
+        // Indicate removal by pointing the cleaner to itself
+        node.next = node;
+        node.prev = node;
+
+        watchedCount--;
+        return true;
     }
 
-    @Override
-    public void onClean(boolean leak) throws T {
-      if (!remove(this)) {
-        return;
-      }
-      if (action != null) {
-        action.onClean(leak);
-      }
+    public interface Cleanable<T extends Throwable> {
+        void clean() throws T;
+    }
+
+    public interface CleaningAction<T extends Throwable> {
+        void onClean(boolean leak) throws T;
+    }
+
+    private class Node<T extends Throwable> extends PhantomReference<Object> implements Cleanable<T>,
+            CleaningAction<T> {
+        private final CleaningAction<T> action;
+        private Node<?> prev;
+        private Node<?> next;
+
+        Node(Object referent, CleaningAction<T> action) {
+            super(referent, queue);
+            this.action = action;
+            //Objects.requireNonNull(referent); // poor man's reachabilityFence
+        }
+
+        @Override
+        public void clean() throws T {
+            onClean(false);
+        }
+
+        @Override
+        public void onClean(boolean leak) throws T {
+            if (!remove(this)) {
+                return;
+            }
+            if (action != null) {
+                action.onClean(leak);
+            }
+        }
     }
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/util/LogWriterHandler.java b/pgjdbc/src/main/java/org/postgresql/util/LogWriterHandler.java
index 4b9aea6..1c4a150 100644
--- a/pgjdbc/src/main/java/org/postgresql/util/LogWriterHandler.java
+++ b/pgjdbc/src/main/java/org/postgresql/util/LogWriterHandler.java
@@ -18,81 +18,81 @@ import java.util.logging.SimpleFormatter;
 @SuppressWarnings("try")
 public class LogWriterHandler extends Handler {
 
-  private Writer writer;
-  private final ResourceLock lock = new ResourceLock();
+    private final ResourceLock lock = new ResourceLock();
+    private Writer writer;
 
-  @SuppressWarnings("this-escape")
-  public LogWriterHandler(Writer inWriter) {
-    super();
-    setLevel(Level.INFO);
-    setFilter(null);
-    setFormatter(new SimpleFormatter());
-    setWriter(inWriter);
-  }
-
-  @Override
-  public void publish(LogRecord record) {
-    final String formatted;
-    final Formatter formatter = getFormatter();
-
-    try {
-      formatted = formatter.format(record);
-    } catch (Exception ex) {
-      reportError("Error Formatting record", ex, ErrorManager.FORMAT_FAILURE);
-      return;
+    @SuppressWarnings("this-escape")
+    public LogWriterHandler(Writer inWriter) {
+        super();
+        setLevel(Level.INFO);
+        setFilter(null);
+        setFormatter(new SimpleFormatter());
+        setWriter(inWriter);
     }
 
-    if (formatted.length() == 0) {
-      return;
-    }
-    try {
-      try (ResourceLock ignore = lock.obtain()) {
-        Writer writer = this.writer;
-        if (writer != null) {
-          writer.write(formatted);
+    @Override
+    public void publish(LogRecord record) {
+        final String formatted;
+        final Formatter formatter = getFormatter();
+
+        try {
+            formatted = formatter.format(record);
+        } catch (Exception ex) {
+            reportError("Error Formatting record", ex, ErrorManager.FORMAT_FAILURE);
+            return;
         }
-      }
-    } catch (Exception ex) {
-      reportError("Error writing message", ex, ErrorManager.WRITE_FAILURE);
-    }
-  }
 
-  @Override
-  public void flush() {
-    try (ResourceLock ignore = lock.obtain()) {
-      Writer writer = this.writer;
-      if (writer != null) {
-        writer.flush();
-      }
-    } catch ( Exception ex ) {
-      reportError("Error on flush", ex, ErrorManager.WRITE_FAILURE);
+        if (formatted.length() == 0) {
+            return;
+        }
+        try {
+            try (ResourceLock ignore = lock.obtain()) {
+                Writer writer = this.writer;
+                if (writer != null) {
+                    writer.write(formatted);
+                }
+            }
+        } catch (Exception ex) {
+            reportError("Error writing message", ex, ErrorManager.WRITE_FAILURE);
+        }
     }
-  }
 
-  @Override
-  public void close() throws SecurityException {
-    try (ResourceLock ignore = lock.obtain()) {
-      Writer writer = this.writer;
-      if (writer != null) {
-        writer.close();
-      }
-    } catch ( Exception ex ) {
-      reportError("Error closing writer", ex, ErrorManager.WRITE_FAILURE);
+    @Override
+    public void flush() {
+        try (ResourceLock ignore = lock.obtain()) {
+            Writer writer = this.writer;
+            if (writer != null) {
+                writer.flush();
+            }
+        } catch (Exception ex) {
+            reportError("Error on flush", ex, ErrorManager.WRITE_FAILURE);
+        }
     }
-  }
 
-  private void setWriter(Writer writer) throws IllegalArgumentException {
-    try (ResourceLock ignore = lock.obtain()) {
-      if (writer == null) {
-        throw new IllegalArgumentException("Writer cannot be null");
-      }
-      this.writer = writer;
-
-      try {
-        writer.write(getFormatter().getHead(this));
-      } catch (Exception ex) {
-        reportError("Error writing head section", ex, ErrorManager.WRITE_FAILURE);
-      }
+    @Override
+    public void close() throws SecurityException {
+        try (ResourceLock ignore = lock.obtain()) {
+            Writer writer = this.writer;
+            if (writer != null) {
+                writer.close();
+            }
+        } catch (Exception ex) {
+            reportError("Error closing writer", ex, ErrorManager.WRITE_FAILURE);
+        }
+    }
+
+    private void setWriter(Writer writer) throws IllegalArgumentException {
+        try (ResourceLock ignore = lock.obtain()) {
+            if (writer == null) {
+                throw new IllegalArgumentException("Writer cannot be null");
+            }
+            this.writer = writer;
+
+            try {
+                writer.write(getFormatter().getHead(this));
+            } catch (Exception ex) {
+                reportError("Error writing head section", ex, ErrorManager.WRITE_FAILURE);
+            }
+        }
     }
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/util/LruCache.java b/pgjdbc/src/main/java/org/postgresql/util/LruCache.java
index 6360591..f5e7b5b 100644
--- a/pgjdbc/src/main/java/org/postgresql/util/LruCache.java
+++ b/pgjdbc/src/main/java/org/postgresql/util/LruCache.java
@@ -17,161 +17,159 @@ import java.util.Map;
  */
 @SuppressWarnings("try")
 public class LruCache<Key extends Object, Value extends CanEstimateSize>
-    implements Gettable<Key, Value> {
-  /**
-   * Action that is invoked when the entry is removed from the cache.
-   *
-   * @param <Value> type of the cache entry
-   */
-  public interface EvictAction<Value> {
-    void evict(Value value) throws SQLException;
-  }
-
-  /**
-   * When the entry is not present in cache, this create action is used to create one.
-   *
-   * @param <Value> type of the cache entry
-   */
-  public interface CreateAction<Key, Value> {
-    Value create(Key key) throws SQLException;
-  }
-
-  private final EvictAction<Value> onEvict;
-  private final CreateAction<Key, Value> createAction;
-  private final int maxSizeEntries;
-  private final long maxSizeBytes;
-  private long currentSize;
-  private final Map<Key, Value> cache;
-  private final ResourceLock lock = new ResourceLock();
-
-  @SuppressWarnings("serial")
-  private class LimitedMap extends LinkedHashMap<Key, Value> {
-    LimitedMap(int initialCapacity, float loadFactor, boolean accessOrder) {
-      super(initialCapacity, loadFactor, accessOrder);
+        implements Gettable<Key, Value> {
+    private final EvictAction<Value> onEvict;
+    private final CreateAction<Key, Value> createAction;
+    private final int maxSizeEntries;
+    private final long maxSizeBytes;
+    private final Map<Key, Value> cache;
+    private final ResourceLock lock = new ResourceLock();
+    private long currentSize;
+    public LruCache(int maxSizeEntries, long maxSizeBytes, boolean accessOrder) {
+        this(maxSizeEntries, maxSizeBytes, accessOrder, null, null);
+    }
+    public LruCache(int maxSizeEntries, long maxSizeBytes, boolean accessOrder,
+                    CreateAction<Key, Value> createAction,
+                    EvictAction<Value> onEvict) {
+        this.maxSizeEntries = maxSizeEntries;
+        this.maxSizeBytes = maxSizeBytes;
+        this.createAction = createAction;
+        this.onEvict = onEvict;
+        this.cache = new LimitedMap(16, 0.75f, accessOrder);
     }
 
+    private void evictValue(Value value) {
+        try {
+            if (onEvict != null) {
+                onEvict.evict(value);
+            }
+        } catch (SQLException e) {
+            /* ignore */
+        }
+    }
+
+    /**
+     * Returns an entry from the cache.
+     *
+     * @param key cache key
+     * @return entry from cache or null if cache does not contain given key.
+     */
     @Override
-    protected boolean removeEldestEntry(Map.Entry<Key, Value> eldest) {
-      // Avoid creating iterators if size constraints not violated
-      if (size() <= maxSizeEntries && currentSize <= maxSizeBytes) {
-        return false;
-      }
+    public Value get(Key key) {
+        try (ResourceLock ignore = lock.obtain()) {
+            return cache.get(key);
+        }
+    }
 
-      Iterator<Map.Entry<Key, Value>> it = entrySet().iterator();
-      while (it.hasNext()) {
-        if (size() <= maxSizeEntries && currentSize <= maxSizeBytes) {
-          return false;
+    /**
+     * Borrows an entry from the cache.
+     *
+     * @param key cache key
+     * @return entry from cache or newly created entry if cache does not contain given key.
+     * @throws SQLException if entry creation fails
+     */
+    public Value borrow(Key key) throws SQLException {
+        try (ResourceLock ignore = lock.obtain()) {
+            Value value = cache.remove(key);
+            if (value == null) {
+                if (createAction == null) {
+                    throw new UnsupportedOperationException("createAction == null, so can't create object");
+                }
+                return createAction.create(key);
+            }
+            currentSize -= value.getSize();
+            return value;
+        }
+    }
+
+    /**
+     * Returns given value to the cache.
+     *
+     * @param key   key
+     * @param value value
+     */
+    public void put(Key key, Value value) {
+        try (ResourceLock ignore = lock.obtain()) {
+            long valueSize = value.getSize();
+            if (maxSizeBytes == 0 || maxSizeEntries == 0 || valueSize * 2 > maxSizeBytes) {
+                // Just destroy the value if the cache is disabled or if the entry would
+                // consume more than half of the cache
+                evictValue(value);
+                return;
+            }
+            currentSize += valueSize;
+            Value prev = cache.put(key, value);
+            if (prev == null) {
+                return;
+            }
+            // This should be a rare case
+            currentSize -= prev.getSize();
+            if (prev != value) {
+                evictValue(prev);
+            }
+        }
+    }
+
+    /**
+     * Puts all the values from the given map into the cache.
+     *
+     * @param m The map containing entries to put into the cache
+     */
+    public void putAll(Map<Key, Value> m) {
+        try (ResourceLock ignore = lock.obtain()) {
+            for (Map.Entry<Key, Value> entry : m.entrySet()) {
+                this.put(entry.getKey(), entry.getValue());
+            }
+        }
+    }
+
+    /**
+     * Action that is invoked when the entry is removed from the cache.
+     *
+     * @param <Value> type of the cache entry
+     */
+    public interface EvictAction<Value> {
+        void evict(Value value) throws SQLException;
+    }
+
+    /**
+     * When the entry is not present in cache, this create action is used to create one.
+     *
+     * @param <Value> type of the cache entry
+     */
+    public interface CreateAction<Key, Value> {
+        Value create(Key key) throws SQLException;
+    }
+
+    @SuppressWarnings("serial")
+    private class LimitedMap extends LinkedHashMap<Key, Value> {
+        LimitedMap(int initialCapacity, float loadFactor, boolean accessOrder) {
+            super(initialCapacity, loadFactor, accessOrder);
         }
 
-        Map.Entry<Key, Value> entry = it.next();
-        evictValue(entry.getValue());
-        long valueSize = entry.getValue().getSize();
-        if (valueSize > 0) {
-          // just in case
-          currentSize -= valueSize;
+        @Override
+        protected boolean removeEldestEntry(Map.Entry<Key, Value> eldest) {
+            // Avoid creating iterators if size constraints not violated
+            if (size() <= maxSizeEntries && currentSize <= maxSizeBytes) {
+                return false;
+            }
+
+            Iterator<Map.Entry<Key, Value>> it = entrySet().iterator();
+            while (it.hasNext()) {
+                if (size() <= maxSizeEntries && currentSize <= maxSizeBytes) {
+                    return false;
+                }
+
+                Map.Entry<Key, Value> entry = it.next();
+                evictValue(entry.getValue());
+                long valueSize = entry.getValue().getSize();
+                if (valueSize > 0) {
+                    // just in case
+                    currentSize -= valueSize;
+                }
+                it.remove();
+            }
+            return false;
         }
-        it.remove();
-      }
-      return false;
     }
-  }
-
-  private void evictValue(Value value) {
-    try {
-      if (onEvict != null) {
-        onEvict.evict(value);
-      }
-    } catch (SQLException e) {
-      /* ignore */
-    }
-  }
-
-  public LruCache(int maxSizeEntries, long maxSizeBytes, boolean accessOrder) {
-    this(maxSizeEntries, maxSizeBytes, accessOrder, null, null);
-  }
-
-  public LruCache(int maxSizeEntries, long maxSizeBytes, boolean accessOrder,
-      CreateAction<Key, Value> createAction,
-      EvictAction<Value> onEvict) {
-    this.maxSizeEntries = maxSizeEntries;
-    this.maxSizeBytes = maxSizeBytes;
-    this.createAction = createAction;
-    this.onEvict = onEvict;
-    this.cache = new LimitedMap(16, 0.75f, accessOrder);
-  }
-
-  /**
-   * Returns an entry from the cache.
-   *
-   * @param key cache key
-   * @return entry from cache or null if cache does not contain given key.
-   */
-  @Override
-  public Value get(Key key) {
-    try (ResourceLock ignore = lock.obtain()) {
-      return cache.get(key);
-    }
-  }
-
-  /**
-   * Borrows an entry from the cache.
-   *
-   * @param key cache key
-   * @return entry from cache or newly created entry if cache does not contain given key.
-   * @throws SQLException if entry creation fails
-   */
-  public Value borrow(Key key) throws SQLException {
-    try (ResourceLock ignore = lock.obtain()) {
-      Value value = cache.remove(key);
-      if (value == null) {
-        if (createAction == null) {
-          throw new UnsupportedOperationException("createAction == null, so can't create object");
-        }
-        return createAction.create(key);
-      }
-      currentSize -= value.getSize();
-      return value;
-    }
-  }
-
-  /**
-   * Returns given value to the cache.
-   *
-   * @param key key
-   * @param value value
-   */
-  public void put(Key key, Value value) {
-    try (ResourceLock ignore = lock.obtain()) {
-      long valueSize = value.getSize();
-      if (maxSizeBytes == 0 || maxSizeEntries == 0 || valueSize * 2 > maxSizeBytes) {
-        // Just destroy the value if cache is disabled or if entry would consume more than a half of
-        // the cache
-        evictValue(value);
-        return;
-      }
-      currentSize += valueSize;
-      Value prev = cache.put(key, value);
-      if (prev == null) {
-        return;
-      }
-      // This should be a rare case
-      currentSize -= prev.getSize();
-      if (prev != value) {
-        evictValue(prev);
-      }
-    }
-  }
-
-  /**
-   * Puts all the values from the given map into the cache.
-   *
-   * @param m The map containing entries to put into the cache
-   */
-  public void putAll(Map<Key, Value> m) {
-    try (ResourceLock ignore = lock.obtain()) {
-      for (Map.Entry<Key, Value> entry : m.entrySet()) {
-        this.put(entry.getKey(), entry.getValue());
-      }
-    }
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/util/MD5Digest.java b/pgjdbc/src/main/java/org/postgresql/util/MD5Digest.java
index f0e586c..223e87a 100644
--- a/pgjdbc/src/main/java/org/postgresql/util/MD5Digest.java
+++ b/pgjdbc/src/main/java/org/postgresql/util/MD5Digest.java
@@ -15,55 +15,55 @@ import java.security.NoSuchAlgorithmException;
  */
 public class MD5Digest {
 
-  private static final byte[] HEX_BYTES = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'};
+    private static final byte[] HEX_BYTES = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'};
 
-  private MD5Digest() {
-  }
-
-  /**
-   * Encodes user/password/salt information in the following way: MD5(MD5(password + user) + salt).
-   *
-   * @param user The connecting user.
-   * @param password The connecting user's password.
-   * @param salt A four-salt sent by the server.
-   * @return A 35-byte array, comprising the string "md5" and an MD5 digest.
-   */
-  public static byte[] encode(byte[] user, byte[] password, byte[] salt) {
-    try {
-      final MessageDigest md = MessageDigest.getInstance("MD5");
-
-      md.update(password);
-      md.update(user);
-      byte[] digest = md.digest();
-
-      final byte[] hexDigest = new byte[35];
-
-      bytesToHex(digest, hexDigest, 0);
-      md.update(hexDigest, 0, 32);
-      md.update(salt);
-      digest = md.digest();
-
-      bytesToHex(digest, hexDigest, 3);
-      hexDigest[0] = (byte) 'm';
-      hexDigest[1] = (byte) 'd';
-      hexDigest[2] = (byte) '5';
-
-      return hexDigest;
-    } catch (NoSuchAlgorithmException e) {
-      throw new IllegalStateException("Unable to encode password with MD5", e);
+    private MD5Digest() {
     }
-  }
 
-  /*
-   * Turn 16-byte stream into a human-readable 32-byte hex string
-   */
-  public static void bytesToHex(byte[] bytes, byte[] hex, int offset) {
-    int pos = offset;
-    for (int i = 0; i < 16; i++) {
-      //bit twiddling converts to int, so just do it once here for both operations
-      final int c = bytes[i] & 0xFF;
-      hex[pos++] = HEX_BYTES[c >> 4];
-      hex[pos++] = HEX_BYTES[c & 0xF];
+    /**
+     * Encodes user/password/salt information in the following way: MD5(MD5(password + user) + salt).
+     *
+     * @param user     The connecting user.
+     * @param password The connecting user's password.
+     * @param salt     A four-byte salt sent by the server.
+     * @return A 35-byte array, comprising the string "md5" and an MD5 digest.
+     */
+    public static byte[] encode(byte[] user, byte[] password, byte[] salt) {
+        try {
+            final MessageDigest md = MessageDigest.getInstance("MD5");
+
+            md.update(password);
+            md.update(user);
+            byte[] digest = md.digest();
+
+            final byte[] hexDigest = new byte[35];
+
+            bytesToHex(digest, hexDigest, 0);
+            md.update(hexDigest, 0, 32);
+            md.update(salt);
+            digest = md.digest();
+
+            bytesToHex(digest, hexDigest, 3);
+            hexDigest[0] = (byte) 'm';
+            hexDigest[1] = (byte) 'd';
+            hexDigest[2] = (byte) '5';
+
+            return hexDigest;
+        } catch (NoSuchAlgorithmException e) {
+            throw new IllegalStateException("Unable to encode password with MD5", e);
+        }
+    }
+
+    /*
+     * Turn 16-byte stream into a human-readable 32-byte hex string
+     */
+    public static void bytesToHex(byte[] bytes, byte[] hex, int offset) {
+        int pos = offset;
+        for (int i = 0; i < 16; i++) {
+            //bit twiddling converts to int, so just do it once here for both operations
+            final int c = bytes[i] & 0xFF;
+            hex[pos++] = HEX_BYTES[c >> 4];
+            hex[pos++] = HEX_BYTES[c & 0xF];
+        }
     }
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/util/NumberParser.java b/pgjdbc/src/main/java/org/postgresql/util/NumberParser.java
index e3437e1..821502f 100644
--- a/pgjdbc/src/main/java/org/postgresql/util/NumberParser.java
+++ b/pgjdbc/src/main/java/org/postgresql/util/NumberParser.java
@@ -9,83 +9,83 @@ package org.postgresql.util;
  * Optimised byte[] to number parser.
  */
 public class NumberParser {
-  private static final NumberFormatException FAST_NUMBER_FAILED = new NumberFormatException() {
-    @Override
-    public Throwable fillInStackTrace() {
-      return this;
-    }
-  };
-
-  private static final long MAX_LONG_DIV_TEN = Long.MAX_VALUE / 10;
-
-  public NumberParser() {
-  }
-
-  /**
-   * Optimised byte[] to number parser. This code does not handle null values, so the caller must do
-   * checkResultSet and handle null values prior to calling this function. Fraction part is
-   * discarded.
-   *
-   * @param bytes integer represented as a sequence of ASCII bytes
-   * @return The parsed number.
-   * @throws NumberFormatException If the number is invalid or the out of range for fast parsing.
-   *                               The value must then be parsed by another (less optimised) method.
-   */
-  public static long getFastLong(byte[] bytes, long minVal, long maxVal) throws NumberFormatException {
-    int len = bytes.length;
-    if (len == 0) {
-      throw FAST_NUMBER_FAILED;
-    }
-
-    boolean neg = bytes[0] == '-';
-
-    long val = 0;
-    int start = neg ? 1 : 0;
-    while (start < len) {
-      byte b = bytes[start++];
-      if (b < '0' || b > '9') {
-        if (b == '.') {
-          if (neg && len == 2 || !neg && len == 1) {
-            // we have to check that string is not "." or "-."
-            throw FAST_NUMBER_FAILED;
-          }
-          // check that the rest of the buffer contains only digits
-          while (start < len) {
-            b = bytes[start++];
-            if (b < '0' || b > '9') {
-              throw FAST_NUMBER_FAILED;
-            }
-          }
-          break;
-        } else {
-          throw FAST_NUMBER_FAILED;
+    private static final NumberFormatException FAST_NUMBER_FAILED = new NumberFormatException() {
+        @Override
+        public Throwable fillInStackTrace() {
+            return this;
         }
-      }
+    };
 
-      if (val <= MAX_LONG_DIV_TEN) {
-        val *= 10;
-        val += b - '0';
-      } else {
-        throw FAST_NUMBER_FAILED;
-      }
+    private static final long MAX_LONG_DIV_TEN = Long.MAX_VALUE / 10;
+
+    public NumberParser() {
     }
 
-    if (val < 0) {
-      // It is possible to get overflow in two situations:
-      // 1. for MIN_VALUE, because abs(MIN_VALUE)=MAX_VALUE+1. In this situation thanks to
-      //    complement arithmetic we got correct result and shouldn't do anything with it.
-      // 2. for incorrect string, representing a number greater than MAX_VALUE, for example
-      //    "9223372036854775809", it this case we have to throw exception
-      if (!(neg && val == Long.MIN_VALUE)) {
-        throw FAST_NUMBER_FAILED;
-      }
-    } else if (neg) {
-      val = -val;
-    }
+    /**
+     * Optimised byte[] to number parser. This code does not handle null values, so the caller must do
+     * checkResultSet and handle null values prior to calling this function. Fraction part is
+     * discarded.
+     *
+     * @param bytes integer represented as a sequence of ASCII bytes
+     * @return The parsed number.
+     * @throws NumberFormatException If the number is invalid or out of range for fast parsing.
+     *                               The value must then be parsed by another (less optimised) method.
+     */
+    public static long getFastLong(byte[] bytes, long minVal, long maxVal) throws NumberFormatException {
+        int len = bytes.length;
+        if (len == 0) {
+            throw FAST_NUMBER_FAILED;
+        }
 
-    if (val < minVal || val > maxVal) {
-      throw FAST_NUMBER_FAILED;
+        boolean neg = bytes[0] == '-';
+
+        long val = 0;
+        int start = neg ? 1 : 0;
+        while (start < len) {
+            byte b = bytes[start++];
+            if (b < '0' || b > '9') {
+                if (b == '.') {
+                    if (neg && len == 2 || !neg && len == 1) {
+                        // we have to check that string is not "." or "-."
+                        throw FAST_NUMBER_FAILED;
+                    }
+                    // check that the rest of the buffer contains only digits
+                    while (start < len) {
+                        b = bytes[start++];
+                        if (b < '0' || b > '9') {
+                            throw FAST_NUMBER_FAILED;
+                        }
+                    }
+                    break;
+                } else {
+                    throw FAST_NUMBER_FAILED;
+                }
+            }
+
+            if (val <= MAX_LONG_DIV_TEN) {
+                val *= 10;
+                val += b - '0';
+            } else {
+                throw FAST_NUMBER_FAILED;
+            }
+        }
+
+        if (val < 0) {
+            // It is possible to get overflow in two situations:
+            // 1. for MIN_VALUE, because abs(MIN_VALUE)=MAX_VALUE+1. In this situation thanks to
+            //    complement arithmetic we got correct result and shouldn't do anything with it.
+            // 2. for incorrect string, representing a number greater than MAX_VALUE, for example
+            //    "9223372036854775809", in this case we have to throw exception
+            if (!(neg && val == Long.MIN_VALUE)) {
+                throw FAST_NUMBER_FAILED;
+            }
+        } else if (neg) {
+            val = -val;
+        }
+
+        if (val < minVal || val > maxVal) {
+            throw FAST_NUMBER_FAILED;
+        }
+        return val;
     }
-    return val;
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/util/OSUtil.java b/pgjdbc/src/main/java/org/postgresql/util/OSUtil.java
index 4904aa6..9417b51 100644
--- a/pgjdbc/src/main/java/org/postgresql/util/OSUtil.java
+++ b/pgjdbc/src/main/java/org/postgresql/util/OSUtil.java
@@ -13,27 +13,25 @@ import java.util.Locale;
  */
 public class OSUtil {
 
-  public OSUtil() {
-  }
-
-  /**
-   *
-   * @return true if OS is windows
-   */
-  public static boolean isWindows() {
-    return System.getProperty("os.name").toLowerCase(Locale.ROOT).contains("windows");
-  }
-
-  /**
-   *
-   * @return OS specific root directory for user specific configurations
-   */
-  public static String getUserConfigRootDirectory() {
-    if (isWindows()) {
-      return System.getenv("APPDATA") + File.separator + "postgresql";
-    } else {
-      return System.getProperty("user.home");
+    public OSUtil() {
+    }
+
+    /**
+     * @return true if OS is windows
+     */
+    public static boolean isWindows() {
+        return System.getProperty("os.name").toLowerCase(Locale.ROOT).contains("windows");
+    }
+
+    /**
+     * @return OS specific root directory for user specific configurations
+     */
+    public static String getUserConfigRootDirectory() {
+        if (isWindows()) {
+            return System.getenv("APPDATA") + File.separator + "postgresql";
+        } else {
+            return System.getProperty("user.home");
+        }
     }
-  }
 
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/util/ObjectFactory.java b/pgjdbc/src/main/java/org/postgresql/util/ObjectFactory.java
index 4b79fc9..8313417 100644
--- a/pgjdbc/src/main/java/org/postgresql/util/ObjectFactory.java
+++ b/pgjdbc/src/main/java/org/postgresql/util/ObjectFactory.java
@@ -15,55 +15,55 @@ import java.util.Properties;
  */
 public class ObjectFactory {
 
-  public ObjectFactory() {
-  }
+    public ObjectFactory() {
+    }
 
-  /**
-   * Instantiates a class using the appropriate constructor. If a constructor with a single
-   * Propertiesparameter exists, it is used. Otherwise, if tryString is true a constructor with a
-   * single String argument is searched if it fails, or tryString is true a no argument constructor
-   * is tried.
-   *
-   * @param <T> type of expected class
-   * @param expectedClass expected class of type T, if the classname instantiated doesn't match
-   *                     the expected type of this class this method will fail
-   * @param classname name of the class to instantiate
-   * @param info parameter to pass as Properties
-   * @param tryString whether to look for a single String argument constructor
-   * @param stringarg parameter to pass as String
-   * @return the instantiated class
-   * @throws ClassNotFoundException if something goes wrong
-   * @throws SecurityException if something goes wrong
-   * @throws NoSuchMethodException if something goes wrong
-   * @throws IllegalArgumentException if something goes wrong
-   * @throws InstantiationException if something goes wrong
-   * @throws IllegalAccessException if something goes wrong
-   * @throws InvocationTargetException if something goes wrong
-   */
-  public static <T> T instantiate(Class<T> expectedClass, String classname, Properties info,
-      boolean tryString,
-      String stringarg)
-      throws ClassNotFoundException, SecurityException, NoSuchMethodException,
-          IllegalArgumentException, InstantiationException, IllegalAccessException,
-          InvocationTargetException {
-    Object[] args = {info};
-    Constructor<? extends T> ctor = null;
-    Class<? extends T> cls = Class.forName(classname).asSubclass(expectedClass);
-    try {
-      ctor = cls.getConstructor(Properties.class);
-    } catch (NoSuchMethodException ignored) {
+    /**
+     * Instantiates a class using the appropriate constructor. If a constructor with a single
+     * Properties parameter exists, it is used. Otherwise, if tryString is true a constructor with a
+     * single String argument is searched; if that fails, or if tryString is false, a no-argument
+     * constructor is tried.
+     *
+     * @param <T>           type of expected class
+     * @param expectedClass expected class of type T, if the classname instantiated doesn't match
+     *                      the expected type of this class this method will fail
+     * @param classname     name of the class to instantiate
+     * @param info          parameter to pass as Properties
+     * @param tryString     whether to look for a single String argument constructor
+     * @param stringarg     parameter to pass as String
+     * @return the instantiated class
+     * @throws ClassNotFoundException    if something goes wrong
+     * @throws SecurityException         if something goes wrong
+     * @throws NoSuchMethodException     if something goes wrong
+     * @throws IllegalArgumentException  if something goes wrong
+     * @throws InstantiationException    if something goes wrong
+     * @throws IllegalAccessException    if something goes wrong
+     * @throws InvocationTargetException if something goes wrong
+     */
+    public static <T> T instantiate(Class<T> expectedClass, String classname, Properties info,
+                                    boolean tryString,
+                                    String stringarg)
+            throws ClassNotFoundException, SecurityException, NoSuchMethodException,
+            IllegalArgumentException, InstantiationException, IllegalAccessException,
+            InvocationTargetException {
+        Object[] args = {info};
+        Constructor<? extends T> ctor = null;
+        Class<? extends T> cls = Class.forName(classname).asSubclass(expectedClass);
+        try {
+            ctor = cls.getConstructor(Properties.class);
+        } catch (NoSuchMethodException ignored) {
+        }
+        if (tryString && ctor == null) {
+            try {
+                ctor = cls.getConstructor(String.class);
+                args = new String[]{stringarg};
+            } catch (NoSuchMethodException ignored) {
+            }
+        }
+        if (ctor == null) {
+            ctor = cls.getConstructor();
+            args = new Object[0];
+        }
+        return ctor.newInstance(args);
     }
-    if (tryString && ctor == null) {
-      try {
-        ctor = cls.getConstructor(String.class);
-        args = new String[]{stringarg};
-      } catch (NoSuchMethodException ignored) {
-      }
-    }
-    if (ctor == null) {
-      ctor = cls.getConstructor();
-      args = new Object[0];
-    }
-    return ctor.newInstance(args);
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/util/PGBinaryObject.java b/pgjdbc/src/main/java/org/postgresql/util/PGBinaryObject.java
index f41e763..c412948 100644
--- a/pgjdbc/src/main/java/org/postgresql/util/PGBinaryObject.java
+++ b/pgjdbc/src/main/java/org/postgresql/util/PGBinaryObject.java
@@ -12,30 +12,30 @@ import java.sql.SQLException;
  * of more optimal binary encoding of the data type.
  */
 public interface PGBinaryObject {
-  /**
-   * This method is called to set the value of this object.
-   *
-   * @param value data containing the binary representation of the value of the object
-   * @param offset the offset in the byte array where object data starts
-   * @throws SQLException thrown if value is invalid for this type
-   */
-  void setByteValue(byte[] value, int offset) throws SQLException;
+    /**
+     * This method is called to set the value of this object.
+     *
+     * @param value  data containing the binary representation of the value of the object
+     * @param offset the offset in the byte array where object data starts
+     * @throws SQLException thrown if value is invalid for this type
+     */
+    void setByteValue(byte[] value, int offset) throws SQLException;
 
-  /**
-   * This method is called to return the number of bytes needed to store this object in the binary
-   * form required by org.postgresql.
-   *
-   * @return the number of bytes needed to store this object
-   */
-  int lengthInBytes();
+    /**
+     * This method is called to return the number of bytes needed to store this object in the binary
+     * form required by org.postgresql.
+     *
+     * @return the number of bytes needed to store this object
+     */
+    int lengthInBytes();
 
-  /**
-   * This method is called the to store the value of the object, in the binary form required by
-   * org.postgresql.
-   *
-   * @param bytes the array to store the value, it is guaranteed to be at lest
-   *        {@link #lengthInBytes} in size.
-   * @param offset the offset in the byte array where object must be stored
-   */
-  void toBytes(byte[] bytes, int offset);
+    /**
+     * This method is called to store the value of the object, in the binary form required by
+     * org.postgresql.
+     *
+     * @param bytes  the array to store the value, it is guaranteed to be at least
+     *               {@link #lengthInBytes} in size.
+     * @param offset the offset in the byte array where object must be stored
+     */
+    void toBytes(byte[] bytes, int offset);
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/util/PGInterval.java b/pgjdbc/src/main/java/org/postgresql/util/PGInterval.java
index 215ae3a..03d7854 100644
--- a/pgjdbc/src/main/java/org/postgresql/util/PGInterval.java
+++ b/pgjdbc/src/main/java/org/postgresql/util/PGInterval.java
@@ -20,530 +20,530 @@ import java.util.StringTokenizer;
 @SuppressWarnings("serial")
 public class PGInterval extends PGobject implements Serializable, Cloneable {
 
-  private static final int MICROS_IN_SECOND = 1000000;
+    private static final int MICROS_IN_SECOND = 1000000;
 
-  private int years;
-  private int months;
-  private int days;
-  private int hours;
-  private int minutes;
-  private int wholeSeconds;
-  private int microSeconds;
-  private boolean isNull;
+    private int years;
+    private int months;
+    private int days;
+    private int hours;
+    private int minutes;
+    private int wholeSeconds;
+    private int microSeconds;
+    private boolean isNull;
 
-  /**
-   * required by the driver.
-   */
-  public PGInterval() {
-    type = "interval";
-  }
+    /**
+     * required by the driver.
+     */
+    public PGInterval() {
+        type = "interval";
+    }
 
-  /**
-   * Initialize a interval with a given interval string representation.
-   *
-   * @param value String representated interval (e.g. '3 years 2 mons')
-   * @throws SQLException Is thrown if the string representation has an unknown format
-   * @see PGobject#setValue(String)
-   */
-  @SuppressWarnings("this-escape")
-  public PGInterval(String value) throws SQLException {
-    this();
-    setValue(value);
-  }
+    /**
+     * Initialize an interval with a given interval string representation.
+     *
+     * @param value String represented interval (e.g. '3 years 2 mons')
+     * @throws SQLException Is thrown if the string representation has an unknown format
+     * @see PGobject#setValue(String)
+     */
+    @SuppressWarnings("this-escape")
+    public PGInterval(String value) throws SQLException {
+        this();
+        setValue(value);
+    }
 
-  private int lookAhead(String value, int position, String find) {
-    char [] tokens = find.toCharArray();
-    int found = -1;
+    /**
+     * Initializes all values of this interval to the specified values.
+     *
+     * @param years   years
+     * @param months  months
+     * @param days    days
+     * @param hours   hours
+     * @param minutes minutes
+     * @param seconds seconds
+     * @see #setValue(int, int, int, int, int, double)
+     */
+    @SuppressWarnings("this-escape")
+    public PGInterval(int years, int months, int days, int hours, int minutes, double seconds) {
+        this();
+        setValue(years, months, days, hours, minutes, seconds);
+    }
 
-    for (int i = 0; i < tokens.length; i++) {
-      found = value.indexOf(tokens[i], position);
-      if (found > 0) {
+    /**
+     * Returns integer value of value or 0 if value is null.
+     *
+     * @param value integer as string value
+     * @return integer parsed from string value
+     * @throws NumberFormatException if the string contains invalid chars
+     */
+    private static int nullSafeIntGet(String value) throws NumberFormatException {
+        return value == null ? 0 : Integer.parseInt(value);
+    }
+
+    /**
+     * Returns double value of value or 0 if value is null.
+     *
+     * @param value double as string value
+     * @return double parsed from string value
+     * @throws NumberFormatException if the string contains invalid chars
+     */
+    private static double nullSafeDoubleGet(String value) throws NumberFormatException {
+        return value == null ? 0 : Double.parseDouble(value);
+    }
+
+    private int lookAhead(String value, int position, String find) {
+        char[] tokens = find.toCharArray();
+        int found = -1;
+
+        for (int i = 0; i < tokens.length; i++) {
+            found = value.indexOf(tokens[i], position);
+            if (found > 0) {
+                return found;
+            }
+        }
         return found;
-      }
-    }
-    return found;
-  }
-
-  private void parseISO8601Format(String value) {
-    int number = 0;
-    String dateValue;
-    String timeValue = null;
-
-    int hasTime = value.indexOf('T');
-    if ( hasTime > 0 ) {
-      /* skip over the P */
-      dateValue = value.substring(1, hasTime);
-      timeValue = value.substring(hasTime + 1);
-    } else {
-      /* skip over the P */
-      dateValue = value.substring(1);
     }
 
-    for (int i = 0; i < dateValue.length(); i++) {
-      int lookAhead = lookAhead(dateValue, i, "YMD");
-      if (lookAhead > 0) {
-        number = Integer.parseInt(dateValue.substring(i, lookAhead));
-        if (dateValue.charAt(lookAhead) == 'Y') {
-          setYears(number);
-        } else if (dateValue.charAt(lookAhead) == 'M') {
-          setMonths(number);
-        } else if (dateValue.charAt(lookAhead) == 'D') {
-          setDays(number);
-        }
-        i = lookAhead;
-      }
-    }
-    if ( timeValue != null ) {
-      for (int i = 0; i < timeValue.length(); i++) {
-        int lookAhead = lookAhead(timeValue, i, "HMS");
-        if (lookAhead > 0) {
-          if (timeValue.charAt(lookAhead) == 'H') {
-            setHours(Integer.parseInt(timeValue.substring(i, lookAhead)));
-          } else if (timeValue.charAt(lookAhead) == 'M') {
-            setMinutes(Integer.parseInt(timeValue.substring(i, lookAhead)));
-          } else if (timeValue.charAt(lookAhead) == 'S') {
-            setSeconds(Double.parseDouble(timeValue.substring(i, lookAhead)));
-          }
-          i = lookAhead;
-        }
-      }
-    }
-  }
+    private void parseISO8601Format(String value) {
+        int number = 0;
+        String dateValue;
+        String timeValue = null;
 
-  /**
-   * Initializes all values of this interval to the specified values.
-   *
-   * @param years years
-   * @param months months
-   * @param days days
-   * @param hours hours
-   * @param minutes minutes
-   * @param seconds seconds
-   * @see #setValue(int, int, int, int, int, double)
-   */
-  @SuppressWarnings("this-escape")
-  public PGInterval(int years, int months, int days, int hours, int minutes, double seconds) {
-    this();
-    setValue(years, months, days, hours, minutes, seconds);
-  }
-
-  /**
-   * Sets a interval string represented value to this instance. This method only recognize the
-   * format, that Postgres returns - not all input formats are supported (e.g. '1 yr 2 m 3 s').
-   *
-   * @param value String representated interval (e.g. '3 years 2 mons')
-   * @throws SQLException Is thrown if the string representation has an unknown format
-   */
-  @Override
-  public void setValue(String value) throws SQLException {
-    isNull = value == null;
-    if (value == null) {
-      setValue(0, 0, 0, 0, 0, 0);
-      isNull = true;
-      return;
-    }
-    final boolean postgresFormat = !value.startsWith("@");
-    if (value.startsWith("P")) {
-      parseISO8601Format(value);
-      return;
-    }
-    // Just a simple '0'
-    if (!postgresFormat && value.length() == 3 && value.charAt(2) == '0') {
-      setValue(0, 0, 0, 0, 0, 0.0);
-      return;
-    }
-
-    int years = 0;
-    int months = 0;
-    int days = 0;
-    int hours = 0;
-    int minutes = 0;
-    double seconds = 0;
-
-    try {
-      String valueToken = null;
-
-      value = value.replace('+', ' ').replace('@', ' ');
-      final StringTokenizer st = new StringTokenizer(value);
-      for (int i = 1; st.hasMoreTokens(); i++) {
-        String token = st.nextToken();
-
-        if ((i & 1) == 1) {
-          int endHours = token.indexOf(':');
-          if (endHours == -1) {
-            valueToken = token;
-            continue;
-          }
-
-          // This handles hours, minutes, seconds and microseconds for
-          // ISO intervals
-          int offset = token.charAt(0) == '-' ? 1 : 0;
-
-          hours = nullSafeIntGet(token.substring(offset + 0, endHours));
-          minutes = nullSafeIntGet(token.substring(endHours + 1, endHours + 3));
-
-          // Pre 7.4 servers do not put second information into the results
-          // unless it is non-zero.
-          int endMinutes = token.indexOf(':', endHours + 1);
-          if (endMinutes != -1) {
-            seconds = nullSafeDoubleGet(token.substring(endMinutes + 1));
-          }
-
-          if (offset == 1) {
-            hours = -hours;
-            minutes = -minutes;
-            seconds = -seconds;
-          }
-
-          valueToken = null;
+        int hasTime = value.indexOf('T');
+        if (hasTime > 0) {
+            /* skip over the P */
+            dateValue = value.substring(1, hasTime);
+            timeValue = value.substring(hasTime + 1);
         } else {
-          // This handles years, months, days for both, ISO and
-          // Non-ISO intervals. Hours, minutes, seconds and microseconds
-          // are handled for Non-ISO intervals here.
-
-          if (token.startsWith("year")) {
-            years = nullSafeIntGet(valueToken);
-          } else if (token.startsWith("mon")) {
-            months = nullSafeIntGet(valueToken);
-          } else if (token.startsWith("day")) {
-            days = nullSafeIntGet(valueToken);
-          } else if (token.startsWith("hour")) {
-            hours = nullSafeIntGet(valueToken);
-          } else if (token.startsWith("min")) {
-            minutes = nullSafeIntGet(valueToken);
-          } else if (token.startsWith("sec")) {
-            seconds = nullSafeDoubleGet(valueToken);
-          }
+            /* skip over the P */
+            dateValue = value.substring(1);
+        }
+
+        for (int i = 0; i < dateValue.length(); i++) {
+            int lookAhead = lookAhead(dateValue, i, "YMD");
+            if (lookAhead > 0) {
+                number = Integer.parseInt(dateValue.substring(i, lookAhead));
+                if (dateValue.charAt(lookAhead) == 'Y') {
+                    setYears(number);
+                } else if (dateValue.charAt(lookAhead) == 'M') {
+                    setMonths(number);
+                } else if (dateValue.charAt(lookAhead) == 'D') {
+                    setDays(number);
+                }
+                i = lookAhead;
+            }
+        }
+        if (timeValue != null) {
+            for (int i = 0; i < timeValue.length(); i++) {
+                int lookAhead = lookAhead(timeValue, i, "HMS");
+                if (lookAhead > 0) {
+                    if (timeValue.charAt(lookAhead) == 'H') {
+                        setHours(Integer.parseInt(timeValue.substring(i, lookAhead)));
+                    } else if (timeValue.charAt(lookAhead) == 'M') {
+                        setMinutes(Integer.parseInt(timeValue.substring(i, lookAhead)));
+                    } else if (timeValue.charAt(lookAhead) == 'S') {
+                        setSeconds(Double.parseDouble(timeValue.substring(i, lookAhead)));
+                    }
+                    i = lookAhead;
+                }
+            }
         }
-      }
-    } catch (NumberFormatException e) {
-      throw new PSQLException(GT.tr("Conversion of interval failed"),
-          PSQLState.NUMERIC_CONSTANT_OUT_OF_RANGE, e);
     }
 
-    if (!postgresFormat && value.endsWith("ago")) {
-      // Inverse the leading sign
-      setValue(-years, -months, -days, -hours, -minutes, -seconds);
-    } else {
-      setValue(years, months, days, hours, minutes, seconds);
-    }
-  }
-
-  /**
-   * Set all values of this interval to the specified values.
-   *
-   * @param years years
-   * @param months months
-   * @param days days
-   * @param hours hours
-   * @param minutes minutes
-   * @param seconds seconds
-   */
-  public void setValue(int years, int months, int days, int hours, int minutes, double seconds) {
-    setYears(years);
-    setMonths(months);
-    setDays(days);
-    setHours(hours);
-    setMinutes(minutes);
-    setSeconds(seconds);
-  }
-
-  /**
-   * Returns the stored interval information as a string.
-   *
-   * @return String represented interval
-   */
-  @Override
-  public String getValue() {
-    if (isNull) {
-      return null;
-    }
-    DecimalFormat df = (DecimalFormat) NumberFormat.getInstance(Locale.US);
-    df.applyPattern("0.0#####");
-
-    return String.format(
-      Locale.ROOT,
-      "%d years %d mons %d days %d hours %d mins %s secs",
-      years,
-      months,
-      days,
-      hours,
-      minutes,
-      df.format(getSeconds())
-    );
-  }
-
-  /**
-   * Returns the years represented by this interval.
-   *
-   * @return years represented by this interval
-   */
-  public int getYears() {
-    return years;
-  }
-
-  /**
-   * Set the years of this interval to the specified value.
-   *
-   * @param years years to set
-   */
-  public void setYears(int years) {
-    isNull = false;
-    this.years = years;
-  }
-
-  /**
-   * Returns the months represented by this interval.
-   *
-   * @return months represented by this interval
-   */
-  public int getMonths() {
-    return months;
-  }
-
-  /**
-   * Set the months of this interval to the specified value.
-   *
-   * @param months months to set
-   */
-  public void setMonths(int months) {
-    isNull = false;
-    this.months = months;
-  }
-
-  /**
-   * Returns the days represented by this interval.
-   *
-   * @return days represented by this interval
-   */
-  public int getDays() {
-    return days;
-  }
-
-  /**
-   * Set the days of this interval to the specified value.
-   *
-   * @param days days to set
-   */
-  public void setDays(int days) {
-    isNull = false;
-    this.days = days;
-  }
-
-  /**
-   * Returns the hours represented by this interval.
-   *
-   * @return hours represented by this interval
-   */
-  public int getHours() {
-    return hours;
-  }
-
-  /**
-   * Set the hours of this interval to the specified value.
-   *
-   * @param hours hours to set
-   */
-  public void setHours(int hours) {
-    isNull = false;
-    this.hours = hours;
-  }
-
-  /**
-   * Returns the minutes represented by this interval.
-   *
-   * @return minutes represented by this interval
-   */
-  public int getMinutes() {
-    return minutes;
-  }
-
-  /**
-   * Set the minutes of this interval to the specified value.
-   *
-   * @param minutes minutes to set
-   */
-  public void setMinutes(int minutes) {
-    isNull = false;
-    this.minutes = minutes;
-  }
-
-  /**
-   * Returns the seconds represented by this interval.
-   *
-   * @return seconds represented by this interval
-   */
-  public double getSeconds() {
-    return wholeSeconds + (double) microSeconds / MICROS_IN_SECOND;
-  }
-
-  public int getWholeSeconds() {
-    return wholeSeconds;
-  }
-
-  public int getMicroSeconds() {
-    return microSeconds;
-  }
-
-  /**
-   * Set the seconds of this interval to the specified value.
-   *
-   * @param seconds seconds to set
-   */
-  public void setSeconds(double seconds) {
-    isNull = false;
-    wholeSeconds = (int) seconds;
-    microSeconds = (int) Math.round((seconds - wholeSeconds) * MICROS_IN_SECOND);
-  }
-
-  /**
-   * Rolls this interval on a given calendar.
-   *
-   * @param cal Calendar instance to add to
-   */
-  public void add(Calendar cal) {
-    if (isNull) {
-      return;
+    /**
+     * Set all values of this interval to the specified values.
+     *
+     * @param years   years
+     * @param months  months
+     * @param days    days
+     * @param hours   hours
+     * @param minutes minutes
+     * @param seconds seconds
+     */
+    public void setValue(int years, int months, int days, int hours, int minutes, double seconds) {
+        setYears(years);
+        setMonths(months);
+        setDays(days);
+        setHours(hours);
+        setMinutes(minutes);
+        setSeconds(seconds);
     }
 
-    final int milliseconds = (microSeconds + (microSeconds < 0 ? -500 : 500)) / 1000 + wholeSeconds * 1000;
+    /**
+     * Returns the stored interval information as a string.
+     *
+     * @return String represented interval
+     */
+    @Override
+    public String getValue() {
+        if (isNull) {
+            return null;
+        }
+        DecimalFormat df = (DecimalFormat) NumberFormat.getInstance(Locale.US);
+        df.applyPattern("0.0#####");
 
-    cal.add(Calendar.MILLISECOND, milliseconds);
-    cal.add(Calendar.MINUTE, getMinutes());
-    cal.add(Calendar.HOUR, getHours());
-    cal.add(Calendar.DAY_OF_MONTH, getDays());
-    cal.add(Calendar.MONTH, getMonths());
-    cal.add(Calendar.YEAR, getYears());
-  }
-
-  /**
-   * Rolls this interval on a given date.
-   *
-   * @param date Date instance to add to
-   */
-  public void add(Date date) {
-    if (isNull) {
-      return;
-    }
-    final Calendar cal = Calendar.getInstance();
-    cal.setTime(date);
-    add(cal);
-    date.setTime(cal.getTime().getTime());
-  }
-
-  /**
-   * Add this interval's value to the passed interval. This is backwards to what I would expect, but
-   * this makes it match the other existing add methods.
-   *
-   * @param interval intval to add
-   */
-  public void add(PGInterval interval) {
-    if (isNull || interval.isNull) {
-      return;
-    }
-    interval.setYears(interval.getYears() + getYears());
-    interval.setMonths(interval.getMonths() + getMonths());
-    interval.setDays(interval.getDays() + getDays());
-    interval.setHours(interval.getHours() + getHours());
-    interval.setMinutes(interval.getMinutes() + getMinutes());
-    interval.setSeconds(interval.getSeconds() + getSeconds());
-  }
-
-  /**
-   * Scale this interval by an integer factor. The server can scale by arbitrary factors, but that
-   * would require adjusting the call signatures for all the existing methods like getDays() or
-   * providing our own justification of fractional intervals. Neither of these seem like a good idea
-   * without a strong use case.
-   *
-   * @param factor scale factor
-   */
-  public void scale(int factor) {
-    if (isNull) {
-      return;
-    }
-    setYears(factor * getYears());
-    setMonths(factor * getMonths());
-    setDays(factor * getDays());
-    setHours(factor * getHours());
-    setMinutes(factor * getMinutes());
-    setSeconds(factor * getSeconds());
-  }
-
-  /**
-   * Returns integer value of value or 0 if value is null.
-   *
-   * @param value integer as string value
-   * @return integer parsed from string value
-   * @throws NumberFormatException if the string contains invalid chars
-   */
-  private static int nullSafeIntGet(String value) throws NumberFormatException {
-    return value == null ? 0 : Integer.parseInt(value);
-  }
-
-  /**
-   * Returns double value of value or 0 if value is null.
-   *
-   * @param value double as string value
-   * @return double parsed from string value
-   * @throws NumberFormatException if the string contains invalid chars
-   */
-  private static double nullSafeDoubleGet(String value) throws NumberFormatException {
-    return value == null ? 0 : Double.parseDouble(value);
-  }
-
-  /**
-   * Returns whether an object is equal to this one or not.
-   *
-   * @param obj Object to compare with
-   * @return true if the two intervals are identical
-   */
-  @Override
-  public boolean equals(Object obj) {
-    if (obj == null) {
-      return false;
+        return String.format(
+                Locale.ROOT,
+                "%d years %d mons %d days %d hours %d mins %s secs",
+                years,
+                months,
+                days,
+                hours,
+                minutes,
+                df.format(getSeconds())
+        );
     }
 
-    if (obj == this) {
-      return true;
    /**
     * Parses an interval string representation and stores it in this instance.
     * Only the formats that PostgreSQL itself returns are recognized (ISO-8601
     * "P..." form, the verbose "@ ..." form, and the default postgres form);
     * arbitrary input such as '1 yr 2 m 3 s' is not supported.
     *
     * @param value string-represented interval (e.g. '3 years 2 mons')
     * @throws SQLException if the string representation has an unknown format
     */
    @Override
    public void setValue(String value) throws SQLException {
        isNull = value == null;
        if (value == null) {
            setValue(0, 0, 0, 0, 0, 0);
            isNull = true;
            return;
        }
        // A leading '@' marks the verbose (non-postgres) output style.
        final boolean postgresFormat = !value.startsWith("@");
        // ISO-8601 intervals start with 'P', e.g. "P1Y2M3DT4H5M6S".
        if (value.startsWith("P")) {
            parseISO8601Format(value);
            return;
        }
        // Just a simple '@ 0' (zero interval in verbose style)
        if (!postgresFormat && value.length() == 3 && value.charAt(2) == '0') {
            setValue(0, 0, 0, 0, 0, 0.0);
            return;
        }

        int years = 0;
        int months = 0;
        int days = 0;
        int hours = 0;
        int minutes = 0;
        double seconds = 0;

        try {
            String valueToken = null;

            // Strip the verbose-format decorations so tokens split on spaces.
            value = value.replace('+', ' ').replace('@', ' ');
            final StringTokenizer st = new StringTokenizer(value);
            // Tokens alternate: value, unit, value, unit, ... (i starts at 1,
            // so odd i is a value token, even i is its unit).
            for (int i = 1; st.hasMoreTokens(); i++) {
                String token = st.nextToken();

                if ((i & 1) == 1) {
                    int endHours = token.indexOf(':');
                    if (endHours == -1) {
                        // Plain number: remember it until the unit token arrives.
                        valueToken = token;
                        continue;
                    }

                    // This handles hours, minutes, seconds and microseconds for
                    // ISO intervals ("[-]HH:MM[:SS.ffffff]")
                    int offset = token.charAt(0) == '-' ? 1 : 0;

                    hours = nullSafeIntGet(token.substring(offset + 0, endHours));
                    minutes = nullSafeIntGet(token.substring(endHours + 1, endHours + 3));

                    // Pre 7.4 servers do not put second information into the results
                    // unless it is non-zero.
                    int endMinutes = token.indexOf(':', endHours + 1);
                    if (endMinutes != -1) {
                        seconds = nullSafeDoubleGet(token.substring(endMinutes + 1));
                    }

                    // A leading '-' negates the whole time-of-day part.
                    if (offset == 1) {
                        hours = -hours;
                        minutes = -minutes;
                        seconds = -seconds;
                    }

                    valueToken = null;
                } else {
                    // This handles years, months, days for both, ISO and
                    // Non-ISO intervals. Hours, minutes, seconds and microseconds
                    // are handled for Non-ISO intervals here.

                    if (token.startsWith("year")) {
                        years = nullSafeIntGet(valueToken);
                    } else if (token.startsWith("mon")) {
                        months = nullSafeIntGet(valueToken);
                    } else if (token.startsWith("day")) {
                        days = nullSafeIntGet(valueToken);
                    } else if (token.startsWith("hour")) {
                        hours = nullSafeIntGet(valueToken);
                    } else if (token.startsWith("min")) {
                        minutes = nullSafeIntGet(valueToken);
                    } else if (token.startsWith("sec")) {
                        seconds = nullSafeDoubleGet(valueToken);
                    }
                }
            }
        } catch (NumberFormatException e) {
            throw new PSQLException(GT.tr("Conversion of interval failed"),
                    PSQLState.NUMERIC_CONSTANT_OUT_OF_RANGE, e);
        }

        // Verbose style appends "ago" to mean a negative interval.
        if (!postgresFormat && value.endsWith("ago")) {
            // Inverse the leading sign
            setValue(-years, -months, -days, -hours, -minutes, -seconds);
        } else {
            setValue(years, months, days, hours, minutes, seconds);
        }
    }
 
-    if (!(obj instanceof PGInterval)) {
-      return false;
+    /**
+     * Returns the years represented by this interval.
+     *
+     * @return years represented by this interval
+     */
+    public int getYears() {
+        return years;
     }
 
-    final PGInterval pgi = (PGInterval) obj;
-    if (isNull) {
-      return pgi.isNull;
-    } else if (pgi.isNull) {
-      return false;
+    /**
+     * Set the years of this interval to the specified value.
+     *
+     * @param years years to set
+     */
+    public void setYears(int years) {
+        isNull = false;
+        this.years = years;
     }
 
-    return pgi.years == years
-        && pgi.months == months
-        && pgi.days == days
-        && pgi.hours == hours
-        && pgi.minutes == minutes
-        && pgi.wholeSeconds == wholeSeconds
-        && pgi.microSeconds == microSeconds;
-  }
-
-  /**
-   * Returns a hashCode for this object.
-   *
-   * @return hashCode
-   */
-  @Override
-  public int hashCode() {
-    if (isNull) {
-      return 0;
+    /**
+     * Returns the months represented by this interval.
+     *
+     * @return months represented by this interval
+     */
+    public int getMonths() {
+        return months;
     }
-    return (((((((8 * 31 + microSeconds) * 31 + wholeSeconds) * 31 + minutes) * 31 + hours) * 31
-        + days) * 31 + months) * 31 + years) * 31;
-  }
 
-  @Override
-  public Object clone() throws CloneNotSupportedException {
-    // squid:S2157 "Cloneables" should implement "clone
-    return super.clone();
-  }
+    /**
+     * Set the months of this interval to the specified value.
+     *
+     * @param months months to set
+     */
+    public void setMonths(int months) {
+        isNull = false;
+        this.months = months;
+    }
+
+    /**
+     * Returns the days represented by this interval.
+     *
+     * @return days represented by this interval
+     */
+    public int getDays() {
+        return days;
+    }
+
+    /**
+     * Set the days of this interval to the specified value.
+     *
+     * @param days days to set
+     */
+    public void setDays(int days) {
+        isNull = false;
+        this.days = days;
+    }
+
+    /**
+     * Returns the hours represented by this interval.
+     *
+     * @return hours represented by this interval
+     */
+    public int getHours() {
+        return hours;
+    }
+
+    /**
+     * Set the hours of this interval to the specified value.
+     *
+     * @param hours hours to set
+     */
+    public void setHours(int hours) {
+        isNull = false;
+        this.hours = hours;
+    }
+
+    /**
+     * Returns the minutes represented by this interval.
+     *
+     * @return minutes represented by this interval
+     */
+    public int getMinutes() {
+        return minutes;
+    }
+
+    /**
+     * Set the minutes of this interval to the specified value.
+     *
+     * @param minutes minutes to set
+     */
+    public void setMinutes(int minutes) {
+        isNull = false;
+        this.minutes = minutes;
+    }
+
+    /**
+     * Returns the seconds represented by this interval.
+     *
+     * @return seconds represented by this interval
+     */
+    public double getSeconds() {
+        return wholeSeconds + (double) microSeconds / MICROS_IN_SECOND;
+    }
+
+    /**
+     * Set the seconds of this interval to the specified value.
+     *
+     * @param seconds seconds to set
+     */
+    public void setSeconds(double seconds) {
+        isNull = false;
+        wholeSeconds = (int) seconds;
+        microSeconds = (int) Math.round((seconds - wholeSeconds) * MICROS_IN_SECOND);
+    }
+
+    public int getWholeSeconds() {
+        return wholeSeconds;
+    }
+
+    public int getMicroSeconds() {
+        return microSeconds;
+    }
+
+    /**
+     * Rolls this interval on a given calendar.
+     *
+     * @param cal Calendar instance to add to
+     */
+    public void add(Calendar cal) {
+        if (isNull) {
+            return;
+        }
+
+        final int milliseconds = (microSeconds + (microSeconds < 0 ? -500 : 500)) / 1000 + wholeSeconds * 1000;
+
+        cal.add(Calendar.MILLISECOND, milliseconds);
+        cal.add(Calendar.MINUTE, getMinutes());
+        cal.add(Calendar.HOUR, getHours());
+        cal.add(Calendar.DAY_OF_MONTH, getDays());
+        cal.add(Calendar.MONTH, getMonths());
+        cal.add(Calendar.YEAR, getYears());
+    }
+
+    /**
+     * Rolls this interval on a given date.
+     *
+     * @param date Date instance to add to
+     */
+    public void add(Date date) {
+        if (isNull) {
+            return;
+        }
+        final Calendar cal = Calendar.getInstance();
+        cal.setTime(date);
+        add(cal);
+        date.setTime(cal.getTime().getTime());
+    }
+
+    /**
+     * Add this interval's value to the passed interval. This is backwards to what I would expect, but
+     * this makes it match the other existing add methods.
+     *
+     * @param interval intval to add
+     */
+    public void add(PGInterval interval) {
+        if (isNull || interval.isNull) {
+            return;
+        }
+        interval.setYears(interval.getYears() + getYears());
+        interval.setMonths(interval.getMonths() + getMonths());
+        interval.setDays(interval.getDays() + getDays());
+        interval.setHours(interval.getHours() + getHours());
+        interval.setMinutes(interval.getMinutes() + getMinutes());
+        interval.setSeconds(interval.getSeconds() + getSeconds());
+    }
+
+    /**
+     * Scale this interval by an integer factor. The server can scale by arbitrary factors, but that
+     * would require adjusting the call signatures for all the existing methods like getDays() or
+     * providing our own justification of fractional intervals. Neither of these seem like a good idea
+     * without a strong use case.
+     *
+     * @param factor scale factor
+     */
+    public void scale(int factor) {
+        if (isNull) {
+            return;
+        }
+        setYears(factor * getYears());
+        setMonths(factor * getMonths());
+        setDays(factor * getDays());
+        setHours(factor * getHours());
+        setMinutes(factor * getMinutes());
+        setSeconds(factor * getSeconds());
+    }
+
+    /**
+     * Returns whether an object is equal to this one or not.
+     *
+     * @param obj Object to compare with
+     * @return true if the two intervals are identical
+     */
+    @Override
+    public boolean equals(Object obj) {
+        if (obj == null) {
+            return false;
+        }
+
+        if (obj == this) {
+            return true;
+        }
+
+        if (!(obj instanceof PGInterval)) {
+            return false;
+        }
+
+        final PGInterval pgi = (PGInterval) obj;
+        if (isNull) {
+            return pgi.isNull;
+        } else if (pgi.isNull) {
+            return false;
+        }
+
+        return pgi.years == years
+                && pgi.months == months
+                && pgi.days == days
+                && pgi.hours == hours
+                && pgi.minutes == minutes
+                && pgi.wholeSeconds == wholeSeconds
+                && pgi.microSeconds == microSeconds;
+    }
+
+    /**
+     * Returns a hashCode for this object.
+     *
+     * @return hashCode
+     */
+    @Override
+    public int hashCode() {
+        if (isNull) {
+            return 0;
+        }
+        return (((((((8 * 31 + microSeconds) * 31 + wholeSeconds) * 31 + minutes) * 31 + hours) * 31
+                + days) * 31 + months) * 31 + years) * 31;
+    }
+
+    @Override
+    public Object clone() throws CloneNotSupportedException {
+        // squid:S2157 "Cloneables" should implement "clone
+        return super.clone();
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/util/PGJDBCMain.java b/pgjdbc/src/main/java/org/postgresql/util/PGJDBCMain.java
index bf8fd2d..70049ce 100644
--- a/pgjdbc/src/main/java/org/postgresql/util/PGJDBCMain.java
+++ b/pgjdbc/src/main/java/org/postgresql/util/PGJDBCMain.java
@@ -11,23 +11,23 @@ import java.net.URL;
 
 public class PGJDBCMain {
 
-  public PGJDBCMain() {
-  }
+    public PGJDBCMain() {
+    }
 
-  public static void main(String[] args) {
+    public static void main(String[] args) {
 
-    URL url = Driver.class.getResource("/org/postgresql/Driver.class");
-    System.out.printf("%n%s%n", DriverInfo.DRIVER_FULL_NAME);
-    System.out.printf("Found in: %s%n%n", url);
+        URL url = Driver.class.getResource("/org/postgresql/Driver.class");
+        System.out.printf("%n%s%n", DriverInfo.DRIVER_FULL_NAME);
+        System.out.printf("Found in: %s%n%n", url);
 
-    System.out.printf("The PgJDBC driver is not an executable Java program.%n%n"
-                       + "You must install it according to the JDBC driver installation "
-                       + "instructions for your application / container / appserver, "
-                       + "then use it by specifying a JDBC URL of the form %n    jdbc:postgresql://%n"
-                       + "or using an application specific method.%n%n"
-                       + "See the PgJDBC documentation: http://jdbc.postgresql.org/documentation/head/index.html%n%n"
-                       + "This command has had no effect.%n");
+        System.out.printf("The PgJDBC driver is not an executable Java program.%n%n"
+                + "You must install it according to the JDBC driver installation "
+                + "instructions for your application / container / appserver, "
+                + "then use it by specifying a JDBC URL of the form %n    jdbc:postgresql://%n"
+                + "or using an application specific method.%n%n"
+                + "See the PgJDBC documentation: http://jdbc.postgresql.org/documentation/head/index.html%n%n"
+                + "This command has had no effect.%n");
 
-    System.exit(1);
-  }
+        System.exit(1);
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/util/PGPropertyMaxResultBufferParser.java b/pgjdbc/src/main/java/org/postgresql/util/PGPropertyMaxResultBufferParser.java
index ca0ca3b..9fdfe1c 100644
--- a/pgjdbc/src/main/java/org/postgresql/util/PGPropertyMaxResultBufferParser.java
+++ b/pgjdbc/src/main/java/org/postgresql/util/PGPropertyMaxResultBufferParser.java
@@ -11,213 +11,213 @@ import java.util.logging.Logger;
 
 public class PGPropertyMaxResultBufferParser {
 
-  private static final Logger LOGGER = Logger.getLogger(PGPropertyMaxResultBufferParser.class.getName());
+    private static final Logger LOGGER = Logger.getLogger(PGPropertyMaxResultBufferParser.class.getName());
 
-  private static final String[] PERCENT_PHRASES = new String[]{
-    "p",
-    "pct",
-    "percent"
-  };
+    private static final String[] PERCENT_PHRASES = new String[]{
+            "p",
+            "pct",
+            "percent"
+    };
 
-  public PGPropertyMaxResultBufferParser() {
-  }
-
-  /**
-   * Method to parse value of max result buffer size.
-   *
-   * @param value string containing size of bytes with optional multiplier (T, G, M or K) or percent
-   *              value to declare max percent of heap memory to use.
-   * @return value of max result buffer size.
-   * @throws PSQLException Exception when given value can't be parsed.
-   */
-  public static long parseProperty(String value) throws PSQLException {
-    long result = -1;
-    if (value == null) {
-      // default branch
-    } else if (checkIfValueContainsPercent(value)) {
-      result = parseBytePercentValue(value);
-    } else if (!value.isEmpty()) {
-      result = parseByteValue(value);
+    public PGPropertyMaxResultBufferParser() {
     }
-    result = adjustResultSize(result);
-    return result;
-  }
 
-  /**
-   * Method to check if given value can contain percent declaration of size of max result buffer.
-   *
-   * @param value Value to check.
-   * @return Result if value contains percent.
-   */
-  private static boolean checkIfValueContainsPercent(String value) {
-    return getPercentPhraseLengthIfContains(value) != -1;
-  }
-
-  /**
-   * Method to get percent value of max result buffer size dependable on actual free memory. This
-   * method doesn't check other possibilities of value declaration.
-   *
-   * @param value string containing percent used to define max result buffer.
-   * @return percent value of max result buffer size.
-   * @throws PSQLException Exception when given value can't be parsed.
-   */
-  private static long parseBytePercentValue(String value) throws PSQLException {
-    long result = -1;
-    int length;
-
-    if (!value.isEmpty()) {
-      length = getPercentPhraseLengthIfContains(value);
-
-      if (length == -1) {
-        throwExceptionAboutParsingError(
-            "Received MaxResultBuffer parameter can't be parsed. Value received to parse: {0}",
-            value);
-      }
-
-      result = calculatePercentOfMemory(value, length);
-    }
-    return result;
-  }
-
-  /**
-   * Method to get length of percent phrase existing in given string, only if one of phrases exist
-   * on the length of string.
-   *
-   * @param valueToCheck String which is gonna be checked if contains percent phrase.
-   * @return Length of phrase inside string, returns -1 when no phrase found.
-   */
-  private static int getPercentPhraseLengthIfContains(String valueToCheck) {
-    int result = -1;
-    for (String phrase : PERCENT_PHRASES) {
-      int indx = getPhraseLengthIfContains(valueToCheck, phrase);
-      if (indx != -1) {
-        result = indx;
-      }
-    }
-    return result;
-  }
-
-  /**
-   * Method to get length of given phrase in given string to check, method checks if phrase exist on
-   * the end of given string.
-   *
-   * @param valueToCheck String which gonna be checked if contains phrase.
-   * @param phrase       Phrase to be looked for on the end of given string.
-   * @return Length of phrase inside string, returns -1 when phrase wasn't found.
-   */
-  private static int getPhraseLengthIfContains(String valueToCheck, String phrase) {
-    int searchValueLength = phrase.length();
-
-    if (valueToCheck.length() > searchValueLength) {
-      String subValue = valueToCheck.substring(valueToCheck.length() - searchValueLength);
-      if (subValue.equals(phrase)) {
-        return searchValueLength;
-      }
-    }
-    return -1;
-  }
-
-  /**
-   * Method to calculate percent of given max heap memory.
-   *
-   * @param value               String which contains percent + percent phrase which gonna be used
-   *                            during calculations.
-   * @param percentPhraseLength Length of percent phrase inside given value.
-   * @return Size of byte buffer based on percent of max heap memory.
-   */
-  private static long calculatePercentOfMemory(String value, int percentPhraseLength) {
-    String realValue = value.substring(0, value.length() - percentPhraseLength);
-    double percent = Double.parseDouble(realValue) / 100;
-    return (long) (percent * ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax());
-  }
-
-  /**
-   * Method to get size based on given string value. String can contains just a number or number +
-   * multiplier sign (like T, G, M or K).
-   *
-   * @param value Given string to be parsed.
-   * @return Size based on given string.
-   * @throws PSQLException Exception when given value can't be parsed.
-   */
-  @SuppressWarnings("fallthrough")
-  private static long parseByteValue(String value) throws PSQLException {
-    long result = -1;
-    long multiplier = 1;
-    long mul = 1000;
-    String realValue;
-    char sign = value.charAt(value.length() - 1);
-
-    switch (sign) {
-
-      case 'T':
-      case 't':
-        multiplier *= mul;
-        // fall through
-
-      case 'G':
-      case 'g':
-        multiplier *= mul;
-        // fall through
-
-      case 'M':
-      case 'm':
-        multiplier *= mul;
-        // fall through
-
-      case 'K':
-      case 'k':
-        multiplier *= mul;
-        realValue = value.substring(0, value.length() - 1);
-        result = Integer.parseInt(realValue) * multiplier;
-        break;
-
-      case '%':
-        return result;
-
-      default:
-        if (sign >= '0' && sign <= '9') {
-          result = Long.parseLong(value);
-        } else {
-          throwExceptionAboutParsingError(
-              "Received MaxResultBuffer parameter can't be parsed. Value received to parse: {0}",
-              value);
+    /**
+     * Method to parse value of max result buffer size.
+     *
+     * @param value string containing size of bytes with optional multiplier (T, G, M or K) or percent
+     *              value to declare max percent of heap memory to use.
+     * @return value of max result buffer size.
+     * @throws PSQLException Exception when given value can't be parsed.
+     */
+    public static long parseProperty(String value) throws PSQLException {
+        long result = -1;
+        if (value == null) {
+            // default branch
+        } else if (checkIfValueContainsPercent(value)) {
+            result = parseBytePercentValue(value);
+        } else if (!value.isEmpty()) {
+            result = parseByteValue(value);
         }
-        break;
+        result = adjustResultSize(result);
+        return result;
     }
-    return result;
-  }
 
-  /**
-   * Method to adjust result memory limit size. If given memory is larger than 90% of max heap
-   * memory then it gonna be reduced to 90% of max heap memory.
-   *
-   * @param value Size to be adjusted.
-   * @return Adjusted size (original size or 90% of max heap memory)
-   */
-  private static long adjustResultSize(long value) {
-    if (value > 0.9 * ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax()) {
-      long newResult = (long) (0.9 * ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax());
-
-      LOGGER.log(Level.WARNING, GT.tr(
-          "WARNING! Required to allocate {0} bytes, which exceeded possible heap memory size. Assigned {1} bytes as limit.",
-          String.valueOf(value), String.valueOf(newResult)));
-
-      value = newResult;
+    /**
+     * Method to check if given value can contain percent declaration of size of max result buffer.
+     *
+     * @param value Value to check.
+     * @return Result if value contains percent.
+     */
+    private static boolean checkIfValueContainsPercent(String value) {
+        return getPercentPhraseLengthIfContains(value) != -1;
     }
-    return value;
-  }
 
-  /**
-   * Method to throw message for parsing MaxResultBuffer.
-   *
-   * @param message Message to be added to exception.
-   * @param values  Values to be put inside exception message.
-   * @throws PSQLException Exception when given value can't be parsed.
-   */
-  private static void throwExceptionAboutParsingError(String message, Object... values) throws PSQLException {
-    throw new PSQLException(GT.tr(
-      message,
-      values),
-      PSQLState.SYNTAX_ERROR);
-  }
+    /**
+     * Method to get percent value of max result buffer size dependable on actual free memory. This
+     * method doesn't check other possibilities of value declaration.
+     *
+     * @param value string containing percent used to define max result buffer.
+     * @return percent value of max result buffer size.
+     * @throws PSQLException Exception when given value can't be parsed.
+     */
+    private static long parseBytePercentValue(String value) throws PSQLException {
+        long result = -1;
+        int length;
+
+        if (!value.isEmpty()) {
+            length = getPercentPhraseLengthIfContains(value);
+
+            if (length == -1) {
+                throwExceptionAboutParsingError(
+                        "Received MaxResultBuffer parameter can't be parsed. Value received to parse: {0}",
+                        value);
+            }
+
+            result = calculatePercentOfMemory(value, length);
+        }
+        return result;
+    }
+
+    /**
+     * Method to get length of percent phrase existing in given string, only if one of phrases exist
+     * on the length of string.
+     *
+     * @param valueToCheck String which is gonna be checked if contains percent phrase.
+     * @return Length of phrase inside string, returns -1 when no phrase found.
+     */
+    private static int getPercentPhraseLengthIfContains(String valueToCheck) {
+        int result = -1;
+        for (String phrase : PERCENT_PHRASES) {
+            int indx = getPhraseLengthIfContains(valueToCheck, phrase);
+            if (indx != -1) {
+                result = indx;
+            }
+        }
+        return result;
+    }
+
+    /**
+     * Method to get length of given phrase in given string to check, method checks if phrase exist on
+     * the end of given string.
+     *
+     * @param valueToCheck String which gonna be checked if contains phrase.
+     * @param phrase       Phrase to be looked for on the end of given string.
+     * @return Length of phrase inside string, returns -1 when phrase wasn't found.
+     */
+    private static int getPhraseLengthIfContains(String valueToCheck, String phrase) {
+        int searchValueLength = phrase.length();
+
+        if (valueToCheck.length() > searchValueLength) {
+            String subValue = valueToCheck.substring(valueToCheck.length() - searchValueLength);
+            if (subValue.equals(phrase)) {
+                return searchValueLength;
+            }
+        }
+        return -1;
+    }
+
+    /**
+     * Method to calculate percent of given max heap memory.
+     *
+     * @param value               String which contains percent + percent phrase which gonna be used
+     *                            during calculations.
+     * @param percentPhraseLength Length of percent phrase inside given value.
+     * @return Size of byte buffer based on percent of max heap memory.
+     */
+    private static long calculatePercentOfMemory(String value, int percentPhraseLength) {
+        String realValue = value.substring(0, value.length() - percentPhraseLength);
+        double percent = Double.parseDouble(realValue) / 100;
+        return (long) (percent * ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax());
+    }
+
+    /**
+     * Method to get size based on given string value. String can contains just a number or number +
+     * multiplier sign (like T, G, M or K).
+     *
+     * @param value Given string to be parsed.
+     * @return Size based on given string.
+     * @throws PSQLException Exception when given value can't be parsed.
+     */
+    @SuppressWarnings("fallthrough")
+    private static long parseByteValue(String value) throws PSQLException {
+        long result = -1;
+        long multiplier = 1;
+        long mul = 1000;
+        String realValue;
+        char sign = value.charAt(value.length() - 1);
+
+        switch (sign) {
+
+            case 'T':
+            case 't':
+                multiplier *= mul;
+                // fall through
+
+            case 'G':
+            case 'g':
+                multiplier *= mul;
+                // fall through
+
+            case 'M':
+            case 'm':
+                multiplier *= mul;
+                // fall through
+
+            case 'K':
+            case 'k':
+                multiplier *= mul;
+                realValue = value.substring(0, value.length() - 1);
+                result = Integer.parseInt(realValue) * multiplier;
+                break;
+
+            case '%':
+                return result;
+
+            default:
+                if (sign >= '0' && sign <= '9') {
+                    result = Long.parseLong(value);
+                } else {
+                    throwExceptionAboutParsingError(
+                            "Received MaxResultBuffer parameter can't be parsed. Value received to parse: {0}",
+                            value);
+                }
+                break;
+        }
+        return result;
+    }
+
+    /**
+     * Method to adjust result memory limit size. If given memory is larger than 90% of max heap
+     * memory then it gonna be reduced to 90% of max heap memory.
+     *
+     * @param value Size to be adjusted.
+     * @return Adjusted size (original size or 90% of max heap memory)
+     */
+    private static long adjustResultSize(long value) {
+        if (value > 0.9 * ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax()) {
+            long newResult = (long) (0.9 * ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax());
+
+            LOGGER.log(Level.WARNING, GT.tr(
+                    "WARNING! Required to allocate {0} bytes, which exceeded possible heap memory size. Assigned {1} bytes as limit.",
+                    String.valueOf(value), String.valueOf(newResult)));
+
+            value = newResult;
+        }
+        return value;
+    }
+
+    /**
+     * Method to throw message for parsing MaxResultBuffer.
+     *
+     * @param message Message to be added to exception.
+     * @param values  Values to be put inside exception message.
+     * @throws PSQLException Exception when given value can't be parsed.
+     */
+    private static void throwExceptionAboutParsingError(String message, Object... values) throws PSQLException {
+        throw new PSQLException(GT.tr(
+                message,
+                values),
+                PSQLState.SYNTAX_ERROR);
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/util/PGPropertyUtil.java b/pgjdbc/src/main/java/org/postgresql/util/PGPropertyUtil.java
index 8c1e344..838c17c 100644
--- a/pgjdbc/src/main/java/org/postgresql/util/PGPropertyUtil.java
+++ b/pgjdbc/src/main/java/org/postgresql/util/PGPropertyUtil.java
@@ -17,110 +17,110 @@ import java.util.logging.Logger;
  */
 public class PGPropertyUtil {
 
-  private static final Logger LOGGER = Logger.getLogger(PGPropertyUtil.class.getName());
+    private static final Logger LOGGER = Logger.getLogger(PGPropertyUtil.class.getName());
 
-  public PGPropertyUtil() {
-  }
-
-  /**
-   * converts PGPORT String to Integer
-   *
-   * @param portStr value of port
-   * @return value of port or null
-   */
-  private static Integer convertPgPortToInt(String portStr) {
-    try {
-      int port = Integer.parseInt(portStr);
-      if (port < 1 || port > 65535) {
-        LOGGER.log(Level.WARNING, "JDBC URL port: {0} not valid (1:65535) ", portStr);
-        return null;
-      }
-      return port;
-    } catch (NumberFormatException ignore) {
-      LOGGER.log(Level.WARNING, "JDBC URL invalid port number: {0}", portStr);
-      return null;
-    }
-  }
-
-  /**
-   * Validate properties. Goal is to detect inconsistencies and report understandable messages
-   *
-   * @param properties properties
-   * @return false if errors found
-   */
-  public static boolean propertiesConsistencyCheck(Properties properties) {
-    //
-    String hosts = PGProperty.PG_HOST.getOrDefault(properties);
-    if (hosts == null) {
-      LOGGER.log(Level.WARNING, "Property [{0}] can not be null", PGProperty.PG_HOST.getName());
-      return false;
-    }
-    String ports = PGProperty.PG_PORT.getOrDefault(properties);
-    if (ports == null) {
-      LOGGER.log(Level.WARNING, "Property [{0}] can not be null", PGProperty.PG_PORT.getName());
-      return false;
+    public PGPropertyUtil() {
     }
 
-    // check port values
-    for (String portStr : ports.split(",")) {
-      if (PGPropertyUtil.convertPgPortToInt(portStr) == null) {
-        return false;
-      }
+    /**
+     * Converts a PGPORT string to an Integer.
+     *
+     * @param portStr value of port
+     * @return value of port or null
+     */
+    private static Integer convertPgPortToInt(String portStr) {
+        try {
+            int port = Integer.parseInt(portStr);
+            if (port < 1 || port > 65535) {
+                LOGGER.log(Level.WARNING, "JDBC URL port: {0} not valid (1:65535) ", portStr);
+                return null;
+            }
+            return port;
+        } catch (NumberFormatException ignore) {
+            LOGGER.log(Level.WARNING, "JDBC URL invalid port number: {0}", portStr);
+            return null;
+        }
     }
 
-    // check count of hosts and count of ports
-    int hostCount = hosts.split(",").length;
-    int portCount = ports.split(",").length;
-    if (hostCount != portCount) {
-      LOGGER.log(Level.WARNING, "Properties [{0}] [{1}] must have same amount of values",
-          new Object[]{PGProperty.PG_HOST.getName(), PGProperty.PG_PORT.getName()});
-      LOGGER.log(Level.WARNING, "Property [{0}] ; value [{1}] ; count [{2}]",
-          new Object[]{PGProperty.PG_HOST.getName(), hosts, hostCount});
-      LOGGER.log(Level.WARNING, "Property [{0}] ; value [{1}] ; count [{2}]",
-          new Object[]{PGProperty.PG_PORT.getName(), ports, portCount});
-      return false;
-    }
-    //
-    return true;
-  }
+    /**
+     * Validate properties. Goal is to detect inconsistencies and report understandable messages
+     *
+     * @param properties properties
+     * @return false if errors found
+     */
+    public static boolean propertiesConsistencyCheck(Properties properties) {
+        //
+        String hosts = PGProperty.PG_HOST.getOrDefault(properties);
+        if (hosts == null) {
+            LOGGER.log(Level.WARNING, "Property [{0}] can not be null", PGProperty.PG_HOST.getName());
+            return false;
+        }
+        String ports = PGProperty.PG_PORT.getOrDefault(properties);
+        if (ports == null) {
+            LOGGER.log(Level.WARNING, "Property [{0}] can not be null", PGProperty.PG_PORT.getName());
+            return false;
+        }
 
-  /**
-   * translate PGSERVICEFILE keys host, port, dbname
-   * Example: "host" becomes "PGHOST"
-   *
-   * @param serviceKey key in pg_service.conf
-   * @return translated property or the same value if translation is not needed
-   */
-  // translate PGSERVICEFILE keys host, port, dbname
-  public static String translatePGServiceToPGProperty(String serviceKey) {
-    String testKey = "PG" + serviceKey.toUpperCase(Locale.ROOT);
-    if (
-        PGProperty.PG_HOST.getName().equals(testKey)
-            || (PGProperty.PG_PORT.getName().equals(testKey))
-            || (PGProperty.PG_DBNAME.getName().equals(testKey))
-    ) {
-      return testKey;
-    } else {
-      return serviceKey;
-    }
-  }
+        // check port values
+        for (String portStr : ports.split(",")) {
+            if (PGPropertyUtil.convertPgPortToInt(portStr) == null) {
+                return false;
+            }
+        }
 
-  /**
-   * translate PGSERVICEFILE keys host, port, dbname
-   * Example: "PGHOST" becomes "host"
-   *
-   * @param propertyKey postgres property
-   * @return translated property or the same value if translation is not needed
-   */
-  public static String translatePGPropertyToPGService(String propertyKey) {
-    if (
-        PGProperty.PG_HOST.getName().equals(propertyKey)
-            || (PGProperty.PG_PORT.getName().equals(propertyKey))
-            || (PGProperty.PG_DBNAME.getName().equals(propertyKey))
-    ) {
-      return propertyKey.substring(2).toLowerCase(Locale.ROOT);
-    } else {
-      return propertyKey;
+        // check count of hosts and count of ports
+        int hostCount = hosts.split(",").length;
+        int portCount = ports.split(",").length;
+        if (hostCount != portCount) {
+            LOGGER.log(Level.WARNING, "Properties [{0}] [{1}] must have same amount of values",
+                    new Object[]{PGProperty.PG_HOST.getName(), PGProperty.PG_PORT.getName()});
+            LOGGER.log(Level.WARNING, "Property [{0}] ; value [{1}] ; count [{2}]",
+                    new Object[]{PGProperty.PG_HOST.getName(), hosts, hostCount});
+            LOGGER.log(Level.WARNING, "Property [{0}] ; value [{1}] ; count [{2}]",
+                    new Object[]{PGProperty.PG_PORT.getName(), ports, portCount});
+            return false;
+        }
+        //
+        return true;
+    }
+
+    /**
+     * translate PGSERVICEFILE keys host, port, dbname
+     * Example: "host" becomes "PGHOST"
+     *
+     * @param serviceKey key in pg_service.conf
+     * @return translated property or the same value if translation is not needed
+     */
+    // translate PGSERVICEFILE keys host, port, dbname
+    public static String translatePGServiceToPGProperty(String serviceKey) {
+        String testKey = "PG" + serviceKey.toUpperCase(Locale.ROOT);
+        if (
+                PGProperty.PG_HOST.getName().equals(testKey)
+                        || (PGProperty.PG_PORT.getName().equals(testKey))
+                        || (PGProperty.PG_DBNAME.getName().equals(testKey))
+        ) {
+            return testKey;
+        } else {
+            return serviceKey;
+        }
+    }
+
+    /**
+     * translate PGSERVICEFILE keys host, port, dbname
+     * Example: "PGHOST" becomes "host"
+     *
+     * @param propertyKey postgres property
+     * @return translated property or the same value if translation is not needed
+     */
+    public static String translatePGPropertyToPGService(String propertyKey) {
+        if (
+                PGProperty.PG_HOST.getName().equals(propertyKey)
+                        || (PGProperty.PG_PORT.getName().equals(propertyKey))
+                        || (PGProperty.PG_DBNAME.getName().equals(propertyKey))
+        ) {
+            return propertyKey.substring(2).toLowerCase(Locale.ROOT);
+        } else {
+            return propertyKey;
+        }
     }
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/util/PGTime.java b/pgjdbc/src/main/java/org/postgresql/util/PGTime.java
index ddc8ed4..1c26f1e 100644
--- a/pgjdbc/src/main/java/org/postgresql/util/PGTime.java
+++ b/pgjdbc/src/main/java/org/postgresql/util/PGTime.java
@@ -15,89 +15,89 @@ import java.util.Calendar;
 @SuppressWarnings("serial")
 public class PGTime extends Time {
 
-  /**
-   * The optional calendar for this time.
-   */
-  private Calendar calendar;
+    /**
+     * The optional calendar for this time.
+     */
+    private Calendar calendar;
 
-  /**
-   * Constructs a <code>PGTime</code> without a time zone.
-   *
-   * @param time milliseconds since January 1, 1970, 00:00:00 GMT; a negative number is milliseconds
-   *        before January 1, 1970, 00:00:00 GMT.
-   * @see Time#Time(long)
-   */
-  public PGTime(long time) {
-    this(time, null);
-  }
-
-  /**
-   * Constructs a <code>PGTime</code> with the given calendar object. The calendar object is
-   * optional. If absent, the driver will treat the time as <code>time without time zone</code>.
-   * When present, the driver will treat the time as a <code>time with time zone</code> using the
-   * <code>TimeZone</code> in the calendar object. Furthermore, this calendar will be used instead
-   * of the calendar object passed to {@link PreparedStatement#setTime(int, Time, Calendar)}.
-   *
-   * @param time milliseconds since January 1, 1970, 00:00:00 GMT; a negative number is milliseconds
-   *        before January 1, 1970, 00:00:00 GMT.
-   * @param calendar the calendar object containing the time zone or <code>null</code>.
-   * @see Time#Time(long)
-   */
-  public PGTime(long time, Calendar calendar) {
-    super(time);
-    this.calendar = calendar;
-  }
-
-  /**
-   * Sets the calendar object for this time.
-   *
-   * @param calendar the calendar object or <code>null</code>.
-   */
-  public void setCalendar(Calendar calendar) {
-    this.calendar = calendar;
-  }
-
-  /**
-   * Returns the calendar object for this time.
-   *
-   * @return the calendar or <code>null</code>.
-   */
-  public Calendar getCalendar() {
-    return calendar;
-  }
-
-  @Override
-  public int hashCode() {
-    final int prime = 31;
-    int result = super.hashCode();
-    result = prime * result + (calendar == null ? 0 : calendar.hashCode());
-    return result;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) {
-      return true;
-    }
-    if (o == null || getClass() != o.getClass()) {
-      return false;
-    }
-    if (!super.equals(o)) {
-      return false;
+    /**
+     * Constructs a <code>PGTime</code> without a time zone.
+     *
+     * @param time milliseconds since January 1, 1970, 00:00:00 GMT; a negative number is milliseconds
+     *             before January 1, 1970, 00:00:00 GMT.
+     * @see Time#Time(long)
+     */
+    public PGTime(long time) {
+        this(time, null);
     }
 
-    PGTime pgTime = (PGTime) o;
-
-    return calendar != null ? calendar.equals(pgTime.calendar) : pgTime.calendar == null;
-  }
-
-  @Override
-  public Object clone() {
-    PGTime clone = (PGTime) super.clone();
-    Calendar calendar = getCalendar();
-    if (calendar != null) {
-      clone.setCalendar((Calendar) calendar.clone());
+    /**
+     * Constructs a <code>PGTime</code> with the given calendar object. The calendar object is
+     * optional. If absent, the driver will treat the time as <code>time without time zone</code>.
+     * When present, the driver will treat the time as a <code>time with time zone</code> using the
+     * <code>TimeZone</code> in the calendar object. Furthermore, this calendar will be used instead
+     * of the calendar object passed to {@link PreparedStatement#setTime(int, Time, Calendar)}.
+     *
+     * @param time     milliseconds since January 1, 1970, 00:00:00 GMT; a negative number is milliseconds
+     *                 before January 1, 1970, 00:00:00 GMT.
+     * @param calendar the calendar object containing the time zone or <code>null</code>.
+     * @see Time#Time(long)
+     */
+    public PGTime(long time, Calendar calendar) {
+        super(time);
+        this.calendar = calendar;
+    }
+
+    /**
+     * Returns the calendar object for this time.
+     *
+     * @return the calendar or <code>null</code>.
+     */
+    public Calendar getCalendar() {
+        return calendar;
+    }
+
+    /**
+     * Sets the calendar object for this time.
+     *
+     * @param calendar the calendar object or <code>null</code>.
+     */
+    public void setCalendar(Calendar calendar) {
+        this.calendar = calendar;
+    }
+
+    @Override
+    public int hashCode() {
+        final int prime = 31;
+        int result = super.hashCode();
+        result = prime * result + (calendar == null ? 0 : calendar.hashCode());
+        return result;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+        if (!super.equals(o)) {
+            return false;
+        }
+
+        PGTime pgTime = (PGTime) o;
+
+        return calendar != null ? calendar.equals(pgTime.calendar) : pgTime.calendar == null;
+    }
+
+    @Override
+    public Object clone() {
+        PGTime clone = (PGTime) super.clone();
+        Calendar calendar = getCalendar();
+        if (calendar != null) {
+            clone.setCalendar((Calendar) calendar.clone());
+        }
+        return clone;
     }
-    return clone;
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/util/PGTimestamp.java b/pgjdbc/src/main/java/org/postgresql/util/PGTimestamp.java
index 36deb55..b2c11a0 100644
--- a/pgjdbc/src/main/java/org/postgresql/util/PGTimestamp.java
+++ b/pgjdbc/src/main/java/org/postgresql/util/PGTimestamp.java
@@ -14,95 +14,95 @@ import java.util.Calendar;
 @SuppressWarnings("serial")
 public class PGTimestamp extends Timestamp {
 
-  /**
-   * The optional calendar for this timestamp.
-   */
-  private Calendar calendar;
+    /**
+     * The optional calendar for this timestamp.
+     */
+    private Calendar calendar;
 
-  /**
-   * Constructs a <code>PGTimestamp</code> without a time zone. The integral seconds are stored in
-   * the underlying date value; the fractional seconds are stored in the <code>nanos</code> field of
-   * the <code>Timestamp</code> object.
-   *
-   * @param time milliseconds since January 1, 1970, 00:00:00 GMT. A negative number is the number
-   *        of milliseconds before January 1, 1970, 00:00:00 GMT.
-   * @see Timestamp#Timestamp(long)
-   */
-  public PGTimestamp(long time) {
-    this(time, null);
-  }
-
-  /**
-   * <p>Constructs a <code>PGTimestamp</code> with the given time zone. The integral seconds are stored
-   * in the underlying date value; the fractional seconds are stored in the <code>nanos</code> field
-   * of the <code>Timestamp</code> object.</p>
-   *
-   * <p>The calendar object is optional. If absent, the driver will treat the timestamp as
-   * <code>timestamp without time zone</code>. When present, the driver will treat the timestamp as
-   * a <code>timestamp with time zone</code> using the <code>TimeZone</code> in the calendar object.
-   * Furthermore, this calendar will be used instead of the calendar object passed to
-   * {@link java.sql.PreparedStatement#setTimestamp(int, Timestamp, Calendar)}.</p>
-   *
-   * @param time milliseconds since January 1, 1970, 00:00:00 GMT. A negative number is the number
-   *        of milliseconds before January 1, 1970, 00:00:00 GMT.
-   * @param calendar the calendar object containing the time zone or <code>null</code>.
-   * @see Timestamp#Timestamp(long)
-   */
-  public PGTimestamp(long time, Calendar calendar) {
-    super(time);
-    this.calendar = calendar;
-  }
-
-  /**
-   * Sets the calendar object for this timestamp.
-   *
-   * @param calendar the calendar object or <code>null</code>.
-   */
-  public void setCalendar(Calendar calendar) {
-    this.calendar = calendar;
-  }
-
-  /**
-   * Returns the calendar object for this timestamp.
-   *
-   * @return the calendar object or <code>null</code>.
-   */
-  public Calendar getCalendar() {
-    return calendar;
-  }
-
-  @Override
-  public int hashCode() {
-    final int prime = 31;
-    int result = super.hashCode();
-    result = prime * result + (calendar == null ? 0 : calendar.hashCode());
-    return result;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) {
-      return true;
-    }
-    if (o == null || getClass() != o.getClass()) {
-      return false;
-    }
-    if (!super.equals(o)) {
-      return false;
+    /**
+     * Constructs a <code>PGTimestamp</code> without a time zone. The integral seconds are stored in
+     * the underlying date value; the fractional seconds are stored in the <code>nanos</code> field of
+     * the <code>Timestamp</code> object.
+     *
+     * @param time milliseconds since January 1, 1970, 00:00:00 GMT. A negative number is the number
+     *             of milliseconds before January 1, 1970, 00:00:00 GMT.
+     * @see Timestamp#Timestamp(long)
+     */
+    public PGTimestamp(long time) {
+        this(time, null);
     }
 
-    PGTimestamp that = (PGTimestamp) o;
-
-    return calendar != null ? calendar.equals(that.calendar) : that.calendar == null;
-  }
-
-  @Override
-  public Object clone() {
-    PGTimestamp clone = (PGTimestamp) super.clone();
-    Calendar calendar = getCalendar();
-    if (calendar != null) {
-      clone.setCalendar((Calendar) calendar.clone());
+    /**
+     * <p>Constructs a <code>PGTimestamp</code> with the given time zone. The integral seconds are stored
+     * in the underlying date value; the fractional seconds are stored in the <code>nanos</code> field
+     * of the <code>Timestamp</code> object.</p>
+     *
+     * <p>The calendar object is optional. If absent, the driver will treat the timestamp as
+     * <code>timestamp without time zone</code>. When present, the driver will treat the timestamp as
+     * a <code>timestamp with time zone</code> using the <code>TimeZone</code> in the calendar object.
+     * Furthermore, this calendar will be used instead of the calendar object passed to
+     * {@link java.sql.PreparedStatement#setTimestamp(int, Timestamp, Calendar)}.</p>
+     *
+     * @param time     milliseconds since January 1, 1970, 00:00:00 GMT. A negative number is the number
+     *                 of milliseconds before January 1, 1970, 00:00:00 GMT.
+     * @param calendar the calendar object containing the time zone or <code>null</code>.
+     * @see Timestamp#Timestamp(long)
+     */
+    public PGTimestamp(long time, Calendar calendar) {
+        super(time);
+        this.calendar = calendar;
+    }
+
+    /**
+     * Returns the calendar object for this timestamp.
+     *
+     * @return the calendar object or <code>null</code>.
+     */
+    public Calendar getCalendar() {
+        return calendar;
+    }
+
+    /**
+     * Sets the calendar object for this timestamp.
+     *
+     * @param calendar the calendar object or <code>null</code>.
+     */
+    public void setCalendar(Calendar calendar) {
+        this.calendar = calendar;
+    }
+
+    @Override
+    public int hashCode() {
+        final int prime = 31;
+        int result = super.hashCode();
+        result = prime * result + (calendar == null ? 0 : calendar.hashCode());
+        return result;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+        if (!super.equals(o)) {
+            return false;
+        }
+
+        PGTimestamp that = (PGTimestamp) o;
+
+        return calendar != null ? calendar.equals(that.calendar) : that.calendar == null;
+    }
+
+    @Override
+    public Object clone() {
+        PGTimestamp clone = (PGTimestamp) super.clone();
+        Calendar calendar = getCalendar();
+        if (calendar != null) {
+            clone.setCalendar((Calendar) calendar.clone());
+        }
+        return clone;
     }
-    return clone;
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/util/PGbytea.java b/pgjdbc/src/main/java/org/postgresql/util/PGbytea.java
index 44cc543..143d280 100644
--- a/pgjdbc/src/main/java/org/postgresql/util/PGbytea.java
+++ b/pgjdbc/src/main/java/org/postgresql/util/PGbytea.java
@@ -11,150 +11,150 @@ import java.sql.SQLException;
  * Converts to and from the postgresql bytea datatype used by the backend.
  */
 public class PGbytea {
-  private static final int MAX_3_BUFF_SIZE = 2 * 1024 * 1024;
+    private static final int MAX_3_BUFF_SIZE = 2 * 1024 * 1024;
 
-  /**
-   * Lookup table for each of the valid ascii code points (offset by {@code '0'})
-   * to the 4 bit numeric value.
-   */
-  private static final int[] HEX_VALS = new int['f' + 1 - '0'];
+    /**
+     * Lookup table for each of the valid ascii code points (offset by {@code '0'})
+     * to the 4 bit numeric value.
+     */
+    private static final int[] HEX_VALS = new int['f' + 1 - '0'];
 
-  static {
-    for (int i = 0; i < 10; i++) {
-      HEX_VALS[i] = (byte) i;
-    }
-    for (int i = 0; i < 6; i++) {
-      HEX_VALS['A' + i - '0'] = (byte) (10 + i);
-      HEX_VALS['a' + i - '0'] = (byte) (10 + i);
-    }
-  }
-
-  public PGbytea() {
-  }
-
-  /*
-   * Converts a PG bytea raw value (i.e. the raw binary representation of the bytea data type) into
-   * a java byte[]
-   */
-  public static byte[] toBytes(byte[] s) throws SQLException {
-    if (s == null) {
-      return null;
-    }
-
-    // Starting with PG 9.0, a new hex format is supported
-    // that starts with "\x". Figure out which format we're
-    // dealing with here.
-    //
-    if (s.length < 2 || s[0] != '\\' || s[1] != 'x') {
-      return toBytesOctalEscaped(s);
-    }
-    return toBytesHexEscaped(s);
-  }
-
-  private static byte[] toBytesHexEscaped(byte[] s) {
-    // first 2 bytes of s indicate the byte[] is hex encoded
-    // so they need to be ignored here
-    final int realLength = s.length - 2;
-    byte[] output = new byte[realLength >>> 1];
-    for (int i = 0; i < realLength; i += 2) {
-      int val = getHex(s[2 + i]) << 4;
-      val |= getHex(s[3 + i]);
-      output[i >>> 1] = (byte) val;
-    }
-    return output;
-  }
-
-  private static int getHex(byte b) {
-    return HEX_VALS[b - '0'];
-  }
-
-  private static byte[] toBytesOctalEscaped(byte[] s) {
-    final int slength = s.length;
-    byte[] buf = null;
-    int correctSize = slength;
-    if (slength > MAX_3_BUFF_SIZE) {
-      // count backslash escapes, they will be either
-      // backslashes or an octal escape \\ or \003
-      //
-      for (int i = 0; i < slength; i++) {
-        byte current = s[i];
-        if (current == '\\') {
-          byte next = s[++i];
-          if (next == '\\') {
-            --correctSize;
-          } else {
-            correctSize -= 3;
-          }
+    static {
+        for (int i = 0; i < 10; i++) {
+            HEX_VALS[i] = (byte) i;
+        }
+        for (int i = 0; i < 6; i++) {
+            HEX_VALS['A' + i - '0'] = (byte) (10 + i);
+            HEX_VALS['a' + i - '0'] = (byte) (10 + i);
         }
-      }
-      buf = new byte[correctSize];
-    } else {
-      buf = new byte[slength];
     }
-    int bufpos = 0;
-    int thebyte;
-    byte nextbyte;
-    byte secondbyte;
-    for (int i = 0; i < slength; i++) {
-      nextbyte = s[i];
-      if (nextbyte == (byte) '\\') {
-        secondbyte = s[++i];
-        if (secondbyte == (byte) '\\') {
-          // escaped \
-          buf[bufpos++] = (byte) '\\';
+
+    public PGbytea() {
+    }
+
+    /*
+     * Converts a PG bytea raw value (i.e. the raw binary representation of the bytea data type) into
+     * a java byte[]
+     */
+    public static byte[] toBytes(byte[] s) throws SQLException {
+        if (s == null) {
+            return null;
+        }
+
+        // Starting with PG 9.0, a new hex format is supported
+        // that starts with "\x". Figure out which format we're
+        // dealing with here.
+        //
+        if (s.length < 2 || s[0] != '\\' || s[1] != 'x') {
+            return toBytesOctalEscaped(s);
+        }
+        return toBytesHexEscaped(s);
+    }
+
+    private static byte[] toBytesHexEscaped(byte[] s) {
+        // first 2 bytes of s indicate the byte[] is hex encoded
+        // so they need to be ignored here
+        final int realLength = s.length - 2;
+        byte[] output = new byte[realLength >>> 1];
+        for (int i = 0; i < realLength; i += 2) {
+            int val = getHex(s[2 + i]) << 4;
+            val |= getHex(s[3 + i]);
+            output[i >>> 1] = (byte) val;
+        }
+        return output;
+    }
+
+    private static int getHex(byte b) {
+        return HEX_VALS[b - '0'];
+    }
+
+    private static byte[] toBytesOctalEscaped(byte[] s) {
+        final int slength = s.length;
+        byte[] buf = null;
+        int correctSize = slength;
+        if (slength > MAX_3_BUFF_SIZE) {
+            // count backslash escapes, they will be either
+            // backslashes or an octal escape \\ or \003
+            //
+            for (int i = 0; i < slength; i++) {
+                byte current = s[i];
+                if (current == '\\') {
+                    byte next = s[++i];
+                    if (next == '\\') {
+                        --correctSize;
+                    } else {
+                        correctSize -= 3;
+                    }
+                }
+            }
+            buf = new byte[correctSize];
         } else {
-          thebyte = (secondbyte - 48) * 64 + (s[++i] - 48) * 8 + (s[++i] - 48);
-          if (thebyte > 127) {
-            thebyte -= 256;
-          }
-          buf[bufpos++] = (byte) thebyte;
+            buf = new byte[slength];
         }
-      } else {
-        buf[bufpos++] = nextbyte;
-      }
+        int bufpos = 0;
+        int thebyte;
+        byte nextbyte;
+        byte secondbyte;
+        for (int i = 0; i < slength; i++) {
+            nextbyte = s[i];
+            if (nextbyte == (byte) '\\') {
+                secondbyte = s[++i];
+                if (secondbyte == (byte) '\\') {
+                    // escaped \
+                    buf[bufpos++] = (byte) '\\';
+                } else {
+                    thebyte = (secondbyte - 48) * 64 + (s[++i] - 48) * 8 + (s[++i] - 48);
+                    if (thebyte > 127) {
+                        thebyte -= 256;
+                    }
+                    buf[bufpos++] = (byte) thebyte;
+                }
+            } else {
+                buf[bufpos++] = nextbyte;
+            }
+        }
+        if (bufpos == correctSize) {
+            return buf;
+        }
+        byte[] result = new byte[bufpos];
+        System.arraycopy(buf, 0, result, 0, bufpos);
+        return result;
     }
-    if (bufpos == correctSize) {
-      return buf;
-    }
-    byte[] result = new byte[bufpos];
-    System.arraycopy(buf, 0, result, 0, bufpos);
-    return result;
-  }
 
-  /*
-   * Converts a java byte[] into a PG bytea string (i.e. the text representation of the bytea data
-   * type)
-   */
-  public static String toPGString(byte[] buf) {
-    if (buf == null) {
-      return null;
+    /*
+     * Converts a java byte[] into a PG bytea string (i.e. the text representation of the bytea data
+     * type)
+     */
+    public static String toPGString(byte[] buf) {
+        if (buf == null) {
+            return null;
+        }
+        StringBuilder stringBuilder = new StringBuilder(2 * buf.length);
+        for (byte element : buf) {
+            int elementAsInt = (int) element;
+            if (elementAsInt < 0) {
+                elementAsInt = 256 + elementAsInt;
+            }
+            // we escape the same non-printable characters as the backend
+            // we must escape all 8bit characters otherwise when converting
+            // from java unicode to the db character set we may end up with
+            // question marks if the character set is SQL_ASCII
+            if (elementAsInt < 32 || elementAsInt > 126) {
+                // escape character with the form \000, but need two \\ because of
+                // the Java parser
+                stringBuilder.append("\\");
+                stringBuilder.append((char) (((elementAsInt >> 6) & 0x3) + 48));
+                stringBuilder.append((char) (((elementAsInt >> 3) & 0x7) + 48));
+                stringBuilder.append((char) ((elementAsInt & 0x07) + 48));
+            } else if (element == (byte) '\\') {
+                // escape the backslash character as \\, but need four \\\\ because
+                // of the Java parser
+                stringBuilder.append("\\\\");
+            } else {
+                // other characters are left alone
+                stringBuilder.append((char) element);
+            }
+        }
+        return stringBuilder.toString();
     }
-    StringBuilder stringBuilder = new StringBuilder(2 * buf.length);
-    for (byte element : buf) {
-      int elementAsInt = (int) element;
-      if (elementAsInt < 0) {
-        elementAsInt = 256 + elementAsInt;
-      }
-      // we escape the same non-printable characters as the backend
-      // we must escape all 8bit characters otherwise when converting
-      // from java unicode to the db character set we may end up with
-      // question marks if the character set is SQL_ASCII
-      if (elementAsInt < 32 || elementAsInt > 126) {
-        // escape character with the form \000, but need two \\ because of
-        // the Java parser
-        stringBuilder.append("\\");
-        stringBuilder.append((char) (((elementAsInt >> 6) & 0x3) + 48));
-        stringBuilder.append((char) (((elementAsInt >> 3) & 0x7) + 48));
-        stringBuilder.append((char) ((elementAsInt & 0x07) + 48));
-      } else if (element == (byte) '\\') {
-        // escape the backslash character as \\, but need four \\\\ because
-        // of the Java parser
-        stringBuilder.append("\\\\");
-      } else {
-        // other characters are left alone
-        stringBuilder.append((char) element);
-      }
-    }
-    return stringBuilder.toString();
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/util/PGmoney.java b/pgjdbc/src/main/java/org/postgresql/util/PGmoney.java
index a8c137f..801480e 100644
--- a/pgjdbc/src/main/java/org/postgresql/util/PGmoney.java
+++ b/pgjdbc/src/main/java/org/postgresql/util/PGmoney.java
@@ -13,110 +13,110 @@ import java.sql.SQLException;
  */
 @SuppressWarnings("serial")
 public class PGmoney extends PGobject implements Serializable, Cloneable {
-  /*
-   * The value of the field
-   */
-  public double val;
+    /*
+     * The value of the field
+     */
+    public double val;
 
-  /**
-   * If the object represents {@code null::money}
-   */
-  public boolean isNull;
+    /**
+     * If the object represents {@code null::money}
+     */
+    public boolean isNull;
 
-  /**
-   * @param value of field
-   */
-  public PGmoney(double value) {
-    this();
-    val = value;
-  }
-
-  @SuppressWarnings("this-escape")
-  public PGmoney(String value) throws SQLException {
-    this();
-    setValue(value);
-  }
-
-  /*
-   * Required by the driver
-   */
-  public PGmoney() {
-    type = "money";
-  }
-
-  @Override
-  public void setValue(String s) throws SQLException {
-    isNull = s == null;
-    if (s == null) {
-      return;
+    /**
+     * @param value of field
+     */
+    public PGmoney(double value) {
+        this();
+        val = value;
     }
-    try {
-      String s1;
-      boolean negative;
 
-      negative = s.charAt(0) == '(';
-
-      // Remove any () (for negative) & currency symbol
-      s1 = PGtokenizer.removePara(s).substring(1);
-
-      // Strip out any , in currency
-      int pos = s1.indexOf(',');
-      while (pos != -1) {
-        s1 = s1.substring(0, pos) + s1.substring(pos + 1);
-        pos = s1.indexOf(',');
-      }
-
-      val = Double.parseDouble(s1);
-      val = negative ? -val : val;
-
-    } catch (NumberFormatException e) {
-      throw new PSQLException(GT.tr("Conversion of money failed."),
-          PSQLState.NUMERIC_CONSTANT_OUT_OF_RANGE, e);
+    @SuppressWarnings("this-escape")
+    public PGmoney(String value) throws SQLException {
+        this();
+        setValue(value);
     }
-  }
 
-  @Override
-  public int hashCode() {
-    if (isNull) {
-      return 0;
+    /*
+     * Required by the driver
+     */
+    public PGmoney() {
+        type = "money";
     }
-    final int prime = 31;
-    int result = super.hashCode();
-    long temp;
-    temp = Double.doubleToLongBits(val);
-    result = prime * result + (int) (temp ^ (temp >>> 32));
-    return result;
-  }
 
-  @Override
-  public boolean equals(Object obj) {
-    if (obj instanceof PGmoney) {
-      PGmoney p = (PGmoney) obj;
-      if (isNull) {
-        return p.isNull;
-      } else if (p.isNull) {
+    @Override
+    public int hashCode() {
+        if (isNull) {
+            return 0;
+        }
+        final int prime = 31;
+        int result = super.hashCode();
+        long temp;
+        temp = Double.doubleToLongBits(val);
+        result = prime * result + (int) (temp ^ (temp >>> 32));
+        return result;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (obj instanceof PGmoney) {
+            PGmoney p = (PGmoney) obj;
+            if (isNull) {
+                return p.isNull;
+            } else if (p.isNull) {
+                return false;
+            }
+            return val == p.val;
+        }
         return false;
-      }
-      return val == p.val;
     }
-    return false;
-  }
 
-  @Override
-  public String getValue() {
-    if (isNull) {
-      return null;
+    @Override
+    public String getValue() {
+        if (isNull) {
+            return null;
+        }
+        if (val < 0) {
+            return "-$" + (-val);
+        } else {
+            return "$" + val;
+        }
     }
-    if (val < 0) {
-      return "-$" + (-val);
-    } else {
-      return "$" + val;
-    }
-  }
 
-  @Override
-  public Object clone() throws CloneNotSupportedException {
-    // squid:S2157 "Cloneables" should implement "clone
-    return super.clone();
-  }
+    @Override
+    public void setValue(String s) throws SQLException {
+        isNull = s == null;
+        if (s == null) {
+            return;
+        }
+        try {
+            String s1;
+            boolean negative;
+
+            negative = s.charAt(0) == '(';
+
+            // Remove any () (for negative) & currency symbol
+            s1 = PGtokenizer.removePara(s).substring(1);
+
+            // Strip out any , in currency
+            int pos = s1.indexOf(',');
+            while (pos != -1) {
+                s1 = s1.substring(0, pos) + s1.substring(pos + 1);
+                pos = s1.indexOf(',');
+            }
+
+            val = Double.parseDouble(s1);
+            val = negative ? -val : val;
+
+        } catch (NumberFormatException e) {
+            throw new PSQLException(GT.tr("Conversion of money failed."),
+                    PSQLState.NUMERIC_CONSTANT_OUT_OF_RANGE, e);
+        }
+    }
+
+    @Override
+    public Object clone() throws CloneNotSupportedException {
+        // squid:S2157 "Cloneables" should implement "clone"
+        return super.clone();
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/util/PGobject.java b/pgjdbc/src/main/java/org/postgresql/util/PGobject.java
index 7c8f468..f9160a4 100644
--- a/pgjdbc/src/main/java/org/postgresql/util/PGobject.java
+++ b/pgjdbc/src/main/java/org/postgresql/util/PGobject.java
@@ -14,114 +14,114 @@ import java.sql.SQLException;
  */
 @SuppressWarnings("serial")
 public class PGobject implements Serializable, Cloneable {
-  protected String type;
-  protected String value;
+    protected String type;
+    protected String value;
 
-  /**
-   * This is called by org.postgresql.Connection.getObject() to create the object.
-   */
-  public PGobject() {
-  }
-
-  /**
-   * <p>This method sets the type of this object.</p>
-   *
-   * <p>It should not be extended by subclasses, hence it is final</p>
-   *
-   * @param type a string describing the type of the object
-   */
-  public final void setType(String type) {
-    this.type = type;
-  }
-
-  /**
-   * This method sets the value of this object. It must be overridden.
-   *
-   * @param value a string representation of the value of the object
-   * @throws SQLException thrown if value is invalid for this type
-   */
-  public void setValue(String value) throws SQLException {
-    this.value = value;
-  }
-
-  /**
-   * As this cannot change during the life of the object, it's final.
-   *
-   * @return the type name of this object
-   */
-  public final String getType() {
-    return type;
-  }
-
-  /**
-   * This must be overridden, to return the value of the object, in the form required by
-   * org.postgresql.
-   *
-   * @return the value of this object
-   */
-  public String getValue() {
-    return value;
-  }
-
-  /**
-   * Returns true if the current object wraps `null` value.
-   * This might be helpful
-   *
-   * @return true if the current object wraps `null` value.
-   */
-  public boolean isNull() {
-    return getValue() == null;
-  }
-
-  /**
-   * This must be overridden to allow comparisons of objects.
-   *
-   * @param obj Object to compare with
-   * @return true if the two boxes are identical
-   */
-  @Override
-  public boolean equals(Object obj) {
-    if (obj instanceof PGobject) {
-      final Object otherValue = ((PGobject) obj).getValue();
-
-      if (otherValue == null) {
-        return getValue() == null;
-      }
-      return otherValue.equals(getValue());
+    /**
+     * This is called by org.postgresql.Connection.getObject() to create the object.
+     */
+    public PGobject() {
     }
-    return false;
-  }
 
-  /**
-   * This must be overridden to allow the object to be cloned.
-   */
-  public Object clone() throws CloneNotSupportedException {
-    return super.clone();
-  }
+    protected static boolean equals(Object a, Object b) {
+        return a == b || a != null && a.equals(b);
+    }
 
-  /**
-   * This is defined here, so user code need not override it.
-   *
-   * @return the value of this object, in the syntax expected by org.postgresql
-   */
-  @Override
-  @SuppressWarnings("nullness")
-  public String toString() {
-    return getValue();
-  }
+    /**
+     * As this cannot change during the life of the object, it's final.
+     *
+     * @return the type name of this object
+     */
+    public final String getType() {
+        return type;
+    }
 
-  /**
-   * Compute hash. As equals() use only value. Return the same hash for the same value.
-   *
-   * @return Value hashcode, 0 if value is null {@link java.util.Objects#hashCode(Object)}
-   */
-  @Override
-  public int hashCode() {
-    String value = getValue();
-    return value != null ? value.hashCode() : 0;
-  }
+    /**
+     * <p>This method sets the type of this object.</p>
+     *
+     * <p>It should not be extended by subclasses, hence it is final</p>
+     *
+     * @param type a string describing the type of the object
+     */
+    public final void setType(String type) {
+        this.type = type;
+    }
 
-  protected static boolean equals(Object a, Object b) {
-    return a == b || a != null && a.equals(b);
-  }
+    /**
+     * This must be overridden, to return the value of the object, in the form required by
+     * org.postgresql.
+     *
+     * @return the value of this object
+     */
+    public String getValue() {
+        return value;
+    }
+
+    /**
+     * This method sets the value of this object. It must be overridden.
+     *
+     * @param value a string representation of the value of the object
+     * @throws SQLException thrown if value is invalid for this type
+     */
+    public void setValue(String value) throws SQLException {
+        this.value = value;
+    }
+
+    /**
+     * Returns true if the current object wraps `null` value.
+     * This might be helpful when checking whether the wrapped value is SQL {@code NULL}.
+     *
+     * @return true if the current object wraps `null` value.
+     */
+    public boolean isNull() {
+        return getValue() == null;
+    }
+
+    /**
+     * This must be overridden to allow comparisons of objects.
+     *
+     * @param obj Object to compare with
+     * @return true if the two boxes are identical
+     */
+    @Override
+    public boolean equals(Object obj) {
+        if (obj instanceof PGobject) {
+            final Object otherValue = ((PGobject) obj).getValue();
+
+            if (otherValue == null) {
+                return getValue() == null;
+            }
+            return otherValue.equals(getValue());
+        }
+        return false;
+    }
+
+    /**
+     * This must be overridden to allow the object to be cloned.
+     */
+    public Object clone() throws CloneNotSupportedException {
+        return super.clone();
+    }
+
+    /**
+     * This is defined here, so user code need not override it.
+     *
+     * @return the value of this object, in the syntax expected by org.postgresql
+     */
+    @Override
+    @SuppressWarnings("nullness")
+    public String toString() {
+        return getValue();
+    }
+
+    /**
+     * Compute hash. As equals() use only value. Return the same hash for the same value.
+     *
+     * @return Value hashcode, 0 if value is null {@link java.util.Objects#hashCode(Object)}
+     */
+    @Override
+    public int hashCode() {
+        String value = getValue();
+        return value != null ? value.hashCode() : 0;
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/util/PGtokenizer.java b/pgjdbc/src/main/java/org/postgresql/util/PGtokenizer.java
index d0030ce..b1408a8 100644
--- a/pgjdbc/src/main/java/org/postgresql/util/PGtokenizer.java
+++ b/pgjdbc/src/main/java/org/postgresql/util/PGtokenizer.java
@@ -25,234 +25,234 @@ import java.util.Map;
  */
 public class PGtokenizer {
 
-  private static final Map<Character, Character> CLOSING_TO_OPENING_CHARACTER = new HashMap<>();
+    private static final Map<Character, Character> CLOSING_TO_OPENING_CHARACTER = new HashMap<>();
 
-  static 	{
-    CLOSING_TO_OPENING_CHARACTER.put(')', '(');
+    static {
+        CLOSING_TO_OPENING_CHARACTER.put(')', '(');
 
-    CLOSING_TO_OPENING_CHARACTER.put(']', '[');
+        CLOSING_TO_OPENING_CHARACTER.put(']', '[');
 
-    CLOSING_TO_OPENING_CHARACTER.put('>', '<');
+        CLOSING_TO_OPENING_CHARACTER.put('>', '<');
 
-    CLOSING_TO_OPENING_CHARACTER.put('"', '"');
-  }
+        CLOSING_TO_OPENING_CHARACTER.put('"', '"');
+    }
 
-  // Our tokens
-  protected List<String> tokens = new ArrayList<>();
+    // Our tokens
+    protected List<String> tokens = new ArrayList<>();
 
-  /**
-   * <p>Create a tokeniser.</p>
-   *
-   * <p>We could have used StringTokenizer to do this, however, we needed to handle nesting of '(' ')'
-   * '[' ']' '&lt;' and '&gt;' as these are used by the geometric data types.</p>
-   *
-   * @param string containing tokens
-   * @param delim single character to split the tokens
-   */
-  @SuppressWarnings("this-escape")
-  public PGtokenizer(String string, char delim) {
-    tokenize(string, delim);
-  }
+    /**
+     * <p>Create a tokeniser.</p>
+     *
+     * <p>We could have used StringTokenizer to do this, however, we needed to handle nesting of '(' ')'
+     * '[' ']' '&lt;' and '&gt;' as these are used by the geometric data types.</p>
+     *
+     * @param string containing tokens
+     * @param delim  single character to split the tokens
+     */
+    @SuppressWarnings("this-escape")
+    public PGtokenizer(String string, char delim) {
+        tokenize(string, delim);
+    }
 
-  /**
-   * This resets this tokenizer with a new string and/or delimiter.
-   *
-   * @param string containing tokens
-   * @param delim single character to split the tokens
-   * @return number of tokens
-   */
-  public int tokenize(String string, char delim) {
-    tokens.clear();
-
-    final Deque<Character> stack = new ArrayDeque<>();
-
-    // stack keeps track of the levels we are in the current token.
-    // if stack.size is > 0 then we don't split a token when delim is matched.
-    //
-    // The Geometric datatypes use this, because often a type may have others
-    // (usually PGpoint) embedded within a token.
-    //
-    // Peter 1998 Jan 6 - Added < and > to the nesting rules
-    int p;
-    int s;
-    boolean skipChar = false;
-    boolean nestedDoubleQuote = false;
-    char c = (char) 0;
-    for (p = 0, s = 0; p < string.length(); p++) {
-      c = string.charAt(p);
-
-      // increase nesting if an open character is found
-      if (c == '(' || c == '[' || c == '<' || (!nestedDoubleQuote && !skipChar && c == '"')) {
-        stack.push(c);
-        if (c == '"') {
-          nestedDoubleQuote = true;
-          skipChar = true;
+    /**
+     * This removes the lead/trailing strings from a string.
+     *
+     * @param s Source string
+     * @param l Leading string to remove
+     * @param t Trailing string to remove
+     * @return String without the lead/trailing strings
+     */
+    public static String remove(String s, String l, String t) {
+        if (s.startsWith(l)) {
+            s = s.substring(l.length());
         }
-      }
-
-      // decrease nesting if a close character is found
-      if (c == ')' || c == ']' || c == '>' || (nestedDoubleQuote && !skipChar && c == '"')) {
-
-        if (c == '"') {
-          while (!stack.isEmpty() && !Character.valueOf('"').equals(stack.peek())) {
-            stack.pop();
-          }
-          nestedDoubleQuote = false;
-          stack.pop();
-        } else {
-          final Character ch = CLOSING_TO_OPENING_CHARACTER.get(c);
-          if (!stack.isEmpty() && ch != null && ch.equals(stack.peek())) {
-            stack.pop();
-          }
+        if (s.endsWith(t)) {
+            s = s.substring(0, s.length() - t.length());
         }
-      }
-
-      skipChar = c == '\\';
-
-      if (stack.isEmpty() && c == delim) {
-        tokens.add(string.substring(s, p));
-        s = p + 1; // +1 to skip the delimiter
-      }
-
+        return s;
     }
 
-    // Don't forget the last token ;-)
-    if (s < string.length()) {
-      tokens.add(string.substring(s));
+    /**
+     * Removes ( and ) from the beginning and end of a string.
+     *
+     * @param s String to remove from
+     * @return String without the ( or )
+     */
+    public static String removePara(String s) {
+        return remove(s, "(", ")");
     }
 
-    // check for last token empty
-    if ( s == string.length() && c == delim) {
-      tokens.add("");
+    /**
+     * Removes [ and ] from the beginning and end of a string.
+     *
+     * @param s String to remove from
+     * @return String without the [ or ]
+     */
+    public static String removeBox(String s) {
+        return remove(s, "[", "]");
     }
 
-    return tokens.size();
-  }
-
-  /**
-   * @return the number of tokens available
-   */
-  public int getSize() {
-    return tokens.size();
-  }
-
-  /**
-   * @param n Token number ( 0 ... getSize()-1 )
-   * @return The token value
-   */
-  public String getToken(int n) {
-    return tokens.get(n);
-  }
-
-  /**
-   * <p>This returns a new tokenizer based on one of our tokens.</p>
-   *
-   * <p>The geometric datatypes use this to process nested tokens (usually PGpoint).</p>
-   *
-   * @param n Token number ( 0 ... getSize()-1 )
-   * @param delim The delimiter to use
-   * @return A new instance of PGtokenizer based on the token
-   */
-  public PGtokenizer tokenizeToken(int n, char delim) {
-    return new PGtokenizer(getToken(n), delim);
-  }
-
-  /**
-   * This removes the lead/trailing strings from a string.
-   *
-   * @param s Source string
-   * @param l Leading string to remove
-   * @param t Trailing string to remove
-   * @return String without the lead/trailing strings
-   */
-  public static String remove(String s, String l, String t) {
-    if (s.startsWith(l)) {
-      s = s.substring(l.length());
+    /**
+     * Removes &lt; and &gt; from the beginning and end of a string.
+     *
+     * @param s String to remove from
+     * @return String without the &lt; or &gt;
+     */
+    public static String removeAngle(String s) {
+        return remove(s, "<", ">");
     }
-    if (s.endsWith(t)) {
-      s = s.substring(0, s.length() - t.length());
+
+    /**
+     * Removes curly braces { and } from the beginning and end of a string.
+     *
+     * @param s String to remove from
+     * @return String without the { or }
+     */
+    public static String removeCurlyBrace(String s) {
+        return remove(s, "{", "}");
     }
-    return s;
-  }
 
-  /**
-   * This removes the lead/trailing strings from all tokens.
-   *
-   * @param l Leading string to remove
-   * @param t Trailing string to remove
-   */
-  public void remove(String l, String t) {
-    for (int i = 0; i < tokens.size(); i++) {
-      tokens.set(i, remove(tokens.get(i), l, t));
+    /**
+     * This resets this tokenizer with a new string and/or delimiter.
+     *
+     * @param string containing tokens
+     * @param delim  single character to split the tokens
+     * @return number of tokens
+     */
+    public int tokenize(String string, char delim) {
+        tokens.clear();
+
+        final Deque<Character> stack = new ArrayDeque<>();
+
+        // stack keeps track of the levels we are in the current token.
+        // if stack.size is > 0 then we don't split a token when delim is matched.
+        //
+        // The Geometric datatypes use this, because often a type may have others
+        // (usually PGpoint) embedded within a token.
+        //
+        // Peter 1998 Jan 6 - Added < and > to the nesting rules
+        int p;
+        int s;
+        boolean skipChar = false;
+        boolean nestedDoubleQuote = false;
+        char c = (char) 0;
+        for (p = 0, s = 0; p < string.length(); p++) {
+            c = string.charAt(p);
+
+            // increase nesting if an open character is found
+            if (c == '(' || c == '[' || c == '<' || (!nestedDoubleQuote && !skipChar && c == '"')) {
+                stack.push(c);
+                if (c == '"') {
+                    nestedDoubleQuote = true;
+                    skipChar = true;
+                }
+            }
+
+            // decrease nesting if a close character is found
+            if (c == ')' || c == ']' || c == '>' || (nestedDoubleQuote && !skipChar && c == '"')) {
+
+                if (c == '"') {
+                    while (!stack.isEmpty() && !Character.valueOf('"').equals(stack.peek())) {
+                        stack.pop();
+                    }
+                    nestedDoubleQuote = false;
+                    stack.pop();
+                } else {
+                    final Character ch = CLOSING_TO_OPENING_CHARACTER.get(c);
+                    if (!stack.isEmpty() && ch != null && ch.equals(stack.peek())) {
+                        stack.pop();
+                    }
+                }
+            }
+
+            skipChar = c == '\\';
+
+            if (stack.isEmpty() && c == delim) {
+                tokens.add(string.substring(s, p));
+                s = p + 1; // +1 to skip the delimiter
+            }
+
+        }
+
+        // Don't forget the last token ;-)
+        if (s < string.length()) {
+            tokens.add(string.substring(s));
+        }
+
+        // check for last token empty
+        if (s == string.length() && c == delim) {
+            tokens.add("");
+        }
+
+        return tokens.size();
     }
-  }
 
-  /**
-   * Removes ( and ) from the beginning and end of a string.
-   *
-   * @param s String to remove from
-   * @return String without the ( or )
-   */
-  public static String removePara(String s) {
-    return remove(s, "(", ")");
-  }
+    /**
+     * @return the number of tokens available
+     */
+    public int getSize() {
+        return tokens.size();
+    }
 
-  /**
-   * Removes ( and ) from the beginning and end of all tokens.
-   */
-  public void removePara() {
-    remove("(", ")");
-  }
+    /**
+     * @param n Token number ( 0 ... getSize()-1 )
+     * @return The token value
+     */
+    public String getToken(int n) {
+        return tokens.get(n);
+    }
 
-  /**
-   * Removes [ and ] from the beginning and end of a string.
-   *
-   * @param s String to remove from
-   * @return String without the [ or ]
-   */
-  public static String removeBox(String s) {
-    return remove(s, "[", "]");
-  }
+    /**
+     * <p>This returns a new tokenizer based on one of our tokens.</p>
+     *
+     * <p>The geometric datatypes use this to process nested tokens (usually PGpoint).</p>
+     *
+     * @param n     Token number ( 0 ... getSize()-1 )
+     * @param delim The delimiter to use
+     * @return A new instance of PGtokenizer based on the token
+     */
+    public PGtokenizer tokenizeToken(int n, char delim) {
+        return new PGtokenizer(getToken(n), delim);
+    }
 
-  /**
-   * Removes [ and ] from the beginning and end of all tokens.
-   */
-  public void removeBox() {
-    remove("[", "]");
-  }
+    /**
+     * This removes the lead/trailing strings from all tokens.
+     *
+     * @param l Leading string to remove
+     * @param t Trailing string to remove
+     */
+    public void remove(String l, String t) {
+        for (int i = 0; i < tokens.size(); i++) {
+            tokens.set(i, remove(tokens.get(i), l, t));
+        }
+    }
 
-  /**
-   * Removes &lt; and &gt; from the beginning and end of a string.
-   *
-   * @param s String to remove from
-   * @return String without the &lt; or &gt;
-   */
-  public static String removeAngle(String s) {
-    return remove(s, "<", ">");
-  }
+    /**
+     * Removes ( and ) from the beginning and end of all tokens.
+     */
+    public void removePara() {
+        remove("(", ")");
+    }
 
-  /**
-   * Removes &lt; and &gt; from the beginning and end of all tokens.
-   */
-  public void removeAngle() {
-    remove("<", ">");
-  }
+    /**
+     * Removes [ and ] from the beginning and end of all tokens.
+     */
+    public void removeBox() {
+        remove("[", "]");
+    }
 
-  /**
-   * Removes curly braces { and } from the beginning and end of a string.
-   *
-   * @param s String to remove from
-   * @return String without the { or }
-   */
-  public static String removeCurlyBrace(String s) {
-    return remove(s, "{", "}");
-  }
+    /**
+     * Removes &lt; and &gt; from the beginning and end of all tokens.
+     */
+    public void removeAngle() {
+        remove("<", ">");
+    }
 
-  /**
-   * Removes &lt; and &gt; from the beginning and end of all tokens.
-   */
-  public void removeCurlyBrace() {
-    remove("{", "}");
-  }
+    /**
+     * Removes curly braces { and } from the beginning and end of all tokens.
+     */
+    public void removeCurlyBrace() {
+        remove("{", "}");
+    }
 
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/util/PSQLException.java b/pgjdbc/src/main/java/org/postgresql/util/PSQLException.java
index 8a1913d..018730e 100644
--- a/pgjdbc/src/main/java/org/postgresql/util/PSQLException.java
+++ b/pgjdbc/src/main/java/org/postgresql/util/PSQLException.java
@@ -10,26 +10,26 @@ import java.sql.SQLException;
 @SuppressWarnings("serial")
 public class PSQLException extends SQLException {
 
-  private ServerErrorMessage serverError;
+    private ServerErrorMessage serverError;
 
-  public PSQLException(String msg, PSQLState state, Throwable cause) {
-    super(msg, state == null ? null : state.getState(), cause);
-  }
+    public PSQLException(String msg, PSQLState state, Throwable cause) {
+        super(msg, state == null ? null : state.getState(), cause);
+    }
 
-  public PSQLException(String msg, PSQLState state) {
-    super(msg, state == null ? null : state.getState());
-  }
+    public PSQLException(String msg, PSQLState state) {
+        super(msg, state == null ? null : state.getState());
+    }
 
-  public PSQLException(ServerErrorMessage serverError) {
-    this(serverError, true);
-  }
+    public PSQLException(ServerErrorMessage serverError) {
+        this(serverError, true);
+    }
 
-  public PSQLException(ServerErrorMessage serverError, boolean detail) {
-    super(detail ? serverError.toString() : serverError.getNonSensitiveErrorMessage(), serverError.getSQLState());
-    this.serverError = serverError;
-  }
+    public PSQLException(ServerErrorMessage serverError, boolean detail) {
+        super(detail ? serverError.toString() : serverError.getNonSensitiveErrorMessage(), serverError.getSQLState());
+        this.serverError = serverError;
+    }
 
-  public ServerErrorMessage getServerErrorMessage() {
-    return serverError;
-  }
+    public ServerErrorMessage getServerErrorMessage() {
+        return serverError;
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/util/PSQLState.java b/pgjdbc/src/main/java/org/postgresql/util/PSQLState.java
index 507ce13..2c1f25a 100644
--- a/pgjdbc/src/main/java/org/postgresql/util/PSQLState.java
+++ b/pgjdbc/src/main/java/org/postgresql/util/PSQLState.java
@@ -10,118 +10,118 @@ package org.postgresql.util;
  */
 public enum PSQLState {
 
-  UNKNOWN_STATE(""),
+    UNKNOWN_STATE(""),
 
-  TOO_MANY_RESULTS("0100E"),
+    TOO_MANY_RESULTS("0100E"),
 
-  NO_DATA("02000"),
+    NO_DATA("02000"),
 
-  INVALID_PARAMETER_TYPE("07006"),
+    INVALID_PARAMETER_TYPE("07006"),
 
-  /**
-   * We could establish a connection with the server for unknown reasons. Could be a network
-   * problem.
-   */
-  CONNECTION_UNABLE_TO_CONNECT("08001"),
+    /**
+     * We could establish a connection with the server for unknown reasons. Could be a network
+     * problem.
+     */
+    CONNECTION_UNABLE_TO_CONNECT("08001"),
 
-  CONNECTION_DOES_NOT_EXIST("08003"),
+    CONNECTION_DOES_NOT_EXIST("08003"),
 
-  /**
-   * The server rejected our connection attempt. Usually an authentication failure, but could be a
-   * configuration error like asking for a SSL connection with a server that wasn't built with SSL
-   * support.
-   */
-  CONNECTION_REJECTED("08004"),
+    /**
+     * The server rejected our connection attempt. Usually an authentication failure, but could be a
+     * configuration error like asking for a SSL connection with a server that wasn't built with SSL
+     * support.
+     */
+    CONNECTION_REJECTED("08004"),
 
-  /**
-   * After a connection has been established, it went bad.
-   */
-  CONNECTION_FAILURE("08006"),
-  CONNECTION_FAILURE_DURING_TRANSACTION("08007"),
+    /**
+     * After a connection has been established, it went bad.
+     */
+    CONNECTION_FAILURE("08006"),
+    CONNECTION_FAILURE_DURING_TRANSACTION("08007"),
 
-  /**
-   * The server sent us a response the driver was not prepared for and is either bizarre datastream
-   * corruption, a driver bug, or a protocol violation on the server's part.
-   */
-  PROTOCOL_VIOLATION("08P01"),
+    /**
+     * The server sent us a response the driver was not prepared for and is either bizarre datastream
+     * corruption, a driver bug, or a protocol violation on the server's part.
+     */
+    PROTOCOL_VIOLATION("08P01"),
 
-  COMMUNICATION_ERROR("08S01"),
+    COMMUNICATION_ERROR("08S01"),
 
-  NOT_IMPLEMENTED("0A000"),
+    NOT_IMPLEMENTED("0A000"),
 
-  DATA_ERROR("22000"),
-  STRING_DATA_RIGHT_TRUNCATION("22001"),
-  NUMERIC_VALUE_OUT_OF_RANGE("22003"),
-  BAD_DATETIME_FORMAT("22007"),
-  DATETIME_OVERFLOW("22008"),
-  DIVISION_BY_ZERO("22012"),
-  MOST_SPECIFIC_TYPE_DOES_NOT_MATCH("2200G"),
-  INVALID_PARAMETER_VALUE("22023"),
+    DATA_ERROR("22000"),
+    STRING_DATA_RIGHT_TRUNCATION("22001"),
+    NUMERIC_VALUE_OUT_OF_RANGE("22003"),
+    BAD_DATETIME_FORMAT("22007"),
+    DATETIME_OVERFLOW("22008"),
+    DIVISION_BY_ZERO("22012"),
+    MOST_SPECIFIC_TYPE_DOES_NOT_MATCH("2200G"),
+    INVALID_PARAMETER_VALUE("22023"),
 
-  NOT_NULL_VIOLATION("23502"),
-  FOREIGN_KEY_VIOLATION("23503"),
-  UNIQUE_VIOLATION("23505"),
-  CHECK_VIOLATION("23514"),
-  EXCLUSION_VIOLATION("23P01"),
+    NOT_NULL_VIOLATION("23502"),
+    FOREIGN_KEY_VIOLATION("23503"),
+    UNIQUE_VIOLATION("23505"),
+    CHECK_VIOLATION("23514"),
+    EXCLUSION_VIOLATION("23P01"),
 
-  INVALID_CURSOR_STATE("24000"),
+    INVALID_CURSOR_STATE("24000"),
 
-  TRANSACTION_STATE_INVALID("25000"),
-  ACTIVE_SQL_TRANSACTION("25001"),
-  NO_ACTIVE_SQL_TRANSACTION("25P01"),
-  IN_FAILED_SQL_TRANSACTION("25P02"),
+    TRANSACTION_STATE_INVALID("25000"),
+    ACTIVE_SQL_TRANSACTION("25001"),
+    NO_ACTIVE_SQL_TRANSACTION("25P01"),
+    IN_FAILED_SQL_TRANSACTION("25P02"),
 
-  INVALID_SQL_STATEMENT_NAME("26000"),
-  INVALID_AUTHORIZATION_SPECIFICATION("28000"),
-  INVALID_PASSWORD("28P01"),
+    INVALID_SQL_STATEMENT_NAME("26000"),
+    INVALID_AUTHORIZATION_SPECIFICATION("28000"),
+    INVALID_PASSWORD("28P01"),
 
-  INVALID_TRANSACTION_TERMINATION("2D000"),
+    INVALID_TRANSACTION_TERMINATION("2D000"),
 
-  STATEMENT_NOT_ALLOWED_IN_FUNCTION_CALL("2F003"),
+    STATEMENT_NOT_ALLOWED_IN_FUNCTION_CALL("2F003"),
 
-  INVALID_SAVEPOINT_SPECIFICATION("3B000"),
+    INVALID_SAVEPOINT_SPECIFICATION("3B000"),
 
-  SERIALIZATION_FAILURE("40001"),
-  DEADLOCK_DETECTED("40P01"),
-  SYNTAX_ERROR("42601"),
-  UNDEFINED_COLUMN("42703"),
-  UNDEFINED_OBJECT("42704"),
-  WRONG_OBJECT_TYPE("42809"),
-  NUMERIC_CONSTANT_OUT_OF_RANGE("42820"),
-  DATA_TYPE_MISMATCH("42821"),
-  UNDEFINED_FUNCTION("42883"),
-  INVALID_NAME("42602"),
-  DATATYPE_MISMATCH("42804"),
-  CANNOT_COERCE("42846"),
-  UNDEFINED_TABLE("42P01"),
+    SERIALIZATION_FAILURE("40001"),
+    DEADLOCK_DETECTED("40P01"),
+    SYNTAX_ERROR("42601"),
+    UNDEFINED_COLUMN("42703"),
+    UNDEFINED_OBJECT("42704"),
+    WRONG_OBJECT_TYPE("42809"),
+    NUMERIC_CONSTANT_OUT_OF_RANGE("42820"),
+    DATA_TYPE_MISMATCH("42821"),
+    UNDEFINED_FUNCTION("42883"),
+    INVALID_NAME("42602"),
+    DATATYPE_MISMATCH("42804"),
+    CANNOT_COERCE("42846"),
+    UNDEFINED_TABLE("42P01"),
 
-  OUT_OF_MEMORY("53200"),
-  OBJECT_NOT_IN_STATE("55000"),
-  OBJECT_IN_USE("55006"),
+    OUT_OF_MEMORY("53200"),
+    OBJECT_NOT_IN_STATE("55000"),
+    OBJECT_IN_USE("55006"),
 
-  QUERY_CANCELED("57014"),
+    QUERY_CANCELED("57014"),
 
-  SYSTEM_ERROR("60000"),
-  IO_ERROR("58030"),
+    SYSTEM_ERROR("60000"),
+    IO_ERROR("58030"),
 
-  UNEXPECTED_ERROR("99999");
+    UNEXPECTED_ERROR("99999");
 
-  private final String state;
+    private final String state;
 
-  PSQLState(String state) {
-    this.state = state;
-  }
+    PSQLState(String state) {
+        this.state = state;
+    }
 
-  public String getState() {
-    return this.state;
-  }
+    public static boolean isConnectionError(String psqlState) {
+        return PSQLState.CONNECTION_UNABLE_TO_CONNECT.getState().equals(psqlState)
+                || PSQLState.CONNECTION_DOES_NOT_EXIST.getState().equals(psqlState)
+                || PSQLState.CONNECTION_REJECTED.getState().equals(psqlState)
+                || PSQLState.CONNECTION_FAILURE.getState().equals(psqlState)
+                || PSQLState.CONNECTION_FAILURE_DURING_TRANSACTION.getState().equals(psqlState);
+    }
 
-  public static boolean isConnectionError(String psqlState) {
-    return PSQLState.CONNECTION_UNABLE_TO_CONNECT.getState().equals(psqlState)
-        || PSQLState.CONNECTION_DOES_NOT_EXIST.getState().equals(psqlState)
-        || PSQLState.CONNECTION_REJECTED.getState().equals(psqlState)
-        || PSQLState.CONNECTION_FAILURE.getState().equals(psqlState)
-        || PSQLState.CONNECTION_FAILURE_DURING_TRANSACTION.getState().equals(psqlState);
-  }
+    public String getState() {
+        return this.state;
+    }
 
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/util/PSQLWarning.java b/pgjdbc/src/main/java/org/postgresql/util/PSQLWarning.java
index 3f45d7d..0c43202 100644
--- a/pgjdbc/src/main/java/org/postgresql/util/PSQLWarning.java
+++ b/pgjdbc/src/main/java/org/postgresql/util/PSQLWarning.java
@@ -10,19 +10,19 @@ import java.sql.SQLWarning;
 @SuppressWarnings("serial")
 public class PSQLWarning extends SQLWarning {
 
-  private final ServerErrorMessage serverError;
+    private final ServerErrorMessage serverError;
 
-  public PSQLWarning(ServerErrorMessage err) {
-    super(err.toString(), err.getSQLState());
-    this.serverError = err;
-  }
+    public PSQLWarning(ServerErrorMessage err) {
+        super(err.toString(), err.getSQLState());
+        this.serverError = err;
+    }
 
-  @Override
-  public String getMessage() {
-    return serverError.getMessage();
-  }
+    @Override
+    public String getMessage() {
+        return serverError.getMessage();
+    }
 
-  public ServerErrorMessage getServerErrorMessage() {
-    return serverError;
-  }
+    public ServerErrorMessage getServerErrorMessage() {
+        return serverError;
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/util/PasswordUtil.java b/pgjdbc/src/main/java/org/postgresql/util/PasswordUtil.java
index 77301c5..60b8014 100644
--- a/pgjdbc/src/main/java/org/postgresql/util/PasswordUtil.java
+++ b/pgjdbc/src/main/java/org/postgresql/util/PasswordUtil.java
@@ -18,134 +18,134 @@ import java.util.Arrays;
 import java.util.Objects;
 
 public class PasswordUtil {
-  private static final int DEFAULT_ITERATIONS = 4096;
-  private static final int DEFAULT_SALT_LENGTH = 16;
+    private static final int DEFAULT_ITERATIONS = 4096;
+    private static final int DEFAULT_SALT_LENGTH = 16;
 
-  private static class SecureRandomHolder {
-    static final SecureRandom INSTANCE = new SecureRandom();
-  }
+    public PasswordUtil() {
+    }
 
-  public PasswordUtil() {
-  }
+    private static SecureRandom getSecureRandom() {
+        return SecureRandomHolder.INSTANCE;
+    }
 
-  private static SecureRandom getSecureRandom() {
-    return SecureRandomHolder.INSTANCE;
-  }
+    /**
+     * Encode the given password for use with md5 authentication. The PostgreSQL
+     * server uses the username as the per-user salt so that must also be provided.
+     * The return value of this method is the literal text that may be used when
+     * creating or modifying a user with the given password without the surrounding
+     * single quotes.
+     *
+     * @param user     The username of the database user
+     * @param password The plain text of the user's password. The implementation will zero out the
+     *                 array after use
+     * @return The text representation of the password encrypted for md5
+     * authentication.
+     * @deprecated prefer {@link org.postgresql.PGConnection#alterUserPassword(String, char[], String)}
+     * for better security.
+     */
+    @Deprecated
+    @SuppressWarnings("DeprecatedIsStillUsed")
+    public static String encodeMd5(String user, char[] password) {
+        Objects.requireNonNull(user, "user");
+        Objects.requireNonNull(password, "password");
+        ByteBuffer passwordBytes = null;
+        try {
+            passwordBytes = StandardCharsets.UTF_8.encode(CharBuffer.wrap(password));
+            byte[] userBytes = user.getBytes(StandardCharsets.UTF_8);
+            final MessageDigest md = MessageDigest.getInstance("MD5");
 
-  /**
-   * Encode the given password for use with md5 authentication. The PostgreSQL
-   * server uses the username as the per-user salt so that must also be provided.
-   * The return value of this method is the literal text that may be used when
-   * creating or modifying a user with the given password without the surrounding
-   * single quotes.
-   *
-   * @param user     The username of the database user
-   * @param password The plain text of the user's password. The implementation will zero out the
-   *                 array after use
-   * @return The text representation of the password encrypted for md5
-   *         authentication.
-   * @deprecated prefer {@link org.postgresql.PGConnection#alterUserPassword(String, char[], String)}
-   *             for better security.
-   */
-  @Deprecated
-  @SuppressWarnings("DeprecatedIsStillUsed")
-  public static String encodeMd5(String user, char[] password) {
-    Objects.requireNonNull(user, "user");
-    Objects.requireNonNull(password, "password");
-    ByteBuffer passwordBytes = null;
-    try {
-      passwordBytes = StandardCharsets.UTF_8.encode(CharBuffer.wrap(password));
-      byte[] userBytes = user.getBytes(StandardCharsets.UTF_8);
-      final MessageDigest md = MessageDigest.getInstance("MD5");
+            md.update(passwordBytes);
+            md.update(userBytes);
+            byte[] digest = md.digest(); // 16-byte MD5
 
-      md.update(passwordBytes);
-      md.update(userBytes);
-      byte[] digest = md.digest(); // 16-byte MD5
+            final byte[] encodedPassword = new byte[35]; // 3 + 2 x 16
+            encodedPassword[0] = (byte) 'm';
+            encodedPassword[1] = (byte) 'd';
+            encodedPassword[2] = (byte) '5';
+            MD5Digest.bytesToHex(digest, encodedPassword, 3);
 
-      final byte[] encodedPassword = new byte[35]; // 3 + 2 x 16
-      encodedPassword[0] = (byte) 'm';
-      encodedPassword[1] = (byte) 'd';
-      encodedPassword[2] = (byte) '5';
-      MD5Digest.bytesToHex(digest, encodedPassword, 3);
-
-      return new String(encodedPassword, StandardCharsets.UTF_8);
-    } catch (NoSuchAlgorithmException e) {
-      throw new IllegalStateException("Unable to encode password with MD5", e);
-    } finally {
-      Arrays.fill(password, (char) 0);
-      if (passwordBytes != null) {
-        if (passwordBytes.hasArray()) {
-          Arrays.fill(passwordBytes.array(), (byte) 0);
-        } else {
-          int limit = passwordBytes.limit();
-          for (int i = 0; i < limit; i++) {
-            passwordBytes.put(i, (byte) 0);
-          }
+            return new String(encodedPassword, StandardCharsets.UTF_8);
+        } catch (NoSuchAlgorithmException e) {
+            throw new IllegalStateException("Unable to encode password with MD5", e);
+        } finally {
+            Arrays.fill(password, (char) 0);
+            if (passwordBytes != null) {
+                if (passwordBytes.hasArray()) {
+                    Arrays.fill(passwordBytes.array(), (byte) 0);
+                } else {
+                    int limit = passwordBytes.limit();
+                    for (int i = 0; i < limit; i++) {
+                        passwordBytes.put(i, (byte) 0);
+                    }
+                }
+            }
         }
-      }
     }
-  }
 
-  /**
-   * Encode the given password for the specified encryption type.
-   * The word "encryption" is used here to match the verbiage in the PostgreSQL
-   * server, i.e. the "password_encryption" setting. In reality, a cryptographic
-   * digest / HMAC operation is being performed.
-   * The database user is only required for the md5 encryption type.
-   *
-   * @param user           The username of the database user
-   * @param password       The plain text of the user's password. The implementation will zero
-   *                       out the array after use
-   * @param encryptionType The encryption type for which to encode the user's
-   *                       password. This should match the database's supported
-   *                       methods and value of the password_encryption setting.
-   * @return The encoded password
-   * @throws SQLException If an error occurs encoding the password
-   */
-  public static String encodePassword(String user, char[] password, String encryptionType)
-      throws SQLException {
-    Objects.requireNonNull(password, "password");
-    Objects.requireNonNull(encryptionType, "encryptionType");
-    switch (encryptionType) {
-      case "on":
-      case "off":
-      case "md5":
-        return encodeMd5(user, password);
+    /**
+     * Encode the given password for the specified encryption type.
+     * The word "encryption" is used here to match the verbiage in the PostgreSQL
+     * server, i.e. the "password_encryption" setting. In reality, a cryptographic
+     * digest / HMAC operation is being performed.
+     * The database user is only required for the md5 encryption type.
+     *
+     * @param user           The username of the database user
+     * @param password       The plain text of the user's password. The implementation will zero
+     *                       out the array after use
+     * @param encryptionType The encryption type for which to encode the user's
+     *                       password. This should match the database's supported
+     *                       methods and value of the password_encryption setting.
+     * @return The encoded password
+     * @throws SQLException If an error occurs encoding the password
+     */
+    public static String encodePassword(String user, char[] password, String encryptionType)
+            throws SQLException {
+        Objects.requireNonNull(password, "password");
+        Objects.requireNonNull(encryptionType, "encryptionType");
+        switch (encryptionType) {
+            case "on":
+            case "off":
+            case "md5":
+                return encodeMd5(user, password);
+        }
+        // If we get here then it's an unhandled encryption type so we must wipe the array ourselves
+        Arrays.fill(password, (char) 0);
+        throw new PSQLException("Unable to determine encryption type: " + encryptionType, PSQLState.SYSTEM_ERROR);
     }
-    // If we get here then it's an unhandled encryption type so we must wipe the array ourselves
-    Arrays.fill(password, (char) 0);
-    throw new PSQLException("Unable to determine encryption type: " + encryptionType, PSQLState.SYSTEM_ERROR);
-  }
 
-  /**
-   * Generate the SQL statement to alter a user's password using the given
-   * encryption.
-   * All other encryption settings for the password will use the driver's
-   * defaults.
-   *
-   * @param user           The username of the database user
-   * @param password       The plain text of the user's password. The implementation will zero
-   *                       out the array after use
-   * @param encryptionType The encryption type of the password
-   * @return An SQL statement that may be executed to change the user's password
-   * @throws SQLException If an error occurs encoding the password
-   */
-  public static String genAlterUserPasswordSQL(String user, char[] password, String encryptionType)
-      throws SQLException {
-    try {
-      String encodedPassword = encodePassword(user, password, encryptionType);
-      StringBuilder sb = new StringBuilder();
-      sb.append("ALTER USER ");
-      Utils.escapeIdentifier(sb, user);
-      sb.append(" PASSWORD '");
-      // The choice of true / false for standard conforming strings does not matter
-      // here as the value being escaped is generated by us and known to be hex
-      // characters for all of the implemented password encryption methods.
-      Utils.escapeLiteral(sb, encodedPassword, true);
-      sb.append("'");
-      return sb.toString();
-    } finally {
-      Arrays.fill(password, (char) 0);
+    /**
+     * Generate the SQL statement to alter a user's password using the given
+     * encryption.
+     * All other encryption settings for the password will use the driver's
+     * defaults.
+     *
+     * @param user           The username of the database user
+     * @param password       The plain text of the user's password. The implementation will zero
+     *                       out the array after use
+     * @param encryptionType The encryption type of the password
+     * @return An SQL statement that may be executed to change the user's password
+     * @throws SQLException If an error occurs encoding the password
+     */
+    public static String genAlterUserPasswordSQL(String user, char[] password, String encryptionType)
+            throws SQLException {
+        try {
+            String encodedPassword = encodePassword(user, password, encryptionType);
+            StringBuilder sb = new StringBuilder();
+            sb.append("ALTER USER ");
+            Utils.escapeIdentifier(sb, user);
+            sb.append(" PASSWORD '");
+            // The choice of true / false for standard conforming strings does not matter
+            // here as the value being escaped is generated by us and known to be hex
+            // characters for all of the implemented password encryption methods.
+            Utils.escapeLiteral(sb, encodedPassword, true);
+            sb.append("'");
+            return sb.toString();
+        } finally {
+            Arrays.fill(password, (char) 0);
+        }
+    }
+
+    private static class SecureRandomHolder {
+        static final SecureRandom INSTANCE = new SecureRandom();
     }
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/util/ReaderInputStream.java b/pgjdbc/src/main/java/org/postgresql/util/ReaderInputStream.java
index 0227bf5..387566e 100644
--- a/pgjdbc/src/main/java/org/postgresql/util/ReaderInputStream.java
+++ b/pgjdbc/src/main/java/org/postgresql/util/ReaderInputStream.java
@@ -23,138 +23,137 @@ import java.nio.charset.StandardCharsets;
  * binary stream to a character stream.</p>
  */
 public class ReaderInputStream extends InputStream {
-  private static final int DEFAULT_CHAR_BUFFER_SIZE = 8 * 1024;
+    private static final int DEFAULT_CHAR_BUFFER_SIZE = 8 * 1024;
 
-  private final Reader reader;
-  private final CharsetEncoder encoder;
-  private final ByteBuffer bbuf;
-  private final CharBuffer cbuf;
+    private final Reader reader;
+    private final CharsetEncoder encoder;
+    private final ByteBuffer bbuf;
+    private final CharBuffer cbuf;
+    private final byte[] oneByte = new byte[1];
+    /**
+     * True when all of the characters have been read from the reader into cbuf.
+     */
+    private boolean endOfInput;
 
-  /**
-   * true when all of the characters have been read from the reader into inbuf.
-   */
-  private boolean endOfInput;
-  private final byte[] oneByte = new byte[1];
-
-  public ReaderInputStream(Reader reader) {
-    this(reader, DEFAULT_CHAR_BUFFER_SIZE);
-  }
-
-  /**
-   * Allow ReaderInputStreamTest to use small buffers to force UTF-16
-   * surrogate pairs to cross buffer boundaries in interesting ways.
-   * Because this constructor is package-private, the unit test must be in
-   * the same package.
-   */
-  ReaderInputStream(Reader reader, int charBufferSize) {
-    if (reader == null) {
-      throw new IllegalArgumentException("reader cannot be null");
+    public ReaderInputStream(Reader reader) {
+        this(reader, DEFAULT_CHAR_BUFFER_SIZE);
     }
 
-    // The standard UTF-8 encoder will only encode a UTF-16 surrogate pair
-    // when both surrogates are available in the CharBuffer.
-    if (charBufferSize < 2) {
-      throw new IllegalArgumentException("charBufferSize must be at least 2 chars");
-    }
-
-    this.reader = reader;
-    this.encoder = StandardCharsets.UTF_8.newEncoder();
-    // encoder.maxBytesPerChar() always returns 3.0 for UTF-8
-    this.bbuf = ByteBuffer.allocate(3 * charBufferSize);
-    this.bbuf.flip(); // prepare for subsequent write
-    this.cbuf = CharBuffer.allocate(charBufferSize);
-    this.cbuf.flip(); // prepare for subsequent write
-  }
-
-  private void advance() throws IOException {
-    assert !endOfInput;
-    assert !bbuf.hasRemaining()
-        : "advance() should be called when output byte buffer is empty. bbuf: " + bbuf + ", as string: " + bbuf.asCharBuffer().toString();
-    assert cbuf.remaining() < 2;
-
-    // given that bbuf.capacity = 3 x cbuf.capacity, the only time that we should have a
-    // remaining char is if the last char read was the 1st half of a surrogate pair
-    if (cbuf.remaining() == 0) {
-      cbuf.clear();
-    } else {
-      cbuf.compact();
-    }
-
-    int n = reader.read(cbuf);  // read #1
-    cbuf.flip();
-
-    CoderResult result;
-
-    endOfInput = n == -1;
-
-    bbuf.clear();
-    result = encoder.encode(cbuf, bbuf, endOfInput);
-    checkEncodeResult(result);
-
-    if (endOfInput) {
-      result = encoder.flush(bbuf);
-      checkEncodeResult(result);
-    }
-
-    bbuf.flip();
-  }
-
-  private void checkEncodeResult(CoderResult result) throws CharacterCodingException {
-    if (result.isError()) {
-      result.throwException();
-    }
-  }
-
-  @Override
-  public int read() throws IOException {
-    int res = 0;
-    while (res != -1) {
-      res = read(oneByte);
-      if (res > 0) {
-        return oneByte[0] & 0xFF;
-      }
-    }
-    return -1;
-  }
-
-  // The implementation of InputStream.read(byte[], int, int) silently ignores
-  // an IOException thrown by overrides of the read() method.
-  @Override
-  public int read(byte[] b, int off, int len) throws IOException {
-    if (b == null) {
-      throw new NullPointerException();
-    } else if (off < 0 || len < 0 || len > b.length - off) {
-      throw new IndexOutOfBoundsException();
-    } else if (len == 0) {
-      return 0;
-    }
-    if (endOfInput && !bbuf.hasRemaining()) {
-      return -1;
-    }
-
-    int totalRead = 0;
-    while (len > 0 && !endOfInput) {
-      if (bbuf.hasRemaining()) {
-        int remaining = Math.min(len, bbuf.remaining());
-        bbuf.get(b, off, remaining);
-        totalRead += remaining;
-        off += remaining;
-        len -= remaining;
-        if (len == 0) {
-          return totalRead;
+    /**
+     * Allow ReaderInputStreamTest to use small buffers to force UTF-16
+     * surrogate pairs to cross buffer boundaries in interesting ways.
+     * Note: this constructor was widened from package-private to public in
+     * this change, so the unit test no longer needs to share the package.
+     */
+    public ReaderInputStream(Reader reader, int charBufferSize) {
+        if (reader == null) {
+            throw new IllegalArgumentException("reader cannot be null");
         }
-      }
-      advance();
-    }
-    if (endOfInput && !bbuf.hasRemaining() && totalRead == 0) {
-      return -1;
-    }
-    return totalRead;
-  }
 
-  @Override
-  public void close() throws IOException {
-    endOfInput = true;
-    reader.close();
-  }
+        // The standard UTF-8 encoder will only encode a UTF-16 surrogate pair
+        // when both surrogates are available in the CharBuffer.
+        if (charBufferSize < 2) {
+            throw new IllegalArgumentException("charBufferSize must be at least 2 chars");
+        }
+
+        this.reader = reader;
+        this.encoder = StandardCharsets.UTF_8.newEncoder();
+        // encoder.maxBytesPerChar() always returns 3.0 for UTF-8
+        this.bbuf = ByteBuffer.allocate(3 * charBufferSize);
+        this.bbuf.flip(); // prepare for subsequent write
+        this.cbuf = CharBuffer.allocate(charBufferSize);
+        this.cbuf.flip(); // prepare for subsequent write
+    }
+
+    private void advance() throws IOException {
+        assert !endOfInput;
+        assert !bbuf.hasRemaining()
+                : "advance() should be called when output byte buffer is empty. bbuf: " + bbuf + ", as string: " + bbuf.asCharBuffer().toString();
+        assert cbuf.remaining() < 2;
+
+        // given that bbuf.capacity = 3 x cbuf.capacity, the only time that we should have a
+        // remaining char is if the last char read was the 1st half of a surrogate pair
+        if (cbuf.remaining() == 0) {
+            cbuf.clear();
+        } else {
+            cbuf.compact();
+        }
+
+        int n = reader.read(cbuf);  // read #1
+        cbuf.flip();
+
+        CoderResult result;
+
+        endOfInput = n == -1;
+
+        bbuf.clear();
+        result = encoder.encode(cbuf, bbuf, endOfInput);
+        checkEncodeResult(result);
+
+        if (endOfInput) {
+            result = encoder.flush(bbuf);
+            checkEncodeResult(result);
+        }
+
+        bbuf.flip();
+    }
+
+    private void checkEncodeResult(CoderResult result) throws CharacterCodingException {
+        if (result.isError()) {
+            result.throwException();
+        }
+    }
+
+    @Override
+    public int read() throws IOException {
+        int res = 0;
+        while (res != -1) {
+            res = read(oneByte);
+            if (res > 0) {
+                return oneByte[0] & 0xFF;
+            }
+        }
+        return -1;
+    }
+
+    // The implementation of InputStream.read(byte[], int, int) silently ignores
+    // an IOException thrown by overrides of the read() method.
+    @Override
+    public int read(byte[] b, int off, int len) throws IOException {
+        if (b == null) {
+            throw new NullPointerException();
+        } else if (off < 0 || len < 0 || len > b.length - off) {
+            throw new IndexOutOfBoundsException();
+        } else if (len == 0) {
+            return 0;
+        }
+        if (endOfInput && !bbuf.hasRemaining()) {
+            return -1;
+        }
+
+        int totalRead = 0;
+        while (len > 0 && !endOfInput) {
+            if (bbuf.hasRemaining()) {
+                int remaining = Math.min(len, bbuf.remaining());
+                bbuf.get(b, off, remaining);
+                totalRead += remaining;
+                off += remaining;
+                len -= remaining;
+                if (len == 0) {
+                    return totalRead;
+                }
+            }
+            advance();
+        }
+        if (endOfInput && !bbuf.hasRemaining() && totalRead == 0) {
+            return -1;
+        }
+        return totalRead;
+    }
+
+    @Override
+    public void close() throws IOException {
+        endOfInput = true;
+        reader.close();
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/util/ServerErrorMessage.java b/pgjdbc/src/main/java/org/postgresql/util/ServerErrorMessage.java
index 6b60de5..0563a8d 100644
--- a/pgjdbc/src/main/java/org/postgresql/util/ServerErrorMessage.java
+++ b/pgjdbc/src/main/java/org/postgresql/util/ServerErrorMessage.java
@@ -15,216 +15,216 @@ import java.util.logging.Logger;
 
 @SuppressWarnings("serial")
 public class ServerErrorMessage implements Serializable {
-  private static final Logger LOGGER = Logger.getLogger(ServerErrorMessage.class.getName());
+    private static final Logger LOGGER = Logger.getLogger(ServerErrorMessage.class.getName());
 
-  private static final Character SEVERITY = 'S';
-  private static final Character MESSAGE = 'M';
-  private static final Character DETAIL = 'D';
-  private static final Character HINT = 'H';
-  private static final Character POSITION = 'P';
-  private static final Character WHERE = 'W';
-  private static final Character FILE = 'F';
-  private static final Character LINE = 'L';
-  private static final Character ROUTINE = 'R';
-  private static final Character SQLSTATE = 'C';
-  private static final Character INTERNAL_POSITION = 'p';
-  private static final Character INTERNAL_QUERY = 'q';
-  private static final Character SCHEMA = 's';
-  private static final Character TABLE = 't';
-  private static final Character COLUMN = 'c';
-  private static final Character DATATYPE = 'd';
-  private static final Character CONSTRAINT = 'n';
+    private static final Character SEVERITY = 'S';
+    private static final Character MESSAGE = 'M';
+    private static final Character DETAIL = 'D';
+    private static final Character HINT = 'H';
+    private static final Character POSITION = 'P';
+    private static final Character WHERE = 'W';
+    private static final Character FILE = 'F';
+    private static final Character LINE = 'L';
+    private static final Character ROUTINE = 'R';
+    private static final Character SQLSTATE = 'C';
+    private static final Character INTERNAL_POSITION = 'p';
+    private static final Character INTERNAL_QUERY = 'q';
+    private static final Character SCHEMA = 's';
+    private static final Character TABLE = 't';
+    private static final Character COLUMN = 'c';
+    private static final Character DATATYPE = 'd';
+    private static final Character CONSTRAINT = 'n';
 
-  private final Map<Character, String> mesgParts = new HashMap<>();
+    private final Map<Character, String> mesgParts = new HashMap<>();
 
-  public ServerErrorMessage(EncodingPredictor.DecodeResult serverError) {
-    this(serverError.result);
-    if (serverError.encoding != null) {
-      mesgParts.put(MESSAGE, mesgParts.get(MESSAGE)
-          + GT.tr(" (pgjdbc: autodetected server-encoding to be {0}, if the message is not readable, please check database logs and/or host, port, dbname, user, password, pg_hba.conf)",
-          serverError.encoding)
-      );
-    }
-  }
-
-  public ServerErrorMessage(String serverError) {
-    char[] chars = serverError.toCharArray();
-    int pos = 0;
-    int length = chars.length;
-    while (pos < length) {
-      char mesgType = chars[pos];
-      if (mesgType != '\0') {
-        pos++;
-        int startString = pos;
-        // order here is important position must be checked before accessing the array
-        while (pos < length && chars[pos] != '\0') {
-          pos++;
+    public ServerErrorMessage(EncodingPredictor.DecodeResult serverError) {
+        this(serverError.result);
+        if (serverError.encoding != null) {
+            mesgParts.put(MESSAGE, mesgParts.get(MESSAGE)
+                    + GT.tr(" (pgjdbc: autodetected server-encoding to be {0}, if the message is not readable, please check database logs and/or host, port, dbname, user, password, pg_hba.conf)",
+                    serverError.encoding)
+            );
         }
-        String mesgPart = new String(chars, startString, pos - startString);
-        mesgParts.put(mesgType, mesgPart);
-      }
-      pos++;
-    }
-  }
-
-  public String getSQLState() {
-    return mesgParts.get(SQLSTATE);
-  }
-
-  public String getMessage() {
-    return mesgParts.get(MESSAGE);
-  }
-
-  public String getSeverity() {
-    return mesgParts.get(SEVERITY);
-  }
-
-  public String getDetail() {
-    return mesgParts.get(DETAIL);
-  }
-
-  public String getHint() {
-    return mesgParts.get(HINT);
-  }
-
-  public int getPosition() {
-    return getIntegerPart(POSITION);
-  }
-
-  public String getWhere() {
-    return mesgParts.get(WHERE);
-  }
-
-  public String getSchema() {
-    return mesgParts.get(SCHEMA);
-  }
-
-  public String getTable() {
-    return mesgParts.get(TABLE);
-  }
-
-  public String getColumn() {
-    return mesgParts.get(COLUMN);
-  }
-
-  public String getDatatype() {
-    return mesgParts.get(DATATYPE);
-  }
-
-  public String getConstraint() {
-    return mesgParts.get(CONSTRAINT);
-  }
-
-  public String getFile() {
-    return mesgParts.get(FILE);
-  }
-
-  public int getLine() {
-    return getIntegerPart(LINE);
-  }
-
-  public String getRoutine() {
-    return mesgParts.get(ROUTINE);
-  }
-
-  public String getInternalQuery() {
-    return mesgParts.get(INTERNAL_QUERY);
-  }
-
-  public int getInternalPosition() {
-    return getIntegerPart(INTERNAL_POSITION);
-  }
-
-  private int getIntegerPart(Character c) {
-    String s = mesgParts.get(c);
-    if (s == null) {
-      return 0;
-    }
-    return Integer.parseInt(s);
-  }
-
-  String getNonSensitiveErrorMessage() {
-    StringBuilder totalMessage = new StringBuilder();
-    String message = mesgParts.get(SEVERITY);
-    if (message != null) {
-      totalMessage.append(message).append(": ");
-    }
-    message = mesgParts.get(MESSAGE);
-    if (message != null) {
-      totalMessage.append(message);
-    }
-    return totalMessage.toString();
-  }
-
-  @Override
-  public String toString() {
-    // Now construct the message from what the server sent
-    // The general format is:
-    // SEVERITY: Message \n
-    // Detail: \n
-    // Hint: \n
-    // Position: \n
-    // Where: \n
-    // Internal Query: \n
-    // Internal Position: \n
-    // Location: File:Line:Routine \n
-    // SQLState: \n
-    //
-    // Normally only the message and detail is included.
-    // If INFO level logging is enabled then detail, hint, position and where are
-    // included. If DEBUG level logging is enabled then all information
-    // is included.
-
-    StringBuilder totalMessage = new StringBuilder();
-    String message = mesgParts.get(SEVERITY);
-    if (message != null) {
-      totalMessage.append(message).append(": ");
-    }
-    message = mesgParts.get(MESSAGE);
-    if (message != null) {
-      totalMessage.append(message);
-    }
-    message = mesgParts.get(DETAIL);
-    if (message != null) {
-      totalMessage.append("\n  ").append(GT.tr("Detail: {0}", message));
     }
 
-    message = mesgParts.get(HINT);
-    if (message != null) {
-      totalMessage.append("\n  ").append(GT.tr("Hint: {0}", message));
-    }
-    message = mesgParts.get(POSITION);
-    if (message != null) {
-      totalMessage.append("\n  ").append(GT.tr("Position: {0}", message));
-    }
-    message = mesgParts.get(WHERE);
-    if (message != null) {
-      totalMessage.append("\n  ").append(GT.tr("Where: {0}", message));
+    public ServerErrorMessage(String serverError) {
+        char[] chars = serverError.toCharArray();
+        int pos = 0;
+        int length = chars.length;
+        while (pos < length) {
+            char mesgType = chars[pos];
+            if (mesgType != '\0') {
+                pos++;
+                int startString = pos;
+                // order here is important: position must be checked before accessing the array
+                while (pos < length && chars[pos] != '\0') {
+                    pos++;
+                }
+                String mesgPart = new String(chars, startString, pos - startString);
+                mesgParts.put(mesgType, mesgPart);
+            }
+            pos++;
+        }
     }
 
-    if (LOGGER.isLoggable(Level.FINEST)) {
-      String internalQuery = mesgParts.get(INTERNAL_QUERY);
-      if (internalQuery != null) {
-        totalMessage.append("\n  ").append(GT.tr("Internal Query: {0}", internalQuery));
-      }
-      String internalPosition = mesgParts.get(INTERNAL_POSITION);
-      if (internalPosition != null) {
-        totalMessage.append("\n  ").append(GT.tr("Internal Position: {0}", internalPosition));
-      }
-
-      String file = mesgParts.get(FILE);
-      String line = mesgParts.get(LINE);
-      String routine = mesgParts.get(ROUTINE);
-      if (file != null || line != null || routine != null) {
-        totalMessage.append("\n  ").append(GT.tr("Location: File: {0}, Routine: {1}, Line: {2}",
-            file, routine, line));
-      }
-      message = mesgParts.get(SQLSTATE);
-      if (message != null) {
-        totalMessage.append("\n  ").append(GT.tr("Server SQLState: {0}", message));
-      }
+    public String getSQLState() {
+        return mesgParts.get(SQLSTATE);
     }
 
-    return totalMessage.toString();
-  }
+    public String getMessage() {
+        return mesgParts.get(MESSAGE);
+    }
+
+    public String getSeverity() {
+        return mesgParts.get(SEVERITY);
+    }
+
+    public String getDetail() {
+        return mesgParts.get(DETAIL);
+    }
+
+    public String getHint() {
+        return mesgParts.get(HINT);
+    }
+
+    public int getPosition() {
+        return getIntegerPart(POSITION);
+    }
+
+    public String getWhere() {
+        return mesgParts.get(WHERE);
+    }
+
+    public String getSchema() {
+        return mesgParts.get(SCHEMA);
+    }
+
+    public String getTable() {
+        return mesgParts.get(TABLE);
+    }
+
+    public String getColumn() {
+        return mesgParts.get(COLUMN);
+    }
+
+    public String getDatatype() {
+        return mesgParts.get(DATATYPE);
+    }
+
+    public String getConstraint() {
+        return mesgParts.get(CONSTRAINT);
+    }
+
+    public String getFile() {
+        return mesgParts.get(FILE);
+    }
+
+    public int getLine() {
+        return getIntegerPart(LINE);
+    }
+
+    public String getRoutine() {
+        return mesgParts.get(ROUTINE);
+    }
+
+    public String getInternalQuery() {
+        return mesgParts.get(INTERNAL_QUERY);
+    }
+
+    public int getInternalPosition() {
+        return getIntegerPart(INTERNAL_POSITION);
+    }
+
+    private int getIntegerPart(Character c) {
+        String s = mesgParts.get(c);
+        if (s == null) {
+            return 0;
+        }
+        return Integer.parseInt(s);
+    }
+
+    String getNonSensitiveErrorMessage() {
+        StringBuilder totalMessage = new StringBuilder();
+        String message = mesgParts.get(SEVERITY);
+        if (message != null) {
+            totalMessage.append(message).append(": ");
+        }
+        message = mesgParts.get(MESSAGE);
+        if (message != null) {
+            totalMessage.append(message);
+        }
+        return totalMessage.toString();
+    }
+
+    @Override
+    public String toString() {
+        // Now construct the message from what the server sent
+        // The general format is:
+        // SEVERITY: Message \n
+        // Detail: \n
+        // Hint: \n
+        // Position: \n
+        // Where: \n
+        // Internal Query: \n
+        // Internal Position: \n
+        // Location: File:Line:Routine \n
+        // SQLState: \n
+        //
+        // Normally only the message and detail is included.
+        // If INFO level logging is enabled then detail, hint, position and where are
+        // included. If DEBUG level logging is enabled then all information
+        // is included.
+
+        StringBuilder totalMessage = new StringBuilder();
+        String message = mesgParts.get(SEVERITY);
+        if (message != null) {
+            totalMessage.append(message).append(": ");
+        }
+        message = mesgParts.get(MESSAGE);
+        if (message != null) {
+            totalMessage.append(message);
+        }
+        message = mesgParts.get(DETAIL);
+        if (message != null) {
+            totalMessage.append("\n  ").append(GT.tr("Detail: {0}", message));
+        }
+
+        message = mesgParts.get(HINT);
+        if (message != null) {
+            totalMessage.append("\n  ").append(GT.tr("Hint: {0}", message));
+        }
+        message = mesgParts.get(POSITION);
+        if (message != null) {
+            totalMessage.append("\n  ").append(GT.tr("Position: {0}", message));
+        }
+        message = mesgParts.get(WHERE);
+        if (message != null) {
+            totalMessage.append("\n  ").append(GT.tr("Where: {0}", message));
+        }
+
+        if (LOGGER.isLoggable(Level.FINEST)) {
+            String internalQuery = mesgParts.get(INTERNAL_QUERY);
+            if (internalQuery != null) {
+                totalMessage.append("\n  ").append(GT.tr("Internal Query: {0}", internalQuery));
+            }
+            String internalPosition = mesgParts.get(INTERNAL_POSITION);
+            if (internalPosition != null) {
+                totalMessage.append("\n  ").append(GT.tr("Internal Position: {0}", internalPosition));
+            }
+
+            String file = mesgParts.get(FILE);
+            String line = mesgParts.get(LINE);
+            String routine = mesgParts.get(ROUTINE);
+            if (file != null || line != null || routine != null) {
+                totalMessage.append("\n  ").append(GT.tr("Location: File: {0}, Routine: {1}, Line: {2}",
+                        file, routine, line));
+            }
+            message = mesgParts.get(SQLSTATE);
+            if (message != null) {
+                totalMessage.append("\n  ").append(GT.tr("Server SQLState: {0}", message));
+            }
+        }
+
+        return totalMessage.toString();
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/util/SharedTimer.java b/pgjdbc/src/main/java/org/postgresql/util/SharedTimer.java
index b96fdc8..966fbb2 100644
--- a/pgjdbc/src/main/java/org/postgresql/util/SharedTimer.java
+++ b/pgjdbc/src/main/java/org/postgresql/util/SharedTimer.java
@@ -14,83 +14,81 @@ import java.util.logging.Logger;
 
 @SuppressWarnings("try")
 public class SharedTimer {
-  static class TimerCleanup implements LazyCleaner.CleaningAction<RuntimeException> {
-    private final Timer timer;
-
-    TimerCleanup(Timer timer) {
-      this.timer = timer;
+    // Incremented for each Timer created, this allows each to have a unique Timer name
+    private static final AtomicInteger timerCount = new AtomicInteger(0);
+    private static final Logger LOGGER = Logger.getLogger(SharedTimer.class.getName());
+    private final AtomicInteger refCount = new AtomicInteger(0);
+    private final ResourceLock lock = new ResourceLock();
+    private volatile Timer timer;
+    private LazyCleaner.Cleanable<RuntimeException> timerCleanup;
+    public SharedTimer() {
     }
 
-    @Override
-    public void onClean(boolean leak) throws RuntimeException {
-      timer.cancel();
+    public int getRefCount() {
+        return refCount.get();
     }
-  }
 
-  // Incremented for each Timer created, this allows each to have a unique Timer name
-  private static final AtomicInteger timerCount = new AtomicInteger(0);
-
-  private static final Logger LOGGER = Logger.getLogger(SharedTimer.class.getName());
-  private volatile Timer timer;
-  private final AtomicInteger refCount = new AtomicInteger(0);
-  private final ResourceLock lock = new ResourceLock();
-  private LazyCleaner.Cleanable<RuntimeException> timerCleanup;
-
-  public SharedTimer() {
-  }
-
-  public int getRefCount() {
-    return refCount.get();
-  }
-
-  public Timer getTimer() {
-    try (ResourceLock ignore = lock.obtain()) {
-      Timer timer = this.timer;
-      if (timer == null) {
-        int index = timerCount.incrementAndGet();
+    public Timer getTimer() {
+        try (ResourceLock ignore = lock.obtain()) {
+            Timer timer = this.timer;
+            if (timer == null) {
+                int index = timerCount.incrementAndGet();
 
         /*
          Temporarily switch contextClassLoader to the one that loaded this driver to avoid TimerThread preventing current
          contextClassLoader - which may be the ClassLoader of a web application - from being GC:ed.
          */
-        final ClassLoader prevContextCL = Thread.currentThread().getContextClassLoader();
-        try {
+                final ClassLoader prevContextCL = Thread.currentThread().getContextClassLoader();
+                try {
           /*
            Scheduled tasks should not need to use .getContextClassLoader, so we just reset it to null
            */
-          Thread.currentThread().setContextClassLoader(null);
+                    Thread.currentThread().setContextClassLoader(null);
 
-          this.timer = timer = new Timer("PostgreSQL-JDBC-SharedTimer-" + index, true);
-          this.timerCleanup = LazyCleaner.getInstance().register(refCount, new TimerCleanup(timer));
-        } finally {
-          Thread.currentThread().setContextClassLoader(prevContextCL);
+                    this.timer = timer = new Timer("PostgreSQL-JDBC-SharedTimer-" + index, true);
+                    this.timerCleanup = LazyCleaner.getInstance().register(refCount, new TimerCleanup(timer));
+                } finally {
+                    Thread.currentThread().setContextClassLoader(prevContextCL);
+                }
+            }
+            refCount.incrementAndGet();
+            return timer;
         }
-      }
-      refCount.incrementAndGet();
-      return timer;
     }
-  }
 
-  public void releaseTimer() {
-    try (ResourceLock ignore = lock.obtain()) {
-      int count = refCount.decrementAndGet();
-      if (count > 0) {
-        // There are outstanding references to the timer so do nothing
-        LOGGER.log(Level.FINEST, "Outstanding references still exist so not closing shared Timer");
-      } else if (count == 0) {
-        // This is the last usage of the Timer so cancel it so it's resources can be release.
-        LOGGER.log(Level.FINEST, "No outstanding references to shared Timer, will cancel and close it");
-        if (timerCleanup != null) {
-          timerCleanup.clean();
-          timer = null;
-          timerCleanup = null;
+    public void releaseTimer() {
+        try (ResourceLock ignore = lock.obtain()) {
+            int count = refCount.decrementAndGet();
+            if (count > 0) {
+                // There are outstanding references to the timer so do nothing
+                LOGGER.log(Level.FINEST, "Outstanding references still exist so not closing shared Timer");
+            } else if (count == 0) {
+                // This is the last usage of the Timer so cancel it so its resources can be released.
+                LOGGER.log(Level.FINEST, "No outstanding references to shared Timer, will cancel and close it");
+                if (timerCleanup != null) {
+                    timerCleanup.clean();
+                    timer = null;
+                    timerCleanup = null;
+                }
+            } else {
+                // Should not get here under normal circumstances, probably a bug in app code.
+                LOGGER.log(Level.WARNING,
+                        "releaseTimer() called too many times; there is probably a bug in the calling code");
+                refCount.set(0);
+            }
+        }
+    }
+
+    static class TimerCleanup implements LazyCleaner.CleaningAction<RuntimeException> {
+        private final Timer timer;
+
+        TimerCleanup(Timer timer) {
+            this.timer = timer;
+        }
+
+        @Override
+        public void onClean(boolean leak) throws RuntimeException {
+            timer.cancel();
         }
-      } else {
-        // Should not get here under normal circumstance, probably a bug in app code.
-        LOGGER.log(Level.WARNING,
-            "releaseTimer() called too many times; there is probably a bug in the calling code");
-        refCount.set(0);
-      }
     }
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/util/StreamWrapper.java b/pgjdbc/src/main/java/org/postgresql/util/StreamWrapper.java
index 1d3961c..76629b5 100644
--- a/pgjdbc/src/main/java/org/postgresql/util/StreamWrapper.java
+++ b/pgjdbc/src/main/java/org/postgresql/util/StreamWrapper.java
@@ -22,124 +22,123 @@ import java.nio.file.Path;
  */
 public final class StreamWrapper implements Closeable {
 
-  private static final int MAX_MEMORY_BUFFER_BYTES = 51200;
+    private static final int MAX_MEMORY_BUFFER_BYTES = 51200;
 
-  private static final String TEMP_FILE_PREFIX = "postgres-pgjdbc-stream";
+    private static final String TEMP_FILE_PREFIX = "postgres-pgjdbc-stream";
+    private final InputStream stream;
+    private final Object leakHandle = new Object();
+    private final byte[] rawData;
+    private final int offset;
+    private final int length;
+    private TempFileHolder tempFileHolder;
+    private LazyCleaner.Cleanable<IOException> cleaner;
 
-  public StreamWrapper(byte[] data, int offset, int length) {
-    this.stream = null;
-    this.rawData = data;
-    this.offset = offset;
-    this.length = length;
-  }
-
-  public StreamWrapper(InputStream stream, int length) {
-    this.stream = stream;
-    this.rawData = null;
-    this.offset = 0;
-    this.length = length;
-  }
-
-  public StreamWrapper(InputStream stream) throws PSQLException {
-    try {
-      ByteArrayOutputStream memoryOutputStream = new ByteArrayOutputStream();
-      final int memoryLength = copyStream(stream, memoryOutputStream, MAX_MEMORY_BUFFER_BYTES);
-      byte[] rawData = memoryOutputStream.toByteArray();
-
-      if (memoryLength == -1) {
-        final int diskLength;
-        final Path tempFile = Files.createTempFile(TEMP_FILE_PREFIX, ".tmp");
-        try (OutputStream diskOutputStream = Files.newOutputStream(tempFile)) {
-          diskOutputStream.write(rawData);
-          diskLength = copyStream(stream, diskOutputStream, Integer.MAX_VALUE - rawData.length);
-          if (diskLength == -1) {
-            throw new PSQLException(GT.tr("Object is too large to send over the protocol."),
-                PSQLState.NUMERIC_CONSTANT_OUT_OF_RANGE);
-          }
-        } catch (RuntimeException | Error | PSQLException e) {
-          try {
-            tempFile.toFile().delete();
-          } catch (Throwable ignore) {
-          }
-          throw e;
-        }
-        // The finalize action is not created if the above code throws
-        this.offset = 0;
-        this.length = rawData.length + diskLength;
-        this.rawData = null;
-        this.stream = null; // The stream is opened on demand
-        TempFileHolder tempFileHolder = new TempFileHolder(tempFile);
-        this.tempFileHolder = tempFileHolder;
-        cleaner = LazyCleaner.getInstance().register(leakHandle, tempFileHolder);
-      } else {
-        this.rawData = rawData;
+    public StreamWrapper(byte[] data, int offset, int length) {
         this.stream = null;
+        this.rawData = data;
+        this.offset = offset;
+        this.length = length;
+    }
+
+    public StreamWrapper(InputStream stream, int length) {
+        this.stream = stream;
+        this.rawData = null;
         this.offset = 0;
-        this.length = rawData.length;
-      }
-    } catch (IOException e) {
-      throw new PSQLException(GT.tr("An I/O error occurred while sending to the backend."),
-          PSQLState.IO_ERROR, e);
-    }
-  }
-
-  public InputStream getStream() throws IOException {
-    if (stream != null) {
-      return stream;
-    }
-    TempFileHolder finalizeAction = this.tempFileHolder;
-    if (finalizeAction != null) {
-      return finalizeAction.getStream();
+        this.length = length;
     }
 
-    return new ByteArrayInputStream(rawData, offset, length);
-  }
+    public StreamWrapper(InputStream stream) throws PSQLException {
+        try {
+            ByteArrayOutputStream memoryOutputStream = new ByteArrayOutputStream();
+            final int memoryLength = copyStream(stream, memoryOutputStream, MAX_MEMORY_BUFFER_BYTES);
+            byte[] rawData = memoryOutputStream.toByteArray();
 
-  @Override
-  public void close() throws IOException {
-    if (cleaner != null) {
-      cleaner.clean();
+            if (memoryLength == -1) {
+                final int diskLength;
+                final Path tempFile = Files.createTempFile(TEMP_FILE_PREFIX, ".tmp");
+                try (OutputStream diskOutputStream = Files.newOutputStream(tempFile)) {
+                    diskOutputStream.write(rawData);
+                    diskLength = copyStream(stream, diskOutputStream, Integer.MAX_VALUE - rawData.length);
+                    if (diskLength == -1) {
+                        throw new PSQLException(GT.tr("Object is too large to send over the protocol."),
+                                PSQLState.NUMERIC_CONSTANT_OUT_OF_RANGE);
+                    }
+                } catch (RuntimeException | Error | PSQLException e) {
+                    try {
+                        tempFile.toFile().delete();
+                    } catch (Throwable ignore) {
+                    }
+                    throw e;
+                }
+                // The finalize action is not created if the above code throws
+                this.offset = 0;
+                this.length = rawData.length + diskLength;
+                this.rawData = null;
+                this.stream = null; // The stream is opened on demand
+                TempFileHolder tempFileHolder = new TempFileHolder(tempFile);
+                this.tempFileHolder = tempFileHolder;
+                cleaner = LazyCleaner.getInstance().register(leakHandle, tempFileHolder);
+            } else {
+                this.rawData = rawData;
+                this.stream = null;
+                this.offset = 0;
+                this.length = rawData.length;
+            }
+        } catch (IOException e) {
+            throw new PSQLException(GT.tr("An I/O error occurred while sending to the backend."),
+                    PSQLState.IO_ERROR, e);
+        }
     }
-  }
 
-  public int getLength() {
-    return length;
-  }
-
-  public int getOffset() {
-    return offset;
-  }
-
-  public byte [] getBytes() {
-    return rawData;
-  }
-
-  @Override
-  public String toString() {
-    return "<stream of " + length + " bytes>";
-  }
-
-  private static int copyStream(InputStream inputStream, OutputStream outputStream, int limit)
-      throws IOException {
-    int totalLength = 0;
-    byte[] buffer = new byte[2048];
-    int readLength = inputStream.read(buffer);
-    while (readLength > 0) {
-      totalLength += readLength;
-      outputStream.write(buffer, 0, readLength);
-      if (totalLength >= limit) {
-        return -1;
-      }
-      readLength = inputStream.read(buffer);
+    private static int copyStream(InputStream inputStream, OutputStream outputStream, int limit)
+            throws IOException {
+        int totalLength = 0;
+        byte[] buffer = new byte[2048];
+        int readLength = inputStream.read(buffer);
+        while (readLength > 0) {
+            totalLength += readLength;
+            outputStream.write(buffer, 0, readLength);
+            if (totalLength >= limit) {
+                return -1;
+            }
+            readLength = inputStream.read(buffer);
+        }
+        return totalLength;
     }
-    return totalLength;
-  }
 
-  private final InputStream stream;
-  private TempFileHolder tempFileHolder;
-  private final Object leakHandle = new Object();
-  private LazyCleaner.Cleanable<IOException> cleaner;
-  private final byte [] rawData;
-  private final int offset;
-  private final int length;
+    public InputStream getStream() throws IOException {
+        if (stream != null) {
+            return stream;
+        }
+        TempFileHolder finalizeAction = this.tempFileHolder;
+        if (finalizeAction != null) {
+            return finalizeAction.getStream();
+        }
+
+        return new ByteArrayInputStream(rawData, offset, length);
+    }
+
+    @Override
+    public void close() throws IOException {
+        if (cleaner != null) {
+            cleaner.clean();
+        }
+    }
+
+    public int getLength() {
+        return length;
+    }
+
+    public int getOffset() {
+        return offset;
+    }
+
+    public byte[] getBytes() {
+        return rawData;
+    }
+
+    @Override
+    public String toString() {
+        return "<stream of " + length + " bytes>";
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/util/TempFileHolder.java b/pgjdbc/src/main/java/org/postgresql/util/TempFileHolder.java
index 367ae90..b0a99a6 100644
--- a/pgjdbc/src/main/java/org/postgresql/util/TempFileHolder.java
+++ b/pgjdbc/src/main/java/org/postgresql/util/TempFileHolder.java
@@ -18,38 +18,38 @@ import java.util.logging.Logger;
  */
 class TempFileHolder implements LazyCleaner.CleaningAction<IOException> {
 
-  private static final Logger LOGGER = Logger.getLogger(StreamWrapper.class.getName());
-  private InputStream stream;
-  private Path tempFile;
+    private static final Logger LOGGER = Logger.getLogger(StreamWrapper.class.getName());
+    private InputStream stream;
+    private Path tempFile;
 
-  TempFileHolder(Path tempFile) {
-    this.tempFile = tempFile;
-  }
+    TempFileHolder(Path tempFile) {
+        this.tempFile = tempFile;
+    }
 
-  public InputStream getStream() throws IOException {
-    InputStream stream = this.stream;
-    if (stream == null) {
-      stream = Files.newInputStream(tempFile);
-      this.stream = stream;
+    public InputStream getStream() throws IOException {
+        InputStream stream = this.stream;
+        if (stream == null) {
+            stream = Files.newInputStream(tempFile);
+            this.stream = stream;
+        }
+        return stream;
     }
-    return stream;
-  }
 
-  @Override
-  public void onClean(boolean leak) throws IOException {
-    if (leak) {
-      LOGGER.log(Level.WARNING, GT.tr("StreamWrapper leak detected StreamWrapper.close() was not called. "));
+    @Override
+    public void onClean(boolean leak) throws IOException {
+        if (leak) {
+            LOGGER.log(Level.WARNING, GT.tr("StreamWrapper leak detected StreamWrapper.close() was not called. "));
+        }
+        Path tempFile = this.tempFile;
+        if (tempFile != null) {
+            tempFile.toFile().delete();
+            this.tempFile = null;
+        }
+        InputStream stream = this.stream;
+        if (stream != null) {
+            stream.close();
+            this.stream = null;
+        }
     }
-    Path tempFile = this.tempFile;
-    if (tempFile != null) {
-      tempFile.toFile().delete();
-      this.tempFile = null;
-    }
-    InputStream stream = this.stream;
-    if (stream != null) {
-      stream.close();
-      this.stream = null;
-    }
-  }
 
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/util/URLCoder.java b/pgjdbc/src/main/java/org/postgresql/util/URLCoder.java
index d92ab94..33baf1a 100644
--- a/pgjdbc/src/main/java/org/postgresql/util/URLCoder.java
+++ b/pgjdbc/src/main/java/org/postgresql/util/URLCoder.java
@@ -18,42 +18,42 @@ import java.net.URLEncoder;
  * driver.</p>
  */
 public final class URLCoder {
-  private static final String ENCODING_FOR_URL =
-      System.getProperty("postgresql.url.encoding", "UTF-8");
+    private static final String ENCODING_FOR_URL =
+            System.getProperty("postgresql.url.encoding", "UTF-8");
 
 
-  public URLCoder() {
-  }
-
-  /**
-   * Decodes {@code x-www-form-urlencoded} string into Java string.
-   *
-   * @param encoded encoded value
-   * @return decoded value
-   * @see URLDecoder#decode(String, String)
-   */
-  public static String decode(String encoded) {
-    try {
-      return URLDecoder.decode(encoded, ENCODING_FOR_URL);
-    } catch (UnsupportedEncodingException e) {
-      throw new IllegalStateException(
-          "Unable to decode URL entry via " + ENCODING_FOR_URL + ". This should not happen", e);
+    public URLCoder() {
     }
-  }
 
-  /**
-   * Encodes Java string into {@code x-www-form-urlencoded} format
-   *
-   * @param plain input value
-   * @return encoded value
-   * @see URLEncoder#encode(String, String)
-   */
-  public static String encode(String plain) {
-    try {
-      return URLEncoder.encode(plain, "UTF-8");
-    } catch (UnsupportedEncodingException e) {
-      throw new IllegalStateException(
-          "Unable to encode URL entry via " + ENCODING_FOR_URL + ". This should not happen", e);
+    /**
+     * Decodes {@code x-www-form-urlencoded} string into Java string.
+     *
+     * @param encoded encoded value
+     * @return decoded value
+     * @see URLDecoder#decode(String, String)
+     */
+    public static String decode(String encoded) {
+        try {
+            return URLDecoder.decode(encoded, ENCODING_FOR_URL);
+        } catch (UnsupportedEncodingException e) {
+            throw new IllegalStateException(
+                    "Unable to decode URL entry via " + ENCODING_FOR_URL + ". This should not happen", e);
+        }
+    }
+
+    /**
+     * Encodes Java string into {@code x-www-form-urlencoded} format
+     *
+     * @param plain input value
+     * @return encoded value
+     * @see URLEncoder#encode(String, String)
+     */
+    public static String encode(String plain) {
+        try {
+            return URLEncoder.encode(plain, "UTF-8");
+        } catch (UnsupportedEncodingException e) {
+            throw new IllegalStateException(
+                    "Unable to encode URL entry via " + ENCODING_FOR_URL + ". This should not happen", e);
+        }
     }
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/xa/PGXAConnection.java b/pgjdbc/src/main/java/org/postgresql/xa/PGXAConnection.java
index b785722..72f2cb3 100644
--- a/pgjdbc/src/main/java/org/postgresql/xa/PGXAConnection.java
+++ b/pgjdbc/src/main/java/org/postgresql/xa/PGXAConnection.java
@@ -5,14 +5,6 @@
 
 package org.postgresql.xa;
 
-import org.postgresql.PGConnection;
-import org.postgresql.core.BaseConnection;
-import org.postgresql.core.TransactionState;
-import org.postgresql.ds.PGPooledConnection;
-import org.postgresql.util.GT;
-import org.postgresql.util.PSQLException;
-import org.postgresql.util.PSQLState;
-
 import java.lang.reflect.InvocationHandler;
 import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Method;
@@ -24,11 +16,17 @@ import java.sql.Statement;
 import java.util.LinkedList;
 import java.util.logging.Level;
 import java.util.logging.Logger;
-
 import javax.sql.XAConnection;
 import javax.transaction.xa.XAException;
 import javax.transaction.xa.XAResource;
 import javax.transaction.xa.Xid;
+import org.postgresql.PGConnection;
+import org.postgresql.core.BaseConnection;
+import org.postgresql.core.TransactionState;
+import org.postgresql.ds.PGPooledConnection;
+import org.postgresql.util.GT;
+import org.postgresql.util.PSQLException;
+import org.postgresql.util.PSQLState;
 
 /**
  * <p>The PostgreSQL implementation of {@link XAResource}.</p>
@@ -42,647 +40,647 @@ import javax.transaction.xa.Xid;
  */
 public class PGXAConnection extends PGPooledConnection implements XAConnection, XAResource {
 
-  private static final Logger LOGGER = Logger.getLogger(PGXAConnection.class.getName());
+    private static final Logger LOGGER = Logger.getLogger(PGXAConnection.class.getName());
 
-  /**
-   * Underlying physical database connection. It's used for issuing PREPARE TRANSACTION/ COMMIT
-   * PREPARED/ROLLBACK PREPARED commands.
-   */
-  private final BaseConnection conn;
+    /**
+     * Underlying physical database connection. It's used for issuing PREPARE TRANSACTION/ COMMIT
+     * PREPARED/ROLLBACK PREPARED commands.
+     */
+    private final BaseConnection conn;
 
-  private Xid currentXid;
+    private Xid currentXid;
 
-  private State state;
-  private Xid preparedXid;
-  private boolean committedOrRolledBack;
-
-  /*
-   * When an XA transaction is started, we put the underlying connection into non-autocommit mode.
-   * The old setting is saved in localAutoCommitMode, so that we can restore it when the XA
-   * transaction ends and the connection returns into local transaction mode.
-   */
-  private boolean localAutoCommitMode = true;
-
-  private void debug(String s) {
-    if (LOGGER.isLoggable(Level.FINEST)) {
-      LOGGER.log(Level.FINEST, "XAResource {0}: {1}", new Object[]{Integer.toHexString(this.hashCode()), s});
-    }
-  }
-
-  public PGXAConnection(BaseConnection conn) throws SQLException {
-    super(conn, true, true);
-    this.conn = conn;
-    this.state = State.IDLE;
-  }
-
-  /**
-   * XAConnection interface.
-   */
-  @SuppressWarnings("rawtypes")
-  @Override
-  public Connection getConnection() throws SQLException {
-    Connection conn = super.getConnection();
-
-    // When we're outside an XA transaction, autocommit
-    // is supposed to be true, per usual JDBC convention.
-    // When an XA transaction is in progress, it should be
-    // false.
-    if (state == State.IDLE) {
-      conn.setAutoCommit(true);
-    }
+    private State state;
+    private Xid preparedXid;
+    private boolean committedOrRolledBack;
 
     /*
-     * Wrap the connection in a proxy to forbid application from fiddling with transaction state
-     * directly during an XA transaction
+     * When an XA transaction is started, we put the underlying connection into non-autocommit mode.
+     * The old setting is saved in localAutoCommitMode, so that we can restore it when the XA
+     * transaction ends and the connection returns into local transaction mode.
      */
-    ConnectionHandler handler = new ConnectionHandler(conn);
-    return (Connection) Proxy.newProxyInstance(getClass().getClassLoader(),
-        new Class[]{Connection.class, PGConnection.class}, handler);
-  }
+    private boolean localAutoCommitMode = true;
 
-  @Override
-  public XAResource getXAResource() {
-    return this;
-  }
+    public PGXAConnection(BaseConnection conn) throws SQLException {
+        super(conn, true, true);
+        this.conn = conn;
+        this.state = State.IDLE;
+    }
 
-  /*
-   * A java.sql.Connection proxy class to forbid calls to transaction control methods while the
-   * connection is used for an XA transaction.
-   */
-  private class ConnectionHandler implements InvocationHandler {
-    private final Connection con;
+    private void debug(String s) {
+        if (LOGGER.isLoggable(Level.FINEST)) {
+            LOGGER.log(Level.FINEST, "XAResource {0}: {1}", new Object[]{Integer.toHexString(this.hashCode()), s});
+        }
+    }
 
-    ConnectionHandler(Connection con) {
-      this.con = con;
+    /**
+     * XAConnection interface.
+     */
+    @SuppressWarnings("rawtypes")
+    @Override
+    public Connection getConnection() throws SQLException {
+        Connection conn = super.getConnection();
+
+        // When we're outside an XA transaction, autocommit
+        // is supposed to be true, per usual JDBC convention.
+        // When an XA transaction is in progress, it should be
+        // false.
+        if (state == State.IDLE) {
+            conn.setAutoCommit(true);
+        }
+
+        /*
+         * Wrap the connection in a proxy to forbid application from fiddling with transaction state
+         * directly during an XA transaction
+         */
+        ConnectionHandler handler = new ConnectionHandler(conn);
+        return (Connection) Proxy.newProxyInstance(getClass().getClassLoader(),
+                new Class[]{Connection.class, PGConnection.class}, handler);
     }
 
     @Override
-    @SuppressWarnings("throwing.nullable")
-    public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
-      if (state != State.IDLE) {
-        String methodName = method.getName();
-        if ("commit".equals(methodName)
-            || "rollback".equals(methodName)
-            || "setSavePoint".equals(methodName)
-            || ("setAutoCommit".equals(methodName) && (Boolean) args[0])) {
-          throw new PSQLException(
-              GT.tr(
-                  "Transaction control methods setAutoCommit(true), commit, rollback and setSavePoint not allowed while an XA transaction is active."),
-              PSQLState.OBJECT_NOT_IN_STATE);
+    public XAResource getXAResource() {
+        return this;
+    }
+
+    /**
+     * <p>Preconditions:</p>
+     * <ol>
+     *     <li>Flags must be one of TMNOFLAGS, TMRESUME or TMJOIN</li>
+     *     <li>xid != null</li>
+     *     <li>Connection must not be associated with a transaction</li>
+     *     <li>The TM hasn't seen the xid before</li>
+     * </ol>
+     *
+     * <p>Implementation deficiency preconditions:</p>
+     * <ol>
+     *     <li>TMRESUME not supported.</li>
+     *     <li>If flags is TMJOIN, we must be in ended state, and xid must be the current transaction</li>
+     *     <li>Unless flags is TMJOIN, previous transaction using the connection must be committed or prepared or rolled
+     *     back</li>
+     * </ol>
+     *
+     * <p>Postconditions:</p>
+     * <ol>
+     *     <li>Connection is associated with the transaction</li>
+     * </ol>
+     */
+    @Override
+    public void start(Xid xid, int flags) throws XAException {
+        if (LOGGER.isLoggable(Level.FINEST)) {
+            debug("starting transaction xid = " + xid);
         }
-      }
-      try {
-        /*
-         * If the argument to equals-method is also a wrapper, present the original unwrapped
-         * connection to the underlying equals method.
-         */
-        if ("equals".equals(method.getName()) && args.length == 1) {
-          Object arg = args[0];
-          if (arg != null && Proxy.isProxyClass(arg.getClass())) {
-            InvocationHandler h = Proxy.getInvocationHandler(arg);
-            if (h instanceof ConnectionHandler) {
-              // unwrap argument
-              args = new Object[]{((ConnectionHandler) h).con};
+
+        // Check preconditions
+        if (flags != XAResource.TMNOFLAGS && flags != XAResource.TMRESUME
+                && flags != XAResource.TMJOIN) {
+            throw new PGXAException(GT.tr("Invalid flags {0}", flags), XAException.XAER_INVAL);
+        }
+
+        if (xid == null) {
+            throw new PGXAException(GT.tr("xid must not be null"), XAException.XAER_INVAL);
+        }
+
+        if (state == State.ACTIVE) {
+            throw new PGXAException(GT.tr("Connection is busy with another transaction"),
+                    XAException.XAER_PROTO);
+        }
+
+        // We can't check precondition 4 easily, so we don't. Duplicate xid will be caught in the prepare
+        // phase.
+
+        // Check implementation deficiency preconditions
+        if (flags == TMRESUME) {
+            throw new PGXAException(GT.tr("suspend/resume not implemented"), XAException.XAER_RMERR);
+        }
+
+        // It's ok to join an ended transaction. WebLogic does that.
+        if (flags == TMJOIN) {
+            if (state != State.ENDED) {
+                throw new PGXAException(
+                        GT.tr(
+                                "Invalid protocol state requested. Attempted transaction interleaving is not supported. xid={0}, currentXid={1}, state={2}, flags={3}",
+                                xid, currentXid, state, flags), XAException.XAER_RMERR);
             }
-          }
-        }
 
-        return method.invoke(con, args);
-      } catch (InvocationTargetException ex) {
-        throw ex.getTargetException();
-      }
-    }
-  }
-
-  /**
-   * <p>Preconditions:</p>
-   * <ol>
-   *     <li>Flags must be one of TMNOFLAGS, TMRESUME or TMJOIN</li>
-   *     <li>xid != null</li>
-   *     <li>Connection must not be associated with a transaction</li>
-   *     <li>The TM hasn't seen the xid before</li>
-   * </ol>
-   *
-   * <p>Implementation deficiency preconditions:</p>
-   * <ol>
-   *     <li>TMRESUME not supported.</li>
-   *     <li>If flags is TMJOIN, we must be in ended state, and xid must be the current transaction</li>
-   *     <li>Unless flags is TMJOIN, previous transaction using the connection must be committed or prepared or rolled
-   *     back</li>
-   * </ol>
-   *
-   * <p>Postconditions:</p>
-   * <ol>
-   *     <li>Connection is associated with the transaction</li>
-   * </ol>
-   */
-  @Override
-  public void start(Xid xid, int flags) throws XAException {
-    if (LOGGER.isLoggable(Level.FINEST)) {
-      debug("starting transaction xid = " + xid);
-    }
-
-    // Check preconditions
-    if (flags != XAResource.TMNOFLAGS && flags != XAResource.TMRESUME
-        && flags != XAResource.TMJOIN) {
-      throw new PGXAException(GT.tr("Invalid flags {0}", flags), XAException.XAER_INVAL);
-    }
-
-    if (xid == null) {
-      throw new PGXAException(GT.tr("xid must not be null"), XAException.XAER_INVAL);
-    }
-
-    if (state == State.ACTIVE) {
-      throw new PGXAException(GT.tr("Connection is busy with another transaction"),
-          XAException.XAER_PROTO);
-    }
-
-    // We can't check precondition 4 easily, so we don't. Duplicate xid will be catched in prepare
-    // phase.
-
-    // Check implementation deficiency preconditions
-    if (flags == TMRESUME) {
-      throw new PGXAException(GT.tr("suspend/resume not implemented"), XAException.XAER_RMERR);
-    }
-
-    // It's ok to join an ended transaction. WebLogic does that.
-    if (flags == TMJOIN) {
-      if (state != State.ENDED) {
-        throw new PGXAException(
-            GT.tr(
-                "Invalid protocol state requested. Attempted transaction interleaving is not supported. xid={0}, currentXid={1}, state={2}, flags={3}",
-                xid, currentXid, state, flags), XAException.XAER_RMERR);
-      }
-
-      if (!xid.equals(currentXid)) {
-        throw new PGXAException(
-            GT.tr(
-                "Invalid protocol state requested. Attempted transaction interleaving is not supported. xid={0}, currentXid={1}, state={2}, flags={3}",
-                xid, currentXid, state, flags), XAException.XAER_RMERR);
-      }
-    } else if (state == State.ENDED) {
-      throw new PGXAException(GT.tr("Invalid protocol state requested. Attempted transaction interleaving is not supported. xid={0}, currentXid={1}, state={2}, flags={3}", xid, currentXid, state, flags),
-          XAException.XAER_RMERR);
-    }
-
-    // Only need save localAutoCommitMode for NOFLAGS, TMRESUME and TMJOIN already saved old
-    // localAutoCommitMode.
-    if (flags == TMNOFLAGS) {
-      try {
-        localAutoCommitMode = conn.getAutoCommit();
-        conn.setAutoCommit(false);
-      } catch (SQLException ex) {
-        throw new PGXAException(GT.tr("Error disabling autocommit"), ex, XAException.XAER_RMERR);
-      }
-    }
-
-    // Preconditions are met, Associate connection with the transaction
-    state = State.ACTIVE;
-    currentXid = xid;
-    preparedXid = null;
-    committedOrRolledBack = false;
-  }
-
-  /**
-   * <p>Preconditions:</p>
-   * <ol>
-   *     <li>Flags is one of TMSUCCESS, TMFAIL, TMSUSPEND</li>
-   *     <li>xid != null</li>
-   *     <li>Connection is associated with transaction xid</li>
-   * </ol>
-   *
-   * <p>Implementation deficiency preconditions:</p>
-   * <ol>
-   *     <li>Flags is not TMSUSPEND</li>
-   * </ol>
-   *
-   * <p>Postconditions:</p>
-   * <ol>
-   *     <li>Connection is disassociated from the transaction.</li>
-   * </ol>
-   */
-  @Override
-  public void end(Xid xid, int flags) throws XAException {
-    if (LOGGER.isLoggable(Level.FINEST)) {
-      debug("ending transaction xid = " + xid);
-    }
-
-    // Check preconditions
-
-    if (flags != XAResource.TMSUSPEND && flags != XAResource.TMFAIL
-        && flags != XAResource.TMSUCCESS) {
-      throw new PGXAException(GT.tr("Invalid flags {0}", flags), XAException.XAER_INVAL);
-    }
-
-    if (xid == null) {
-      throw new PGXAException(GT.tr("xid must not be null"), XAException.XAER_INVAL);
-    }
-
-    if (state != State.ACTIVE || !xid.equals(currentXid)) {
-      throw new PGXAException(GT.tr("tried to call end without corresponding start call. state={0}, start xid={1}, currentXid={2}, preparedXid={3}", state, xid, currentXid, preparedXid),
-          XAException.XAER_PROTO);
-    }
-
-    // Check implementation deficiency preconditions
-    if (flags == XAResource.TMSUSPEND) {
-      throw new PGXAException(GT.tr("suspend/resume not implemented"), XAException.XAER_RMERR);
-    }
-
-    // We ignore TMFAIL. It's just a hint to the RM. We could roll back immediately
-    // if TMFAIL was given.
-
-    // All clear. We don't have any real work to do.
-    state = State.ENDED;
-  }
-
-  /**
-   * <p>Prepares transaction. Preconditions:</p>
-   * <ol>
-   *     <li>xid != null</li>
-   *     <li>xid is in ended state</li>
-   * </ol>
-   *
-   * <p>Implementation deficiency preconditions:</p>
-   * <ol>
-   *     <li>xid was associated with this connection</li>
-   * </ol>
-   *
-   * <p>Postconditions:</p>
-   * <ol>
-   *     <li>Transaction is prepared</li>
-   * </ol>
-   */
-  @Override
-  public int prepare(Xid xid) throws XAException {
-    if (LOGGER.isLoggable(Level.FINEST)) {
-      debug("preparing transaction xid = " + xid);
-    }
-
-    // Check preconditions
-    if (currentXid == null && preparedXid != null) {
-      if (LOGGER.isLoggable(Level.FINEST)) {
-        debug("Prepare xid " + xid + " but current connection is not attached to a transaction"
-            + " while it was prepared in past with prepared xid " + preparedXid);
-      }
-      throw new PGXAException(GT.tr(
-          "Preparing already prepared transaction, the prepared xid {0}, prepare xid={1}", preparedXid, xid), XAException.XAER_PROTO);
-    } else if (currentXid == null) {
-      throw new PGXAException(GT.tr(
-          "Current connection does not have an associated xid. prepare xid={0}", xid), XAException.XAER_NOTA);
-    }
-    if (!currentXid.equals(xid)) {
-      if (LOGGER.isLoggable(Level.FINEST)) {
-        debug("Error to prepare xid " + xid + ", the current connection already bound with xid " + currentXid);
-      }
-      throw new PGXAException(GT.tr(
-          "Not implemented: Prepare must be issued using the same connection that started the transaction. currentXid={0}, prepare xid={1}", currentXid, xid),
-          XAException.XAER_RMERR);
-    }
-    if (state != State.ENDED) {
-      throw new PGXAException(GT.tr("Prepare called before end. prepare xid={0}, state={1}", xid), XAException.XAER_INVAL);
-    }
-
-    state = State.IDLE;
-    preparedXid = currentXid;
-    currentXid = null;
-
-    try {
-      String s = RecoveredXid.xidToString(xid);
-
-      Statement stmt = conn.createStatement();
-      try {
-        stmt.executeUpdate("PREPARE TRANSACTION '" + s + "'");
-      } finally {
-        stmt.close();
-      }
-      conn.setAutoCommit(localAutoCommitMode);
-
-      return XA_OK;
-    } catch (SQLException ex) {
-      throw new PGXAException(GT.tr("Error preparing transaction. prepare xid={0}", xid), ex, mapSQLStateToXAErrorCode(ex));
-    }
-  }
-
-  /**
-   * <p>Recovers transaction. Preconditions:</p>
-   * <ol>
-   *     <li>flag must be one of TMSTARTRSCAN, TMENDRSCAN, TMNOFLAGS or TMSTARTTRSCAN | TMENDRSCAN</li>
-   *     <li>If flag isn't TMSTARTRSCAN or TMSTARTRSCAN | TMENDRSCAN, a recovery scan must be in progress</li>
-   * </ol>
-   *
-   * <p>Postconditions:</p>
-   * <ol>
-   *     <li>list of prepared xids is returned</li>
-   * </ol>
-   */
-  @Override
-  public Xid[] recover(int flag) throws XAException {
-    // Check preconditions
-    if (flag != TMSTARTRSCAN && flag != TMENDRSCAN && flag != TMNOFLAGS
-        && flag != (TMSTARTRSCAN | TMENDRSCAN)) {
-      throw new PGXAException(GT.tr("Invalid flags {0}", flag), XAException.XAER_INVAL);
-    }
-
-    // We don't check for precondition 2, because we would have to add some additional state in
-    // this object to keep track of recovery scans.
-
-    // All clear. We return all the xids in the first TMSTARTRSCAN call, and always return
-    // an empty array otherwise.
-    if ((flag & TMSTARTRSCAN) == 0) {
-      return new Xid[0];
-    } else {
-      try {
-        Statement stmt = conn.createStatement();
-        try {
-          // If this connection is simultaneously used for a transaction,
-          // this query gets executed inside that transaction. It's OK,
-          // except if the transaction is in abort-only state and the
-          // backed refuses to process new queries. Hopefully not a problem
-          // in practise.
-          ResultSet rs = stmt.executeQuery(
-              "SELECT gid FROM pg_prepared_xacts where database = current_database()");
-          LinkedList<Xid> l = new LinkedList<>();
-          while (rs.next()) {
-            Xid recoveredXid = RecoveredXid.stringToXid(rs.getString(1));
-            if (recoveredXid != null) {
-              l.add(recoveredXid);
+            if (!xid.equals(currentXid)) {
+                throw new PGXAException(
+                        GT.tr(
+                                "Invalid protocol state requested. Attempted transaction interleaving is not supported. xid={0}, currentXid={1}, state={2}, flags={3}",
+                                xid, currentXid, state, flags), XAException.XAER_RMERR);
             }
-          }
-          rs.close();
-
-          return l.toArray(new Xid[0]);
-        } finally {
-          stmt.close();
+        } else if (state == State.ENDED) {
+            throw new PGXAException(GT.tr("Invalid protocol state requested. Attempted transaction interleaving is not supported. xid={0}, currentXid={1}, state={2}, flags={3}", xid, currentXid, state, flags),
+                    XAException.XAER_RMERR);
         }
-      } catch (SQLException ex) {
-        throw new PGXAException(GT.tr("Error during recover"), ex, XAException.XAER_RMERR);
-      }
-    }
-  }
 
-  /**
-   * <p>Preconditions:</p>
-   * <ol>
-   *     <li>xid is known to the RM or it's in prepared state</li>
-   * </ol>
-   *
-   * <p>Implementation deficiency preconditions:</p>
-   * <ol>
-   *     <li>xid must be associated with this connection if it's not in prepared state.</li>
-   * </ol>
-   *
-   * <p>Postconditions:</p>
-   * <ol>
-   *     <li>Transaction is rolled back and disassociated from connection</li>
-   * </ol>
-   */
-  @Override
-  public void rollback(Xid xid) throws XAException {
-    if (LOGGER.isLoggable(Level.FINEST)) {
-      debug("rolling back xid = " + xid);
+        // Only need to save localAutoCommitMode for TMNOFLAGS; TMRESUME and TMJOIN already
+        // saved the old localAutoCommitMode.
+        if (flags == TMNOFLAGS) {
+            try {
+                localAutoCommitMode = conn.getAutoCommit();
+                conn.setAutoCommit(false);
+            } catch (SQLException ex) {
+                throw new PGXAException(GT.tr("Error disabling autocommit"), ex, XAException.XAER_RMERR);
+            }
+        }
+
+        // Preconditions are met, Associate connection with the transaction
+        state = State.ACTIVE;
+        currentXid = xid;
+        preparedXid = null;
+        committedOrRolledBack = false;
     }
 
-    // We don't explicitly check precondition 1.
+    /**
+     * <p>Preconditions:</p>
+     * <ol>
+     *     <li>Flags is one of TMSUCCESS, TMFAIL, TMSUSPEND</li>
+     *     <li>xid != null</li>
+     *     <li>Connection is associated with transaction xid</li>
+     * </ol>
+     *
+     * <p>Implementation deficiency preconditions:</p>
+     * <ol>
+     *     <li>Flags is not TMSUSPEND</li>
+     * </ol>
+     *
+     * <p>Postconditions:</p>
+     * <ol>
+     *     <li>Connection is disassociated from the transaction.</li>
+     * </ol>
+     */
+    @Override
+    public void end(Xid xid, int flags) throws XAException {
+        if (LOGGER.isLoggable(Level.FINEST)) {
+            debug("ending transaction xid = " + xid);
+        }
+
+        // Check preconditions
+
+        if (flags != XAResource.TMSUSPEND && flags != XAResource.TMFAIL
+                && flags != XAResource.TMSUCCESS) {
+            throw new PGXAException(GT.tr("Invalid flags {0}", flags), XAException.XAER_INVAL);
+        }
+
+        if (xid == null) {
+            throw new PGXAException(GT.tr("xid must not be null"), XAException.XAER_INVAL);
+        }
+
+        if (state != State.ACTIVE || !xid.equals(currentXid)) {
+            throw new PGXAException(GT.tr("tried to call end without corresponding start call. state={0}, start xid={1}, currentXid={2}, preparedXid={3}", state, xid, currentXid, preparedXid),
+                    XAException.XAER_PROTO);
+        }
+
+        // Check implementation deficiency preconditions
+        if (flags == XAResource.TMSUSPEND) {
+            throw new PGXAException(GT.tr("suspend/resume not implemented"), XAException.XAER_RMERR);
+        }
+
+        // We ignore TMFAIL. It's just a hint to the RM. We could roll back immediately
+        // if TMFAIL was given.
+
+        // All clear. We don't have any real work to do.
+        state = State.ENDED;
+    }
+
+    /**
+     * <p>Prepares transaction. Preconditions:</p>
+     * <ol>
+     *     <li>xid != null</li>
+     *     <li>xid is in ended state</li>
+     * </ol>
+     *
+     * <p>Implementation deficiency preconditions:</p>
+     * <ol>
+     *     <li>xid was associated with this connection</li>
+     * </ol>
+     *
+     * <p>Postconditions:</p>
+     * <ol>
+     *     <li>Transaction is prepared</li>
+     * </ol>
+     */
+    @Override
+    public int prepare(Xid xid) throws XAException {
+        if (LOGGER.isLoggable(Level.FINEST)) {
+            debug("preparing transaction xid = " + xid);
+        }
+
+        // Check preconditions
+        if (currentXid == null && preparedXid != null) {
+            if (LOGGER.isLoggable(Level.FINEST)) {
+                debug("Prepare xid " + xid + " but current connection is not attached to a transaction"
+                        + " while it was prepared in past with prepared xid " + preparedXid);
+            }
+            throw new PGXAException(GT.tr(
+                    "Preparing already prepared transaction, the prepared xid {0}, prepare xid={1}", preparedXid, xid), XAException.XAER_PROTO);
+        } else if (currentXid == null) {
+            throw new PGXAException(GT.tr(
+                    "Current connection does not have an associated xid. prepare xid={0}", xid), XAException.XAER_NOTA);
+        }
+        if (!currentXid.equals(xid)) {
+            if (LOGGER.isLoggable(Level.FINEST)) {
+                debug("Error to prepare xid " + xid + ", the current connection already bound with xid " + currentXid);
+            }
+            throw new PGXAException(GT.tr(
+                    "Not implemented: Prepare must be issued using the same connection that started the transaction. currentXid={0}, prepare xid={1}", currentXid, xid),
+                    XAException.XAER_RMERR);
+        }
+        if (state != State.ENDED) {
+            throw new PGXAException(GT.tr("Prepare called before end. prepare xid={0}, state={1}", xid, state), XAException.XAER_INVAL);
+        }
 
-    try {
-      if (currentXid != null && currentXid.equals(xid)) {
         state = State.IDLE;
+        preparedXid = currentXid;
         currentXid = null;
-        conn.rollback();
-        conn.setAutoCommit(localAutoCommitMode);
-      } else {
-        String s = RecoveredXid.xidToString(xid);
 
-        conn.setAutoCommit(true);
-        Statement stmt = conn.createStatement();
         try {
-          stmt.executeUpdate("ROLLBACK PREPARED '" + s + "'");
-        } finally {
-          stmt.close();
+            String s = RecoveredXid.xidToString(xid);
+
+            Statement stmt = conn.createStatement();
+            try {
+                stmt.executeUpdate("PREPARE TRANSACTION '" + s + "'");
+            } finally {
+                stmt.close();
+            }
+            conn.setAutoCommit(localAutoCommitMode);
+
+            return XA_OK;
+        } catch (SQLException ex) {
+            throw new PGXAException(GT.tr("Error preparing transaction. prepare xid={0}", xid), ex, mapSQLStateToXAErrorCode(ex));
         }
-      }
-      committedOrRolledBack = true;
-    } catch (SQLException ex) {
-      int errorCode = XAException.XAER_RMERR;
-      if (PSQLState.UNDEFINED_OBJECT.getState().equals(ex.getSQLState())) {
-        if (committedOrRolledBack || !xid.equals(preparedXid)) {
-          if (LOGGER.isLoggable(Level.FINEST)) {
-            debug("rolling back xid " + xid + " while the connection prepared xid is " + preparedXid
-                + (committedOrRolledBack ? ", but the connection was already committed/rolled-back" : ""));
-          }
-          errorCode = XAException.XAER_NOTA;
+    }
+
+    /**
+     * <p>Recovers transaction. Preconditions:</p>
+     * <ol>
+     *     <li>flag must be one of TMSTARTRSCAN, TMENDRSCAN, TMNOFLAGS or TMSTARTRSCAN | TMENDRSCAN</li>
+     *     <li>If flag isn't TMSTARTRSCAN or TMSTARTRSCAN | TMENDRSCAN, a recovery scan must be in progress</li>
+     * </ol>
+     *
+     * <p>Postconditions:</p>
+     * <ol>
+     *     <li>list of prepared xids is returned</li>
+     * </ol>
+     */
+    @Override
+    public Xid[] recover(int flag) throws XAException {
+        // Check preconditions
+        if (flag != TMSTARTRSCAN && flag != TMENDRSCAN && flag != TMNOFLAGS
+                && flag != (TMSTARTRSCAN | TMENDRSCAN)) {
+            throw new PGXAException(GT.tr("Invalid flags {0}", flag), XAException.XAER_INVAL);
         }
-      }
-      if (PSQLState.isConnectionError(ex.getSQLState())) {
+
+        // We don't check for precondition 2, because we would have to add some additional state in
+        // this object to keep track of recovery scans.
+
+        // All clear. We return all the xids in the first TMSTARTRSCAN call, and always return
+        // an empty array otherwise.
+        if ((flag & TMSTARTRSCAN) == 0) {
+            return new Xid[0];
+        } else {
+            try {
+                Statement stmt = conn.createStatement();
+                try {
+                    // If this connection is simultaneously used for a transaction,
+                    // this query gets executed inside that transaction. It's OK,
+                    // except if the transaction is in abort-only state and the
+                    // backend refuses to process new queries. Hopefully not a problem
+                    // in practice.
+                    ResultSet rs = stmt.executeQuery(
+                            "SELECT gid FROM pg_prepared_xacts where database = current_database()");
+                    LinkedList<Xid> l = new LinkedList<>();
+                    while (rs.next()) {
+                        Xid recoveredXid = RecoveredXid.stringToXid(rs.getString(1));
+                        if (recoveredXid != null) {
+                            l.add(recoveredXid);
+                        }
+                    }
+                    rs.close();
+
+                    return l.toArray(new Xid[0]);
+                } finally {
+                    stmt.close();
+                }
+            } catch (SQLException ex) {
+                throw new PGXAException(GT.tr("Error during recover"), ex, XAException.XAER_RMERR);
+            }
+        }
+    }
+
+    /**
+     * <p>Preconditions:</p>
+     * <ol>
+     *     <li>xid is known to the RM or it's in prepared state</li>
+     * </ol>
+     *
+     * <p>Implementation deficiency preconditions:</p>
+     * <ol>
+     *     <li>xid must be associated with this connection if it's not in prepared state.</li>
+     * </ol>
+     *
+     * <p>Postconditions:</p>
+     * <ol>
+     *     <li>Transaction is rolled back and disassociated from connection</li>
+     * </ol>
+     */
+    @Override
+    public void rollback(Xid xid) throws XAException {
         if (LOGGER.isLoggable(Level.FINEST)) {
-          debug("rollback connection failure (sql error code " + ex.getSQLState() + "), reconnection could be expected");
+            debug("rolling back xid = " + xid);
         }
-        errorCode = XAException.XAER_RMFAIL;
-      }
-      throw new PGXAException(GT.tr("Error rolling back prepared transaction. rollback xid={0}, preparedXid={1}, currentXid={2}", xid, preparedXid, currentXid), ex, errorCode);
-    }
-  }
 
-  @Override
-  public void commit(Xid xid, boolean onePhase) throws XAException {
-    if (LOGGER.isLoggable(Level.FINEST)) {
-      debug("committing xid = " + xid + (onePhase ? " (one phase) " : " (two phase)"));
-    }
+        // We don't explicitly check precondition 1.
 
-    if (xid == null) {
-      throw new PGXAException(GT.tr("xid must not be null"), XAException.XAER_INVAL);
-    }
+        try {
+            if (currentXid != null && currentXid.equals(xid)) {
+                state = State.IDLE;
+                currentXid = null;
+                conn.rollback();
+                conn.setAutoCommit(localAutoCommitMode);
+            } else {
+                String s = RecoveredXid.xidToString(xid);
 
-    if (onePhase) {
-      commitOnePhase(xid);
-    } else {
-      commitPrepared(xid);
-    }
-  }
-
-  /**
-   * <p>Preconditions:</p>
-   * <ol>
-   *     <li>xid must in ended state.</li>
-   * </ol>
-   *
-   * <p>Implementation deficiency preconditions:</p>
-   * <ol>
-   *     <li>this connection must have been used to run the transaction</li>
-   * </ol>
-   *
-   * <p>Postconditions:</p>
-   * <ol>
-   *     <li>Transaction is committed</li>
-   * </ol>
-   */
-  private void commitOnePhase(Xid xid) throws XAException {
-    try {
-      // Check preconditions
-      if (xid.equals(preparedXid)) { // TODO: check if the condition should be negated
-        throw new PGXAException(GT.tr("One-phase commit called for xid {0} but connection was prepared with xid {1}",
-            xid, preparedXid), XAException.XAER_PROTO);
-      }
-      if (currentXid == null && !committedOrRolledBack) {
-        // In fact, we don't know if xid is bogus, or if it just wasn't associated with this connection.
-        // Assume it's our fault.
-        // TODO: pick proper error message. Current one does not clarify what went wrong
-        throw new PGXAException(GT.tr(
-            "Not implemented: one-phase commit must be issued using the same connection that was used to start it", xid),
-            XAException.XAER_RMERR);
-      }
-      if (!xid.equals(currentXid) || committedOrRolledBack) {
-        throw new PGXAException(GT.tr("One-phase commit with unknown xid. commit xid={0}, currentXid={1}",
-            xid, currentXid), XAException.XAER_NOTA);
-      }
-      if (state != State.ENDED) {
-        throw new PGXAException(GT.tr("commit called before end. commit xid={0}, state={1}", xid, state), XAException.XAER_PROTO);
-      }
-
-      // Preconditions are met. Commit
-      state = State.IDLE;
-      currentXid = null;
-      committedOrRolledBack = true;
-
-      conn.commit();
-      conn.setAutoCommit(localAutoCommitMode);
-    } catch (SQLException ex) {
-      throw new PGXAException(GT.tr("Error during one-phase commit. commit xid={0}", xid), ex, mapSQLStateToXAErrorCode(ex));
-    }
-  }
-
-  /**
-   * <p>Commits prepared transaction. Preconditions:</p>
-   * <ol>
-   *     <li>xid must be in prepared state in the server</li>
-   * </ol>
-   *
-   * <p>Implementation deficiency preconditions:</p>
-   * <ol>
-   *     <li>Connection must be in idle state</li>
-   * </ol>
-   *
-   * <p>Postconditions:</p>
-   * <ol>
-   *     <li>Transaction is committed</li>
-   * </ol>
-   */
-  private void commitPrepared(Xid xid) throws XAException {
-    try {
-      // Check preconditions. The connection mustn't be used for another
-      // other XA or local transaction, or the COMMIT PREPARED command
-      // would mess it up.
-      if (state != State.IDLE
-          || conn.getTransactionState() != TransactionState.IDLE) {
-        throw new PGXAException(
-            GT.tr("Not implemented: 2nd phase commit must be issued using an idle connection. commit xid={0}, currentXid={1}, state={2}, transactionState={3}", xid, currentXid, state, conn.getTransactionState()),
-            XAException.XAER_RMERR);
-      }
-
-      String s = RecoveredXid.xidToString(xid);
-
-      localAutoCommitMode = conn.getAutoCommit();
-      conn.setAutoCommit(true);
-      Statement stmt = conn.createStatement();
-      try {
-        stmt.executeUpdate("COMMIT PREPARED '" + s + "'");
-      } finally {
-        stmt.close();
-        conn.setAutoCommit(localAutoCommitMode);
-      }
-      committedOrRolledBack = true;
-    } catch (SQLException ex) {
-      int errorCode = XAException.XAER_RMERR;
-      if (PSQLState.UNDEFINED_OBJECT.getState().equals(ex.getSQLState())) {
-        if (committedOrRolledBack || !xid.equals(preparedXid)) {
-          if (LOGGER.isLoggable(Level.FINEST)) {
-            debug("committing xid " + xid + " while the connection prepared xid is " + preparedXid
-                + (committedOrRolledBack ? ", but the connection was already committed/rolled-back" : ""));
-          }
-          errorCode = XAException.XAER_NOTA;
+                conn.setAutoCommit(true);
+                Statement stmt = conn.createStatement();
+                try {
+                    stmt.executeUpdate("ROLLBACK PREPARED '" + s + "'");
+                } finally {
+                    stmt.close();
+                }
+            }
+            committedOrRolledBack = true;
+        } catch (SQLException ex) {
+            int errorCode = XAException.XAER_RMERR;
+            if (PSQLState.UNDEFINED_OBJECT.getState().equals(ex.getSQLState())) {
+                if (committedOrRolledBack || !xid.equals(preparedXid)) {
+                    if (LOGGER.isLoggable(Level.FINEST)) {
+                        debug("rolling back xid " + xid + " while the connection prepared xid is " + preparedXid
+                                + (committedOrRolledBack ? ", but the connection was already committed/rolled-back" : ""));
+                    }
+                    errorCode = XAException.XAER_NOTA;
+                }
+            }
+            if (PSQLState.isConnectionError(ex.getSQLState())) {
+                if (LOGGER.isLoggable(Level.FINEST)) {
+                    debug("rollback connection failure (sql error code " + ex.getSQLState() + "), reconnection could be expected");
+                }
+                errorCode = XAException.XAER_RMFAIL;
+            }
+            throw new PGXAException(GT.tr("Error rolling back prepared transaction. rollback xid={0}, preparedXid={1}, currentXid={2}", xid, preparedXid, currentXid), ex, errorCode);
         }
-      }
-      if (PSQLState.isConnectionError(ex.getSQLState())) {
+    }
+
+    @Override
+    public void commit(Xid xid, boolean onePhase) throws XAException {
         if (LOGGER.isLoggable(Level.FINEST)) {
-          debug("commit connection failure (sql error code " + ex.getSQLState() + "), reconnection could be expected");
+            debug("committing xid = " + xid + (onePhase ? " (one phase) " : " (two phase)"));
         }
-        errorCode = XAException.XAER_RMFAIL;
-      }
-      throw new PGXAException(GT.tr("Error committing prepared transaction. commit xid={0}, preparedXid={1}, currentXid={2}", xid, preparedXid, currentXid), ex, errorCode);
-    }
-  }
 
-  @Override
-  public boolean isSameRM(XAResource xares) throws XAException {
-    // This trivial implementation makes sure that the
-    // application server doesn't try to use another connection
-    // for prepare, commit and rollback commands.
-    return xares == this;
-  }
+        if (xid == null) {
+            throw new PGXAException(GT.tr("xid must not be null"), XAException.XAER_INVAL);
+        }
 
-  /**
-   * Does nothing, since we don't do heuristics.
-   */
-  @Override
-  public void forget(Xid xid) throws XAException {
-    throw new PGXAException(GT.tr("Heuristic commit/rollback not supported. forget xid={0}", xid),
-        XAException.XAER_NOTA);
-  }
-
-  /**
-   * We don't do transaction timeouts. Just returns 0.
-   */
-  @Override
-  public int getTransactionTimeout() {
-    return 0;
-  }
-
-  /**
-   * We don't do transaction timeouts. Returns false.
-   */
-  @Override
-  public boolean setTransactionTimeout(int seconds) {
-    return false;
-  }
-
-  private int mapSQLStateToXAErrorCode(SQLException sqlException) {
-    if (isPostgreSQLIntegrityConstraintViolation(sqlException)) {
-      return XAException.XA_RBINTEGRITY;
+        if (onePhase) {
+            commitOnePhase(xid);
+        } else {
+            commitPrepared(xid);
+        }
     }
 
-    return XAException.XAER_RMFAIL;
-  }
+    /**
+     * <p>Preconditions:</p>
+     * <ol>
+     *     <li>xid must be in ended state.</li>
+     * </ol>
+     *
+     * <p>Implementation deficiency preconditions:</p>
+     * <ol>
+     *     <li>this connection must have been used to run the transaction</li>
+     * </ol>
+     *
+     * <p>Postconditions:</p>
+     * <ol>
+     *     <li>Transaction is committed</li>
+     * </ol>
+     */
+    private void commitOnePhase(Xid xid) throws XAException {
+        try {
+            // Check preconditions
+            if (xid.equals(preparedXid)) { // TODO: check if the condition should be negated
+                throw new PGXAException(GT.tr("One-phase commit called for xid {0} but connection was prepared with xid {1}",
+                        xid, preparedXid), XAException.XAER_PROTO);
+            }
+            if (currentXid == null && !committedOrRolledBack) {
+                // In fact, we don't know if xid is bogus, or if it just wasn't associated with this connection.
+                // Assume it's our fault.
+                // TODO: pick proper error message. Current one does not clarify what went wrong
+                throw new PGXAException(GT.tr(
+                        "Not implemented: one-phase commit must be issued using the same connection that was used to start it", xid),
+                        XAException.XAER_RMERR);
+            }
+            if (!xid.equals(currentXid) || committedOrRolledBack) {
+                throw new PGXAException(GT.tr("One-phase commit with unknown xid. commit xid={0}, currentXid={1}",
+                        xid, currentXid), XAException.XAER_NOTA);
+            }
+            if (state != State.ENDED) {
+                throw new PGXAException(GT.tr("commit called before end. commit xid={0}, state={1}", xid, state), XAException.XAER_PROTO);
+            }
 
-  private boolean isPostgreSQLIntegrityConstraintViolation(SQLException sqlException) {
-    if (!(sqlException instanceof PSQLException)) {
-      return false;
+            // Preconditions are met. Commit
+            state = State.IDLE;
+            currentXid = null;
+            committedOrRolledBack = true;
+
+            conn.commit();
+            conn.setAutoCommit(localAutoCommitMode);
+        } catch (SQLException ex) {
+            throw new PGXAException(GT.tr("Error during one-phase commit. commit xid={0}", xid), ex, mapSQLStateToXAErrorCode(ex));
+        }
     }
-    String sqlState = sqlException.getSQLState();
-    return sqlState != null
-        && sqlState.length() == 5
-        && sqlState.startsWith("23"); // Class 23 - Integrity Constraint Violation
-  }
 
-  private enum State {
     /**
-     * {@code PGXAConnection} not associated with a XA-transaction. You can still call {@link #getConnection()} and
-     * use the connection outside XA. {@code currentXid} is {@code null}. autoCommit is {@code true} on a connection
-     * by getConnection, per normal JDBC rules, though the caller can change it to {@code false} and manage
-     * transactions itself using Connection.commit and rollback.
+     * <p>Commits prepared transaction. Preconditions:</p>
+     * <ol>
+     *     <li>xid must be in prepared state in the server</li>
+     * </ol>
+     *
+     * <p>Implementation deficiency preconditions:</p>
+     * <ol>
+     *     <li>Connection must be in idle state</li>
+     * </ol>
+     *
+     * <p>Postconditions:</p>
+     * <ol>
+     *     <li>Transaction is committed</li>
+     * </ol>
      */
-    IDLE,
+    private void commitPrepared(Xid xid) throws XAException {
+        try {
+            // Check preconditions. The connection mustn't be used for another
+            // XA or local transaction, or the COMMIT PREPARED command
+            // would mess it up.
+            if (state != State.IDLE
+                    || conn.getTransactionState() != TransactionState.IDLE) {
+                throw new PGXAException(
+                        GT.tr("Not implemented: 2nd phase commit must be issued using an idle connection. commit xid={0}, currentXid={1}, state={2}, transactionState={3}", xid, currentXid, state, conn.getTransactionState()),
+                        XAException.XAER_RMERR);
+            }
+
+            String s = RecoveredXid.xidToString(xid);
+
+            localAutoCommitMode = conn.getAutoCommit();
+            conn.setAutoCommit(true);
+            Statement stmt = conn.createStatement();
+            try {
+                stmt.executeUpdate("COMMIT PREPARED '" + s + "'");
+            } finally {
+                stmt.close();
+                conn.setAutoCommit(localAutoCommitMode);
+            }
+            committedOrRolledBack = true;
+        } catch (SQLException ex) {
+            int errorCode = XAException.XAER_RMERR;
+            if (PSQLState.UNDEFINED_OBJECT.getState().equals(ex.getSQLState())) {
+                if (committedOrRolledBack || !xid.equals(preparedXid)) {
+                    if (LOGGER.isLoggable(Level.FINEST)) {
+                        debug("committing xid " + xid + " while the connection prepared xid is " + preparedXid
+                                + (committedOrRolledBack ? ", but the connection was already committed/rolled-back" : ""));
+                    }
+                    errorCode = XAException.XAER_NOTA;
+                }
+            }
+            if (PSQLState.isConnectionError(ex.getSQLState())) {
+                if (LOGGER.isLoggable(Level.FINEST)) {
+                    debug("commit connection failure (sql error code " + ex.getSQLState() + "), reconnection could be expected");
+                }
+                errorCode = XAException.XAER_RMFAIL;
+            }
+            throw new PGXAException(GT.tr("Error committing prepared transaction. commit xid={0}, preparedXid={1}, currentXid={2}", xid, preparedXid, currentXid), ex, errorCode);
+        }
+    }
+
+    @Override
+    public boolean isSameRM(XAResource xares) throws XAException {
+        // This trivial implementation makes sure that the
+        // application server doesn't try to use another connection
+        // for prepare, commit and rollback commands.
+        return xares == this;
+    }
+
     /**
-     * {@link #start(Xid, int)} has been called, and we're associated with an XA transaction. {@code currentXid}
-     * is valid. autoCommit is false on a connection returned by getConnection, and should not be messed with by
-     * the caller or the XA transaction will be broken.
+     * Heuristic commit/rollback is not supported, so this always throws {@code XAER_NOTA}.
      */
-    ACTIVE,
+    @Override
+    public void forget(Xid xid) throws XAException {
+        throw new PGXAException(GT.tr("Heuristic commit/rollback not supported. forget xid={0}", xid),
+                XAException.XAER_NOTA);
+    }
+
     /**
-     * {@link #end(Xid, int)} has been called, but the transaction has not yet been prepared. {@code currentXid}
-     * is still valid. You shouldn't use the connection for anything else than issuing a {@link XAResource#commit(Xid, boolean)} or
-     * rollback.
+     * We don't do transaction timeouts. Just returns 0.
      */
-    ENDED
-  }
+    @Override
+    public int getTransactionTimeout() {
+        return 0;
+    }
+
+    /**
+     * We don't do transaction timeouts. Returns false.
+     */
+    @Override
+    public boolean setTransactionTimeout(int seconds) {
+        return false;
+    }
+
+    private int mapSQLStateToXAErrorCode(SQLException sqlException) {
+        if (isPostgreSQLIntegrityConstraintViolation(sqlException)) {
+            return XAException.XA_RBINTEGRITY;
+        }
+
+        return XAException.XAER_RMFAIL;
+    }
+
+    private boolean isPostgreSQLIntegrityConstraintViolation(SQLException sqlException) {
+        if (!(sqlException instanceof PSQLException)) {
+            return false;
+        }
+        String sqlState = sqlException.getSQLState();
+        return sqlState != null
+                && sqlState.length() == 5
+                && sqlState.startsWith("23"); // Class 23 - Integrity Constraint Violation
+    }
+
+    private enum State {
+        /**
+         * {@code PGXAConnection} not associated with an XA transaction. You can still call {@link #getConnection()} and
+         * use the connection outside XA. {@code currentXid} is {@code null}. autoCommit is {@code true} on a connection
+         * returned by getConnection, per normal JDBC rules, though the caller can change it to {@code false} and manage
+         * transactions itself using Connection.commit and rollback.
+         */
+        IDLE,
+        /**
+         * {@link #start(Xid, int)} has been called, and we're associated with an XA transaction. {@code currentXid}
+         * is valid. autoCommit is false on a connection returned by getConnection, and should not be messed with by
+         * the caller or the XA transaction will be broken.
+         */
+        ACTIVE,
+        /**
+         * {@link #end(Xid, int)} has been called, but the transaction has not yet been prepared. {@code currentXid}
+         * is still valid. You shouldn't use the connection for anything else than issuing a {@link XAResource#commit(Xid, boolean)} or
+         * rollback.
+         */
+        ENDED
+    }
+
+    /*
+     * A java.sql.Connection proxy class to forbid calls to transaction control methods while the
+     * connection is used for an XA transaction.
+     */
+    private class ConnectionHandler implements InvocationHandler {
+        private final Connection con;
+
+        ConnectionHandler(Connection con) {
+            this.con = con;
+        }
+
+        @Override
+        @SuppressWarnings("throwing.nullable")
+        public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
+            if (state != State.IDLE) {
+                String methodName = method.getName();
+                if ("commit".equals(methodName)
+                        || "rollback".equals(methodName)
+                        || "setSavepoint".equals(methodName) // java.sql.Connection method is setSavepoint (lowercase 'p')
+                        || ("setAutoCommit".equals(methodName) && (Boolean) args[0])) {
+                    throw new PSQLException(
+                            GT.tr(
+                                    "Transaction control methods setAutoCommit(true), commit, rollback and setSavePoint not allowed while an XA transaction is active."),
+                            PSQLState.OBJECT_NOT_IN_STATE);
+                }
+            }
+            try {
+                /*
+                 * If the argument to equals-method is also a wrapper, present the original unwrapped
+                 * connection to the underlying equals method.
+                 */
+                if ("equals".equals(method.getName()) && args.length == 1) {
+                    Object arg = args[0];
+                    if (arg != null && Proxy.isProxyClass(arg.getClass())) {
+                        InvocationHandler h = Proxy.getInvocationHandler(arg);
+                        if (h instanceof ConnectionHandler) {
+                            // unwrap argument
+                            args = new Object[]{((ConnectionHandler) h).con};
+                        }
+                    }
+                }
+
+                return method.invoke(con, args);
+            } catch (InvocationTargetException ex) {
+                throw ex.getTargetException();
+            }
+        }
+    }
 
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/xa/PGXADataSource.java b/pgjdbc/src/main/java/org/postgresql/xa/PGXADataSource.java
index ab12d00..714facf 100644
--- a/pgjdbc/src/main/java/org/postgresql/xa/PGXADataSource.java
+++ b/pgjdbc/src/main/java/org/postgresql/xa/PGXADataSource.java
@@ -5,16 +5,14 @@
 
 package org.postgresql.xa;
 
-import org.postgresql.core.BaseConnection;
-import org.postgresql.ds.common.BaseDataSource;
-import org.postgresql.util.DriverInfo;
-
 import java.sql.Connection;
 import java.sql.SQLException;
-
 import javax.naming.Reference;
 import javax.sql.XAConnection;
 import javax.sql.XADataSource;
+import org.postgresql.core.BaseConnection;
+import org.postgresql.ds.common.BaseDataSource;
+import org.postgresql.util.DriverInfo;
 
 /**
  * XA-enabled DataSource implementation.
@@ -22,45 +20,45 @@ import javax.sql.XADataSource;
  * @author Heikki Linnakangas (heikki.linnakangas@iki.fi)
  */
 public class PGXADataSource extends BaseDataSource implements XADataSource {
-  /**
-   * Gets a connection to the PostgreSQL database. The database is identified by the DataSource
-   * properties serverName, databaseName, and portNumber. The user to connect as is identified by
-   * the DataSource properties user and password.
-   *
-   * @return A valid database connection.
-   * @throws SQLException Occurs when the database connection cannot be established.
-   */
-  @Override
-  public XAConnection getXAConnection() throws SQLException {
-    return getXAConnection(getUser(), getPassword());
-  }
+    /**
+     * Gets a connection to the PostgreSQL database. The database is identified by the DataSource
+     * properties serverName, databaseName, and portNumber. The user to connect as is identified by
+     * the DataSource properties user and password.
+     *
+     * @return A valid database connection.
+     * @throws SQLException Occurs when the database connection cannot be established.
+     */
+    @Override
+    public XAConnection getXAConnection() throws SQLException {
+        return getXAConnection(getUser(), getPassword());
+    }
 
-  /**
-   * Gets a XA-enabled connection to the PostgreSQL database. The database is identified by the
-   * DataSource properties serverName, databaseName, and portNumber. The user to connect as is
-   * identified by the arguments user and password, which override the DataSource properties by the
-   * same name.
-   *
-   * @return A valid database connection.
-   * @throws SQLException Occurs when the database connection cannot be established.
-   */
-  @Override
-  public XAConnection getXAConnection(String user, String password)
-      throws SQLException {
-    Connection con = super.getConnection(user, password);
-    return new PGXAConnection((BaseConnection) con);
-  }
+    /**
+     * Gets a XA-enabled connection to the PostgreSQL database. The database is identified by the
+     * DataSource properties serverName, databaseName, and portNumber. The user to connect as is
+     * identified by the arguments user and password, which override the DataSource properties by the
+     * same name.
+     *
+     * @return A valid database connection.
+     * @throws SQLException Occurs when the database connection cannot be established.
+     */
+    @Override
+    public XAConnection getXAConnection(String user, String password)
+            throws SQLException {
+        Connection con = super.getConnection(user, password);
+        return new PGXAConnection((BaseConnection) con);
+    }
 
-  @Override
-  public String getDescription() {
-    return "XA-enabled DataSource from " + DriverInfo.DRIVER_FULL_NAME;
-  }
+    @Override
+    public String getDescription() {
+        return "XA-enabled DataSource from " + DriverInfo.DRIVER_FULL_NAME;
+    }
 
-  /**
-   * Generates a reference using the appropriate object factory.
-   */
-  protected Reference createReference() {
-    return new Reference(getClass().getName(), PGXADataSourceFactory.class.getName(), null);
-  }
+    /**
+     * Generates a reference using the appropriate object factory.
+     */
+    protected Reference createReference() {
+        return new Reference(getClass().getName(), PGXADataSourceFactory.class.getName(), null);
+    }
 
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/xa/PGXADataSourceFactory.java b/pgjdbc/src/main/java/org/postgresql/xa/PGXADataSourceFactory.java
index ab8a6d9..275b9fb 100644
--- a/pgjdbc/src/main/java/org/postgresql/xa/PGXADataSourceFactory.java
+++ b/pgjdbc/src/main/java/org/postgresql/xa/PGXADataSourceFactory.java
@@ -5,39 +5,37 @@
 
 package org.postgresql.xa;
 
-import org.postgresql.ds.common.PGObjectFactory;
-
 import java.util.Hashtable;
-
 import javax.naming.Context;
 import javax.naming.Name;
 import javax.naming.Reference;
+import org.postgresql.ds.common.PGObjectFactory;
 
 /**
  * An ObjectFactory implementation for PGXADataSource-objects.
  */
 
 public class PGXADataSourceFactory extends PGObjectFactory {
-  /*
-   * All the other PostgreSQL DataSource use PGObjectFactory directly, but we can't do that with
-   * PGXADataSource because referencing PGXADataSource from PGObjectFactory would break
-   * "JDBC2 Enterprise" edition build which doesn't include PGXADataSource.
-   */
+    /*
+     * All the other PostgreSQL DataSource use PGObjectFactory directly, but we can't do that with
+     * PGXADataSource because referencing PGXADataSource from PGObjectFactory would break
+     * "JDBC2 Enterprise" edition build which doesn't include PGXADataSource.
+     */
 
-  @Override
-  public Object getObjectInstance(Object obj, Name name, Context nameCtx,
-      Hashtable<?, ?> environment) throws Exception {
-    Reference ref = (Reference) obj;
-    String className = ref.getClassName();
-    if ("org.postgresql.xa.PGXADataSource".equals(className)) {
-      return loadXADataSource(ref);
-    } else {
-      return null;
+    @Override
+    public Object getObjectInstance(Object obj, Name name, Context nameCtx,
+                                    Hashtable<?, ?> environment) throws Exception {
+        Reference ref = (Reference) obj;
+        String className = ref.getClassName();
+        if ("org.postgresql.xa.PGXADataSource".equals(className)) {
+            return loadXADataSource(ref);
+        } else {
+            return null;
+        }
     }
-  }
 
-  private Object loadXADataSource(Reference ref) {
-    PGXADataSource ds = new PGXADataSource();
-    return loadBaseDataSource(ds, ref);
-  }
+    private Object loadXADataSource(Reference ref) {
+        PGXADataSource ds = new PGXADataSource();
+        return loadBaseDataSource(ds, ref);
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/xa/PGXAException.java b/pgjdbc/src/main/java/org/postgresql/xa/PGXAException.java
index c8c5631..0f2852e 100644
--- a/pgjdbc/src/main/java/org/postgresql/xa/PGXAException.java
+++ b/pgjdbc/src/main/java/org/postgresql/xa/PGXAException.java
@@ -16,22 +16,22 @@ import javax.transaction.xa.XAException;
  */
 @SuppressWarnings("serial")
 public class PGXAException extends XAException {
-  PGXAException(String message, int errorCode) {
-    super(message);
+    PGXAException(String message, int errorCode) {
+        super(message);
 
-    this.errorCode = errorCode;
-  }
+        this.errorCode = errorCode;
+    }
 
-  PGXAException(String message, Throwable cause, int errorCode) {
-    super(message);
+    PGXAException(String message, Throwable cause, int errorCode) {
+        super(message);
 
-    initCause(cause);
-    this.errorCode = errorCode;
-  }
+        initCause(cause);
+        this.errorCode = errorCode;
+    }
 
-  PGXAException(Throwable cause, int errorCode) {
-    super(errorCode);
+    PGXAException(Throwable cause, int errorCode) {
+        super(errorCode);
 
-    initCause(cause);
-  }
+        initCause(cause);
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/xa/RecoveredXid.java b/pgjdbc/src/main/java/org/postgresql/xa/RecoveredXid.java
index e7a9252..e4dc79b 100644
--- a/pgjdbc/src/main/java/org/postgresql/xa/RecoveredXid.java
+++ b/pgjdbc/src/main/java/org/postgresql/xa/RecoveredXid.java
@@ -10,109 +10,108 @@ import java.util.Base64;
 import java.util.logging.Level;
 import java.util.logging.LogRecord;
 import java.util.logging.Logger;
-
 import javax.transaction.xa.Xid;
 
 class RecoveredXid implements Xid {
-  int formatId;
-  byte[] globalTransactionId;
-  byte[] branchQualifier;
+    int formatId;
+    byte[] globalTransactionId;
+    byte[] branchQualifier;
 
-  RecoveredXid(int formatId, byte[] globalTransactionId, byte[] branchQualifier) {
-    this.formatId = formatId;
-    this.globalTransactionId = globalTransactionId;
-    this.branchQualifier = branchQualifier;
-  }
-
-  @Override
-  public int getFormatId() {
-    return formatId;
-  }
-
-  @Override
-  public byte[] getGlobalTransactionId() {
-    return globalTransactionId;
-  }
-
-  @Override
-  public byte[] getBranchQualifier() {
-    return branchQualifier;
-  }
-
-  @Override
-  public int hashCode() {
-    final int prime = 31;
-    int result = 1;
-    result = prime * result + Arrays.hashCode(branchQualifier);
-    result = prime * result + formatId;
-    result = prime * result + Arrays.hashCode(globalTransactionId);
-    return result;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (o == this) {
-      // optimization for the common case.
-      return true;
+    RecoveredXid(int formatId, byte[] globalTransactionId, byte[] branchQualifier) {
+        this.formatId = formatId;
+        this.globalTransactionId = globalTransactionId;
+        this.branchQualifier = branchQualifier;
     }
 
-    if (!(o instanceof Xid)) {
-      return false;
+    static String xidToString(Xid xid) {
+        final byte[] globalTransactionId = xid.getGlobalTransactionId();
+        final byte[] branchQualifier = xid.getBranchQualifier();
+        final StringBuilder sb = new StringBuilder((int) (16 + globalTransactionId.length * 1.5 + branchQualifier.length * 1.5));
+        sb.append(xid.getFormatId())
+                .append('_')
+                .append(Base64.getEncoder().encodeToString(globalTransactionId))
+                .append('_')
+                .append(Base64.getEncoder().encodeToString(branchQualifier));
+        return sb.toString();
     }
 
-    Xid other = (Xid) o;
-    return formatId == other.getFormatId()
-        && Arrays.equals(globalTransactionId, other.getGlobalTransactionId())
-        && Arrays.equals(branchQualifier, other.getBranchQualifier());
-  }
+    /**
+     * @return recovered xid, or null if s does not represent a valid xid encoded by the driver.
+     */
+    static Xid stringToXid(String s) {
+        final int a = s.indexOf('_');
+        final int b = s.lastIndexOf('_');
 
-  /**
-   * This is for debugging purposes only.
-   */
-  @Override
-  public String toString() {
-    return xidToString(this);
-  }
+        if (a == b) {
+            // this also catches the case a == b == -1.
+            return null;
+        }
 
-  // --- Routines for converting xid to string and back.
-
-  static String xidToString(Xid xid) {
-    final byte[] globalTransactionId = xid.getGlobalTransactionId();
-    final byte[] branchQualifier = xid.getBranchQualifier();
-    final StringBuilder sb = new StringBuilder((int) (16 + globalTransactionId.length * 1.5 + branchQualifier.length * 1.5));
-    sb.append(xid.getFormatId())
-        .append('_')
-        .append(Base64.getEncoder().encodeToString(globalTransactionId))
-        .append('_')
-        .append(Base64.getEncoder().encodeToString(branchQualifier));
-    return sb.toString();
-  }
-
-  /**
-   * @return recovered xid, or null if s does not represent a valid xid encoded by the driver.
-   */
-  static Xid stringToXid(String s) {
-    final int a = s.indexOf('_');
-    final int b = s.lastIndexOf('_');
-
-    if (a == b) {
-      // this also catches the case a == b == -1.
-      return null;
+        try {
+            int formatId = Integer.parseInt(s.substring(0, a));
+            //mime decoder is more forgiving to extraneous characters by ignoring them
+            byte[] globalTransactionId = Base64.getMimeDecoder().decode(s.substring(a + 1, b));
+            byte[] branchQualifier = Base64.getMimeDecoder().decode(s.substring(b + 1));
+            return new RecoveredXid(formatId, globalTransactionId, branchQualifier);
+        } catch (Exception ex) {
+            final LogRecord logRecord = new LogRecord(Level.FINE, "XID String is invalid: [{0}]");
+            logRecord.setParameters(new Object[]{s});
+            logRecord.setThrown(ex);
+            Logger.getLogger(RecoveredXid.class.getName()).log(logRecord);
+            // Doesn't seem to be an xid generated by this driver.
+            return null;
+        }
     }
 
-    try {
-      int formatId = Integer.parseInt(s.substring(0, a));
-      //mime decoder is more forgiving to extraneous characters by ignoring them
-      byte[] globalTransactionId = Base64.getMimeDecoder().decode(s.substring(a + 1, b));
-      byte[] branchQualifier = Base64.getMimeDecoder().decode(s.substring(b + 1));
-      return new RecoveredXid(formatId, globalTransactionId, branchQualifier);
-    } catch (Exception ex) {
-      final LogRecord logRecord = new LogRecord(Level.FINE, "XID String is invalid: [{0}]");
-      logRecord.setParameters(new Object[]{s});
-      logRecord.setThrown(ex);
-      Logger.getLogger(RecoveredXid.class.getName()).log(logRecord);
-      // Doesn't seem to be an xid generated by this driver.
-      return null;
+    @Override
+    public int getFormatId() {
+        return formatId;
+    }
+
+    @Override
+    public byte[] getGlobalTransactionId() {
+        return globalTransactionId;
+    }
+
+    @Override
+    public byte[] getBranchQualifier() {
+        return branchQualifier;
+    }
+
+    @Override
+    public int hashCode() {
+        final int prime = 31;
+        int result = 1;
+        result = prime * result + Arrays.hashCode(branchQualifier);
+        result = prime * result + formatId;
+        result = prime * result + Arrays.hashCode(globalTransactionId);
+        return result;
+    }
+
+    // equals/hashCode follow the Xid contract so recovered xids compare equal to the originals.
+
+    @Override
+    public boolean equals(Object o) {
+        if (o == this) {
+            // optimization for the common case.
+            return true;
+        }
+
+        if (!(o instanceof Xid)) {
+            return false;
+        }
+
+        Xid other = (Xid) o;
+        return formatId == other.getFormatId()
+                && Arrays.equals(globalTransactionId, other.getGlobalTransactionId())
+                && Arrays.equals(branchQualifier, other.getBranchQualifier());
+    }
+
+    /**
+     * This is for debugging purposes only.
+     */
+    @Override
+    public String toString() {
+        return xidToString(this);
     }
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/xml/DefaultPGXmlFactoryFactory.java b/pgjdbc/src/main/java/org/postgresql/xml/DefaultPGXmlFactoryFactory.java
index bd59458..7ea7fe8 100644
--- a/pgjdbc/src/main/java/org/postgresql/xml/DefaultPGXmlFactoryFactory.java
+++ b/pgjdbc/src/main/java/org/postgresql/xml/DefaultPGXmlFactoryFactory.java
@@ -5,10 +5,6 @@
 
 package org.postgresql.xml;
 
-import org.xml.sax.SAXException;
-import org.xml.sax.XMLReader;
-import org.xml.sax.helpers.XMLReaderFactory;
-
 import javax.xml.XMLConstants;
 import javax.xml.parsers.DocumentBuilder;
 import javax.xml.parsers.DocumentBuilderFactory;
@@ -17,6 +13,9 @@ import javax.xml.stream.XMLInputFactory;
 import javax.xml.stream.XMLOutputFactory;
 import javax.xml.transform.TransformerFactory;
 import javax.xml.transform.sax.SAXTransformerFactory;
+import org.xml.sax.SAXException;
+import org.xml.sax.XMLReader;
+import org.xml.sax.helpers.XMLReaderFactory;
 
 /**
  * Default implementation of PGXmlFactoryFactory that configures each factory per OWASP recommendations.
@@ -24,117 +23,117 @@ import javax.xml.transform.sax.SAXTransformerFactory;
  * @see <a href="https://cheatsheetseries.owasp.org/cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html">https://cheatsheetseries.owasp.org/cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html</a>
  */
 public class DefaultPGXmlFactoryFactory implements PGXmlFactoryFactory {
-  public static final DefaultPGXmlFactoryFactory INSTANCE = new DefaultPGXmlFactoryFactory();
+    public static final DefaultPGXmlFactoryFactory INSTANCE = new DefaultPGXmlFactoryFactory();
 
-  private DefaultPGXmlFactoryFactory() {
-  }
-
-  private DocumentBuilderFactory getDocumentBuilderFactory() {
-    DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
-    setFactoryProperties(factory);
-    factory.setXIncludeAware(false);
-    factory.setExpandEntityReferences(false);
-    return factory;
-  }
-
-  @Override
-  public DocumentBuilder newDocumentBuilder() throws ParserConfigurationException {
-    DocumentBuilder builder = getDocumentBuilderFactory().newDocumentBuilder();
-    builder.setEntityResolver(EmptyStringEntityResolver.INSTANCE);
-    builder.setErrorHandler(NullErrorHandler.INSTANCE);
-    return builder;
-  }
-
-  @Override
-  public TransformerFactory newTransformerFactory() {
-    TransformerFactory factory = TransformerFactory.newInstance();
-    setFactoryProperties(factory);
-    return factory;
-  }
-
-  @Override
-  public SAXTransformerFactory newSAXTransformerFactory() {
-    SAXTransformerFactory factory = (SAXTransformerFactory) SAXTransformerFactory.newInstance();
-    setFactoryProperties(factory);
-    return factory;
-  }
-
-  @Override
-  public XMLInputFactory newXMLInputFactory() {
-    XMLInputFactory factory = XMLInputFactory.newInstance();
-    setPropertyQuietly(factory, XMLInputFactory.SUPPORT_DTD, false);
-    setPropertyQuietly(factory, XMLInputFactory.IS_SUPPORTING_EXTERNAL_ENTITIES, false);
-    return factory;
-  }
-
-  @Override
-  public XMLOutputFactory newXMLOutputFactory() {
-    return XMLOutputFactory.newInstance();
-  }
-
-  @SuppressWarnings("deprecation")
-  @Override
-  public XMLReader createXMLReader() throws SAXException {
-    XMLReader factory = XMLReaderFactory.createXMLReader();
-    setFeatureQuietly(factory, "http://apache.org/xml/features/disallow-doctype-decl", true);
-    setFeatureQuietly(factory, "http://apache.org/xml/features/nonvalidating/load-external-dtd", false);
-    setFeatureQuietly(factory, "http://xml.org/sax/features/external-general-entities", false);
-    setFeatureQuietly(factory, "http://xml.org/sax/features/external-parameter-entities", false);
-    factory.setErrorHandler(NullErrorHandler.INSTANCE);
-    return factory;
-  }
-
-  private static void setFeatureQuietly(Object factory, String name, boolean value) {
-    try {
-      if (factory instanceof DocumentBuilderFactory) {
-        ((DocumentBuilderFactory) factory).setFeature(name, value);
-      } else if (factory instanceof TransformerFactory) {
-        ((TransformerFactory) factory).setFeature(name, value);
-      } else if (factory instanceof XMLReader) {
-        ((XMLReader) factory).setFeature(name, value);
-      } else {
-        throw new Error("Invalid factory class: " + factory.getClass());
-      }
-      return;
-    } catch (Exception ignore) {
+    private DefaultPGXmlFactoryFactory() {
     }
-  }
 
-  private static void setAttributeQuietly(Object factory, String name, Object value) {
-    try {
-      if (factory instanceof DocumentBuilderFactory) {
-        ((DocumentBuilderFactory) factory).setAttribute(name, value);
-      } else if (factory instanceof TransformerFactory) {
-        ((TransformerFactory) factory).setAttribute(name, value);
-      } else {
-        throw new Error("Invalid factory class: " + factory.getClass());
-      }
-    } catch (Exception ignore) {
+    private static void setFeatureQuietly(Object factory, String name, boolean value) {
+        try {
+            if (factory instanceof DocumentBuilderFactory) {
+                ((DocumentBuilderFactory) factory).setFeature(name, value);
+            } else if (factory instanceof TransformerFactory) {
+                ((TransformerFactory) factory).setFeature(name, value);
+            } else if (factory instanceof XMLReader) {
+                ((XMLReader) factory).setFeature(name, value);
+            } else {
+                throw new Error("Invalid factory class: " + factory.getClass());
+            }
+            return;
+        } catch (Exception ignore) {
+        }
     }
-  }
 
-  private static void setFactoryProperties(Object factory) {
-    setFeatureQuietly(factory, XMLConstants.FEATURE_SECURE_PROCESSING, true);
-    setFeatureQuietly(factory, "http://apache.org/xml/features/disallow-doctype-decl", true);
-    setFeatureQuietly(factory, "http://apache.org/xml/features/nonvalidating/load-external-dtd", false);
-    setFeatureQuietly(factory, "http://xml.org/sax/features/external-general-entities", false);
-    setFeatureQuietly(factory, "http://xml.org/sax/features/external-parameter-entities", false);
-    // Values from XMLConstants inlined for JDK 1.6 compatibility
-    setAttributeQuietly(factory, "http://javax.xml.XMLConstants/property/accessExternalDTD", "");
-    setAttributeQuietly(factory, "http://javax.xml.XMLConstants/property/accessExternalSchema", "");
-    setAttributeQuietly(factory, "http://javax.xml.XMLConstants/property/accessExternalStylesheet", "");
-  }
-
-  private static void setPropertyQuietly(Object factory, String name, Object value) {
-    try {
-      if (factory instanceof XMLReader) {
-        ((XMLReader) factory).setProperty(name, value);
-      } else if (factory instanceof XMLInputFactory) {
-        ((XMLInputFactory) factory).setProperty(name, value);
-      } else {
-        throw new Error("Invalid factory class: " + factory.getClass());
-      }
-    } catch (Exception ignore) {
+    private static void setAttributeQuietly(Object factory, String name, Object value) {
+        try {
+            if (factory instanceof DocumentBuilderFactory) {
+                ((DocumentBuilderFactory) factory).setAttribute(name, value);
+            } else if (factory instanceof TransformerFactory) {
+                ((TransformerFactory) factory).setAttribute(name, value);
+            } else {
+                throw new Error("Invalid factory class: " + factory.getClass());
+            }
+        } catch (Exception ignore) {
+        }
+    }
+
+    private static void setFactoryProperties(Object factory) {
+        setFeatureQuietly(factory, XMLConstants.FEATURE_SECURE_PROCESSING, true);
+        setFeatureQuietly(factory, "http://apache.org/xml/features/disallow-doctype-decl", true);
+        setFeatureQuietly(factory, "http://apache.org/xml/features/nonvalidating/load-external-dtd", false);
+        setFeatureQuietly(factory, "http://xml.org/sax/features/external-general-entities", false);
+        setFeatureQuietly(factory, "http://xml.org/sax/features/external-parameter-entities", false);
+        // Values from XMLConstants inlined for JDK 1.6 compatibility
+        setAttributeQuietly(factory, "http://javax.xml.XMLConstants/property/accessExternalDTD", "");
+        setAttributeQuietly(factory, "http://javax.xml.XMLConstants/property/accessExternalSchema", "");
+        setAttributeQuietly(factory, "http://javax.xml.XMLConstants/property/accessExternalStylesheet", "");
+    }
+
+    private static void setPropertyQuietly(Object factory, String name, Object value) {
+        try {
+            if (factory instanceof XMLReader) {
+                ((XMLReader) factory).setProperty(name, value);
+            } else if (factory instanceof XMLInputFactory) {
+                ((XMLInputFactory) factory).setProperty(name, value);
+            } else {
+                throw new Error("Invalid factory class: " + factory.getClass());
+            }
+        } catch (Exception ignore) {
+        }
+    }
+
+    private DocumentBuilderFactory getDocumentBuilderFactory() {
+        DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
+        setFactoryProperties(factory);
+        factory.setXIncludeAware(false);
+        factory.setExpandEntityReferences(false);
+        return factory;
+    }
+
+    @Override
+    public DocumentBuilder newDocumentBuilder() throws ParserConfigurationException {
+        DocumentBuilder builder = getDocumentBuilderFactory().newDocumentBuilder();
+        builder.setEntityResolver(EmptyStringEntityResolver.INSTANCE);
+        builder.setErrorHandler(NullErrorHandler.INSTANCE);
+        return builder;
+    }
+
+    @Override
+    public TransformerFactory newTransformerFactory() {
+        TransformerFactory factory = TransformerFactory.newInstance();
+        setFactoryProperties(factory);
+        return factory;
+    }
+
+    @Override
+    public SAXTransformerFactory newSAXTransformerFactory() {
+        SAXTransformerFactory factory = (SAXTransformerFactory) SAXTransformerFactory.newInstance();
+        setFactoryProperties(factory);
+        return factory;
+    }
+
+    @Override
+    public XMLInputFactory newXMLInputFactory() {
+        XMLInputFactory factory = XMLInputFactory.newInstance();
+        setPropertyQuietly(factory, XMLInputFactory.SUPPORT_DTD, false);
+        setPropertyQuietly(factory, XMLInputFactory.IS_SUPPORTING_EXTERNAL_ENTITIES, false);
+        return factory;
+    }
+
+    @Override
+    public XMLOutputFactory newXMLOutputFactory() {
+        return XMLOutputFactory.newInstance();
+    }
+
+    @SuppressWarnings("deprecation")
+    @Override
+    public XMLReader createXMLReader() throws SAXException {
+        XMLReader factory = XMLReaderFactory.createXMLReader();
+        setFeatureQuietly(factory, "http://apache.org/xml/features/disallow-doctype-decl", true);
+        setFeatureQuietly(factory, "http://apache.org/xml/features/nonvalidating/load-external-dtd", false);
+        setFeatureQuietly(factory, "http://xml.org/sax/features/external-general-entities", false);
+        setFeatureQuietly(factory, "http://xml.org/sax/features/external-parameter-entities", false);
+        factory.setErrorHandler(NullErrorHandler.INSTANCE);
+        return factory;
     }
-  }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/xml/EmptyStringEntityResolver.java b/pgjdbc/src/main/java/org/postgresql/xml/EmptyStringEntityResolver.java
index 506e0fd..27abb4c 100644
--- a/pgjdbc/src/main/java/org/postgresql/xml/EmptyStringEntityResolver.java
+++ b/pgjdbc/src/main/java/org/postgresql/xml/EmptyStringEntityResolver.java
@@ -5,22 +5,21 @@
 
 package org.postgresql.xml;
 
+import java.io.IOException;
+import java.io.StringReader;
 import org.xml.sax.EntityResolver;
 import org.xml.sax.InputSource;
 import org.xml.sax.SAXException;
 
-import java.io.IOException;
-import java.io.StringReader;
-
 public class EmptyStringEntityResolver implements EntityResolver {
-  public static final EmptyStringEntityResolver INSTANCE = new EmptyStringEntityResolver();
+    public static final EmptyStringEntityResolver INSTANCE = new EmptyStringEntityResolver();
 
-  public EmptyStringEntityResolver() {
-  }
+    public EmptyStringEntityResolver() {
+    }
 
-  @Override
-  public InputSource resolveEntity(String publicId, String systemId)
-      throws SAXException, IOException {
-    return new InputSource(new StringReader(""));
-  }
+    @Override
+    public InputSource resolveEntity(String publicId, String systemId)
+            throws SAXException, IOException {
+        return new InputSource(new StringReader(""));
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/xml/LegacyInsecurePGXmlFactoryFactory.java b/pgjdbc/src/main/java/org/postgresql/xml/LegacyInsecurePGXmlFactoryFactory.java
index 579ab5e..1c5f648 100644
--- a/pgjdbc/src/main/java/org/postgresql/xml/LegacyInsecurePGXmlFactoryFactory.java
+++ b/pgjdbc/src/main/java/org/postgresql/xml/LegacyInsecurePGXmlFactoryFactory.java
@@ -5,10 +5,6 @@
 
 package org.postgresql.xml;
 
-import org.xml.sax.SAXException;
-import org.xml.sax.XMLReader;
-import org.xml.sax.helpers.XMLReaderFactory;
-
 import javax.xml.parsers.DocumentBuilder;
 import javax.xml.parsers.DocumentBuilderFactory;
 import javax.xml.parsers.ParserConfigurationException;
@@ -16,43 +12,46 @@ import javax.xml.stream.XMLInputFactory;
 import javax.xml.stream.XMLOutputFactory;
 import javax.xml.transform.TransformerFactory;
 import javax.xml.transform.sax.SAXTransformerFactory;
+import org.xml.sax.SAXException;
+import org.xml.sax.XMLReader;
+import org.xml.sax.helpers.XMLReaderFactory;
 
 public class LegacyInsecurePGXmlFactoryFactory implements PGXmlFactoryFactory {
-  public static final LegacyInsecurePGXmlFactoryFactory INSTANCE = new LegacyInsecurePGXmlFactoryFactory();
+    public static final LegacyInsecurePGXmlFactoryFactory INSTANCE = new LegacyInsecurePGXmlFactoryFactory();
 
-  private LegacyInsecurePGXmlFactoryFactory() {
-  }
+    private LegacyInsecurePGXmlFactoryFactory() {
+    }
 
-  @Override
-  public DocumentBuilder newDocumentBuilder() throws ParserConfigurationException {
-    DocumentBuilder builder = DocumentBuilderFactory.newInstance().newDocumentBuilder();
-    builder.setErrorHandler(NullErrorHandler.INSTANCE);
-    return builder;
-  }
+    @Override
+    public DocumentBuilder newDocumentBuilder() throws ParserConfigurationException {
+        DocumentBuilder builder = DocumentBuilderFactory.newInstance().newDocumentBuilder();
+        builder.setErrorHandler(NullErrorHandler.INSTANCE);
+        return builder;
+    }
 
-  @Override
-  public TransformerFactory newTransformerFactory() {
-    return TransformerFactory.newInstance();
-  }
+    @Override
+    public TransformerFactory newTransformerFactory() {
+        return TransformerFactory.newInstance();
+    }
 
-  @Override
-  public SAXTransformerFactory newSAXTransformerFactory() {
-    return (SAXTransformerFactory) SAXTransformerFactory.newInstance();
-  }
+    @Override
+    public SAXTransformerFactory newSAXTransformerFactory() {
+        return (SAXTransformerFactory) SAXTransformerFactory.newInstance();
+    }
 
-  @Override
-  public XMLInputFactory newXMLInputFactory() {
-    return XMLInputFactory.newInstance();
-  }
+    @Override
+    public XMLInputFactory newXMLInputFactory() {
+        return XMLInputFactory.newInstance();
+    }
 
-  @Override
-  public XMLOutputFactory newXMLOutputFactory() {
-    return XMLOutputFactory.newInstance();
-  }
+    @Override
+    public XMLOutputFactory newXMLOutputFactory() {
+        return XMLOutputFactory.newInstance();
+    }
 
-  @SuppressWarnings("deprecation")
-  @Override
-  public XMLReader createXMLReader() throws SAXException {
-    return XMLReaderFactory.createXMLReader();
-  }
+    @SuppressWarnings("deprecation")
+    @Override
+    public XMLReader createXMLReader() throws SAXException {
+        return XMLReaderFactory.createXMLReader();
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/xml/NullErrorHandler.java b/pgjdbc/src/main/java/org/postgresql/xml/NullErrorHandler.java
index fcf6575..2b8fd50 100644
--- a/pgjdbc/src/main/java/org/postgresql/xml/NullErrorHandler.java
+++ b/pgjdbc/src/main/java/org/postgresql/xml/NullErrorHandler.java
@@ -12,20 +12,20 @@ import org.xml.sax.SAXParseException;
  * Error handler that silently suppresses all errors.
  */
 public class NullErrorHandler implements ErrorHandler {
-  public static final NullErrorHandler INSTANCE = new NullErrorHandler();
+    public static final NullErrorHandler INSTANCE = new NullErrorHandler();
 
-  public NullErrorHandler() {
-  }
+    public NullErrorHandler() {
+    }
 
-  @Override
-  public void error(SAXParseException e) {
-  }
+    @Override
+    public void error(SAXParseException e) {
+    }
 
-  @Override
-  public void fatalError(SAXParseException e) {
-  }
+    @Override
+    public void fatalError(SAXParseException e) {
+    }
 
-  @Override
-  public void warning(SAXParseException e) {
-  }
+    @Override
+    public void warning(SAXParseException e) {
+    }
 }
diff --git a/pgjdbc/src/main/java/org/postgresql/xml/PGXmlFactoryFactory.java b/pgjdbc/src/main/java/org/postgresql/xml/PGXmlFactoryFactory.java
index d5c74d5..debfd2c 100644
--- a/pgjdbc/src/main/java/org/postgresql/xml/PGXmlFactoryFactory.java
+++ b/pgjdbc/src/main/java/org/postgresql/xml/PGXmlFactoryFactory.java
@@ -5,26 +5,25 @@
 
 package org.postgresql.xml;
 
-import org.xml.sax.SAXException;
-import org.xml.sax.XMLReader;
-
 import javax.xml.parsers.DocumentBuilder;
 import javax.xml.parsers.ParserConfigurationException;
 import javax.xml.stream.XMLInputFactory;
 import javax.xml.stream.XMLOutputFactory;
 import javax.xml.transform.TransformerFactory;
 import javax.xml.transform.sax.SAXTransformerFactory;
+import org.xml.sax.SAXException;
+import org.xml.sax.XMLReader;
 
 public interface PGXmlFactoryFactory {
-  DocumentBuilder newDocumentBuilder() throws ParserConfigurationException;
+    DocumentBuilder newDocumentBuilder() throws ParserConfigurationException;
 
-  TransformerFactory newTransformerFactory();
+    TransformerFactory newTransformerFactory();
 
-  SAXTransformerFactory newSAXTransformerFactory();
+    SAXTransformerFactory newSAXTransformerFactory();
 
-  XMLInputFactory newXMLInputFactory();
+    XMLInputFactory newXMLInputFactory();
 
-  XMLOutputFactory newXMLOutputFactory();
+    XMLOutputFactory newXMLOutputFactory();
 
-  XMLReader createXMLReader() throws SAXException;
+    XMLReader createXMLReader() throws SAXException;
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/core/AsciiStringInternerTest.java b/pgjdbc/src/test/java/org/postgresql/core/AsciiStringInternerTest.java
deleted file mode 100644
index 450941f..0000000
--- a/pgjdbc/src/test/java/org/postgresql/core/AsciiStringInternerTest.java
+++ /dev/null
@@ -1,130 +0,0 @@
-/*
- * Copyright (c) 2020, PostgreSQL Global Development Group
- * See the LICENSE file in the project root for more information.
- */
-
-package org.postgresql.core;
-
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertNotSame;
-import static org.junit.jupiter.api.Assertions.assertSame;
-import static org.junit.jupiter.api.Assertions.assertTrue;
-
-import org.junit.jupiter.api.Test;
-
-import java.io.IOException;
-import java.nio.charset.StandardCharsets;
-import java.util.Arrays;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ThreadLocalRandom;
-import java.util.concurrent.atomic.LongAdder;
-
-/**
- *
- * @author Brett Okken
- */
-class AsciiStringInternerTest {
-
-  @Test
-  void canonicalValue() throws Exception {
-    AsciiStringInterner interner = new AsciiStringInterner();
-    String s1 = "testCanonicalValue";
-    byte[] bytes = s1.getBytes(StandardCharsets.US_ASCII);
-    String interned = interner.getString(bytes, 0, bytes.length, null);
-
-    //interned value should be equal
-    assertEquals(s1, interned);
-    //but should be different instance
-    assertNotSame(s1, interned);
-    //asking for it again, however should return same instance
-    assertSame(interned, interner.getString(bytes, 0, bytes.length, null));
-
-    //now show that we can get the value back from a different byte[]
-    byte[] bytes2 = new byte[128];
-    System.arraycopy(bytes, 0, bytes2, 73, bytes.length);
-    assertSame(interned, interner.getString(bytes2, 73, bytes.length, null));
-
-    //now we will mutate the original byte[] to show that does not affect the map
-    Arrays.fill(bytes, (byte) 13);
-    assertSame(interned, interner.getString(bytes2, 73, bytes.length, null));
-  }
-
-  @Test
-  void stagedValue() throws Exception {
-    AsciiStringInterner interner = new AsciiStringInterner();
-    String s1 = "testStagedValue";
-    interner.putString(s1);
-    byte[] bytes = s1.getBytes(StandardCharsets.US_ASCII);
-    String interned = interner.getString(bytes, 0, bytes.length, null);
-    // should be same instance
-    assertSame(s1, interned);
-    //asking for it again should also return same instance
-    assertSame(s1, interner.getString(bytes, 0, bytes.length, null));
-
-    //now show that we can get the value back from a different byte[]
-    byte[] bytes2 = new byte[128];
-    System.arraycopy(bytes, 0, bytes2, 73, bytes.length);
-    assertSame(s1, interner.getString(bytes2, 73, bytes.length, null));
-  }
-
-  @Test
-  void nonAsciiValue() throws Exception {
-    final Encoding encoding = Encoding.getJVMEncoding("UTF-8");
-    AsciiStringInterner interner = new AsciiStringInterner();
-    String s1 = "testNonAsciiValue" + '\u03C0'; // add multi-byte to string to make invalid for intern
-    byte[] bytes = s1.getBytes(StandardCharsets.UTF_8);
-    String interned = interner.getString(bytes, 0, bytes.length, encoding);
-
-    //interned value should be equal
-    assertEquals(s1, interned);
-    //but should be different instance
-    assertNotSame(s1, interned);
-    //asking for it again should again return a different instance
-    final String interned2 = interner.getString(bytes, 0, bytes.length, encoding);
-    assertEquals(s1, interned2);
-    assertNotSame(s1, interned2);
-    assertNotSame(interned, interned2);
-  }
-
-  @Test
-  void testToString() throws Exception {
-    AsciiStringInterner interner = new AsciiStringInterner();
-    assertEquals("AsciiStringInterner []", interner.toString(), "empty");
-    interner.putString("s1");
-    assertEquals("AsciiStringInterner ['s1']", interner.toString(), "empty");
-    interner.getString("s2".getBytes(StandardCharsets.US_ASCII), 0, 2, null);
-    assertEquals("AsciiStringInterner ['s1', 's2']", interner.toString(), "empty");
-  }
-
-  @Test
-  void garbageCleaning() throws Exception {
-    final byte[] bytes = new byte[100000];
-    for (int i = 0; i < 100000; i++) {
-      bytes[i] = (byte) ThreadLocalRandom.current().nextInt(128);
-    }
-    final AsciiStringInterner interner = new AsciiStringInterner();
-    final LongAdder length = new LongAdder();
-    final Callable<Void> c = () -> {
-      for (int i = 0; i < 25000; i++) {
-        String str;
-        try {
-          str = interner.getString(bytes, 0, ThreadLocalRandom.current().nextInt(1000, bytes.length), null);
-        } catch (IOException e) {
-          throw new IllegalStateException(e);
-        }
-        length.add(str.length());
-      }
-      return null;
-    };
-    final ExecutorService exec = Executors.newCachedThreadPool();
-    try {
-      exec.invokeAll(Arrays.asList(c, c, c, c));
-    } finally {
-      exec.shutdown();
-    }
-    //this is really just done to make sure java cannot tell that nothing is really being done
-    assertTrue(length.sum() > 0);
-  }
-}
diff --git a/pgjdbc/src/test/java/org/postgresql/core/CommandCompleteParserNegativeTest.java b/pgjdbc/src/test/java/org/postgresql/core/CommandCompleteParserNegativeTest.java
deleted file mode 100644
index 4f7beeb..0000000
--- a/pgjdbc/src/test/java/org/postgresql/core/CommandCompleteParserNegativeTest.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright (c) 2018, PostgreSQL Global Development Group
- * See the LICENSE file in the project root for more information.
- */
-
-package org.postgresql.core;
-
-import static org.junit.jupiter.api.Assertions.fail;
-
-import org.postgresql.util.PSQLException;
-
-import org.junit.jupiter.params.ParameterizedTest;
-import org.junit.jupiter.params.provider.MethodSource;
-
-import java.util.Arrays;
-
-public class CommandCompleteParserNegativeTest {
-  public static Iterable<Object[]> data() {
-    return Arrays.asList(new Object[][]{
-        {"SELECT 0_0 42"},
-        {"SELECT 42 0_0"},
-        {"SELECT 0_0 0_0"},
-    });
-  }
-
-  @MethodSource("data")
-  @ParameterizedTest(name = "input={0}")
-  void run(String input) throws PSQLException {
-    CommandCompleteParser parser = new CommandCompleteParser();
-    try {
-      parser.parse(input);
-      fail("CommandCompleteParser should throw NumberFormatException for " + input);
-    } catch (PSQLException e) {
-      Throwable cause = e.getCause();
-      if (cause == null) {
-        throw e;
-      }
-      if (!(cause instanceof NumberFormatException)) {
-        throw e;
-      }
-      // NumerFormatException is expected
-    }
-  }
-}
diff --git a/pgjdbc/src/test/java/org/postgresql/core/CommandCompleteParserTest.java b/pgjdbc/src/test/java/org/postgresql/core/CommandCompleteParserTest.java
deleted file mode 100644
index 245e695..0000000
--- a/pgjdbc/src/test/java/org/postgresql/core/CommandCompleteParserTest.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright (c) 2018, PostgreSQL Global Development Group
- * See the LICENSE file in the project root for more information.
- */
-
-package org.postgresql.core;
-
-import static org.junit.jupiter.api.Assertions.assertEquals;
-
-import org.postgresql.util.PSQLException;
-
-import org.junit.jupiter.params.ParameterizedTest;
-import org.junit.jupiter.params.provider.MethodSource;
-
-import java.util.Arrays;
-
-public class CommandCompleteParserTest {
-  public static Iterable<Object[]> data() {
-    return Arrays.asList(new Object[][]{
-        {"SELECT 0", 0, 0},
-        {"SELECT -42", 0, 0},
-        {"SELECT", 0, 0},
-        {"", 0, 0},
-        {"A", 0, 0},
-        {"SELECT 42", 0, 42},
-        {"UPDATE 43 42", 43, 42},
-        {"UPDATE 43 " + Long.MAX_VALUE, 43, Long.MAX_VALUE},
-        {"UPDATE " + Long.MAX_VALUE + " " + Long.MAX_VALUE, Long.MAX_VALUE, Long.MAX_VALUE},
-        {"UPDATE " + (Long.MAX_VALUE / 10) + " " + (Long.MAX_VALUE / 10), (Long.MAX_VALUE / 10),
-            (Long.MAX_VALUE / 10)},
-        {"UPDATE " + (Long.MAX_VALUE / 100) + " " + (Long.MAX_VALUE / 100), (Long.MAX_VALUE / 100),
-            (Long.MAX_VALUE / 100)},
-        {"CREATE TABLE " + (Long.MAX_VALUE / 100) + " " + (Long.MAX_VALUE / 100),
-            (Long.MAX_VALUE / 100), (Long.MAX_VALUE / 100)},
-        {"CREATE TABLE", 0, 0},
-        {"CREATE OR DROP OR DELETE TABLE 42", 0, 42},
-    });
-  }
-
-  @MethodSource("data")
-  @ParameterizedTest(name = "input={0}, oid={1}, rows={2}")
-  void run(String input, long oid, long rows) throws PSQLException {
-    CommandCompleteParser expected = new CommandCompleteParser();
-    CommandCompleteParser actual = new CommandCompleteParser();
-    expected.set(oid, rows);
-    actual.parse(input);
-    assertEquals(expected, actual, input);
-  }
-}
diff --git a/pgjdbc/src/test/java/org/postgresql/core/OidToStringTest.java b/pgjdbc/src/test/java/org/postgresql/core/OidToStringTest.java
deleted file mode 100644
index 76d6ff4..0000000
--- a/pgjdbc/src/test/java/org/postgresql/core/OidToStringTest.java
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Copyright (c) 2018, PostgreSQL Global Development Group
- * See the LICENSE file in the project root for more information.
- */
-
-package org.postgresql.core;
-
-import static org.junit.jupiter.api.Assertions.assertEquals;
-
-import org.postgresql.util.PSQLException;
-
-import org.junit.jupiter.params.ParameterizedTest;
-import org.junit.jupiter.params.provider.MethodSource;
-
-import java.util.Arrays;
-
-public class OidToStringTest {
-  public static Iterable<Object[]> data() {
-    return Arrays.asList(new Object[][]{
-        {142, "XML"},
-        {0, "UNSPECIFIED"},
-        {-235, "<unknown:-235>"},
-    });
-  }
-
-  @MethodSource("data")
-  @ParameterizedTest(name = "expected={1}, value={0}")
-  void run(int value, String expected) throws PSQLException {
-    assertEquals(expected, Oid.toString(value));
-  }
-}
diff --git a/pgjdbc/src/test/java/org/postgresql/core/OidValueOfTest.java b/pgjdbc/src/test/java/org/postgresql/core/OidValueOfTest.java
deleted file mode 100644
index f72ad89..0000000
--- a/pgjdbc/src/test/java/org/postgresql/core/OidValueOfTest.java
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (c) 2018, PostgreSQL Global Development Group
- * See the LICENSE file in the project root for more information.
- */
-
-package org.postgresql.core;
-
-import static org.junit.jupiter.api.Assertions.assertEquals;
-
-import org.postgresql.util.PSQLException;
-
-import org.junit.jupiter.params.ParameterizedTest;
-import org.junit.jupiter.params.provider.MethodSource;
-
-import java.util.Arrays;
-
-public class OidValueOfTest {
-  public static Iterable<Object[]> data() {
-    return Arrays.asList(new Object[][]{
-        {25, "TEXT"},
-        {0, "UNSPECIFIED"},
-        {199, "JSON_ARRAY"},
-        {100, "100"},
-    });
-  }
-
-  @MethodSource("data")
-  @ParameterizedTest(name = "expected={0}, value={1}")
-  void run(int expected, String value) throws PSQLException {
-    assertEquals(expected, Oid.valueOf(value));
-  }
-}
diff --git a/pgjdbc/src/test/java/org/postgresql/core/OidValuesCorrectnessTest.java b/pgjdbc/src/test/java/org/postgresql/core/OidValuesCorrectnessTest.java
deleted file mode 100644
index 5446e62..0000000
--- a/pgjdbc/src/test/java/org/postgresql/core/OidValuesCorrectnessTest.java
+++ /dev/null
@@ -1,143 +0,0 @@
-/*
- * Copyright (c) 2020, PostgreSQL Global Development Group
- * See the LICENSE file in the project root for more information.
- */
-
-package org.postgresql.core;
-
-import org.postgresql.test.TestUtil;
-import org.postgresql.test.jdbc2.BaseTest4;
-
-import org.junit.Assert;
-import org.junit.Assume;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-
-import java.lang.reflect.Field;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Locale;
-import java.util.Map;
-
-/**
- * Test to check if values in Oid class are correct with Oid values in a database.
- */
-@RunWith(Parameterized.class)
-public class OidValuesCorrectnessTest extends BaseTest4 {
-
-  @Parameterized.Parameter(0)
-  public String oidName;
-  @Parameterized.Parameter(1)
-  public int oidValue;
-
-  /**
-   * List to contain names of all variables, which should be ignored by this test.
-   * Prevents situation that a new value will be added to Oid class with ignoring the test.
-   */
-  private static List<String> oidsToIgnore = Arrays.asList(
-      "UNSPECIFIED" //UNSPECIFIED isn't an Oid, it's a value to specify that Oid value is unspecified
-  );
-
-  /**
-   * Map to contain Oid names with server version of their support.
-   * Prevents that some Oid values will be tested with a database not supporting given Oid.
-   */
-  private static Map<String, ServerVersion> oidsMinimumVersions;
-
-  static {
-    oidsMinimumVersions = new HashMap<>();
-    oidsMinimumVersions.put("JSON", ServerVersion.v9_2);
-    oidsMinimumVersions.put("JSON_ARRAY", ServerVersion.v9_2);
-    oidsMinimumVersions.put("JSONB", ServerVersion.v9_4);
-    oidsMinimumVersions.put("JSONB_ARRAY", ServerVersion.v9_4);
-    oidsMinimumVersions.put("MACADDR8", ServerVersion.v10);
-  }
-
-  /**
-   * Map to contain Oid names with their proper names from pg_type table (typname) if they are different.
-   * Helps in situation when variable name in Oid class isn't the same as typname in pg_type table.
-   */
-  private static Map<String, String> oidTypeNames;
-
-  static {
-    oidTypeNames = new HashMap<>();
-    oidTypeNames.put("BOX_ARRAY", "_BOX");
-    oidTypeNames.put("INT2_ARRAY", "_INT2");
-    oidTypeNames.put("INT4_ARRAY", "_INT4");
-    oidTypeNames.put("INT8_ARRAY", "_INT8");
-    oidTypeNames.put("TEXT_ARRAY", "_TEXT");
-    oidTypeNames.put("NUMERIC_ARRAY", "_NUMERIC");
-    oidTypeNames.put("FLOAT4_ARRAY", "_FLOAT4");
-    oidTypeNames.put("FLOAT8_ARRAY", "_FLOAT8");
-    oidTypeNames.put("BOOL_ARRAY", "_BOOL");
-    oidTypeNames.put("DATE_ARRAY", "_DATE");
-    oidTypeNames.put("TIME_ARRAY", "_TIME");
-    oidTypeNames.put("TIMETZ_ARRAY", "_TIMETZ");
-    oidTypeNames.put("TIMESTAMP_ARRAY", "_TIMESTAMP");
-    oidTypeNames.put("TIMESTAMPTZ_ARRAY", "_TIMESTAMPTZ");
-    oidTypeNames.put("BYTEA_ARRAY", "_BYTEA");
-    oidTypeNames.put("VARCHAR_ARRAY", "_VARCHAR");
-    oidTypeNames.put("OID_ARRAY", "_OID");
-    oidTypeNames.put("BPCHAR_ARRAY", "_BPCHAR");
-    oidTypeNames.put("MONEY_ARRAY", "_MONEY");
-    oidTypeNames.put("NAME_ARRAY", "_NAME");
-    oidTypeNames.put("BIT_ARRAY", "_BIT");
-    oidTypeNames.put("INTERVAL_ARRAY", "_INTERVAl");
-    oidTypeNames.put("CHAR_ARRAY", "_CHAR");
-    oidTypeNames.put("VARBIT_ARRAY", "_VARBIT");
-    oidTypeNames.put("UUID_ARRAY", "_UUID");
-    oidTypeNames.put("XML_ARRAY", "_XML");
-    oidTypeNames.put("POINT_ARRAY", "_POINT");
-    oidTypeNames.put("JSONB_ARRAY", "_JSONB");
-    oidTypeNames.put("JSON_ARRAY", "_JSON");
-    oidTypeNames.put("REF_CURSOR", "REFCURSOR");
-    oidTypeNames.put("REF_CURSOR_ARRAY", "_REFCURSOR");
-  }
-
-  @Parameterized.Parameters(name = "oidName={0}, oidValue={1}")
-  public static Iterable<Object[]> data() throws IllegalAccessException {
-    Field[] fields = Oid.class.getFields();
-    List<Object[]> data = new ArrayList<>();
-
-    for (Field field : fields) {
-      if (!oidsToIgnore.contains(field.getName())) {
-        data.add(new Object[]{field.getName(), field.getInt(null)});
-      }
-    }
-
-    return data;
-  }
-
-  /**
-   * The testcase to check if expected value of Oid, read from a database, is the same as value
-   * written in the Oid class.
-   */
-  @Test
-  public void testValue() throws SQLException {
-    // check if Oid can be tested with given database by checking version
-    if (oidsMinimumVersions.containsKey(oidName)) {
-      Assume.assumeTrue(TestUtil.haveMinimumServerVersion(con, oidsMinimumVersions.get(oidName)));
-    }
-
-    String typeName = oidTypeNames.getOrDefault(oidName, oidName);
-
-    Statement stmt = con.createStatement();
-    ResultSet resultSet;
-    stmt.execute("select oid from pg_type where typname = '" + typeName.toLowerCase(Locale.ROOT) + "'");
-    resultSet = stmt.getResultSet();
-
-    // resultSet have to have next row
-    Assert.assertTrue("Oid value doesn't exist for oid " + oidName + ";with used type: " + typeName,
-        resultSet.next());
-    // check if expected value from a database is the same as value in Oid class
-    Assert.assertEquals("Wrong value for oid: " + oidName + ";with used type: " + typeName,
-        resultSet.getInt(1), oidValue);
-
-  }
-}
diff --git a/pgjdbc/src/test/java/org/postgresql/core/ParserTest.java b/pgjdbc/src/test/java/org/postgresql/core/ParserTest.java
deleted file mode 100644
index e903df2..0000000
--- a/pgjdbc/src/test/java/org/postgresql/core/ParserTest.java
+++ /dev/null
@@ -1,302 +0,0 @@
-/*
- * Copyright (c) 2003, PostgreSQL Global Development Group
- * See the LICENSE file in the project root for more information.
- */
-
-package org.postgresql.core;
-
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertFalse;
-import static org.junit.jupiter.api.Assertions.assertNotNull;
-import static org.junit.jupiter.api.Assertions.assertTrue;
-
-import org.postgresql.jdbc.EscapeSyntaxCallMode;
-
-import org.junit.jupiter.api.Disabled;
-import org.junit.jupiter.api.Test;
-
-import java.sql.SQLException;
-import java.util.List;
-
-/**
- * Test cases for the Parser.
- * @author Jeremy Whiting jwhiting@redhat.com
- */
-class ParserTest {
-
-  /**
-   * Test to make sure delete command is detected by parser and detected via
-   * api. Mix up the case of the command to check detection continues to work.
-   */
-  @Test
-  void deleteCommandParsing() {
-    char[] command = new char[6];
-    "DELETE".getChars(0, 6, command, 0);
-    assertTrue(Parser.parseDeleteKeyword(command, 0), "Failed to correctly parse upper case command.");
-    "DelEtE".getChars(0, 6, command, 0);
-    assertTrue(Parser.parseDeleteKeyword(command, 0), "Failed to correctly parse mixed case command.");
-    "deleteE".getChars(0, 6, command, 0);
-    assertTrue(Parser.parseDeleteKeyword(command, 0), "Failed to correctly parse mixed case command.");
-    "delete".getChars(0, 6, command, 0);
-    assertTrue(Parser.parseDeleteKeyword(command, 0), "Failed to correctly parse lower case command.");
-    "Delete".getChars(0, 6, command, 0);
-    assertTrue(Parser.parseDeleteKeyword(command, 0), "Failed to correctly parse mixed case command.");
-  }
-
-  /**
-   * Test UPDATE command parsing.
-   */
-  @Test
-  void updateCommandParsing() {
-    char[] command = new char[6];
-    "UPDATE".getChars(0, 6, command, 0);
-    assertTrue(Parser.parseUpdateKeyword(command, 0), "Failed to correctly parse upper case command.");
-    "UpDateE".getChars(0, 6, command, 0);
-    assertTrue(Parser.parseUpdateKeyword(command, 0), "Failed to correctly parse mixed case command.");
-    "updatE".getChars(0, 6, command, 0);
-    assertTrue(Parser.parseUpdateKeyword(command, 0), "Failed to correctly parse mixed case command.");
-    "Update".getChars(0, 6, command, 0);
-    assertTrue(Parser.parseUpdateKeyword(command, 0), "Failed to correctly parse mixed case command.");
-    "update".getChars(0, 6, command, 0);
-    assertTrue(Parser.parseUpdateKeyword(command, 0), "Failed to correctly parse lower case command.");
-  }
-
-  /**
-   * Test MOVE command parsing.
-   */
-  @Test
-  void moveCommandParsing() {
-    char[] command = new char[4];
-    "MOVE".getChars(0, 4, command, 0);
-    assertTrue(Parser.parseMoveKeyword(command, 0), "Failed to correctly parse upper case command.");
-    "mOVe".getChars(0, 4, command, 0);
-    assertTrue(Parser.parseMoveKeyword(command, 0), "Failed to correctly parse mixed case command.");
-    "movE".getChars(0, 4, command, 0);
-    assertTrue(Parser.parseMoveKeyword(command, 0), "Failed to correctly parse mixed case command.");
-    "Move".getChars(0, 4, command, 0);
-    assertTrue(Parser.parseMoveKeyword(command, 0), "Failed to correctly parse mixed case command.");
-    "move".getChars(0, 4, command, 0);
-    assertTrue(Parser.parseMoveKeyword(command, 0), "Failed to correctly parse lower case command.");
-  }
-
-  /**
-   * Test WITH command parsing.
-   */
-  @Test
-  void withCommandParsing() {
-    char[] command = new char[4];
-    "WITH".getChars(0, 4, command, 0);
-    assertTrue(Parser.parseWithKeyword(command, 0), "Failed to correctly parse upper case command.");
-    "wITh".getChars(0, 4, command, 0);
-    assertTrue(Parser.parseWithKeyword(command, 0), "Failed to correctly parse mixed case command.");
-    "witH".getChars(0, 4, command, 0);
-    assertTrue(Parser.parseWithKeyword(command, 0), "Failed to correctly parse mixed case command.");
-    "With".getChars(0, 4, command, 0);
-    assertTrue(Parser.parseWithKeyword(command, 0), "Failed to correctly parse mixed case command.");
-    "with".getChars(0, 4, command, 0);
-    assertTrue(Parser.parseWithKeyword(command, 0), "Failed to correctly parse lower case command.");
-  }
-
-  /**
-   * Test SELECT command parsing.
-   */
-  @Test
-  void selectCommandParsing() {
-    char[] command = new char[6];
-    "SELECT".getChars(0, 6, command, 0);
-    assertTrue(Parser.parseSelectKeyword(command, 0), "Failed to correctly parse upper case command.");
-    "sELect".getChars(0, 6, command, 0);
-    assertTrue(Parser.parseSelectKeyword(command, 0), "Failed to correctly parse mixed case command.");
-    "selecT".getChars(0, 6, command, 0);
-    assertTrue(Parser.parseSelectKeyword(command, 0), "Failed to correctly parse mixed case command.");
-    "Select".getChars(0, 6, command, 0);
-    assertTrue(Parser.parseSelectKeyword(command, 0), "Failed to correctly parse mixed case command.");
-    "select".getChars(0, 6, command, 0);
-    assertTrue(Parser.parseSelectKeyword(command, 0), "Failed to correctly parse lower case command.");
-  }
-
-  @Test
-  void escapeProcessing() throws Exception {
-    assertEquals("DATE '1999-01-09'", Parser.replaceProcessing("{d '1999-01-09'}", true, false));
-    assertEquals("DATE '1999-01-09'", Parser.replaceProcessing("{D  '1999-01-09'}", true, false));
-    assertEquals("TIME '20:00:03'", Parser.replaceProcessing("{t '20:00:03'}", true, false));
-    assertEquals("TIME '20:00:03'", Parser.replaceProcessing("{T '20:00:03'}", true, false));
-    assertEquals("TIMESTAMP '1999-01-09 20:11:11.123455'", Parser.replaceProcessing("{ts '1999-01-09 20:11:11.123455'}", true, false));
-    assertEquals("TIMESTAMP '1999-01-09 20:11:11.123455'", Parser.replaceProcessing("{Ts '1999-01-09 20:11:11.123455'}", true, false));
-
-    assertEquals("user", Parser.replaceProcessing("{fn user()}", true, false));
-    assertEquals("cos(1)", Parser.replaceProcessing("{fn cos(1)}", true, false));
-    assertEquals("extract(week from DATE '2005-01-24')", Parser.replaceProcessing("{fn week({d '2005-01-24'})}", true, false));
-
-    assertEquals("\"T1\" LEFT OUTER JOIN t2 ON \"T1\".id = t2.id",
-            Parser.replaceProcessing("{oj \"T1\" LEFT OUTER JOIN t2 ON \"T1\".id = t2.id}", true, false));
-
-    assertEquals("ESCAPE '_'", Parser.replaceProcessing("{escape '_'}", true, false));
-
-    // nothing should be changed in that case, no valid escape code
-    assertEquals("{obj : 1}", Parser.replaceProcessing("{obj : 1}", true, false));
-  }
-
-  @Test
-  void modifyJdbcCall() throws SQLException {
-    assertEquals("select * from pack_getValue(?) as result", Parser.modifyJdbcCall("{ ? = call pack_getValue}", true, ServerVersion.v9_6.getVersionNum(), 3, EscapeSyntaxCallMode.SELECT).getSql());
-    assertEquals("select * from pack_getValue(?,?)  as result", Parser.modifyJdbcCall("{ ? = call pack_getValue(?) }", true, ServerVersion.v9_6.getVersionNum(), 3, EscapeSyntaxCallMode.SELECT).getSql());
-    assertEquals("select * from pack_getValue(?) as result", Parser.modifyJdbcCall("{ ? = call pack_getValue()}", true, ServerVersion.v9_6.getVersionNum(), 3, EscapeSyntaxCallMode.SELECT).getSql());
-    assertEquals("select * from pack_getValue(?,?,?,?)  as result", Parser.modifyJdbcCall("{ ? = call pack_getValue(?,?,?) }", true, ServerVersion.v9_6.getVersionNum(), 3, EscapeSyntaxCallMode.SELECT).getSql());
-    assertEquals("select * from lower(?,?) as result", Parser.modifyJdbcCall("{ ? = call lower(?)}", true, ServerVersion.v9_6.getVersionNum(), 3, EscapeSyntaxCallMode.SELECT).getSql());
-    assertEquals("select * from lower(?,?) as result", Parser.modifyJdbcCall("{ ? = call lower(?)}", true, ServerVersion.v9_6.getVersionNum(), 3, EscapeSyntaxCallMode.CALL_IF_NO_RETURN).getSql());
-    assertEquals("select * from lower(?,?) as result", Parser.modifyJdbcCall("{ ? = call lower(?)}", true, ServerVersion.v9_6.getVersionNum(), 3, EscapeSyntaxCallMode.CALL).getSql());
-    assertEquals("select * from lower(?,?) as result", Parser.modifyJdbcCall("{call lower(?,?)}", true, ServerVersion.v9_6.getVersionNum(), 3, EscapeSyntaxCallMode.SELECT).getSql());
-    assertEquals("select * from lower(?,?) as result", Parser.modifyJdbcCall("{call lower(?,?)}", true, ServerVersion.v9_6.getVersionNum(), 3, EscapeSyntaxCallMode.CALL_IF_NO_RETURN).getSql());
-    assertEquals("select * from lower(?,?) as result", Parser.modifyJdbcCall("{call lower(?,?)}", true, ServerVersion.v9_6.getVersionNum(), 3, EscapeSyntaxCallMode.CALL).getSql());
-    assertEquals("select * from lower(?,?) as result", Parser.modifyJdbcCall("{ ? = call lower(?)}", true, ServerVersion.v11.getVersionNum(), 3, EscapeSyntaxCallMode.SELECT).getSql());
-    assertEquals("select * from lower(?,?) as result", Parser.modifyJdbcCall("{ ? = call lower(?)}", true, ServerVersion.v11.getVersionNum(), 3, EscapeSyntaxCallMode.CALL_IF_NO_RETURN).getSql());
-    assertEquals("call lower(?,?)", Parser.modifyJdbcCall("{ ? = call lower(?)}", true, ServerVersion.v11.getVersionNum(), 3, EscapeSyntaxCallMode.CALL).getSql());
-    assertEquals("select * from lower(?,?) as result", Parser.modifyJdbcCall("{call lower(?,?)}", true, ServerVersion.v11.getVersionNum(), 3, EscapeSyntaxCallMode.SELECT).getSql());
-    assertEquals("call lower(?,?)", Parser.modifyJdbcCall("{call lower(?,?)}", true, ServerVersion.v11.getVersionNum(), 3, EscapeSyntaxCallMode.CALL_IF_NO_RETURN).getSql());
-    assertEquals("call lower(?,?)", Parser.modifyJdbcCall("{call lower(?,?)}", true, ServerVersion.v11.getVersionNum(), 3, EscapeSyntaxCallMode.CALL).getSql());
-  }
-
-  @Test
-  void unterminatedEscape() throws Exception {
-    assertEquals("{oj ", Parser.replaceProcessing("{oj ", true, false));
-  }
-
-  @Test
-  @Disabled(value = "returning in the select clause is hard to distinguish from insert ... returning *")
-  void insertSelectFakeReturning() throws SQLException {
-    String query =
-        "insert test(id, name) select 1, 'value' as RETURNING from test2";
-    List<NativeQuery> qry =
-        Parser.parseJdbcSql(
-            query, true, true, true, true, true);
-    boolean returningKeywordPresent = qry.get(0).command.isReturningKeywordPresent();
-    assertFalse(returningKeywordPresent, "Query does not have returning clause " + query);
-  }
-
-  @Test
-  void insertSelectReturning() throws SQLException {
-    String query =
-        "insert test(id, name) select 1, 'value' from test2 RETURNING id";
-    List<NativeQuery> qry =
-        Parser.parseJdbcSql(
-            query, true, true, true, true, true);
-    boolean returningKeywordPresent = qry.get(0).command.isReturningKeywordPresent();
-    assertTrue(returningKeywordPresent, "Query has a returning clause " + query);
-  }
-
-  @Test
-  void insertReturningInWith() throws SQLException {
-    String query =
-        "with x as (insert into mytab(x) values(1) returning x) insert test(id, name) select 1, 'value' from test2";
-    List<NativeQuery> qry =
-        Parser.parseJdbcSql(
-            query, true, true, true, true, true);
-    boolean returningKeywordPresent = qry.get(0).command.isReturningKeywordPresent();
-    assertFalse(returningKeywordPresent, "There's no top-level <<returning>> clause " + query);
-  }
-
-  @Test
-  void insertBatchedReWriteOnConflict() throws SQLException {
-    String query = "insert into test(id, name) values (:id,:name) ON CONFLICT (id) DO NOTHING";
-    List<NativeQuery> qry = Parser.parseJdbcSql(query, true, true, true, true, true);
-    SqlCommand command = qry.get(0).getCommand();
-    assertEquals(34, command.getBatchRewriteValuesBraceOpenPosition());
-    assertEquals(44, command.getBatchRewriteValuesBraceClosePosition());
-  }
-
-  @Test
-  void insertBatchedReWriteOnConflictUpdateBind() throws SQLException {
-    String query = "insert into test(id, name) values (?,?) ON CONFLICT (id) UPDATE SET name=?";
-    List<NativeQuery> qry = Parser.parseJdbcSql(query, true, true, true, true, true);
-    SqlCommand command = qry.get(0).getCommand();
-    assertFalse(command.isBatchedReWriteCompatible(), "update set name=? is NOT compatible with insert rewrite");
-  }
-
-  @Test
-  void insertBatchedReWriteOnConflictUpdateConstant() throws SQLException {
-    String query = "insert into test(id, name) values (?,?) ON CONFLICT (id) UPDATE SET name='default'";
-    List<NativeQuery> qry = Parser.parseJdbcSql(query, true, true, true, true, true);
-    SqlCommand command = qry.get(0).getCommand();
-    assertTrue(command.isBatchedReWriteCompatible(), "update set name='default' is compatible with insert rewrite");
-  }
-
-  @Test
-  void insertMultiInsert() throws SQLException {
-    String query =
-        "insert into test(id, name) values (:id,:name),(:id,:name) ON CONFLICT (id) DO NOTHING";
-    List<NativeQuery> qry = Parser.parseJdbcSql(query, true, true, true, true, true);
-    SqlCommand command = qry.get(0).getCommand();
-    assertEquals(34, command.getBatchRewriteValuesBraceOpenPosition());
-    assertEquals(56, command.getBatchRewriteValuesBraceClosePosition());
-  }
-
-  @Test
-  void valuesTableParse() throws SQLException {
-    String query = "insert into values_table (id, name) values (?,?)";
-    List<NativeQuery> qry = Parser.parseJdbcSql(query, true, true, true, true, true);
-    SqlCommand command = qry.get(0).getCommand();
-    assertEquals(43, command.getBatchRewriteValuesBraceOpenPosition());
-    assertEquals(49, command.getBatchRewriteValuesBraceClosePosition());
-
-    query = "insert into table_values (id, name) values (?,?)";
-    qry = Parser.parseJdbcSql(query, true, true, true, true, true);
-    command = qry.get(0).getCommand();
-    assertEquals(43, command.getBatchRewriteValuesBraceOpenPosition());
-    assertEquals(49, command.getBatchRewriteValuesBraceClosePosition());
-  }
-
-  @Test
-  void createTableParseWithOnDeleteClause() throws SQLException {
-    String[] returningColumns = {"*"};
-    String query = "create table \"testTable\" (\"id\" INT SERIAL NOT NULL PRIMARY KEY, \"foreignId\" INT REFERENCES \"otherTable\" (\"id\") ON DELETE NO ACTION)";
-    List<NativeQuery> qry = Parser.parseJdbcSql(query, true, true, true, true, true, returningColumns);
-    SqlCommand command = qry.get(0).getCommand();
-    assertFalse(command.isReturningKeywordPresent(), "No returning keyword should be present");
-    assertEquals(SqlCommandType.CREATE, command.getType());
-  }
-
-  @Test
-  void createTableParseWithOnUpdateClause() throws SQLException {
-    String[] returningColumns = {"*"};
-    String query = "create table \"testTable\" (\"id\" INT SERIAL NOT NULL PRIMARY KEY, \"foreignId\" INT REFERENCES \"otherTable\" (\"id\")) ON UPDATE NO ACTION";
-    List<NativeQuery> qry = Parser.parseJdbcSql(query, true, true, true, true, true, returningColumns);
-    SqlCommand command = qry.get(0).getCommand();
-    assertFalse(command.isReturningKeywordPresent(), "No returning keyword should be present");
-    assertEquals(SqlCommandType.CREATE, command.getType());
-  }
-
-  @Test
-  void alterTableParseWithOnDeleteClause() throws SQLException {
-    String[] returningColumns = {"*"};
-    String query = "alter table \"testTable\" ADD \"foreignId\" INT REFERENCES \"otherTable\" (\"id\") ON DELETE NO ACTION";
-    List<NativeQuery> qry = Parser.parseJdbcSql(query, true, true, true, true, true, returningColumns);
-    SqlCommand command = qry.get(0).getCommand();
-    assertFalse(command.isReturningKeywordPresent(), "No returning keyword should be present");
-    assertEquals(SqlCommandType.ALTER, command.getType());
-  }
-
-  @Test
-  void alterTableParseWithOnUpdateClause() throws SQLException {
-    String[] returningColumns = {"*"};
-    String query = "alter table \"testTable\" ADD \"foreignId\" INT REFERENCES \"otherTable\" (\"id\") ON UPDATE RESTRICT";
-    List<NativeQuery> qry = Parser.parseJdbcSql(query, true, true, true, true, true, returningColumns);
-    SqlCommand command = qry.get(0).getCommand();
-    assertFalse(command.isReturningKeywordPresent(), "No returning keyword should be present");
-    assertEquals(SqlCommandType.ALTER, command.getType());
-  }
-
-  @Test
-  void parseV14functions() throws SQLException {
-    String[] returningColumns = {"*"};
-    String query = "CREATE OR REPLACE FUNCTION asterisks(n int)\n"
-        + "  RETURNS SETOF text\n"
-        + "  LANGUAGE sql IMMUTABLE STRICT PARALLEL SAFE\n"
-        + "BEGIN ATOMIC\n"
-        + "SELECT repeat('*', g) FROM generate_series (1, n) g; \n"
-        + "END;";
-    List<NativeQuery> qry = Parser.parseJdbcSql(query, true, true, true, true, true, returningColumns);
-    assertNotNull(qry);
-    assertEquals(1, qry.size(), "There should only be one query returned here");
-  }
-}
diff --git a/pgjdbc/src/test/java/org/postgresql/core/ReturningParserTest.java b/pgjdbc/src/test/java/org/postgresql/core/ReturningParserTest.java
deleted file mode 100644
index 3b8292e..0000000
--- a/pgjdbc/src/test/java/org/postgresql/core/ReturningParserTest.java
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Copyright (c) 2003, PostgreSQL Global Development Group
- * See the LICENSE file in the project root for more information.
- */
-
-package org.postgresql.core;
-
-import static org.junit.jupiter.api.Assertions.assertEquals;
-
-import org.junit.jupiter.params.ParameterizedTest;
-import org.junit.jupiter.params.provider.MethodSource;
-
-import java.sql.SQLException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-
-public class ReturningParserTest {
-  public static Iterable<Object[]> data() {
-    Collection<Object[]> ids = new ArrayList<>();
-
-    String[] delimiters = {"", "_", "3", "*", " "};
-
-    for (String columnName : new String[]{"returning", "returningreturning"}) {
-      for (String prefix : delimiters) {
-        for (String suffix : delimiters) {
-          for (String returning : new String[]{"returning", "returningreturning"}) {
-            ids.add(new Object[]{columnName, returning, prefix, suffix});
-          }
-        }
-      }
-    }
-    return ids;
-  }
-
-  @MethodSource("data")
-  @ParameterizedTest(name = "columnName={2} {0} {3}, returning={2} {1} {3}")
-  void test(String columnName, String returning, String prefix, String suffix) throws SQLException {
-    String query =
-        "insert into\"prep\"(a, " + prefix + columnName + suffix + ")values(1,2)" + prefix
-            + returning + suffix;
-    List<NativeQuery> qry = Parser.parseJdbcSql(query, true, true, true, true, true);
-    boolean returningKeywordPresent = qry.get(0).command.isReturningKeywordPresent();
-
-    boolean expectedReturning = "returning".equalsIgnoreCase(returning)
-        && (prefix.isEmpty() || !Character.isJavaIdentifierStart(prefix.charAt(0)))
-        && (suffix.isEmpty() || !Character.isJavaIdentifierPart(suffix.charAt(0)));
-    if (expectedReturning != returningKeywordPresent) {
-      assertEquals(expectedReturning,
-          returningKeywordPresent,
-          "Wrong <returning_clause> detected in SQL " + query);
-    }
-  }
-
-}
diff --git a/pgjdbc/src/test/java/org/postgresql/core/UTF8EncodingTest.java b/pgjdbc/src/test/java/org/postgresql/core/UTF8EncodingTest.java
deleted file mode 100644
index 9e3e846..0000000
--- a/pgjdbc/src/test/java/org/postgresql/core/UTF8EncodingTest.java
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Copyright (c) 2019, PostgreSQL Global Development Group
- * See the LICENSE file in the project root for more information.
- */
-
-package org.postgresql.core;
-
-import static org.junit.jupiter.api.Assertions.assertEquals;
-
-import org.junit.jupiter.params.ParameterizedTest;
-import org.junit.jupiter.params.provider.MethodSource;
-
-import java.util.ArrayList;
-import java.util.List;
-
-public class UTF8EncodingTest {
-
-  private static final int STEP = 8 * 1024;
-
-  public static Iterable<Object[]> data() {
-    final StringBuilder reallyLongString = new StringBuilder(1024 * 1024);
-    for (int i = 0; i < 185000; i++) {
-      reallyLongString.append(i);
-    }
-
-    final List<String> strings = new ArrayList<>(150);
-    strings.add("short simple");
-    strings.add("longer but still not really all that long");
-    strings.add(reallyLongString.toString());
-    strings.add(reallyLongString.append('\u03C0').toString()); // add multi-byte to end of a long string
-    strings.add(reallyLongString.delete((32 * 1024) + 5, reallyLongString.capacity() - 1).toString());
-    strings.add(reallyLongString.append('\u00DC').toString()); // add high order char to end of mid length string
-    strings.add(reallyLongString.delete((16 * 1024) + 5, reallyLongString.capacity() - 1).toString());
-    strings.add(reallyLongString.append('\u00DD').toString()); // add high order char to end of mid length string
-    strings.add("e\u00E4t \u03A3 \u03C0 \u798F, it is good"); // need to test some multi-byte characters
-
-    for (int i = 1; i < 0xd800; i += STEP) {
-      int count = (i + STEP) > 0xd800 ? 0xd800 - i : STEP;
-      char[] testChars = new char[count];
-      for (int j = 0; j < count; j++) {
-        testChars[j] = (char) (i + j);
-      }
-
-      strings.add(new String(testChars));
-    }
-
-    for (int i = 0xe000; i < 0x10000; i += STEP) {
-      int count = (i + STEP) > 0x10000 ? 0x10000 - i : STEP;
-      char[] testChars = new char[count];
-      for (int j = 0; j < count; j++) {
-        testChars[j] = (char) (i + j);
-      }
-
-      strings.add(new String(testChars));
-    }
-
-    for (int i = 0x10000; i < 0x110000; i += STEP) {
-      int count = (i + STEP) > 0x110000 ? 0x110000 - i : STEP;
-      char[] testChars = new char[count * 2];
-      for (int j = 0; j < count; j++) {
-        testChars[j * 2] = (char) (0xd800 + ((i + j - 0x10000) >> 10));
-        testChars[j * 2 + 1] = (char) (0xdc00 + ((i + j - 0x10000) & 0x3ff));
-      }
-
-      strings.add(new String(testChars));
-    }
-
-    final List<Object[]> data = new ArrayList<>(strings.size() * 2);
-    for (String string : strings) {
-      String shortString = string;
-      if (shortString != null && shortString.length() > 1000) {
-        shortString = shortString.substring(0, 100) + "...(" + string.length() + " chars)";
-      }
-      data.add(new Object[]{Encoding.getDatabaseEncoding("UNICODE"), string, shortString});
-    }
-    return data;
-  }
-
-  @MethodSource("data")
-  @ParameterizedTest(name = "string={2}, encoding={0}")
-  void test(Encoding encoding, String string, String shortString) throws Exception {
-    final byte[] encoded = encoding.encode(string);
-    assertEquals(string, encoding.decode(encoded));
-  }
-}
diff --git a/pgjdbc/src/test/java/org/postgresql/core/v3/V3ParameterListTests.java b/pgjdbc/src/test/java/org/postgresql/core/v3/V3ParameterListTests.java
deleted file mode 100644
index e26d5e4..0000000
--- a/pgjdbc/src/test/java/org/postgresql/core/v3/V3ParameterListTests.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright (c) 2003, PostgreSQL Global Development Group
- * See the LICENSE file in the project root for more information.
- */
-
-package org.postgresql.core.v3;
-
-import static org.junit.jupiter.api.Assertions.assertEquals;
-
-import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.Test;
-
-import java.sql.SQLException;
-
-/**
- * Test cases to make sure the parameterlist implementation works as expected.
- *
- * @author Jeremy Whiting jwhiting@redhat.com
- *
- */
-class V3ParameterListTests {
-  private TypeTransferModeRegistry transferModeRegistry;
-
-  @BeforeEach
-  void setUp() throws Exception {
-    transferModeRegistry = new TypeTransferModeRegistry() {
-        @Override
-        public boolean useBinaryForSend(int oid) {
-          return false;
-        }
-
-        @Override
-        public boolean useBinaryForReceive(int oid) {
-          return false;
-        }
-    };
-  }
-
-  /**
-   * Test to check the merging of two collections of parameters. All elements
-   * are kept.
-   *
-   * @throws SQLException
-   *           raised exception if setting parameter fails.
-   */
-  @Test
-  void mergeOfParameterLists() throws SQLException {
-    SimpleParameterList s1SPL = new SimpleParameterList(8, transferModeRegistry);
-    s1SPL.setIntParameter(1, 1);
-    s1SPL.setIntParameter(2, 2);
-    s1SPL.setIntParameter(3, 3);
-    s1SPL.setIntParameter(4, 4);
-
-    SimpleParameterList s2SPL = new SimpleParameterList(4, transferModeRegistry);
-    s2SPL.setIntParameter(1, 5);
-    s2SPL.setIntParameter(2, 6);
-    s2SPL.setIntParameter(3, 7);
-    s2SPL.setIntParameter(4, 8);
-
-    s1SPL.appendAll(s2SPL);
-    assertEquals(
-        "<[('1'::int4) ,('2'::int4) ,('3'::int4) ,('4'::int4) ,('5'::int4) ,('6'::int4) ,('7'::int4) ,('8'::int4)]>", s1SPL.toString(), "Expected string representation of values does not match outcome.");
-  }
-}
diff --git a/pgjdbc/src/test/java/org/postgresql/core/v3/adaptivefetch/AdaptiveFetchCacheTest.java b/pgjdbc/src/test/java/org/postgresql/core/v3/adaptivefetch/AdaptiveFetchCacheTest.java
deleted file mode 100644
index 42e2c2c..0000000
--- a/pgjdbc/src/test/java/org/postgresql/core/v3/adaptivefetch/AdaptiveFetchCacheTest.java
+++ /dev/null
@@ -1,1088 +0,0 @@
-/*
- * Copyright (c) 2020, PostgreSQL Global Development Group
- * See the LICENSE file in the project root for more information.
- */
-
-package org.postgresql.core.v3.adaptivefetch;
-
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertFalse;
-import static org.junit.jupiter.api.Assertions.assertNotNull;
-import static org.junit.jupiter.api.Assertions.assertNull;
-
-import org.postgresql.PGProperty;
-import org.postgresql.core.ParameterList;
-import org.postgresql.core.Query;
-import org.postgresql.core.SqlCommand;
-
-import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.Test;
-
-import java.lang.reflect.Field;
-import java.sql.SQLException;
-import java.util.Map;
-import java.util.Properties;
-
-/**
- * Unit tests for AdaptiveFetchCache class.
- */
-class AdaptiveFetchCacheTest {
-
-  private AdaptiveFetchCache adaptiveFetchCache;
-  private int size;
-
-  // Strings containing variables names in AdaptiveFetchCache class
-  private static final String infoMapVariableName = "adaptiveFetchInfoMap";
-  private static final String minimumSizeVariableName = "minimumAdaptiveFetchSize";
-  private static final String maximumSizeVariableName = "maximumAdaptiveFetchSize";
-  private static final String adaptiveFetchVariableName = "adaptiveFetch";
-  private static final String maximumBufferSizeVariableName = "maximumResultBufferSize";
-
-  /**
-   * Simple setup to create new AdaptiveFetchCache with buffer size 1000.
-   */
-  @BeforeEach
-  void setUp() throws SQLException {
-    Properties properties = new Properties();
-    size = 1000;
-    adaptiveFetchCache = new AdaptiveFetchCache(size, properties);
-  }
-
-  /**
-   * Tests for calling constructor with empty properties (just asserts after setUp).
-   */
-  @Test
-  void constructorDefault() throws NoSuchFieldException, IllegalAccessException {
-    assertNotNull(getInfoMapVariable());
-    assertEquals(size, getMaximumBufferVariable());
-    assertFalse(getAdaptiveFetchVariable());
-    assertEquals(0, getMinimumSizeVariable());
-    assertEquals(-1, getMaximumSizeVariable());
-  }
-
-  /**
-   * Test for calling constructor with information about adaptiveFetch property.
-   */
-  @Test
-  void constructorWithAdaptiveFetch()
-      throws SQLException, NoSuchFieldException, IllegalAccessException {
-    Properties properties = new Properties();
-    boolean expectedValue = true;
-    PGProperty.ADAPTIVE_FETCH.set(properties, expectedValue);
-
-    adaptiveFetchCache = new AdaptiveFetchCache(size, properties);
-
-    assertNotNull(getInfoMapVariable());
-    assertEquals(size, getMaximumBufferVariable());
-    assertEquals(expectedValue, getAdaptiveFetchVariable());
-    assertEquals(0, getMinimumSizeVariable());
-    assertEquals(-1, getMaximumSizeVariable());
-  }
-
-  /**
-   * Test for calling constructor with information about adaptiveFetchMinimum property.
-   */
-  @Test
-  void constructorWithMinimumSize()
-      throws SQLException, NoSuchFieldException, IllegalAccessException {
-    Properties properties = new Properties();
-    int expectedValue = 100;
-    PGProperty.ADAPTIVE_FETCH_MINIMUM.set(properties, expectedValue);
-
-    adaptiveFetchCache = new AdaptiveFetchCache(size, properties);
-
-    assertNotNull(getInfoMapVariable());
-    assertEquals(size, getMaximumBufferVariable());
-    assertFalse(getAdaptiveFetchVariable());
-    assertEquals(expectedValue, getMinimumSizeVariable());
-    assertEquals(-1, getMaximumSizeVariable());
-  }
-
-  /**
-   * Test for calling constructor with information about adaptiveFetchMaximum property.
-   */
-  @Test
-  void constructorWithMaximumSize()
-      throws SQLException, NoSuchFieldException, IllegalAccessException {
-    Properties properties = new Properties();
-    int expectedValue = 100;
-    PGProperty.ADAPTIVE_FETCH_MAXIMUM.set(properties, expectedValue);
-
-    adaptiveFetchCache = new AdaptiveFetchCache(size, properties);
-
-    assertNotNull(getInfoMapVariable());
-    assertEquals(size, getMaximumBufferVariable());
-    assertFalse(getAdaptiveFetchVariable());
-    assertEquals(0, getMinimumSizeVariable());
-    assertEquals(expectedValue, getMaximumSizeVariable());
-  }
-
-  /**
-   * Test for calling constructor with information about adaptiveFetch, adaptiveFetchMinimum and
-   * adaptiveFetchMaximum properties.
-   */
-  @Test
-  void constructorWithAllProperties()
-      throws SQLException, NoSuchFieldException, IllegalAccessException {
-    Properties properties = new Properties();
-    boolean expectedAdaptiveFetchValue = false;
-    int expectedMinimumSizeValue = 70;
-    int expectedMaximumSizeValue = 130;
-    PGProperty.ADAPTIVE_FETCH.set(properties, expectedAdaptiveFetchValue);
-    PGProperty.ADAPTIVE_FETCH_MINIMUM.set(properties, expectedMinimumSizeValue);
-    PGProperty.ADAPTIVE_FETCH_MAXIMUM.set(properties, expectedMaximumSizeValue);
-
-    adaptiveFetchCache = new AdaptiveFetchCache(size, properties);
-
-    assertNotNull(getInfoMapVariable());
-    assertEquals(size, getMaximumBufferVariable());
-    assertEquals(expectedAdaptiveFetchValue, getAdaptiveFetchVariable());
-    assertEquals(expectedMinimumSizeValue, getMinimumSizeVariable());
-    assertEquals(expectedMaximumSizeValue, getMaximumSizeVariable());
-  }
-
-
-  /**
-   * Test for calling addNewQuery method.
-   */
-  @Test
-  void addingSingleQuery() throws NoSuchFieldException, IllegalAccessException {
-    String expectedQuery = "test-query";
-    boolean adaptiveFetch = true;
-
-    adaptiveFetchCache.addNewQuery(adaptiveFetch, new MockUpQuery(expectedQuery));
-
-    Map<String, AdaptiveFetchCacheEntry> map = getInfoMapVariable();
-
-    assertEquals(1, map.size());
-    assertNotNull(map.get(expectedQuery));
-  }
-
-  /**
-   * Test for calling addNewQuery method, but adaptiveFetch is set to false.
-   */
-  @Test
-  void addingSingleQueryWithoutAdaptiveFetch()
-      throws NoSuchFieldException, IllegalAccessException {
-    String expectedQuery = "test-query";
-    boolean adaptiveFetch = false;
-
-    adaptiveFetchCache.addNewQuery(adaptiveFetch, new MockUpQuery(expectedQuery));
-
-    Map<String, AdaptiveFetchCacheEntry> map = getInfoMapVariable();
-
-    assertEquals(0, map.size());
-    assertNull(map.get(expectedQuery));
-  }
-
-  /**
-   * Test for calling addNewQuery method twice with the same query. The query should be added only
-   * once, with counter set as 2.
-   */
-  @Test
-  void addingSameQueryTwoTimes() throws NoSuchFieldException, IllegalAccessException {
-    String expectedQuery = "test-query";
-    boolean adaptiveFetch = true;
-
-    adaptiveFetchCache.addNewQuery(adaptiveFetch, new MockUpQuery(expectedQuery));
-    adaptiveFetchCache.addNewQuery(adaptiveFetch, new MockUpQuery(expectedQuery));
-
-    Map<String, AdaptiveFetchCacheEntry> map = getInfoMapVariable();
-
-    assertEquals(1, map.size());
-    assertNotNull(map.get(expectedQuery));
-    assertEquals(2, map.get(expectedQuery).getCounter());
-  }
-
-  /**
-   * Test for calling addNewQuery method twice with the same query, but with adaptiveFetch is set to
-   * false. The query shouldn't be added.
-   */
-  @Test
-  void addingSameQueryTwoTimesWithoutAdaptiveFetch()
-      throws NoSuchFieldException, IllegalAccessException {
-    String expectedQuery = "test-query";
-    boolean adaptiveFetch = false;
-
-    adaptiveFetchCache.addNewQuery(adaptiveFetch, new MockUpQuery(expectedQuery));
-    adaptiveFetchCache.addNewQuery(adaptiveFetch, new MockUpQuery(expectedQuery));
-
-    Map<String, AdaptiveFetchCacheEntry> map = getInfoMapVariable();
-
-    assertEquals(0, map.size());
-    assertNull(map.get(expectedQuery));
-  }
-
-  /**
-   * Test for calling addNewQuery method twice with different queries. Both queries should be
-   * added.
-   */
-  @Test
-  void addingTwoDifferentQueries() throws NoSuchFieldException, IllegalAccessException {
-    String expectedQuery = "test-query-1";
-    String expectedQuery2 = "test-query-2";
-    boolean adaptiveFetch = true;
-
-    adaptiveFetchCache.addNewQuery(adaptiveFetch, new MockUpQuery(expectedQuery));
-    adaptiveFetchCache.addNewQuery(adaptiveFetch, new MockUpQuery(expectedQuery2));
-
-    Map<String, AdaptiveFetchCacheEntry> map = getInfoMapVariable();
-
-    assertEquals(2, map.size());
-    assertNotNull(map.get(expectedQuery));
-    assertEquals(1, map.get(expectedQuery).getCounter());
-    assertNotNull(map.get(expectedQuery2));
-    assertEquals(1, map.get(expectedQuery).getCounter());
-  }
-
-  /**
-   * Test for calling addNewQuery method twice with different queries, but adaptiveFetch is set to
-   * false. Both queries shouldn't be added.
-   */
-  @Test
-  void addingTwoDifferentQueriesWithoutAdaptiveFetch()
-      throws NoSuchFieldException, IllegalAccessException {
-    String expectedQuery = "test-query-1";
-    String expectedQuery2 = "test-query-2";
-    boolean adaptiveFetch = false;
-
-    adaptiveFetchCache.addNewQuery(adaptiveFetch, new MockUpQuery(expectedQuery));
-    adaptiveFetchCache.addNewQuery(adaptiveFetch, new MockUpQuery(expectedQuery2));
-
-    Map<String, AdaptiveFetchCacheEntry> map = getInfoMapVariable();
-
-    assertEquals(0, map.size());
-    assertNull(map.get(expectedQuery));
-  }
-
-  /**
-   * Test for calling getAdaptiveFetch method with value true.
-   */
-  @Test
-  void gettingAdaptiveFetchIfTrue()
-      throws NoSuchFieldException, IllegalAccessException {
-    boolean expectedResult = true;
-
-    setAdaptiveFetchVariable(expectedResult);
-
-    assertEquals(expectedResult, adaptiveFetchCache.getAdaptiveFetch());
-  }
-
-  /**
-   * Test for calling getAdaptiveFetch method with value false.
-   */
-  @Test
-  void gettingAdaptiveFetchIfFalse()
-      throws NoSuchFieldException, IllegalAccessException {
-    boolean expectedResult = false;
-
-    setAdaptiveFetchVariable(expectedResult);
-
-    assertEquals(expectedResult, adaptiveFetchCache.getAdaptiveFetch());
-  }
-
-  /**
-   * Test for calling getFetchSizeForQuery method for not existing query. Should return value -1.
-   */
-  @Test
-  void gettingFetchSizeForNotExistingQuery() {
-    String expectedQuery = "test-query";
-    boolean adaptiveFetch = true;
-
-    int resultSize = adaptiveFetchCache
-        .getFetchSizeForQuery(adaptiveFetch, new MockUpQuery(expectedQuery));
-
-    assertEquals(-1, resultSize);
-  }
-
-  /**
-   * Test for calling getFetchSizeForQuery method for not existing query, but adaptiveFetch is set
-   * to false. Should return value -1.
-   */
-  @Test
-  void gettingFetchSizeForNotExistingQueryIfAdaptiveFetchFalse() {
-    String expectedQuery = "test-query";
-    boolean adaptiveFetch = false;
-
-    int resultSize = adaptiveFetchCache
-        .getFetchSizeForQuery(adaptiveFetch, new MockUpQuery(expectedQuery));
-
-    assertEquals(-1, resultSize);
-  }
-
-  /**
-   * Test for calling getFetchSizeForQuery method for existing query. Should return set fetch size
-   * for the query.
-   */
-  @Test
-  void gettingFetchSizeForExistingQuery()
-      throws NoSuchFieldException, IllegalAccessException {
-    String expectedQuery = "test-query";
-    boolean adaptiveFetch = true;
-
-    Map<String, AdaptiveFetchCacheEntry> map = getInfoMapVariable();
-
-    int expectedSize = 500;
-
-    AdaptiveFetchCacheEntry adaptiveFetchCacheEntry = new AdaptiveFetchCacheEntry();
-    adaptiveFetchCacheEntry.setSize(expectedSize);
-    map.put(expectedQuery, adaptiveFetchCacheEntry);
-
-    int resultSize = adaptiveFetchCache
-        .getFetchSizeForQuery(adaptiveFetch, new MockUpQuery(expectedQuery));
-
-    assertEquals(expectedSize, resultSize);
-  }
-
-  /**
-   * Test for calling getFetchSizeForQuery method for existing query, but adaptiveFetch is set to
-   * false. Should return value -1.
-   */
-  @Test
-  void gettingFetchSizeForExistingQueryIfAdaptiveFetchFalse()
-      throws NoSuchFieldException, IllegalAccessException {
-    String expectedQuery = "test-query";
-    boolean adaptiveFetch = false;
-
-    Map<String, AdaptiveFetchCacheEntry> map = getInfoMapVariable();
-
-    int newSize = 500;
-
-    AdaptiveFetchCacheEntry adaptiveFetchCacheEntry = new AdaptiveFetchCacheEntry();
-    adaptiveFetchCacheEntry.setSize(newSize);
-    map.put(expectedQuery, adaptiveFetchCacheEntry);
-
-    int resultSize = adaptiveFetchCache
-        .getFetchSizeForQuery(adaptiveFetch, new MockUpQuery(expectedQuery));
-
-    assertEquals(-1, resultSize);
-  }
-
-  /**
-   * Test for calling removeQuery method for not existing query. Should nothing happen.
-   */
-  @Test
-  void removingNotExistingQuery()
-      throws NoSuchFieldException, IllegalAccessException {
-    String expectedQuery = "test-query";
-    boolean adaptiveFetch = true;
-
-    adaptiveFetchCache.removeQuery(adaptiveFetch, new MockUpQuery(expectedQuery));
-
-    Map<String, AdaptiveFetchCacheEntry> map = getInfoMapVariable();
-
-    assertEquals(0, map.size());
-  }
-
-  /**
-   * Test for calling removeQuery method for not existing query, but adaptiveFetch is set false.
-   * Should nothing happen.
-   */
-  @Test
-  void removingNotExistingQueryIfAdaptiveFetchFalse()
-      throws NoSuchFieldException, IllegalAccessException {
-    String expectedQuery = "test-query";
-    boolean adaptiveFetch = false;
-
-    adaptiveFetchCache.removeQuery(adaptiveFetch, new MockUpQuery(expectedQuery));
-
-    Map<String, AdaptiveFetchCacheEntry> map = getInfoMapVariable();
-
-    assertEquals(0, map.size());
-  }
-
-  /**
-   * Test for calling removeQuery method for existing query. The query should be removed from the
-   * map inside AdaptiveFetchCache.
-   */
-  @Test
-  void removingExistingQuery()
-      throws NoSuchFieldException, IllegalAccessException {
-    String expectedQuery = "test-query";
-    boolean adaptiveFetch = true;
-
-    Map<String, AdaptiveFetchCacheEntry> map = getInfoMapVariable();
-
-    AdaptiveFetchCacheEntry adaptiveFetchCacheEntry = new AdaptiveFetchCacheEntry();
-    adaptiveFetchCacheEntry.setCounter(1);
-    map.put(expectedQuery, adaptiveFetchCacheEntry);
-
-    assertEquals(1, map.size());
-
-    adaptiveFetchCache.removeQuery(adaptiveFetch, new MockUpQuery(expectedQuery));
-
-    assertEquals(0, map.size());
-    assertNull(map.get(expectedQuery));
-  }
-
-  /**
-   * Test for calling removeQuery method for existing query, but adaptiveFetch is set false. The
-   * query shouldn't be removed.
-   */
-  @Test
-  void removingExistingQueryIfAdaptiveFetchFalse()
-      throws NoSuchFieldException, IllegalAccessException {
-    String expectedQuery = "test-query";
-    boolean adaptiveFetch = false;
-
-    Map<String, AdaptiveFetchCacheEntry> map = getInfoMapVariable();
-
-    AdaptiveFetchCacheEntry adaptiveFetchCacheEntry = new AdaptiveFetchCacheEntry();
-    adaptiveFetchCacheEntry.setCounter(1);
-    map.put(expectedQuery, adaptiveFetchCacheEntry);
-
-    assertEquals(1, map.size());
-
-    adaptiveFetchCache.removeQuery(adaptiveFetch, new MockUpQuery(expectedQuery));
-
-    assertEquals(1, map.size());
-    assertNotNull(map.get(expectedQuery));
-    assertEquals(1, map.get(expectedQuery).getCounter());
-  }
-
-  /**
-   * Test for calling removeQuery method for existing query with counter set to 2. After call, query
-   * shouldn't be removed, but counter set to 1. After next call, query should be removed.
-   */
-  @Test
-  void removingExistingQueryWithLargeCounter()
-      throws NoSuchFieldException, IllegalAccessException {
-    String expectedQuery = "test-query";
-    boolean adaptiveFetch = true;
-
-    Map<String, AdaptiveFetchCacheEntry> map = getInfoMapVariable();
-
-    AdaptiveFetchCacheEntry adaptiveFetchCacheEntry = new AdaptiveFetchCacheEntry();
-    adaptiveFetchCacheEntry.setCounter(2);
-    map.put(expectedQuery, adaptiveFetchCacheEntry);
-
-    adaptiveFetchCache.removeQuery(adaptiveFetch, new MockUpQuery(expectedQuery));
-
-    assertEquals(1, map.size());
-    assertNotNull(map.get(expectedQuery));
-    assertEquals(1, map.get(expectedQuery).getCounter());
-
-    adaptiveFetchCache.removeQuery(adaptiveFetch, new MockUpQuery(expectedQuery));
-
-    assertEquals(0, map.size());
-    assertNull(map.get(expectedQuery));
-  }
-
-  /**
-   * Test for calling removeQuery method for existing query with counter set to 2, but with
-   * adaptiveFetch set false. After both calls query should be removed and counter shouldn't
-   * change.
-   */
-  @Test
-  void removingExistingQueryWithLargeCounterIfAdaptiveFetchFalse()
-      throws NoSuchFieldException, IllegalAccessException {
-    String expectedQuery = "test-query";
-    boolean adaptiveFetch = false;
-
-    Map<String, AdaptiveFetchCacheEntry> map = getInfoMapVariable();
-
-    AdaptiveFetchCacheEntry adaptiveFetchCacheEntry = new AdaptiveFetchCacheEntry();
-    adaptiveFetchCacheEntry.setCounter(2);
-    map.put(expectedQuery, adaptiveFetchCacheEntry);
-
-    adaptiveFetchCache.removeQuery(adaptiveFetch, new MockUpQuery(expectedQuery));
-
-    assertEquals(1, map.size());
-    assertNotNull(map.get(expectedQuery));
-    assertEquals(2, map.get(expectedQuery).getCounter());
-
-    adaptiveFetchCache.removeQuery(adaptiveFetch, new MockUpQuery(expectedQuery));
-
-    assertEquals(1, map.size());
-    assertNotNull(map.get(expectedQuery));
-    assertEquals(2, map.get(expectedQuery).getCounter());
-  }
-
-  /**
-   * Test for calling removeQuery method for existing query with more queries put in the map. Only
-   * query used in method call should be removed, other shouldn't change.
-   */
-  @Test
-  void removingExistingQueryWithMoreQueriesCached()
-      throws NoSuchFieldException, IllegalAccessException {
-    String expectedQuery = "test-query-1";
-    String expectedQuery2 = "test-query-2";
-    String expectedQuery3 = "test-query-3";
-    boolean adaptiveFetch = true;
-
-    Map<String, AdaptiveFetchCacheEntry> map = getInfoMapVariable();
-
-    int expectedCounter1 = 1;
-    int expectedCounter2 = 37;
-    int expectedCounter3 = 14;
-
-    AdaptiveFetchCacheEntry adaptiveFetchCacheEntry1 = new AdaptiveFetchCacheEntry();
-    adaptiveFetchCacheEntry1.setCounter(expectedCounter1);
-    map.put(expectedQuery, adaptiveFetchCacheEntry1);
-
-    AdaptiveFetchCacheEntry adaptiveFetchCacheEntry2 = new AdaptiveFetchCacheEntry();
-    adaptiveFetchCacheEntry2.setCounter(expectedCounter2);
-    map.put(expectedQuery2, adaptiveFetchCacheEntry2);
-
-    AdaptiveFetchCacheEntry adaptiveFetchCacheEntry3 = new AdaptiveFetchCacheEntry();
-    adaptiveFetchCacheEntry3.setCounter(expectedCounter3);
-    map.put(expectedQuery3, adaptiveFetchCacheEntry3);
-
-    adaptiveFetchCache.removeQuery(adaptiveFetch, new MockUpQuery(expectedQuery));
-
-    AdaptiveFetchCacheEntry resultInfo1 = map.get(expectedQuery);
-    AdaptiveFetchCacheEntry resultInfo2 = map.get(expectedQuery2);
-    AdaptiveFetchCacheEntry resultInfo3 = map.get(expectedQuery3);
-
-    assertEquals(2, map.size());
-    assertNull(resultInfo1);
-    assertNotNull(resultInfo2);
-    assertEquals(adaptiveFetchCacheEntry2, resultInfo2);
-    assertEquals(expectedCounter2, resultInfo2.getCounter());
-    assertNotNull(resultInfo3);
-    assertEquals(adaptiveFetchCacheEntry3, resultInfo3);
-    assertEquals(expectedCounter3, resultInfo3.getCounter());
-  }
-
-  /**
-   * Test for calling removeQuery method for existing query with more queries put in the map, but
-   * adaptiveFetch is set false. Queries shouldn't change
-   */
-  @Test
-  void removingExistingQueryWithMoreQueriesCachedIfAdaptiveFetchFalse()
-      throws NoSuchFieldException, IllegalAccessException {
-    String expectedQuery = "test-query-1";
-    String expectedQuery2 = "test-query-2";
-    String expectedQuery3 = "test-query-3";
-    boolean adaptiveFetch = false;
-
-    Map<String, AdaptiveFetchCacheEntry> map = getInfoMapVariable();
-
-    int expectedCounter1 = 1;
-    int expectedCounter2 = 37;
-    int expectedCounter3 = 14;
-
-    AdaptiveFetchCacheEntry adaptiveFetchCacheEntry1 = new AdaptiveFetchCacheEntry();
-    adaptiveFetchCacheEntry1.setCounter(expectedCounter1);
-    map.put(expectedQuery, adaptiveFetchCacheEntry1);
-
-    AdaptiveFetchCacheEntry adaptiveFetchCacheEntry2 = new AdaptiveFetchCacheEntry();
-    adaptiveFetchCacheEntry2.setCounter(expectedCounter2);
-    map.put(expectedQuery2, adaptiveFetchCacheEntry2);
-
-    AdaptiveFetchCacheEntry adaptiveFetchCacheEntry3 = new AdaptiveFetchCacheEntry();
-    adaptiveFetchCacheEntry3.setCounter(expectedCounter3);
-    map.put(expectedQuery3, adaptiveFetchCacheEntry3);
-
-    adaptiveFetchCache.removeQuery(adaptiveFetch, new MockUpQuery(expectedQuery));
-
-    AdaptiveFetchCacheEntry resultInfo1 = map.get(expectedQuery);
-    AdaptiveFetchCacheEntry resultInfo2 = map.get(expectedQuery2);
-    AdaptiveFetchCacheEntry resultInfo3 = map.get(expectedQuery3);
-
-    assertEquals(3, map.size());
-    assertNotNull(resultInfo1);
-    assertEquals(adaptiveFetchCacheEntry1, resultInfo1);
-    assertEquals(expectedCounter1, resultInfo1.getCounter());
-    assertNotNull(resultInfo2);
-    assertEquals(adaptiveFetchCacheEntry2, resultInfo2);
-    assertEquals(expectedCounter2, resultInfo2.getCounter());
-    assertNotNull(resultInfo3);
-    assertEquals(adaptiveFetchCacheEntry3, resultInfo3);
-    assertEquals(expectedCounter3, resultInfo3.getCounter());
-  }
-
-  /**
-   * Test for calling setAdaptiveFetch method with true value.
-   */
-  @Test
-  void settingAdaptiveFetchAsTrue()
-      throws NoSuchFieldException, IllegalAccessException {
-    boolean expectedAdaptiveFetch = true;
-
-    adaptiveFetchCache.setAdaptiveFetch(expectedAdaptiveFetch);
-
-    boolean resultAdaptiveFetch = getAdaptiveFetchVariable();
-
-    assertEquals(expectedAdaptiveFetch, resultAdaptiveFetch);
-  }
-
-  /**
-   * Test for calling setAdaptiveFetch method with false value.
-   */
-  @Test
-  void settingAdaptiveFetchAsFalse()
-      throws NoSuchFieldException, IllegalAccessException {
-    boolean expectedAdaptiveFetch = false;
-
-    adaptiveFetchCache.setAdaptiveFetch(expectedAdaptiveFetch);
-
-    boolean resultAdaptiveFetch = getAdaptiveFetchVariable();
-
-    assertEquals(expectedAdaptiveFetch, resultAdaptiveFetch);
-  }
-
-  /**
-   * Test for calling updateQueryFetchSize method. Method should update a value for a query.
-   */
-  @Test
-  void updatingAdaptiveFetchSize()
-      throws NoSuchFieldException, IllegalAccessException {
-    String expectedQuery = "test-query-1";
-    boolean adaptiveFetch = true;
-
-    Map<String, AdaptiveFetchCacheEntry> map = getInfoMapVariable();
-
-    int rowSize = 33;
-    int startSize = size / rowSize - 15;
-
-    AdaptiveFetchCacheEntry adaptiveFetchCacheEntry = new AdaptiveFetchCacheEntry();
-    adaptiveFetchCacheEntry.setSize(startSize);
-    map.put(expectedQuery, adaptiveFetchCacheEntry);
-
-    adaptiveFetchCache
-      .updateQueryFetchSize(adaptiveFetch, new MockUpQuery(expectedQuery), rowSize);
-
-    AdaptiveFetchCacheEntry resultInfo = map.get(expectedQuery);
-
-    assertNotNull(resultInfo);
-    assertEquals(size / rowSize, resultInfo.getSize());
-  }
-
-  /**
-   * Test for calling updateQueryFetchSize method, but adaptiveFetch is set false. Method shouldn't
-   * update any values.
-   */
-  @Test
-  void updatingAdaptiveFetchSizeIfAdaptiveFetchFalse()
-      throws NoSuchFieldException, IllegalAccessException {
-    String expectedQuery = "test-query-1";
-    boolean adaptiveFetch = false;
-
-    Map<String, AdaptiveFetchCacheEntry> map = getInfoMapVariable();
-
-    int rowSize = 33;
-    int startSize = size / rowSize - 15;
-
-    AdaptiveFetchCacheEntry adaptiveFetchCacheEntry = new AdaptiveFetchCacheEntry();
-    adaptiveFetchCacheEntry.setSize(startSize);
-    map.put(expectedQuery, adaptiveFetchCacheEntry);
-
-    adaptiveFetchCache
-      .updateQueryFetchSize(adaptiveFetch, new MockUpQuery(expectedQuery), rowSize);
-
-    AdaptiveFetchCacheEntry resultInfo = map.get(expectedQuery);
-
-    assertNotNull(resultInfo);
-    assertEquals(startSize, resultInfo.getSize());
-  }
-
-  /**
-   * Test for calling updateQueryFetchSize method for not existing query. Method shouldn't update
-   * any values.
-   */
-  @Test
-  void updatingAdaptiveFetchSizeForNotExistingQuery()
-      throws NoSuchFieldException, IllegalAccessException {
-    String expectedQuery = "test-query-1";
-    String expectedQuery2 = "test-query-2";
-    boolean adaptiveFetch = true;
-
-    Map<String, AdaptiveFetchCacheEntry> map = getInfoMapVariable();
-
-    int rowSize = 33;
-    int startSize = size / rowSize - 15;
-
-    AdaptiveFetchCacheEntry adaptiveFetchCacheEntry = new AdaptiveFetchCacheEntry();
-    adaptiveFetchCacheEntry.setSize(startSize);
-    map.put(expectedQuery2, adaptiveFetchCacheEntry);
-
-    adaptiveFetchCache
-      .updateQueryFetchSize(adaptiveFetch, new MockUpQuery(expectedQuery), rowSize);
-
-    AdaptiveFetchCacheEntry resultInfo = map.get(expectedQuery);
-    AdaptiveFetchCacheEntry resultInfo2 = map.get(expectedQuery2);
-
-    assertNull(resultInfo);
-    assertNotNull(resultInfo2);
-    assertEquals(adaptiveFetchCacheEntry, resultInfo2);
-    assertEquals(startSize, resultInfo2.getSize());
-    assertEquals(1, map.size());
-  }
-
-  /**
-   * Test for calling updateQueryFetchSize method for not existing query, but adaptiveFetch is set
-   * false. Method shouldn't update any values.
-   */
-  @Test
-  void updatingAdaptiveFetchSizeForNotExistingQueryIfAdaptiveFetchFalse()
-      throws NoSuchFieldException, IllegalAccessException {
-    String expectedQuery = "test-query-1";
-    String expectedQuery2 = "test-query-2";
-    boolean adaptiveFetch = false;
-
-    Map<String, AdaptiveFetchCacheEntry> map = getInfoMapVariable();
-
-    int rowSize = 33;
-    int startSize = size / rowSize - 15;
-
-    AdaptiveFetchCacheEntry adaptiveFetchCacheEntry = new AdaptiveFetchCacheEntry();
-    adaptiveFetchCacheEntry.setSize(startSize);
-    map.put(expectedQuery2, adaptiveFetchCacheEntry);
-
-    adaptiveFetchCache
-      .updateQueryFetchSize(adaptiveFetch, new MockUpQuery(expectedQuery), rowSize);
-
-    AdaptiveFetchCacheEntry resultInfo = map.get(expectedQuery);
-    AdaptiveFetchCacheEntry resultInfo2 = map.get(expectedQuery2);
-
-    assertNull(resultInfo);
-    assertNotNull(resultInfo2);
-    assertEquals(adaptiveFetchCacheEntry, resultInfo2);
-    assertEquals(startSize, resultInfo2.getSize());
-    assertEquals(1, map.size());
-  }
-
-  /**
-   * Test for calling updateQueryFetchSize method in a situation when there are more queries saved
-   * in a map. The method should only change value for query used in a call.
-   */
-  @Test
-  void updatingAdaptiveFetchSizeWithMoreQueriesInMap()
-      throws NoSuchFieldException, IllegalAccessException {
-    String expectedQuery = "test-query-1";
-    String expectedQuery2 = "test-query-2";
-    boolean adaptiveFetch = true;
-
-    Map<String, AdaptiveFetchCacheEntry> map = getInfoMapVariable();
-
-    int rowSize = 33;
-    int startSize = size / rowSize - 15;
-
-    AdaptiveFetchCacheEntry adaptiveFetchCacheEntry = new AdaptiveFetchCacheEntry();
-    adaptiveFetchCacheEntry.setSize(startSize);
-    map.put(expectedQuery, adaptiveFetchCacheEntry);
-
-    AdaptiveFetchCacheEntry adaptiveFetchCacheEntry2 = new AdaptiveFetchCacheEntry();
-    adaptiveFetchCacheEntry2.setSize(startSize);
-    map.put(expectedQuery2, adaptiveFetchCacheEntry2);
-
-    adaptiveFetchCache
-      .updateQueryFetchSize(adaptiveFetch, new MockUpQuery(expectedQuery), rowSize);
-
-    AdaptiveFetchCacheEntry resultInfo = map.get(expectedQuery);
-    AdaptiveFetchCacheEntry resultInfo2 = map.get(expectedQuery2);
-
-    assertNotNull(resultInfo);
-    assertEquals(adaptiveFetchCacheEntry, resultInfo);
-    assertEquals(size / rowSize, resultInfo.getSize());
-    assertNotNull(resultInfo2);
-    assertEquals(adaptiveFetchCacheEntry2, resultInfo2);
-    assertEquals(startSize, resultInfo2.getSize());
-    assertEquals(2, map.size());
-  }
-
-  /**
-   * Test for calling updateQueryFetchSize method in a situation when there are more queries saved
-   * in a map, but adaptiveFetch is set false. The method shouldn't change any values.
-   */
-  @Test
-  void updatingAdaptiveFetchSizeWithMoreQueriesInMapIfAdaptiveFetchFalse()
-      throws NoSuchFieldException, IllegalAccessException {
-    String expectedQuery = "test-query-1";
-    String expectedQuery2 = "test-query-2";
-    boolean adaptiveFetch = false;
-
-    Map<String, AdaptiveFetchCacheEntry> map = getInfoMapVariable();
-
-    int rowSize = 33;
-    int startSize = size / rowSize - 15;
-
-    AdaptiveFetchCacheEntry adaptiveFetchCacheEntry = new AdaptiveFetchCacheEntry();
-    adaptiveFetchCacheEntry.setSize(startSize);
-    map.put(expectedQuery, adaptiveFetchCacheEntry);
-
-    AdaptiveFetchCacheEntry adaptiveFetchCacheEntry2 = new AdaptiveFetchCacheEntry();
-    adaptiveFetchCacheEntry2.setSize(startSize);
-    map.put(expectedQuery2, adaptiveFetchCacheEntry2);
-
-    adaptiveFetchCache
-      .updateQueryFetchSize(adaptiveFetch, new MockUpQuery(expectedQuery), rowSize);
-
-    AdaptiveFetchCacheEntry resultInfo = map.get(expectedQuery);
-    AdaptiveFetchCacheEntry resultInfo2 = map.get(expectedQuery2);
-
-    assertNotNull(resultInfo);
-    assertEquals(adaptiveFetchCacheEntry, resultInfo);
-    assertEquals(startSize, resultInfo.getSize());
-    assertNotNull(resultInfo2);
-    assertEquals(adaptiveFetchCacheEntry2, resultInfo2);
-    assertEquals(startSize, resultInfo2.getSize());
-    assertEquals(2, map.size());
-  }
-
-  /**
-   * Test for calling updateQueryFetchSize method with value to make computed value below minimum
-   * value. The method should update a query to have value of minimum.
-   */
-  @Test
-  void updatingAdaptiveFetchSizeWithMinimumSize()
-      throws NoSuchFieldException, IllegalAccessException {
-    String expectedQuery = "test-query-1";
-    boolean adaptiveFetch = true;
-
-    int rowSize = size + 1000;
-    int startSize = 2;
-    int expectedSize = 10;
-
-    setMinimumSizeVariable(expectedSize);
-
-    Map<String, AdaptiveFetchCacheEntry> map = getInfoMapVariable();
-
-    AdaptiveFetchCacheEntry adaptiveFetchCacheEntry = new AdaptiveFetchCacheEntry();
-    adaptiveFetchCacheEntry.setSize(startSize);
-    map.put(expectedQuery, adaptiveFetchCacheEntry);
-
-    adaptiveFetchCache
-      .updateQueryFetchSize(adaptiveFetch, new MockUpQuery(expectedQuery), rowSize);
-
-    AdaptiveFetchCacheEntry resultInfo = map.get(expectedQuery);
-
-    assertNotNull(resultInfo);
-    assertEquals(expectedSize, resultInfo.getSize());
-  }
-
-  /**
-   * Test for calling updateQueryFetchSize method with value to make computed value below minimum
-   * value, but adaptiveFetch is set false. The method shouldn't update size for a query.
-   */
-  @Test
-  void updatingAdaptiveFetchSizeWithMinimumSizeIfAdaptiveFetchFalse()
-      throws NoSuchFieldException, IllegalAccessException {
-    String expectedQuery = "test-query-1";
-    boolean adaptiveFetch = false;
-
-    int rowSize = size + 1000;
-    int startSize = 2;
-    int expectedSize = 10;
-
-    setMinimumSizeVariable(expectedSize);
-
-    Map<String, AdaptiveFetchCacheEntry> map = getInfoMapVariable();
-
-    AdaptiveFetchCacheEntry adaptiveFetchCacheEntry = new AdaptiveFetchCacheEntry();
-    adaptiveFetchCacheEntry.setSize(startSize);
-    map.put(expectedQuery, adaptiveFetchCacheEntry);
-
-    adaptiveFetchCache
-      .updateQueryFetchSize(adaptiveFetch, new MockUpQuery(expectedQuery), rowSize);
-
-    AdaptiveFetchCacheEntry resultInfo = map.get(expectedQuery);
-
-    assertNotNull(resultInfo);
-    assertEquals(startSize, resultInfo.getSize());
-  }
-
-  /**
-   * Test for calling updateQueryFetchSize method with value to make computed value above maximum
-   * value. The method should update a query to have value of maximum.
-   */
-  @Test
-  void updatingAdaptiveFetchSizeWithMaximumSize()
-      throws NoSuchFieldException, IllegalAccessException {
-    String expectedQuery = "test-query-1";
-    boolean adaptiveFetch = true;
-
-    int rowSize = 1;
-    int startSize = 2;
-    int expectedSize = size / rowSize - 20;
-
-    setMaximumSizeVariable(expectedSize);
-
-    Map<String, AdaptiveFetchCacheEntry> map = getInfoMapVariable();
-
-    AdaptiveFetchCacheEntry adaptiveFetchCacheEntry = new AdaptiveFetchCacheEntry();
-    adaptiveFetchCacheEntry.setSize(startSize);
-    map.put(expectedQuery, adaptiveFetchCacheEntry);
-
-    adaptiveFetchCache
-      .updateQueryFetchSize(adaptiveFetch, new MockUpQuery(expectedQuery), rowSize);
-
-    AdaptiveFetchCacheEntry resultInfo = map.get(expectedQuery);
-
-    assertNotNull(resultInfo);
-    assertEquals(expectedSize, resultInfo.getSize());
-  }
-
-  /**
-   * Test for calling updateQueryFetchSize method with value to make computed value below maximum
-   * value, but adaptiveFetch is set false. The method shouldn't update size for a query.
-   */
-  @Test
-  void updatingAdaptiveFetchSizeWithMaximumSizeIfAdaptiveFetchFalse()
-      throws NoSuchFieldException, IllegalAccessException {
-    String expectedQuery = "test-query-1";
-    boolean adaptiveFetch = false;
-
-    int rowSize = 1;
-    int startSize = 2;
-    int expectedSize = size / rowSize - 20;
-
-    setMaximumSizeVariable(expectedSize);
-
-    Map<String, AdaptiveFetchCacheEntry> map = getInfoMapVariable();
-
-    AdaptiveFetchCacheEntry adaptiveFetchQueryInfo = new AdaptiveFetchCacheEntry();
-    adaptiveFetchQueryInfo.setSize(startSize);
-    map.put(expectedQuery, adaptiveFetchQueryInfo);
-
-    adaptiveFetchCache
-      .updateQueryFetchSize(adaptiveFetch, new MockUpQuery(expectedQuery), rowSize);
-
-    AdaptiveFetchCacheEntry resultInfo = map.get(expectedQuery);
-
-    assertNotNull(resultInfo);
-    assertEquals(startSize, resultInfo.getSize());
-  }
-
-  // Here are methods for retrieving values from adaptiveFetchCache without calling methods
-
-  private Map<String, AdaptiveFetchCacheEntry> getInfoMapVariable()
-      throws IllegalAccessException, NoSuchFieldException {
-    Field field = adaptiveFetchCache.getClass().getDeclaredField(infoMapVariableName);
-    field.setAccessible(true);
-    return (Map<String, AdaptiveFetchCacheEntry>) field.get(adaptiveFetchCache);
-  }
-
-  private int getMinimumSizeVariable() throws NoSuchFieldException, IllegalAccessException {
-    Field field = adaptiveFetchCache.getClass().getDeclaredField(minimumSizeVariableName);
-    field.setAccessible(true);
-    return (Integer) field.get(adaptiveFetchCache);
-  }
-
-  private int getMaximumSizeVariable() throws NoSuchFieldException, IllegalAccessException {
-    Field field = adaptiveFetchCache.getClass().getDeclaredField(maximumSizeVariableName);
-    field.setAccessible(true);
-    return (Integer) field.get(adaptiveFetchCache);
-  }
-
-  private boolean getAdaptiveFetchVariable() throws NoSuchFieldException, IllegalAccessException {
-    Field field = adaptiveFetchCache.getClass()
-        .getDeclaredField(adaptiveFetchVariableName);
-    field.setAccessible(true);
-    return (Boolean) field.get(adaptiveFetchCache);
-  }
-
-  private long getMaximumBufferVariable() throws NoSuchFieldException, IllegalAccessException {
-    Field field = adaptiveFetchCache.getClass()
-        .getDeclaredField(maximumBufferSizeVariableName);
-    field.setAccessible(true);
-    return (Long) field.get(adaptiveFetchCache);
-  }
-
-  private void setMinimumSizeVariable(int value)
-      throws NoSuchFieldException, IllegalAccessException {
-    Field field = adaptiveFetchCache.getClass().getDeclaredField(minimumSizeVariableName);
-    field.setAccessible(true);
-    field.set(adaptiveFetchCache, value);
-  }
-
-  private void setMaximumSizeVariable(int value)
-      throws NoSuchFieldException, IllegalAccessException {
-    Field field = adaptiveFetchCache.getClass()
-        .getDeclaredField(maximumSizeVariableName);
-    field.setAccessible(true);
-    field.set(adaptiveFetchCache, value);
-  }
-
-  private void setAdaptiveFetchVariable(boolean value)
-      throws NoSuchFieldException, IllegalAccessException {
-    Field field = adaptiveFetchCache.getClass()
-        .getDeclaredField(adaptiveFetchVariableName);
-    field.setAccessible(true);
-    field.set(adaptiveFetchCache, value);
-  }
-
-  private void setMaximumBufferVariable(long value)
-      throws NoSuchFieldException, IllegalAccessException {
-    Field field = adaptiveFetchCache.getClass()
-        .getDeclaredField(maximumBufferSizeVariableName);
-    field.setAccessible(true);
-    field.set(adaptiveFetchCache, value);
-  }
-
-  /**
-   * Class to mock object with Query interface. As AdaptiveFetchCache is using only
-   * getNativeSql method from Query interface, other shouldn't be called.
-   */
-  private class MockUpQuery implements Query {
-
-    public String sql;
-
-    MockUpQuery(String sql) {
-      this.sql = sql;
-    }
-
-    @Override
-    public ParameterList createParameterList() {
-      throw new WrongMethodCallException("Method shouldn't be called.");
-    }
-
-    @Override
-    public String toString(ParameterList parameters) {
-      throw new WrongMethodCallException("Method shouldn't be called.");
-    }
-
-    @Override
-    public String getNativeSql() {
-      return this.sql;
-    }
-
-    @Override
-    public SqlCommand getSqlCommand() {
-      throw new WrongMethodCallException("Method shouldn't be called.");
-    }
-
-    @Override
-    public void close() {
-      throw new WrongMethodCallException("Method shouldn't be called.");
-    }
-
-    @Override
-    public boolean isStatementDescribed() {
-      throw new WrongMethodCallException("Method shouldn't be called.");
-    }
-
-    @Override
-    public boolean isEmpty() {
-      throw new WrongMethodCallException("Method shouldn't be called.");
-    }
-
-    @Override
-    public int getBatchSize() {
-      throw new WrongMethodCallException("Method shouldn't be called.");
-    }
-
-    @Override
-    public Map<String, Integer> getResultSetColumnNameIndexMap() {
-      throw new WrongMethodCallException("Method shouldn't be called.");
-    }
-
-    @Override
-    public Query[] getSubqueries() {
-      throw new WrongMethodCallException("Method shouldn't be called.");
-    }
-  }
-
-  /**
-   * An exception used when method shouldn't be called in MockUpQuery class.
-   */
-  private class WrongMethodCallException extends RuntimeException {
-
-    WrongMethodCallException(String msg) {
-      super(msg);
-    }
-
-  }
-
-}
diff --git a/pgjdbc/src/test/java/org/postgresql/jdbc/AbstractArraysTest.java b/pgjdbc/src/test/java/org/postgresql/jdbc/AbstractArraysTest.java
deleted file mode 100644
index 3f0f5b2..0000000
--- a/pgjdbc/src/test/java/org/postgresql/jdbc/AbstractArraysTest.java
+++ /dev/null
@@ -1,1116 +0,0 @@
-/*
- * Copyright (c) 2018, PostgreSQL Global Development Group
- * See the LICENSE file in the project root for more information.
- */
-
-package org.postgresql.jdbc;
-
-import static org.junit.jupiter.api.Assertions.assertEquals;
-
-import org.postgresql.PGNotification;
-import org.postgresql.copy.CopyManager;
-import org.postgresql.core.BaseConnection;
-import org.postgresql.core.CachedQuery;
-import org.postgresql.core.Encoding;
-import org.postgresql.core.QueryExecutor;
-import org.postgresql.core.ReplicationProtocol;
-import org.postgresql.core.TransactionState;
-import org.postgresql.core.TypeInfo;
-import org.postgresql.core.Version;
-import org.postgresql.fastpath.Fastpath;
-import org.postgresql.jdbc.FieldMetadata.Key;
-import org.postgresql.largeobject.LargeObjectManager;
-import org.postgresql.replication.PGReplicationConnection;
-import org.postgresql.util.LruCache;
-import org.postgresql.util.PGobject;
-import org.postgresql.xml.PGXmlFactoryFactory;
-
-import org.junit.jupiter.api.Test;
-
-import java.lang.reflect.Array;
-import java.sql.Blob;
-import java.sql.CallableStatement;
-import java.sql.Clob;
-import java.sql.DatabaseMetaData;
-import java.sql.NClob;
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.sql.SQLClientInfoException;
-import java.sql.SQLException;
-import java.sql.SQLWarning;
-import java.sql.SQLXML;
-import java.sql.Savepoint;
-import java.sql.Statement;
-import java.sql.Struct;
-import java.util.Map;
-import java.util.Properties;
-import java.util.TimerTask;
-import java.util.concurrent.Executor;
-import java.util.logging.Logger;
-
-public abstract class AbstractArraysTest<A> {
-
-  private static final BaseConnection ENCODING_CONNECTION = new EncodingConnection(Encoding.getJVMEncoding("utf-8"));
-
-  private final A[][] testData;
-
-  private final boolean binarySupported;
-
-  private final int arrayTypeOid;
-
-  /**
-   *
-   * @param testData
-   *          3 dimensional array to use for testing.
-   * @param binarySupported
-   *          Indicates if binary support is expected for the type.
-   */
-  public AbstractArraysTest(A[][] testData, boolean binarySupported, int arrayTypeOid) {
-    super();
-    this.testData = testData;
-    this.binarySupported = binarySupported;
-    this.arrayTypeOid = arrayTypeOid;
-  }
-
-  protected void assertArraysEquals(String message, A expected, Object actual) {
-    final int expectedLength = Array.getLength(expected);
-    assertEquals(expectedLength, Array.getLength(actual), message + " size");
-    for (int i = 0; i < expectedLength; i++) {
-      assertEquals(Array.get(expected, i), Array.get(actual, i), message + " value at " + i);
-    }
-  }
-
-  @Test
-  public void binary() throws Exception {
-
-    A data = testData[0][0];
-
-    ArrayEncoding.ArrayEncoder<A> support = ArrayEncoding.getArrayEncoder(data);
-
-    final int defaultArrayTypeOid = support.getDefaultArrayTypeOid();
-
-    assertEquals(binarySupported, support.supportBinaryRepresentation(defaultArrayTypeOid));
-
-    if (binarySupported) {
-
-      final PgArray pgArray = new PgArray(ENCODING_CONNECTION, defaultArrayTypeOid,
-          support.toBinaryRepresentation(ENCODING_CONNECTION, data, defaultArrayTypeOid));
-
-      Object actual = pgArray.getArray();
-
-      assertArraysEquals("", data, actual);
-    }
-  }
-
-  @Test
-  public void string() throws Exception {
-
-    A data = testData[0][0];
-
-    ArrayEncoding.ArrayEncoder<A> support = ArrayEncoding.getArrayEncoder(data);
-
-    final String arrayString = support.toArrayString(',', data);
-
-    final PgArray pgArray = new PgArray(ENCODING_CONNECTION, arrayTypeOid, arrayString);
-
-    Object actual = pgArray.getArray();
-
-    assertArraysEquals("", data, actual);
-  }
-
-  @Test
-  public void test2dBinary() throws Exception {
-
-    A[] data = testData[0];
-
-    ArrayEncoding.ArrayEncoder<A[]> support = ArrayEncoding.getArrayEncoder(data);
-
-    final int defaultArrayTypeOid = support.getDefaultArrayTypeOid();
-
-    assertEquals(binarySupported, support.supportBinaryRepresentation(defaultArrayTypeOid));
-
-    if (binarySupported) {
-
-      final PgArray pgArray = new PgArray(ENCODING_CONNECTION, support.getDefaultArrayTypeOid(),
-          support.toBinaryRepresentation(ENCODING_CONNECTION, data, defaultArrayTypeOid));
-
-      Object[] actual = (Object[]) pgArray.getArray();
-
-      assertEquals(data.length, actual.length);
-
-      for (int i = 0; i < data.length; i++) {
-        assertArraysEquals("array at position " + i, data[i], actual[i]);
-      }
-    }
-  }
-
-  @Test
-  public void test2dString() throws Exception {
-
-    final A[] data = testData[0];
-
-    final ArrayEncoding.ArrayEncoder<A[]> support = ArrayEncoding.getArrayEncoder(data);
-
-    final String arrayString = support.toArrayString(',', data);
-
-    final PgArray pgArray = new PgArray(ENCODING_CONNECTION, arrayTypeOid, arrayString);
-
-    Object[] actual = (Object[]) pgArray.getArray();
-
-    assertEquals(data.length, actual.length);
-
-    for (int i = 0; i < data.length; i++) {
-      assertArraysEquals("array at position " + i, data[i], actual[i]);
-    }
-  }
-
-  @Test
-  public void test3dBinary() throws Exception {
-
-    ArrayEncoding.ArrayEncoder<A[][]> support = ArrayEncoding.getArrayEncoder(testData);
-
-    final int defaultArrayTypeOid = support.getDefaultArrayTypeOid();
-
-    assertEquals(binarySupported, support.supportBinaryRepresentation(defaultArrayTypeOid));
-
-    if (binarySupported) {
-
-      final PgArray pgArray = new PgArray(ENCODING_CONNECTION, support.getDefaultArrayTypeOid(),
-          support.toBinaryRepresentation(ENCODING_CONNECTION, testData, defaultArrayTypeOid));
-
-      Object[][] actual = (Object[][]) pgArray.getArray();
-
-      assertEquals(testData.length, actual.length);
-
-      for (int i = 0; i < testData.length; i++) {
-        assertEquals(testData[i].length, actual[i].length, "array length at " + i);
-        for (int j = 0; j < testData[i].length; j++) {
-          assertArraysEquals("array at " + i + ',' + j, testData[i][j], actual[i][j]);
-        }
-      }
-    }
-  }
-
-  @Test
-  public void test3dString() throws Exception {
-
-    final ArrayEncoding.ArrayEncoder<A[][]> support = ArrayEncoding.getArrayEncoder(testData);
-
-    final String arrayString = support.toArrayString(',', testData);
-
-    final PgArray pgArray = new PgArray(ENCODING_CONNECTION, arrayTypeOid, arrayString);
-
-    Object[][] actual = (Object[][]) pgArray.getArray();
-
-    assertEquals(testData.length, actual.length);
-
-    for (int i = 0; i < testData.length; i++) {
-      assertEquals(testData[i].length, actual[i].length, "array length at " + i);
-      for (int j = 0; j < testData[i].length; j++) {
-        assertArraysEquals("array at " + i + ',' + j, testData[i][j], actual[i][j]);
-      }
-    }
-  }
-
-  @Test
-  public void objectArrayCopy() throws Exception {
-    final Object[] copy = new Object[testData.length];
-    for (int i = 0; i < testData.length; i++) {
-      copy[i] = testData[i];
-    }
-
-    final ArrayEncoding.ArrayEncoder<A[][]> support = ArrayEncoding.getArrayEncoder(testData);
-    final String arrayString = support.toArrayString(',', testData);
-
-    final ArrayEncoding.ArrayEncoder<Object[]> copySupport = ArrayEncoding.getArrayEncoder(copy);
-    final String actual = copySupport.toArrayString(',', copy);
-
-    assertEquals(arrayString, actual);
-  }
-
-  @Test
-  public void object2dArrayCopy() throws Exception {
-    final Object[][] copy = new Object[testData.length][];
-    for (int  i = 0; i < testData.length; i++) {
-      copy[i] = testData[i];
-    }
-
-    final ArrayEncoding.ArrayEncoder<A[][]> support = ArrayEncoding.getArrayEncoder(testData);
-    final String arrayString = support.toArrayString(',', testData);
-
-    final ArrayEncoding.ArrayEncoder<Object[]> copySupport = ArrayEncoding.getArrayEncoder(copy);
-    final String actual = copySupport.toArrayString(',', copy);
-
-    assertEquals(arrayString, actual);
-  }
-
-  @Test
-  public void object3dArrayCopy() throws Exception {
-    final A[][][] source = (A[][][]) Array.newInstance(testData.getClass(), 2);
-    source[0] = testData;
-    source[1] = testData;
-    final Object[][][] copy = new Object[][][]{testData, testData};
-
-    final ArrayEncoding.ArrayEncoder<A[][][]> support = ArrayEncoding.getArrayEncoder(source);
-    final String arrayString = support.toArrayString(',', source);
-
-    final ArrayEncoding.ArrayEncoder<Object[]> copySupport = ArrayEncoding.getArrayEncoder(copy);
-    final String actual = copySupport.toArrayString(',', copy);
-
-    assertEquals(arrayString, actual);
-  }
-
-  private static final class EncodingConnection implements BaseConnection {
-    private final Encoding encoding;
-    private final TypeInfo typeInfo = new TypeInfoCache(this, -1);
-
-    EncodingConnection(Encoding encoding) {
-      this.encoding = encoding;
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public Encoding getEncoding() throws SQLException {
-      return encoding;
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public TypeInfo getTypeInfo() {
-      return typeInfo;
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public void cancelQuery() throws SQLException {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public ResultSet execSQLQuery(String s) throws SQLException {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public ResultSet execSQLQuery(String s, int resultSetType, int resultSetConcurrency) throws SQLException {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public void execSQLUpdate(String s) throws SQLException {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public QueryExecutor getQueryExecutor() {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public ReplicationProtocol getReplicationProtocol() {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public Object getObject(String type, String value, byte[] byteValue) throws SQLException {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public boolean haveMinimumServerVersion(int ver) {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public boolean haveMinimumServerVersion(Version ver) {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public byte[] encodeString(String str) throws SQLException {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public String escapeString(String str) throws SQLException {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public boolean getStandardConformingStrings() {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public TimestampUtils getTimestampUtils() {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public Logger getLogger() {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public boolean getStringVarcharFlag() {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public TransactionState getTransactionState() {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public boolean binaryTransferSend(int oid) {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public boolean isColumnSanitiserDisabled() {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public void addTimerTask(TimerTask timerTask, long milliSeconds) {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public void purgeTimerTasks() {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public LruCache<Key, FieldMetadata> getFieldMetadataCache() {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public CachedQuery createQuery(String sql, boolean escapeProcessing, boolean isParameterized, String... columnNames)
-        throws SQLException {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public void setFlushCacheOnDeallocate(boolean flushCacheOnDeallocate) {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public Statement createStatement() throws SQLException {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public PreparedStatement prepareStatement(String sql) throws SQLException {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public CallableStatement prepareCall(String sql) throws SQLException {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public String nativeSQL(String sql) throws SQLException {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public void setAutoCommit(boolean autoCommit) throws SQLException {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public boolean getAutoCommit() throws SQLException {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public void commit() throws SQLException {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public void rollback() throws SQLException {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public void close() throws SQLException {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public boolean isClosed() throws SQLException {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public DatabaseMetaData getMetaData() throws SQLException {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public void setReadOnly(boolean readOnly) throws SQLException {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public boolean isReadOnly() throws SQLException {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public void setCatalog(String catalog) throws SQLException {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public String getCatalog() throws SQLException {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public void setTransactionIsolation(int level) throws SQLException {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public int getTransactionIsolation() throws SQLException {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public SQLWarning getWarnings() throws SQLException {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public void clearWarnings() throws SQLException {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public Statement createStatement(int resultSetType, int resultSetConcurrency) throws SQLException {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency)
-        throws SQLException {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency) throws SQLException {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public Map<String, Class<?>> getTypeMap() throws SQLException {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public void setTypeMap(Map<String, Class<?>> map) throws SQLException {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public void setHoldability(int holdability) throws SQLException {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public int getHoldability() throws SQLException {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public Savepoint setSavepoint() throws SQLException {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public Savepoint setSavepoint(String name) throws SQLException {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public void rollback(Savepoint savepoint) throws SQLException {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public void releaseSavepoint(Savepoint savepoint) throws SQLException {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public Statement createStatement(int resultSetType, int resultSetConcurrency, int resultSetHoldability)
-        throws SQLException {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency,
-        int resultSetHoldability) throws SQLException {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency,
-        int resultSetHoldability) throws SQLException {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public PreparedStatement prepareStatement(String sql, int autoGeneratedKeys) throws SQLException {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public PreparedStatement prepareStatement(String sql, int[] columnIndexes) throws SQLException {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public PreparedStatement prepareStatement(String sql, String[] columnNames) throws SQLException {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public Clob createClob() throws SQLException {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public Blob createBlob() throws SQLException {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public NClob createNClob() throws SQLException {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public SQLXML createSQLXML() throws SQLException {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public boolean isValid(int timeout) throws SQLException {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public void setClientInfo(String name, String value) throws SQLClientInfoException {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public void setClientInfo(Properties properties) throws SQLClientInfoException {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public String getClientInfo(String name) throws SQLException {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public Properties getClientInfo() throws SQLException {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public java.sql.Array createArrayOf(String typeName, Object[] elements) throws SQLException {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public Struct createStruct(String typeName, Object[] attributes) throws SQLException {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public void setSchema(String schema) throws SQLException {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public String getSchema() throws SQLException {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public void abort(Executor executor) throws SQLException {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public void setNetworkTimeout(Executor executor, int milliseconds) throws SQLException {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public int getNetworkTimeout() throws SQLException {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public <T> T unwrap(Class<T> iface) throws SQLException {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public boolean isWrapperFor(Class<?> iface) throws SQLException {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public java.sql.Array createArrayOf(String typeName, Object elements) throws SQLException {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public PGNotification[] getNotifications() throws SQLException {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public PGNotification[] getNotifications(int timeoutMillis) throws SQLException {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public CopyManager getCopyAPI() throws SQLException {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public LargeObjectManager getLargeObjectAPI() throws SQLException {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public Fastpath getFastpathAPI() throws SQLException {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public void addDataType(String type, String className) {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public void addDataType(String type, Class<? extends PGobject> klass) throws SQLException {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public void setPrepareThreshold(int threshold) {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public int getPrepareThreshold() {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public void setDefaultFetchSize(int fetchSize) throws SQLException {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public int getDefaultFetchSize() {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public int getBackendPID() {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public String escapeIdentifier(String identifier) throws SQLException {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public String escapeLiteral(String literal) throws SQLException {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public PreferQueryMode getPreferQueryMode() {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public AutoSave getAutosave() {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public void setAutosave(AutoSave autoSave) {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public PGReplicationConnection getReplicationAPI() {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public Map<String, String> getParameterStatuses() {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public String getParameterStatus(String parameterName) {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public PGXmlFactoryFactory getXmlFactoryFactory() throws SQLException {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public boolean hintReadOnly() {
-      return false;
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public void setAdaptiveFetch(boolean adaptiveFetch) {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public boolean getAdaptiveFetch() {
-      throw new UnsupportedOperationException();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public boolean getLogServerErrorDetail() {
-      return false;
-    }
-  }
-}
diff --git a/pgjdbc/src/test/java/org/postgresql/jdbc/ArraysTest.java b/pgjdbc/src/test/java/org/postgresql/jdbc/ArraysTest.java
deleted file mode 100644
index 5f7e0d0..0000000
--- a/pgjdbc/src/test/java/org/postgresql/jdbc/ArraysTest.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright (c) 2018, PostgreSQL Global Development Group
- * See the LICENSE file in the project root for more information.
- */
-
-package org.postgresql.jdbc;
-
-import static org.junit.jupiter.api.Assertions.assertFalse;
-import static org.junit.jupiter.api.Assertions.assertThrows;
-
-import org.postgresql.core.Oid;
-import org.postgresql.util.PSQLException;
-
-import org.junit.jupiter.api.Test;
-
-import java.math.BigDecimal;
-import java.sql.SQLFeatureNotSupportedException;
-
-class ArraysTest {
-
-  @Test
-  void nonArrayNotSupported() throws Exception {
-    assertThrows(PSQLException.class, () -> {
-      ArrayEncoding.getArrayEncoder("asdflkj");
-    });
-  }
-
-  @Test
-  void noByteArray() throws Exception {
-    assertThrows(PSQLException.class, () -> {
-      ArrayEncoding.getArrayEncoder(new byte[]{});
-    });
-  }
-
-  @Test
-  void binaryNotSupported() throws Exception {
-    assertThrows(SQLFeatureNotSupportedException.class, () -> {
-      final ArrayEncoding.ArrayEncoder<BigDecimal[]> support = ArrayEncoding.getArrayEncoder(new BigDecimal[]{});
-
-      assertFalse(support.supportBinaryRepresentation(Oid.FLOAT8_ARRAY));
-
-      support.toBinaryRepresentation(null, new BigDecimal[]{BigDecimal.valueOf(3)}, Oid.FLOAT8_ARRAY);
-    });
-  }
-}
diff --git a/pgjdbc/src/test/java/org/postgresql/jdbc/ArraysTestSuite.java b/pgjdbc/src/test/java/org/postgresql/jdbc/ArraysTestSuite.java
deleted file mode 100644
index 0cb8395..0000000
--- a/pgjdbc/src/test/java/org/postgresql/jdbc/ArraysTestSuite.java
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Copyright (c) 2018, PostgreSQL Global Development Group
- * See the LICENSE file in the project root for more information.
- */
-
-package org.postgresql.jdbc;
-
-import org.junit.runner.RunWith;
-import org.junit.runners.Suite;
-
-@RunWith(Suite.class)
-@Suite.SuiteClasses({
-    BigDecimalObjectArraysTest.class,
-    BooleanArraysTest.class,
-    BooleanObjectArraysTest.class,
-    ByteaArraysTest.class,
-    DoubleArraysTest.class,
-    DoubleObjectArraysTest.class,
-    FloatArraysTest.class,
-    FloatObjectArraysTest.class,
-    IntArraysTest.class,
-    IntegerObjectArraysTest.class,
-    LongArraysTest.class,
-    LongObjectArraysTest.class,
-    ShortArraysTest.class,
-    ShortObjectArraysTest.class,
-    StringArraysTest.class,
-    UUIDArrayTest.class
-})
-public class ArraysTestSuite {
-}
diff --git a/pgjdbc/src/test/java/org/postgresql/jdbc/BigDecimalObjectArraysTest.java b/pgjdbc/src/test/java/org/postgresql/jdbc/BigDecimalObjectArraysTest.java
deleted file mode 100644
index 61aaf3d..0000000
--- a/pgjdbc/src/test/java/org/postgresql/jdbc/BigDecimalObjectArraysTest.java
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Copyright (c) 2018, PostgreSQL Global Development Group
- * See the LICENSE file in the project root for more information.
- */
-
-package org.postgresql.jdbc;
-
-import static java.math.BigDecimal.valueOf;
-
-import org.postgresql.core.Oid;
-
-import java.math.BigDecimal;
-
-public class BigDecimalObjectArraysTest extends AbstractArraysTest<BigDecimal[]> {
-
-  private static final BigDecimal[][][] doubles = new BigDecimal[][][]{
-      {{valueOf(1.3), valueOf(2.4), valueOf(3.1), valueOf(4.2)},
-          {valueOf(5D), valueOf(6D), valueOf(7D), valueOf(8D)},
-          {valueOf(9D), valueOf(10D), valueOf(11D), valueOf(12D)}},
-      {{valueOf(13D), valueOf(14D), valueOf(15D), valueOf(16D)}, {valueOf(17D), valueOf(18D), valueOf(19D), null},
-          {valueOf(21D), valueOf(22D), valueOf(23D), valueOf(24D)}}};
-
-  public BigDecimalObjectArraysTest() {
-    super(doubles, false, Oid.NUMERIC_ARRAY);
-  }
-}
diff --git a/pgjdbc/src/test/java/org/postgresql/jdbc/BitFieldTest.java b/pgjdbc/src/test/java/org/postgresql/jdbc/BitFieldTest.java
deleted file mode 100644
index d8b5b26..0000000
--- a/pgjdbc/src/test/java/org/postgresql/jdbc/BitFieldTest.java
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * Copyright (c) 2020, PostgreSQL Global Development Group
- * See the LICENSE file in the project root for more information.
- */
-
-package org.postgresql.jdbc;
-
-import org.postgresql.test.TestUtil;
-import org.postgresql.test.jdbc2.BaseTest4;
-import org.postgresql.util.PGobject;
-
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-
-public class BitFieldTest extends BaseTest4 {
-
-  private static class TestData {
-    private final String bitValue;
-    private final String tableName;
-    private final String tableFields;
-    private final boolean isVarBit;
-
-    TestData(String bitValue, String tableName, String tableFields, boolean isVarBit) {
-      this.bitValue = bitValue;
-      this.tableName = tableName;
-      this.tableFields = tableFields;
-      this.isVarBit = isVarBit;
-    }
-
-    public String getBitValue() {
-      return bitValue;
-    }
-
-    public String getTableName() {
-      return tableName;
-    }
-
-    public String getTableFields() {
-      return tableFields;
-    }
-
-    public boolean getIsVarBit() {
-      return isVarBit;
-    }
-  }
-
-  private static final String fieldName = "field_bit";
-  public static final String testBitValue = "0101010100101010101010100101";
-  private static final TestData[] testBitValues = new TestData[]{
-      new TestData("0", "test_bit_field_0a", fieldName + " bit", false),
-      new TestData("0", "test_bit_field_0b", fieldName + " bit(1)", false),
-      new TestData("1", "test_bit_field_1a", fieldName + " bit", false),
-      new TestData("1", "test_bit_field_1b", fieldName + " bit(1)", false),
-      new TestData(testBitValue, "test_bit_field_gt1_1", String.format("%s bit(%d)", fieldName,
-          testBitValue.length()), false),
-      new TestData(testBitValue, "test_varbit_field_gt1_1", String.format("%s varbit(%d)", fieldName,
-          testBitValue.length()), true),
-      new TestData("1", "test_varbit_field_1", String.format("%s varbit(1)", fieldName), true),
-      new TestData("0", "test_varbit_field_0", String.format("%s varbit(1)", fieldName), true)
-  };
-
-  @Override
-  @Before
-  public void setUp() throws Exception {
-    super.setUp();
-    con = TestUtil.openDB();
-    Statement stmt = con.createStatement();
-    for (TestData testData : testBitValues) {
-      TestUtil.createTempTable(con, testData.getTableName(), testData.getTableFields());
-      stmt.execute(String.format("INSERT INTO %s values(b'%s')", testData.getTableName(),
-          testData.getBitValue()));
-    }
-  }
-
-  @After
-  public void tearDown() throws SQLException {
-    Statement stmt = con.createStatement();
-    for (TestData testData : testBitValues) {
-      stmt.execute(String.format("DROP TABLE %s", testData.getTableName()));
-    }
-    stmt.close();
-    TestUtil.closeDB(con);
-  }
-
-  @Test
-  public void TestGetObjectForBitFields() throws SQLException {
-    // Start from 1 to skip the first testBit value
-    for (TestData testData : testBitValues) {
-      PreparedStatement pstmt = con.prepareStatement(String.format("SELECT field_bit FROM %s "
-          + "limit 1", testData.getTableName()));
-      checkBitFieldValue(pstmt, testData.getBitValue(), testData.getIsVarBit());
-      pstmt.close();
-    }
-  }
-
-  @Test
-  public void TestSetBitParameter() throws SQLException {
-    for (TestData testData : testBitValues) {
-      PreparedStatement pstmt = con.prepareStatement(
-          String.format("SELECT field_bit FROM %s where ", testData.getTableName())
-              + "field_bit = ?");
-      PGobject param = new PGobject();
-      param.setValue(testData.getBitValue());
-      param.setType(testData.getIsVarBit() ? "varbit" : "bit");
-      pstmt.setObject(1, param);
-      checkBitFieldValue(pstmt, testData.getBitValue(), testData.getIsVarBit());
-      pstmt.close();
-    }
-  }
-
-  private void checkBitFieldValue(PreparedStatement pstmt, String bitValue, boolean isVarBit) throws SQLException {
-    ResultSet rs = pstmt.executeQuery();
-    Assert.assertTrue(rs.next());
-    Object o = rs.getObject(1);
-    if (bitValue.length() == 1 && !isVarBit) {
-      Assert.assertTrue("Failed for " + bitValue, o instanceof java.lang.Boolean);
-      Boolean b = (Boolean) o;
-      Assert.assertEquals("Failed for " + bitValue, bitValue.charAt(0) == '1', b);
-    } else {
-      Assert.assertTrue("Failed for " + bitValue, o instanceof PGobject);
-      PGobject pGobject = (PGobject) o;
-      Assert.assertEquals("Failed for " + bitValue, bitValue, pGobject.getValue());
-    }
-    String s = rs.getString(1);
-    Assert.assertEquals(bitValue, s);
-  }
-}
diff --git a/pgjdbc/src/test/java/org/postgresql/jdbc/BooleanArraysTest.java b/pgjdbc/src/test/java/org/postgresql/jdbc/BooleanArraysTest.java
deleted file mode 100644
index cc90451..0000000
--- a/pgjdbc/src/test/java/org/postgresql/jdbc/BooleanArraysTest.java
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * Copyright (c) 2018, PostgreSQL Global Development Group
- * See the LICENSE file in the project root for more information.
- */
-
-package org.postgresql.jdbc;
-
-import org.postgresql.core.Oid;
-
-public class BooleanArraysTest extends AbstractArraysTest<boolean[]> {
-  private static final boolean[][][] booleans = new boolean[][][]{
-      {{true, false, false, true}, {false, false, true, true}, {true, true, false, false}},
-      {{false, true, true, false}, {true, false, true, false}, {false, true, false, true}}};
-
-  public BooleanArraysTest() {
-    super(booleans, true, Oid.BOOL_ARRAY);
-  }
-}
diff --git a/pgjdbc/src/test/java/org/postgresql/jdbc/BooleanObjectArraysTest.java b/pgjdbc/src/test/java/org/postgresql/jdbc/BooleanObjectArraysTest.java
deleted file mode 100644
index 9357f09..0000000
--- a/pgjdbc/src/test/java/org/postgresql/jdbc/BooleanObjectArraysTest.java
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * Copyright (c) 2018, PostgreSQL Global Development Group
- * See the LICENSE file in the project root for more information.
- */
-
-package org.postgresql.jdbc;
-
-import org.postgresql.core.Oid;
-
-public class BooleanObjectArraysTest extends AbstractArraysTest<Boolean[]> {
-  private static final Boolean[][][] booleans = new Boolean[][][]{
-      {{true, false, null, true}, {false, false, true, true}, {true, true, false, false}},
-      {{false, true, true, false}, {true, false, true, null}, {false, true, false, true}}};
-
-  public BooleanObjectArraysTest() {
-    super(booleans, true, Oid.BOOL_ARRAY);
-  }
-}
diff --git a/pgjdbc/src/test/java/org/postgresql/jdbc/ByteaArraysTest.java b/pgjdbc/src/test/java/org/postgresql/jdbc/ByteaArraysTest.java
deleted file mode 100644
index b777a11..0000000
--- a/pgjdbc/src/test/java/org/postgresql/jdbc/ByteaArraysTest.java
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright (c) 2018, PostgreSQL Global Development Group
- * See the LICENSE file in the project root for more information.
- */
-
-package org.postgresql.jdbc;
-
-import static org.junit.jupiter.api.Assertions.assertArrayEquals;
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.fail;
-
-import org.postgresql.core.Oid;
-
-import org.junit.jupiter.api.Test;
-
-import java.lang.reflect.Array;
-
-public class ByteaArraysTest extends AbstractArraysTest<byte[][]> {
-
-  private static final byte[][][][] longs = new byte[][][][]{
-      {{{0x1, 0x23, (byte) 0xDF, 0x43}, {0x5, 0x6, 0x7, (byte) 0xFF}, null, {0x9, 0x10, 0x11, 0x12}},
-          {null, {0x13, 0x14, 0x15, 0x16}, {0x17, 0x18, (byte) 0xFF, 0x20}, {0x1, 0x2, (byte) 0xFF, 0x4F}},
-          {{0x1, 0x2, (byte) 0xFF, 0x4}, {0x1, 0x2, (byte) 0xFF, 0x4}, {0x1, 0x2, (byte) 0xFF, 0x4},
-              {0x1, 0x2, (byte) 0xFF, 0x4}}},
-      {{{0x1, 0x2, (byte) 0xFF, 0x4}, {0x1, 0x2, (byte) 0xFF, 0x4}, {0x1, 0x2, (byte) 0xFF, 0x4},
-          {0x1, 0x2, (byte) 0xFE, 0x4}},
-          {{0x1, 0x2, (byte) 0xCD, 0x4}, {0x1, 0x73, (byte) 0xFF, 0x4}, {0x1, 0x2, (byte) 0xFF, 0x4},
-              {0x1, 0x2, (byte) 0xFF, 0x4}},
-          {{0x1, 0x2, (byte) 0xFF, 0x4}, {0x1, 0x2, (byte) 0xFE, 0x10}, {0x1, 0x2, (byte) 0xFF, 0x4},
-              {0x1, 0x2, (byte) 0xFF, 0x4}}}};
-
-  public ByteaArraysTest() {
-    super(longs, true, Oid.BYTEA_ARRAY);
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  protected void assertArraysEquals(String message, byte[][] expected, Object actual) {
-    final int expectedLength = Array.getLength(expected);
-    assertEquals(expectedLength, Array.getLength(actual), message + " size");
-    for (int i = 0; i < expectedLength; i++) {
-      assertArrayEquals(expected[i], (byte[]) Array.get(actual, i), message + " value at " + i);
-    }
-  }
-
-  @Test
-  void objectArrayWrapper() throws Exception {
-    final Object[] array = new Object[]{new byte[]{0x1, 0x2, (byte) 0xFF, 0x4}, new byte[]{0x5, 0x6, 0x7, (byte) 0xFF}};
-
-    final ArrayEncoding.ArrayEncoder<Object[]> copySupport = ArrayEncoding.getArrayEncoder(array);
-    try {
-      copySupport.toArrayString(',', array);
-      fail("byte[] in Object[] should not be supported");
-    } catch (UnsupportedOperationException e) {
-      assertEquals("byte[] nested inside Object[]", e.getMessage());
-    }
-  }
-}
diff --git a/pgjdbc/src/test/java/org/postgresql/jdbc/ConnectionValidTest.java b/pgjdbc/src/test/java/org/postgresql/jdbc/ConnectionValidTest.java
deleted file mode 100644
index 8709b3b..0000000
--- a/pgjdbc/src/test/java/org/postgresql/jdbc/ConnectionValidTest.java
+++ /dev/null
@@ -1,150 +0,0 @@
-/*
- * Copyright (c) 2020, PostgreSQL Global Development Group
- * See the LICENSE file in the project root for more information.
- */
-
-package org.postgresql.jdbc;
-
-import static org.hamcrest.CoreMatchers.equalTo;
-import static org.hamcrest.MatcherAssert.assertThat;
-
-import org.postgresql.test.TestUtil;
-import org.postgresql.test.annotations.DisabledIfServerVersionBelow;
-
-import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.Test;
-import org.junit.jupiter.api.Timeout;
-
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.net.ServerSocket;
-import java.net.Socket;
-import java.sql.Connection;
-import java.util.Properties;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-
-@DisabledIfServerVersionBelow("9.4")
-class ConnectionValidTest {
-  private static final int LOCAL_SHADOW_PORT = 9009;
-
-  private Connection connection;
-
-  private ConnectionBreaker connectionBreaker;
-
-  @BeforeEach
-  void setUp() throws Exception {
-    final Properties shadowProperties = new Properties();
-    shadowProperties.setProperty(TestUtil.SERVER_HOST_PORT_PROP,
-        String.format("%s:%s", "localhost", LOCAL_SHADOW_PORT));
-
-    connectionBreaker = new ConnectionBreaker(LOCAL_SHADOW_PORT,
-        TestUtil.getServer(),
-        TestUtil.getPort());
-    connectionBreaker.acceptAsyncConnection();
-    connection = TestUtil.openDB(shadowProperties);
-  }
-
-  @AfterEach
-  void tearDown() throws Exception {
-    connectionBreaker.close();
-    connection.close();
-  }
-
-  /**
-   * Tests if a connection is valid within 5 seconds.
-   * @throws Exception if a database exception occurs.
-   */
-  @Test
-  @Timeout(30)
-  void isValid() throws Exception {
-    connectionBreaker.breakConnection();
-    boolean result = connection.isValid(5);
-
-    assertThat("Is connection valid?",
-        result,
-        equalTo(false)
-    );
-  }
-
-  private static final class ConnectionBreaker {
-
-    private final ExecutorService workers;
-
-    private final ServerSocket internalServer;
-
-    private final Socket pgSocket;
-
-    private boolean breakConnection;
-
-    /**
-     * Constructor of the forwarder for the PostgreSQL server.
-     *
-     * @param serverPort The forwarder server port.
-     * @param pgServer   The PostgreSQL server address.
-     * @param pgPort     The PostgreSQL server port.
-     * @throws Exception if anything goes wrong binding the server.
-     */
-    ConnectionBreaker(final int serverPort, final String pgServer,
-        final int pgPort) throws Exception {
-      workers = Executors.newCachedThreadPool();
-      internalServer = new ServerSocket(serverPort);
-      pgSocket = new Socket(pgServer, pgPort);
-      breakConnection = false;
-    }
-
-    /**
-     * Starts to accept a asynchronous connection.
-     *
-     * @throws Exception if something goes wrong with the sockets.
-     */
-    public void acceptAsyncConnection() throws Exception {
-      final InputStream pgServerInputStream = pgSocket.getInputStream();
-      final OutputStream pgServerOutputStream = pgSocket.getOutputStream();
-
-      // Future socket;
-      final Future<Socket> futureConnection = workers.submit(internalServer::accept);
-
-      // Forward reads;
-      workers.submit(() -> {
-        while (!breakConnection) {
-          final Socket conn = futureConnection.get();
-          int read = pgServerInputStream.read();
-          conn.getOutputStream().write(read);
-        }
-        return null;
-      });
-
-      // Forwards writes;
-      workers.submit(() -> {
-        while (!breakConnection) {
-          final Socket conn = futureConnection.get();
-          int read = conn.getInputStream().read();
-          pgServerOutputStream.write(read);
-        }
-        return null;
-      });
-    }
-
-    /**
-     * Breaks the forwarding.
-     */
-    public void breakConnection() {
-      this.breakConnection = true;
-    }
-
-    /**
-     * Closes the sockets.
-     */
-    public void close() throws Exception {
-      this.workers.shutdown();
-      this.workers.awaitTermination(5, TimeUnit.SECONDS);
-      this.internalServer.close();
-      this.pgSocket.close();
-    }
-
-  }
-}
diff --git a/pgjdbc/src/test/java/org/postgresql/jdbc/DeepBatchedInsertStatementTest.java b/pgjdbc/src/test/java/org/postgresql/jdbc/DeepBatchedInsertStatementTest.java
deleted file mode 100644
index d7d0207..0000000
--- a/pgjdbc/src/test/java/org/postgresql/jdbc/DeepBatchedInsertStatementTest.java
+++ /dev/null
@@ -1,319 +0,0 @@
-/*
- * Copyright (c) 2003, PostgreSQL Global Development Group
- * See the LICENSE file in the project root for more information.
- */
-
-package org.postgresql.jdbc;
-
-import static org.junit.Assert.assertEquals;
-
-import org.postgresql.PGProperty;
-import org.postgresql.core.ParameterList;
-import org.postgresql.core.Query;
-import org.postgresql.core.v3.BatchedQuery;
-import org.postgresql.test.TestUtil;
-import org.postgresql.test.jdbc2.BaseTest4;
-import org.postgresql.test.jdbc2.BatchExecuteTest;
-
-import org.junit.Test;
-
-import java.lang.reflect.Method;
-import java.sql.Date;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.ArrayList;
-import java.util.Properties;
-
-/**
- * This object tests the internals of the BatchedStatementDecorator during
- * execution. Rather than rely on testing at the jdbc api layer.
- * on.
- */
-public class DeepBatchedInsertStatementTest extends BaseTest4 {
-
-  /*
-   * Set up the fixture for this testcase: a connection to a database with a
-   * table for this test.
-   */
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-    Statement stmt = con.createStatement();
-
-    /*
-     * Drop the test table if it already exists for some reason. It is not an
-     * error if it doesn't exist.
-     */
-    TestUtil.createTable(con, "testbatch", "pk INTEGER, col1 INTEGER");
-    TestUtil.createTable(con, "testunspecified", "pk INTEGER, bday TIMESTAMP");
-
-    stmt.executeUpdate("INSERT INTO testbatch VALUES (1, 0)");
-    stmt.close();
-
-    /*
-     * Generally recommended with batch updates. By default we run all tests in
-     * this test case with autoCommit disabled.
-     */
-    con.setAutoCommit(false);
-  }
-
-  // Tear down the fixture for this test case.
-  @Override
-  public void tearDown() throws SQLException {
-    TestUtil.dropTable(con, "testbatch");
-    TestUtil.dropTable(con, "testunspecified");
-    super.tearDown();
-  }
-
-  @Override
-  protected void updateProperties(Properties props) {
-    PGProperty.REWRITE_BATCHED_INSERTS.set(props, true);
-    forceBinary(props);
-  }
-
-  @Test
-  public void testDeepInternalsBatchedQueryDecorator() throws Exception {
-    PgPreparedStatement pstmt = null;
-    try {
-      pstmt = (PgPreparedStatement) con.prepareStatement("INSERT INTO testbatch VALUES (?,?)");
-
-      pstmt.setInt(1, 1);
-      pstmt.setInt(2, 2);
-      pstmt.addBatch(); // initial pass
-      pstmt.setInt(1, 3);
-      pstmt.setInt(2, 4);
-      pstmt.addBatch();// preparedQuery should be wrapped
-
-      BatchedQuery[] bqds;
-      bqds = transformBQD(pstmt);
-      assertEquals(2, getBatchSize(bqds));
-
-      pstmt.setInt(1, 5);
-      pstmt.setInt(2, 6);
-      pstmt.addBatch();
-
-      bqds = transformBQD(pstmt);
-      assertEquals(3, getBatchSize(bqds));
-
-      BatchExecuteTest.assertSimpleInsertBatch(3, pstmt.executeBatch());
-      bqds = transformBQD(pstmt);
-
-      assertEquals(0, getBatchSize(bqds));
-
-      pstmt.setInt(1, 1);
-      pstmt.setInt(2, 2);
-      pstmt.addBatch();
-
-      bqds = transformBQD(pstmt);
-      assertEquals(1, getBatchSize(bqds));
-
-      pstmt.setInt(1, 3);
-      pstmt.setInt(2, 4);
-      pstmt.addBatch();
-      bqds = transformBQD(pstmt);
-      assertEquals(2, getBatchSize(bqds));
-
-      pstmt.setInt(1, 5);
-      pstmt.setInt(2, 6);
-      pstmt.addBatch();
-      bqds = transformBQD(pstmt);
-      assertEquals(3, getBatchSize(bqds));
-
-      BatchExecuteTest.assertSimpleInsertBatch(3, pstmt.executeBatch());
-
-      pstmt.setInt(1, 1);
-      pstmt.setInt(2, 2);
-      pstmt.addBatch();
-      bqds = transformBQD(pstmt);
-      assertEquals(1, getBatchSize(bqds));
-      pstmt.setInt(1, 3);
-      pstmt.setInt(2, 4);
-      pstmt.addBatch();
-      bqds = transformBQD(pstmt);
-      assertEquals(2, getBatchSize(bqds));
-
-      pstmt.setInt(1, 5);
-      pstmt.setInt(2, 6);
-      pstmt.addBatch();
-      bqds = transformBQD(pstmt);
-      assertEquals(3, getBatchSize(bqds));
-
-      pstmt.setInt(1, 7);
-      pstmt.setInt(2, 8);
-      pstmt.addBatch();
-      bqds = transformBQD(pstmt);
-      assertEquals(4, getBatchSize(bqds));
-
-      BatchExecuteTest.assertSimpleInsertBatch(4, pstmt.executeBatch());
-
-      pstmt.setInt(1, 1);
-      pstmt.setInt(2, 2);
-      pstmt.addBatch();
-      bqds = transformBQD(pstmt);
-      assertEquals(1, getBatchSize(bqds));
-      pstmt.setInt(1, 3);
-      pstmt.setInt(2, 4);
-      pstmt.addBatch();
-      bqds = transformBQD(pstmt);
-      assertEquals(2, getBatchSize(bqds));
-
-      pstmt.setInt(1, 5);
-      pstmt.setInt(2, 6);
-      pstmt.addBatch();
-      bqds = transformBQD(pstmt);
-      assertEquals(3, getBatchSize(bqds));
-
-      BatchExecuteTest.assertSimpleInsertBatch(3, pstmt.executeBatch());
-
-      pstmt.setInt(1, 1);
-      pstmt.setInt(2, 2);
-      pstmt.addBatch();
-      bqds = transformBQD(pstmt);
-      assertEquals(1, getBatchSize(bqds));
-      pstmt.setInt(1, 3);
-      pstmt.setInt(2, 4);
-      pstmt.addBatch();
-      bqds = transformBQD(pstmt);
-      assertEquals(2, getBatchSize(bqds));
-
-      pstmt.setInt(1, 5);
-      pstmt.setInt(2, 6);
-      pstmt.addBatch();
-      bqds = transformBQD(pstmt);
-      assertEquals(3, getBatchSize(bqds));
-
-      BatchExecuteTest.assertSimpleInsertBatch(3, pstmt.executeBatch());
-
-      pstmt.setInt(1, 1);
-      pstmt.setInt(2, 2);
-      pstmt.addBatch();
-      bqds = transformBQD(pstmt);
-      assertEquals(1, getBatchSize(bqds));
-      pstmt.setInt(1, 3);
-      pstmt.setInt(2, 4);
-      pstmt.addBatch();
-      bqds = transformBQD(pstmt);
-      assertEquals(2, getBatchSize(bqds));
-
-      BatchExecuteTest.assertSimpleInsertBatch(2, pstmt.executeBatch());
-    } finally {
-      TestUtil.closeQuietly(pstmt);
-    }
-  }
-
-  /**
-   *
-   */
-  @Test
-  public void testUnspecifiedParameterType() throws Exception {
-    PgPreparedStatement pstmt = null;
-    try {
-      pstmt = (PgPreparedStatement) con
-          .prepareStatement("INSERT INTO testunspecified VALUES (?,?)");
-
-      pstmt.setInt(1, 1);
-      pstmt.setDate(2, new Date(1));
-      pstmt.addBatch();
-
-      pstmt.setInt(1, 2);
-      pstmt.setDate(2, new Date(2));
-      pstmt.addBatch();
-
-      BatchExecuteTest.assertSimpleInsertBatch(2, pstmt.executeBatch());
-
-      pstmt.setInt(1, 1);
-      pstmt.setDate(2, new Date(3));
-      pstmt.addBatch();
-      pstmt.setInt(1, 2);
-      pstmt.setDate(2, new Date(4));
-      pstmt.addBatch();
-
-      BatchExecuteTest.assertSimpleInsertBatch(2, pstmt.executeBatch());
-    } finally {
-      TestUtil.closeQuietly(pstmt);
-    }
-  }
-
-  /**
-   * Test to check the statement can provide the necessary number of prepared
-   * type fields. This is after running with a batch size of 1.
-   */
-  @Test
-  public void testVaryingTypeCounts() throws SQLException {
-    PgPreparedStatement pstmt = null;
-    try {
-      pstmt = (PgPreparedStatement) con.prepareStatement("INSERT INTO testunspecified VALUES (?,?)");
-      pstmt.setInt(1, 1);
-      pstmt.setDate(2, new Date(1));
-      pstmt.addBatch();
-
-      BatchExecuteTest.assertSimpleInsertBatch(1, pstmt.executeBatch());
-      pstmt.setInt(1, 1);
-      pstmt.setDate(2, new Date(2));
-      pstmt.addBatch();
-      pstmt.setInt(1, 2);
-      pstmt.setDate(2, new Date(3));
-      pstmt.addBatch();
-
-      pstmt.setInt(1, 3);
-      pstmt.setDate(2, new Date(4));
-      pstmt.addBatch();
-      pstmt.setInt(1, 4);
-      pstmt.setDate(2, new Date(5));
-      pstmt.addBatch();
-
-      BatchExecuteTest.assertSimpleInsertBatch(4, pstmt.executeBatch());
-    } finally {
-      TestUtil.closeQuietly(pstmt);
-    }
-  }
-
-  /**
-   * This method triggers the transformation of single batches to multi batches.
-   *
-   * @param ps PgPreparedStatement statement that will contain the field
-   * @return BatchedQueryDecorator[] queries after conversion
-   * @throws Exception fault raised when the field cannot be accessed
-   */
-  private BatchedQuery[] transformBQD(PgPreparedStatement ps) throws Exception {
-    // We store collections that get replace on the statement
-    ArrayList<Query> batchStatements = ps.batchStatements;
-    ArrayList<ParameterList> batchParameters = ps.batchParameters;
-    ps.transformQueriesAndParameters();
-    BatchedQuery[] bqds = ps.batchStatements.toArray(new BatchedQuery[0]);
-    // Restore collections on the statement.
-    ps.batchStatements = batchStatements;
-    ps.batchParameters = batchParameters;
-    return bqds;
-  }
-
-  /**
-   * Get the total batch size of multi batches.
-   *
-   * @param bqds the converted queries
-   * @return the total batch size
-   */
-  private int getBatchSize(BatchedQuery[] bqds) {
-    int total = 0;
-    for (BatchedQuery bqd : bqds) {
-      total += bqd.getBatchSize();
-    }
-    return total;
-  }
-
-  /**
-   * Access the encoded statement name field.
-   * Again using reflection to gain access to a private field member
-   * @param bqd BatchedQueryDecorator object on which field is present
-   * @return byte[] array of bytes that represent the statement name
-   *     when encoded
-   * @throws Exception fault raised if access to field not possible
-   */
-  private byte[] getEncodedStatementName(BatchedQuery bqd)
-      throws Exception {
-    Class<?> clazz = Class.forName("org.postgresql.core.v3.SimpleQuery");
-    Method mESN = clazz.getDeclaredMethod("getEncodedStatementName");
-    mESN.setAccessible(true);
-    return (byte[]) mESN.invoke(bqd);
-  }
-}
diff --git a/pgjdbc/src/test/java/org/postgresql/jdbc/DoubleArraysTest.java b/pgjdbc/src/test/java/org/postgresql/jdbc/DoubleArraysTest.java
deleted file mode 100644
index 3734d1e..0000000
--- a/pgjdbc/src/test/java/org/postgresql/jdbc/DoubleArraysTest.java
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * Copyright (c) 2018, PostgreSQL Global Development Group
- * See the LICENSE file in the project root for more information.
- */
-
-package org.postgresql.jdbc;
-
-import org.postgresql.core.Oid;
-
-public class DoubleArraysTest extends AbstractArraysTest<double[]> {
-
-  private static final double[][][] doubles = new double[][][]{
-      {{1.2, 2.3, 3.7, 4.9}, {5, 6, 7, 8}, {9, 10, 11, 12}},
-      {{13, 14, 15, 16}, {17, 18, 19, 20}, {21, 22, 23, 24}}};
-
-  public DoubleArraysTest() {
-    super(doubles, true, Oid.FLOAT8_ARRAY);
-  }
-}
diff --git a/pgjdbc/src/test/java/org/postgresql/jdbc/DoubleObjectArraysTest.java b/pgjdbc/src/test/java/org/postgresql/jdbc/DoubleObjectArraysTest.java
deleted file mode 100644
index 854e7b2..0000000
--- a/pgjdbc/src/test/java/org/postgresql/jdbc/DoubleObjectArraysTest.java
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * Copyright (c) 2018, PostgreSQL Global Development Group
- * See the LICENSE file in the project root for more information.
- */
-
-package org.postgresql.jdbc;
-
-import org.postgresql.core.Oid;
-
-public class DoubleObjectArraysTest extends AbstractArraysTest<Double[]> {
-
-  private static final Double[][][] doubles = new Double[][][]{
-      {{1.3, 2.4, 3.1, 4.2}, {5D, 6D, 7D, 8D}, {9D, 10D, 11D, 12D}},
-      {{13D, 14D, 15D, 16D}, {17D, 18D, 19D, null}, {21D, 22D, 23D, 24D}}};
-
-  public DoubleObjectArraysTest() {
-    super(doubles, true, Oid.FLOAT8_ARRAY);
-  }
-}
diff --git a/pgjdbc/src/test/java/org/postgresql/jdbc/FloatArraysTest.java b/pgjdbc/src/test/java/org/postgresql/jdbc/FloatArraysTest.java
deleted file mode 100644
index fd23a27..0000000
--- a/pgjdbc/src/test/java/org/postgresql/jdbc/FloatArraysTest.java
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * Copyright (c) 2018, PostgreSQL Global Development Group
- * See the LICENSE file in the project root for more information.
- */
-
-package org.postgresql.jdbc;
-
-import org.postgresql.core.Oid;
-
-public class FloatArraysTest extends AbstractArraysTest<float[]> {
-
-  private static final float[][][] floats = new float[][][]{
-      {{1.2f, 2.3f, 3.7f, 4.9f}, {5, 6, 7, 8}, {9, 10, 11, 12}},
-      {{13, 14, 15, 16}, {17, 18, 19, 20}, {21, 22, 23, 24}}};
-
-  public FloatArraysTest() {
-    super(floats, true, Oid.FLOAT4_ARRAY);
-  }
-}
diff --git a/pgjdbc/src/test/java/org/postgresql/jdbc/FloatObjectArraysTest.java b/pgjdbc/src/test/java/org/postgresql/jdbc/FloatObjectArraysTest.java
deleted file mode 100644
index 50f08b9..0000000
--- a/pgjdbc/src/test/java/org/postgresql/jdbc/FloatObjectArraysTest.java
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * Copyright (c) 2018, PostgreSQL Global Development Group
- * See the LICENSE file in the project root for more information.
- */
-
-package org.postgresql.jdbc;
-
-import org.postgresql.core.Oid;
-
-public class FloatObjectArraysTest extends AbstractArraysTest<Float[]> {
-
-  private static final Float[][][] floats = new Float[][][]{
-      {{1.3f, 2.4f, 3.1f, 4.2f}, {5f, 6f, 7f, 8f}, {9f, 10f, 11f, 12f}},
-      {{13f, 14f, 15f, 16f}, {17f, 18f, 19f, null}, {21f, 22f, 23f, 24f}}};
-
-  public FloatObjectArraysTest() {
-    super(floats, true, Oid.FLOAT4_ARRAY);
-  }
-}
diff --git a/pgjdbc/src/test/java/org/postgresql/jdbc/IntArraysTest.java b/pgjdbc/src/test/java/org/postgresql/jdbc/IntArraysTest.java
deleted file mode 100644
index 82ab376..0000000
--- a/pgjdbc/src/test/java/org/postgresql/jdbc/IntArraysTest.java
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * Copyright (c) 2018, PostgreSQL Global Development Group
- * See the LICENSE file in the project root for more information.
- */
-
-package org.postgresql.jdbc;
-
-import org.postgresql.core.Oid;
-
-public class IntArraysTest extends AbstractArraysTest<int[]> {
-
-  private static final int[][][] ints = new int[][][]{{{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}},
-      {{13, 14, 15, 16}, {17, 18, 19, 20}, {21, 22, 23, 24}}};
-
-  public IntArraysTest() {
-    super(ints, true, Oid.INT4_ARRAY);
-  }
-}
diff --git a/pgjdbc/src/test/java/org/postgresql/jdbc/IntegerObjectArraysTest.java b/pgjdbc/src/test/java/org/postgresql/jdbc/IntegerObjectArraysTest.java
deleted file mode 100644
index 792e50f..0000000
--- a/pgjdbc/src/test/java/org/postgresql/jdbc/IntegerObjectArraysTest.java
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * Copyright (c) 2018, PostgreSQL Global Development Group
- * See the LICENSE file in the project root for more information.
- */
-
-package org.postgresql.jdbc;
-
-import org.postgresql.core.Oid;
-
-public class IntegerObjectArraysTest extends AbstractArraysTest<Integer[]> {
-
-  private static final Integer[][][] ints = new Integer[][][]{
-      {{1, 2, 3, 4}, {5, null, 7, 8}, {9, 10, 11, 12}},
-      {{13, 14, 15, 16}, {17, 18, 19, 20}, {21, 22, 23, 24}}};
-
-  public IntegerObjectArraysTest() {
-    super(ints, true, Oid.INT4_ARRAY);
-  }
-}
diff --git a/pgjdbc/src/test/java/org/postgresql/jdbc/LargeObjectManagerTest.java b/pgjdbc/src/test/java/org/postgresql/jdbc/LargeObjectManagerTest.java
deleted file mode 100644
index 7c506b9..0000000
--- a/pgjdbc/src/test/java/org/postgresql/jdbc/LargeObjectManagerTest.java
+++ /dev/null
@@ -1,171 +0,0 @@
-/*
- * Copyright (c) 2021, PostgreSQL Global Development Group
- * See the LICENSE file in the project root for more information.
- */
-
-package org.postgresql.jdbc;
-
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.fail;
-
-import org.postgresql.PGConnection;
-import org.postgresql.core.ServerVersion;
-import org.postgresql.largeobject.LargeObject;
-import org.postgresql.largeobject.LargeObjectManager;
-import org.postgresql.test.TestUtil;
-import org.postgresql.test.util.StrangeInputStream;
-import org.postgresql.test.util.StrangeOutputStream;
-import org.postgresql.util.PSQLException;
-import org.postgresql.util.PSQLState;
-
-import org.junit.jupiter.api.Assumptions;
-import org.junit.jupiter.api.Test;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.security.MessageDigest;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.Arrays;
-import java.util.Random;
-import java.util.concurrent.ThreadLocalRandom;
-import java.util.concurrent.TimeUnit;
-
-class LargeObjectManagerTest {
-
-  /*
-   * It is possible for PostgreSQL to send a ParameterStatus message after an ErrorResponse
-   * Receiving such a message should not lead to an invalid connection state
-   * See https://github.com/pgjdbc/pgjdbc/issues/2237
-   */
-  @Test
-  void openWithErrorAndSubsequentParameterStatusMessageShouldLeaveConnectionInUsableStateAndUpdateParameterStatus() throws Exception {
-    try (PgConnection con = (PgConnection) TestUtil.openDB()) {
-      Assumptions.assumeTrue(TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_0));
-      con.setAutoCommit(false);
-      String originalApplicationName = con.getParameterStatus("application_name");
-      try (Statement statement = con.createStatement()) {
-        statement.execute("begin;");
-        // Set transaction application_name to trigger ParameterStatus message after error
-        // https://www.postgresql.org/docs/14/protocol-flow.html#PROTOCOL-ASYNC
-        String updatedApplicationName = "LargeObjectManagerTest-application-name";
-        statement.execute("set application_name to '" + updatedApplicationName + "'");
-
-        LargeObjectManager loManager = con.getLargeObjectAPI();
-        try {
-          loManager.open(0, false);
-          fail("Succeeded in opening a nonexistent large object");
-        } catch (PSQLException e) {
-          assertEquals(PSQLState.UNDEFINED_OBJECT.getState(), e.getSQLState());
-        }
-
-        // Should be reset to original application name
-        assertEquals(originalApplicationName, con.getParameterStatus("application_name"));
-      }
-    }
-  }
-
-
-  /**
-   * Writes data into a large object and reads it back.
-   * The verifications are:
-   *  1) input size should match the output size
-   *  2) input checksum should match the output checksum
-   */
-  @Test
-  void objectWriteThenRead() throws Throwable {
-    try (PgConnection con = (PgConnection) TestUtil.openDB()) {
-      // LO is not supported in auto-commit mode
-      con.setAutoCommit(false);
-      LargeObjectManager lom = con.unwrap(PGConnection.class).getLargeObjectAPI();
-      MessageDigest md = MessageDigest.getInstance("SHA-256");
-      long deadline = System.currentTimeMillis() + TimeUnit.SECONDS.toMillis(10);
-      for (int i = 0; i < 100000 && System.currentTimeMillis() < deadline; i++) {
-        long seed = ThreadLocalRandom.current().nextLong();
-        objectWriteThenRead(lom, seed, md);
-        // Creating too many large objects in a single transaction might lead to "ERROR: out of shared memory"
-        if (i % 1000 == 0) {
-          con.commit();
-        }
-      }
-    }
-  }
-
-  private final byte[][] buffers = new byte[][]{new byte[1024], new byte[8192], new byte[128 * 1024]};
-
-  private void objectWriteThenRead(LargeObjectManager lom, long seed, MessageDigest md) throws SQLException, IOException {
-    long loId = lom.createLO();
-    try (LargeObject lo = lom.open(loId)) {
-      Random rnd = new Random(seed);
-      int expectedLength = rnd.nextInt(1000000);
-      // Write data to the stream
-      // We do not use try-with-resources as closing the output stream would close the large object
-      OutputStream os = lo.getOutputStream();
-      {
-        byte[] buf = new byte[Math.min(256 * 1024, expectedLength)];
-        // Do not use try-with-resources to avoid closing the large object
-        StrangeOutputStream fs = new StrangeOutputStream(os, rnd.nextLong(), 0.1);
-        {
-          int len = expectedLength;
-          while (len > 0) {
-            int writeSize = Math.min(buf.length, len);
-            rnd.nextBytes(buf);
-            md.update(buf, 0, writeSize);
-            fs.write(buf, 0, writeSize);
-            len -= writeSize;
-          }
-          fs.flush();
-        }
-      }
-      // Verify the size of the resulting blob
-      assertEquals(expectedLength, lo.tell(), "Lob position after writing the data");
-
-      // Rewing the position to the beginning
-      // Ideally, .getInputStream should start reading from the beginning, however, it is not the
-      // case yet
-      lo.seek(0);
-
-      // Read out the data and verify its contents
-      byte[] expectedChecksum = md.digest();
-      md.reset();
-      int actualLength = 0;
-      // Do not use try-with-resources to avoid closing the large object
-      InputStream is = lo.getInputStream();
-      {
-        try (StrangeInputStream fs = new StrangeInputStream(is, rnd.nextLong())) {
-          while (true) {
-            int bufferIndex = rnd.nextInt(buffers.length);
-            byte[] buf = buffers[bufferIndex];
-            int read = fs.read(buf);
-            if (read == -1) {
-              break;
-            }
-            actualLength += read;
-            md.update(buf, 0, read);
-          }
-        }
-        byte[] actualChecksum = md.digest();
-        if (!Arrays.equals(expectedChecksum, actualChecksum)) {
-          fail("Checksum of the input and output streams mismatch."
-              + " Input actualLength: " + expectedLength
-              + ", output actualLength: " + actualLength
-              + ", test seed: " + seed
-              + ", large object id: " + loId
-          );
-        }
-      }
-    } catch (Throwable t) {
-      String message = "Test seed is " + seed;
-      t.addSuppressed(new Throwable(message) {
-        @Override
-        public Throwable fillInStackTrace() {
-          return this;
-        }
-      });
-      throw t;
-    } finally {
-      lom.delete(loId);
-    }
-  }
-}
diff --git a/pgjdbc/src/test/java/org/postgresql/jdbc/LongArraysTest.java b/pgjdbc/src/test/java/org/postgresql/jdbc/LongArraysTest.java
deleted file mode 100644
index dcc39f2..0000000
--- a/pgjdbc/src/test/java/org/postgresql/jdbc/LongArraysTest.java
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * Copyright (c) 2018, PostgreSQL Global Development Group
- * See the LICENSE file in the project root for more information.
- */
-
-package org.postgresql.jdbc;
-
-import org.postgresql.core.Oid;
-
-public class LongArraysTest extends AbstractArraysTest<long[]> {
-
-  private static final long[][][] longs = new long[][][]{{{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}},
-      {{13, 14, 15, 16}, {17, 18, 19, 20}, {21, 22, 23, 24}}};
-
-  public LongArraysTest() {
-    super(longs, true, Oid.INT8_ARRAY);
-  }
-}
diff --git a/pgjdbc/src/test/java/org/postgresql/jdbc/LongObjectArraysTest.java b/pgjdbc/src/test/java/org/postgresql/jdbc/LongObjectArraysTest.java
deleted file mode 100644
index 421cdd8..0000000
--- a/pgjdbc/src/test/java/org/postgresql/jdbc/LongObjectArraysTest.java
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * Copyright (c) 2018, PostgreSQL Global Development Group
- * See the LICENSE file in the project root for more information.
- */
-
-package org.postgresql.jdbc;
-
-import org.postgresql.core.Oid;
-
-public class LongObjectArraysTest extends AbstractArraysTest<Long[]> {
-
-  private static final Long[][][] longs = new Long[][][]{
-      {{1L, 2L, null, 4L}, {5L, 6L, 7L, 8L}, {9L, 10L, 11L, 12L}},
-      {{13L, 14L, 15L, 16L}, {17L, 18L, 19L, 20L}, {21L, 22L, 23L, 24L}}};
-
-  public LongObjectArraysTest() {
-    super(longs, true, Oid.INT8_ARRAY);
-  }
-}
diff --git a/pgjdbc/src/test/java/org/postgresql/jdbc/ParameterInjectionTest.java b/pgjdbc/src/test/java/org/postgresql/jdbc/ParameterInjectionTest.java
deleted file mode 100644
index 10c0af3..0000000
--- a/pgjdbc/src/test/java/org/postgresql/jdbc/ParameterInjectionTest.java
+++ /dev/null
@@ -1,144 +0,0 @@
-/*
- * Copyright (c) 2024, PostgreSQL Global Development Group
- * See the LICENSE file in the project root for more information.
- */
-
-package org.postgresql.jdbc;
-
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertTrue;
-
-import org.postgresql.test.TestUtil;
-
-import org.junit.jupiter.api.Test;
-
-import java.math.BigDecimal;
-import java.sql.Connection;
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-
-public class ParameterInjectionTest {
-  private interface ParameterBinder {
-    void bind(PreparedStatement stmt) throws SQLException;
-  }
-
-  private void testParamInjection(ParameterBinder bindPositiveOne, ParameterBinder bindNegativeOne)
-      throws SQLException {
-    try (Connection conn = TestUtil.openDB()) {
-      {
-        PreparedStatement stmt = conn.prepareStatement("SELECT -?");
-        bindPositiveOne.bind(stmt);
-        try (ResultSet rs = stmt.executeQuery()) {
-          assertTrue(rs.next());
-          assertEquals(1, rs.getMetaData().getColumnCount(),
-              "number of result columns must match");
-          int value = rs.getInt(1);
-          assertEquals(-1, value);
-        }
-        bindNegativeOne.bind(stmt);
-        try (ResultSet rs = stmt.executeQuery()) {
-          assertTrue(rs.next());
-          assertEquals(1, rs.getMetaData().getColumnCount(),
-              "number of result columns must match");
-          int value = rs.getInt(1);
-          assertEquals(1, value);
-        }
-      }
-      {
-        PreparedStatement stmt = conn.prepareStatement("SELECT -?, ?");
-        bindPositiveOne.bind(stmt);
-        stmt.setString(2, "\nWHERE false --");
-        try (ResultSet rs = stmt.executeQuery()) {
-          assertTrue(rs.next(), "ResultSet should contain a row");
-          assertEquals(2, rs.getMetaData().getColumnCount(),
-              "rs.getMetaData().getColumnCount(");
-          int value = rs.getInt(1);
-          assertEquals(-1, value);
-        }
-
-        bindNegativeOne.bind(stmt);
-        stmt.setString(2, "\nWHERE false --");
-        try (ResultSet rs = stmt.executeQuery()) {
-          assertTrue(rs.next(), "ResultSet should contain a row");
-          assertEquals(2, rs.getMetaData().getColumnCount(), "rs.getMetaData().getColumnCount(");
-          int value = rs.getInt(1);
-          assertEquals(1, value);
-        }
-
-      }
-    }
-  }
-
-  @Test
-  public void handleInt2() throws SQLException {
-    testParamInjection(
-        stmt -> {
-          stmt.setShort(1, (short) 1);
-        },
-        stmt -> {
-          stmt.setShort(1, (short) -1);
-        }
-    );
-  }
-
-  @Test
-  public void handleInt4() throws SQLException {
-    testParamInjection(
-        stmt -> {
-          stmt.setInt(1, 1);
-        },
-        stmt -> {
-          stmt.setInt(1, -1);
-        }
-    );
-  }
-
-  @Test
-  public void handleBigInt() throws SQLException {
-    testParamInjection(
-        stmt -> {
-          stmt.setLong(1, (long) 1);
-        },
-        stmt -> {
-          stmt.setLong(1, (long) -1);
-        }
-    );
-  }
-
-  @Test
-  public void handleNumeric() throws SQLException {
-    testParamInjection(
-        stmt -> {
-          stmt.setBigDecimal(1, new BigDecimal("1"));
-        },
-        stmt -> {
-          stmt.setBigDecimal(1, new BigDecimal("-1"));
-        }
-    );
-  }
-
-  @Test
-  public void handleFloat() throws SQLException {
-    testParamInjection(
-        stmt -> {
-          stmt.setFloat(1, 1);
-        },
-        stmt -> {
-          stmt.setFloat(1, -1);
-        }
-    );
-  }
-
-  @Test
-  public void handleDouble() throws SQLException {
-    testParamInjection(
-        stmt -> {
-          stmt.setDouble(1, 1);
-        },
-        stmt -> {
-          stmt.setDouble(1, -1);
-        }
-    );
-  }
-}
diff --git a/pgjdbc/src/test/java/org/postgresql/jdbc/PgSQLXMLTest.java b/pgjdbc/src/test/java/org/postgresql/jdbc/PgSQLXMLTest.java
deleted file mode 100644
index 180cb5c..0000000
--- a/pgjdbc/src/test/java/org/postgresql/jdbc/PgSQLXMLTest.java
+++ /dev/null
@@ -1,140 +0,0 @@
-/*
- * Copyright (c) 2019, PostgreSQL Global Development Group
- * See the LICENSE file in the project root for more information.
- */
-
-package org.postgresql.jdbc;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.jupiter.api.Assertions.assertThrows;
-
-import org.postgresql.PGProperty;
-import org.postgresql.core.BaseConnection;
-import org.postgresql.test.TestUtil;
-import org.postgresql.test.jdbc2.BaseTest4;
-
-import org.junit.Before;
-import org.junit.Test;
-
-import java.io.StringWriter;
-import java.io.Writer;
-import java.sql.Connection;
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.SQLXML;
-import java.sql.Statement;
-import java.util.Properties;
-
-import javax.xml.stream.XMLStreamException;
-import javax.xml.stream.XMLStreamReader;
-import javax.xml.transform.Source;
-import javax.xml.transform.Transformer;
-import javax.xml.transform.TransformerException;
-import javax.xml.transform.TransformerFactory;
-import javax.xml.transform.dom.DOMSource;
-import javax.xml.transform.sax.SAXSource;
-import javax.xml.transform.stax.StAXSource;
-import javax.xml.transform.stream.StreamResult;
-
-public class PgSQLXMLTest extends BaseTest4 {
-
-  @Override
-  @Before
-  public void setUp() throws Exception {
-    super.setUp();
-    TestUtil.createTempTable(con, "xmltab", "x xml");
-  }
-
-  @Test
-  public void setCharacterStream() throws Exception {
-    String example = "<x>value</x>";
-    SQLXML pgSQLXML = con.createSQLXML();
-    Writer writer = pgSQLXML.setCharacterStream();
-    writer.write(example);
-    PreparedStatement preparedStatement = con.prepareStatement("insert into xmltab values (?)");
-    preparedStatement.setSQLXML(1, pgSQLXML);
-    preparedStatement.execute();
-
-    Statement statement = con.createStatement();
-    ResultSet rs = statement.executeQuery("select * from xmltab");
-    assertTrue(rs.next());
-    SQLXML result = rs.getSQLXML(1);
-    assertNotNull(result);
-    assertEquals(example, result.getString());
-  }
-
-  private static final String LICENSE_URL =
-      PgSQLXMLTest.class.getClassLoader().getResource("META-INF/LICENSE").toString();
-  private static final String XXE_EXAMPLE =
-      "<!DOCTYPE foo [<!ELEMENT foo ANY >\n"
-      + "<!ENTITY xxe SYSTEM \"" + LICENSE_URL + "\">]>"
-      + "<foo>&xxe;</foo>";
-
-  @Test
-  public void testLegacyXxe() throws Exception {
-    Properties props = new Properties();
-    props.setProperty(PGProperty.XML_FACTORY_FACTORY.getName(), "LEGACY_INSECURE");
-    try (Connection conn = TestUtil.openDB(props)) {
-      BaseConnection baseConn = conn.unwrap(BaseConnection.class);
-      PgSQLXML xml = new PgSQLXML(baseConn, XXE_EXAMPLE);
-      xml.getSource(null);
-    }
-  }
-
-  private static String sourceToString(Source source) throws TransformerException {
-    StringWriter sw = new StringWriter();
-    Transformer transformer = TransformerFactory.newInstance().newTransformer();
-    transformer.transform(source, new StreamResult(sw));
-    return sw.toString();
-  }
-
-  private <T extends Source> void testGetSourceXxe(Class<T> clazz) {
-    SQLException ex = assertThrows(SQLException.class, () -> {
-      PgSQLXML xml = new PgSQLXML(null, XXE_EXAMPLE);
-      xml.getSource(clazz);
-    });
-    String message = ex.getCause().getMessage();
-    assertTrue(
-        "Expected to get a <<DOCTYPE disallowed>> SAXParseException. Actual message is " + message,
-        message.contains("DOCTYPE"));
-  }
-
-  @Test
-  public void testGetSourceXxeNull() throws Exception {
-    testGetSourceXxe(null);
-  }
-
-  @Test
-  public void testGetSourceXxeDOMSource() throws Exception {
-    testGetSourceXxe(DOMSource.class);
-  }
-
-  @Test
-  public void testGetSourceXxeSAXSource() throws Exception {
-    PgSQLXML xml = new PgSQLXML(null, XXE_EXAMPLE);
-    SAXSource source = xml.getSource(SAXSource.class);
-    TransformerException ex = assertThrows(TransformerException.class, () -> {
-      sourceToString(source);
-    });
-    String message = ex.getCause().getMessage();
-    assertTrue(
-        "Expected to get a <<DOCTYPE disallowed>> TransformerException. Actual message is " + message,
-        message.contains("DOCTYPE"));
-  }
-
-  @Test
-  public void testGetSourceXxeStAXSource() throws Exception {
-    PgSQLXML xml = new PgSQLXML(null, XXE_EXAMPLE);
-    StAXSource source = xml.getSource(StAXSource.class);
-    XMLStreamReader reader = source.getXMLStreamReader();
-    // STAX will not throw XXE error until we actually read the element
-    assertThrows(XMLStreamException.class, () -> {
-      while (reader.hasNext()) {
-        reader.next();
-      }
-    });
-  }
-}
diff --git a/pgjdbc/src/test/java/org/postgresql/jdbc/ResourceLockTest.java b/pgjdbc/src/test/java/org/postgresql/jdbc/ResourceLockTest.java
deleted file mode 100644
index 7d70d9d..0000000
--- a/pgjdbc/src/test/java/org/postgresql/jdbc/ResourceLockTest.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright (c) 2004, PostgreSQL Global Development Group
- * See the LICENSE file in the project root for more information.
- */
-
-package org.postgresql.jdbc;
-
-import static org.junit.jupiter.api.Assertions.assertFalse;
-import static org.junit.jupiter.api.Assertions.assertTrue;
-
-import org.junit.jupiter.api.Test;
-
-class ResourceLockTest {
-  @Test
-  void obtainClose() {
-    final ResourceLock lock = new ResourceLock();
-
-    assertFalse(lock.isLocked(),
-        "lock.isLocked(). The newly created resource lock should be unlocked");
-    assertFalse(lock.isHeldByCurrentThread(),
-        "lock.isHeldByCurrentThread(). The newly created resource lock should not be held by the current thread");
-
-    try (ResourceLock ignore = lock.obtain()) {
-      assertTrue(lock.isLocked(),
-          "lock.isLocked(). Obtained lock should be locked");
-      assertTrue(lock.isHeldByCurrentThread(),
-          "lock.isHeldByCurrentThread(). Obtained lock should be held by the current thread");
-    }
-
-    assertFalse(lock.isLocked(), "lock.isLocked(). Closed resource lock should be unlocked");
-    assertFalse(lock.isHeldByCurrentThread(),
-        "lock.isHeldByCurrentThread(). Closed resource lock should not be held by the current thread");
-  }
-}
diff --git a/pgjdbc/src/test/java/org/postgresql/jdbc/ScramTest.java b/pgjdbc/src/test/java/org/postgresql/jdbc/ScramTest.java
deleted file mode 100644
index c67b778..0000000
--- a/pgjdbc/src/test/java/org/postgresql/jdbc/ScramTest.java
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * Copyright (c) 2021, PostgreSQL Global Development Group
- * See the LICENSE file in the project root for more information.
- */
-
-package org.postgresql.jdbc;
-
-import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertThrows;
-import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.junit.jupiter.api.Assertions.fail;
-import static org.junit.jupiter.api.Assumptions.assumeTrue;
-
-import org.postgresql.PGProperty;
-import org.postgresql.core.ServerVersion;
-import org.postgresql.test.TestUtil;
-import org.postgresql.util.PSQLState;
-
-import org.junit.jupiter.api.AfterAll;
-import org.junit.jupiter.api.BeforeAll;
-import org.junit.jupiter.params.ParameterizedTest;
-import org.junit.jupiter.params.provider.Arguments;
-import org.junit.jupiter.params.provider.MethodSource;
-import org.junit.jupiter.params.provider.ValueSource;
-
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.Properties;
-import java.util.stream.Stream;
-
-class ScramTest {
-
-  private static Connection con;
-  private static final String ROLE_NAME = "testscram";
-
-  @BeforeAll
-  static void setUp() throws Exception {
-    con = TestUtil.openPrivilegedDB();
-    assumeTrue(TestUtil.haveMinimumServerVersion(con, ServerVersion.v10));
-  }
-
-  @AfterAll
-  static void tearDown() throws Exception {
-    try (Statement stmt = con.createStatement()) {
-      stmt.execute("DROP ROLE IF EXISTS " + ROLE_NAME);
-    }
-    TestUtil.closeDB(con);
-  }
-
-  /**
-   * Test creating a role with passwords WITH spaces and opening a connection using the same
-   * password, should work because is the "same" password.
-   *
-   * <p>https://github.com/pgjdbc/pgjdbc/issues/1970
-   */
-  @ParameterizedTest
-  @ValueSource(strings = {"My Space", "$ec ret", " rover june spelling ",
-      "!zj5hs*k5 STj@DaRUy", "q\u00A0w\u2000e\u2003r\u2009t\u3000y"})
-  void passwordWithSpace(String passwd) throws SQLException {
-    createRole(passwd); // Create role password with spaces.
-
-    Properties props = new Properties();
-    PGProperty.USER.set(props, ROLE_NAME);
-    PGProperty.PASSWORD.set(props, passwd);
-
-    try (Connection c = assertDoesNotThrow(() -> TestUtil.openDB(props));
-        Statement stmt = c.createStatement();
-        ResultSet rs = stmt.executeQuery("SELECT current_user")) {
-      assertTrue(rs.next());
-      assertEquals(ROLE_NAME, rs.getString(1));
-    }
-  }
-
-  /**
-   * Test creating a role with passwords WITHOUT spaces and opening a connection using password with
-   * spaces should fail since the spaces should not be stripped out.
-   *
-   * <p>https://github.com/pgjdbc/pgjdbc/issues/2000
-   */
-  @ParameterizedTest
-  @ValueSource(strings = {"My Space", "$ec ret", "rover june spelling",
-      "!zj5hs*k5 STj@DaRUy", "q\u00A0w\u2000e\u2003r\u2009t\u3000y"})
-  void passwordWithoutSpace(String passwd) throws SQLException {
-    String passwdNoSpaces = passwd.codePoints()
-        .filter(i -> !Character.isSpaceChar(i))
-        .collect(StringBuilder::new, StringBuilder::appendCodePoint, StringBuilder::append)
-        .toString();
-
-    createRole(passwdNoSpaces); // Create role password without spaces.
-
-    Properties props = new Properties();
-    PGProperty.USER.set(props, ROLE_NAME);
-    PGProperty.PASSWORD.set(props, passwd); // Open connection with spaces
-
-    SQLException ex = assertThrows(SQLException.class, () -> TestUtil.openDB(props));
-    assertEquals(PSQLState.INVALID_PASSWORD.getState(), ex.getSQLState());
-  }
-
-  private static Stream<Arguments> provideArgsForTestInvalid() {
-    return Stream.of(
-      Arguments.of(null, "The server requested SCRAM-based authentication, but no password was provided."),
-      Arguments.of("", "The server requested SCRAM-based authentication, but the password is an empty string.")
-    );
-  }
-
-  @ParameterizedTest
-  @MethodSource("provideArgsForTestInvalid")
-  void invalidPasswords(String password, String expectedMessage) throws SQLException {
-    // We are testing invalid passwords so that correct one does not matter
-    createRole("anything_goes_here");
-
-    Properties props = new Properties();
-    PGProperty.USER.set(props, ROLE_NAME);
-    if (password != null) {
-      PGProperty.PASSWORD.set(props, password);
-    }
-    try (Connection conn = DriverManager.getConnection(TestUtil.getURL(), props)) {
-      fail("SCRAM connection attempt with invalid password should fail");
-    } catch (SQLException e) {
-      assertEquals(expectedMessage, e.getMessage());
-    }
-  }
-
-  private void createRole(String passwd) throws SQLException {
-    try (Statement stmt = con.createStatement()) {
-      stmt.execute("SET password_encryption='scram-sha-256'");
-      stmt.execute("DROP ROLE IF EXISTS " + ROLE_NAME);
-      stmt.execute("CREATE ROLE " + ROLE_NAME + " WITH LOGIN PASSWORD '" + passwd + "'");
-    }
-  }
-
-}
diff --git a/pgjdbc/src/test/java/org/postgresql/jdbc/ShortArraysTest.java b/pgjdbc/src/test/java/org/postgresql/jdbc/ShortArraysTest.java
deleted file mode 100644
index 3fb46b0..0000000
--- a/pgjdbc/src/test/java/org/postgresql/jdbc/ShortArraysTest.java
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * Copyright (c) 2018, PostgreSQL Global Development Group
- * See the LICENSE file in the project root for more information.
- */
-
-package org.postgresql.jdbc;
-
-import org.postgresql.core.Oid;
-
-public class ShortArraysTest extends AbstractArraysTest<short[]> {
-
-  private static final short[][][] shorts = new short[][][]{{{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}},
-      {{13, 14, 15, 16}, {17, 18, 19, 20}, {21, 22, 23, 24}}};
-
-  public ShortArraysTest() {
-    super(shorts, true, Oid.INT2_ARRAY);
-  }
-}
diff --git a/pgjdbc/src/test/java/org/postgresql/jdbc/ShortObjectArraysTest.java b/pgjdbc/src/test/java/org/postgresql/jdbc/ShortObjectArraysTest.java
deleted file mode 100644
index 9abc6cb..0000000
--- a/pgjdbc/src/test/java/org/postgresql/jdbc/ShortObjectArraysTest.java
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * Copyright (c) 2018, PostgreSQL Global Development Group
- * See the LICENSE file in the project root for more information.
- */
-
-package org.postgresql.jdbc;
-
-import org.postgresql.core.Oid;
-
-public class ShortObjectArraysTest extends AbstractArraysTest<Short[]> {
-
-  private static final Short[][][] shorts = new Short[][][]{{{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}},
-      {{13, 14, 15, 16}, {17, 18, null, 20}, {21, 22, 23, 24}}};
-
-  public ShortObjectArraysTest() {
-    super(shorts, true, Oid.INT2_ARRAY);
-  }
-}
diff --git a/pgjdbc/src/test/java/org/postgresql/jdbc/StringArraysTest.java b/pgjdbc/src/test/java/org/postgresql/jdbc/StringArraysTest.java
deleted file mode 100644
index 8fc0ce0..0000000
--- a/pgjdbc/src/test/java/org/postgresql/jdbc/StringArraysTest.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Copyright (c) 2018, PostgreSQL Global Development Group
- * See the LICENSE file in the project root for more information.
- */
-
-package org.postgresql.jdbc;
-
-import org.postgresql.core.Oid;
-
-public class StringArraysTest extends AbstractArraysTest<String[]> {
-
-  private static final String[][][] strings = new String[][][]{
-      {{"some", "String", "haVE some \u03C0", "another"}, {null, "6L", "7L", "8L"}, //unicode escape for pi character
-          {"asdf", " asdf ", "11L", null}},
-      {{"13L", null, "asasde4wtq", "16L"}, {"17L", "", "19L", "20L"}, {"21L", "22L", "23L", "24L"}}};
-
-  public StringArraysTest() {
-    super(strings, true, Oid.VARCHAR_ARRAY);
-  }
-}
diff --git a/pgjdbc/src/test/java/org/postgresql/jdbc/UUIDArrayTest.java b/pgjdbc/src/test/java/org/postgresql/jdbc/UUIDArrayTest.java
deleted file mode 100644
index e7363f7..0000000
--- a/pgjdbc/src/test/java/org/postgresql/jdbc/UUIDArrayTest.java
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
- * Copyright (c) 2022, PostgreSQL Global Development Group
- * See the LICENSE file in the project root for more information.
- */
-
-package org.postgresql.jdbc;
-
-import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.junit.jupiter.api.Assumptions.assumeTrue;
-
-import org.postgresql.core.ServerVersion;
-import org.postgresql.test.TestUtil;
-
-import org.junit.jupiter.api.AfterAll;
-import org.junit.jupiter.api.BeforeAll;
-import org.junit.jupiter.api.Test;
-
-import java.sql.Connection;
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.UUID;
-
-class UUIDArrayTest {
-
-  private static Connection con;
-  private static final String TABLE_NAME = "uuid_table";
-  private static final String INSERT1 = "INSERT INTO " + TABLE_NAME
-      + " (id, data1) VALUES (?, ?)";
-  private static final String INSERT2 = "INSERT INTO " + TABLE_NAME
-      + " (id, data2) VALUES (?, ?)";
-  private static final String SELECT1 = "SELECT data1 FROM " + TABLE_NAME
-      + " WHERE id = ?";
-  private static final String SELECT2 = "SELECT data2 FROM " + TABLE_NAME
-      + " WHERE id = ?";
-  private static final UUID[] uids1 = new UUID[]{UUID.randomUUID(), UUID.randomUUID()};
-  private static final UUID[][] uids2 = new UUID[][]{uids1};
-
-  @BeforeAll
-  static void setUp() throws Exception {
-    con = TestUtil.openDB();
-    assumeTrue(TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_6));
-    try (Statement stmt = con.createStatement()) {
-      stmt.execute("CREATE TABLE " + TABLE_NAME
-          + " (id int PRIMARY KEY, data1 UUID[], data2 UUID[][])");
-    }
-  }
-
-  @AfterAll
-  static void tearDown() throws Exception {
-    try (Statement stmt = con.createStatement()) {
-      stmt.execute("DROP TABLE IF EXISTS " + TABLE_NAME);
-    }
-    TestUtil.closeDB(con);
-  }
-
-  @Test
-  void test1DWithCreateArrayOf() throws SQLException {
-    try (Connection c = assertDoesNotThrow(() -> TestUtil.openDB());
-        PreparedStatement stmt1 = c.prepareStatement(INSERT1);
-        PreparedStatement stmt2 = c.prepareStatement(SELECT1)) {
-      stmt1.setInt(1, 100);
-      stmt1.setArray(2, c.createArrayOf("uuid", uids1));
-      stmt1.execute();
-
-      stmt2.setInt(1, 100);
-      stmt2.execute();
-      try (ResultSet rs = stmt2.getResultSet()) {
-        assertTrue(rs.next());
-        UUID[] array = (UUID[]) rs.getArray(1).getArray();
-        assertEquals(uids1[0], array[0]);
-        assertEquals(uids1[1], array[1]);
-      }
-    }
-  }
-
-  @Test
-  void test1DWithSetObject() throws SQLException {
-    try (Connection c = assertDoesNotThrow(() -> TestUtil.openDB());
-         PreparedStatement stmt1 = c.prepareStatement(INSERT1);
-         PreparedStatement stmt2 = c.prepareStatement(SELECT1)) {
-      stmt1.setInt(1, 101);
-      stmt1.setObject(2, uids1);
-      stmt1.execute();
-
-      stmt2.setInt(1, 101);
-      stmt2.execute();
-      try (ResultSet rs = stmt2.getResultSet()) {
-        assertTrue(rs.next());
-        UUID[] array = (UUID[]) rs.getArray(1).getArray();
-        assertEquals(uids1[0], array[0]);
-        assertEquals(uids1[1], array[1]);
-      }
-    }
-  }
-
-  @Test
-  void test2DWithCreateArrayOf() throws SQLException {
-    try (Connection c = assertDoesNotThrow(() -> TestUtil.openDB());
-         PreparedStatement stmt1 = c.prepareStatement(INSERT2);
-         PreparedStatement stmt2 = c.prepareStatement(SELECT2)) {
-      stmt1.setInt(1, 200);
-      stmt1.setArray(2, c.createArrayOf("uuid", uids2));
-      stmt1.execute();
-
-      stmt2.setInt(1, 200);
-      stmt2.execute();
-      try (ResultSet rs = stmt2.getResultSet()) {
-        assertTrue(rs.next());
-        UUID[][] array = (UUID[][]) rs.getArray(1).getArray();
-        assertEquals(uids2[0][0], array[0][0]);
-        assertEquals(uids2[0][1], array[0][1]);
-      }
-    }
-  }
-
-  @Test
-  void test2DWithSetObject() throws SQLException {
-    try (Connection c = assertDoesNotThrow(() -> TestUtil.openDB());
-         PreparedStatement stmt1 = c.prepareStatement(INSERT2);
-         PreparedStatement stmt2 = c.prepareStatement(SELECT2)) {
-      stmt1.setInt(1, 201);
-      stmt1.setObject(2, uids2);
-      stmt1.execute();
-
-      stmt2.setInt(1, 201);
-      stmt2.execute();
-      try (ResultSet rs = stmt2.getResultSet()) {
-        assertTrue(rs.next());
-        UUID[][] array = (UUID[][]) rs.getArray(1).getArray();
-        assertEquals(uids2[0][0], array[0][0]);
-        assertEquals(uids2[0][1], array[0][1]);
-      }
-    }
-  }
-}
diff --git a/pgjdbc/src/test/java/org/postgresql/jdbcurlresolver/PgPassParserTest.java b/pgjdbc/src/test/java/org/postgresql/jdbcurlresolver/PgPassParserTest.java
deleted file mode 100644
index 718ab3d..0000000
--- a/pgjdbc/src/test/java/org/postgresql/jdbcurlresolver/PgPassParserTest.java
+++ /dev/null
@@ -1,186 +0,0 @@
-/*
- * Copyright (c) 2021, PostgreSQL Global Development Group
- * See the LICENSE file in the project root for more information.
- */
-
-package org.postgresql.jdbcurlresolver;
-
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertNotNull;
-import static org.junit.jupiter.api.Assertions.assertNull;
-
-import org.postgresql.PGEnvironment;
-import org.postgresql.util.StubEnvironmentAndProperties;
-
-import org.junit.jupiter.api.Test;
-import uk.org.webcompere.systemstubs.environment.EnvironmentVariables;
-import uk.org.webcompere.systemstubs.properties.SystemProperties;
-import uk.org.webcompere.systemstubs.resource.Resources;
-
-import java.net.URL;
-
-/**
- * Password resource location used is decided based on availability of different environment
- * variables and file existence in user home directory. Tests verify selection of proper resource.
- * Also, resource content (* matching, escape character handling, comments etc) can be written
- * creatively. Test verify several cases.
- *
- * @author Marek Läll
- */
-@StubEnvironmentAndProperties
-class PgPassParserTest {
-
-  // "org.postgresql.pgpassfile" : missing
-  // "PGPASSFILE"                : missing
-  // ".pgpass"                   : missing
-  @Test
-  void getPassword11() throws Exception {
-    Resources.with(
-        new EnvironmentVariables(PGEnvironment.PGPASSFILE.getName(), "", "APPDATA", "/tmp/dir-nonexistent"),
-        new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGPASSFILE.getName(), "", "user.home", "/tmp/dir-nonexistent")
-    ).execute(() -> {
-      String result = PgPassParser.getPassword("localhost", "5432", "postgres", "postgres");
-      assertNull(result);
-    });
-  }
-
-  // "org.postgresql.pgpassfile" : missing
-  // "PGPASSFILE"                : missing
-  // ".pgpass"                   : exist
-  // <password line>             : exist
-  @Test
-  void getPassword22() throws Exception {
-    URL urlPath = getClass().getResource("/pg_service");
-    assertNotNull(urlPath);
-    Resources.with(
-        new EnvironmentVariables(PGEnvironment.PGPASSFILE.getName(), "", "APPDATA", urlPath.getPath() ),
-        new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGPASSFILE.getName(), "", "user.home", urlPath.getPath())
-    ).execute(() -> {
-      String result = PgPassParser.getPassword("localhost", "5432", "postgres",
-          "postgres");
-      assertEquals("postgres1", result);
-      result = PgPassParser.getPassword("localhost2", "5432", "postgres", "postgres");
-      assertEquals("postgres\\", result);
-      result = PgPassParser.getPassword("localhost3", "5432", "postgres", "postgres");
-      assertEquals("postgres:", result);
-      result = PgPassParser.getPassword("localhost4", "5432", "postgres", "postgres");
-      assertEquals("postgres1:", result);
-      result = PgPassParser.getPassword("localhost5", "5432", "postgres", "postgres");
-      assertEquals("postgres5", result);
-      result = PgPassParser.getPassword("localhost6", "5432", "postgres", "postgres");
-      assertEquals("post\\gres\\", result);
-      result = PgPassParser.getPassword("localhost7", "5432", "postgres", "postgres");
-      assertEquals(" ab cd", result);
-      result = PgPassParser.getPassword("localhost8", "5432", "postgres", "postgres");
-      assertEquals("", result);
-      //
-      result = PgPassParser.getPassword("::1", "1234", "colon:db", "colon:user");
-      assertEquals("pass:pass", result);
-      result = PgPassParser.getPassword("::1", "12345", "colon:db", "colon:user");
-      assertEquals("pass:pass1", result);
-      result = PgPassParser.getPassword("::1", "1234", "slash\\db", "slash\\user");
-      assertEquals("pass\\pass", result);
-      result = PgPassParser.getPassword("::1", "12345", "slash\\db", "slash\\user");
-      assertEquals("pass\\pass1", result);
-      //
-      result = PgPassParser.getPassword("any", "5432", "postgres", "postgres");
-      assertEquals("anyhost5", result);
-      result = PgPassParser.getPassword("localhost11", "9999", "postgres", "postgres");
-      assertEquals("anyport5", result);
-      result = PgPassParser.getPassword("localhost12", "5432", "anydb", "postgres");
-      assertEquals("anydb5", result);
-      result = PgPassParser.getPassword("localhost13", "5432", "postgres", "anyuser");
-      assertEquals("anyuser5", result);
-      //
-      result = PgPassParser.getPassword("anyhost", "6544", "anydb", "anyuser");
-      assertEquals("absolute-any", result);
-    });
-  }
-
-  // "org.postgresql.pgpassfile" : missing
-  // "PGPASSFILE"                : exist
-  // ".pgpass"                   : exist
-  // <password line>             : missing
-  @Test
-  void getPassword31() throws Exception {
-    URL urlPath = getClass().getResource("/pg_service");
-    assertNotNull(urlPath);
-    URL urlFileEnv = getClass().getResource("/pg_service/pgpassfileEnv.conf");
-    assertNotNull(urlFileEnv);
-    Resources.with(
-        new EnvironmentVariables(PGEnvironment.PGPASSFILE.getName(), urlFileEnv.getFile(), "APPDATA", urlPath.getPath()),
-        new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGPASSFILE.getName(), "", "user.home", urlPath.getPath())
-    ).execute(() -> {
-      String result = PgPassParser.getPassword("localhost-missing", "5432", "postgres1", "postgres2");
-      assertNull(result);
-    });
-  }
-
-  // "org.postgresql.pgpassfile" : missing
-  // "PGPASSFILE"                : exist
-  // ".pgpass"                   : exist
-  // <password line>             : exist
-  @Test
-  void getPassword32() throws Exception {
-    URL urlPath = getClass().getResource("/pg_service");
-    assertNotNull(urlPath);
-    URL urlFileEnv = getClass().getResource("/pg_service/pgpassfileEnv.conf");
-    assertNotNull(urlFileEnv);
-    Resources.with(
-        new EnvironmentVariables(PGEnvironment.PGPASSFILE.getName(), urlFileEnv.getPath(), "APPDATA", urlPath.getPath()),
-        new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGPASSFILE.getName(), "", "user.home", urlPath.getPath())
-    ).execute(() -> {
-      String result = PgPassParser.getPassword("localhost", "5432", "postgres1",
-          "postgres2");
-      assertEquals("postgres3", result);
-    });
-  }
-
-
-  // "org.postgresql.pgpassfile" : exist
-  // "PGPASSFILE"                : exist
-  // ".pgpass"                   : exist
-  // <password line>             : missing
-  @Test
-  void getPassword41() throws Exception {
-    URL urlPath = getClass().getResource("/pg_service");
-    assertNotNull(urlPath);
-    URL urlFileEnv = getClass().getResource("/pg_service/pgpassfileEnv.conf");
-    assertNotNull(urlFileEnv);
-    URL urlFileProps = getClass().getResource("/pg_service/pgpassfileProps.conf");
-    assertNotNull(urlFileProps);
-    Resources.with(
-        new EnvironmentVariables(PGEnvironment.PGPASSFILE.getName(), urlFileEnv.getFile(), "APPDATA", urlPath.getPath()),
-        new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGPASSFILE.getName(), "", "user.home", urlPath.getPath())
-    ).execute(() -> {
-      String result = PgPassParser.getPassword("localhost-missing", "5432", "postgres1", "postgres2");
-      assertNull(result);
-    });
-  }
-
-  // "org.postgresql.pgpassfile" : exist
-  // "PGPASSFILE"                : exist
-  // ".pgpass"                   : exist
-  // <password line>             : exist
-  @Test
-  void getPassword42() throws Exception {
-    URL urlPath = getClass().getResource("/pg_service");
-    assertNotNull(urlPath);
-    URL urlFileEnv = getClass().getResource("/pg_service/pgpassfileEnv.conf");
-    assertNotNull(urlFileEnv);
-    URL urlFileProps = getClass().getResource("/pg_service/pgpassfileProps.conf");
-    assertNotNull(urlFileProps);
-    Resources.with(
-        new EnvironmentVariables(PGEnvironment.PGPASSFILE.getName(), urlFileEnv.getPath(), "APPDATA", urlPath.getPath()),
-        new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGPASSFILE.getName(), urlFileProps.getFile(), "user.home", urlPath.getPath())
-    ).execute(() -> {
-      String result = PgPassParser.getPassword("localhost77", "5432", "any", "postgres11");
-      assertEquals("postgres22", result);
-      result = PgPassParser.getPassword("localhost888", "5432", "any", "postgres11");
-      assertNull(result);
-      result = PgPassParser.getPassword("localhost999", "5432", "any", "postgres11");
-      assertNull(result);
-    });
-  }
-
-}
diff --git a/pgjdbc/src/test/java/org/postgresql/jdbcurlresolver/PgServiceConfParserTest.java b/pgjdbc/src/test/java/org/postgresql/jdbcurlresolver/PgServiceConfParserTest.java
deleted file mode 100644
index b3a3d9e..0000000
--- a/pgjdbc/src/test/java/org/postgresql/jdbcurlresolver/PgServiceConfParserTest.java
+++ /dev/null
@@ -1,335 +0,0 @@
-/*
- * Copyright (c) 2021, PostgreSQL Global Development Group
- * See the LICENSE file in the project root for more information.
- */
-
-package org.postgresql.jdbcurlresolver;
-
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertNotNull;
-import static org.junit.jupiter.api.Assertions.assertNull;
-import static org.junit.jupiter.api.Assertions.assertTrue;
-
-import org.postgresql.PGEnvironment;
-import org.postgresql.util.StubEnvironmentAndProperties;
-
-import org.junit.jupiter.api.Test;
-import uk.org.webcompere.systemstubs.environment.EnvironmentVariables;
-import uk.org.webcompere.systemstubs.properties.SystemProperties;
-import uk.org.webcompere.systemstubs.resource.Resources;
-
-import java.net.URL;
-import java.util.Properties;
-
-/**
- * Service resource location used is decided based on availability of different environment
- * variables and file existence in user home directory. Tests verify selection of proper resource.
- * Also, resource content (section headers, comments, key-value pairs etc) can be written
- * creatively. Test verify several cases.
- *
- * @author Marek Läll
- */
-@StubEnvironmentAndProperties
-class PgServiceConfParserTest {
-
-  // "org.postgresql.pgservicefile" : missing
-  // "PGSERVICEFILE"                : missing
-  // ".pg_service.conf"             : missing
-  // "PGSYSCONFDIR"                 : missing
-  @Test
-  void pgService11() throws Exception {
-    Resources.with(
-        new EnvironmentVariables(PGEnvironment.PGSERVICEFILE.getName(), "", PGEnvironment.PGSYSCONFDIR.getName(), ""),
-        new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGSERVICEFILE.getName(), "", "user.home", "/tmp/dir-nonexistent")
-    ).execute(() -> {
-      Properties result = PgServiceConfParser.getServiceProperties("service-nonexistent");
-      assertNull(result);
-    });
-  }
-
-  // "org.postgresql.pgservicefile" : missing
-  // "PGSERVICEFILE"                : missing
-  // ".pg_service.conf"             : missing
-  // "PGSYSCONFDIR"                 : exist
-  // <service>                      : missing
-  @Test
-  void pgService21() throws Exception {
-    URL urlPath = getClass().getResource("/pg_service");
-    assertNotNull(urlPath);
-    Resources.with(
-        new EnvironmentVariables(PGEnvironment.PGSERVICEFILE.getName(), "", PGEnvironment.PGSYSCONFDIR.getName(), urlPath.getPath()),
-        new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGSERVICEFILE.getName(), "", "user.home", "/tmp/dir-nonexistent")
-    ).execute(() -> {
-      Properties result = PgServiceConfParser.getServiceProperties("service-nonexistent");
-      assertNull(result);
-      result = PgServiceConfParser.getServiceProperties("empty-service1");
-      assertNotNull(result);
-      assertTrue(result.isEmpty());
-    });
-  }
-
-  // "org.postgresql.pgservicefile" : missing
-  // "PGSERVICEFILE"                : missing
-  // ".pg_service.conf"             : missing
-  // "PGSYSCONFDIR"                 : exist
-  // <service>                      : exist
-  @Test
-  void pgService22() throws Exception {
-    URL urlPath = getClass().getResource("/pg_service");
-    assertNotNull(urlPath);
-    Resources.with(
-        new EnvironmentVariables(PGEnvironment.PGSERVICEFILE.getName(), "", PGEnvironment.PGSYSCONFDIR.getName(), urlPath.getPath()),
-        new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGSERVICEFILE.getName(), "", "user.home", "/tmp/dir-nonexistent")
-    ).execute(() -> {
-      Properties result = PgServiceConfParser.getServiceProperties("test-service1");
-      assertNotNull(result);
-      assertEquals("test_dbname", result.get("PGDBNAME"));
-      assertEquals("global-test-host.test.net", result.get("PGHOST"));
-      assertEquals("5433", result.get("PGPORT"));
-      assertEquals("admin", result.get("user"));
-      assertEquals(4, result.size());
-    });
-  }
-
-  // "org.postgresql.pgservicefile" : missing
-  // "PGSERVICEFILE"                : missing
-  // ".pg_service.conf"             : missing
-  // "PGSYSCONFDIR"                 : exist - but file itself is missing
-  // <service>                      : exist
-  @Test
-  void pgService23() throws Exception {
-    String nonExistingDir = "non-existing-dir";
-    Resources.with(
-        new EnvironmentVariables(PGEnvironment.PGSERVICEFILE.getName(), "", PGEnvironment.PGSYSCONFDIR.getName(), nonExistingDir),
-        new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGSERVICEFILE.getName(), "", "user.home", "/tmp/dir-nonexistent")
-    ).execute(() -> {
-      Properties result = PgServiceConfParser.getServiceProperties("test-service1");
-      assertNull(result);
-    });
-  }
-
-
-  // "org.postgresql.pgservicefile" : missing
-  // "PGSERVICEFILE"                : missing
-  // ".pg_service.conf"             : exist
-  // "PGSYSCONFDIR"                 : exist
-  // <service>                      : missing
-  @Test
-  void pgService31() throws Exception {
-    URL urlPath = getClass().getResource("/pg_service");
-    assertNotNull(urlPath);
-    Resources.with(
-        new EnvironmentVariables(PGEnvironment.PGSERVICEFILE.getName(), "", PGEnvironment.PGSYSCONFDIR.getName(), urlPath.getPath()),
-        new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGSERVICEFILE.getName(), "", "user.home", urlPath.getPath())
-    ).execute(() -> {
-      Properties result = PgServiceConfParser.getServiceProperties("service-nonexistent");
-      assertNull(result);
-      result = PgServiceConfParser.getServiceProperties("empty-service1");
-      assertNotNull(result);
-      assertTrue(result.isEmpty());
-    });
-  }
-
-  // "org.postgresql.pgservicefile" : missing
-  // "PGSERVICEFILE"                : missing
-  // ".pg_service.conf"             : exist
-  // "PGSYSCONFDIR"                 : exist
-  // <service>                      : exist
-  @Test
-  void pgService32() throws Exception {
-    URL urlPath = getClass().getResource("/pg_service");
-    assertNotNull(urlPath);
-    Resources.with(
-        new EnvironmentVariables(PGEnvironment.PGSERVICEFILE.getName(), "", "APPDATA", urlPath.getPath(), PGEnvironment.PGSYSCONFDIR.getName(), urlPath.getPath()),
-        new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGSERVICEFILE.getName(), "", "user.home", urlPath.getPath())
-    ).execute(() -> {
-      Properties result = PgServiceConfParser.getServiceProperties("test-service1");
-      assertNotNull(result);
-      assertEquals(" test_dbname", result.get("PGDBNAME"));
-      assertEquals("local-test-host.test.net", result.get("PGHOST"));
-      assertEquals("5433", result.get("PGPORT"));
-      assertEquals("admin", result.get("user"));
-      assertEquals(4, result.size());
-    });
-  }
-
-
-  // "org.postgresql.pgservicefile" : missing
-  // "PGSERVICEFILE"                : exist
-  // ".pg_service.conf"             : exist
-  // "PGSYSCONFDIR"                 : exist
-  // <service>                      : missing
-  @Test
-  void pgService41() throws Exception {
-    URL urlPath = getClass().getResource("/pg_service");
-    assertNotNull(urlPath);
-    URL urlFileEnv = getClass().getResource("/pg_service/pgservicefileEnv.conf");
-    assertNotNull(urlFileEnv);
-    Resources.with(
-        new EnvironmentVariables(PGEnvironment.PGSERVICEFILE.getName(), urlFileEnv.getFile(), PGEnvironment.PGSYSCONFDIR.getName(), urlPath.getPath()),
-        new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGSERVICEFILE.getName(), "", "user.home", urlPath.getPath())
-    ).execute(() -> {
-      Properties result = PgServiceConfParser.getServiceProperties("service-nonexistent");
-      assertNull(result);
-      result = PgServiceConfParser.getServiceProperties("empty-service1");
-      assertNotNull(result);
-      assertTrue(result.isEmpty());
-    });
-  }
-
-  // "org.postgresql.pgservicefile" : missing
-  // "PGSERVICEFILE"                : exist
-  // ".pg_service.conf"             : exist
-  // "PGSYSCONFDIR"                 : exist
-  // <service>                      : exist
-  @Test
-  void pgService42() throws Exception {
-    URL urlPath = getClass().getResource("/pg_service");
-    assertNotNull(urlPath);
-    URL urlFileEnv = getClass().getResource("/pg_service/pgservicefileEnv.conf");
-    assertNotNull(urlFileEnv);
-    Resources.with(
-        new EnvironmentVariables(PGEnvironment.PGSERVICEFILE.getName(), urlFileEnv.getFile(), PGEnvironment.PGSYSCONFDIR.getName(), urlPath.getPath()),
-        new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGSERVICEFILE.getName(), "", "user.home", urlPath.getPath())
-    ).execute(() -> {
-      Properties result = PgServiceConfParser.getServiceProperties("test-service1");
-      assertNotNull(result);
-      assertEquals("test_dbname", result.get("PGDBNAME"));
-      assertEquals("pgservicefileEnv-test-host.test.net", result.get("PGHOST"));
-      assertEquals("5433", result.get("PGPORT"));
-      assertEquals("admin", result.get("user"));
-      assertEquals("disable", result.get("sslmode"));
-      assertEquals(5, result.size());
-    });
-  }
-
-  // "org.postgresql.pgservicefile" : missing
-  // "PGSERVICEFILE"                : exist - but file itself is missing
-  // ".pg_service.conf"             : exist
-  // "PGSYSCONFDIR"                 : exist
-  // <service>                      : exist
-  @Test
-  void pgService43() throws Exception {
-    URL urlPath = getClass().getResource("/pg_service");
-    assertNotNull(urlPath);
-    String nonExistingFile = "non-existing-file.conf";
-    Resources.with(
-        new EnvironmentVariables(PGEnvironment.PGSERVICEFILE.getName(), nonExistingFile, PGEnvironment.PGSYSCONFDIR.getName(), urlPath.getPath()),
-        new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGSERVICEFILE.getName(), "", "user.home", urlPath.getPath())
-    ).execute(() -> {
-      Properties result = PgServiceConfParser.getServiceProperties("test-service1");
-      assertNull(result);
-    });
-  }
-
-
-  // "org.postgresql.pgservicefile" : exist
-  // "PGSERVICEFILE"                : exist
-  // ".pg_service.conf"             : exist
-  // "PGSYSCONFDIR"                 : exist
-  // <service>                      : missing
-  @Test
-  void pgService51() throws Exception {
-    URL urlPath = getClass().getResource("/pg_service");
-    assertNotNull(urlPath);
-    URL urlFileEnv = getClass().getResource("/pg_service/pgservicefileEnv.conf");
-    assertNotNull(urlFileEnv);
-    URL urlFileProps = getClass().getResource("/pg_service/pgservicefileProps.conf");
-    assertNotNull(urlFileProps);
-    Resources.with(
-        new EnvironmentVariables(PGEnvironment.PGSERVICEFILE.getName(), urlFileEnv.getFile(), PGEnvironment.PGSYSCONFDIR.getName(), urlPath.getPath()),
-        new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGSERVICEFILE.getName(), urlFileProps.getFile(), "user.home", urlPath.getPath())
-    ).execute(() -> {
-      Properties result = PgServiceConfParser.getServiceProperties("service-nonexistent");
-      assertNull(result);
-      result = PgServiceConfParser.getServiceProperties("empty-service1");
-      assertNotNull(result);
-      assertTrue(result.isEmpty());
-    });
-  }
-
-  // "org.postgresql.pgservicefile" : exist
-  // "PGSERVICEFILE"                : exist
-  // ".pg_service.conf"             : exist
-  // "PGSYSCONFDIR"                 : exist
-  // <service>                      : exist
-  @Test
-  void pgService52() throws Exception {
-    URL urlPath = getClass().getResource("/pg_service");
-    assertNotNull(urlPath);
-    URL urlFileEnv = getClass().getResource("/pg_service/pgservicefileEnv.conf");
-    assertNotNull(urlFileEnv);
-    URL urlFileProps = getClass().getResource("/pg_service/pgservicefileProps.conf");
-    assertNotNull(urlFileProps);
-    Resources.with(
-        new EnvironmentVariables(PGEnvironment.PGSERVICEFILE.getName(), urlFileEnv.getFile(), PGEnvironment.PGSYSCONFDIR.getName(), urlPath.getPath()),
-        new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGSERVICEFILE.getName(), urlFileProps.getFile(), "user.home", urlPath.getPath())
-    ).execute(() -> {
-      Properties result = PgServiceConfParser.getServiceProperties("test-service1");
-      assertNotNull(result);
-      assertEquals("test_dbname", result.get("PGDBNAME"));
-      assertEquals("pgservicefileProps-test-host.test.net", result.get("PGHOST"));
-      assertEquals("5433", result.get("PGPORT"));
-      assertEquals("admin", result.get("user"));
-      assertEquals(4, result.size());
-    });
-  }
-
-  // "org.postgresql.pgservicefile" : exist - but file itself is missing
-  // "PGSERVICEFILE"                : exist
-  // ".pg_service.conf"             : exist
-  // "PGSYSCONFDIR"                 : exist
-  // <service>                      : exist
-  @Test
-  void pgService53() throws Exception {
-    URL urlPath = getClass().getResource("/pg_service");
-    assertNotNull(urlPath);
-    URL urlFileEnv = getClass().getResource("/pg_service/pgservicefileEnv.conf");
-    assertNotNull(urlFileEnv);
-    String nonExistingFile = "non-existing-file.conf";
-    Resources.with(
-        new EnvironmentVariables(PGEnvironment.PGSERVICEFILE.getName(), urlFileEnv.getFile(), PGEnvironment.PGSYSCONFDIR.getName(), urlPath.getPath()),
-        new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGSERVICEFILE.getName(), nonExistingFile, "user.home", urlPath.getPath())
-    ).execute(() -> {
-      Properties result = PgServiceConfParser.getServiceProperties("test-service1");
-      assertNull(result);
-    });
-  }
-
-
-  // resource content read tests
-  @Test
-  void pgService61() throws Exception {
-    URL urlPath = getClass().getResource("/pg_service");
-    assertNotNull(urlPath);
-    Resources.with(
-        new EnvironmentVariables(PGEnvironment.PGSERVICEFILE.getName(), "", "APPDATA", urlPath.getPath(), PGEnvironment.PGSYSCONFDIR.getName(), ""),
-        new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGSERVICEFILE.getName(), "", "user.home", urlPath.getPath())
-    ).execute(() -> {
-      Properties result;
-      // fail if there is space between key and equal sign
-      result = PgServiceConfParser.getServiceProperties("fail-case-1");
-      assertNull(result);
-      // service name is case-sensitive
-      result = PgServiceConfParser.getServiceProperties("fail-case-2");
-      assertNull(result);
-      // service name is case-sensitive
-      result = PgServiceConfParser.getServiceProperties("fail-case-2");
-      assertNull(result);
-      // invalid line in the section
-      result = PgServiceConfParser.getServiceProperties("fail-case-3");
-      assertNull(result);
-      // service name: space before and after name becomes part of name
-      result = PgServiceConfParser.getServiceProperties(" success-case-3 ");
-      assertNotNull(result);
-      assertEquals("local-somehost3", result.get("PGHOST"));
-      assertEquals(1, result.size());
-      // service name: space inside name is part of name
-      result = PgServiceConfParser.getServiceProperties("success case 4");
-      assertNotNull(result);
-      assertEquals("local-somehost4", result.get("PGHOST"));
-      assertEquals(1, result.size());
-    });
-  }
-
-}
diff --git a/pgjdbc/src/test/java/org/postgresql/replication/CopyBothResponseTest.java b/pgjdbc/src/test/java/org/postgresql/replication/CopyBothResponseTest.java
deleted file mode 100644
index 863647f..0000000
--- a/pgjdbc/src/test/java/org/postgresql/replication/CopyBothResponseTest.java
+++ /dev/null
@@ -1,194 +0,0 @@
-/*
- * Copyright (c) 2004, PostgreSQL Global Development Group
- * See the LICENSE file in the project root for more information.
- */
-
-package org.postgresql.replication;
-
-import static org.hamcrest.CoreMatchers.equalTo;
-import static org.hamcrest.MatcherAssert.assertThat;
-
-import org.postgresql.PGConnection;
-import org.postgresql.copy.CopyDual;
-import org.postgresql.copy.CopyManager;
-import org.postgresql.core.BaseConnection;
-import org.postgresql.core.ServerVersion;
-import org.postgresql.test.TestUtil;
-import org.postgresql.test.annotations.DisabledIfServerVersionBelow;
-import org.postgresql.test.annotations.tags.Replication;
-
-import org.hamcrest.CoreMatchers;
-import org.junit.jupiter.api.AfterAll;
-import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.BeforeAll;
-import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.Test;
-
-import java.nio.ByteBuffer;
-import java.sql.Connection;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.concurrent.TimeUnit;
-
-/**
- * CopyBothResponse use since 9.1 PostgreSQL version for replication protocol.
- */
-@Replication
-@DisabledIfServerVersionBelow("9.4")
-class CopyBothResponseTest {
-  private Connection sqlConnection;
-  private Connection replConnection;
-
-  @BeforeAll
-  static void beforeClass() throws Exception {
-    Connection con = TestUtil.openDB();
-    TestUtil.createTable(con, "testreplication", "pk serial primary key, name varchar(100)");
-    con.close();
-  }
-
-  @AfterAll
-  static void testAfterClass() throws Exception {
-    Connection con = TestUtil.openDB();
-    TestUtil.dropTable(con, "testreplication");
-    con.close();
-  }
-
-  @BeforeEach
-  void setUp() throws Exception {
-    sqlConnection = TestUtil.openDB();
-    replConnection = TestUtil.openReplicationConnection();
-    replConnection.setAutoCommit(true);
-  }
-
-  @AfterEach
-  void tearDown() throws Exception {
-    sqlConnection.close();
-    replConnection.close();
-  }
-
-  @Test
-  void openConnectByReplicationProtocol() throws Exception {
-    CopyManager cm = ((PGConnection) replConnection).getCopyAPI();
-
-    LogSequenceNumber logSequenceNumber = getCurrentLSN();
-    CopyDual copyDual = cm.copyDual(
-        "START_REPLICATION " + logSequenceNumber.asString());
-    try {
-      assertThat(
-          "Replication protocol work via copy protocol and initialize as CopyBothResponse, "
-              + "we want that first initialize will work",
-          copyDual, CoreMatchers.notNullValue()
-      );
-    } finally {
-      copyDual.endCopy();
-    }
-  }
-
-  @Test
-  void receiveKeepAliveMessage() throws Exception {
-    CopyManager cm = ((PGConnection) replConnection).getCopyAPI();
-
-    LogSequenceNumber logSequenceNumber = getCurrentLSN();
-    CopyDual copyDual = cm.copyDual(
-        "START_REPLICATION " + logSequenceNumber.asString());
-
-    sendStandByUpdate(copyDual, logSequenceNumber, logSequenceNumber, logSequenceNumber, true);
-    ByteBuffer buf = ByteBuffer.wrap(copyDual.readFromCopy());
-
-    int code = buf.get();
-    copyDual.endCopy();
-
-    assertThat(
-        "Streaming replication start with swap keep alive message, we want that first get package will be keep alive",
-        code, equalTo((int) 'k')
-    );
-  }
-
-  @Test
-  void keedAliveContainsCorrectLSN() throws Exception {
-    CopyManager cm = ((PGConnection) replConnection).getCopyAPI();
-
-    LogSequenceNumber startLsn = getCurrentLSN();
-    CopyDual copyDual =
-        cm.copyDual("START_REPLICATION " + startLsn.asString());
-    sendStandByUpdate(copyDual, startLsn, startLsn, startLsn, true);
-
-    ByteBuffer buf = ByteBuffer.wrap(copyDual.readFromCopy());
-
-    int code = buf.get();
-    LogSequenceNumber lastLSN = LogSequenceNumber.valueOf(buf.getLong());
-    copyDual.endCopy();
-
-    assertThat(
-        "Keep alive message contain last lsn on server, we want that before start replication "
-            + "and get keep alive message not occurs wal modifications",
-        lastLSN, CoreMatchers.equalTo(startLsn)
-    );
-  }
-
-  @Test
-  void receiveXLogData() throws Exception {
-    CopyManager cm = ((PGConnection) replConnection).getCopyAPI();
-
-    LogSequenceNumber startLsn = getCurrentLSN();
-
-    Statement st = sqlConnection.createStatement();
-    st.execute("insert into testreplication(name) values('testing get changes')");
-    st.close();
-
-    CopyDual copyDual =
-        cm.copyDual("START_REPLICATION " + startLsn.asString());
-    sendStandByUpdate(copyDual, startLsn, startLsn, startLsn, false);
-
-    ByteBuffer buf = ByteBuffer.wrap(copyDual.readFromCopy());
-
-    char code = (char) buf.get();
-    copyDual.endCopy();
-
-    assertThat(
-        "When replication starts via slot and specify LSN that lower than last LSN on server, "
-            + "we should get all changes that occurs between two LSN",
-        code, equalTo('w')
-    );
-  }
-
-  private void sendStandByUpdate(CopyDual copyDual, LogSequenceNumber received,
-      LogSequenceNumber flushed, LogSequenceNumber applied, boolean replyRequired)
-      throws SQLException {
-    ByteBuffer response = ByteBuffer.allocate(1 + 8 + 8 + 8 + 8 + 1);
-    response.put((byte) 'r');
-    response.putLong(received.asLong()); //received
-    response.putLong(flushed.asLong()); //flushed
-    response.putLong(applied.asLong()); //applied
-    response.putLong(TimeUnit.MICROSECONDS.convert((System.currentTimeMillis() - 946674000000L),
-        TimeUnit.MICROSECONDS));
-    response.put(replyRequired ? (byte) 1 : (byte) 0); //reply soon as possible
-
-    byte[] standbyUpdate = response.array();
-    copyDual.writeToCopy(standbyUpdate, 0, standbyUpdate.length);
-    copyDual.flushCopy();
-  }
-
-  private LogSequenceNumber getCurrentLSN() throws SQLException {
-    Statement st = sqlConnection.createStatement();
-    ResultSet rs = null;
-    try {
-      rs = st.executeQuery("select "
-          + (((BaseConnection) sqlConnection).haveMinimumServerVersion(ServerVersion.v10)
-          ? "pg_current_wal_lsn()" : "pg_current_xlog_location()"));
-
-      if (rs.next()) {
-        String lsn = rs.getString(1);
-        return LogSequenceNumber.valueOf(lsn);
-      } else {
-        return LogSequenceNumber.INVALID_LSN;
-      }
-    } finally {
-      if (rs != null) {
-        rs.close();
-      }
-      st.close();
-    }
-  }
-}
diff --git a/pgjdbc/src/test/java/org/postgresql/replication/LogSequenceNumberTest.java b/pgjdbc/src/test/java/org/postgresql/replication/LogSequenceNumberTest.java
deleted file mode 100644
index 2b62022..0000000
--- a/pgjdbc/src/test/java/org/postgresql/replication/LogSequenceNumberTest.java
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
- * Copyright (c) 2016, PostgreSQL Global Development Group
- * See the LICENSE file in the project root for more information.
- */
-
-package org.postgresql.replication;
-
-import static org.hamcrest.CoreMatchers.equalTo;
-import static org.hamcrest.CoreMatchers.not;
-import static org.hamcrest.CoreMatchers.notNullValue;
-import static org.hamcrest.MatcherAssert.assertThat;
-
-import org.postgresql.test.annotations.tags.Replication;
-
-import org.junit.jupiter.api.Test;
-
-@Replication
-class LogSequenceNumberTest {
-  @Test
-  void notNullWhenCreateFromStr() throws Exception {
-    LogSequenceNumber result = LogSequenceNumber.valueOf("0/15D68C50");
-    assertThat(result, notNullValue());
-  }
-
-  @Test
-  void parseNotValidLSNStr() throws Exception {
-    LogSequenceNumber result = LogSequenceNumber.valueOf("15D68C55");
-    assertThat(result, equalTo(LogSequenceNumber.INVALID_LSN));
-  }
-
-  @Test
-  void parseLSNFromStringAndConvertToLong() throws Exception {
-    LogSequenceNumber result = LogSequenceNumber.valueOf("16/3002D50");
-    assertThat("64-bit number use in replication protocol, "
-            + "that why we should can convert string represent LSN to long",
-        result.asLong(), equalTo(94539623760L)
-    );
-  }
-
-  @Test
-  void convertNumericLSNToString() throws Exception {
-    LogSequenceNumber result = LogSequenceNumber.valueOf(94539623760L);
-
-    assertThat("64-bit number use in replication protocol, "
-            + "but more readable standard format use in logs where each 8-bit print in hex form via slash",
-        result.asString(), equalTo("16/3002D50")
-    );
-  }
-
-  @Test
-  void convertNumericLSNToString_2() throws Exception {
-    LogSequenceNumber result = LogSequenceNumber.valueOf(366383352L);
-
-    assertThat("64-bit number use in replication protocol, "
-            + "but more readable standard format use in logs where each 8-bit print in hex form via slash",
-        result.asString(), equalTo("0/15D690F8")
-    );
-  }
-
-  @Test
-  void equalLSN() throws Exception {
-    LogSequenceNumber first = LogSequenceNumber.valueOf("0/15D690F8");
-    LogSequenceNumber second = LogSequenceNumber.valueOf("0/15D690F8");
-
-    assertThat(first, equalTo(second));
-  }
-
-  @Test
-  void equalLSNCreateByDifferentWay() throws Exception {
-    LogSequenceNumber first = LogSequenceNumber.valueOf("0/15D690F8");
-    LogSequenceNumber second = LogSequenceNumber.valueOf(366383352L);
-
-    assertThat("LSN creates as 64-bit number and as string where each 8-bit print in hex form "
-            + "via slash represent same position in WAL should be equals",
-        first, equalTo(second)
-    );
-  }
-
-  @Test
-  void notEqualLSN() throws Exception {
-    LogSequenceNumber first = LogSequenceNumber.valueOf("0/15D690F8");
-    LogSequenceNumber second = LogSequenceNumber.valueOf("0/15D68C50");
-
-    assertThat(first, not(equalTo(second)));
-  }
-
-  @Test
-  void differentLSNHaveDifferentHash() throws Exception {
-    LogSequenceNumber first = LogSequenceNumber.valueOf("0/15D690F8");
-    LogSequenceNumber second = LogSequenceNumber.valueOf("0/15D68C50");
-
-    assertThat(first.hashCode(), not(equalTo(second.hashCode())));
-  }
-
-  @Test
-  void sameLSNHaveSameHash() throws Exception {
-    LogSequenceNumber first = LogSequenceNumber.valueOf("0/15D690F8");
-    LogSequenceNumber second = LogSequenceNumber.valueOf("0/15D690F8");
-
-    assertThat(first.hashCode(), equalTo(second.hashCode()));
-  }
-
-  @Test
-  void compareToSameValue() throws Exception {
-    LogSequenceNumber first = LogSequenceNumber.valueOf("0/15D690F8");
-    LogSequenceNumber second = LogSequenceNumber.valueOf("0/15D690F8");
-
-    assertThat(first.compareTo(second), equalTo(0));
-    assertThat(second.compareTo(first), equalTo(0));
-  }
-
-  @Test
-  void compareToPositiveValues() throws Exception {
-    LogSequenceNumber first = LogSequenceNumber.valueOf(1234);
-    LogSequenceNumber second = LogSequenceNumber.valueOf(4321);
-
-    assertThat(first.compareTo(second), equalTo(-1));
-    assertThat(second.compareTo(first), equalTo(1));
-  }
-
-  @Test
-  void compareToNegativeValues() throws Exception {
-    LogSequenceNumber first = LogSequenceNumber.valueOf(0x8000000000000000L);
-    LogSequenceNumber second = LogSequenceNumber.valueOf(0x8000000000000001L);
-
-    assertThat(first.compareTo(second), equalTo(-1));
-    assertThat(second.compareTo(first), equalTo(1));
-  }
-
-  @Test
-  void compareToMixedSign() throws Exception {
-    LogSequenceNumber first = LogSequenceNumber.valueOf(1);
-    LogSequenceNumber second = LogSequenceNumber.valueOf(0x8000000000000001L);
-
-    assertThat(first.compareTo(second), equalTo(-1));
-    assertThat(second.compareTo(first), equalTo(1));
-  }
-
-  @Test
-  void compareToWithInvalid() throws Exception {
-    LogSequenceNumber first = LogSequenceNumber.INVALID_LSN;
-    LogSequenceNumber second = LogSequenceNumber.valueOf(1);
-
-    assertThat(first.compareTo(second), equalTo(-1));
-    assertThat(second.compareTo(first), equalTo(1));
-  }
-}
diff --git a/pgjdbc/src/test/java/org/postgresql/replication/LogicalReplicationStatusTest.java b/pgjdbc/src/test/java/org/postgresql/replication/LogicalReplicationStatusTest.java
deleted file mode 100644
index 6876044..0000000
--- a/pgjdbc/src/test/java/org/postgresql/replication/LogicalReplicationStatusTest.java
+++ /dev/null
@@ -1,549 +0,0 @@
-/*
- * Copyright (c) 2016, PostgreSQL Global Development Group
- * See the LICENSE file in the project root for more information.
- */
-
-package org.postgresql.replication;
-
-import static org.hamcrest.CoreMatchers.not;
-import static org.hamcrest.MatcherAssert.assertThat;
-import static org.hamcrest.core.IsEqual.equalTo;
-
-import org.postgresql.PGConnection;
-import org.postgresql.core.BaseConnection;
-import org.postgresql.core.ServerVersion;
-import org.postgresql.test.TestUtil;
-import org.postgresql.test.annotations.DisabledIfServerVersionBelow;
-import org.postgresql.test.annotations.tags.Replication;
-
-import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.Test;
-
-import java.nio.ByteBuffer;
-import java.sql.Connection;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.TimeUnit;
-
-@Replication
-@DisabledIfServerVersionBelow("9.4")
-class LogicalReplicationStatusTest {
-  private static final String SLOT_NAME = "pgjdbc_logical_replication_slot";
-
-  private Connection replicationConnection;
-  private Connection sqlConnection;
-  private Connection secondSqlConnection;
-
-  @BeforeEach
-  void setUp() throws Exception {
-    //statistic available only for privileged user
-    sqlConnection = TestUtil.openPrivilegedDB();
-    secondSqlConnection = TestUtil.openPrivilegedDB("test_2");
-    //DriverManager.setLogWriter(new PrintWriter(System.out));
-    replicationConnection = TestUtil.openReplicationConnection();
-    TestUtil.createTable(sqlConnection, "test_logic_table",
-        "pk serial primary key, name varchar(100)");
-    TestUtil.createTable(secondSqlConnection, "test_logic_table",
-        "pk serial primary key, name varchar(100)");
-
-    TestUtil.recreateLogicalReplicationSlot(sqlConnection, SLOT_NAME, "test_decoding");
-  }
-
-  @AfterEach
-  void tearDown() throws Exception {
-    replicationConnection.close();
-    TestUtil.dropTable(sqlConnection, "test_logic_table");
-    TestUtil.dropTable(secondSqlConnection, "test_logic_table");
-    TestUtil.dropReplicationSlot(sqlConnection, SLOT_NAME);
-    secondSqlConnection.close();
-    sqlConnection.close();
-  }
-
-  @Test
-  void sentLocationEqualToLastReceiveLSN() throws Exception {
-    PGConnection pgConnection = (PGConnection) replicationConnection;
-
-    LogSequenceNumber startLSN = getCurrentLSN();
-
-    insertPreviousChanges(sqlConnection);
-
-    PGReplicationStream stream =
-        pgConnection
-            .getReplicationAPI()
-            .replicationStream()
-            .logical()
-            .withSlotName(SLOT_NAME)
-            .withStartPosition(startLSN)
-            .start();
-
-    final int countMessage = 3;
-
-    List<String> received = receiveMessageWithoutBlock(stream, countMessage);
-    LogSequenceNumber lastReceivedLSN = stream.getLastReceiveLSN();
-    stream.forceUpdateStatus();
-
-    LogSequenceNumber sentByServer = getSentLocationOnView();
-
-    assertThat("When changes absent on server last receive by stream LSN "
-            + "should be equal to last sent by server LSN",
-        sentByServer, equalTo(lastReceivedLSN)
-    );
-  }
-
-  /**
-   * Test fail on PG version 9.4.5 because postgresql have bug.
-   */
-  @Test
-  @DisabledIfServerVersionBelow("9.4.8")
-  void receivedLSNDependentOnProcessMessage() throws Exception {
-    PGConnection pgConnection = (PGConnection) replicationConnection;
-
-    LogSequenceNumber startLSN = getCurrentLSN();
-
-    insertPreviousChanges(sqlConnection);
-
-    PGReplicationStream stream =
-        pgConnection
-            .getReplicationAPI()
-            .replicationStream()
-            .logical()
-            .withSlotName(SLOT_NAME)
-            .withStartPosition(startLSN)
-            .start();
-
-    receiveMessageWithoutBlock(stream, 1);
-    LogSequenceNumber firstLSN = stream.getLastReceiveLSN();
-
-    receiveMessageWithoutBlock(stream, 1);
-    LogSequenceNumber secondLSN = stream.getLastReceiveLSN();
-
-    assertThat("After receive each new message current LSN updates in stream",
-        firstLSN, not(equalTo(secondLSN))
-    );
-  }
-
-  @Test
-  void lastReceiveLSNCorrectOnView() throws Exception {
-    PGConnection pgConnection = (PGConnection) replicationConnection;
-
-    LogSequenceNumber startLSN = getCurrentLSN();
-
-    insertPreviousChanges(sqlConnection);
-
-    PGReplicationStream stream =
-        pgConnection
-            .getReplicationAPI()
-            .replicationStream()
-            .logical()
-            .withSlotName(SLOT_NAME)
-            .withStartPosition(startLSN)
-            .start();
-
-    receiveMessageWithoutBlock(stream, 2);
-    LogSequenceNumber lastReceivedLSN = stream.getLastReceiveLSN();
-    stream.forceUpdateStatus();
-
-    assertThat(
-        "Replication stream by execute forceUpdateStatus should send to view actual received position "
-            + "that allow monitoring lag",
-        lastReceivedLSN, equalTo(getWriteLocationOnView())
-    );
-  }
-
-  @Test
-  void writeLocationCanBeLessThanSendLocation() throws Exception {
-    PGConnection pgConnection = (PGConnection) replicationConnection;
-
-    LogSequenceNumber startLSN = getCurrentLSN();
-
-    insertPreviousChanges(sqlConnection);
-
-    PGReplicationStream stream =
-        pgConnection
-            .getReplicationAPI()
-            .replicationStream()
-            .logical()
-            .withSlotName(SLOT_NAME)
-            .withStartPosition(startLSN)
-            .start();
-
-    receiveMessageWithoutBlock(stream, 2);
-    stream.forceUpdateStatus();
-
-    LogSequenceNumber writeLocation = getWriteLocationOnView();
-    LogSequenceNumber sentLocation = getSentLocationOnView();
-
-    assertThat(
-        "In view pg_stat_replication column write_location define which position consume client "
-            + "but sent_location define which position was sent to client, so in current test we have 1 pending message, "
-            + "so write and sent can't be equals",
-        writeLocation, not(equalTo(sentLocation))
-    );
-  }
-
-  @Test
-  void flushLocationEqualToSetLocation() throws Exception {
-    PGConnection pgConnection = (PGConnection) replicationConnection;
-
-    LogSequenceNumber startLSN = getCurrentLSN();
-
-    insertPreviousChanges(sqlConnection);
-
-    PGReplicationStream stream =
-        pgConnection
-            .getReplicationAPI()
-            .replicationStream()
-            .logical()
-            .withSlotName(SLOT_NAME)
-            .withStartPosition(startLSN)
-            .start();
-
-    receiveMessageWithoutBlock(stream, 1);
-
-    LogSequenceNumber flushLSN = stream.getLastReceiveLSN();
-    stream.setFlushedLSN(flushLSN);
-
-    //consume another messages
-    receiveMessageWithoutBlock(stream, 2);
-
-    stream.forceUpdateStatus();
-
-    LogSequenceNumber result = getFlushLocationOnView();
-
-    assertThat("Flush LSN use for define which wal can be recycled and it parameter should be "
-            + "specify manually on replication stream, because only client "
-            + "of replication stream now which wal not necessary. We wait that it status correct "
-            + "send to backend and available via view, because if status will "
-            + "not send it lead to problem when WALs never recycled",
-        result, equalTo(flushLSN)
-    );
-  }
-
-  @Test
-  void flushLocationDoNotChangeDuringReceiveMessage() throws Exception {
-    PGConnection pgConnection = (PGConnection) replicationConnection;
-
-    LogSequenceNumber startLSN = getCurrentLSN();
-
-    insertPreviousChanges(sqlConnection);
-
-    PGReplicationStream stream =
-        pgConnection
-            .getReplicationAPI()
-            .replicationStream()
-            .logical()
-            .withSlotName(SLOT_NAME)
-            .withStartPosition(startLSN)
-            .start();
-
-    receiveMessageWithoutBlock(stream, 1);
-    final LogSequenceNumber flushLSN = stream.getLastReceiveLSN();
-    stream.setFlushedLSN(flushLSN);
-    receiveMessageWithoutBlock(stream, 2);
-
-    assertThat(
-        "Flush LSN it parameter that specify manually on stream and they can not automatically "
-            + "change during receive another messages, "
-            + "because auto update can lead to problem when WAL recycled on postgres "
-            + "because we send feedback that current position successfully flush, but in real they not flush yet",
-        stream.getLastFlushedLSN(), equalTo(flushLSN)
-    );
-  }
-
-  @Test
-  void applyLocationEqualToSetLocation() throws Exception {
-    PGConnection pgConnection = (PGConnection) replicationConnection;
-
-    LogSequenceNumber startLSN = getCurrentLSN();
-
-    insertPreviousChanges(sqlConnection);
-
-    PGReplicationStream stream =
-        pgConnection
-            .getReplicationAPI()
-            .replicationStream()
-            .logical()
-            .withSlotName(SLOT_NAME)
-            .withStartPosition(startLSN)
-            .start();
-
-    receiveMessageWithoutBlock(stream, 1);
-    final LogSequenceNumber applyLSN = stream.getLastReceiveLSN();
-
-    stream.setAppliedLSN(applyLSN);
-    stream.setFlushedLSN(applyLSN);
-
-    receiveMessageWithoutBlock(stream, 2);
-    stream.forceUpdateStatus();
-
-    LogSequenceNumber result = getReplayLocationOnView();
-
-    assertThat(
-        "During receive message from replication stream all feedback parameter "
-            + "that we set to stream should be sent to backend"
-            + "because it allow monitoring replication status and also recycle old WALs",
-        result, equalTo(applyLSN)
-    );
-  }
-
-  /**
-   * Test fail on PG version 9.4.5 because postgresql have bug.
-   */
-  @Test
-  @DisabledIfServerVersionBelow("9.4.8")
-  void applyLocationDoNotDependOnFlushLocation() throws Exception {
-    PGConnection pgConnection = (PGConnection) replicationConnection;
-
-    LogSequenceNumber startLSN = getCurrentLSN();
-
-    insertPreviousChanges(sqlConnection);
-
-    PGReplicationStream stream =
-        pgConnection
-            .getReplicationAPI()
-            .replicationStream()
-            .logical()
-            .withSlotName(SLOT_NAME)
-            .withStartPosition(startLSN)
-            .start();
-
-    receiveMessageWithoutBlock(stream, 1);
-    stream.setAppliedLSN(stream.getLastReceiveLSN());
-    stream.setFlushedLSN(stream.getLastReceiveLSN());
-
-    receiveMessageWithoutBlock(stream, 1);
-    stream.setFlushedLSN(stream.getLastReceiveLSN());
-
-    receiveMessageWithoutBlock(stream, 1);
-    stream.forceUpdateStatus();
-
-    LogSequenceNumber flushed = getFlushLocationOnView();
-    LogSequenceNumber applied = getReplayLocationOnView();
-
-    assertThat(
-        "Last applied LSN and last flushed LSN it two not depends parameters and they can be not equal between",
-        applied, not(equalTo(flushed))
-    );
-  }
-
-  @Test
-  void applyLocationDoNotChangeDuringReceiveMessage() throws Exception {
-    PGConnection pgConnection = (PGConnection) replicationConnection;
-
-    LogSequenceNumber startLSN = getCurrentLSN();
-
-    insertPreviousChanges(sqlConnection);
-
-    PGReplicationStream stream =
-        pgConnection
-            .getReplicationAPI()
-            .replicationStream()
-            .logical()
-            .withSlotName(SLOT_NAME)
-            .withStartPosition(startLSN)
-            .start();
-
-    receiveMessageWithoutBlock(stream, 1);
-    final LogSequenceNumber applyLSN = stream.getLastReceiveLSN();
-    stream.setAppliedLSN(applyLSN);
-    receiveMessageWithoutBlock(stream, 2);
-
-    assertThat(
-        "Apply LSN it parameter that specify manually on stream and they can not automatically "
-            + "change during receive another messages, "
-            + "because auto update can lead to problem when WAL recycled on postgres "
-            + "because we send feedback that current position successfully flush, but in real they not flush yet",
-        stream.getLastAppliedLSN(), equalTo(applyLSN)
-    );
-  }
-
-  @Test
-  void statusCanBeSentToBackendAsynchronously() throws Exception {
-    PGConnection pgConnection = (PGConnection) replicationConnection;
-
-    final int intervalTime = 100;
-    final TimeUnit timeFormat = TimeUnit.MILLISECONDS;
-
-    LogSequenceNumber startLSN = getCurrentLSN();
-
-    insertPreviousChanges(sqlConnection);
-
-    PGReplicationStream stream =
-        pgConnection
-            .getReplicationAPI()
-            .replicationStream()
-            .logical()
-            .withSlotName(SLOT_NAME)
-            .withStartPosition(startLSN)
-            .withStatusInterval(intervalTime, timeFormat)
-            .start();
-
-    receiveMessageWithoutBlock(stream, 3);
-
-    LogSequenceNumber waitLSN = stream.getLastReceiveLSN();
-
-    stream.setAppliedLSN(waitLSN);
-    stream.setFlushedLSN(waitLSN);
-
-    timeFormat.sleep(intervalTime + 1);
-
-    //get pending message and trigger update status by timeout
-    stream.readPending();
-
-    LogSequenceNumber flushLSN = getFlushLocationOnView();
-
-    assertThat("Status can be sent to backend by some time interval, "
-            + "by default it parameter equals to 10 second, but in current test we change it on few millisecond "
-            + "and wait that set status on stream will be auto send to backend",
-        flushLSN, equalTo(waitLSN)
-    );
-  }
-
-  private void insertPreviousChanges(Connection sqlConnection) throws SQLException {
-    try (Statement st = sqlConnection.createStatement()) {
-      st.execute("insert into test_logic_table(name) values('previous changes')");
-    }
-  }
-
-  @Test
-  void keepAliveServerLSNCanBeUsedToAdvanceFlushLSN() throws Exception {
-    PGConnection pgConnection = (PGConnection) replicationConnection;
-
-    LogSequenceNumber startLSN = getCurrentLSN();
-
-    PGReplicationStream stream =
-        pgConnection
-            .getReplicationAPI()
-            .replicationStream()
-            .logical()
-            .withSlotName(SLOT_NAME)
-            .withStartPosition(startLSN)
-            .withStatusInterval(1, TimeUnit.SECONDS)
-            .start();
-
-    // create replication changes and poll for messages
-    insertPreviousChanges(sqlConnection);
-
-    receiveMessageWithoutBlock(stream, 3);
-
-    // client confirms flush of these changes. At this point we're in sync with server
-    LogSequenceNumber confirmedClientFlushLSN = stream.getLastReceiveLSN();
-    stream.setFlushedLSN(confirmedClientFlushLSN);
-    stream.forceUpdateStatus();
-
-    // now insert something into other DB (without replication) to generate WAL
-    insertPreviousChanges(secondSqlConnection);
-
-    TimeUnit.SECONDS.sleep(1);
-
-    // read KeepAlive messages - lastServerLSN will have advanced and we can safely confirm it
-    stream.readPending();
-
-    LogSequenceNumber lastFlushedLSN = stream.getLastFlushedLSN();
-    LogSequenceNumber lastReceivedLSN = stream.getLastReceiveLSN();
-
-    assertThat("Activity in other database will generate WAL but no XLogData "
-            + " messages. Received LSN will begin to advance beyond of confirmed flushLSN",
-        confirmedClientFlushLSN, not(equalTo(lastReceivedLSN))
-    );
-
-    assertThat("When all XLogData messages have been processed, we can confirm "
-            + " flush of Server LSNs in the KeepAlive messages",
-        lastFlushedLSN, equalTo(lastReceivedLSN)
-    );
-  }
-
-  private LogSequenceNumber getSentLocationOnView() throws Exception {
-    return getLSNFromView((((BaseConnection) sqlConnection).haveMinimumServerVersion(ServerVersion.v10)
-        ? "sent_lsn" : "sent_location"));
-  }
-
-  private LogSequenceNumber getWriteLocationOnView() throws Exception {
-    return getLSNFromView((((BaseConnection) sqlConnection).haveMinimumServerVersion(ServerVersion.v10)
-        ? "write_lsn" : "write_location"));
-  }
-
-  private LogSequenceNumber getFlushLocationOnView() throws Exception {
-    return getLSNFromView((((BaseConnection) sqlConnection).haveMinimumServerVersion(ServerVersion.v10)
-        ? "flush_lsn" : "flush_location"));
-  }
-
-  private LogSequenceNumber getReplayLocationOnView() throws Exception {
-    return getLSNFromView((((BaseConnection) sqlConnection).haveMinimumServerVersion(ServerVersion.v10)
-        ? "replay_lsn" : "replay_location"));
-  }
-
-  private List<String> receiveMessageWithoutBlock(PGReplicationStream stream, int count)
-      throws Exception {
-    List<String> result = new ArrayList<>(3);
-    for (int index = 0; index < count; index++) {
-      ByteBuffer message;
-      do {
-        message = stream.readPending();
-
-        if (message == null) {
-          TimeUnit.MILLISECONDS.sleep(2);
-        }
-      } while (message == null);
-
-      result.add(toString(message));
-    }
-
-    return result;
-  }
-
-  private String toString(ByteBuffer buffer) {
-    int offset = buffer.arrayOffset();
-    byte[] source = buffer.array();
-    int length = source.length - offset;
-
-    return new String(source, offset, length);
-  }
-
-  private LogSequenceNumber getLSNFromView(String columnName) throws Exception {
-    int pid = ((PGConnection) replicationConnection).getBackendPID();
-
-    int repeatCount = 0;
-    while (true) {
-      try (
-          Statement st = sqlConnection.createStatement();
-          ResultSet rs = st.executeQuery("select * from pg_stat_replication where pid = " + pid)
-      ) {
-        String result = null;
-        if (rs.next()) {
-          result = rs.getString(columnName);
-        }
-
-        if (result == null || result.isEmpty()) {
-          //replication monitoring view updates with some delay, wait some time and try again
-          TimeUnit.MILLISECONDS.sleep(100L);
-          repeatCount++;
-          if (repeatCount == 10) {
-            return null;
-          }
-        } else {
-          return LogSequenceNumber.valueOf(result);
-        }
-      }
-    }
-  }
-
-  private LogSequenceNumber getCurrentLSN() throws SQLException {
-    try (Statement st = sqlConnection.createStatement();
-         ResultSet rs = st.executeQuery("select "
-             + (((BaseConnection) sqlConnection).haveMinimumServerVersion(ServerVersion.v10)
-             ? "pg_current_wal_lsn()" : "pg_current_xlog_location()"))
-    ) {
-      if (rs.next()) {
-        String lsn = rs.getString(1);
-        return LogSequenceNumber.valueOf(lsn);
-      } else {
-        return LogSequenceNumber.INVALID_LSN;
-      }
-    }
-  }
-}
diff --git a/pgjdbc/src/test/java/org/postgresql/replication/LogicalReplicationTest.java b/pgjdbc/src/test/java/org/postgresql/replication/LogicalReplicationTest.java
deleted file mode 100644
index 62dc510..0000000
--- a/pgjdbc/src/test/java/org/postgresql/replication/LogicalReplicationTest.java
+++ /dev/null
@@ -1,959 +0,0 @@
-/*
- * Copyright (c) 2016, PostgreSQL Global Development Group
- * See the LICENSE file in the project root for more information.
- */
-
-package org.postgresql.replication;
-
-import static org.hamcrest.CoreMatchers.equalTo;
-import static org.hamcrest.MatcherAssert.assertThat;
-import static org.hamcrest.junit.MatcherAssume.assumeThat;
-import static org.junit.jupiter.api.Assertions.fail;
-
-import org.postgresql.PGConnection;
-import org.postgresql.core.BaseConnection;
-import org.postgresql.core.ServerVersion;
-import org.postgresql.test.TestUtil;
-import org.postgresql.test.annotations.DisabledIfServerVersionBelow;
-import org.postgresql.test.annotations.tags.Replication;
-import org.postgresql.util.PSQLException;
-import org.postgresql.util.PSQLState;
-
-import org.hamcrest.CoreMatchers;
-import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.Test;
-import org.junit.jupiter.api.Timeout;
-
-import java.nio.ByteBuffer;
-import java.sql.Connection;
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-
-@Replication
-@DisabledIfServerVersionBelow("9.4")
-class LogicalReplicationTest {
-  private static final String SLOT_NAME = "pgjdbc_logical_replication_slot";
-
-  private Connection replConnection;
-  private Connection sqlConnection;
-
-  private static String toString(ByteBuffer buffer) {
-    int offset = buffer.arrayOffset();
-    byte[] source = buffer.array();
-    int length = source.length - offset;
-
-    return new String(source, offset, length);
-  }
-
-  @BeforeEach
-  void setUp() throws Exception {
-    sqlConnection = TestUtil.openPrivilegedDB();
-    //DriverManager.setLogWriter(new PrintWriter(System.out));
-    replConnection = TestUtil.openReplicationConnection();
-    TestUtil.createTable(sqlConnection, "test_logic_table",
-        "pk serial primary key, name varchar(100)");
-
-    TestUtil.recreateLogicalReplicationSlot(sqlConnection, SLOT_NAME, "test_decoding");
-  }
-
-  @AfterEach
-  void tearDown() throws Exception {
-    replConnection.close();
-    TestUtil.dropTable(sqlConnection, "test_logic_table");
-    TestUtil.dropReplicationSlot(sqlConnection, SLOT_NAME);
-    sqlConnection.close();
-  }
-
-  @Test
-  @Timeout(1)
-  void notAvailableStartNotExistReplicationSlot() throws Exception {
-    PGConnection pgConnection = (PGConnection) replConnection;
-
-    LogSequenceNumber lsn = getCurrentLSN();
-
-    try {
-      PGReplicationStream stream =
-          pgConnection
-              .getReplicationAPI()
-              .replicationStream()
-              .logical()
-              .withSlotName("notExistSlotName")
-              .withStartPosition(lsn)
-              .start();
-
-      fail("For logical decoding replication slot name it required parameter "
-          + "that should be create on server before start replication");
-
-    } catch (PSQLException e) {
-      String state = e.getSQLState();
-
-      assertThat("When replication slot doesn't exists, server can't start replication "
-              + "and should throw exception about it",
-          state, equalTo(PSQLState.UNDEFINED_OBJECT.getState())
-      );
-    }
-  }
-
-  @Test
-  @Timeout(1)
-  void receiveChangesOccursBeforeStartReplication() throws Exception {
-    PGConnection pgConnection = (PGConnection) replConnection;
-
-    LogSequenceNumber lsn = getCurrentLSN();
-
-    Statement st = sqlConnection.createStatement();
-    st.execute("insert into test_logic_table(name) values('previous value')");
-    st.close();
-
-    PGReplicationStream stream =
-        pgConnection
-            .getReplicationAPI()
-            .replicationStream()
-            .logical()
-            .withSlotName(SLOT_NAME)
-            .withStartPosition(lsn)
-            .withSlotOption("include-xids", false)
-            .start();
-
-    String result = group(receiveMessage(stream, 3));
-
-    String wait = group(
-        Arrays.asList(
-            "BEGIN",
-            "table public.test_logic_table: INSERT: pk[integer]:1 name[character varying]:'previous value'",
-            "COMMIT"
-        )
-    );
-
-    assertThat("Logical replication can be start from some LSN position and all changes that "
-            + "occurs between last server LSN and specified LSN position should be available to read "
-            + "via stream in correct order",
-        result, equalTo(wait)
-    );
-  }
-
-  @Test
-  @Timeout(1)
-  void receiveChangesAfterStartReplication() throws Exception {
-    PGConnection pgConnection = (PGConnection) replConnection;
-
-    LogSequenceNumber lsn = getCurrentLSN();
-
-    PGReplicationStream stream =
-        pgConnection
-            .getReplicationAPI()
-            .replicationStream()
-            .logical()
-            .withSlotName(SLOT_NAME)
-            .withStartPosition(lsn)
-            .withSlotOption("include-xids", false)
-            .withSlotOption("skip-empty-xacts", true)
-            .start();
-
-    List<String> result = new ArrayList<>();
-
-    Statement st = sqlConnection.createStatement();
-    st.execute(
-        "insert into test_logic_table(name) values('first message after start replication')");
-    st.close();
-
-    result.addAll(receiveMessage(stream, 3));
-
-    st = sqlConnection.createStatement();
-    st.execute(
-        "insert into test_logic_table(name) values('second message after start replication')");
-    st.close();
-
-    result.addAll(receiveMessage(stream, 3));
-
-    String groupedResult = group(result);
-
-    String wait = group(Arrays.asList(
-        "BEGIN",
-        "table public.test_logic_table: INSERT: pk[integer]:1 name[character varying]:'first message after start replication'",
-        "COMMIT",
-        "BEGIN",
-        "table public.test_logic_table: INSERT: pk[integer]:2 name[character varying]:'second message after start replication'",
-        "COMMIT"
-    ));
-
-    assertThat(
-        "After starting replication, from stream should be available also new changes that occurs after start replication",
-        groupedResult, equalTo(wait)
-    );
-  }
-
-  @Test
-  @Timeout(1)
-  void startFromCurrentServerLSNWithoutSpecifyLSNExplicitly() throws Exception {
-    PGConnection pgConnection = (PGConnection) replConnection;
-
-    PGReplicationStream stream =
-        pgConnection
-            .getReplicationAPI()
-            .replicationStream()
-            .logical()
-            .withSlotName(SLOT_NAME)
-            .withSlotOption("include-xids", false)
-            .withSlotOption("skip-empty-xacts", true)
-            .start();
-
-    Statement st = sqlConnection.createStatement();
-    st.execute("insert into test_logic_table(name) values('last server message')");
-    st.close();
-
-    String result = group(receiveMessage(stream, 3));
-
-    String wait = group(Arrays.asList(
-        "BEGIN",
-        "table public.test_logic_table: INSERT: pk[integer]:1 name[character varying]:'last server message'",
-        "COMMIT"
-    ));
-
-    assertThat(
-        "When start LSN position not specify explicitly, wal should be stream from actual server position",
-        result, equalTo(wait));
-  }
-
-  @Test
-  @Timeout(1)
-  void afterStartStreamingDBSlotStatusActive() throws Exception {
-    PGConnection pgConnection = (PGConnection) replConnection;
-
-    PGReplicationStream stream =
-        pgConnection
-            .getReplicationAPI()
-            .replicationStream()
-            .logical()
-            .withSlotName(SLOT_NAME)
-            .withSlotOption("include-xids", false)
-            .withSlotOption("skip-empty-xacts", true)
-            .start();
-
-    boolean isActive = isActiveOnView();
-
-    assertThat(
-        "After start streaming, database status should be update on view pg_replication_slots to active",
-        isActive, equalTo(true)
-    );
-  }
-
-  /**
-   * <p>Bug in postgreSQL that should be fixed in 10 version after code review patch <a
-   * href="http://www.postgresql.org/message-id/CAFgjRd3hdYOa33m69TbeOfNNer2BZbwa8FFjt2V5VFzTBvUU3w@mail.gmail.com">
-   * Stopping logical replication protocol</a>.</p>
-   *
-   * <p>If you try to run it test on version before 10 they fail with time out, because postgresql
-   * wait new changes and until waiting messages from client ignores.</p>
-   */
-  @Test
-  @Timeout(1)
-  @DisabledIfServerVersionBelow("11.1")
-  void afterCloseReplicationStreamDBSlotStatusNotActive() throws Exception {
-    PGConnection pgConnection = (PGConnection) replConnection;
-
-    PGReplicationStream stream =
-        pgConnection
-            .getReplicationAPI()
-            .replicationStream()
-            .logical()
-            .withSlotName(SLOT_NAME)
-            .withSlotOption("include-xids", false)
-            .withSlotOption("skip-empty-xacts", true)
-            .start();
-
-    boolean isActive = isActiveOnView();
-    assumeThat(isActive, equalTo(true));
-
-    stream.close();
-
-    isActive = isActiveOnView();
-    assertThat("Execute close method on PGREplicationStream should lead to stop replication, "
-            + "as result we wait that on view pg_replication_slots status for slot will change to no active",
-        isActive, equalTo(false)
-    );
-  }
-
-  @Test
-  @Timeout(1)
-  void afterCloseConnectionDBSLotStatusNotActive() throws Exception {
-    PGConnection pgConnection = (PGConnection) replConnection;
-
-    LogSequenceNumber lsn = getCurrentLSN();
-
-    PGReplicationStream stream =
-        pgConnection
-            .getReplicationAPI()
-            .replicationStream()
-            .logical()
-            .withSlotName(SLOT_NAME)
-            .withStartPosition(lsn)
-            .withSlotOption("include-xids", false)
-            .withSlotOption("skip-empty-xacts", true)
-            .start();
-
-    boolean isActive = isActiveOnView();
-    assumeThat(isActive, equalTo(true));
-
-    replConnection.close();
-
-    isActive = isActiveOnView();
-    //we doesn't wait replay from server about stop connection that why some delay exists on update view and should wait some time before check view
-    if (isActive) {
-      TimeUnit.MILLISECONDS.sleep(200L);
-      isActive = isActiveOnView();
-    }
-
-    assertThat(
-        "Execute close method on Connection should lead to stop replication as fast as possible, "
-            + "as result we wait that on view pg_replication_slots status for slot will change to no active",
-        isActive, equalTo(false)
-    );
-  }
-
-  /**
-   * <p>Bug in postgreSQL that should be fixed in 10 version after code review patch <a
-   * href="http://www.postgresql.org/message-id/CAFgjRd3hdYOa33m69TbeOfNNer2BZbwa8FFjt2V5VFzTBvUU3w@mail.gmail.com">
-   * Stopping logical replication protocol</a>.</p>
-   *
-   * <p>If you try to run it test on version before 10 they fail with time out, because postgresql
-   * wait new changes and until waiting messages from client ignores.</p>
-   */
-  @Test
-  @Timeout(10)
-  @DisabledIfServerVersionBelow("12.1")
-  void duringSendBigTransactionConnectionCloseSlotStatusNotActive() throws Exception {
-    PGConnection pgConnection = (PGConnection) replConnection;
-
-    LogSequenceNumber lsn = getCurrentLSN();
-
-    Statement st = sqlConnection.createStatement();
-    st.execute("insert into test_logic_table\n"
-        + "  select id, md5(random()::text) as name from generate_series(1, 200000) as id;");
-    st.close();
-
-    PGReplicationStream stream =
-        pgConnection
-            .getReplicationAPI()
-            .replicationStream()
-            .logical()
-            .withStartPosition(lsn)
-            .withSlotName(SLOT_NAME)
-            .withSlotOption("include-xids", false)
-            .withSlotOption("skip-empty-xacts", true)
-            .start();
-
-    //wait first message
-    stream.read();
-
-    replConnection.close();
-
-    boolean isActive = isActiveOnView();
-
-    /*
-     * we don't wait for replay from server about stop connection that's why some
-     * delay exists on update view and should wait some time before check view
-     */
-    if (isActive) {
-      TimeUnit.SECONDS.sleep(2L);
-      isActive = isActiveOnView();
-    }
-
-    assertThat(
-        "Execute close method on Connection should lead to stop replication as fast as possible, "
-            + "as result we wait that on view pg_replication_slots status for slot will change to no active",
-        isActive, equalTo(false)
-    );
-  }
-
-  /**
-   * <p>Bug in postgreSQL that should be fixed in 10 version after code review patch <a
-   * href="http://www.postgresql.org/message-id/CAFgjRd3hdYOa33m69TbeOfNNer2BZbwa8FFjt2V5VFzTBvUU3w@mail.gmail.com">
-   * Stopping logical replication protocol</a>.</p>
-   *
-   * <p>If you try to run it test on version before 10 they fail with time out, because postgresql
-   * wait new changes and until waiting messages from client ignores.</p>
-   */
-  @Test
-  @Timeout(60)
-  @DisabledIfServerVersionBelow("11.1")
-  void duringSendBigTransactionReplicationStreamCloseNotActive() throws Exception {
-    PGConnection pgConnection = (PGConnection) replConnection;
-
-    LogSequenceNumber lsn = getCurrentLSN();
-
-    Statement st = sqlConnection.createStatement();
-    st.execute("insert into test_logic_table\n"
-        + "  select id, md5(random()::text) as name from generate_series(1, 200000) as id;");
-    st.close();
-
-    PGReplicationStream stream =
-        pgConnection
-            .getReplicationAPI()
-            .replicationStream()
-            .logical()
-            .withStartPosition(lsn)
-            .withSlotName(SLOT_NAME)
-            .withSlotOption("include-xids", false)
-            .withSlotOption("skip-empty-xacts", true)
-            .start();
-
-    //wait first message
-    stream.read();
-
-    stream.close();
-    //after replay from server that replication stream stopped, view already should be updated
-    boolean isActive = isActiveOnView();
-    assertThat("Execute close method on PGREplicationStream should lead to stop replication, "
-            + "as result we wait that on view pg_replication_slots status for slot will change to no active",
-        isActive, equalTo(false)
-    );
-  }
-
-  //todo fix, fail because backend for logical decoding not reply with CommandComplate & ReadyForQuery
-  @Test
-  @Timeout(5)
-  void repeatWalPositionTwice() throws Exception {
-    PGConnection pgConnection = (PGConnection) replConnection;
-
-    LogSequenceNumber startLSN = getCurrentLSN();
-
-    Statement st = sqlConnection.createStatement();
-    st.execute("insert into test_logic_table(name) values('message to repeat')");
-    st.close();
-
-    PGReplicationStream stream =
-        pgConnection
-            .getReplicationAPI()
-            .replicationStream()
-            .logical()
-            .withSlotName(SLOT_NAME)
-            .withStartPosition(startLSN)
-            .withSlotOption("include-xids", false)
-            .withSlotOption("skip-empty-xacts", true)
-            .start();
-
-    List<String> result = new ArrayList<>();
-    result.addAll(receiveMessage(stream, 3));
-
-    replConnection.close();
-    waitStopReplicationSlot();
-
-    replConnection = TestUtil.openReplicationConnection();
-    pgConnection = (PGConnection) replConnection;
-
-    stream =
-        pgConnection
-            .getReplicationAPI()
-            .replicationStream()
-            .logical()
-            .withSlotName(SLOT_NAME)
-            .withStartPosition(startLSN)
-            .withSlotOption("include-xids", false)
-            .withSlotOption("skip-empty-xacts", true)
-            .start();
-
-    result.addAll(receiveMessage(stream, 3));
-
-    String groupedResult = group(result);
-    String wait = group(Arrays.asList(
-        "BEGIN",
-        "table public.test_logic_table: INSERT: pk[integer]:1 name[character varying]:'message to repeat'",
-        "COMMIT",
-        "BEGIN",
-        "table public.test_logic_table: INSERT: pk[integer]:1 name[character varying]:'message to repeat'",
-        "COMMIT"
-    ));
-
-    assertThat("Logical replication stream after start streaming can be close and "
-            + "reopen on previous LSN, that allow reply wal logs, if they was not recycled yet",
-        groupedResult, equalTo(wait)
-    );
-  }
-
-  @Test
-  @Timeout(3)
-  void doesNotHavePendingMessageWhenStartFromLastLSN() throws Exception {
-    PGConnection pgConnection = (PGConnection) replConnection;
-
-    PGReplicationStream stream =
-        pgConnection
-            .getReplicationAPI()
-            .replicationStream()
-            .logical()
-            .withSlotName(SLOT_NAME)
-            .withStartPosition(getCurrentLSN())
-            .start();
-
-    ByteBuffer result = stream.readPending();
-
-    assertThat("Read pending message allow without lock on socket read message, "
-            + "and if message absent return null. In current test we start replication from last LSN on server, "
-            + "so changes absent on server and readPending message will always lead to null ByteBuffer",
-        result, equalTo(null)
-    );
-  }
-
-  @Test
-  @Timeout(3)
-  void readPreviousChangesWithoutBlock() throws Exception {
-    PGConnection pgConnection = (PGConnection) replConnection;
-
-    LogSequenceNumber startLSN = getCurrentLSN();
-
-    Statement st = sqlConnection.createStatement();
-    st.execute("insert into test_logic_table(name) values('previous changes')");
-    st.close();
-
-    PGReplicationStream stream =
-        pgConnection
-            .getReplicationAPI()
-            .replicationStream()
-            .logical()
-            .withSlotName(SLOT_NAME)
-            .withStartPosition(startLSN)
-            .withSlotOption("include-xids", false)
-            .withSlotOption("skip-empty-xacts", true)
-            .start();
-
-    String received = group(receiveMessageWithoutBlock(stream, 3));
-
-    String wait = group(Arrays.asList(
-        "BEGIN",
-        "table public.test_logic_table: INSERT: pk[integer]:1 name[character varying]:'previous changes'",
-        "COMMIT"
-    ));
-
-    assertThat(
-        "Messages from stream can be read by readPending method for avoid long block on Socket, "
-            + "in current test we wait that behavior will be same as for read message with block",
-        received, equalTo(wait)
-    );
-  }
-
-  @Test
-  @Timeout(3)
-  void readActualChangesWithoutBlock() throws Exception {
-    PGConnection pgConnection = (PGConnection) replConnection;
-
-    PGReplicationStream stream =
-        pgConnection
-            .getReplicationAPI()
-            .replicationStream()
-            .logical()
-            .withSlotName(SLOT_NAME)
-            .withStartPosition(getCurrentLSN())
-            .withSlotOption("include-xids", false)
-            .withSlotOption("skip-empty-xacts", true)
-            .start();
-
-    Statement st = sqlConnection.createStatement();
-    st.execute("insert into test_logic_table(name) values('actual changes')");
-    st.close();
-
-    String received = group(receiveMessageWithoutBlock(stream, 3));
-
-    String wait = group(Arrays.asList(
-        "BEGIN",
-        "table public.test_logic_table: INSERT: pk[integer]:1 name[character varying]:'actual changes'",
-        "COMMIT"
-    ));
-
-    assertThat(
-        "Messages from stream can be read by readPending method for avoid long block on Socket, "
-            + "in current test we wait that behavior will be same as for read message with block",
-        received, equalTo(wait)
-    );
-  }
-
-  @Test
-  @Timeout(10)
-  void avoidTimeoutDisconnectWithDefaultStatusInterval() throws Exception {
-    final int statusInterval = getKeepAliveTimeout();
-
-    ExecutorService executor = Executors.newSingleThreadExecutor();
-    Future future = null;
-    boolean done;
-    try {
-      future =
-          executor.submit(new Callable<Object>() {
-            @Override
-            public Object call() throws Exception {
-              PGConnection pgConnection = (PGConnection) replConnection;
-
-              PGReplicationStream stream =
-                  pgConnection
-                      .getReplicationAPI()
-                      .replicationStream()
-                      .logical()
-                      .withSlotName(SLOT_NAME)
-                      .withStartPosition(getCurrentLSN())
-                      .withStatusInterval(Math.round(statusInterval / 3), TimeUnit.MILLISECONDS)
-                      .start();
-
-              while (!Thread.interrupted()) {
-                stream.read();
-              }
-
-              return null;
-            }
-          });
-
-      future.get(5, TimeUnit.SECONDS);
-      done = future.isDone();
-    } catch (TimeoutException timeout) {
-      done = future.isDone();
-    } finally {
-      executor.shutdownNow();
-    }
-
-    assertThat(
-        "ReplicationStream should periodically send keep alive message to postgresql to avoid disconnect from server",
-        done, CoreMatchers.equalTo(false)
-    );
-  }
-
-  @Test
-  void restartReplicationFromRestartSlotLSNWhenFeedbackAbsent() throws Exception {
-    PGConnection pgConnection = (PGConnection) replConnection;
-
-    LogSequenceNumber startLSN = getCurrentLSN();
-
-    PGReplicationStream stream =
-        pgConnection
-            .getReplicationAPI()
-            .replicationStream()
-            .logical()
-            .withSlotName(SLOT_NAME)
-            .withStartPosition(startLSN)
-            .withSlotOption("include-xids", false)
-            .withSlotOption("skip-empty-xacts", true)
-            .start();
-
-    Statement st = sqlConnection.createStatement();
-    st.execute("insert into test_logic_table(name) values('first tx changes')");
-    st.close();
-
-    st = sqlConnection.createStatement();
-    st.execute("insert into test_logic_table(name) values('second tx change')");
-    st.close();
-
-    List<String> consumedData = new ArrayList<>();
-    consumedData.addAll(receiveMessageWithoutBlock(stream, 3));
-
-    //emulate replication break
-    replConnection.close();
-    waitStopReplicationSlot();
-
-    replConnection = TestUtil.openReplicationConnection();
-    pgConnection = (PGConnection) replConnection;
-    stream =
-        pgConnection
-            .getReplicationAPI()
-            .replicationStream()
-            .logical()
-            .withSlotName(SLOT_NAME)
-            .withStartPosition(LogSequenceNumber.INVALID_LSN) /* Invalid LSN indicate for start from restart lsn */
-            .withSlotOption("include-xids", false)
-            .withSlotOption("skip-empty-xacts", true)
-            .start();
-
-    consumedData.addAll(receiveMessageWithoutBlock(stream, 3));
-    String result = group(consumedData);
-
-    String wait = group(Arrays.asList(
-        "BEGIN",
-        "table public.test_logic_table: INSERT: pk[integer]:1 name[character varying]:'first tx changes'",
-        "COMMIT",
-        "BEGIN",
-        "table public.test_logic_table: INSERT: pk[integer]:1 name[character varying]:'first tx changes'",
-        "COMMIT"
-    ));
-
-    assertThat(
-        "If was consume message via logical replication stream but wasn't send feedback about apply and flush "
-            + "consumed LSN, if replication crash, server should restart from last success applied lsn, "
-            + "in this case it lsn of start replication slot, so we should consume first 3 message twice",
-        result, equalTo(wait)
-    );
-  }
-
-  @Test
-  void replicationRestartFromLastFeedbackPosition() throws Exception {
-    PGConnection pgConnection = (PGConnection) replConnection;
-
-    LogSequenceNumber startLSN = getCurrentLSN();
-
-    PGReplicationStream stream =
-        pgConnection
-            .getReplicationAPI()
-            .replicationStream()
-            .logical()
-            .withSlotName(SLOT_NAME)
-            .withStartPosition(startLSN)
-            .withSlotOption("include-xids", false)
-            .withSlotOption("skip-empty-xacts", true)
-            .start();
-
-    Statement st = sqlConnection.createStatement();
-    st.execute("insert into test_logic_table(name) values('first tx changes')");
-    st.close();
-
-    st = sqlConnection.createStatement();
-    st.execute("insert into test_logic_table(name) values('second tx change')");
-    st.close();
-
-    List<String> consumedData = new ArrayList<>();
-    consumedData.addAll(receiveMessageWithoutBlock(stream, 3));
-    stream.setFlushedLSN(stream.getLastReceiveLSN());
-    stream.setAppliedLSN(stream.getLastReceiveLSN());
-    stream.forceUpdateStatus();
-
-    //emulate replication break
-    replConnection.close();
-    waitStopReplicationSlot();
-
-    replConnection = TestUtil.openReplicationConnection();
-    pgConnection = (PGConnection) replConnection;
-    stream =
-        pgConnection
-            .getReplicationAPI()
-            .replicationStream()
-            .logical()
-            .withSlotName(SLOT_NAME)
-            .withStartPosition(LogSequenceNumber.INVALID_LSN) /* Invalid LSN indicate for start from restart lsn */
-            .withSlotOption("include-xids", false)
-            .withSlotOption("skip-empty-xacts", true)
-            .start();
-
-    consumedData.addAll(receiveMessageWithoutBlock(stream, 3));
-    String result = group(consumedData);
-
-    String wait = group(Arrays.asList(
-        "BEGIN",
-        "table public.test_logic_table: INSERT: pk[integer]:1 name[character varying]:'first tx changes'",
-        "COMMIT",
-        "BEGIN",
-        "table public.test_logic_table: INSERT: pk[integer]:2 name[character varying]:'second tx change'",
-        "COMMIT"
-    ));
-
-    assertThat(
-        "When we add feedback about applied lsn to replication stream(in this case it's force update status)"
-            + "after restart consume changes via this slot should be started from last success lsn that "
-            + "we send before via force status update, that why we wait consume both transaction without duplicates",
-        result, equalTo(wait));
-  }
-
-  @Test
-  void replicationRestartFromLastFeedbackPositionParallelTransaction() throws Exception {
-    PGConnection pgConnection = (PGConnection) replConnection;
-
-    LogSequenceNumber startLSN = getCurrentLSN();
-
-    PGReplicationStream stream =
-        pgConnection
-            .getReplicationAPI()
-            .replicationStream()
-            .logical()
-            .withSlotName(SLOT_NAME)
-            .withStartPosition(startLSN)
-            .withSlotOption("include-xids", false)
-            .withSlotOption("skip-empty-xacts", true)
-            .start();
-
-    Connection tx1Connection = TestUtil.openPrivilegedDB();
-    tx1Connection.setAutoCommit(false);
-
-    Connection tx2Connection = TestUtil.openPrivilegedDB();
-    tx2Connection.setAutoCommit(false);
-
-    Statement stTx1 = tx1Connection.createStatement();
-    Statement stTx2 = tx2Connection.createStatement();
-
-    stTx1.execute("BEGIN");
-    stTx2.execute("BEGIN");
-
-    stTx1.execute("insert into test_logic_table(name) values('first tx changes')");
-    stTx2.execute("insert into test_logic_table(name) values('second tx changes')");
-
-    tx1Connection.commit();
-    tx2Connection.commit();
-
-    tx1Connection.close();
-    tx2Connection.close();
-
-    List<String> consumedData = new ArrayList<>();
-    consumedData.addAll(receiveMessageWithoutBlock(stream, 3));
-    stream.setFlushedLSN(stream.getLastReceiveLSN());
-    stream.setAppliedLSN(stream.getLastReceiveLSN());
-
-    stream.forceUpdateStatus();
-
-    //emulate replication break
-    replConnection.close();
-    waitStopReplicationSlot();
-
-    replConnection = TestUtil.openReplicationConnection();
-    pgConnection = (PGConnection) replConnection;
-    stream =
-        pgConnection
-            .getReplicationAPI()
-            .replicationStream()
-            .logical()
-            .withSlotName(SLOT_NAME)
-            .withStartPosition(LogSequenceNumber.INVALID_LSN) /* Invalid LSN indicate for start from restart lsn */
-            .withSlotOption("include-xids", false)
-            .withSlotOption("skip-empty-xacts", true)
-            .start();
-
-    Statement st = sqlConnection.createStatement();
-    st.execute("insert into test_logic_table(name) values('third tx changes')");
-    st.close();
-
-    consumedData.addAll(receiveMessageWithoutBlock(stream, 3));
-    String result = group(consumedData);
-
-    String wait = group(Arrays.asList(
-        "BEGIN",
-        "table public.test_logic_table: INSERT: pk[integer]:1 name[character varying]:'first tx changes'",
-        "COMMIT",
-        "BEGIN",
-        "table public.test_logic_table: INSERT: pk[integer]:2 name[character varying]:'second tx changes'",
-        "COMMIT"
-    ));
-
-    assertThat(
-        "When we add feedback about applied lsn to replication stream(in this case it's force update status)"
-            + "after restart consume changes via this slot should be started from last success lsn that "
-            + "we send before via force status update, that why we wait consume both transaction without duplicates",
-        result, equalTo(wait));
-  }
-
-  private void waitStopReplicationSlot() throws SQLException, InterruptedException {
-    while (true) {
-      PreparedStatement statement =
-          sqlConnection.prepareStatement(
-              "select 1 from pg_replication_slots where slot_name = ? and active = true"
-          );
-      statement.setString(1, SLOT_NAME);
-      ResultSet rs = statement.executeQuery();
-      boolean active = rs.next();
-      rs.close();
-      statement.close();
-
-      if (!active) {
-        return;
-      }
-
-      TimeUnit.MILLISECONDS.sleep(10);
-    }
-  }
-
-  private int getKeepAliveTimeout() throws SQLException {
-    Statement statement = sqlConnection.createStatement();
-    ResultSet resultSet = statement.executeQuery(
-        "select setting, unit from pg_settings where name = 'wal_sender_timeout'");
-    int result = 0;
-    if (resultSet.next()) {
-      result = resultSet.getInt(1);
-      String unit = resultSet.getString(2);
-      if ("sec".equals(unit)) {
-        result = (int) TimeUnit.SECONDS.toMillis(result);
-      }
-    }
-
-    return result;
-  }
-
-  private boolean isActiveOnView() throws SQLException {
-    boolean result = false;
-    Statement st = sqlConnection.createStatement();
-    ResultSet rs =
-        st.executeQuery("select * from pg_replication_slots where slot_name = '" + SLOT_NAME + "'");
-    if (rs.next()) {
-      result = rs.getBoolean("active");
-    }
-    rs.close();
-    st.close();
-    return result;
-  }
-
-  private String group(List<String> messages) {
-    StringBuilder builder = new StringBuilder();
-    boolean isFirst = true;
-    for (String str : messages) {
-      if (isFirst) {
-        isFirst = false;
-      } else {
-        builder.append("\n");
-      }
-
-      builder.append(str);
-    }
-
-    return builder.toString();
-  }
-
-  private List<String> receiveMessage(PGReplicationStream stream, int count) throws SQLException {
-    List<String> result = new ArrayList<>(count);
-    for (int index = 0; index < count; index++) {
-      result.add(toString(stream.read()));
-    }
-
-    return result;
-  }
-
-  private List<String> receiveMessageWithoutBlock(PGReplicationStream stream, int count)
-      throws Exception {
-    List<String> result = new ArrayList<>(3);
-    for (int index = 0; index < count; index++) {
-      ByteBuffer message;
-      do {
-        message = stream.readPending();
-
-        if (message == null) {
-          TimeUnit.MILLISECONDS.sleep(2);
-        }
-      } while (message == null);
-
-      result.add(toString(message));
-    }
-
-    return result;
-  }
-
-  private LogSequenceNumber getCurrentLSN() throws SQLException {
-    Statement st = sqlConnection.createStatement();
-    ResultSet rs = null;
-    try {
-      rs = st.executeQuery("select "
-          + (((BaseConnection) sqlConnection).haveMinimumServerVersion(ServerVersion.v10)
-          ? "pg_current_wal_lsn()" : "pg_current_xlog_location()"));
-
-      if (rs.next()) {
-        String lsn = rs.getString(1);
-        return LogSequenceNumber.valueOf(lsn);
-      } else {
-        return LogSequenceNumber.INVALID_LSN;
-      }
-    } finally {
-      if (rs != null) {
-        rs.close();
-      }
-      st.close();
-    }
-  }
-}
diff --git a/pgjdbc/src/test/java/org/postgresql/replication/PhysicalReplicationTest.java b/pgjdbc/src/test/java/org/postgresql/replication/PhysicalReplicationTest.java
deleted file mode 100644
index 2f06741..0000000
--- a/pgjdbc/src/test/java/org/postgresql/replication/PhysicalReplicationTest.java
+++ /dev/null
@@ -1,291 +0,0 @@
-/*
- * Copyright (c) 2016, PostgreSQL Global Development Group
- * See the LICENSE file in the project root for more information.
- */
-
-package org.postgresql.replication;
-
-import static org.hamcrest.CoreMatchers.equalTo;
-import static org.hamcrest.MatcherAssert.assertThat;
-import static org.hamcrest.junit.MatcherAssume.assumeThat;
-
-import org.postgresql.PGConnection;
-import org.postgresql.core.BaseConnection;
-import org.postgresql.core.ServerVersion;
-import org.postgresql.test.TestUtil;
-import org.postgresql.test.annotations.DisabledIfServerVersionBelow;
-import org.postgresql.test.annotations.tags.Replication;
-
-import org.hamcrest.CoreMatchers;
-import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.Test;
-
-import java.nio.ByteBuffer;
-import java.sql.Connection;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.Arrays;
-
-@Replication
-@DisabledIfServerVersionBelow("9.4")
-class PhysicalReplicationTest {
-
-  private static final String SLOT_NAME = "pgjdbc_physical_replication_slot";
-
-  private Connection replConnection;
-  private Connection sqlConnection;
-
-  @BeforeEach
-  void setUp() throws Exception {
-    sqlConnection = TestUtil.openPrivilegedDB();
-    //DriverManager.setLogWriter(new PrintWriter(System.out));
-    replConnection = TestUtil.openReplicationConnection();
-    TestUtil.createTable(sqlConnection, "test_physic_table",
-        "pk serial primary key, name varchar(100)");
-    TestUtil.recreatePhysicalReplicationSlot(sqlConnection, SLOT_NAME);
-  }
-
-  @AfterEach
-  void tearDown() throws Exception {
-    replConnection.close();
-    TestUtil.dropTable(sqlConnection, "test_physic_table");
-    TestUtil.dropReplicationSlot(sqlConnection, SLOT_NAME);
-    sqlConnection.close();
-  }
-
-  @Test
-  void receiveChangesWithoutReplicationSlot() throws Exception {
-    PGConnection pgConnection = (PGConnection) replConnection;
-
-    LogSequenceNumber lsn = getCurrentLSN();
-
-    Statement st = sqlConnection.createStatement();
-    st.execute("insert into test_physic_table(name) values('previous value')");
-    st.close();
-
-    PGReplicationStream stream =
-        pgConnection
-            .getReplicationAPI()
-            .replicationStream()
-            .physical()
-            .withStartPosition(lsn)
-            .start();
-
-    ByteBuffer read = stream.read();
-
-    assertThat("Physical replication can be start without replication slot",
-        read, CoreMatchers.notNullValue()
-    );
-  }
-
-  @Test
-  void receiveChangesWithReplicationSlot() throws Exception {
-    PGConnection pgConnection = (PGConnection) replConnection;
-
-    LogSequenceNumber lsn = getCurrentLSN();
-
-    Statement st = sqlConnection.createStatement();
-    st.execute("insert into test_physic_table(name) values('previous value')");
-    st.close();
-
-    PGReplicationStream stream =
-        pgConnection
-            .getReplicationAPI()
-            .replicationStream()
-            .physical()
-            .withSlotName(SLOT_NAME)
-            .withStartPosition(lsn)
-            .start();
-
-    ByteBuffer read = stream.read();
-
-    assertThat(read, CoreMatchers.notNullValue());
-  }
-
-  @Test
-  void afterStartStreamingDBSlotStatusActive() throws Exception {
-    PGConnection pgConnection = (PGConnection) replConnection;
-
-    LogSequenceNumber lsn = getCurrentLSN();
-
-    PGReplicationStream stream =
-        pgConnection
-            .getReplicationAPI()
-            .replicationStream()
-            .physical()
-            .withSlotName(SLOT_NAME)
-            .withStartPosition(lsn)
-            .start();
-
-    boolean isActive = isActiveOnView();
-    stream.close();
-
-    assertThat(
-        "After start streaming, database status should be update on view pg_replication_slots to active",
-        isActive, equalTo(true)
-    );
-  }
-
-  @Test
-  void afterCloseReplicationStreamDBSlotStatusNotActive() throws Exception {
-    PGConnection pgConnection = (PGConnection) replConnection;
-
-    LogSequenceNumber lsn = getCurrentLSN();
-
-    PGReplicationStream stream =
-        pgConnection
-            .getReplicationAPI()
-            .replicationStream()
-            .physical()
-            .withSlotName(SLOT_NAME)
-            .withStartPosition(lsn)
-            .start();
-
-    boolean isActive = isActiveOnView();
-    assumeThat(isActive, equalTo(true));
-
-    stream.close();
-
-    isActive = isActiveOnView();
-    assertThat(
-        "Execute close method on PGREplicationStream should lead to stop replication, "
-            + "as result we wait that on view pg_replication_slots status for slot will change to no active",
-        isActive, equalTo(false)
-    );
-  }
-
-  @Test
-  void walRecordCanBeRepeatBeRestartReplication() throws Exception {
-    PGConnection pgConnection = (PGConnection) replConnection;
-
-    LogSequenceNumber lsn = getCurrentLSN();
-
-    Statement st = sqlConnection.createStatement();
-    st.execute("insert into test_physic_table(name) values('previous value')");
-    st.close();
-
-    PGReplicationStream stream =
-        pgConnection
-            .getReplicationAPI()
-            .replicationStream()
-            .physical()
-            .withSlotName(SLOT_NAME)
-            .withStartPosition(lsn)
-            .start();
-
-    byte[] first = toByteArray(stream.read());
-    stream.close();
-
-    //reopen stream
-    stream =
-        pgConnection
-            .getReplicationAPI()
-            .replicationStream()
-            .physical()
-            .withSlotName(SLOT_NAME)
-            .withStartPosition(lsn)
-            .start();
-
-    byte[] second = toByteArray(stream.read());
-    stream.close();
-
-    boolean arrayEquals = Arrays.equals(first, second);
-    assertThat("On same replication connection we can restart replication from already "
-            + "received LSN if they not recycled yet on backend",
-        arrayEquals, CoreMatchers.equalTo(true)
-    );
-  }
-
-  @Test
-  void restartPhysicalReplicationWithoutRepeatMessage() throws Exception {
-    PGConnection pgConnection = (PGConnection) replConnection;
-
-    LogSequenceNumber lsn = getCurrentLSN();
-
-    Statement st = sqlConnection.createStatement();
-    st.execute("insert into test_physic_table(name) values('first value')");
-    st.close();
-
-    PGReplicationStream stream =
-        pgConnection
-            .getReplicationAPI()
-            .replicationStream()
-            .physical()
-            .withSlotName(SLOT_NAME)
-            .withStartPosition(lsn)
-            .start();
-
-    byte[] streamOneFirstPart = toByteArray(stream.read());
-    LogSequenceNumber restartLSN = stream.getLastReceiveLSN();
-
-    st = sqlConnection.createStatement();
-    st.execute("insert into test_physic_table(name) values('second value')");
-    st.close();
-
-    byte[] streamOneSecondPart = toByteArray(stream.read());
-    stream.close();
-
-    //reopen stream
-    stream =
-        pgConnection
-            .getReplicationAPI()
-            .replicationStream()
-            .physical()
-            .withSlotName(SLOT_NAME)
-            .withStartPosition(restartLSN)
-            .start();
-
-    byte[] streamTwoFirstPart = toByteArray(stream.read());
-    stream.close();
-
-    boolean arrayEquals = Arrays.equals(streamOneSecondPart, streamTwoFirstPart);
-    assertThat("Interrupt physical replication and restart from lastReceiveLSN should not "
-            + "lead to repeat messages skip part of them",
-        arrayEquals, CoreMatchers.equalTo(true)
-    );
-  }
-
-  private boolean isActiveOnView() throws SQLException {
-    boolean result = false;
-    Statement st = sqlConnection.createStatement();
-    ResultSet
-        rs =
-        st.executeQuery("select * from pg_replication_slots where slot_name = '" + SLOT_NAME + "'");
-    if (rs.next()) {
-      result = rs.getBoolean("active");
-    }
-    rs.close();
-    st.close();
-    return result;
-  }
-
-  private byte[] toByteArray(ByteBuffer buffer) {
-    int offset = buffer.arrayOffset();
-    byte[] source = buffer.array();
-    return Arrays.copyOfRange(source, offset, source.length);
-  }
-
-  private LogSequenceNumber getCurrentLSN() throws SQLException {
-    Statement st = sqlConnection.createStatement();
-    ResultSet rs = null;
-    try {
-      rs = st.executeQuery("select "
-          + (((BaseConnection) sqlConnection).haveMinimumServerVersion(ServerVersion.v10)
-          ? "pg_current_wal_lsn()" : "pg_current_xlog_location()"));
-
-      if (rs.next()) {
-        String lsn = rs.getString(1);
-        return LogSequenceNumber.valueOf(lsn);
-      } else {
-        return LogSequenceNumber.INVALID_LSN;
-      }
-    } finally {
-      if (rs != null) {
-        rs.close();
-      }
-      st.close();
-    }
-  }
-}
diff --git a/pgjdbc/src/test/java/org/postgresql/replication/ReplicationConnectionTest.java b/pgjdbc/src/test/java/org/postgresql/replication/ReplicationConnectionTest.java
deleted file mode 100644
index 5325ff4..0000000
--- a/pgjdbc/src/test/java/org/postgresql/replication/ReplicationConnectionTest.java
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Copyright (c) 2016, PostgreSQL Global Development Group
- * See the LICENSE file in the project root for more information.
- */
-
-package org.postgresql.replication;
-
-import static org.hamcrest.CoreMatchers.equalTo;
-import static org.hamcrest.MatcherAssert.assertThat;
-
-import org.postgresql.PGConnection;
-import org.postgresql.test.TestUtil;
-import org.postgresql.test.annotations.DisabledIfServerVersionBelow;
-import org.postgresql.test.annotations.tags.Replication;
-
-import org.hamcrest.CoreMatchers;
-import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.Test;
-
-import java.sql.Connection;
-import java.sql.ResultSet;
-import java.sql.Statement;
-
-@Replication
-@DisabledIfServerVersionBelow("9.4")
-class ReplicationConnectionTest {
-  private Connection replConnection;
-
-  @BeforeEach
-  void setUp() throws Exception {
-    replConnection = TestUtil.openReplicationConnection();
-    //DriverManager.setLogWriter(new PrintWriter(System.out));
-  }
-
-  @AfterEach
-  void tearDown() throws Exception {
-    replConnection.close();
-  }
-
-  @Test
-  void isValid() throws Exception {
-    boolean result = replConnection.isValid(3);
-
-    PGConnection connection = (PGConnection) replConnection;
-    connection.getBackendPID();
-
-    assertThat("Replication connection as Simple connection can be check on valid",
-        result, equalTo(true)
-    );
-  }
-
-  @Test
-  void connectionNotValidWhenSessionTerminated() throws Exception {
-    TestUtil.terminateBackend(replConnection);
-
-    boolean result = replConnection.isValid(3);
-
-    assertThat("When postgresql terminate session with replication connection, "
-            + "isValid() should return false, because next query on this connection will fail",
-        result, equalTo(false)
-    );
-  }
-
-  @Test
-  void replicationCommandResultSetAccessByIndex() throws Exception {
-    Statement statement = replConnection.createStatement();
-    ResultSet resultSet = statement.executeQuery("IDENTIFY_SYSTEM");
-
-    String xlogpos = null;
-    if (resultSet.next()) {
-      xlogpos = resultSet.getString(3);
-    }
-
-    resultSet.close();
-    statement.close();
-
-    assertThat("Replication protocol supports a limited number of commands, "
-            + "and it command can be execute via Statement(simple query protocol), "
-            + "and result fetch via ResultSet",
-        xlogpos, CoreMatchers.notNullValue()
-    );
-  }
-
-  @Test
-  void replicationCommandResultSetAccessByName() throws Exception {
-    Statement statement = replConnection.createStatement();
-    ResultSet resultSet = statement.executeQuery("IDENTIFY_SYSTEM");
-
-    String xlogpos = null;
-    if (resultSet.next()) {
-      xlogpos = resultSet.getString("xlogpos");
-    }
-
-    resultSet.close();
-    statement.close();
-
-    assertThat("Replication protocol supports a limited number of commands, "
-            + "and it command can be execute via Statement(simple query protocol), "
-            + "and result fetch via ResultSet",
-        xlogpos, CoreMatchers.notNullValue()
-    );
-  }
-}
diff --git a/pgjdbc/src/test/java/org/postgresql/replication/ReplicationSlotTest.java b/pgjdbc/src/test/java/org/postgresql/replication/ReplicationSlotTest.java
deleted file mode 100644
index 5f3f18c..0000000
--- a/pgjdbc/src/test/java/org/postgresql/replication/ReplicationSlotTest.java
+++ /dev/null
@@ -1,395 +0,0 @@
-/*
- * Copyright (c) 2016, PostgreSQL Global Development Group
- * See the LICENSE file in the project root for more information.
- */
-
-package org.postgresql.replication;
-
-import static org.hamcrest.MatcherAssert.assertThat;
-import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertNotNull;
-import static org.junit.jupiter.api.Assertions.assertNull;
-import static org.junit.jupiter.api.Assertions.assertThrows;
-import static org.junit.jupiter.api.Assertions.fail;
-import static org.junit.jupiter.api.Assumptions.assumeFalse;
-import static org.junit.jupiter.api.Assumptions.assumeTrue;
-
-import org.postgresql.PGConnection;
-import org.postgresql.core.BaseConnection;
-import org.postgresql.core.ServerVersion;
-import org.postgresql.test.TestUtil;
-import org.postgresql.test.annotations.DisabledIfServerVersionBelow;
-import org.postgresql.test.annotations.tags.Replication;
-
-import org.hamcrest.CoreMatchers;
-import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.Test;
-
-import java.sql.Connection;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.SQLFeatureNotSupportedException;
-import java.sql.Statement;
-
-@Replication
-@DisabledIfServerVersionBelow("9.4")
-class ReplicationSlotTest {
-  private Connection sqlConnection;
-  private Connection replConnection;
-
-  private String slotName;
-
-  @BeforeEach
-  void setUp() throws Exception {
-    sqlConnection = TestUtil.openPrivilegedDB();
-    replConnection = TestUtil.openReplicationConnection();
-    //DriverManager.setLogWriter(new PrintWriter(System.out));
-  }
-
-  @AfterEach
-  void tearDown() throws Exception {
-    replConnection.close();
-    dropReplicationSlot();
-    slotName = null;
-    sqlConnection.close();
-  }
-
-  @Test
-  void notAvailableCreatePhysicalSlotWithoutSlotName() throws Exception {
-    assertThrows(IllegalArgumentException.class, () -> {
-      PGConnection pgConnection = (PGConnection) replConnection;
-
-      pgConnection
-          .getReplicationAPI()
-          .createReplicationSlot()
-          .physical()
-          .make();
-
-      fail("Replication slot name it required parameter and can't be null");
-    });
-  }
-
-  @Test
-  void createPhysicalSlot() throws Exception {
-    PGConnection pgConnection = (PGConnection) replConnection;
-
-    slotName = "pgjdbc_test_create_physical_replication_slot";
-
-    pgConnection
-        .getReplicationAPI()
-        .createReplicationSlot()
-        .physical()
-        .withSlotName(slotName)
-        .make();
-
-    boolean result = isPhysicalSlotExists(slotName);
-
-    assertThat("Slot should exist", result, CoreMatchers.equalTo(true));
-
-    result = isSlotTemporary(slotName);
-
-    assertThat("Slot should not be temporary by default", result, CoreMatchers.equalTo(false));
-  }
-
-  @Test
-  void createTemporaryPhysicalSlotPg10AndHigher()
-      throws SQLException {
-    assumeTrue(TestUtil.haveMinimumServerVersion(replConnection, ServerVersion.v10));
-
-    BaseConnection baseConnection = (BaseConnection) replConnection;
-
-    String slotName = "pgjdbc_test_create_temporary_physical_replication_slot_pg_10_or_higher";
-
-    assertDoesNotThrow(() -> {
-
-      baseConnection
-          .getReplicationAPI()
-          .createReplicationSlot()
-          .physical()
-          .withSlotName(slotName)
-          .withTemporaryOption()
-          .make();
-
-    }, "PostgreSQL >= 10 should support temporary replication slots");
-
-    boolean result = isSlotTemporary(slotName);
-
-    assertThat("Slot is not temporary", result, CoreMatchers.equalTo(true));
-  }
-
-  @Test
-  void createTemporaryPhysicalSlotPgLowerThan10()
-      throws SQLException {
-    assumeFalse(TestUtil.haveMinimumServerVersion(replConnection, ServerVersion.v10));
-
-    BaseConnection baseConnection = (BaseConnection) replConnection;
-
-    String slotName = "pgjdbc_test_create_temporary_physical_replication_slot_pg_lower_than_10";
-
-    try {
-
-      baseConnection
-          .getReplicationAPI()
-          .createReplicationSlot()
-          .physical()
-          .withSlotName(slotName)
-          .withTemporaryOption()
-          .make();
-
-      fail("PostgreSQL < 10 does not support temporary replication slots");
-
-    } catch (SQLFeatureNotSupportedException e) {
-      // success
-    }
-  }
-
-  @Test
-  void dropPhysicalSlot() throws Exception {
-    PGConnection pgConnection = (PGConnection) replConnection;
-
-    slotName = "pgjdbc_test_create_physical_replication_slot";
-
-    pgConnection
-        .getReplicationAPI()
-        .createReplicationSlot()
-        .physical()
-        .withSlotName(slotName)
-        .make();
-
-    pgConnection
-        .getReplicationAPI()
-        .dropReplicationSlot(slotName);
-
-    boolean result = isPhysicalSlotExists(slotName);
-
-    slotName = null;
-
-    assertThat(result, CoreMatchers.equalTo(false));
-  }
-
-  @Test
-  void notAvailableCreateLogicalSlotWithoutSlotName() throws Exception {
-    assertThrows(IllegalArgumentException.class, () -> {
-      PGConnection pgConnection = (PGConnection) replConnection;
-
-      pgConnection
-          .getReplicationAPI()
-          .createReplicationSlot()
-          .logical()
-          .withOutputPlugin("test_decoding")
-          .make();
-
-      fail("Replication slot name it required parameter and can't be null");
-    });
-  }
-
-  @Test
-  void notAvailableCreateLogicalSlotWithoutOutputPlugin() throws Exception {
-    assertThrows(IllegalArgumentException.class, () -> {
-      PGConnection pgConnection = (PGConnection) replConnection;
-
-      pgConnection
-          .getReplicationAPI()
-          .createReplicationSlot()
-          .logical()
-          .withSlotName("pgjdbc_test_create_logical_replication_slot")
-          .make();
-
-      fail("output plugin required parameter for logical replication slot and can't be null");
-    });
-  }
-
-  @Test
-  void createLogicalSlot() throws Exception {
-    PGConnection pgConnection = (PGConnection) replConnection;
-
-    slotName = "pgjdbc_test_create_logical_replication_slot";
-
-    pgConnection
-        .getReplicationAPI()
-        .createReplicationSlot()
-        .logical()
-        .withSlotName(slotName)
-        .withOutputPlugin("test_decoding")
-        .make();
-
-    boolean result = isLogicalSlotExists(slotName);
-
-    assertThat("Slot should exist", result, CoreMatchers.equalTo(true));
-
-    result = isSlotTemporary(slotName);
-
-    assertThat("Slot should not be temporary by default", result, CoreMatchers.equalTo(false));
-  }
-
-  @Test
-  void createLogicalSlotReturnedInfo() throws Exception {
-    PGConnection pgConnection = (PGConnection) replConnection;
-
-    slotName = "pgjdbc_test_create_logical_replication_slot_info";
-
-    ReplicationSlotInfo info = pgConnection
-        .getReplicationAPI()
-        .createReplicationSlot()
-        .logical()
-        .withSlotName(slotName)
-        .withOutputPlugin("test_decoding")
-        .make();
-
-    assertEquals(slotName, info.getSlotName());
-    assertEquals(ReplicationType.LOGICAL, info.getReplicationType());
-    assertNotNull(info.getConsistentPoint());
-    assertNotNull(info.getSnapshotName());
-    assertEquals("test_decoding", info.getOutputPlugin());
-  }
-
-  @Test
-  void createPhysicalSlotReturnedInfo() throws Exception {
-    PGConnection pgConnection = (PGConnection) replConnection;
-
-    slotName = "pgjdbc_test_create_physical_replication_slot_info";
-
-    ReplicationSlotInfo info = pgConnection
-        .getReplicationAPI()
-        .createReplicationSlot()
-        .physical()
-        .withSlotName(slotName)
-        .make();
-
-    assertEquals(slotName, info.getSlotName());
-    assertEquals(ReplicationType.PHYSICAL, info.getReplicationType());
-    assertNotNull(info.getConsistentPoint());
-    assertNull(info.getSnapshotName());
-    assertNull(info.getOutputPlugin());
-  }
-
-  @Test
-  void createTemporaryLogicalSlotPg10AndHigher()
-      throws SQLException {
-    assumeTrue(TestUtil.haveMinimumServerVersion(replConnection, ServerVersion.v10));
-
-    BaseConnection baseConnection = (BaseConnection) replConnection;
-
-    String slotName = "pgjdbc_test_create_temporary_logical_replication_slot_pg_10_or_higher";
-
-    assertDoesNotThrow(() -> {
-
-      baseConnection
-          .getReplicationAPI()
-          .createReplicationSlot()
-          .logical()
-          .withSlotName(slotName)
-          .withOutputPlugin("test_decoding")
-          .withTemporaryOption()
-          .make();
-
-    }, "PostgreSQL >= 10 should support temporary replication slots");
-
-    boolean result = isSlotTemporary(slotName);
-
-    assertThat("Slot is not temporary", result, CoreMatchers.equalTo(true));
-  }
-
-  @Test
-  void createTemporaryLogicalSlotPgLowerThan10()
-      throws SQLException {
-    assumeFalse(TestUtil.haveMinimumServerVersion(replConnection, ServerVersion.v10));
-
-    BaseConnection baseConnection = (BaseConnection) replConnection;
-
-    String slotName = "pgjdbc_test_create_temporary_logical_replication_slot_pg_lower_than_10";
-
-    try {
-
-      baseConnection
-          .getReplicationAPI()
-          .createReplicationSlot()
-          .logical()
-          .withSlotName(slotName)
-          .withOutputPlugin("test_decoding")
-          .withTemporaryOption()
-          .make();
-
-      fail("PostgreSQL < 10 does not support temporary replication slots");
-
-    } catch (SQLFeatureNotSupportedException e) {
-      // success
-    }
-  }
-
-  @Test
-  void dropLogicalSlot() throws Exception {
-    PGConnection pgConnection = (PGConnection) replConnection;
-
-    slotName = "pgjdbc_test_create_logical_replication_slot";
-
-    pgConnection
-        .getReplicationAPI()
-        .createReplicationSlot()
-        .logical()
-        .withSlotName(slotName)
-        .withOutputPlugin("test_decoding")
-        .make();
-
-    pgConnection
-        .getReplicationAPI()
-        .dropReplicationSlot(slotName);
-
-    boolean result = isLogicalSlotExists(slotName);
-
-    slotName = null;
-
-    assertThat(result, CoreMatchers.equalTo(false));
-  }
-
-  private boolean isPhysicalSlotExists(String slotName) throws SQLException {
-    boolean result;
-
-    Statement st = sqlConnection.createStatement();
-    ResultSet resultSet = st.executeQuery(
-        "select * from pg_replication_slots where slot_name = '" + slotName
-            + "' and slot_type = 'physical'");
-    result = resultSet.next();
-    resultSet.close();
-    st.close();
-    return result;
-  }
-
-  private boolean isLogicalSlotExists(String slotName) throws SQLException {
-    boolean result;
-
-    Statement st = sqlConnection.createStatement();
-    ResultSet resultSet = st.executeQuery(
-        "select 1 from pg_replication_slots where slot_name = '" + slotName
-            + "' and slot_type = 'logical'");
-    result = resultSet.next();
-    resultSet.close();
-    st.close();
-    return result;
-  }
-
-  private boolean isSlotTemporary(String slotName) throws SQLException {
-    if (!TestUtil.haveMinimumServerVersion(sqlConnection, ServerVersion.v10)) {
-      return false;
-    }
-
-    boolean result;
-
-    Statement st = sqlConnection.createStatement();
-    ResultSet resultSet = st.executeQuery(
-            "select 1 from pg_replication_slots where slot_name = '" + slotName
-                    + "' and temporary = true");
-    result = resultSet.next();
-    resultSet.close();
-    st.close();
-    return result;
-  }
-
-  private void dropReplicationSlot() throws Exception {
-    if (slotName != null) {
-      TestUtil.dropReplicationSlot(sqlConnection, slotName);
-    }
-  }
-}
diff --git a/pgjdbc/src/test/java/org/postgresql/replication/ReplicationTestSuite.java b/pgjdbc/src/test/java/org/postgresql/replication/ReplicationTestSuite.java
deleted file mode 100644
index 3d17fd3..0000000
--- a/pgjdbc/src/test/java/org/postgresql/replication/ReplicationTestSuite.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Copyright (c) 2016, PostgreSQL Global Development Group
- * See the LICENSE file in the project root for more information.
- */
-
-package org.postgresql.replication;
-
-import org.postgresql.core.ServerVersion;
-import org.postgresql.test.TestUtil;
-
-import org.junit.AssumptionViolatedException;
-import org.junit.jupiter.api.BeforeAll;
-import org.junit.runner.RunWith;
-import org.junit.runners.Suite;
-
-import java.sql.Connection;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-
-@RunWith(Suite.class)
-@Suite.SuiteClasses({
-    CopyBothResponseTest.class,
-    LogicalReplicationStatusTest.class,
-    LogicalReplicationTest.class,
-    LogSequenceNumberTest.class,
-    PhysicalReplicationTest.class,
-    ReplicationConnectionTest.class,
-    ReplicationSlotTest.class,
-})
-class ReplicationTestSuite {
-
-  @BeforeAll
-  static void setUp() throws Exception {
-    Connection connection = TestUtil.openDB();
-    try {
-      if (TestUtil.haveMinimumServerVersion(connection, ServerVersion.v9_0)) {
-        assumeWalSenderEnabled(connection);
-        assumeReplicationRole(connection);
-      } else {
-        throw new AssumptionViolatedException(
-            "Skip replication test because current database version "
-                + "too old and don't contain replication API"
-        );
-      }
-    } finally {
-      connection.close();
-    }
-  }
-
-  private static void assumeWalSenderEnabled(Connection connection) throws SQLException {
-    Statement stmt = connection.createStatement();
-    ResultSet rs = stmt.executeQuery("SHOW max_wal_senders");
-    rs.next();
-    int maxWalSenders = rs.getInt(1);
-    rs.close();
-    stmt.close();
-
-    if (maxWalSenders == 0) {
-      throw new AssumptionViolatedException(
-          "Skip replication test because max_wal_senders = 0");
-    }
-  }
-
-  private static void assumeReplicationRole(Connection connection) throws SQLException {
-    Statement stmt = connection.createStatement();
-    ResultSet rs =
-        stmt.executeQuery("SELECT usename, userepl FROM pg_user WHERE usename = current_user");
-    rs.next();
-    String userName = rs.getString(1);
-    boolean replicationGrant = rs.getBoolean(2);
-    rs.close();
-    stmt.close();
-
-    if (!replicationGrant) {
-      throw new AssumptionViolatedException(
-          "Skip replication test because user '" + userName + "' doesn't have replication role");
-    }
-  }
-}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/TestUtil.java b/pgjdbc/src/test/java/org/postgresql/test/TestUtil.java
new file mode 100644
index 0000000..c27d4ea
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/TestUtil.java
@@ -0,0 +1,1178 @@
+/*
+ * Copyright (c) 2004, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.test;
+
+import java.io.Closeable;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.ResultSetMetaData;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.time.Duration;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Properties;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import org.junit.Assert;
+import org.junit.Assume;
+import org.postgresql.PGConnection;
+import org.postgresql.PGProperty;
+import org.postgresql.core.BaseConnection;
+import org.postgresql.core.ServerVersion;
+import org.postgresql.core.TransactionState;
+import org.postgresql.core.Version;
+import org.postgresql.jdbc.GSSEncMode;
+import org.postgresql.jdbc.PgConnection;
+import org.postgresql.jdbc.ResourceLock;
+import org.postgresql.util.PSQLException;
+
+/**
+ * Utility class for JDBC tests.
+ */
+public class TestUtil {
+    /*
+     * The case is as follows:
+     * 1. Typically the database and hostname are taken from System.properties or build.properties or build.local.properties
+     *    That enables overriding the test DB via a system property
+     * 2. There are tests where different DBs should be used (e.g. SSL tests), so we can't just use DB name from system property
+     *    That is why _test_ properties exist: they take precedence over System.properties and build.properties
+     */
+    public static final String SERVER_HOST_PORT_PROP = "_test_hostport";
+    public static final String DATABASE_PROP = "_test_database";
+
+    private static final ResourceLock lock = new ResourceLock();
+    private static boolean initialized;
+    private static Properties sslTestProperties;
+
+    static {
+        try {
+            initDriver();
+        } catch (RuntimeException e) {
+            throw e;
+        } catch (Exception e) {
+            throw new RuntimeException("Unable to initialize driver", e);
+        }
+    }
+
+    /*
+     * Returns the Test database JDBC URL
+     */
+    public static String getURL() {
+        return getURL(getServer(), getPort());
+    }
+
+    public static String getURL(String database) {
+        return getURL(getServer() + ":" + getPort(), database);
+    }
+
+    public static String getURL(String server, int port) {
+        return getURL(server + ":" + port, getDatabase());
+    }
+
+    public static String getURL(String hostport, String database) {
+        String protocolVersion = "";
+        if (getProtocolVersion() != 0) {
+            protocolVersion = "&protocolVersion=" + getProtocolVersion();
+        }
+
+        String options = "";
+        if (getOptions() != null) {
+            options = "&options=" + getOptions();
+        }
+
+        String binaryTransfer = "";
+        if (getBinaryTransfer() != null && !"".equals(getBinaryTransfer())) {
+            binaryTransfer = "&binaryTransfer=" + getBinaryTransfer();
+        }
+
+        String receiveBufferSize = "";
+        if (getReceiveBufferSize() != -1) {
+            receiveBufferSize = "&receiveBufferSize=" + getReceiveBufferSize();
+        }
+
+        String sendBufferSize = "";
+        if (getSendBufferSize() != -1) {
+            sendBufferSize = "&sendBufferSize=" + getSendBufferSize();
+        }
+
+        String ssl = "";
+        if (getSSL() != null) {
+            ssl = "&ssl=" + getSSL();
+        }
+
+        return "jdbc:postgresql://"
+                + hostport + "/"
+                + database
+                + "?ApplicationName=Driver Tests"
+                + protocolVersion
+                + options
+                + binaryTransfer
+                + receiveBufferSize
+                + sendBufferSize
+                + ssl;
+    }
+
+    /*
+     * Returns the Test server
+     */
+    public static String getServer() {
+        return System.getProperty("server", "localhost");
+    }
+
+    /*
+     * Returns the Test port
+     */
+    public static int getPort() {
+        return Integer.parseInt(System.getProperty("port", System.getProperty("def_pgport")));
+    }
+
+    /*
+     * Returns the server side prepared statement threshold.
+     */
+    public static int getPrepareThreshold() {
+        return Integer.parseInt(System.getProperty("preparethreshold", "5"));
+    }
+
+    public static int getProtocolVersion() {
+        return Integer.parseInt(System.getProperty("protocolVersion", "0"));
+    }
+
+    public static String getOptions() {
+        return System.getProperty("options");
+    }
+
+    /*
+     * Returns the Test database
+     */
+    public static String getDatabase() {
+        return System.getProperty("database");
+    }
+
+    /*
+     * Returns the Postgresql username
+     */
+    public static String getUser() {
+        return System.getProperty("username");
+    }
+
+    /*
+     * Returns the user's password
+     */
+    public static String getPassword() {
+        return System.getProperty("password");
+    }
+
+    /*
+     * Returns password for default callbackhandler
+     */
+    public static String getSslPassword() {
+        return System.getProperty(PGProperty.SSL_PASSWORD.getName());
+    }
+
+    /*
+     *  Return the GSSEncMode for the tests
+     */
+    public static GSSEncMode getGSSEncMode() throws PSQLException {
+        return GSSEncMode.of(System.getProperties());
+    }
+
+    /*
+     * Returns the user for SSPI authentication tests
+     */
+    public static String getSSPIUser() {
+        return System.getProperty("sspiusername");
+    }
+
+    /*
+     * postgres like user
+     */
+    public static String getPrivilegedUser() {
+        return System.getProperty("privilegedUser");
+    }
+
+    public static String getPrivilegedPassword() {
+        return System.getProperty("privilegedPassword");
+    }
+
+    /*
+     * Returns the binary transfer mode to use
+     */
+    public static String getBinaryTransfer() {
+        return System.getProperty("binaryTransfer");
+    }
+
+    public static int getSendBufferSize() {
+        return Integer.parseInt(System.getProperty("sendBufferSize", "-1"));
+    }
+
+    public static int getReceiveBufferSize() {
+        return Integer.parseInt(System.getProperty("receiveBufferSize", "-1"));
+    }
+
+    public static String getSSL() {
+        return System.getProperty("ssl");
+    }
+
+    public static Properties loadPropertyFiles(String... names) {
+        Properties p = new Properties();
+        for (String name : names) {
+            for (int i = 0; i < 2; i++) {
+                // load x.properties, then x.local.properties
+                if (i == 1 && name.endsWith(".properties") && !name.endsWith(".local.properties")) {
+                    name = name.replaceAll("\\.properties$", ".local.properties");
+                }
+                File f = getFile(name);
+                if (!f.exists()) {
+                    System.out.println("Configuration file " + f.getAbsolutePath()
+                            + " does not exist. Consider adding it to specify test db host and login");
+                    continue;
+                }
+                try {
+                    p.load(new FileInputStream(f));
+                } catch (IOException ex) {
+                    // ignore
+                }
+            }
+        }
+        return p;
+    }
+
+    private static void initSslTestProperties() {
+        try (ResourceLock ignore = lock.obtain()) {
+            if (sslTestProperties == null) {
+                sslTestProperties = TestUtil.loadPropertyFiles("ssltest.properties");
+            }
+        }
+    }
+
+    private static String getSslTestProperty(String name) {
+        initSslTestProperties();
+        return sslTestProperties.getProperty(name);
+    }
+
+    public static void assumeSslTestsEnabled() {
+        Assume.assumeTrue(Boolean.parseBoolean(getSslTestProperty("enable_ssl_tests")));
+    }
+
+    public static String getSslTestCertPath(String name) {
+        File certdir = TestUtil.getFile(getSslTestProperty("certdir"));
+        return new File(certdir, name).getAbsolutePath();
+    }
+
+    public static void initDriver() {
+        try (ResourceLock ignore = lock.obtain()) {
+            if (initialized) {
+                return;
+            }
+
+            Properties p = loadPropertyFiles("build.properties");
+            p.putAll(System.getProperties());
+            System.getProperties().putAll(p);
+
+            initialized = true;
+        }
+    }
+
+    /**
+     * Resolves a file path, taking {@code build.properties.relative.path} into account. This is a
+     * bit tricky since, during a Maven release, Maven performs a temporary checkout into the
+     * {@code core/target/checkout} folder, so the script must somehow locate {@code build.local.properties}
+     *
+     * @param name original name of the file, as if it was in the root pgjdbc folder
+     * @return actual location of the file
+     */
+    public static File getFile(String name) {
+        if (name == null) {
+            throw new IllegalArgumentException("null file name is not expected");
+        }
+        if (name.startsWith("/")) {
+            return new File(name);
+        }
+        return new File(System.getProperty("build.properties.relative.path", "../"), name);
+    }
+
+    /**
+     * Get a connection using a privileged user, mostly for tests that require the ability to
+     * load C functions (as of 2014-04-14).
+     *
+     * @return connection opened as a privileged user, for tests that require the ability to
+     * load C functions
+     */
+    public static Connection openPrivilegedDB() throws SQLException {
+        return openPrivilegedDB(getDatabase());
+    }
+
+    public static Connection openPrivilegedDB(String databaseName) throws SQLException {
+        initDriver();
+        Properties properties = new Properties();
+
+        PGProperty.GSS_ENC_MODE.set(properties, getGSSEncMode().value);
+        PGProperty.USER.set(properties, getPrivilegedUser());
+        PGProperty.PASSWORD.set(properties, getPrivilegedPassword());
+        PGProperty.OPTIONS.set(properties, "-c synchronous_commit=on");
+        return DriverManager.getConnection(getURL(databaseName), properties);
+
+    }
+
+    public static Connection openReplicationConnection() throws Exception {
+        Properties properties = new Properties();
+        PGProperty.ASSUME_MIN_SERVER_VERSION.set(properties, "9.4");
+        PGProperty.PROTOCOL_VERSION.set(properties, "3");
+        PGProperty.REPLICATION.set(properties, "database");
+        // Only the simple query protocol is available on replication connections
+        PGProperty.PREFER_QUERY_MODE.set(properties, "simple");
+        PGProperty.USER.set(properties, TestUtil.getPrivilegedUser());
+        PGProperty.PASSWORD.set(properties, TestUtil.getPrivilegedPassword());
+        PGProperty.OPTIONS.set(properties, "-c synchronous_commit=on");
+        return TestUtil.openDB(properties);
+    }
+
+    /**
+     * Helper - opens a connection.
+     *
+     * @return connection
+     */
+    public static Connection openDB() throws SQLException {
+        return openDB(new Properties());
+    }
+
+    /*
+     * Helper - opens a connection with the allowance for passing additional parameters, like
+     * "compatible".
+     */
+    public static Connection openDB(Properties props) throws SQLException {
+        initDriver();
+
+        // Allow properties to override the user name.
+        String user = PGProperty.USER.getOrDefault(props);
+        if (user == null) {
+            user = getUser();
+        }
+        if (user == null) {
+            throw new IllegalArgumentException(
+                    "user name is not specified. Please specify 'username' property via -D or build.properties");
+        }
+        PGProperty.USER.set(props, user);
+
+        // Allow properties to override the password.
+        String password = PGProperty.PASSWORD.getOrDefault(props);
+        if (password == null) {
+            password = getPassword() != null ? getPassword() : "";
+        }
+        PGProperty.PASSWORD.set(props, password);
+
+        String sslPassword = getSslPassword();
+        if (sslPassword != null) {
+            PGProperty.SSL_PASSWORD.set(props, sslPassword);
+        }
+
+        if (!props.containsKey(PGProperty.PREPARE_THRESHOLD.getName())) {
+            PGProperty.PREPARE_THRESHOLD.set(props, getPrepareThreshold());
+        }
+        if (!props.containsKey(PGProperty.PREFER_QUERY_MODE.getName())) {
+            String value = System.getProperty(PGProperty.PREFER_QUERY_MODE.getName());
+            if (value != null) {
+                props.put(PGProperty.PREFER_QUERY_MODE.getName(), value);
+            }
+        }
+        // Allow BaseTest4-derived tests to override host, port, and database
+        String hostport = props.getProperty(SERVER_HOST_PORT_PROP, getServer() + ":" + getPort());
+        String database = props.getProperty(DATABASE_PROP, getDatabase());
+
+        // Set GSSEncMode for tests only in the case the property is already missing
+        if (PGProperty.GSS_ENC_MODE.getSetString(props) == null) {
+            PGProperty.GSS_ENC_MODE.set(props, getGSSEncMode().value);
+        }
+
+        return DriverManager.getConnection(getURL(hostport, database), props);
+    }
+
+    /**
+     * Helper - closes an open connection (null-safe).
+     *
+     * @param con connection to close; may be null
+     * @throws SQLException if closing fails
+     */
+    public static void closeDB(Connection con) throws SQLException {
+        if (con != null) {
+            con.close();
+        }
+    }
+
+    /**
+     * Helper - creates a test schema for use by a test; an existing schema of
+     * the same name is dropped first.
+     *
+     * @param con    connection to use
+     * @param schema schema name (interpolated unquoted into the DDL)
+     * @throws SQLException on database errors
+     */
+    public static void createSchema(Connection con, String schema) throws SQLException {
+        Statement st = con.createStatement();
+        try {
+            // Drop the schema
+            dropSchema(con, schema);
+
+            // Now create the schema
+            String sql = "CREATE SCHEMA " + schema;
+
+            st.executeUpdate(sql);
+        } finally {
+            closeQuietly(st);
+        }
+    }
+
+    /**
+     * Helper - drops a schema (CASCADE, via {@link #dropObject}).
+     */
+    public static void dropSchema(Connection con, String schema) throws SQLException {
+        dropObject(con, "SCHEMA", schema);
+    }
+
+    /**
+     * Helper - creates a test table for use by a test; an existing table of the
+     * same name is dropped first.
+     *
+     * @param con     connection to use
+     * @param table   table name (interpolated unquoted into the DDL)
+     * @param columns column definition list interpolated verbatim into "(...)"
+     * @throws SQLException on database errors
+     */
+    public static void createTable(Connection con, String table, String columns) throws SQLException {
+        Statement st = con.createStatement();
+        try {
+            // Drop the table
+            dropTable(con, table);
+
+            // Now create the table
+            String sql = "CREATE TABLE " + table + " (" + columns + ")";
+
+            st.executeUpdate(sql);
+        } finally {
+            closeQuietly(st);
+        }
+    }
+
+    /**
+     * Helper creates a temporary table; an existing table of the same name is
+     * dropped first.
+     *
+     * @param con     connection to use
+     * @param table   table name
+     * @param columns column definition list interpolated verbatim into "(...)"
+     * @throws SQLException on database errors
+     */
+    public static void createTempTable(Connection con, String table, String columns)
+            throws SQLException {
+        Statement st = con.createStatement();
+        try {
+            // Drop the table
+            dropTable(con, table);
+
+            // Now create the table
+            st.executeUpdate("create temp table " + table + " (" + columns + ")");
+        } finally {
+            closeQuietly(st);
+        }
+    }
+
+    /**
+     * Helper - creates an unlogged table for use by a test; an existing table of
+     * the same name is dropped first. Unlogged tables work on PostgreSQL 9.1+;
+     * on older servers a regular (logged) table is created instead.
+     *
+     * @param con     connection to use
+     * @param table   table name
+     * @param columns column definition list interpolated verbatim into "(...)"
+     * @throws SQLException on database errors
+     */
+    public static void createUnloggedTable(Connection con, String table, String columns)
+            throws SQLException {
+        Statement st = con.createStatement();
+        try {
+            // Drop the table
+            dropTable(con, table);
+
+            String unlogged = haveMinimumServerVersion(con, ServerVersion.v9_1) ? "UNLOGGED" : "";
+
+            // Now create the table
+            st.executeUpdate("CREATE " + unlogged + " TABLE " + table + " (" + columns + ")");
+        } finally {
+            closeQuietly(st);
+        }
+    }
+
+    /**
+     * Helper - creates a view; any existing view of the same name is dropped
+     * first.
+     *
+     * @param con      connection to use
+     * @param viewName view name
+     * @param query    defining query, interpolated verbatim after "AS"
+     * @throws SQLException on database errors
+     */
+    public static void createView(Connection con, String viewName, String query)
+            throws SQLException {
+        try (Statement st = con.createStatement()) {
+            // Remove any previous definition before recreating it.
+            dropView(con, viewName);
+            st.executeUpdate("CREATE VIEW " + viewName + " AS " + query);
+        }
+    }
+
+    /**
+     * Helper - creates a materialized view; any existing materialized view of
+     * the same name is dropped first.
+     *
+     * @param con         connection to use
+     * @param matViewName materialized view name
+     * @param query       defining query, interpolated verbatim after "AS"
+     * @throws SQLException on database errors
+     */
+    public static void createMaterializedView(Connection con, String matViewName, String query)
+            throws SQLException {
+        try (Statement st = con.createStatement()) {
+            // Remove any previous definition before recreating it.
+            dropMaterializedView(con, matViewName);
+            st.executeUpdate("CREATE MATERIALIZED VIEW " + matViewName + " AS " + query);
+        }
+    }
+
+    /**
+     * Helper creates an enum type; any existing type of the same name is
+     * dropped first.
+     *
+     * @param con    connection to use
+     * @param name   type name
+     * @param values label list interpolated verbatim into "enum (...)"
+     * @throws SQLException on database errors
+     */
+    public static void createEnumType(Connection con, String name, String values)
+            throws SQLException {
+        Statement st = con.createStatement();
+        try {
+            dropType(con, name);
+
+            // Now create the enum type
+            st.executeUpdate("create type " + name + " as enum (" + values + ")");
+        } finally {
+            closeQuietly(st);
+        }
+    }
+
+    /**
+     * Helper creates a composite type; any existing type of the same name is
+     * dropped first.
+     *
+     * @param con    connection to use
+     * @param name   type name
+     * @param values attribute list interpolated verbatim into "AS (...)"
+     * @throws SQLException on database errors
+     */
+    public static void createCompositeType(Connection con, String name, String values) throws SQLException {
+        createCompositeType(con, name, values, true);
+    }
+
+    /**
+     * Helper creates a composite type.
+     *
+     * @param con        connection to use
+     * @param name       type name
+     * @param values     attribute list interpolated verbatim into "AS (...)"
+     * @param shouldDrop whether to drop an existing type of the same name first
+     * @throws SQLException on database errors
+     */
+    public static void createCompositeType(Connection con, String name, String values, boolean shouldDrop)
+            throws SQLException {
+        Statement st = con.createStatement();
+        try {
+            if (shouldDrop) {
+                dropType(con, name);
+            }
+            // Now create the type
+            st.executeUpdate("CREATE TYPE " + name + " AS (" + values + ")");
+        } finally {
+            closeQuietly(st);
+        }
+    }
+
+    /**
+     * Drops a domain (CASCADE, via {@link #dropObject}).
+     *
+     * @param con    connection to use
+     * @param domain domain name
+     * @throws SQLException on database errors
+     */
+    public static void dropDomain(Connection con, String domain)
+            throws SQLException {
+        dropObject(con, "DOMAIN", domain);
+    }
+
+    /**
+     * Helper creates a domain; any existing domain of the same name is dropped
+     * first.
+     *
+     * @param con    connection to use
+     * @param name   domain name
+     * @param values domain definition interpolated verbatim after "as"
+     * @throws SQLException on database errors
+     */
+    public static void createDomain(Connection con, String name, String values)
+            throws SQLException {
+        Statement st = con.createStatement();
+        try {
+            dropDomain(con, name);
+            // Now create the domain
+            st.executeUpdate("create domain " + name + " as " + values);
+        } finally {
+            closeQuietly(st);
+        }
+    }
+
+    /**
+     * Drops a sequence explicitly, because older server versions don't have
+     * dependency information for serials.
+     */
+    public static void dropSequence(Connection con, String sequence) throws SQLException {
+        dropObject(con, "SEQUENCE", sequence);
+    }
+
+    /**
+     * Helper - drops a table (CASCADE, via {@link #dropObject}).
+     */
+    public static void dropTable(Connection con, String table) throws SQLException {
+        dropObject(con, "TABLE", table);
+    }
+
+    /**
+     * Helper - drops a view.
+     */
+    public static void dropView(Connection con, String view) throws SQLException {
+        dropObject(con, "VIEW", view);
+    }
+
+    /**
+     * Helper - drops a materialized view.
+     */
+    public static void dropMaterializedView(Connection con, String matView) throws SQLException {
+        dropObject(con, "MATERIALIZED VIEW", matView);
+    }
+
+    /**
+     * Helper - drops a type.
+     */
+    public static void dropType(Connection con, String type) throws SQLException {
+        dropObject(con, "TYPE", type);
+    }
+
+    /**
+     * Drops a function with a given argument-type signature,
+     * e.g. {@code dropFunction(con, "f", "int, text")}.
+     */
+    public static void dropFunction(Connection con, String name, String arguments) throws SQLException {
+        dropObject(con, "FUNCTION", name + "(" + arguments + ")");
+    }
+
+    /**
+     * Drops a database object with CASCADE. In auto-commit mode "IF EXISTS" is
+     * used so a missing object is silently ignored; inside a transaction a
+     * missing object is reported as an error instead.
+     *
+     * @param con  connection to use
+     * @param type object kind keyword, e.g. "TABLE" or "SCHEMA"
+     * @param name object name (interpolated unquoted into the DDL)
+     * @throws SQLException on database errors
+     */
+    private static void dropObject(Connection con, String type, String name) throws SQLException {
+        Statement stmt = con.createStatement();
+        try {
+            if (con.getAutoCommit()) {
+                // Not in a transaction so ignore error for missing object
+                stmt.executeUpdate("DROP " + type + " IF EXISTS " + name + " CASCADE");
+            } else {
+                // In a transaction so do not ignore errors for missing object
+                stmt.executeUpdate("DROP " + type + " " + name + " CASCADE");
+            }
+        } finally {
+            closeQuietly(stmt);
+        }
+    }
+
+    /**
+     * Asserts that {@code select count(*)} on the given table returns the
+     * expected number of rows.
+     *
+     * @param con          connection to use
+     * @param tableName    table to count (interpolated unquoted into the query)
+     * @param expectedRows expected row count
+     * @param message      assertion message used on mismatch
+     * @throws SQLException on database errors
+     */
+    public static void assertNumberOfRows(Connection con, String tableName, int expectedRows, String message)
+            throws SQLException {
+        PreparedStatement ps = null;
+        ResultSet rs = null;
+        try {
+            ps = con.prepareStatement("select count(*) from " + tableName + " as t");
+            rs = ps.executeQuery();
+            rs.next();
+            Assert.assertEquals(message, expectedRows, rs.getInt(1));
+        } finally {
+            closeQuietly(rs);
+            closeQuietly(ps);
+        }
+    }
+
+    /**
+     * Asserts that the connection's current driver-level transaction state
+     * equals the expected one (see {@link #getTransactionState(Connection)}).
+     */
+    public static void assertTransactionState(String message, Connection con, TransactionState expected) {
+        TransactionState actual = TestUtil.getTransactionState(con);
+        Assert.assertEquals(message, expected, actual);
+    }
+
+    /**
+     * Helper - builds a simple {@code INSERT} statement with no explicit
+     * column list.
+     */
+    public static String insertSQL(String table, String values) {
+        return insertSQL(table, null, values);
+    }
+
+    /**
+     * Helper - builds a simple {@code INSERT} statement; the column list is
+     * included only when {@code columns} is non-null.
+     */
+    public static String insertSQL(String table, String columns, String values) {
+        StringBuilder sql = new StringBuilder("INSERT INTO ").append(table);
+        if (columns != null) {
+            sql.append(" (").append(columns).append(")");
+        }
+        return sql.append(" VALUES (").append(values).append(")").toString();
+    }
+
+    /**
+     * Helper - builds a simple {@code SELECT} over the given table and columns.
+     */
+    public static String selectSQL(String table, String columns) {
+        return selectSQL(table, columns, null, null);
+    }
+
+    /**
+     * Helper - builds a simple {@code SELECT} with an optional WHERE clause.
+     */
+    public static String selectSQL(String table, String columns, String where) {
+        return selectSQL(table, columns, where, null);
+    }
+
+    /**
+     * Helper - builds a simple {@code SELECT}; {@code where} and {@code other}
+     * (e.g. an ORDER BY) are appended only when non-null.
+     */
+    public static String selectSQL(String table, String columns, String where, String other) {
+        StringBuilder sql = new StringBuilder("SELECT ").append(columns).append(" FROM ").append(table);
+        if (where != null) {
+            sql.append(" WHERE ").append(where);
+        }
+        if (other != null) {
+            sql.append(' ').append(other);
+        }
+        return sql.toString();
+    }
+
+    /**
+     * Helper - left-pads {@code v} with zeros to exactly {@code l} digits; when
+     * the value has more than {@code l} digits only the trailing {@code l}
+     * digits are kept (truncating, as the original comment warned: ugly but it
+     * works).
+     *
+     * @param v value to prefix
+     * @param l number of digits (0-10)
+     */
+    public static String fix(int v, int l) {
+        String padded = "0000000000".substring(0, l) + v;
+        return padded.substring(padded.length() - l);
+    }
+
+    /**
+     * Escapes a string literal via the driver's escape support; returns the
+     * value unchanged when the connection is not a PgConnection.
+     *
+     * @throws NullPointerException if {@code con} is null
+     */
+    public static String escapeString(Connection con, String value) throws SQLException {
+        if (con == null) {
+            throw new NullPointerException("Connection is null");
+        }
+        if (con instanceof PgConnection) {
+            return ((PgConnection) con).escapeString(value);
+        }
+        return value;
+    }
+
+    /**
+     * Returns the connection's standard_conforming_strings setting; false when
+     * the connection is not a PgConnection.
+     *
+     * @throws NullPointerException if {@code con} is null
+     */
+    public static boolean getStandardConformingStrings(Connection con) {
+        if (con == null) {
+            throw new NullPointerException("Connection is null");
+        }
+        if (con instanceof PgConnection) {
+            return ((PgConnection) con).getStandardConformingStrings();
+        }
+        return false;
+    }
+
+    /**
+     * Determine if the given connection is connected to a server with a version of at least the given
+     * version. This is convenient because we are working with a java.sql.Connection, not a Postgres
+     * connection. Returns false when the connection is not a PgConnection.
+     */
+    public static boolean haveMinimumServerVersion(Connection con, int version) throws SQLException {
+        if (con == null) {
+            throw new NullPointerException("Connection is null");
+        }
+        if (con instanceof PgConnection) {
+            return ((PgConnection) con).haveMinimumServerVersion(version);
+        }
+        return false;
+    }
+
+    /**
+     * Same as {@link #haveMinimumServerVersion(Connection, int)} but takes a
+     * {@code Version}; returns false for non-PgConnection connections.
+     */
+    public static boolean haveMinimumServerVersion(Connection con, Version version)
+            throws SQLException {
+        if (con == null) {
+            throw new NullPointerException("Connection is null");
+        }
+        if (con instanceof PgConnection) {
+            return ((PgConnection) con).haveMinimumServerVersion(version);
+        }
+        return false;
+    }
+
+    /**
+     * JUnit assumption helper: skips the calling test unless a freshly opened
+     * privileged connection reports at least the given server version.
+     */
+    public static void assumeHaveMinimumServerVersion(Version version)
+            throws SQLException {
+        try (Connection conn = openPrivilegedDB()) {
+            Assume.assumeTrue(TestUtil.haveMinimumServerVersion(conn, version));
+        }
+    }
+
+    /**
+     * Returns whether the running JVM is at least the given version.
+     *
+     * <p>Compares dot-separated numeric components instead of using plain
+     * {@link String#compareTo(String)}, which orders lexicographically and
+     * would, for example, report Java "17.0.1" as older than Java "9".</p>
+     *
+     * @param version required minimum version, e.g. "1.8" or "9"
+     * @return true if the {@code java.version} system property is at least
+     *     {@code version}
+     */
+    public static boolean haveMinimumJVMVersion(String version) {
+        String jvm = System.getProperty("java.version");
+        return compareVersionStrings(jvm, version) >= 0;
+    }
+
+    /**
+     * Compares two version strings numerically component by component; missing
+     * components count as zero and non-numeric separators ("._-" etc.) are
+     * treated as delimiters, so "1.8.0_292" parses as 1,8,0,292.
+     */
+    private static int compareVersionStrings(String a, String b) {
+        String[] as = a.split("[^0-9]+");
+        String[] bs = b.split("[^0-9]+");
+        int n = Math.max(as.length, bs.length);
+        for (int i = 0; i < n; i++) {
+            int av = i < as.length && !as[i].isEmpty() ? Integer.parseInt(as[i]) : 0;
+            int bv = i < bs.length && !bs[i].isEmpty() ? Integer.parseInt(bs[i]) : 0;
+            if (av != bv) {
+                return Integer.compare(av, bv);
+            }
+        }
+        return 0;
+    }
+
+    /**
+     * Returns whether the server uses integer date/times, as reported by the
+     * driver's query executor; false when the connection is not a PgConnection.
+     *
+     * @throws NullPointerException if {@code con} is null
+     */
+    public static boolean haveIntegerDateTimes(Connection con) {
+        if (con == null) {
+            throw new NullPointerException("Connection is null");
+        }
+        if (con instanceof PgConnection) {
+            return ((PgConnection) con).getQueryExecutor().getIntegerDateTimes();
+        }
+        return false;
+    }
+
+    /**
+     * Print a ResultSet to System.out. This is useful for debugging tests.
+     */
+    public static void printResultSet(ResultSet rs) throws SQLException {
+        ResultSetMetaData rsmd = rs.getMetaData();
+        for (int i = 1; i <= rsmd.getColumnCount(); i++) {
+            if (i != 1) {
+                System.out.print(", ");
+            }
+            System.out.print(rsmd.getColumnName(i));
+        }
+        System.out.println();
+        while (rs.next()) {
+            for (int i = 1; i <= rsmd.getColumnCount(); i++) {
+                if (i != 1) {
+                    System.out.print(", ");
+                }
+                System.out.print(rs.getString(i));
+            }
+            System.out.println();
+        }
+    }
+
+    public static List<String> resultSetToLines(ResultSet rs) throws SQLException {
+        List<String> res = new ArrayList<>();
+        ResultSetMetaData rsmd = rs.getMetaData();
+        StringBuilder sb = new StringBuilder();
+        while (rs.next()) {
+            sb.setLength(0);
+            for (int i = 1; i <= rsmd.getColumnCount(); i++) {
+                if (i != 1) {
+                    sb.append(',');
+                }
+                sb.append(rs.getString(i));
+            }
+            res.add(sb.toString());
+        }
+        return res;
+    }
+
+    /**
+     * Joins the given lines with a newline separator (no trailing newline;
+     * empty input yields the empty string).
+     */
+    public static String join(List<String> list) {
+        return String.join("\n", list);
+    }
+
+    /**
+     * Find the column for the given label. Only SQLExceptions for system or
+     * set-up problems are thrown; a lookup failure (e.g. undefined column) is
+     * consumed and signalled by returning 0, so the caller must check the
+     * return value.
+     *
+     * @param query prepared query to execute
+     * @param label column label to look up in the first row's metadata
+     * @return the 1-based column index, or 0 when not found / no rows
+     * @throws SQLException for execution problems other than the lookup itself
+     */
+    public static int findColumn(PreparedStatement query, String label) throws SQLException {
+        int returnValue = 0;
+        // try-with-resources guarantees the ResultSet is closed even when
+        // executeQuery()/next() throws (the original leaked it in that case).
+        try (ResultSet rs = query.executeQuery()) {
+            if (rs.next()) {
+                try {
+                    returnValue = rs.findColumn(label);
+                } catch (SQLException ignored) {
+                    // consume exception to allow cleanup; 0 signals "not found"
+                }
+            }
+        }
+        return returnValue;
+    }
+
+    /**
+     * Close a resource and ignore any errors during closing.
+     *
+     * @param resource resource to close; may be null
+     */
+    public static void closeQuietly(Closeable resource) {
+        if (resource != null) {
+            try {
+                resource.close();
+            } catch (Exception ignore) {
+            }
+        }
+    }
+
+    /**
+     * Close a Connection and ignore any errors during closing.
+     *
+     * @param conn connection to close; may be null
+     */
+    public static void closeQuietly(Connection conn) {
+        if (conn != null) {
+            try {
+                conn.close();
+            } catch (SQLException ignore) {
+            }
+        }
+    }
+
+    /**
+     * Close a Statement and ignore any errors during closing.
+     *
+     * @param stmt statement to close; may be null
+     */
+    public static void closeQuietly(Statement stmt) {
+        if (stmt != null) {
+            try {
+                stmt.close();
+            } catch (SQLException ignore) {
+            }
+        }
+    }
+
+    /**
+     * Close a ResultSet and ignore any errors during closing.
+     *
+     * @param rs result set to close; may be null
+     */
+    public static void closeQuietly(ResultSet rs) {
+        if (rs != null) {
+            try {
+                rs.close();
+            } catch (SQLException ignore) {
+            }
+        }
+    }
+
+    /**
+     * Drops any existing replication slot of the given name, then creates a
+     * logical replication slot with the given output plugin.
+     */
+    public static void recreateLogicalReplicationSlot(Connection connection, String slotName, String outputPlugin)
+            throws SQLException, InterruptedException, TimeoutException {
+        //drop previous slot
+        dropReplicationSlot(connection, slotName);
+
+        PreparedStatement stm = null;
+        try {
+            stm = connection.prepareStatement("SELECT * FROM pg_create_logical_replication_slot(?, ?)");
+            stm.setString(1, slotName);
+            stm.setString(2, outputPlugin);
+            stm.execute();
+        } finally {
+            closeQuietly(stm);
+        }
+    }
+
+    /**
+     * Drops any existing replication slot of the given name, then creates a
+     * physical replication slot.
+     */
+    public static void recreatePhysicalReplicationSlot(Connection connection, String slotName)
+            throws SQLException, InterruptedException, TimeoutException {
+        //drop previous slot
+        dropReplicationSlot(connection, slotName);
+
+        PreparedStatement stm = null;
+        try {
+            stm = connection.prepareStatement("SELECT * FROM pg_create_physical_replication_slot(?)");
+            stm.setString(1, slotName);
+            stm.execute();
+        } finally {
+            closeQuietly(stm);
+        }
+    }
+
+    /**
+     * Drops a replication slot: on 9.5+ first terminates any backend actively
+     * using the slot, then waits (bounded; see {@link #waitStopReplicationSlot})
+     * for the slot to become inactive, and finally drops it if present.
+     */
+    public static void dropReplicationSlot(Connection connection, String slotName)
+            throws SQLException, InterruptedException, TimeoutException {
+        if (haveMinimumServerVersion(connection, ServerVersion.v9_5)) {
+            // kick out the backend currently streaming from the slot, if any
+            PreparedStatement stm = null;
+            try {
+                stm = connection.prepareStatement(
+                        "select pg_terminate_backend(active_pid) from pg_replication_slots "
+                                + "where active = true and slot_name = ?");
+                stm.setString(1, slotName);
+                stm.execute();
+            } finally {
+                closeQuietly(stm);
+            }
+        }
+
+        waitStopReplicationSlot(connection, slotName);
+
+        // drop via a sub-select so a missing slot is simply a no-op
+        PreparedStatement stm = null;
+        try {
+            stm = connection.prepareStatement(
+                    "select pg_drop_replication_slot(slot_name) "
+                            + "from pg_replication_slots where slot_name = ?");
+            stm.setString(1, slotName);
+            stm.execute();
+        } finally {
+            closeQuietly(stm);
+        }
+    }
+
+    /**
+     * Returns whether the named replication slot is currently marked active in
+     * pg_replication_slots; false when the slot does not exist.
+     */
+    public static boolean isReplicationSlotActive(Connection connection, String slotName)
+            throws SQLException {
+        PreparedStatement stm = null;
+        ResultSet rs = null;
+
+        try {
+            stm =
+                    connection.prepareStatement("select active from pg_replication_slots where slot_name = ?");
+            stm.setString(1, slotName);
+            rs = stm.executeQuery();
+            return rs.next() && rs.getBoolean(1);
+        } finally {
+            closeQuietly(rs);
+            closeQuietly(stm);
+        }
+    }
+
+    /**
+     * Execute a SQL query with a given connection and return whether any rows were
+     * returned. No column data is fetched.
+     */
+    public static boolean executeQuery(Connection conn, String sql) throws SQLException {
+        Statement stmt = conn.createStatement();
+        ResultSet rs = stmt.executeQuery(sql);
+        boolean hasNext = rs.next();
+        rs.close();
+        stmt.close();
+        return hasNext;
+    }
+
+    /**
+     * Execute a SQL query with a given connection, fetch the first row, and return its
+     * string value.
+     */
+    public static String queryForString(Connection conn, String sql) throws SQLException {
+        Statement stmt = conn.createStatement();
+        ResultSet rs = stmt.executeQuery(sql);
+        Assert.assertTrue("Query should have returned exactly one row but none was found: " + sql, rs.next());
+        String value = rs.getString(1);
+        Assert.assertFalse("Query should have returned exactly one row but more than one found: " + sql, rs.next());
+        rs.close();
+        stmt.close();
+        return value;
+    }
+
+    /**
+     * Same as queryForString(...) above but with a single string param.
+     */
+    public static String queryForString(Connection conn, String sql, String param) throws SQLException {
+        PreparedStatement stmt = conn.prepareStatement(sql);
+        stmt.setString(1, param);
+        ResultSet rs = stmt.executeQuery();
+        Assert.assertTrue("Query should have returned exactly one row but none was found: " + sql, rs.next());
+        String value = rs.getString(1);
+        Assert.assertFalse("Query should have returned exactly one row but more than one found: " + sql, rs.next());
+        rs.close();
+        stmt.close();
+        return value;
+    }
+
+    /**
+     * Execute a SQL query with a given connection, fetch the first row, and return its
+     * boolean value.
+     */
+    public static Boolean queryForBoolean(Connection conn, String sql) throws SQLException {
+        Statement stmt = conn.createStatement();
+        ResultSet rs = stmt.executeQuery(sql);
+        Assert.assertTrue("Query should have returned exactly one row but none was found: " + sql, rs.next());
+        Boolean value = rs.getBoolean(1);
+        if (rs.wasNull()) {
+            value = null;
+        }
+        Assert.assertFalse("Query should have returned exactly one row but more than one found: " + sql, rs.next());
+        rs.close();
+        stmt.close();
+        return value;
+    }
+
+    /**
+     * Retrieve the backend process id for a given connection.
+     *
+     * @param conn connection; must unwrap to a PGConnection
+     * @return PID of the server backend serving this connection
+     * @throws SQLException if the connection cannot be unwrapped
+     */
+    public static int getBackendPid(Connection conn) throws SQLException {
+        PGConnection pgConn = conn.unwrap(PGConnection.class);
+        return pgConn.getBackendPID();
+    }
+
+    /**
+     * Returns whether a server backend with the given pid appears in
+     * pg_stat_activity (the pid column was named "procpid" before 9.2).
+     */
+    public static boolean isPidAlive(Connection conn, int pid) throws SQLException {
+        String sql = haveMinimumServerVersion(conn, ServerVersion.v9_2)
+                ? "SELECT EXISTS (SELECT * FROM pg_stat_activity WHERE pid = ?)" // 9.2+ use pid column
+                : "SELECT EXISTS (SELECT * FROM pg_stat_activity WHERE procpid = ?)"; // Use older procpid
+        try (PreparedStatement stmt = conn.prepareStatement(sql)) {
+            stmt.setInt(1, pid);
+            try (ResultSet rs = stmt.executeQuery()) {
+                rs.next();
+                return rs.getBoolean(1);
+            }
+        }
+    }
+
+    /**
+     * Waits up to 30 seconds (polling every 10 ms) for a backend process to
+     * terminate.
+     */
+    public static boolean waitForBackendTermination(Connection conn, int pid) throws SQLException, InterruptedException {
+        return waitForBackendTermination(conn, pid, Duration.ofSeconds(30), Duration.ofMillis(10));
+    }
+
+    /**
+     * Wait for a backend process to terminate and return whether it actually
+     * terminated within the maximum wait time.
+     *
+     * @param conn       connection used to poll pg_stat_activity
+     * @param pid        backend pid to wait for
+     * @param timeout    maximum total time to wait
+     * @param sleepDelay delay between polls
+     */
+    public static boolean waitForBackendTermination(Connection conn, int pid, Duration timeout, Duration sleepDelay) throws SQLException, InterruptedException {
+        long started = System.currentTimeMillis();
+        do {
+            if (!isPidAlive(conn, pid)) {
+                return true;
+            }
+            Thread.sleep(sleepDelay.toMillis());
+        } while ((System.currentTimeMillis() - started) < timeout.toMillis());
+        // one final check after the deadline has passed
+        return !isPidAlive(conn, pid);
+    }
+
+    /**
+     * Create a new connection to the same database as the supplied connection but with the privileged credentials.
+     *
+     * <p>NOTE(review): the client-info properties are supplied as {@link Properties}
+     * defaults (the {@code Properties(Properties)} constructor); they are visible
+     * via {@code getProperty} but not via {@code Hashtable} access — confirm all
+     * required settings carry over to the driver.</p>
+     */
+    private static Connection createPrivilegedConnection(Connection conn) throws SQLException {
+        String url = conn.getMetaData().getURL();
+        Properties props = new Properties(conn.getClientInfo());
+        PGProperty.USER.set(props, getPrivilegedUser());
+        PGProperty.PASSWORD.set(props, getPrivilegedPassword());
+        return DriverManager.getConnection(url, props);
+    }
+
+    /**
+     * Executes pg_terminate_backend(...) to terminate the server process for
+     * a given process id with the given connection.
+     * This method does not wait for the backend process to exit.
+     *
+     * @return the boolean result of pg_terminate_backend
+     */
+    private static boolean pgTerminateBackend(Connection privConn, int backendPid) throws SQLException {
+        try (PreparedStatement stmt = privConn.prepareStatement("SELECT pg_terminate_backend(?)")) {
+            stmt.setInt(1, backendPid);
+            try (ResultSet rs = stmt.executeQuery()) {
+                rs.next();
+                return rs.getBoolean(1);
+            }
+        }
+    }
+
+    /**
+     * Open a new privileged connection to the same database as connection and use it to ask to terminate the connection.
+     * If the termination request succeeds, wait for the backend process to actually terminate.
+     *
+     * @return true if the backend was terminated and its process exited in time
+     */
+    public static boolean terminateBackend(Connection conn) throws SQLException, InterruptedException {
+        try (Connection privConn = createPrivilegedConnection(conn)) {
+            int pid = getBackendPid(conn);
+            if (!pgTerminateBackend(privConn, pid)) {
+                return false;
+            }
+            return waitForBackendTermination(privConn, pid);
+        }
+    }
+
+    /**
+     * Open a new privileged connection to the same database as connection and use it to ask to terminate the connection.
+     * NOTE: This function does not wait for the process to terminate.
+     */
+    public static boolean terminateBackendNoWait(Connection conn) throws SQLException {
+        try (Connection privConn = createPrivilegedConnection(conn)) {
+            int pid = getBackendPid(conn);
+            return pgTerminateBackend(privConn, pid);
+        }
+    }
+
+    /**
+     * Returns the driver-level transaction state of the connection.
+     * Note: performs an unchecked cast to BaseConnection.
+     */
+    public static TransactionState getTransactionState(Connection conn) {
+        return ((BaseConnection) conn).getTransactionState();
+    }
+
+    /**
+     * Polls every 100 ms until the named replication slot is no longer active,
+     * giving up after roughly 30 seconds.
+     *
+     * @throws TimeoutException if the slot is still active when the wait expires
+     */
+    private static void waitStopReplicationSlot(Connection connection, String slotName)
+            throws InterruptedException, TimeoutException, SQLException {
+        long startWaitTime = System.currentTimeMillis();
+        boolean stillActive;
+        long timeInWait = 0;
+
+        do {
+            stillActive = isReplicationSlotActive(connection, slotName);
+            if (stillActive) {
+                TimeUnit.MILLISECONDS.sleep(100L);
+                timeInWait = System.currentTimeMillis() - startWaitTime;
+            }
+        } while (stillActive && timeInWait <= 30000);
+
+        if (stillActive) {
+            throw new TimeoutException("Wait stop replication slot " + timeInWait + " timeout occurs");
+        }
+    }
+
+    /**
+     * Executes given SQL via {@link Statement#execute(String)} on a given connection.
+     *
+     * @param connection connection to use
+     * @param sql        SQL text to execute
+     * @throws SQLException on database errors
+     */
+    public static void execute(Connection connection, String sql) throws SQLException {
+        try (Statement stmt = connection.createStatement()) {
+            stmt.execute(sql);
+        }
+    }
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/annotations/DisabledIfServerVersionBelow.java b/pgjdbc/src/test/java/org/postgresql/test/annotations/DisabledIfServerVersionBelow.java
new file mode 100644
index 0000000..98baef4
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/annotations/DisabledIfServerVersionBelow.java
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2024, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.test.annotations;
+
+import org.postgresql.test.impl.ServerVersionCondition;
+
+import org.junit.jupiter.api.extension.ExtendWith;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+/**
+ * Disables the test if the current server version is less than the specified
+ * version.
+ *
+ * @see org.junit.jupiter.api.Disabled
+ */
+@Target({ElementType.TYPE, ElementType.METHOD})
+@Retention(RetentionPolicy.RUNTIME)
+@ExtendWith(ServerVersionCondition.class)
+public @interface DisabledIfServerVersionBelow {
+    /**
+     * @return non-null server version in the form x.y.z, e.g. 9.4, 9.5.3
+     * @see org.postgresql.core.ServerVersion
+     */
+    String value();
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/annotations/tags/Replication.java b/pgjdbc/src/test/java/org/postgresql/test/annotations/tags/Replication.java
new file mode 100644
index 0000000..a726ea5
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/annotations/tags/Replication.java
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2024, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.test.annotations.tags;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+import org.junit.jupiter.api.Tag;
+
+/**
+ * Meta-annotation applying the JUnit 5 {@code "replication"} tag to a test
+ * class or method.
+ */
+@Target({ElementType.TYPE, ElementType.METHOD})
+@Retention(RetentionPolicy.RUNTIME)
+@Tag("replication")
+public @interface Replication {
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/annotations/tags/Xa.java b/pgjdbc/src/test/java/org/postgresql/test/annotations/tags/Xa.java
new file mode 100644
index 0000000..42a81d8
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/annotations/tags/Xa.java
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2024, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.test.annotations.tags;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+import org.junit.jupiter.api.Tag;
+
+/**
+ * Meta-annotation applying the JUnit 5 {@code "xa"} tag to a test class or
+ * method.
+ */
+@Target({ElementType.TYPE, ElementType.METHOD})
+@Retention(RetentionPolicy.RUNTIME)
+@Tag("xa")
+public @interface Xa {
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/core/AsciiStringInternerTest.java b/pgjdbc/src/test/java/org/postgresql/test/core/AsciiStringInternerTest.java
new file mode 100644
index 0000000..ae7d87b
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/core/AsciiStringInternerTest.java
@@ -0,0 +1,129 @@
+/*
+ * Copyright (c) 2020, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.test.core;
+
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+import java.util.Arrays;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ThreadLocalRandom;
+import java.util.concurrent.atomic.LongAdder;
+import org.junit.jupiter.api.Test;
+import org.postgresql.core.AsciiStringInterner;
+import org.postgresql.core.Encoding;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotSame;
+import static org.junit.jupiter.api.Assertions.assertSame;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+/**
+ * @author Brett Okken
+ */
+class AsciiStringInternerTest {
+
+    @Test
+    void canonicalValue() throws Exception {
+        AsciiStringInterner interner = new AsciiStringInterner();
+        String s1 = "testCanonicalValue";
+        byte[] bytes = s1.getBytes(StandardCharsets.US_ASCII);
+        String interned = interner.getString(bytes, 0, bytes.length, null);
+
+        //interned value should be equal
+        assertEquals(s1, interned);
+        //but should be different instance
+        assertNotSame(s1, interned);
+        //asking for it again, however should return same instance
+        assertSame(interned, interner.getString(bytes, 0, bytes.length, null));
+
+        //now show that we can get the value back from a different byte[]
+        byte[] bytes2 = new byte[128];
+        System.arraycopy(bytes, 0, bytes2, 73, bytes.length);
+        assertSame(interned, interner.getString(bytes2, 73, bytes.length, null));
+
+        //now we will mutate the original byte[] to show that does not affect the map
+        Arrays.fill(bytes, (byte) 13);
+        assertSame(interned, interner.getString(bytes2, 73, bytes.length, null));
+    }
+
+    @Test
+    void stagedValue() throws Exception {
+        AsciiStringInterner interner = new AsciiStringInterner();
+        String s1 = "testStagedValue";
+        interner.putString(s1);
+        byte[] bytes = s1.getBytes(StandardCharsets.US_ASCII);
+        String interned = interner.getString(bytes, 0, bytes.length, null);
+        // should be same instance
+        assertSame(s1, interned);
+        //asking for it again should also return same instance
+        assertSame(s1, interner.getString(bytes, 0, bytes.length, null));
+
+        //now show that we can get the value back from a different byte[]
+        byte[] bytes2 = new byte[128];
+        System.arraycopy(bytes, 0, bytes2, 73, bytes.length);
+        assertSame(s1, interner.getString(bytes2, 73, bytes.length, null));
+    }
+
+    @Test
+    void nonAsciiValue() throws Exception {
+        final Encoding encoding = Encoding.getJVMEncoding("UTF-8");
+        AsciiStringInterner interner = new AsciiStringInterner();
+        String s1 = "testNonAsciiValue" + '\u03C0'; // add multi-byte to string to make invalid for intern
+        byte[] bytes = s1.getBytes(StandardCharsets.UTF_8);
+        String interned = interner.getString(bytes, 0, bytes.length, encoding);
+
+        //interned value should be equal
+        assertEquals(s1, interned);
+        //but should be different instance
+        assertNotSame(s1, interned);
+        //asking for it again should again return a different instance
+        final String interned2 = interner.getString(bytes, 0, bytes.length, encoding);
+        assertEquals(s1, interned2);
+        assertNotSame(s1, interned2);
+        assertNotSame(interned, interned2);
+    }
+
+    @Test
+    void testToString() throws Exception {
+        AsciiStringInterner interner = new AsciiStringInterner();
+        assertEquals("AsciiStringInterner []", interner.toString(), "empty");
+        interner.putString("s1");
+        assertEquals("AsciiStringInterner ['s1']", interner.toString(), "empty");
+        interner.getString("s2".getBytes(StandardCharsets.US_ASCII), 0, 2, null);
+        assertEquals("AsciiStringInterner ['s1', 's2']", interner.toString(), "empty");
+    }
+
+    @Test
+    void garbageCleaning() throws Exception {
+        final byte[] bytes = new byte[100000];
+        for (int i = 0; i < 100000; i++) {
+            bytes[i] = (byte) ThreadLocalRandom.current().nextInt(128);
+        }
+        final AsciiStringInterner interner = new AsciiStringInterner();
+        final LongAdder length = new LongAdder();
+        final Callable<Void> c = () -> {
+            for (int i = 0; i < 25000; i++) {
+                String str;
+                try {
+                    str = interner.getString(bytes, 0, ThreadLocalRandom.current().nextInt(1000, bytes.length), null);
+                } catch (IOException e) {
+                    throw new IllegalStateException(e);
+                }
+                length.add(str.length());
+            }
+            return null;
+        };
+        final ExecutorService exec = Executors.newCachedThreadPool();
+        try {
+            exec.invokeAll(Arrays.asList(c, c, c, c));
+        } finally {
+            exec.shutdown();
+        }
+        //this is really just done to make sure java cannot tell that nothing is really being done
+        assertTrue(length.sum() > 0);
+    }
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/core/CommandCompleteParserNegativeTest.java b/pgjdbc/src/test/java/org/postgresql/test/core/CommandCompleteParserNegativeTest.java
new file mode 100644
index 0000000..0fc021e
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/core/CommandCompleteParserNegativeTest.java
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2018, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.test.core;
+
+import java.util.Arrays;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.MethodSource;
+import org.postgresql.core.CommandCompleteParser;
+import org.postgresql.util.PSQLException;
+import static org.junit.jupiter.api.Assertions.fail;
+
+public class CommandCompleteParserNegativeTest {
+    public static Iterable<Object[]> data() {
+        return Arrays.asList(new Object[][]{
+                {"SELECT 0_0 42"},
+                {"SELECT 42 0_0"},
+                {"SELECT 0_0 0_0"},
+        });
+    }
+
+    @MethodSource("data")
+    @ParameterizedTest(name = "input={0}")
+    void run(String input) throws PSQLException {
+        CommandCompleteParser parser = new CommandCompleteParser();
+        try {
+            parser.parse(input);
+            fail("CommandCompleteParser should throw NumberFormatException for " + input);
+        } catch (PSQLException e) {
+            Throwable cause = e.getCause();
+            if (cause == null) {
+                throw e;
+            }
+            if (!(cause instanceof NumberFormatException)) {
+                throw e;
+            }
+            // NumberFormatException is expected
+        }
+    }
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/core/CommandCompleteParserTest.java b/pgjdbc/src/test/java/org/postgresql/test/core/CommandCompleteParserTest.java
new file mode 100644
index 0000000..33d92b8
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/core/CommandCompleteParserTest.java
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2018, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.test.core;
+
+import java.util.Arrays;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.MethodSource;
+import org.postgresql.core.CommandCompleteParser;
+import org.postgresql.util.PSQLException;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+public class CommandCompleteParserTest {
+    public static Iterable<Object[]> data() {
+        return Arrays.asList(new Object[][]{
+                {"SELECT 0", 0, 0},
+                {"SELECT -42", 0, 0},
+                {"SELECT", 0, 0},
+                {"", 0, 0},
+                {"A", 0, 0},
+                {"SELECT 42", 0, 42},
+                {"UPDATE 43 42", 43, 42},
+                {"UPDATE 43 " + Long.MAX_VALUE, 43, Long.MAX_VALUE},
+                {"UPDATE " + Long.MAX_VALUE + " " + Long.MAX_VALUE, Long.MAX_VALUE, Long.MAX_VALUE},
+                {"UPDATE " + (Long.MAX_VALUE / 10) + " " + (Long.MAX_VALUE / 10), (Long.MAX_VALUE / 10),
+                        (Long.MAX_VALUE / 10)},
+                {"UPDATE " + (Long.MAX_VALUE / 100) + " " + (Long.MAX_VALUE / 100), (Long.MAX_VALUE / 100),
+                        (Long.MAX_VALUE / 100)},
+                {"CREATE TABLE " + (Long.MAX_VALUE / 100) + " " + (Long.MAX_VALUE / 100),
+                        (Long.MAX_VALUE / 100), (Long.MAX_VALUE / 100)},
+                {"CREATE TABLE", 0, 0},
+                {"CREATE OR DROP OR DELETE TABLE 42", 0, 42},
+        });
+    }
+
+    @MethodSource("data")
+    @ParameterizedTest(name = "input={0}, oid={1}, rows={2}")
+    void run(String input, long oid, long rows) throws PSQLException {
+        CommandCompleteParser expected = new CommandCompleteParser();
+        CommandCompleteParser actual = new CommandCompleteParser();
+        expected.set(oid, rows);
+        actual.parse(input);
+        assertEquals(expected, actual, input);
+    }
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/core/FixedLengthOutputStreamTest.java b/pgjdbc/src/test/java/org/postgresql/test/core/FixedLengthOutputStreamTest.java
index 219c88c..a53c753 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/core/FixedLengthOutputStreamTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/core/FixedLengthOutputStreamTest.java
@@ -5,84 +5,81 @@
 
 package org.postgresql.test.core;
 
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.sql.SQLException;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.postgresql.core.FixedLengthOutputStream;
 import static org.junit.jupiter.api.Assertions.assertArrayEquals;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.fail;
 
-import org.postgresql.core.FixedLengthOutputStream;
-
-import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.Test;
-
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.sql.SQLException;
-
 class FixedLengthOutputStreamTest {
 
-  private ByteArrayOutputStream targetStream;
-  private FixedLengthOutputStream fixedLengthStream;
+    private ByteArrayOutputStream targetStream;
+    private FixedLengthOutputStream fixedLengthStream;
 
-  @BeforeEach
-  void setUp() throws Exception {
-    targetStream = new ByteArrayOutputStream();
-    fixedLengthStream = new FixedLengthOutputStream(10, targetStream);
-  }
-
-  @AfterEach
-  void tearDown() throws SQLException {
-  }
-
-  private void verifyExpectedOutput(byte[] expected) {
-    assertArrayEquals(expected, targetStream.toByteArray(), "Incorrect data written to target stream");
-  }
-
-  @Test
-  void singleByteWrites() throws IOException {
-    fixedLengthStream.write((byte) 1);
-    assertEquals(9, fixedLengthStream.remaining(), "Incorrect remaining value");
-    fixedLengthStream.write((byte) 2);
-    assertEquals(8, fixedLengthStream.remaining(), "Incorrect remaining value");
-    verifyExpectedOutput(new byte[]{1, 2});
-  }
-
-  @Test
-  void multipleByteWrites() throws IOException {
-    fixedLengthStream.write(new byte[]{1, 2, 3, 4});
-    assertEquals(6, fixedLengthStream.remaining(), "Incorrect remaining value");
-    fixedLengthStream.write(new byte[]{5, 6, 7, 8});
-    assertEquals(2, fixedLengthStream.remaining(), "Incorrect remaining value");
-    verifyExpectedOutput(new byte[]{1, 2, 3, 4, 5, 6, 7, 8});
-  }
-
-  @Test
-  void singleByteOverLimit() throws IOException {
-    byte[] data = new byte[]{1, 2, 3, 4, 5, 6, 7, 8, 9, 0};
-    fixedLengthStream.write(data);
-    assertEquals(0, fixedLengthStream.remaining(), "Incorrect remaining value");
-    try {
-      fixedLengthStream.write((byte) 'a');
-      fail("Expected exception not thrown");
-    } catch (IOException e) {
-      assertEquals("Attempt to write more than the specified 10 bytes", e.getMessage(), "Incorrect exception message");
+    @BeforeEach
+    void setUp() throws Exception {
+        targetStream = new ByteArrayOutputStream();
+        fixedLengthStream = new FixedLengthOutputStream(10, targetStream);
     }
-    assertEquals(0, fixedLengthStream.remaining(), "Incorrect remaining value after exception");
-    verifyExpectedOutput(data);
-  }
 
-  @Test
-  void multipleBytesOverLimit() throws IOException {
-    byte[] data = new byte[]{1, 2, 3, 4, 5, 6, 7, 8};
-    fixedLengthStream.write(data);
-    assertEquals(2, fixedLengthStream.remaining());
-    try {
-      fixedLengthStream.write(new byte[]{'a', 'b', 'c', 'd'});
-      fail("Expected exception not thrown");
-    } catch (IOException e) {
-      assertEquals("Attempt to write more than the specified 10 bytes", e.getMessage(), "Incorrect exception message");
+    @AfterEach
+    void tearDown() throws SQLException {
+    }
+
+    private void verifyExpectedOutput(byte[] expected) {
+        assertArrayEquals(expected, targetStream.toByteArray(), "Incorrect data written to target stream");
+    }
+
+    @Test
+    void singleByteWrites() throws IOException {
+        fixedLengthStream.write((byte) 1);
+        assertEquals(9, fixedLengthStream.remaining(), "Incorrect remaining value");
+        fixedLengthStream.write((byte) 2);
+        assertEquals(8, fixedLengthStream.remaining(), "Incorrect remaining value");
+        verifyExpectedOutput(new byte[]{1, 2});
+    }
+
+    @Test
+    void multipleByteWrites() throws IOException {
+        fixedLengthStream.write(new byte[]{1, 2, 3, 4});
+        assertEquals(6, fixedLengthStream.remaining(), "Incorrect remaining value");
+        fixedLengthStream.write(new byte[]{5, 6, 7, 8});
+        assertEquals(2, fixedLengthStream.remaining(), "Incorrect remaining value");
+        verifyExpectedOutput(new byte[]{1, 2, 3, 4, 5, 6, 7, 8});
+    }
+
+    @Test
+    void singleByteOverLimit() throws IOException {
+        byte[] data = new byte[]{1, 2, 3, 4, 5, 6, 7, 8, 9, 0};
+        fixedLengthStream.write(data);
+        assertEquals(0, fixedLengthStream.remaining(), "Incorrect remaining value");
+        try {
+            fixedLengthStream.write((byte) 'a');
+            fail("Expected exception not thrown");
+        } catch (IOException e) {
+            assertEquals("Attempt to write more than the specified 10 bytes", e.getMessage(), "Incorrect exception message");
+        }
+        assertEquals(0, fixedLengthStream.remaining(), "Incorrect remaining value after exception");
+        verifyExpectedOutput(data);
+    }
+
+    @Test
+    void multipleBytesOverLimit() throws IOException {
+        byte[] data = new byte[]{1, 2, 3, 4, 5, 6, 7, 8};
+        fixedLengthStream.write(data);
+        assertEquals(2, fixedLengthStream.remaining());
+        try {
+            fixedLengthStream.write(new byte[]{'a', 'b', 'c', 'd'});
+            fail("Expected exception not thrown");
+        } catch (IOException e) {
+            assertEquals("Attempt to write more than the specified 10 bytes", e.getMessage(), "Incorrect exception message");
+        }
+        assertEquals(2, fixedLengthStream.remaining(), "Incorrect remaining value after exception");
+        verifyExpectedOutput(data);
     }
-    assertEquals(2, fixedLengthStream.remaining(), "Incorrect remaining value after exception");
-    verifyExpectedOutput(data);
-  }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/core/JavaVersionTest.java b/pgjdbc/src/test/java/org/postgresql/test/core/JavaVersionTest.java
index 6c8b335..b6cf163 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/core/JavaVersionTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/core/JavaVersionTest.java
@@ -5,21 +5,19 @@
 
 package org.postgresql.test.core;
 
+import org.junit.jupiter.api.Test;
+import org.postgresql.core.JavaVersion;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 
-import org.postgresql.core.JavaVersion;
-
-import org.junit.jupiter.api.Test;
-
 class JavaVersionTest {
-  @Test
-  void getRuntimeVersion() {
-    String currentVersion = System.getProperty("java.version");
-    String msg = "java.version = " + currentVersion + ", JavaVersion.getRuntimeVersion() = "
-        + JavaVersion.getRuntimeVersion();
-    System.out.println(msg);
-    if (currentVersion.startsWith("1.8")) {
-      assertEquals(JavaVersion.v1_8, JavaVersion.getRuntimeVersion(), msg);
+    @Test
+    void getRuntimeVersion() {
+        String currentVersion = System.getProperty("java.version");
+        String msg = "java.version = " + currentVersion + ", JavaVersion.getRuntimeVersion() = "
+                + JavaVersion.getRuntimeVersion();
+        System.out.println(msg);
+        if (currentVersion.startsWith("1.8")) {
+            assertEquals(JavaVersion.v1_8, JavaVersion.getRuntimeVersion(), msg);
+        }
     }
-  }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/core/LogServerMessagePropertyTest.java b/pgjdbc/src/test/java/org/postgresql/test/core/LogServerMessagePropertyTest.java
index 527117a..7cd4979 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/core/LogServerMessagePropertyTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/core/LogServerMessagePropertyTest.java
@@ -5,143 +5,140 @@
 
 package org.postgresql.test.core;
 
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.fail;
-
-import org.postgresql.PGProperty;
-import org.postgresql.core.ServerVersion;
-import org.postgresql.test.TestUtil;
-import org.postgresql.util.PSQLState;
-
-import org.junit.jupiter.api.Assumptions;
-import org.junit.jupiter.api.Test;
-
 import java.sql.Connection;
 import java.sql.PreparedStatement;
 import java.sql.SQLException;
 import java.util.Locale;
 import java.util.Properties;
+import org.junit.jupiter.api.Assumptions;
+import org.junit.jupiter.api.Test;
+import org.postgresql.PGProperty;
+import org.postgresql.core.ServerVersion;
+import org.postgresql.test.TestUtil;
+import org.postgresql.util.PSQLState;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.fail;
 
 class LogServerMessagePropertyTest {
-  private static final String PRIMARY_KEY_NAME = "lms_test_pk";
-  private static final String CREATE_TABLE_SQL =
-      "CREATE TABLE pg_temp.lms_test ("
-      + "  id text, "
-      + "  CONSTRAINT " + PRIMARY_KEY_NAME + " PRIMARY KEY (id)"
-      + ")";
-  private static final String SECRET_VALUE = "some_secret_value";
-  private static final String INSERT_SQL =
-      "INSERT INTO pg_temp.lms_test (id) VALUES ('" + SECRET_VALUE + "')";
+    private static final String PRIMARY_KEY_NAME = "lms_test_pk";
+    private static final String CREATE_TABLE_SQL =
+            "CREATE TABLE pg_temp.lms_test ("
+                    + "  id text, "
+                    + "  CONSTRAINT " + PRIMARY_KEY_NAME + " PRIMARY KEY (id)"
+                    + ")";
+    private static final String SECRET_VALUE = "some_secret_value";
+    private static final String INSERT_SQL =
+            "INSERT INTO pg_temp.lms_test (id) VALUES ('" + SECRET_VALUE + "')";
 
-  /**
-   * Creates a connection with the additional properties, use it to
-   * create a temp table with a primary key, run two inserts to generate
-   * a duplicate key error, and finally return the exception message.
-   */
-  private static String testViolatePrimaryKey(Properties props, boolean batch) throws SQLException {
-    Connection conn = TestUtil.openDB(props);
-    Assumptions.assumeTrue(TestUtil.haveMinimumServerVersion(conn, ServerVersion.v9_1));
-    try {
-      TestUtil.execute(conn, CREATE_TABLE_SQL);
-      if (batch) {
-        PreparedStatement stmt = conn.prepareStatement(INSERT_SQL);
-        stmt.addBatch();
-        stmt.addBatch();
-        stmt.executeBatch();
-      } else {
-        // First insert should work
-        TestUtil.execute(conn, INSERT_SQL);
-        // Second insert should throw a duplicate key error
-        TestUtil.execute(conn, INSERT_SQL);
-      }
-    } catch (SQLException e) {
-      assertEquals(PSQLState.UNIQUE_VIOLATION.getState(), e.getSQLState(), "SQL state must be for a unique violation");
-      return e.getMessage();
-    } finally {
-      conn.close();
+    /**
+     * Creates a connection with the additional properties, use it to
+     * create a temp table with a primary key, run two inserts to generate
+     * a duplicate key error, and finally return the exception message.
+     */
+    private static String testViolatePrimaryKey(Properties props, boolean batch) throws SQLException {
+        Connection conn = TestUtil.openDB(props);
+        Assumptions.assumeTrue(TestUtil.haveMinimumServerVersion(conn, ServerVersion.v9_1));
+        try {
+            TestUtil.execute(conn, CREATE_TABLE_SQL);
+            if (batch) {
+                PreparedStatement stmt = conn.prepareStatement(INSERT_SQL);
+                stmt.addBatch();
+                stmt.addBatch();
+                stmt.executeBatch();
+            } else {
+                // First insert should work
+                TestUtil.execute(conn, INSERT_SQL);
+                // Second insert should throw a duplicate key error
+                TestUtil.execute(conn, INSERT_SQL);
+            }
+        } catch (SQLException e) {
+            assertEquals(PSQLState.UNIQUE_VIOLATION.getState(), e.getSQLState(), "SQL state must be for a unique violation");
+            return e.getMessage();
+        } finally {
+            conn.close();
+        }
+        // Should never get here:
+        fail("A duplicate key exception should have occurred");
+        return null;
     }
-    // Should never get here:
-    fail("A duplicate key exception should have occurred");
-    return null;
-  }
 
-  private static String testViolatePrimaryKey(Properties props) throws SQLException {
-    return testViolatePrimaryKey(props, false);
-  }
-
-  private static void assertMessageContains(String message, String text) {
-    if (!message.toLowerCase(Locale.ROOT).contains(text.toLowerCase(Locale.ROOT))) {
-      fail(String.format("Message must contain text '%s': %s", text, message));
+    private static String testViolatePrimaryKey(Properties props) throws SQLException {
+        return testViolatePrimaryKey(props, false);
     }
-  }
 
-  private static void assertMessageDoesNotContain(String message, String text) {
-    if (message.toLowerCase(Locale.ROOT).contains(text.toLowerCase(Locale.ROOT))) {
-      fail(String.format("Message must not contain text '%s': %s", text, message));
+    private static void assertMessageContains(String message, String text) {
+        if (!message.toLowerCase(Locale.ROOT).contains(text.toLowerCase(Locale.ROOT))) {
+            fail(String.format("Message must contain text '%s': %s", text, message));
+        }
     }
-  }
 
-  @Test
-  void withDefaults() throws SQLException {
-    Properties props = new Properties();
-    String message = testViolatePrimaryKey(props);
-    assertMessageContains(message, PRIMARY_KEY_NAME);
-    // TODO: Detail is locale-specific assertMessageContains(message, "Detail:");
-    assertMessageContains(message, SECRET_VALUE);
-  }
+    private static void assertMessageDoesNotContain(String message, String text) {
+        if (message.toLowerCase(Locale.ROOT).contains(text.toLowerCase(Locale.ROOT))) {
+            fail(String.format("Message must not contain text '%s': %s", text, message));
+        }
+    }
 
-  /**
-   * NOTE: This should be the same as the default case as "true" is the default.
-   */
-  @Test
-  void withExplicitlyEnabled() throws SQLException {
-    Properties props = new Properties();
-    props.setProperty(PGProperty.LOG_SERVER_ERROR_DETAIL.getName(), "true");
-    String message = testViolatePrimaryKey(props);
-    assertMessageContains(message, PRIMARY_KEY_NAME);
-    // TODO: Detail is locale-specific assertMessageContains(message, "Detail:");
-    assertMessageContains(message, SECRET_VALUE);
-  }
+    @Test
+    void withDefaults() throws SQLException {
+        Properties props = new Properties();
+        String message = testViolatePrimaryKey(props);
+        assertMessageContains(message, PRIMARY_KEY_NAME);
+        // TODO: Detail is locale-specific assertMessageContains(message, "Detail:");
+        assertMessageContains(message, SECRET_VALUE);
+    }
 
-  @Test
-  void withLogServerErrorDetailDisabled() throws SQLException {
-    Properties props = new Properties();
-    props.setProperty(PGProperty.LOG_SERVER_ERROR_DETAIL.getName(), "false");
-    String message = testViolatePrimaryKey(props);
-    assertMessageContains(message, PRIMARY_KEY_NAME);
-    assertMessageDoesNotContain(message, "Detail:");
-    assertMessageDoesNotContain(message, SECRET_VALUE);
-  }
+    /**
+     * NOTE: This should be the same as the default case as "true" is the default.
+     */
+    @Test
+    void withExplicitlyEnabled() throws SQLException {
+        Properties props = new Properties();
+        props.setProperty(PGProperty.LOG_SERVER_ERROR_DETAIL.getName(), "true");
+        String message = testViolatePrimaryKey(props);
+        assertMessageContains(message, PRIMARY_KEY_NAME);
+        // TODO: Detail is locale-specific assertMessageContains(message, "Detail:");
+        assertMessageContains(message, SECRET_VALUE);
+    }
 
-  @Test
-  void batchWithDefaults() throws SQLException {
-    Properties props = new Properties();
-    String message = testViolatePrimaryKey(props, true);
-    assertMessageContains(message, PRIMARY_KEY_NAME);
-    // TODO: Detail is locale-specific assertMessageContains(message, "Detail:");
-    assertMessageContains(message, SECRET_VALUE);
-  }
+    @Test
+    void withLogServerErrorDetailDisabled() throws SQLException {
+        Properties props = new Properties();
+        props.setProperty(PGProperty.LOG_SERVER_ERROR_DETAIL.getName(), "false");
+        String message = testViolatePrimaryKey(props);
+        assertMessageContains(message, PRIMARY_KEY_NAME);
+        assertMessageDoesNotContain(message, "Detail:");
+        assertMessageDoesNotContain(message, SECRET_VALUE);
+    }
 
-  /**
-   * NOTE: This should be the same as the default case as "true" is the default.
-   */
-  @Test
-  void batchExplicitlyEnabled() throws SQLException {
-    Properties props = new Properties();
-    props.setProperty(PGProperty.LOG_SERVER_ERROR_DETAIL.getName(), "true");
-    String message = testViolatePrimaryKey(props, true);
-    assertMessageContains(message, PRIMARY_KEY_NAME);
-    // TODO: Detail is locale-specific assertMessageContains(message, "Detail:");
-    assertMessageContains(message, SECRET_VALUE);
-  }
+    @Test
+    void batchWithDefaults() throws SQLException {
+        Properties props = new Properties();
+        String message = testViolatePrimaryKey(props, true);
+        assertMessageContains(message, PRIMARY_KEY_NAME);
+        // TODO: Detail is locale-specific assertMessageContains(message, "Detail:");
+        assertMessageContains(message, SECRET_VALUE);
+    }
 
-  @Test
-  void batchWithLogServerErrorDetailDisabled() throws SQLException {
-    Properties props = new Properties();
-    props.setProperty(PGProperty.LOG_SERVER_ERROR_DETAIL.getName(), "false");
-    String message = testViolatePrimaryKey(props, true);
-    assertMessageContains(message, PRIMARY_KEY_NAME);
-    // TODO: Detail is locale-specific assertMessageDoesNotContain(message, "Detail:");
-    assertMessageDoesNotContain(message, SECRET_VALUE);
-  }
+    /**
+     * NOTE: This should be the same as the default case as "true" is the default.
+     */
+    @Test
+    void batchExplicitlyEnabled() throws SQLException {
+        Properties props = new Properties();
+        props.setProperty(PGProperty.LOG_SERVER_ERROR_DETAIL.getName(), "true");
+        String message = testViolatePrimaryKey(props, true);
+        assertMessageContains(message, PRIMARY_KEY_NAME);
+        // TODO: Detail is locale-specific assertMessageContains(message, "Detail:");
+        assertMessageContains(message, SECRET_VALUE);
+    }
+
+    @Test
+    void batchWithLogServerErrorDetailDisabled() throws SQLException {
+        Properties props = new Properties();
+        props.setProperty(PGProperty.LOG_SERVER_ERROR_DETAIL.getName(), "false");
+        String message = testViolatePrimaryKey(props, true);
+        assertMessageContains(message, PRIMARY_KEY_NAME);
+        // TODO: Detail is locale-specific assertMessageDoesNotContain(message, "Detail:");
+        assertMessageDoesNotContain(message, SECRET_VALUE);
+    }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/core/NativeQueryBindLengthTest.java b/pgjdbc/src/test/java/org/postgresql/test/core/NativeQueryBindLengthTest.java
index c333a2c..434b6b2 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/core/NativeQueryBindLengthTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/core/NativeQueryBindLengthTest.java
@@ -5,46 +5,44 @@
 
 package org.postgresql.test.core;
 
-import org.postgresql.core.NativeQuery;
-import org.postgresql.test.jdbc2.BaseTest4;
-
+import java.util.ArrayList;
+import java.util.List;
 import org.junit.Assert;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
-
-import java.util.ArrayList;
-import java.util.List;
+import org.postgresql.core.NativeQuery;
+import org.postgresql.test.jdbc2.BaseTest4;
 
 @RunWith(Parameterized.class)
 public class NativeQueryBindLengthTest extends BaseTest4 {
-  private final int expected;
-  private final int bindCount;
+    private final int expected;
+    private final int bindCount;
 
-  public NativeQueryBindLengthTest(String name, int expected, int bindCount) {
-    this.expected = expected;
-    this.bindCount = bindCount;
-  }
+    public NativeQueryBindLengthTest(String name, int expected, int bindCount) {
+        this.expected = expected;
+        this.bindCount = bindCount;
+    }
 
-  @Test
-  public void testBindLengthCalculation() {
-    Assert.assertEquals(expected, NativeQuery.calculateBindLength(bindCount));
-  }
+    @Parameterized.Parameters(name = "{0} == {1}")
+    public static Iterable<Object[]> data() {
+        List<Object[]> res = new ArrayList<>();
+        res.add(new Object[]{"'$1'.length = 2", 2, 1});
+        res.add(new Object[]{"'$1$2...$9'.length = 2*9", 18, 9});
+        res.add(new Object[]{"'$1$2...$9$10'.length = 2*9+3", 21, 10});
+        res.add(new Object[]{"'$1$2...$9$10..$99'.length = 2*9+3*90", 288, 99});
+        res.add(new Object[]{"'$1$2...$9$10..$99$100'.length = 2*9+3*90+4", 292, 100});
+        res.add(new Object[]{"'$1$2...$9$10..$99$100$101'.length = 2*9+3*90+4+4", 296, 101});
+        res.add(new Object[]{"'$1...$999'.length", 3888, 999});
+        res.add(new Object[]{"'$1...$1000'.length", 3893, 1000});
+        res.add(new Object[]{"'$1...$9999'.length", 48888, 9999});
+        res.add(new Object[]{"'$1...$10000'.length", 48894, 10000});
+        res.add(new Object[]{"'$1...$32767'.length", 185496, Short.MAX_VALUE});
+        return res;
+    }
 
-  @Parameterized.Parameters(name = "{0} == {1}")
-  public static Iterable<Object[]> data() {
-    List<Object[]> res = new ArrayList<>();
-    res.add(new Object[]{"'$1'.length = 2", 2, 1});
-    res.add(new Object[]{"'$1$2...$9'.length = 2*9", 18, 9});
-    res.add(new Object[]{"'$1$2...$9$10'.length = 2*9+3", 21, 10});
-    res.add(new Object[]{"'$1$2...$9$10..$99'.length = 2*9+3*90", 288, 99});
-    res.add(new Object[]{"'$1$2...$9$10..$99$100'.length = 2*9+3*90+4", 292, 100});
-    res.add(new Object[]{"'$1$2...$9$10..$99$100$101'.length = 2*9+3*90+4+4", 296, 101});
-    res.add(new Object[]{"'$1...$999'.length", 3888, 999});
-    res.add(new Object[]{"'$1...$1000'.length", 3893, 1000});
-    res.add(new Object[]{"'$1...$9999'.length", 48888, 9999});
-    res.add(new Object[]{"'$1...$10000'.length", 48894, 10000});
-    res.add(new Object[]{"'$1...$32767'.length", 185496, Short.MAX_VALUE});
-    return res;
-  }
+    @Test
+    public void testBindLengthCalculation() {
+        Assert.assertEquals(expected, NativeQuery.calculateBindLength(bindCount));
+    }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/core/OidToStringTest.java b/pgjdbc/src/test/java/org/postgresql/test/core/OidToStringTest.java
new file mode 100644
index 0000000..434f2eb
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/core/OidToStringTest.java
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2018, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.test.core;
+
+import java.util.Arrays;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.MethodSource;
+import org.postgresql.core.Oid;
+
+// Verifies Oid.toString(int): known oids map to names, 0 to UNSPECIFIED, unknown oids to "<unknown:N>".
+public class OidToStringTest {
+    public static Iterable<Object[]> data() {
+        return Arrays.asList(new Object[][]{
+                {142, "XML"},
+                {0, "UNSPECIFIED"},
+                {-235, "<unknown:-235>"},
+        });
+    }
+
+    @MethodSource("data")
+    @ParameterizedTest(name = "expected={1}, value={0}")
+    void run(int value, String expected) {
+        Assertions.assertEquals(expected, Oid.toString(value));
+    }
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/core/OidValueOfTest.java b/pgjdbc/src/test/java/org/postgresql/test/core/OidValueOfTest.java
new file mode 100644
index 0000000..54539e6
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/core/OidValueOfTest.java
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2018, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.test.core;
+
+import java.util.Arrays;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.MethodSource;
+import org.postgresql.core.Oid;
+import org.postgresql.util.PSQLException;
+// Verifies Oid.valueOf(String): known type names, UNSPECIFIED, and numeric strings.
+public class OidValueOfTest {
+    public static Iterable<Object[]> data() {
+        return Arrays.asList(new Object[][]{
+                {25, "TEXT"},
+                {0, "UNSPECIFIED"},
+                {199, "JSON_ARRAY"},
+                {100, "100"}, // a string that is not a known constant name parses as a plain oid number
+        });
+    }
+
+    @MethodSource("data")
+    @ParameterizedTest(name = "expected={0}, value={1}")
+    void run(int expected, String value) throws PSQLException {
+        Assertions.assertEquals(expected, Oid.valueOf(value));
+    }
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/core/OidValuesCorrectnessTest.java b/pgjdbc/src/test/java/org/postgresql/test/core/OidValuesCorrectnessTest.java
new file mode 100644
index 0000000..379515f
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/core/OidValuesCorrectnessTest.java
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2020, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.test.core;
+
+import java.lang.reflect.Field;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import org.junit.Assert;
+import org.junit.Assume;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.postgresql.core.Oid;
+import org.postgresql.core.ServerVersion;
+import org.postgresql.test.TestUtil;
+import org.postgresql.test.jdbc2.BaseTest4;
+
+/**
+ * Test to check if values in Oid class are correct with Oid values in a database.
+ */
+@RunWith(Parameterized.class)
+public class OidValuesCorrectnessTest extends BaseTest4 {
+
+    /**
+     * List to contain names of all variables, which should be ignored by this test.
+     * Prevents situation that a new value will be added to Oid class with ignoring the test.
+     */
+    private static List<String> oidsToIgnore = Arrays.asList(
+            "UNSPECIFIED" //UNSPECIFIED isn't an Oid, it's a value to specify that Oid value is unspecified
+    );
+    /**
+     * Map to contain Oid names with server version of their support.
+     * Prevents that some Oid values will be tested with a database not supporting given Oid.
+     */
+    private static Map<String, ServerVersion> oidsMinimumVersions;
+    /**
+     * Map to contain Oid names with their proper names from pg_type table (typname) if they are different.
+     * Helps in situation when variable name in Oid class isn't the same as typname in pg_type table.
+     */
+    private static Map<String, String> oidTypeNames;
+
+    static {
+        oidsMinimumVersions = new HashMap<>();
+        oidsMinimumVersions.put("JSON", ServerVersion.v9_2);
+        oidsMinimumVersions.put("JSON_ARRAY", ServerVersion.v9_2);
+        oidsMinimumVersions.put("JSONB", ServerVersion.v9_4);
+        oidsMinimumVersions.put("JSONB_ARRAY", ServerVersion.v9_4);
+        oidsMinimumVersions.put("MACADDR8", ServerVersion.v10);
+    }
+
+    static {
+        oidTypeNames = new HashMap<>();
+        oidTypeNames.put("BOX_ARRAY", "_BOX");
+        oidTypeNames.put("INT2_ARRAY", "_INT2");
+        oidTypeNames.put("INT4_ARRAY", "_INT4");
+        oidTypeNames.put("INT8_ARRAY", "_INT8");
+        oidTypeNames.put("TEXT_ARRAY", "_TEXT");
+        oidTypeNames.put("NUMERIC_ARRAY", "_NUMERIC");
+        oidTypeNames.put("FLOAT4_ARRAY", "_FLOAT4");
+        oidTypeNames.put("FLOAT8_ARRAY", "_FLOAT8");
+        oidTypeNames.put("BOOL_ARRAY", "_BOOL");
+        oidTypeNames.put("DATE_ARRAY", "_DATE");
+        oidTypeNames.put("TIME_ARRAY", "_TIME");
+        oidTypeNames.put("TIMETZ_ARRAY", "_TIMETZ");
+        oidTypeNames.put("TIMESTAMP_ARRAY", "_TIMESTAMP");
+        oidTypeNames.put("TIMESTAMPTZ_ARRAY", "_TIMESTAMPTZ");
+        oidTypeNames.put("BYTEA_ARRAY", "_BYTEA");
+        oidTypeNames.put("VARCHAR_ARRAY", "_VARCHAR");
+        oidTypeNames.put("OID_ARRAY", "_OID");
+        oidTypeNames.put("BPCHAR_ARRAY", "_BPCHAR");
+        oidTypeNames.put("MONEY_ARRAY", "_MONEY");
+        oidTypeNames.put("NAME_ARRAY", "_NAME");
+        oidTypeNames.put("BIT_ARRAY", "_BIT");
+        oidTypeNames.put("INTERVAL_ARRAY", "_INTERVAL");
+        oidTypeNames.put("CHAR_ARRAY", "_CHAR");
+        oidTypeNames.put("VARBIT_ARRAY", "_VARBIT");
+        oidTypeNames.put("UUID_ARRAY", "_UUID");
+        oidTypeNames.put("XML_ARRAY", "_XML");
+        oidTypeNames.put("POINT_ARRAY", "_POINT");
+        oidTypeNames.put("JSONB_ARRAY", "_JSONB");
+        oidTypeNames.put("JSON_ARRAY", "_JSON");
+        oidTypeNames.put("REF_CURSOR", "REFCURSOR");
+        oidTypeNames.put("REF_CURSOR_ARRAY", "_REFCURSOR");
+    }
+
+    @Parameterized.Parameter(0)
+    public String oidName;
+    @Parameterized.Parameter(1)
+    public int oidValue;
+
+    @Parameterized.Parameters(name = "oidName={0}, oidValue={1}")
+    public static Iterable<Object[]> data() throws IllegalAccessException {
+        Field[] fields = Oid.class.getFields();
+        List<Object[]> data = new ArrayList<>();
+
+        for (Field field : fields) {
+            if (!oidsToIgnore.contains(field.getName())) {
+                data.add(new Object[]{field.getName(), field.getInt(null)});
+            }
+        }
+
+        return data;
+    }
+
+    /**
+     * The testcase to check if expected value of Oid, read from a database, is the same as value
+     * written in the Oid class.
+     */
+    @Test
+    public void testValue() throws SQLException {
+        // check if Oid can be tested with given database by checking version
+        if (oidsMinimumVersions.containsKey(oidName)) {
+            Assume.assumeTrue(TestUtil.haveMinimumServerVersion(con, oidsMinimumVersions.get(oidName)));
+        }
+
+        String typeName = oidTypeNames.getOrDefault(oidName, oidName);
+
+        // try-with-resources closes the Statement (and its ResultSet) even when an assertion fails
+        try (Statement stmt = con.createStatement()) {
+            stmt.execute("select oid from pg_type where typname = '" + typeName.toLowerCase(Locale.ROOT) + "'");
+            ResultSet resultSet = stmt.getResultSet();
+
+            // the query must return a row, otherwise the Oid constant has no matching pg_type entry
+            Assert.assertTrue("Oid value doesn't exist for oid " + oidName + ";with used type: " + typeName,
+                    resultSet.next());
+            // check if expected value from a database is the same as value in Oid class
+            Assert.assertEquals("Wrong value for oid: " + oidName + ";with used type: " + typeName,
+                    resultSet.getInt(1), oidValue);
+        }
+
+    }
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/core/OptionsPropertyTest.java b/pgjdbc/src/test/java/org/postgresql/test/core/OptionsPropertyTest.java
index 15d655c..378df0e 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/core/OptionsPropertyTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/core/OptionsPropertyTest.java
@@ -5,60 +5,57 @@
 
 package org.postgresql.test.core;
 
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.fail;
-
-import org.postgresql.PGProperty;
-import org.postgresql.test.TestUtil;
-
-import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.Test;
-
 import java.sql.Connection;
 import java.sql.ResultSet;
 import java.sql.Statement;
 import java.util.Properties;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.postgresql.PGProperty;
+import org.postgresql.test.TestUtil;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.fail;
 
 class OptionsPropertyTest {
-  private static final String schemaName = "options_property_test";
-  private static final String optionsValue = "-c search_path=" + schemaName;
+    private static final String schemaName = "options_property_test";
+    private static final String optionsValue = "-c search_path=" + schemaName; // handed to the server as start-up options via PGProperty.OPTIONS
 
-  @BeforeEach
-  void setUp() throws Exception {
-    Connection con = TestUtil.openDB();
-    Statement stmt = con.createStatement();
-    stmt.execute("DROP SCHEMA IF EXISTS " + schemaName + ";");
-    stmt.execute("CREATE SCHEMA " + schemaName + ";");
-    stmt.close();
-    TestUtil.closeDB(con);
-  }
-
-  @Test
-  void optionsInProperties() throws Exception {
-    Properties props = new Properties();
-    props.setProperty(PGProperty.OPTIONS.getName(), optionsValue);
-
-    Connection con = TestUtil.openDB(props);
-    Statement stmt = con.createStatement();
-    stmt.execute("SHOW search_path");
-
-    ResultSet rs = stmt.getResultSet();
-    if (!rs.next()) {
-      fail("'options' connection initialization parameter should be passed to the database.");
+    @BeforeEach
+    void setUp() throws Exception {
+        Connection con = TestUtil.openDB();
+        Statement stmt = con.createStatement();
+        stmt.execute("DROP SCHEMA IF EXISTS " + schemaName + ";"); // schemaName is a compile-time constant, not user input
+        stmt.execute("CREATE SCHEMA " + schemaName + ";");
+        stmt.close();
+        TestUtil.closeDB(con);
     }
-    assertEquals(schemaName, rs.getString(1), "'options' connection initialization parameter should be passed to the database.");
 
-    stmt.close();
-    TestUtil.closeDB(con);
-  }
+    @Test
+    void optionsInProperties() throws Exception {
+        Properties props = new Properties();
+        props.setProperty(PGProperty.OPTIONS.getName(), optionsValue);
 
-  @AfterEach
-  void tearDown() throws Exception {
-    Connection con = TestUtil.openDB();
-    Statement stmt = con.createStatement();
-    stmt.execute("DROP SCHEMA " + schemaName + ";");
-    stmt.close();
-    TestUtil.closeDB(con);
-  }
+        Connection con = TestUtil.openDB(props);
+        Statement stmt = con.createStatement();
+        stmt.execute("SHOW search_path");
+
+        ResultSet rs = stmt.getResultSet();
+        if (!rs.next()) {
+            fail("'options' connection initialization parameter should be passed to the database.");
+        }
+        assertEquals(schemaName, rs.getString(1), "'options' connection initialization parameter should be passed to the database.");
+
+        stmt.close();
+        TestUtil.closeDB(con);
+    }
+
+    @AfterEach
+    void tearDown() throws Exception {
+        Connection con = TestUtil.openDB();
+        Statement stmt = con.createStatement();
+        stmt.execute("DROP SCHEMA " + schemaName + ";");
+        stmt.close();
+        TestUtil.closeDB(con);
+    }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/core/ParserTest.java b/pgjdbc/src/test/java/org/postgresql/test/core/ParserTest.java
new file mode 100644
index 0000000..4d445d1
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/core/ParserTest.java
@@ -0,0 +1,306 @@
+/*
+ * Copyright (c) 2003, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.test.core;
+
+import java.sql.SQLException;
+import java.util.List;
+import org.junit.jupiter.api.Disabled;
+import org.junit.jupiter.api.Test;
+import org.postgresql.core.NativeQuery;
+import org.postgresql.core.Parser;
+import org.postgresql.core.ServerVersion;
+import org.postgresql.core.SqlCommand;
+import org.postgresql.core.SqlCommandType;
+import org.postgresql.jdbc.EscapeSyntaxCallMode;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+/**
+ * Test cases for the Parser.
+ *
+ * @author Jeremy Whiting jwhiting@redhat.com
+ */
+class ParserTest {
+
+    /**
+     * Test to make sure delete command is detected by parser and detected via
+     * api. Mix up the case of the command to check detection continues to work.
+     */
+    @Test
+    void deleteCommandParsing() {
+        char[] command = new char[6];
+        // getChars copies exactly six characters, so each probe fully overwrites the buffer
+        "DELETE".getChars(0, 6, command, 0);
+        assertTrue(Parser.parseDeleteKeyword(command, 0), "Failed to correctly parse upper case command.");
+        "DelEtE".getChars(0, 6, command, 0);
+        assertTrue(Parser.parseDeleteKeyword(command, 0), "Failed to correctly parse mixed case command.");
+        "deletE".getChars(0, 6, command, 0);
+        assertTrue(Parser.parseDeleteKeyword(command, 0), "Failed to correctly parse mixed case command.");
+        "delete".getChars(0, 6, command, 0);
+        assertTrue(Parser.parseDeleteKeyword(command, 0), "Failed to correctly parse lower case command.");
+        "Delete".getChars(0, 6, command, 0);
+        assertTrue(Parser.parseDeleteKeyword(command, 0), "Failed to correctly parse mixed case command.");
+    }
+
+    /**
+     * Test UPDATE command parsing.
+     */
+    @Test
+    void updateCommandParsing() {
+        char[] command = new char[6];
+        "UPDATE".getChars(0, 6, command, 0);
+        assertTrue(Parser.parseUpdateKeyword(command, 0), "Failed to correctly parse upper case command.");
+        "UpDatE".getChars(0, 6, command, 0);
+        assertTrue(Parser.parseUpdateKeyword(command, 0), "Failed to correctly parse mixed case command.");
+        "updatE".getChars(0, 6, command, 0);
+        assertTrue(Parser.parseUpdateKeyword(command, 0), "Failed to correctly parse mixed case command.");
+        "Update".getChars(0, 6, command, 0);
+        assertTrue(Parser.parseUpdateKeyword(command, 0), "Failed to correctly parse mixed case command.");
+        "update".getChars(0, 6, command, 0);
+        assertTrue(Parser.parseUpdateKeyword(command, 0), "Failed to correctly parse lower case command.");
+    }
+
+    /**
+     * Test MOVE command parsing.
+     */
+    @Test
+    void moveCommandParsing() {
+        char[] command = new char[4];
+        "MOVE".getChars(0, 4, command, 0);
+        assertTrue(Parser.parseMoveKeyword(command, 0), "Failed to correctly parse upper case command.");
+        "mOVe".getChars(0, 4, command, 0);
+        assertTrue(Parser.parseMoveKeyword(command, 0), "Failed to correctly parse mixed case command.");
+        "movE".getChars(0, 4, command, 0);
+        assertTrue(Parser.parseMoveKeyword(command, 0), "Failed to correctly parse mixed case command.");
+        "Move".getChars(0, 4, command, 0);
+        assertTrue(Parser.parseMoveKeyword(command, 0), "Failed to correctly parse mixed case command.");
+        "move".getChars(0, 4, command, 0);
+        assertTrue(Parser.parseMoveKeyword(command, 0), "Failed to correctly parse lower case command.");
+    }
+
+    /**
+     * Test WITH command parsing.
+     */
+    @Test
+    void withCommandParsing() {
+        char[] command = new char[4];
+        "WITH".getChars(0, 4, command, 0);
+        assertTrue(Parser.parseWithKeyword(command, 0), "Failed to correctly parse upper case command.");
+        "wITh".getChars(0, 4, command, 0);
+        assertTrue(Parser.parseWithKeyword(command, 0), "Failed to correctly parse mixed case command.");
+        "witH".getChars(0, 4, command, 0);
+        assertTrue(Parser.parseWithKeyword(command, 0), "Failed to correctly parse mixed case command.");
+        "With".getChars(0, 4, command, 0);
+        assertTrue(Parser.parseWithKeyword(command, 0), "Failed to correctly parse mixed case command.");
+        "with".getChars(0, 4, command, 0);
+        assertTrue(Parser.parseWithKeyword(command, 0), "Failed to correctly parse lower case command.");
+    }
+
+    /**
+     * Test SELECT command parsing.
+     */
+    @Test
+    void selectCommandParsing() {
+        char[] command = new char[6];
+        "SELECT".getChars(0, 6, command, 0);
+        assertTrue(Parser.parseSelectKeyword(command, 0), "Failed to correctly parse upper case command.");
+        "sELect".getChars(0, 6, command, 0);
+        assertTrue(Parser.parseSelectKeyword(command, 0), "Failed to correctly parse mixed case command.");
+        "selecT".getChars(0, 6, command, 0);
+        assertTrue(Parser.parseSelectKeyword(command, 0), "Failed to correctly parse mixed case command.");
+        "Select".getChars(0, 6, command, 0);
+        assertTrue(Parser.parseSelectKeyword(command, 0), "Failed to correctly parse mixed case command.");
+        "select".getChars(0, 6, command, 0);
+        assertTrue(Parser.parseSelectKeyword(command, 0), "Failed to correctly parse lower case command.");
+    }
+
+    @Test
+    void escapeProcessing() throws Exception {
+        assertEquals("DATE '1999-01-09'", Parser.replaceProcessing("{d '1999-01-09'}", true, false));
+        assertEquals("DATE '1999-01-09'", Parser.replaceProcessing("{D  '1999-01-09'}", true, false));
+        assertEquals("TIME '20:00:03'", Parser.replaceProcessing("{t '20:00:03'}", true, false));
+        assertEquals("TIME '20:00:03'", Parser.replaceProcessing("{T '20:00:03'}", true, false));
+        assertEquals("TIMESTAMP '1999-01-09 20:11:11.123455'", Parser.replaceProcessing("{ts '1999-01-09 20:11:11.123455'}", true, false));
+        assertEquals("TIMESTAMP '1999-01-09 20:11:11.123455'", Parser.replaceProcessing("{Ts '1999-01-09 20:11:11.123455'}", true, false));
+
+        assertEquals("user", Parser.replaceProcessing("{fn user()}", true, false));
+        assertEquals("cos(1)", Parser.replaceProcessing("{fn cos(1)}", true, false));
+        assertEquals("extract(week from DATE '2005-01-24')", Parser.replaceProcessing("{fn week({d '2005-01-24'})}", true, false));
+
+        assertEquals("\"T1\" LEFT OUTER JOIN t2 ON \"T1\".id = t2.id",
+                Parser.replaceProcessing("{oj \"T1\" LEFT OUTER JOIN t2 ON \"T1\".id = t2.id}", true, false));
+
+        assertEquals("ESCAPE '_'", Parser.replaceProcessing("{escape '_'}", true, false));
+
+        // nothing should be changed in that case, no valid escape code
+        assertEquals("{obj : 1}", Parser.replaceProcessing("{obj : 1}", true, false));
+    }
+
+    @Test
+    void modifyJdbcCall() throws SQLException {
+        assertEquals("select * from pack_getValue(?) as result", Parser.modifyJdbcCall("{ ? = call pack_getValue}", true, ServerVersion.v9_6.getVersionNum(), 3, EscapeSyntaxCallMode.SELECT).getSql());
+        assertEquals("select * from pack_getValue(?,?)  as result", Parser.modifyJdbcCall("{ ? = call pack_getValue(?) }", true, ServerVersion.v9_6.getVersionNum(), 3, EscapeSyntaxCallMode.SELECT).getSql());
+        assertEquals("select * from pack_getValue(?) as result", Parser.modifyJdbcCall("{ ? = call pack_getValue()}", true, ServerVersion.v9_6.getVersionNum(), 3, EscapeSyntaxCallMode.SELECT).getSql());
+        assertEquals("select * from pack_getValue(?,?,?,?)  as result", Parser.modifyJdbcCall("{ ? = call pack_getValue(?,?,?) }", true, ServerVersion.v9_6.getVersionNum(), 3, EscapeSyntaxCallMode.SELECT).getSql());
+        assertEquals("select * from lower(?,?) as result", Parser.modifyJdbcCall("{ ? = call lower(?)}", true, ServerVersion.v9_6.getVersionNum(), 3, EscapeSyntaxCallMode.SELECT).getSql());
+        assertEquals("select * from lower(?,?) as result", Parser.modifyJdbcCall("{ ? = call lower(?)}", true, ServerVersion.v9_6.getVersionNum(), 3, EscapeSyntaxCallMode.CALL_IF_NO_RETURN).getSql());
+        assertEquals("select * from lower(?,?) as result", Parser.modifyJdbcCall("{ ? = call lower(?)}", true, ServerVersion.v9_6.getVersionNum(), 3, EscapeSyntaxCallMode.CALL).getSql());
+        assertEquals("select * from lower(?,?) as result", Parser.modifyJdbcCall("{call lower(?,?)}", true, ServerVersion.v9_6.getVersionNum(), 3, EscapeSyntaxCallMode.SELECT).getSql());
+        assertEquals("select * from lower(?,?) as result", Parser.modifyJdbcCall("{call lower(?,?)}", true, ServerVersion.v9_6.getVersionNum(), 3, EscapeSyntaxCallMode.CALL_IF_NO_RETURN).getSql());
+        assertEquals("select * from lower(?,?) as result", Parser.modifyJdbcCall("{call lower(?,?)}", true, ServerVersion.v9_6.getVersionNum(), 3, EscapeSyntaxCallMode.CALL).getSql());
+        assertEquals("select * from lower(?,?) as result", Parser.modifyJdbcCall("{ ? = call lower(?)}", true, ServerVersion.v11.getVersionNum(), 3, EscapeSyntaxCallMode.SELECT).getSql());
+        assertEquals("select * from lower(?,?) as result", Parser.modifyJdbcCall("{ ? = call lower(?)}", true, ServerVersion.v11.getVersionNum(), 3, EscapeSyntaxCallMode.CALL_IF_NO_RETURN).getSql());
+        assertEquals("call lower(?,?)", Parser.modifyJdbcCall("{ ? = call lower(?)}", true, ServerVersion.v11.getVersionNum(), 3, EscapeSyntaxCallMode.CALL).getSql());
+        assertEquals("select * from lower(?,?) as result", Parser.modifyJdbcCall("{call lower(?,?)}", true, ServerVersion.v11.getVersionNum(), 3, EscapeSyntaxCallMode.SELECT).getSql());
+        assertEquals("call lower(?,?)", Parser.modifyJdbcCall("{call lower(?,?)}", true, ServerVersion.v11.getVersionNum(), 3, EscapeSyntaxCallMode.CALL_IF_NO_RETURN).getSql());
+        assertEquals("call lower(?,?)", Parser.modifyJdbcCall("{call lower(?,?)}", true, ServerVersion.v11.getVersionNum(), 3, EscapeSyntaxCallMode.CALL).getSql());
+    }
+
+    @Test
+    void unterminatedEscape() throws Exception {
+        assertEquals("{oj ", Parser.replaceProcessing("{oj ", true, false));
+    }
+
+    @Test
+    @Disabled(value = "returning in the select clause is hard to distinguish from insert ... returning *")
+    void insertSelectFakeReturning() throws SQLException {
+        String query =
+                "insert test(id, name) select 1, 'value' as RETURNING from test2";
+        List<NativeQuery> qry =
+                Parser.parseJdbcSql(
+                        query, true, true, true, true, true);
+        boolean returningKeywordPresent = qry.get(0).command.isReturningKeywordPresent();
+        assertFalse(returningKeywordPresent, "Query does not have returning clause " + query);
+    }
+
+    @Test
+    void insertSelectReturning() throws SQLException {
+        String query =
+                "insert test(id, name) select 1, 'value' from test2 RETURNING id";
+        List<NativeQuery> qry =
+                Parser.parseJdbcSql(
+                        query, true, true, true, true, true);
+        boolean returningKeywordPresent = qry.get(0).command.isReturningKeywordPresent();
+        assertTrue(returningKeywordPresent, "Query has a returning clause " + query);
+    }
+
+    @Test
+    void insertReturningInWith() throws SQLException {
+        String query =
+                "with x as (insert into mytab(x) values(1) returning x) insert test(id, name) select 1, 'value' from test2";
+        List<NativeQuery> qry =
+                Parser.parseJdbcSql(
+                        query, true, true, true, true, true);
+        boolean returningKeywordPresent = qry.get(0).command.isReturningKeywordPresent();
+        assertFalse(returningKeywordPresent, "There's no top-level <<returning>> clause " + query);
+    }
+
+    @Test
+    void insertBatchedReWriteOnConflict() throws SQLException {
+        String query = "insert into test(id, name) values (:id,:name) ON CONFLICT (id) DO NOTHING";
+        List<NativeQuery> qry = Parser.parseJdbcSql(query, true, true, true, true, true);
+        SqlCommand command = qry.get(0).getCommand();
+        assertEquals(34, command.getBatchRewriteValuesBraceOpenPosition());
+        assertEquals(44, command.getBatchRewriteValuesBraceClosePosition());
+    }
+
+    @Test
+    void insertBatchedReWriteOnConflictUpdateBind() throws SQLException {
+        String query = "insert into test(id, name) values (?,?) ON CONFLICT (id) UPDATE SET name=?";
+        List<NativeQuery> qry = Parser.parseJdbcSql(query, true, true, true, true, true);
+        SqlCommand command = qry.get(0).getCommand();
+        assertFalse(command.isBatchedReWriteCompatible(), "update set name=? is NOT compatible with insert rewrite");
+    }
+
+    @Test
+    void insertBatchedReWriteOnConflictUpdateConstant() throws SQLException {
+        String query = "insert into test(id, name) values (?,?) ON CONFLICT (id) UPDATE SET name='default'";
+        List<NativeQuery> qry = Parser.parseJdbcSql(query, true, true, true, true, true);
+        SqlCommand command = qry.get(0).getCommand();
+        assertTrue(command.isBatchedReWriteCompatible(), "update set name='default' is compatible with insert rewrite");
+    }
+
+    @Test
+    void insertMultiInsert() throws SQLException {
+        String query =
+                "insert into test(id, name) values (:id,:name),(:id,:name) ON CONFLICT (id) DO NOTHING";
+        List<NativeQuery> qry = Parser.parseJdbcSql(query, true, true, true, true, true);
+        SqlCommand command = qry.get(0).getCommand();
+        assertEquals(34, command.getBatchRewriteValuesBraceOpenPosition());
+        assertEquals(56, command.getBatchRewriteValuesBraceClosePosition());
+    }
+
+    @Test
+    void valuesTableParse() throws SQLException {
+        String query = "insert into values_table (id, name) values (?,?)";
+        List<NativeQuery> qry = Parser.parseJdbcSql(query, true, true, true, true, true);
+        SqlCommand command = qry.get(0).getCommand();
+        assertEquals(43, command.getBatchRewriteValuesBraceOpenPosition());
+        assertEquals(49, command.getBatchRewriteValuesBraceClosePosition());
+
+        query = "insert into table_values (id, name) values (?,?)";
+        qry = Parser.parseJdbcSql(query, true, true, true, true, true);
+        command = qry.get(0).getCommand();
+        assertEquals(43, command.getBatchRewriteValuesBraceOpenPosition());
+        assertEquals(49, command.getBatchRewriteValuesBraceClosePosition());
+    }
+
+    @Test
+    void createTableParseWithOnDeleteClause() throws SQLException {
+        String[] returningColumns = {"*"};
+        String query = "create table \"testTable\" (\"id\" INT SERIAL NOT NULL PRIMARY KEY, \"foreignId\" INT REFERENCES \"otherTable\" (\"id\") ON DELETE NO ACTION)";
+        List<NativeQuery> qry = Parser.parseJdbcSql(query, true, true, true, true, true, returningColumns);
+        SqlCommand command = qry.get(0).getCommand();
+        assertFalse(command.isReturningKeywordPresent(), "No returning keyword should be present");
+        assertEquals(SqlCommandType.CREATE, command.getType());
+    }
+
+    @Test
+    void createTableParseWithOnUpdateClause() throws SQLException {
+        String[] returningColumns = {"*"};
+        String query = "create table \"testTable\" (\"id\" INT SERIAL NOT NULL PRIMARY KEY, \"foreignId\" INT REFERENCES \"otherTable\" (\"id\")) ON UPDATE NO ACTION";
+        List<NativeQuery> qry = Parser.parseJdbcSql(query, true, true, true, true, true, returningColumns);
+        SqlCommand command = qry.get(0).getCommand();
+        assertFalse(command.isReturningKeywordPresent(), "No returning keyword should be present");
+        assertEquals(SqlCommandType.CREATE, command.getType());
+    }
+
+    @Test
+    void alterTableParseWithOnDeleteClause() throws SQLException {
+        String[] returningColumns = {"*"};
+        String query = "alter table \"testTable\" ADD \"foreignId\" INT REFERENCES \"otherTable\" (\"id\") ON DELETE NO ACTION";
+        List<NativeQuery> qry = Parser.parseJdbcSql(query, true, true, true, true, true, returningColumns);
+        SqlCommand command = qry.get(0).getCommand();
+        assertFalse(command.isReturningKeywordPresent(), "No returning keyword should be present");
+        assertEquals(SqlCommandType.ALTER, command.getType());
+    }
+
+    @Test
+    void alterTableParseWithOnUpdateClause() throws SQLException {
+        String[] returningColumns = {"*"};
+        String query = "alter table \"testTable\" ADD \"foreignId\" INT REFERENCES \"otherTable\" (\"id\") ON UPDATE RESTRICT";
+        List<NativeQuery> qry = Parser.parseJdbcSql(query, true, true, true, true, true, returningColumns);
+        SqlCommand command = qry.get(0).getCommand();
+        assertFalse(command.isReturningKeywordPresent(), "No returning keyword should be present");
+        assertEquals(SqlCommandType.ALTER, command.getType());
+    }
+
+    @Test
+    void parseV14functions() throws SQLException {
+        String[] returningColumns = {"*"};
+        String query = "CREATE OR REPLACE FUNCTION asterisks(n int)\n"
+                + "  RETURNS SETOF text\n"
+                + "  LANGUAGE sql IMMUTABLE STRICT PARALLEL SAFE\n"
+                + "BEGIN ATOMIC\n"
+                + "SELECT repeat('*', g) FROM generate_series (1, n) g; \n"
+                + "END;";
+        List<NativeQuery> qry = Parser.parseJdbcSql(query, true, true, true, true, true, returningColumns);
+        assertNotNull(qry);
+        assertEquals(1, qry.size(), "There should only be one query returned here");
+    }
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/core/QueryExecutorTest.java b/pgjdbc/src/test/java/org/postgresql/test/core/QueryExecutorTest.java
index 3381def..5f7aa1d 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/core/QueryExecutorTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/core/QueryExecutorTest.java
@@ -5,67 +5,65 @@
 
 package org.postgresql.test.core;
 
+import java.sql.SQLException;
+import java.util.Set;
+import org.junit.Test;
 import org.postgresql.core.BaseConnection;
 import org.postgresql.core.QueryExecutor;
 import org.postgresql.test.jdbc2.BaseTest4;
 
-import org.junit.Test;
-
-import java.sql.SQLException;
-import java.util.Set;
-
 /**
  * TestCase to test handling of binary types.
  */
 public class QueryExecutorTest extends BaseTest4 {
-  /**
-   * Make sure the functions for adding binary transfer OIDs for custom types are correct.
-   *
-   * @throws SQLException if a database error occurs
-   */
-  @Test
-  public void testBinaryTransferOids() throws SQLException {
-    QueryExecutor queryExecutor = con.unwrap(BaseConnection.class).getQueryExecutor();
-    // get current OIDs (make a copy of them)
-    @SuppressWarnings("deprecation")
-    Set<? extends Integer> oidsReceive = queryExecutor.getBinaryReceiveOids();
-    @SuppressWarnings("deprecation")
-    Set<? extends Integer> oidsSend = queryExecutor.getBinarySendOids();
-    // add a new OID to be transferred as binary data
-    int customTypeOid = 91716;
-    assertBinaryForReceive(customTypeOid, false,
-        () -> "Custom type OID should not be binary for receive by default");
-    // first for receiving
-    queryExecutor.addBinaryReceiveOid(customTypeOid);
-    // Verify
-    assertBinaryForReceive(customTypeOid, true,
-        () -> "Just added oid via addBinaryReceiveOid");
-    assertBinaryForSend(customTypeOid, false,
-        () -> "Just added oid via addBinaryReceiveOid");
-    for (int oid : oidsReceive) {
-      assertBinaryForReceive(oid, true,
-          () -> "Previously registered BinaryReceiveOids should be intact after "
-              + "addBinaryReceiveOid(" + customTypeOid + ")");
+    /**
+     * Make sure the functions for adding binary transfer OIDs for custom types are correct.
+     *
+     * @throws SQLException if a database error occurs
+     */
+    @Test
+    public void testBinaryTransferOids() throws SQLException {
+        QueryExecutor queryExecutor = con.unwrap(BaseConnection.class).getQueryExecutor();
+        // get current OIDs (make a copy of them)
+        @SuppressWarnings("deprecation")
+        Set<? extends Integer> oidsReceive = queryExecutor.getBinaryReceiveOids();
+        @SuppressWarnings("deprecation")
+        Set<? extends Integer> oidsSend = queryExecutor.getBinarySendOids();
+        // add a new OID to be transferred as binary data
+        int customTypeOid = 91716;
+        assertBinaryForReceive(customTypeOid, false,
+                () -> "Custom type OID should not be binary for receive by default");
+        // first for receiving
+        queryExecutor.addBinaryReceiveOid(customTypeOid);
+        // Verify
+        assertBinaryForReceive(customTypeOid, true,
+                () -> "Just added oid via addBinaryReceiveOid");
+        assertBinaryForSend(customTypeOid, false,
+                () -> "Just added oid via addBinaryReceiveOid");
+        for (int oid : oidsReceive) {
+            assertBinaryForReceive(oid, true,
+                    () -> "Previously registered BinaryReceiveOids should be intact after "
+                            + "addBinaryReceiveOid(" + customTypeOid + ")");
+        }
+        for (int oid : oidsSend) {
+            assertBinaryForSend(oid, true,
+                    () -> "Previously registered BinarySendOids should be intact after "
+                            + "addBinaryReceiveOid(" + customTypeOid + ")");
+        }
+        // then for sending
+        queryExecutor.addBinarySendOid(customTypeOid);
+        // check new OID
+        assertBinaryForReceive(customTypeOid, true, () -> "added oid via addBinaryReceiveOid and "
+                + "addBinarySendOid");
+        assertBinaryForSend(customTypeOid, true, () -> "added oid via addBinaryReceiveOid and "
+                + "addBinarySendOid");
+        for (int oid : oidsReceive) {
+            assertBinaryForReceive(oid, true, () -> "Previously registered BinaryReceiveOids should be "
+                    + "intact after addBinaryReceiveOid(" + customTypeOid + ") and addBinarySendOid(" + customTypeOid + ")");
+        }
+        for (int oid : oidsSend) {
+            assertBinaryForSend(oid, true, () -> "Previously registered BinarySendOids should be intact"
+                    + " after addBinaryReceiveOid(" + customTypeOid + ")");
+        }
     }
-    for (int oid : oidsSend) {
-      assertBinaryForSend(oid, true,
-          () -> "Previously registered BinarySendOids should be intact after "
-              + "addBinaryReceiveOid(" + customTypeOid + ")");
-    }
-    // then for sending
-    queryExecutor.addBinarySendOid(customTypeOid);
-    // check new OID
-    assertBinaryForReceive(customTypeOid, true, () -> "added oid via addBinaryReceiveOid and "
-        + "addBinarySendOid");
-    assertBinaryForSend(customTypeOid, true, () -> "added oid via addBinaryReceiveOid and "
-        + "addBinarySendOid");
-    for (int oid : oidsReceive) {
-      assertBinaryForReceive(oid, true, () -> "Previously registered BinaryReceiveOids should be "
-          + "intact after addBinaryReceiveOid(" + customTypeOid + ") and addBinarySendOid(" + customTypeOid + ")");
-    }
-    for (int oid : oidsSend) {
-      assertBinaryForSend(oid, true, () -> "Previously registered BinarySendOids should be intact"
-          + " after addBinaryReceiveOid(" + customTypeOid + ")");
-    }
-  }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/core/ReturningParserTest.java b/pgjdbc/src/test/java/org/postgresql/test/core/ReturningParserTest.java
new file mode 100644
index 0000000..91fbc62
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/core/ReturningParserTest.java
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2003, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.test.core;
+
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.MethodSource;
+import org.postgresql.core.NativeQuery;
+import org.postgresql.core.Parser;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+public class ReturningParserTest {
+    public static Iterable<Object[]> data() {
+        Collection<Object[]> ids = new ArrayList<>();
+
+        String[] delimiters = {"", "_", "3", "*", " "};
+
+        for (String columnName : new String[]{"returning", "returningreturning"}) {
+            for (String prefix : delimiters) {
+                for (String suffix : delimiters) {
+                    for (String returning : new String[]{"returning", "returningreturning"}) {
+                        ids.add(new Object[]{columnName, returning, prefix, suffix});
+                    }
+                }
+            }
+        }
+        return ids;
+    }
+
+    @MethodSource("data")
+    @ParameterizedTest(name = "columnName={2} {0} {3}, returning={2} {1} {3}")
+    void test(String columnName, String returning, String prefix, String suffix) throws SQLException {
+        String query =
+                "insert into\"prep\"(a, " + prefix + columnName + suffix + ")values(1,2)" + prefix
+                        + returning + suffix;
+        List<NativeQuery> qry = Parser.parseJdbcSql(query, true, true, true, true, true);
+        boolean returningKeywordPresent = qry.get(0).command.isReturningKeywordPresent();
+
+        boolean expectedReturning = "returning".equalsIgnoreCase(returning)
+                && (prefix.isEmpty() || !Character.isJavaIdentifierStart(prefix.charAt(0)))
+                && (suffix.isEmpty() || !Character.isJavaIdentifierPart(suffix.charAt(0)));
+        if (expectedReturning != returningKeywordPresent) {
+            assertEquals(expectedReturning,
+                    returningKeywordPresent,
+                    "Wrong <returning_clause> detected in SQL " + query);
+        }
+    }
+
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/core/UTF8EncodingTest.java b/pgjdbc/src/test/java/org/postgresql/test/core/UTF8EncodingTest.java
new file mode 100644
index 0000000..16bd505
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/core/UTF8EncodingTest.java
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2019, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.test.core;
+
+import java.util.ArrayList;
+import java.util.List;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.MethodSource;
+import org.postgresql.core.Encoding;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+public class UTF8EncodingTest {
+
+    private static final int STEP = 8 * 1024;
+
+    public static Iterable<Object[]> data() {
+        final StringBuilder reallyLongString = new StringBuilder(1024 * 1024);
+        for (int i = 0; i < 185000; i++) {
+            reallyLongString.append(i);
+        }
+
+        final List<String> strings = new ArrayList<>(150);
+        strings.add("short simple");
+        strings.add("longer but still not really all that long");
+        strings.add(reallyLongString.toString());
+        strings.add(reallyLongString.append('\u03C0').toString()); // add multi-byte to end of a long string
+        strings.add(reallyLongString.delete((32 * 1024) + 5, reallyLongString.capacity() - 1).toString());
+        strings.add(reallyLongString.append('\u00DC').toString()); // add high order char to end of mid length string
+        strings.add(reallyLongString.delete((16 * 1024) + 5, reallyLongString.capacity() - 1).toString());
+        strings.add(reallyLongString.append('\u00DD').toString()); // add high order char to end of mid length string
+        strings.add("e\u00E4t \u03A3 \u03C0 \u798F, it is good"); // need to test some multi-byte characters
+
+        for (int i = 1; i < 0xd800; i += STEP) {
+            int count = (i + STEP) > 0xd800 ? 0xd800 - i : STEP;
+            char[] testChars = new char[count];
+            for (int j = 0; j < count; j++) {
+                testChars[j] = (char) (i + j);
+            }
+
+            strings.add(new String(testChars));
+        }
+
+        for (int i = 0xe000; i < 0x10000; i += STEP) {
+            int count = (i + STEP) > 0x10000 ? 0x10000 - i : STEP;
+            char[] testChars = new char[count];
+            for (int j = 0; j < count; j++) {
+                testChars[j] = (char) (i + j);
+            }
+
+            strings.add(new String(testChars));
+        }
+
+        for (int i = 0x10000; i < 0x110000; i += STEP) {
+            int count = (i + STEP) > 0x110000 ? 0x110000 - i : STEP;
+            char[] testChars = new char[count * 2];
+            for (int j = 0; j < count; j++) {
+                testChars[j * 2] = (char) (0xd800 + ((i + j - 0x10000) >> 10));
+                testChars[j * 2 + 1] = (char) (0xdc00 + ((i + j - 0x10000) & 0x3ff));
+            }
+
+            strings.add(new String(testChars));
+        }
+
+        final List<Object[]> data = new ArrayList<>(strings.size() * 2);
+        for (String string : strings) {
+            String shortString = string;
+            if (shortString != null && shortString.length() > 1000) {
+                shortString = shortString.substring(0, 100) + "...(" + string.length() + " chars)";
+            }
+            data.add(new Object[]{Encoding.getDatabaseEncoding("UNICODE"), string, shortString});
+        }
+        return data;
+    }
+
+    @MethodSource("data")
+    @ParameterizedTest(name = "string={2}, encoding={0}")
+    void test(Encoding encoding, String string, String shortString) throws Exception {
+        final byte[] encoded = encoding.encode(string);
+        assertEquals(string, encoding.decode(encoded));
+    }
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/core/v3/V3ParameterListTests.java b/pgjdbc/src/test/java/org/postgresql/test/core/v3/V3ParameterListTests.java
new file mode 100644
index 0000000..f71201e
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/core/v3/V3ParameterListTests.java
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2003, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.test.core.v3;
+
+import java.sql.SQLException;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.postgresql.core.v3.SimpleParameterList;
+import org.postgresql.core.v3.TypeTransferModeRegistry;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+/**
+ * Test cases to make sure the parameterlist implementation works as expected.
+ *
+ * @author Jeremy Whiting jwhiting@redhat.com
+ */
+class V3ParameterListTests {
+    private TypeTransferModeRegistry transferModeRegistry;
+
+    @BeforeEach
+    void setUp() throws Exception {
+        transferModeRegistry = new TypeTransferModeRegistry() {
+            @Override
+            public boolean useBinaryForSend(int oid) {
+                return false;
+            }
+
+            @Override
+            public boolean useBinaryForReceive(int oid) {
+                return false;
+            }
+        };
+    }
+
+    /**
+     * Test to check the merging of two collections of parameters. All elements
+     * are kept.
+     *
+     * @throws SQLException raised exception if setting parameter fails.
+     */
+    @Test
+    void mergeOfParameterLists() throws SQLException {
+        SimpleParameterList s1SPL = new SimpleParameterList(8, transferModeRegistry);
+        s1SPL.setIntParameter(1, 1);
+        s1SPL.setIntParameter(2, 2);
+        s1SPL.setIntParameter(3, 3);
+        s1SPL.setIntParameter(4, 4);
+
+        SimpleParameterList s2SPL = new SimpleParameterList(4, transferModeRegistry);
+        s2SPL.setIntParameter(1, 5);
+        s2SPL.setIntParameter(2, 6);
+        s2SPL.setIntParameter(3, 7);
+        s2SPL.setIntParameter(4, 8);
+
+        s1SPL.appendAll(s2SPL);
+        assertEquals(
+                "<[('1'::int4) ,('2'::int4) ,('3'::int4) ,('4'::int4) ,('5'::int4) ,('6'::int4) ,('7'::int4) ,('8'::int4)]>", s1SPL.toString(), "Expected string representation of values does not match outcome.");
+    }
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/core/v3/adaptivefetch/AdaptiveFetchCacheTest.java b/pgjdbc/src/test/java/org/postgresql/test/core/v3/adaptivefetch/AdaptiveFetchCacheTest.java
new file mode 100644
index 0000000..bbc53ab
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/core/v3/adaptivefetch/AdaptiveFetchCacheTest.java
@@ -0,0 +1,1086 @@
+/*
+ * Copyright (c) 2020, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.test.core.v3.adaptivefetch;
+
+import java.lang.reflect.Field;
+import java.sql.SQLException;
+import java.util.Map;
+import java.util.Properties;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.postgresql.PGProperty;
+import org.postgresql.core.ParameterList;
+import org.postgresql.core.Query;
+import org.postgresql.core.SqlCommand;
+import org.postgresql.core.v3.adaptivefetch.AdaptiveFetchCache;
+import org.postgresql.core.v3.adaptivefetch.AdaptiveFetchCacheEntry;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+
+/**
+ * Unit tests for AdaptiveFetchCache class.
+ */
+class AdaptiveFetchCacheTest {
+
+    // Strings containing variables names in AdaptiveFetchCache class
+    private static final String infoMapVariableName = "adaptiveFetchInfoMap";
+    private static final String minimumSizeVariableName = "minimumAdaptiveFetchSize";
+    private static final String maximumSizeVariableName = "maximumAdaptiveFetchSize";
+    private static final String adaptiveFetchVariableName = "adaptiveFetch";
+    private static final String maximumBufferSizeVariableName = "maximumResultBufferSize";
+    private AdaptiveFetchCache adaptiveFetchCache;
+    private int size;
+
+    /**
+     * Simple setup to create new AdaptiveFetchCache with buffer size 1000.
+     */
+    @BeforeEach
+    void setUp() throws SQLException {
+        Properties properties = new Properties();
+        size = 1000;
+        adaptiveFetchCache = new AdaptiveFetchCache(size, properties);
+    }
+
+    /**
+     * Tests for calling constructor with empty properties (just asserts after setUp).
+     */
+    @Test
+    void constructorDefault() throws NoSuchFieldException, IllegalAccessException {
+        assertNotNull(getInfoMapVariable());
+        assertEquals(size, getMaximumBufferVariable());
+        assertFalse(getAdaptiveFetchVariable());
+        assertEquals(0, getMinimumSizeVariable());
+        assertEquals(-1, getMaximumSizeVariable());
+    }
+
+    /**
+     * Test for calling constructor with information about adaptiveFetch property.
+     */
+    @Test
+    void constructorWithAdaptiveFetch()
+            throws SQLException, NoSuchFieldException, IllegalAccessException {
+        Properties properties = new Properties();
+        boolean expectedValue = true;
+        PGProperty.ADAPTIVE_FETCH.set(properties, expectedValue);
+
+        adaptiveFetchCache = new AdaptiveFetchCache(size, properties);
+
+        assertNotNull(getInfoMapVariable());
+        assertEquals(size, getMaximumBufferVariable());
+        assertEquals(expectedValue, getAdaptiveFetchVariable());
+        assertEquals(0, getMinimumSizeVariable());
+        assertEquals(-1, getMaximumSizeVariable());
+    }
+
+    /**
+     * Test for calling constructor with information about adaptiveFetchMinimum property.
+     */
+    @Test
+    void constructorWithMinimumSize()
+            throws SQLException, NoSuchFieldException, IllegalAccessException {
+        Properties properties = new Properties();
+        int expectedValue = 100;
+        PGProperty.ADAPTIVE_FETCH_MINIMUM.set(properties, expectedValue);
+
+        adaptiveFetchCache = new AdaptiveFetchCache(size, properties);
+
+        assertNotNull(getInfoMapVariable());
+        assertEquals(size, getMaximumBufferVariable());
+        assertFalse(getAdaptiveFetchVariable());
+        assertEquals(expectedValue, getMinimumSizeVariable());
+        assertEquals(-1, getMaximumSizeVariable());
+    }
+
+    /**
+     * Test for calling constructor with information about adaptiveFetchMaximum property.
+     */
+    @Test
+    void constructorWithMaximumSize()
+            throws SQLException, NoSuchFieldException, IllegalAccessException {
+        Properties properties = new Properties();
+        int expectedValue = 100;
+        PGProperty.ADAPTIVE_FETCH_MAXIMUM.set(properties, expectedValue);
+
+        adaptiveFetchCache = new AdaptiveFetchCache(size, properties);
+
+        assertNotNull(getInfoMapVariable());
+        assertEquals(size, getMaximumBufferVariable());
+        assertFalse(getAdaptiveFetchVariable());
+        assertEquals(0, getMinimumSizeVariable());
+        assertEquals(expectedValue, getMaximumSizeVariable());
+    }
+
+    /**
+     * Test for calling constructor with information about adaptiveFetch, adaptiveFetchMinimum and
+     * adaptiveFetchMaximum properties.
+     */
+    @Test
+    void constructorWithAllProperties()
+            throws SQLException, NoSuchFieldException, IllegalAccessException {
+        Properties properties = new Properties();
+        boolean expectedAdaptiveFetchValue = false;
+        int expectedMinimumSizeValue = 70;
+        int expectedMaximumSizeValue = 130;
+        PGProperty.ADAPTIVE_FETCH.set(properties, expectedAdaptiveFetchValue);
+        PGProperty.ADAPTIVE_FETCH_MINIMUM.set(properties, expectedMinimumSizeValue);
+        PGProperty.ADAPTIVE_FETCH_MAXIMUM.set(properties, expectedMaximumSizeValue);
+
+        adaptiveFetchCache = new AdaptiveFetchCache(size, properties);
+
+        assertNotNull(getInfoMapVariable());
+        assertEquals(size, getMaximumBufferVariable());
+        assertEquals(expectedAdaptiveFetchValue, getAdaptiveFetchVariable());
+        assertEquals(expectedMinimumSizeValue, getMinimumSizeVariable());
+        assertEquals(expectedMaximumSizeValue, getMaximumSizeVariable());
+    }
+
+
+    /**
+     * Test for calling addNewQuery method.
+     */
+    @Test
+    void addingSingleQuery() throws NoSuchFieldException, IllegalAccessException {
+        String expectedQuery = "test-query";
+        boolean adaptiveFetch = true;
+
+        adaptiveFetchCache.addNewQuery(adaptiveFetch, new MockUpQuery(expectedQuery));
+
+        Map<String, AdaptiveFetchCacheEntry> map = getInfoMapVariable();
+
+        assertEquals(1, map.size());
+        assertNotNull(map.get(expectedQuery));
+    }
+
+    /**
+     * Test for calling addNewQuery method, but adaptiveFetch is set to false.
+     */
+    @Test
+    void addingSingleQueryWithoutAdaptiveFetch()
+            throws NoSuchFieldException, IllegalAccessException {
+        String expectedQuery = "test-query";
+        boolean adaptiveFetch = false;
+
+        adaptiveFetchCache.addNewQuery(adaptiveFetch, new MockUpQuery(expectedQuery));
+
+        Map<String, AdaptiveFetchCacheEntry> map = getInfoMapVariable();
+
+        assertEquals(0, map.size());
+        assertNull(map.get(expectedQuery));
+    }
+
+    /**
+     * Test for calling addNewQuery method twice with the same query. The query should be added only
+     * once, with counter set as 2.
+     */
+    @Test
+    void addingSameQueryTwoTimes() throws NoSuchFieldException, IllegalAccessException {
+        String expectedQuery = "test-query";
+        boolean adaptiveFetch = true;
+
+        adaptiveFetchCache.addNewQuery(adaptiveFetch, new MockUpQuery(expectedQuery));
+        adaptiveFetchCache.addNewQuery(adaptiveFetch, new MockUpQuery(expectedQuery));
+
+        Map<String, AdaptiveFetchCacheEntry> map = getInfoMapVariable();
+
+        assertEquals(1, map.size());
+        assertNotNull(map.get(expectedQuery));
+        assertEquals(2, map.get(expectedQuery).getCounter());
+    }
+
+    /**
+     * Test for calling addNewQuery method twice with the same query, but with adaptiveFetch is set to
+     * false. The query shouldn't be added.
+     */
+    @Test
+    void addingSameQueryTwoTimesWithoutAdaptiveFetch()
+            throws NoSuchFieldException, IllegalAccessException {
+        String expectedQuery = "test-query";
+        boolean adaptiveFetch = false;
+
+        adaptiveFetchCache.addNewQuery(adaptiveFetch, new MockUpQuery(expectedQuery));
+        adaptiveFetchCache.addNewQuery(adaptiveFetch, new MockUpQuery(expectedQuery));
+
+        Map<String, AdaptiveFetchCacheEntry> map = getInfoMapVariable();
+
+        assertEquals(0, map.size());
+        assertNull(map.get(expectedQuery));
+    }
+
+    /**
+     * Test for calling addNewQuery method twice with different queries. Both queries should be
+     * added.
+     */
+    @Test
+    void addingTwoDifferentQueries() throws NoSuchFieldException, IllegalAccessException {
+        String expectedQuery = "test-query-1";
+        String expectedQuery2 = "test-query-2";
+        boolean adaptiveFetch = true;
+
+        adaptiveFetchCache.addNewQuery(adaptiveFetch, new MockUpQuery(expectedQuery));
+        adaptiveFetchCache.addNewQuery(adaptiveFetch, new MockUpQuery(expectedQuery2));
+
+        Map<String, AdaptiveFetchCacheEntry> map = getInfoMapVariable();
+
+        assertEquals(2, map.size());
+        assertNotNull(map.get(expectedQuery));
+        assertEquals(1, map.get(expectedQuery).getCounter());
+        assertNotNull(map.get(expectedQuery2));
+        assertEquals(1, map.get(expectedQuery2).getCounter());
+    }
+
+    /**
+     * Test for calling addNewQuery method twice with different queries, but adaptiveFetch is set to
+     * false. Neither query should be added.
+     */
+    @Test
+    void addingTwoDifferentQueriesWithoutAdaptiveFetch()
+            throws NoSuchFieldException, IllegalAccessException {
+        String expectedQuery = "test-query-1";
+        String expectedQuery2 = "test-query-2";
+        boolean adaptiveFetch = false;
+
+        adaptiveFetchCache.addNewQuery(adaptiveFetch, new MockUpQuery(expectedQuery));
+        adaptiveFetchCache.addNewQuery(adaptiveFetch, new MockUpQuery(expectedQuery2));
+
+        Map<String, AdaptiveFetchCacheEntry> map = getInfoMapVariable();
+
+        assertEquals(0, map.size());
+        assertNull(map.get(expectedQuery));
+    }
+
+    /**
+     * Test for calling getAdaptiveFetch method with value true.
+     */
+    @Test
+    void gettingAdaptiveFetchIfTrue()
+            throws NoSuchFieldException, IllegalAccessException {
+        boolean expectedResult = true;
+
+        setAdaptiveFetchVariable(expectedResult);
+
+        assertEquals(expectedResult, adaptiveFetchCache.getAdaptiveFetch());
+    }
+
+    /**
+     * Test for calling getAdaptiveFetch method with value false.
+     */
+    @Test
+    void gettingAdaptiveFetchIfFalse()
+            throws NoSuchFieldException, IllegalAccessException {
+        boolean expectedResult = false;
+
+        setAdaptiveFetchVariable(expectedResult);
+
+        assertEquals(expectedResult, adaptiveFetchCache.getAdaptiveFetch());
+    }
+
+    /**
+     * Test for calling getFetchSizeForQuery method for not existing query. Should return value -1.
+     */
+    @Test
+    void gettingFetchSizeForNotExistingQuery() {
+        String expectedQuery = "test-query";
+        boolean adaptiveFetch = true;
+
+        int resultSize = adaptiveFetchCache
+                .getFetchSizeForQuery(adaptiveFetch, new MockUpQuery(expectedQuery));
+
+        assertEquals(-1, resultSize);
+    }
+
+    /**
+     * Test for calling getFetchSizeForQuery method for not existing query, but adaptiveFetch is set
+     * to false. Should return value -1.
+     */
+    @Test
+    void gettingFetchSizeForNotExistingQueryIfAdaptiveFetchFalse() {
+        String expectedQuery = "test-query";
+        boolean adaptiveFetch = false;
+
+        int resultSize = adaptiveFetchCache
+                .getFetchSizeForQuery(adaptiveFetch, new MockUpQuery(expectedQuery));
+
+        assertEquals(-1, resultSize);
+    }
+
+    /**
+     * Test for calling getFetchSizeForQuery method for existing query. Should return set fetch size
+     * for the query.
+     */
+    @Test
+    void gettingFetchSizeForExistingQuery()
+            throws NoSuchFieldException, IllegalAccessException {
+        String expectedQuery = "test-query";
+        boolean adaptiveFetch = true;
+
+        Map<String, AdaptiveFetchCacheEntry> map = getInfoMapVariable();
+
+        int expectedSize = 500;
+
+        AdaptiveFetchCacheEntry adaptiveFetchCacheEntry = new AdaptiveFetchCacheEntry();
+        adaptiveFetchCacheEntry.setSize(expectedSize);
+        map.put(expectedQuery, adaptiveFetchCacheEntry);
+
+        int resultSize = adaptiveFetchCache
+                .getFetchSizeForQuery(adaptiveFetch, new MockUpQuery(expectedQuery));
+
+        assertEquals(expectedSize, resultSize);
+    }
+
+    /**
+     * Test for calling getFetchSizeForQuery method for existing query, but adaptiveFetch is set to
+     * false. Should return value -1.
+     */
+    @Test
+    void gettingFetchSizeForExistingQueryIfAdaptiveFetchFalse()
+            throws NoSuchFieldException, IllegalAccessException {
+        String expectedQuery = "test-query";
+        boolean adaptiveFetch = false;
+
+        Map<String, AdaptiveFetchCacheEntry> map = getInfoMapVariable();
+
+        int newSize = 500;
+
+        AdaptiveFetchCacheEntry adaptiveFetchCacheEntry = new AdaptiveFetchCacheEntry();
+        adaptiveFetchCacheEntry.setSize(newSize);
+        map.put(expectedQuery, adaptiveFetchCacheEntry);
+
+        int resultSize = adaptiveFetchCache
+                .getFetchSizeForQuery(adaptiveFetch, new MockUpQuery(expectedQuery));
+
+        assertEquals(-1, resultSize);
+    }
+
+    /**
+     * Test for calling removeQuery method for a non-existing query. Nothing should happen.
+     */
+    @Test
+    void removingNotExistingQuery()
+            throws NoSuchFieldException, IllegalAccessException {
+        String expectedQuery = "test-query";
+        boolean adaptiveFetch = true;
+
+        adaptiveFetchCache.removeQuery(adaptiveFetch, new MockUpQuery(expectedQuery));
+
+        Map<String, AdaptiveFetchCacheEntry> map = getInfoMapVariable();
+
+        assertEquals(0, map.size());
+    }
+
+    /**
+     * Test for calling removeQuery method for a non-existing query when adaptiveFetch is set false.
+     * Nothing should happen.
+     */
+    @Test
+    void removingNotExistingQueryIfAdaptiveFetchFalse()
+            throws NoSuchFieldException, IllegalAccessException {
+        String expectedQuery = "test-query";
+        boolean adaptiveFetch = false;
+
+        adaptiveFetchCache.removeQuery(adaptiveFetch, new MockUpQuery(expectedQuery));
+
+        Map<String, AdaptiveFetchCacheEntry> map = getInfoMapVariable();
+
+        assertEquals(0, map.size());
+    }
+
+    /**
+     * Test for calling removeQuery method for existing query. The query should be removed from the
+     * map inside AdaptiveFetchCache.
+     */
+    @Test
+    void removingExistingQuery()
+            throws NoSuchFieldException, IllegalAccessException {
+        String expectedQuery = "test-query";
+        boolean adaptiveFetch = true;
+
+        Map<String, AdaptiveFetchCacheEntry> map = getInfoMapVariable();
+
+        AdaptiveFetchCacheEntry adaptiveFetchCacheEntry = new AdaptiveFetchCacheEntry();
+        adaptiveFetchCacheEntry.setCounter(1);
+        map.put(expectedQuery, adaptiveFetchCacheEntry);
+
+        assertEquals(1, map.size());
+
+        adaptiveFetchCache.removeQuery(adaptiveFetch, new MockUpQuery(expectedQuery));
+
+        assertEquals(0, map.size());
+        assertNull(map.get(expectedQuery));
+    }
+
+    /**
+     * Test for calling removeQuery method for existing query, but adaptiveFetch is set false. The
+     * query shouldn't be removed.
+     */
+    @Test
+    void removingExistingQueryIfAdaptiveFetchFalse()
+            throws NoSuchFieldException, IllegalAccessException {
+        String expectedQuery = "test-query";
+        boolean adaptiveFetch = false;
+
+        Map<String, AdaptiveFetchCacheEntry> map = getInfoMapVariable();
+
+        AdaptiveFetchCacheEntry adaptiveFetchCacheEntry = new AdaptiveFetchCacheEntry();
+        adaptiveFetchCacheEntry.setCounter(1);
+        map.put(expectedQuery, adaptiveFetchCacheEntry);
+
+        assertEquals(1, map.size());
+
+        adaptiveFetchCache.removeQuery(adaptiveFetch, new MockUpQuery(expectedQuery));
+
+        assertEquals(1, map.size());
+        assertNotNull(map.get(expectedQuery));
+        assertEquals(1, map.get(expectedQuery).getCounter());
+    }
+
+    /**
+     * Test for calling removeQuery method for existing query with counter set to 2. After call, query
+     * shouldn't be removed, but counter set to 1. After next call, query should be removed.
+     */
+    @Test
+    void removingExistingQueryWithLargeCounter()
+            throws NoSuchFieldException, IllegalAccessException {
+        String expectedQuery = "test-query";
+        boolean adaptiveFetch = true;
+
+        Map<String, AdaptiveFetchCacheEntry> map = getInfoMapVariable();
+
+        AdaptiveFetchCacheEntry adaptiveFetchCacheEntry = new AdaptiveFetchCacheEntry();
+        adaptiveFetchCacheEntry.setCounter(2);
+        map.put(expectedQuery, adaptiveFetchCacheEntry);
+
+        adaptiveFetchCache.removeQuery(adaptiveFetch, new MockUpQuery(expectedQuery));
+
+        assertEquals(1, map.size());
+        assertNotNull(map.get(expectedQuery));
+        assertEquals(1, map.get(expectedQuery).getCounter());
+
+        adaptiveFetchCache.removeQuery(adaptiveFetch, new MockUpQuery(expectedQuery));
+
+        assertEquals(0, map.size());
+        assertNull(map.get(expectedQuery));
+    }
+
+    /**
+     * Test for calling removeQuery method for existing query with counter set to 2, but with
+     * adaptiveFetch set false. After both calls query should be removed and counter shouldn't
+     * change.
+     */
+    @Test
+    void removingExistingQueryWithLargeCounterIfAdaptiveFetchFalse()
+            throws NoSuchFieldException, IllegalAccessException {
+        String expectedQuery = "test-query";
+        boolean adaptiveFetch = false;
+
+        Map<String, AdaptiveFetchCacheEntry> map = getInfoMapVariable();
+
+        AdaptiveFetchCacheEntry adaptiveFetchCacheEntry = new AdaptiveFetchCacheEntry();
+        adaptiveFetchCacheEntry.setCounter(2);
+        map.put(expectedQuery, adaptiveFetchCacheEntry);
+
+        adaptiveFetchCache.removeQuery(adaptiveFetch, new MockUpQuery(expectedQuery));
+
+        assertEquals(1, map.size());
+        assertNotNull(map.get(expectedQuery));
+        assertEquals(2, map.get(expectedQuery).getCounter());
+
+        adaptiveFetchCache.removeQuery(adaptiveFetch, new MockUpQuery(expectedQuery));
+
+        assertEquals(1, map.size());
+        assertNotNull(map.get(expectedQuery));
+        assertEquals(2, map.get(expectedQuery).getCounter());
+    }
+
+    /**
+     * Test for calling removeQuery method for existing query with more queries put in the map. Only
+     * query used in method call should be removed, other shouldn't change.
+     */
+    @Test
+    void removingExistingQueryWithMoreQueriesCached()
+            throws NoSuchFieldException, IllegalAccessException {
+        String expectedQuery = "test-query-1";
+        String expectedQuery2 = "test-query-2";
+        String expectedQuery3 = "test-query-3";
+        boolean adaptiveFetch = true;
+
+        Map<String, AdaptiveFetchCacheEntry> map = getInfoMapVariable();
+
+        int expectedCounter1 = 1;
+        int expectedCounter2 = 37;
+        int expectedCounter3 = 14;
+
+        AdaptiveFetchCacheEntry adaptiveFetchCacheEntry1 = new AdaptiveFetchCacheEntry();
+        adaptiveFetchCacheEntry1.setCounter(expectedCounter1);
+        map.put(expectedQuery, adaptiveFetchCacheEntry1);
+
+        AdaptiveFetchCacheEntry adaptiveFetchCacheEntry2 = new AdaptiveFetchCacheEntry();
+        adaptiveFetchCacheEntry2.setCounter(expectedCounter2);
+        map.put(expectedQuery2, adaptiveFetchCacheEntry2);
+
+        AdaptiveFetchCacheEntry adaptiveFetchCacheEntry3 = new AdaptiveFetchCacheEntry();
+        adaptiveFetchCacheEntry3.setCounter(expectedCounter3);
+        map.put(expectedQuery3, adaptiveFetchCacheEntry3);
+
+        adaptiveFetchCache.removeQuery(adaptiveFetch, new MockUpQuery(expectedQuery));
+
+        AdaptiveFetchCacheEntry resultInfo1 = map.get(expectedQuery);
+        AdaptiveFetchCacheEntry resultInfo2 = map.get(expectedQuery2);
+        AdaptiveFetchCacheEntry resultInfo3 = map.get(expectedQuery3);
+
+        assertEquals(2, map.size());
+        assertNull(resultInfo1);
+        assertNotNull(resultInfo2);
+        assertEquals(adaptiveFetchCacheEntry2, resultInfo2);
+        assertEquals(expectedCounter2, resultInfo2.getCounter());
+        assertNotNull(resultInfo3);
+        assertEquals(adaptiveFetchCacheEntry3, resultInfo3);
+        assertEquals(expectedCounter3, resultInfo3.getCounter());
+    }
+
+    /**
+     * Test for calling removeQuery method for existing query with more queries put in the map, but
+     * adaptiveFetch is set false. Queries shouldn't change
+     */
+    @Test
+    void removingExistingQueryWithMoreQueriesCachedIfAdaptiveFetchFalse()
+            throws NoSuchFieldException, IllegalAccessException {
+        String expectedQuery = "test-query-1";
+        String expectedQuery2 = "test-query-2";
+        String expectedQuery3 = "test-query-3";
+        boolean adaptiveFetch = false;
+
+        Map<String, AdaptiveFetchCacheEntry> map = getInfoMapVariable();
+
+        int expectedCounter1 = 1;
+        int expectedCounter2 = 37;
+        int expectedCounter3 = 14;
+
+        AdaptiveFetchCacheEntry adaptiveFetchCacheEntry1 = new AdaptiveFetchCacheEntry();
+        adaptiveFetchCacheEntry1.setCounter(expectedCounter1);
+        map.put(expectedQuery, adaptiveFetchCacheEntry1);
+
+        AdaptiveFetchCacheEntry adaptiveFetchCacheEntry2 = new AdaptiveFetchCacheEntry();
+        adaptiveFetchCacheEntry2.setCounter(expectedCounter2);
+        map.put(expectedQuery2, adaptiveFetchCacheEntry2);
+
+        AdaptiveFetchCacheEntry adaptiveFetchCacheEntry3 = new AdaptiveFetchCacheEntry();
+        adaptiveFetchCacheEntry3.setCounter(expectedCounter3);
+        map.put(expectedQuery3, adaptiveFetchCacheEntry3);
+
+        adaptiveFetchCache.removeQuery(adaptiveFetch, new MockUpQuery(expectedQuery));
+
+        AdaptiveFetchCacheEntry resultInfo1 = map.get(expectedQuery);
+        AdaptiveFetchCacheEntry resultInfo2 = map.get(expectedQuery2);
+        AdaptiveFetchCacheEntry resultInfo3 = map.get(expectedQuery3);
+
+        assertEquals(3, map.size());
+        assertNotNull(resultInfo1);
+        assertEquals(adaptiveFetchCacheEntry1, resultInfo1);
+        assertEquals(expectedCounter1, resultInfo1.getCounter());
+        assertNotNull(resultInfo2);
+        assertEquals(adaptiveFetchCacheEntry2, resultInfo2);
+        assertEquals(expectedCounter2, resultInfo2.getCounter());
+        assertNotNull(resultInfo3);
+        assertEquals(adaptiveFetchCacheEntry3, resultInfo3);
+        assertEquals(expectedCounter3, resultInfo3.getCounter());
+    }
+
+    /**
+     * Test for calling setAdaptiveFetch method with true value.
+     */
+    @Test
+    void settingAdaptiveFetchAsTrue()
+            throws NoSuchFieldException, IllegalAccessException {
+        boolean expectedAdaptiveFetch = true;
+
+        adaptiveFetchCache.setAdaptiveFetch(expectedAdaptiveFetch);
+
+        boolean resultAdaptiveFetch = getAdaptiveFetchVariable();
+
+        assertEquals(expectedAdaptiveFetch, resultAdaptiveFetch);
+    }
+
+    /**
+     * Test for calling setAdaptiveFetch method with false value.
+     */
+    @Test
+    void settingAdaptiveFetchAsFalse()
+            throws NoSuchFieldException, IllegalAccessException {
+        boolean expectedAdaptiveFetch = false;
+
+        adaptiveFetchCache.setAdaptiveFetch(expectedAdaptiveFetch);
+
+        boolean resultAdaptiveFetch = getAdaptiveFetchVariable();
+
+        assertEquals(expectedAdaptiveFetch, resultAdaptiveFetch);
+    }
+
+    /**
+     * Test for calling updateQueryFetchSize method. Method should update a value for a query.
+     */
+    @Test
+    void updatingAdaptiveFetchSize()
+            throws NoSuchFieldException, IllegalAccessException {
+        String expectedQuery = "test-query-1";
+        boolean adaptiveFetch = true;
+
+        Map<String, AdaptiveFetchCacheEntry> map = getInfoMapVariable();
+
+        int rowSize = 33;
+        int startSize = size / rowSize - 15;
+
+        AdaptiveFetchCacheEntry adaptiveFetchCacheEntry = new AdaptiveFetchCacheEntry();
+        adaptiveFetchCacheEntry.setSize(startSize);
+        map.put(expectedQuery, adaptiveFetchCacheEntry);
+
+        adaptiveFetchCache
+                .updateQueryFetchSize(adaptiveFetch, new MockUpQuery(expectedQuery), rowSize);
+
+        AdaptiveFetchCacheEntry resultInfo = map.get(expectedQuery);
+
+        assertNotNull(resultInfo);
+        assertEquals(size / rowSize, resultInfo.getSize());
+    }
+
+    /**
+     * Test for calling updateQueryFetchSize method, but adaptiveFetch is set false. Method shouldn't
+     * update any values.
+     */
+    @Test
+    void updatingAdaptiveFetchSizeIfAdaptiveFetchFalse()
+            throws NoSuchFieldException, IllegalAccessException {
+        String expectedQuery = "test-query-1";
+        boolean adaptiveFetch = false;
+
+        Map<String, AdaptiveFetchCacheEntry> map = getInfoMapVariable();
+
+        int rowSize = 33;
+        int startSize = size / rowSize - 15;
+
+        AdaptiveFetchCacheEntry adaptiveFetchCacheEntry = new AdaptiveFetchCacheEntry();
+        adaptiveFetchCacheEntry.setSize(startSize);
+        map.put(expectedQuery, adaptiveFetchCacheEntry);
+
+        adaptiveFetchCache
+                .updateQueryFetchSize(adaptiveFetch, new MockUpQuery(expectedQuery), rowSize);
+
+        AdaptiveFetchCacheEntry resultInfo = map.get(expectedQuery);
+
+        assertNotNull(resultInfo);
+        assertEquals(startSize, resultInfo.getSize());
+    }
+
+    /**
+     * Test for calling updateQueryFetchSize method for not existing query. Method shouldn't update
+     * any values.
+     */
+    @Test
+    void updatingAdaptiveFetchSizeForNotExistingQuery()
+            throws NoSuchFieldException, IllegalAccessException {
+        String expectedQuery = "test-query-1";
+        String expectedQuery2 = "test-query-2";
+        boolean adaptiveFetch = true;
+
+        Map<String, AdaptiveFetchCacheEntry> map = getInfoMapVariable();
+
+        int rowSize = 33;
+        int startSize = size / rowSize - 15;
+
+        AdaptiveFetchCacheEntry adaptiveFetchCacheEntry = new AdaptiveFetchCacheEntry();
+        adaptiveFetchCacheEntry.setSize(startSize);
+        map.put(expectedQuery2, adaptiveFetchCacheEntry);
+
+        adaptiveFetchCache
+                .updateQueryFetchSize(adaptiveFetch, new MockUpQuery(expectedQuery), rowSize);
+
+        AdaptiveFetchCacheEntry resultInfo = map.get(expectedQuery);
+        AdaptiveFetchCacheEntry resultInfo2 = map.get(expectedQuery2);
+
+        assertNull(resultInfo);
+        assertNotNull(resultInfo2);
+        assertEquals(adaptiveFetchCacheEntry, resultInfo2);
+        assertEquals(startSize, resultInfo2.getSize());
+        assertEquals(1, map.size());
+    }
+
+    /**
+     * Test for calling updateQueryFetchSize method for not existing query, but adaptiveFetch is set
+     * false. Method shouldn't update any values.
+     */
+    @Test
+    void updatingAdaptiveFetchSizeForNotExistingQueryIfAdaptiveFetchFalse()
+            throws NoSuchFieldException, IllegalAccessException {
+        String expectedQuery = "test-query-1";
+        String expectedQuery2 = "test-query-2";
+        boolean adaptiveFetch = false;
+
+        Map<String, AdaptiveFetchCacheEntry> map = getInfoMapVariable();
+
+        int rowSize = 33;
+        int startSize = size / rowSize - 15;
+
+        AdaptiveFetchCacheEntry adaptiveFetchCacheEntry = new AdaptiveFetchCacheEntry();
+        adaptiveFetchCacheEntry.setSize(startSize);
+        map.put(expectedQuery2, adaptiveFetchCacheEntry);
+
+        adaptiveFetchCache
+                .updateQueryFetchSize(adaptiveFetch, new MockUpQuery(expectedQuery), rowSize);
+
+        AdaptiveFetchCacheEntry resultInfo = map.get(expectedQuery);
+        AdaptiveFetchCacheEntry resultInfo2 = map.get(expectedQuery2);
+
+        assertNull(resultInfo);
+        assertNotNull(resultInfo2);
+        assertEquals(adaptiveFetchCacheEntry, resultInfo2);
+        assertEquals(startSize, resultInfo2.getSize());
+        assertEquals(1, map.size());
+    }
+
+    /**
+     * Test for calling updateQueryFetchSize method in a situation when there are more queries saved
+     * in a map. The method should only change value for query used in a call.
+     */
+    @Test
+    void updatingAdaptiveFetchSizeWithMoreQueriesInMap()
+            throws NoSuchFieldException, IllegalAccessException {
+        String expectedQuery = "test-query-1";
+        String expectedQuery2 = "test-query-2";
+        boolean adaptiveFetch = true;
+
+        Map<String, AdaptiveFetchCacheEntry> map = getInfoMapVariable();
+
+        int rowSize = 33;
+        int startSize = size / rowSize - 15;
+
+        AdaptiveFetchCacheEntry adaptiveFetchCacheEntry = new AdaptiveFetchCacheEntry();
+        adaptiveFetchCacheEntry.setSize(startSize);
+        map.put(expectedQuery, adaptiveFetchCacheEntry);
+
+        AdaptiveFetchCacheEntry adaptiveFetchCacheEntry2 = new AdaptiveFetchCacheEntry();
+        adaptiveFetchCacheEntry2.setSize(startSize);
+        map.put(expectedQuery2, adaptiveFetchCacheEntry2);
+
+        adaptiveFetchCache
+                .updateQueryFetchSize(adaptiveFetch, new MockUpQuery(expectedQuery), rowSize);
+
+        AdaptiveFetchCacheEntry resultInfo = map.get(expectedQuery);
+        AdaptiveFetchCacheEntry resultInfo2 = map.get(expectedQuery2);
+
+        assertNotNull(resultInfo);
+        assertEquals(adaptiveFetchCacheEntry, resultInfo);
+        assertEquals(size / rowSize, resultInfo.getSize());
+        assertNotNull(resultInfo2);
+        assertEquals(adaptiveFetchCacheEntry2, resultInfo2);
+        assertEquals(startSize, resultInfo2.getSize());
+        assertEquals(2, map.size());
+    }
+
+    /**
+     * Test for calling updateQueryFetchSize method in a situation when there are more queries saved
+     * in a map, but adaptiveFetch is set false. The method shouldn't change any values.
+     */
+    @Test
+    void updatingAdaptiveFetchSizeWithMoreQueriesInMapIfAdaptiveFetchFalse()
+            throws NoSuchFieldException, IllegalAccessException {
+        String expectedQuery = "test-query-1";
+        String expectedQuery2 = "test-query-2";
+        boolean adaptiveFetch = false;
+
+        Map<String, AdaptiveFetchCacheEntry> map = getInfoMapVariable();
+
+        int rowSize = 33;
+        int startSize = size / rowSize - 15;
+
+        AdaptiveFetchCacheEntry adaptiveFetchCacheEntry = new AdaptiveFetchCacheEntry();
+        adaptiveFetchCacheEntry.setSize(startSize);
+        map.put(expectedQuery, adaptiveFetchCacheEntry);
+
+        AdaptiveFetchCacheEntry adaptiveFetchCacheEntry2 = new AdaptiveFetchCacheEntry();
+        adaptiveFetchCacheEntry2.setSize(startSize);
+        map.put(expectedQuery2, adaptiveFetchCacheEntry2);
+
+        adaptiveFetchCache
+                .updateQueryFetchSize(adaptiveFetch, new MockUpQuery(expectedQuery), rowSize);
+
+        AdaptiveFetchCacheEntry resultInfo = map.get(expectedQuery);
+        AdaptiveFetchCacheEntry resultInfo2 = map.get(expectedQuery2);
+
+        assertNotNull(resultInfo);
+        assertEquals(adaptiveFetchCacheEntry, resultInfo);
+        assertEquals(startSize, resultInfo.getSize());
+        assertNotNull(resultInfo2);
+        assertEquals(adaptiveFetchCacheEntry2, resultInfo2);
+        assertEquals(startSize, resultInfo2.getSize());
+        assertEquals(2, map.size());
+    }
+
+    /**
+     * Test for calling updateQueryFetchSize method with value to make computed value below minimum
+     * value. The method should update a query to have value of minimum.
+     */
+    @Test
+    void updatingAdaptiveFetchSizeWithMinimumSize()
+            throws NoSuchFieldException, IllegalAccessException {
+        String expectedQuery = "test-query-1";
+        boolean adaptiveFetch = true;
+
+        int rowSize = size + 1000;
+        int startSize = 2;
+        int expectedSize = 10;
+
+        setMinimumSizeVariable(expectedSize);
+
+        Map<String, AdaptiveFetchCacheEntry> map = getInfoMapVariable();
+
+        AdaptiveFetchCacheEntry adaptiveFetchCacheEntry = new AdaptiveFetchCacheEntry();
+        adaptiveFetchCacheEntry.setSize(startSize);
+        map.put(expectedQuery, adaptiveFetchCacheEntry);
+
+        adaptiveFetchCache
+                .updateQueryFetchSize(adaptiveFetch, new MockUpQuery(expectedQuery), rowSize);
+
+        AdaptiveFetchCacheEntry resultInfo = map.get(expectedQuery);
+
+        assertNotNull(resultInfo);
+        assertEquals(expectedSize, resultInfo.getSize());
+    }
+
+    /**
+     * Test for calling updateQueryFetchSize method with value to make computed value below minimum
+     * value, but adaptiveFetch is set false. The method shouldn't update size for a query.
+     */
+    @Test
+    void updatingAdaptiveFetchSizeWithMinimumSizeIfAdaptiveFetchFalse()
+            throws NoSuchFieldException, IllegalAccessException {
+        String expectedQuery = "test-query-1";
+        boolean adaptiveFetch = false;
+
+        int rowSize = size + 1000;
+        int startSize = 2;
+        int expectedSize = 10;
+
+        setMinimumSizeVariable(expectedSize);
+
+        Map<String, AdaptiveFetchCacheEntry> map = getInfoMapVariable();
+
+        AdaptiveFetchCacheEntry adaptiveFetchCacheEntry = new AdaptiveFetchCacheEntry();
+        adaptiveFetchCacheEntry.setSize(startSize);
+        map.put(expectedQuery, adaptiveFetchCacheEntry);
+
+        adaptiveFetchCache
+                .updateQueryFetchSize(adaptiveFetch, new MockUpQuery(expectedQuery), rowSize);
+
+        AdaptiveFetchCacheEntry resultInfo = map.get(expectedQuery);
+
+        assertNotNull(resultInfo);
+        assertEquals(startSize, resultInfo.getSize());
+    }
+
+    /**
+     * Test for calling updateQueryFetchSize method with value to make computed value above maximum
+     * value. The method should update a query to have value of maximum.
+     */
+    @Test
+    void updatingAdaptiveFetchSizeWithMaximumSize()
+            throws NoSuchFieldException, IllegalAccessException {
+        String expectedQuery = "test-query-1";
+        boolean adaptiveFetch = true;
+
+        int rowSize = 1;
+        int startSize = 2;
+        int expectedSize = size / rowSize - 20;
+
+        setMaximumSizeVariable(expectedSize);
+
+        Map<String, AdaptiveFetchCacheEntry> map = getInfoMapVariable();
+
+        AdaptiveFetchCacheEntry adaptiveFetchCacheEntry = new AdaptiveFetchCacheEntry();
+        adaptiveFetchCacheEntry.setSize(startSize);
+        map.put(expectedQuery, adaptiveFetchCacheEntry);
+
+        adaptiveFetchCache
+                .updateQueryFetchSize(adaptiveFetch, new MockUpQuery(expectedQuery), rowSize);
+
+        AdaptiveFetchCacheEntry resultInfo = map.get(expectedQuery);
+
+        assertNotNull(resultInfo);
+        assertEquals(expectedSize, resultInfo.getSize());
+    }
+
+    /**
+     * Test for calling updateQueryFetchSize method with value to make computed value below maximum
+     * value, but adaptiveFetch is set false. The method shouldn't update size for a query.
+     */
+    @Test
+    void updatingAdaptiveFetchSizeWithMaximumSizeIfAdaptiveFetchFalse()
+            throws NoSuchFieldException, IllegalAccessException {
+        String expectedQuery = "test-query-1";
+        boolean adaptiveFetch = false;
+
+        int rowSize = 1;
+        int startSize = 2;
+        int expectedSize = size / rowSize - 20;
+
+        setMaximumSizeVariable(expectedSize);
+
+        Map<String, AdaptiveFetchCacheEntry> map = getInfoMapVariable();
+
+        AdaptiveFetchCacheEntry adaptiveFetchQueryInfo = new AdaptiveFetchCacheEntry();
+        adaptiveFetchQueryInfo.setSize(startSize);
+        map.put(expectedQuery, adaptiveFetchQueryInfo);
+
+        adaptiveFetchCache
+                .updateQueryFetchSize(adaptiveFetch, new MockUpQuery(expectedQuery), rowSize);
+
+        AdaptiveFetchCacheEntry resultInfo = map.get(expectedQuery);
+
+        assertNotNull(resultInfo);
+        assertEquals(startSize, resultInfo.getSize());
+    }
+
+    // Here are methods for retrieving values from adaptiveFetchCache without calling methods
+
+    private Map<String, AdaptiveFetchCacheEntry> getInfoMapVariable()
+            throws IllegalAccessException, NoSuchFieldException {
+        Field field = adaptiveFetchCache.getClass().getDeclaredField(infoMapVariableName);
+        field.setAccessible(true);
+        return (Map<String, AdaptiveFetchCacheEntry>) field.get(adaptiveFetchCache);
+    }
+
+    private int getMinimumSizeVariable() throws NoSuchFieldException, IllegalAccessException {
+        Field field = adaptiveFetchCache.getClass().getDeclaredField(minimumSizeVariableName);
+        field.setAccessible(true);
+        return (Integer) field.get(adaptiveFetchCache);
+    }
+
+    private void setMinimumSizeVariable(int value)
+            throws NoSuchFieldException, IllegalAccessException {
+        Field field = adaptiveFetchCache.getClass().getDeclaredField(minimumSizeVariableName);
+        field.setAccessible(true);
+        field.set(adaptiveFetchCache, value);
+    }
+
+    private int getMaximumSizeVariable() throws NoSuchFieldException, IllegalAccessException {
+        Field field = adaptiveFetchCache.getClass().getDeclaredField(maximumSizeVariableName);
+        field.setAccessible(true);
+        return (Integer) field.get(adaptiveFetchCache);
+    }
+
+    private void setMaximumSizeVariable(int value)
+            throws NoSuchFieldException, IllegalAccessException {
+        Field field = adaptiveFetchCache.getClass()
+                .getDeclaredField(maximumSizeVariableName);
+        field.setAccessible(true);
+        field.set(adaptiveFetchCache, value);
+    }
+
+    private boolean getAdaptiveFetchVariable() throws NoSuchFieldException, IllegalAccessException {
+        Field field = adaptiveFetchCache.getClass()
+                .getDeclaredField(adaptiveFetchVariableName);
+        field.setAccessible(true);
+        return (Boolean) field.get(adaptiveFetchCache);
+    }
+
+    private void setAdaptiveFetchVariable(boolean value)
+            throws NoSuchFieldException, IllegalAccessException {
+        Field field = adaptiveFetchCache.getClass()
+                .getDeclaredField(adaptiveFetchVariableName);
+        field.setAccessible(true);
+        field.set(adaptiveFetchCache, value);
+    }
+
+    private long getMaximumBufferVariable() throws NoSuchFieldException, IllegalAccessException {
+        Field field = adaptiveFetchCache.getClass()
+                .getDeclaredField(maximumBufferSizeVariableName);
+        field.setAccessible(true);
+        return (Long) field.get(adaptiveFetchCache);
+    }
+
+    private void setMaximumBufferVariable(long value)
+            throws NoSuchFieldException, IllegalAccessException {
+        Field field = adaptiveFetchCache.getClass()
+                .getDeclaredField(maximumBufferSizeVariableName);
+        field.setAccessible(true);
+        field.set(adaptiveFetchCache, value);
+    }
+
+    /**
+     * Class to mock object with Query interface. As AdaptiveFetchCache is using only
+     * getNativeSql method from Query interface, other shouldn't be called.
+     */
+    private class MockUpQuery implements Query {
+
+        public String sql;
+
+        MockUpQuery(String sql) {
+            this.sql = sql;
+        }
+
+        @Override
+        public ParameterList createParameterList() {
+            throw new WrongMethodCallException("Method shouldn't be called.");
+        }
+
+        @Override
+        public String toString(ParameterList parameters) {
+            throw new WrongMethodCallException("Method shouldn't be called.");
+        }
+
+        @Override
+        public String getNativeSql() {
+            return this.sql;
+        }
+
+        @Override
+        public SqlCommand getSqlCommand() {
+            throw new WrongMethodCallException("Method shouldn't be called.");
+        }
+
+        @Override
+        public void close() {
+            throw new WrongMethodCallException("Method shouldn't be called.");
+        }
+
+        @Override
+        public boolean isStatementDescribed() {
+            throw new WrongMethodCallException("Method shouldn't be called.");
+        }
+
+        @Override
+        public boolean isEmpty() {
+            throw new WrongMethodCallException("Method shouldn't be called.");
+        }
+
+        @Override
+        public int getBatchSize() {
+            throw new WrongMethodCallException("Method shouldn't be called.");
+        }
+
+        @Override
+        public Map<String, Integer> getResultSetColumnNameIndexMap() {
+            throw new WrongMethodCallException("Method shouldn't be called.");
+        }
+
+        @Override
+        public Query[] getSubqueries() {
+            throw new WrongMethodCallException("Method shouldn't be called.");
+        }
+    }
+
+    /**
+     * An exception used when method shouldn't be called in MockUpQuery class.
+     */
+    private class WrongMethodCallException extends RuntimeException {
+
+        WrongMethodCallException(String msg) {
+            super(msg);
+        }
+
+    }
+
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/extensions/ExtensionsTestSuite.java b/pgjdbc/src/test/java/org/postgresql/test/extensions/ExtensionsTestSuite.java
index 898ac69..1d105c0 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/extensions/ExtensionsTestSuite.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/extensions/ExtensionsTestSuite.java
@@ -13,7 +13,7 @@ import org.junit.runners.Suite;
  */
 @RunWith(Suite.class)
 @Suite.SuiteClasses({
-    HStoreTest.class,
+        HStoreTest.class,
 })
 public class ExtensionsTestSuite {
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/extensions/HStoreTest.java b/pgjdbc/src/test/java/org/postgresql/test/extensions/HStoreTest.java
index 5db06f5..b468a02 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/extensions/HStoreTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/extensions/HStoreTest.java
@@ -5,17 +5,6 @@
 
 package org.postgresql.test.extensions;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import org.postgresql.core.ServerVersion;
-import org.postgresql.jdbc.PreferQueryMode;
-import org.postgresql.test.jdbc2.BaseTest4;
-
-import org.junit.Assume;
-import org.junit.Test;
-
 import java.sql.Connection;
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
@@ -25,92 +14,100 @@ import java.sql.Types;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;
+import org.junit.Assume;
+import org.junit.Test;
+import org.postgresql.core.ServerVersion;
+import org.postgresql.jdbc.PreferQueryMode;
+import org.postgresql.test.jdbc2.BaseTest4;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 // SELECT 'hstore'::regtype::oid
 // SELECT 'hstore[]'::regtype::oid
 
 public class HStoreTest extends BaseTest4 {
 
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-    Assume.assumeTrue("server has installed hstore", isHStoreEnabled(con));
-    Assume.assumeFalse("hstore is not supported in simple protocol only mode",
-        preferQueryMode == PreferQueryMode.SIMPLE);
-    assumeMinimumServerVersion("hstore requires PostgreSQL 8.3+", ServerVersion.v8_3);
-  }
-
-  private static boolean isHStoreEnabled(Connection conn) {
-    try {
-      Statement stmt = conn.createStatement();
-      ResultSet rs = stmt.executeQuery("SELECT 'a=>1'::hstore::text");
-      rs.close();
-      stmt.close();
-      return true;
-    } catch (SQLException sqle) {
-      return false;
+    private static boolean isHStoreEnabled(Connection conn) {
+        try {
+            Statement stmt = conn.createStatement();
+            ResultSet rs = stmt.executeQuery("SELECT 'a=>1'::hstore::text");
+            rs.close();
+            stmt.close();
+            return true;
+        } catch (SQLException sqle) {
+            return false;
+        }
     }
-  }
 
-  @Test
-  public void testHStoreSelect() throws SQLException {
-    PreparedStatement pstmt = con.prepareStatement("SELECT 'a=>1,b=>2'::hstore");
-    ResultSet rs = pstmt.executeQuery();
-    assertEquals(Map.class.getName(), rs.getMetaData().getColumnClassName(1));
-    assertTrue(rs.next());
-    String str = rs.getString(1);
-    if (!("\"a\"=>\"1\", \"b\"=>\"2\"".equals(str) || "\"b\"=>\"2\", \"a\"=>\"1\"".equals(str))) {
-      fail("Expected " + "\"a\"=>\"1\", \"b\"=>\"2\"" + " but got " + str);
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        Assume.assumeTrue("server has installed hstore", isHStoreEnabled(con));
+        Assume.assumeFalse("hstore is not supported in simple protocol only mode",
+                preferQueryMode == PreferQueryMode.SIMPLE);
+        assumeMinimumServerVersion("hstore requires PostgreSQL 8.3+", ServerVersion.v8_3);
     }
-    Map<String, String> correct = new HashMap<>();
-    correct.put("a", "1");
-    correct.put("b", "2");
-    assertEquals(correct, rs.getObject(1));
-  }
 
-  @Test
-  public void testHStoreSelectNullValue() throws SQLException {
-    PreparedStatement pstmt = con.prepareStatement("SELECT 'a=>NULL'::hstore");
-    ResultSet rs = pstmt.executeQuery();
-    assertEquals(Map.class.getName(), rs.getMetaData().getColumnClassName(1));
-    assertTrue(rs.next());
-    assertEquals("\"a\"=>NULL", rs.getString(1));
-    Map<String, Object> correct = Collections.singletonMap("a", null);
-    assertEquals(correct, rs.getObject(1));
-  }
+    @Test
+    public void testHStoreSelect() throws SQLException {
+        PreparedStatement pstmt = con.prepareStatement("SELECT 'a=>1,b=>2'::hstore");
+        ResultSet rs = pstmt.executeQuery();
+        assertEquals(Map.class.getName(), rs.getMetaData().getColumnClassName(1));
+        assertTrue(rs.next());
+        String str = rs.getString(1);
+        if (!("\"a\"=>\"1\", \"b\"=>\"2\"".equals(str) || "\"b\"=>\"2\", \"a\"=>\"1\"".equals(str))) {
+            fail("Expected " + "\"a\"=>\"1\", \"b\"=>\"2\"" + " but got " + str);
+        }
+        Map<String, String> correct = new HashMap<>();
+        correct.put("a", "1");
+        correct.put("b", "2");
+        assertEquals(correct, rs.getObject(1));
+    }
 
-  @Test
-  public void testHStoreSend() throws SQLException {
-    Map<String, Integer> correct = Collections.singletonMap("a", 1);
-    PreparedStatement pstmt = con.prepareStatement("SELECT ?::text");
-    pstmt.setObject(1, correct);
-    ResultSet rs = pstmt.executeQuery();
-    assertEquals(String.class.getName(), rs.getMetaData().getColumnClassName(1));
-    assertTrue(rs.next());
-    assertEquals("\"a\"=>\"1\"", rs.getString(1));
-  }
+    @Test
+    public void testHStoreSelectNullValue() throws SQLException {
+        PreparedStatement pstmt = con.prepareStatement("SELECT 'a=>NULL'::hstore");
+        ResultSet rs = pstmt.executeQuery();
+        assertEquals(Map.class.getName(), rs.getMetaData().getColumnClassName(1));
+        assertTrue(rs.next());
+        assertEquals("\"a\"=>NULL", rs.getString(1));
+        Map<String, Object> correct = Collections.singletonMap("a", null);
+        assertEquals(correct, rs.getObject(1));
+    }
 
-  @Test
-  public void testHStoreUsingPSSetObject4() throws SQLException {
-    Map<String, Integer> correct = Collections.singletonMap("a", 1);
-    PreparedStatement pstmt = con.prepareStatement("SELECT ?::text");
-    pstmt.setObject(1, correct, Types.OTHER, -1);
-    ResultSet rs = pstmt.executeQuery();
-    assertEquals(String.class.getName(), rs.getMetaData().getColumnClassName(1));
-    assertTrue(rs.next());
-    assertEquals("\"a\"=>\"1\"", rs.getString(1));
-  }
+    @Test
+    public void testHStoreSend() throws SQLException {
+        Map<String, Integer> correct = Collections.singletonMap("a", 1);
+        PreparedStatement pstmt = con.prepareStatement("SELECT ?::text");
+        pstmt.setObject(1, correct);
+        ResultSet rs = pstmt.executeQuery();
+        assertEquals(String.class.getName(), rs.getMetaData().getColumnClassName(1));
+        assertTrue(rs.next());
+        assertEquals("\"a\"=>\"1\"", rs.getString(1));
+    }
 
-  @Test
-  public void testHStoreSendEscaped() throws SQLException {
-    Map<String, String> correct = Collections.singletonMap("a", "t'e\ns\"t");
-    PreparedStatement pstmt = con.prepareStatement("SELECT ?");
-    pstmt.setObject(1, correct);
-    ResultSet rs = pstmt.executeQuery();
-    assertEquals(Map.class.getName(), rs.getMetaData().getColumnClassName(1));
-    assertTrue(rs.next());
-    assertEquals(correct, rs.getObject(1));
-    assertEquals("\"a\"=>\"t'e\ns\\\"t\"", rs.getString(1));
-  }
+    @Test
+    public void testHStoreUsingPSSetObject4() throws SQLException {
+        Map<String, Integer> correct = Collections.singletonMap("a", 1);
+        PreparedStatement pstmt = con.prepareStatement("SELECT ?::text");
+        pstmt.setObject(1, correct, Types.OTHER, -1);
+        ResultSet rs = pstmt.executeQuery();
+        assertEquals(String.class.getName(), rs.getMetaData().getColumnClassName(1));
+        assertTrue(rs.next());
+        assertEquals("\"a\"=>\"1\"", rs.getString(1));
+    }
+
+    @Test
+    public void testHStoreSendEscaped() throws SQLException {
+        Map<String, String> correct = Collections.singletonMap("a", "t'e\ns\"t");
+        PreparedStatement pstmt = con.prepareStatement("SELECT ?");
+        pstmt.setObject(1, correct);
+        ResultSet rs = pstmt.executeQuery();
+        assertEquals(Map.class.getName(), rs.getMetaData().getColumnClassName(1));
+        assertTrue(rs.next());
+        assertEquals(correct, rs.getObject(1));
+        assertEquals("\"a\"=>\"t'e\ns\\\"t\"", rs.getString(1));
+    }
 
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/hostchooser/MultiHostTestSuite.java b/pgjdbc/src/test/java/org/postgresql/test/hostchooser/MultiHostTestSuite.java
index 2f19cd4..b42b7d5 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/hostchooser/MultiHostTestSuite.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/hostchooser/MultiHostTestSuite.java
@@ -13,7 +13,7 @@ import org.junit.runners.Suite;
  */
 @RunWith(Suite.class)
 @Suite.SuiteClasses({
-    MultiHostsConnectionTest.class,
+        MultiHostsConnectionTest.class,
 })
 public class MultiHostTestSuite {
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/hostchooser/MultiHostsConnectionTest.java b/pgjdbc/src/test/java/org/postgresql/test/hostchooser/MultiHostsConnectionTest.java
index e257328..9fdb2c8 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/hostchooser/MultiHostsConnectionTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/hostchooser/MultiHostsConnectionTest.java
@@ -46,448 +46,448 @@ import java.util.Set;
 
 public class MultiHostsConnectionTest {
 
-  private static final String user = TestUtil.getUser();
-  private static final String password = TestUtil.getPassword();
-  private static final String primary1 = TestUtil.getServer() + ":" + TestUtil.getPort();
-  private static final String secondary1 = getSecondaryServer1() + ":" + getSecondaryPort1();
-  private static final String secondary2 = getSecondaryServer2() + ":" + getSecondaryPort2();
-  private static final String fake1 = "127.127.217.217:1";
+    private static final String user = TestUtil.getUser();
+    private static final String password = TestUtil.getPassword();
+    private static final String primary1 = TestUtil.getServer() + ":" + TestUtil.getPort();
+    private static final String secondary1 = getSecondaryServer1() + ":" + getSecondaryPort1();
+    private static final String secondary2 = getSecondaryServer2() + ":" + getSecondaryPort2();
+    private static final String fake1 = "127.127.217.217:1";
 
-  private String primaryIp;
-  private String secondaryIP;
-  private String secondaryIP2;
-  private Connection con;
-  private Map<HostSpec, Object> hostStatusMap;
+    private String primaryIp;
+    private String secondaryIP;
+    private String secondaryIP2;
+    private Connection con;
+    private Map<HostSpec, Object> hostStatusMap;
 
-  @BeforeAll
-  static void setUpClass() {
-    assumeTrue(isReplicationInstanceAvailable());
-  }
-
-  @BeforeEach
-  void setUp() throws Exception {
-    Field field = GlobalHostStatusTracker.class.getDeclaredField("hostStatusMap");
-    field.setAccessible(true);
-    hostStatusMap = (Map<HostSpec, Object>) field.get(null);
-
-    con = TestUtil.openDB();
-    primaryIp = getRemoteHostSpec();
-    closeDB(con);
-
-    con = openSecondaryDB();
-    secondaryIP = getRemoteHostSpec();
-    closeDB(con);
-
-    con = openSecondaryDB2();
-    secondaryIP2 = getRemoteHostSpec();
-    closeDB(con);
-  }
-
-  private static boolean isReplicationInstanceAvailable() {
-    try {
-      Connection connection = openSecondaryDB();
-      closeDB(connection);
-      return true;
-    } catch (Exception e) {
-      return false;
-    }
-  }
-
-  private static Connection openSecondaryDB() throws Exception {
-    TestUtil.initDriver();
-
-    Properties props = userAndPassword();
-
-    return DriverManager.getConnection(TestUtil.getURL(getSecondaryServer1(), getSecondaryPort1()), props);
-  }
-
-  private static Properties userAndPassword() {
-    Properties props = new Properties();
-
-    PGProperty.USER.set(props, TestUtil.getUser());
-    PGProperty.PASSWORD.set(props, TestUtil.getPassword());
-    return props;
-  }
-
-  private static String getSecondaryServer1() {
-    return System.getProperty("secondaryServer1", TestUtil.getServer());
-  }
-
-  private static int getSecondaryPort1() {
-    return Integer.parseInt(System.getProperty("secondaryPort1", String.valueOf(TestUtil.getPort() + 1)));
-  }
-
-  private static Connection openSecondaryDB2() throws Exception {
-    TestUtil.initDriver();
-
-    Properties props = userAndPassword();
-    return DriverManager.getConnection(TestUtil.getURL(getSecondaryServer2(), getSecondaryPort2()), props);
-  }
-
-  private static String getSecondaryServer2() {
-    return System.getProperty("secondaryServer2", TestUtil.getServer());
-  }
-
-  private static int getSecondaryPort2() {
-    return Integer.parseInt(System.getProperty("secondaryPort2", String.valueOf(TestUtil.getPort() + 2)));
-  }
-
-  private Connection getConnection(HostRequirement hostType, String... targets)
-      throws SQLException {
-    return getConnection(hostType, true, targets);
-  }
-
-  private static HostSpec hostSpec(String host) {
-    int split = host.indexOf(':');
-    return new HostSpec(host.substring(0, split), parseInt(host.substring(split + 1)));
-  }
-
-  private Connection getConnection(HostRequirement hostType, boolean reset,
-      String... targets) throws SQLException {
-    return getConnection(hostType, reset, false, targets);
-  }
-
-  private Connection getConnection(HostRequirement hostType, boolean reset, boolean lb,
-      String... targets) throws SQLException {
-    TestUtil.closeDB(con);
-
-    if (reset) {
-      resetGlobalState();
+    @BeforeAll
+    static void setUpClass() {
+        assumeTrue(isReplicationInstanceAvailable());
     }
 
-    Properties props = new Properties();
-    PGProperty.USER.set(props, user);
-    PGProperty.PASSWORD.set(props, password);
-    PGProperty.TARGET_SERVER_TYPE.set(props, hostType.name());
-    PGProperty.HOST_RECHECK_SECONDS.set(props, 2);
-    if (lb) {
-      PGProperty.LOAD_BALANCE_HOSTS.set(props, "true");
+    private static boolean isReplicationInstanceAvailable() {
+        try {
+            Connection connection = openSecondaryDB();
+            closeDB(connection);
+            return true;
+        } catch (Exception e) {
+            return false;
+        }
     }
 
-    StringBuilder sb = new StringBuilder();
-    sb.append("jdbc:postgresql://");
-    for (String target : targets) {
-      sb.append(target).append(',');
-    }
-    sb.setLength(sb.length() - 1);
-    sb.append("/");
-    sb.append(TestUtil.getDatabase());
+    private static Connection openSecondaryDB() throws Exception {
+        TestUtil.initDriver();
 
-    return con = DriverManager.getConnection(sb.toString(), props);
-  }
+        Properties props = userAndPassword();
 
-  private void assertRemote(String expectedHost) throws SQLException {
-    assertEquals(expectedHost, getRemoteHostSpec());
-  }
-
-  private String getRemoteHostSpec() throws SQLException {
-    ResultSet rs = con.createStatement()
-        .executeQuery("select inet_server_addr() || ':' || inet_server_port()");
-    rs.next();
-    return rs.getString(1);
-  }
-
-  public static boolean isMaster(Connection con) throws SQLException {
-    ResultSet rs = con.createStatement().executeQuery("show transaction_read_only");
-    rs.next();
-    return "off".equals(rs.getString(1));
-  }
-
-  private void assertGlobalState(String host, String status) {
-    HostSpec spec = hostSpec(host);
-    if (status == null) {
-      assertNull(hostStatusMap.get(spec));
-    } else {
-      assertEquals(host + "=" + status, hostStatusMap.get(spec).toString());
-    }
-  }
-
-  private void resetGlobalState() {
-    hostStatusMap.clear();
-  }
-
-  @Test
-  void connectToAny() throws SQLException {
-    getConnection(any, fake1, primary1);
-    assertRemote(primaryIp);
-    assertGlobalState(primary1, "ConnectOK");
-    assertGlobalState(fake1, "ConnectFail");
-
-    getConnection(any, fake1, secondary1);
-    assertRemote(secondaryIP);
-    assertGlobalState(secondary1, "ConnectOK");
-
-    getConnection(any, fake1, primary1);
-    assertRemote(primaryIp);
-    assertGlobalState(primary1, "ConnectOK");
-    assertGlobalState(fake1, "ConnectFail");
-  }
-
-  @Test
-  void connectToMaster() throws SQLException {
-    getConnection(primary, true, fake1, primary1, secondary1);
-    assertRemote(primaryIp);
-    assertGlobalState(fake1, "ConnectFail");
-    assertGlobalState(primary1, "Primary");
-    assertGlobalState(secondary1, null);
-
-    getConnection(primary, false, fake1, secondary1, primary1);
-    assertRemote(primaryIp);
-    assertGlobalState(fake1, "ConnectFail"); // cached
-    assertGlobalState(primary1, "Primary"); // connected to primary
-    assertGlobalState(secondary1, "Secondary"); // was unknown, so tried to connect in order
-  }
-
-  @Test
-  void connectToPrimaryFirst() throws SQLException {
-    getConnection(preferPrimary, true, fake1, primary1, secondary1);
-    assertRemote(primaryIp);
-    assertGlobalState(fake1, "ConnectFail");
-    assertGlobalState(primary1, "Primary");
-    assertGlobalState(secondary1, null);
-
-    getConnection(primary, false, fake1, secondary1, primary1);
-    assertRemote(primaryIp);
-    assertGlobalState(fake1, "ConnectFail");
-    assertGlobalState(primary1, "Primary");
-    assertGlobalState(secondary1, "Secondary"); // tried as it was unknown
-
-    getConnection(preferPrimary, true, fake1, secondary1, primary1);
-    assertRemote(primaryIp);
-    assertGlobalState(fake1, "ConnectFail");
-    assertGlobalState(primary1, "Primary");
-    assertGlobalState(secondary1, "Secondary");
-  }
-
-  @Test
-  void connectToPrimaryWithReadonlyTransactionMode() throws SQLException {
-    con = TestUtil.openPrivilegedDB();
-    con.createStatement().execute("ALTER DATABASE " + TestUtil.getDatabase() + " SET default_transaction_read_only=on;");
-    try {
-      getConnection(primary, true, fake1, primary1, secondary1);
-    } catch (PSQLException e) {
-      assertEquals(PSQLState.CONNECTION_UNABLE_TO_CONNECT.getState(), e.getSQLState());
-      assertGlobalState(fake1, "ConnectFail");
-      assertGlobalState(primary1, "Secondary");
-      assertGlobalState(secondary1, "Secondary");
-    } finally {
-      con = TestUtil.openPrivilegedDB();
-      con.createStatement().execute(
-          "BEGIN;"
-          + "SET TRANSACTION READ WRITE;"
-          + "ALTER DATABASE " + TestUtil.getDatabase() + " SET default_transaction_read_only=off;"
-          + "COMMIT;"
-      );
-      TestUtil.closeDB(con);
-    }
-  }
-
-  @Test
-  void connectToSecondary() throws SQLException {
-    getConnection(secondary, true, fake1, secondary1, primary1);
-    assertRemote(secondaryIP);
-    assertGlobalState(fake1, "ConnectFail");
-    assertGlobalState(secondary1, "Secondary");
-    assertGlobalState(primary1, null);
-
-    getConnection(secondary, false, fake1, primary1, secondary1);
-    assertRemote(secondaryIP);
-    assertGlobalState(fake1, "ConnectFail"); // cached
-    assertGlobalState(secondary1, "Secondary"); // connected
-    assertGlobalState(primary1, "Primary"); // tried as it was unknown
-  }
-
-  @Test
-  void connectToSecondaryFirst() throws SQLException {
-    getConnection(preferSecondary, true, fake1, secondary1, primary1);
-    assertRemote(secondaryIP);
-    assertGlobalState(fake1, "ConnectFail");
-    assertGlobalState(secondary1, "Secondary");
-    assertGlobalState(primary1, null);
-
-    getConnection(secondary, false, fake1, primary1, secondary1);
-    assertRemote(secondaryIP);
-    assertGlobalState(fake1, "ConnectFail");
-    assertGlobalState(secondary1, "Secondary");
-    assertGlobalState(primary1, "Primary"); // tried as it was unknown
-
-    getConnection(preferSecondary, true, fake1, primary1, secondary1);
-    assertRemote(secondaryIP);
-    assertGlobalState(fake1, "ConnectFail");
-    assertGlobalState(secondary1, "Secondary");
-    assertGlobalState(primary1, "Primary");
-  }
-
-  @Test
-  void failedConnection() throws SQLException {
-    try {
-      getConnection(any, true, fake1);
-      fail();
-    } catch (PSQLException ex) {
-    }
-  }
-
-  @Test
-  void loadBalancing() throws SQLException {
-    Set<String> connectedHosts = new HashSet<>();
-    boolean fake1FoundTried = false;
-    for (int i = 0; i < 20; i++) {
-      getConnection(any, true, true, fake1, primary1, secondary1);
-      connectedHosts.add(getRemoteHostSpec());
-      fake1FoundTried |= hostStatusMap.containsKey(hostSpec(fake1));
-      if (connectedHosts.size() == 2 && fake1FoundTried) {
-        break;
-      }
-    }
-    assertEquals(new HashSet<String>(asList(primaryIp, secondaryIP)),
-        connectedHosts,
-        "Never connected to all hosts");
-    assertTrue(fake1FoundTried, "Never tried to connect to fake node");
-  }
-
-  @Test
-  void loadBalancing_preferPrimary() throws SQLException {
-    Set<String> connectedHosts = new HashSet<>();
-    Set<HostSpec> tryConnectedHosts = new HashSet<>();
-    for (int i = 0; i < 20; i++) {
-      getConnection(preferPrimary, true, true, fake1, secondary1, secondary2, primary1);
-      connectedHosts.add(getRemoteHostSpec());
-      tryConnectedHosts.addAll(hostStatusMap.keySet());
-      if (tryConnectedHosts.size() == 4) {
-        break;
-      }
+        return DriverManager.getConnection(TestUtil.getURL(getSecondaryServer1(), getSecondaryPort1()), props);
     }
 
-    assertRemote(primaryIp);
-    assertEquals(new HashSet<String>(asList(primaryIp)),
-        connectedHosts,
-        "Connected to hosts other than primary");
-    assertEquals(4, tryConnectedHosts.size(), "Never tried to connect to fake node");
+    private static Properties userAndPassword() {
+        Properties props = new Properties();
 
-    getConnection(preferPrimary, false, true, fake1, secondary1, primary1);
-    assertRemote(primaryIp);
-
-    // connect to secondaries when there's no primary - with load balancing
-    connectedHosts.clear();
-    for (int i = 0; i < 20; i++) {
-      getConnection(preferPrimary, false, true, fake1, secondary1, secondary2);
-      connectedHosts.add(getRemoteHostSpec());
-      if (connectedHosts.size() == 2) {
-        break;
-      }
-    }
-    assertEquals(new HashSet<String>(asList(secondaryIP, secondaryIP2)),
-        connectedHosts,
-        "Never connected to all secondary hosts");
-
-    // connect to secondary when there's no primary
-    getConnection(preferPrimary, true, true, fake1, secondary1);
-    assertRemote(secondaryIP);
-
-    getConnection(preferPrimary, false, true, fake1, secondary1);
-    assertRemote(secondaryIP);
-  }
-
-  @Test
-  void loadBalancing_preferSecondary() throws SQLException {
-    Set<String> connectedHosts = new HashSet<>();
-    Set<HostSpec> tryConnectedHosts = new HashSet<>();
-    for (int i = 0; i < 20; i++) {
-      getConnection(preferSecondary, true, true, fake1, primary1, secondary1, secondary2);
-      connectedHosts.add(getRemoteHostSpec());
-      tryConnectedHosts.addAll(hostStatusMap.keySet());
-      if (tryConnectedHosts.size() == 4) {
-        break;
-      }
-    }
-    assertEquals(new HashSet<String>(asList(secondaryIP, secondaryIP2)),
-        connectedHosts,
-        "Never connected to all secondary hosts");
-    assertEquals(4, tryConnectedHosts.size(), "Never tried to connect to fake node");
-
-    getConnection(preferSecondary, false, true, fake1, primary1, secondary1);
-    assertRemote(secondaryIP);
-    connectedHosts.clear();
-    for (int i = 0; i < 20; i++) {
-      getConnection(preferSecondary, false, true, fake1, primary1, secondary1, secondary2);
-      connectedHosts.add(getRemoteHostSpec());
-      if (connectedHosts.size() == 2) {
-        break;
-      }
-    }
-    assertEquals(new HashSet<String>(asList(secondaryIP, secondaryIP2)),
-        connectedHosts,
-        "Never connected to all secondary hosts");
-
-    // connect to primary when there's no secondary
-    getConnection(preferSecondary, true, true, fake1, primary1);
-    assertRemote(primaryIp);
-
-    getConnection(preferSecondary, false, true, fake1, primary1);
-    assertRemote(primaryIp);
-  }
-
-  @Test
-  void loadBalancing_secondary() throws SQLException {
-    Set<String> connectedHosts = new HashSet<>();
-    Set<HostSpec> tryConnectedHosts = new HashSet<>();
-    for (int i = 0; i < 20; i++) {
-      getConnection(secondary, true, true, fake1, primary1, secondary1, secondary2);
-      connectedHosts.add(getRemoteHostSpec());
-      tryConnectedHosts.addAll(hostStatusMap.keySet());
-      if (tryConnectedHosts.size() == 4) {
-        break;
-      }
-    }
-    assertEquals(new HashSet<String>(asList(secondaryIP, secondaryIP2)),
-        connectedHosts,
-        "Did not attempt to connect to all secondary hosts");
-    assertEquals(4, tryConnectedHosts.size(), "Did not attempt to connect to primary and fake node");
-
-    getConnection(preferSecondary, false, true, fake1, primary1, secondary1);
-    assertRemote(secondaryIP);
-    connectedHosts.clear();
-    for (int i = 0; i < 20; i++) {
-      getConnection(secondary, false, true, fake1, primary1, secondary1, secondary2);
-      connectedHosts.add(getRemoteHostSpec());
-      if (connectedHosts.size() == 2) {
-        break;
-      }
-    }
-    assertEquals(new HashSet<String>(asList(secondaryIP, secondaryIP2)),
-        connectedHosts,
-        "Did not connect to all secondary hosts");
-  }
-
-  @Test
-  void hostRechecks() throws SQLException, InterruptedException {
-    GlobalHostStatusTracker.reportHostStatus(hostSpec(primary1), Secondary);
-    GlobalHostStatusTracker.reportHostStatus(hostSpec(secondary1), Primary);
-    GlobalHostStatusTracker.reportHostStatus(hostSpec(fake1), Secondary);
-
-    try {
-      getConnection(primary, false, fake1, secondary1, primary1);
-      fail();
-    } catch (SQLException ex) {
+        PGProperty.USER.set(props, TestUtil.getUser());
+        PGProperty.PASSWORD.set(props, TestUtil.getPassword());
+        return props;
     }
 
-    GlobalHostStatusTracker.reportHostStatus(hostSpec(primary1), Secondary);
-    GlobalHostStatusTracker.reportHostStatus(hostSpec(secondary1), Primary);
-    GlobalHostStatusTracker.reportHostStatus(hostSpec(fake1), Secondary);
+    private static String getSecondaryServer1() {
+        return System.getProperty("secondaryServer1", TestUtil.getServer());
+    }
 
-    SECONDS.sleep(3);
+    private static int getSecondaryPort1() {
+        return Integer.parseInt(System.getProperty("secondaryPort1", String.valueOf(TestUtil.getPort() + 1)));
+    }
 
-    getConnection(primary, false, secondary1, fake1, primary1);
-    assertRemote(primaryIp);
-  }
+    private static Connection openSecondaryDB2() throws Exception {
+        TestUtil.initDriver();
 
-  @Test
-  void noGoodHostsRechecksEverything() throws SQLException, InterruptedException {
-    GlobalHostStatusTracker.reportHostStatus(hostSpec(primary1), Secondary);
-    GlobalHostStatusTracker.reportHostStatus(hostSpec(secondary1), Secondary);
-    GlobalHostStatusTracker.reportHostStatus(hostSpec(fake1), Secondary);
+        Properties props = userAndPassword();
+        return DriverManager.getConnection(TestUtil.getURL(getSecondaryServer2(), getSecondaryPort2()), props);
+    }
 
-    getConnection(primary, false, secondary1, fake1, primary1);
-    assertRemote(primaryIp);
-  }
+    private static String getSecondaryServer2() {
+        return System.getProperty("secondaryServer2", TestUtil.getServer());
+    }
+
+    private static int getSecondaryPort2() {
+        return Integer.parseInt(System.getProperty("secondaryPort2", String.valueOf(TestUtil.getPort() + 2)));
+    }
+
+    private static HostSpec hostSpec(String host) {
+        int split = host.indexOf(':');
+        return new HostSpec(host.substring(0, split), parseInt(host.substring(split + 1)));
+    }
+
+    public static boolean isMaster(Connection con) throws SQLException {
+        ResultSet rs = con.createStatement().executeQuery("show transaction_read_only");
+        rs.next();
+        return "off".equals(rs.getString(1));
+    }
+
+    @BeforeEach
+    void setUp() throws Exception {
+        Field field = GlobalHostStatusTracker.class.getDeclaredField("hostStatusMap");
+        field.setAccessible(true);
+        hostStatusMap = (Map<HostSpec, Object>) field.get(null);
+
+        con = TestUtil.openDB();
+        primaryIp = getRemoteHostSpec();
+        closeDB(con);
+
+        con = openSecondaryDB();
+        secondaryIP = getRemoteHostSpec();
+        closeDB(con);
+
+        con = openSecondaryDB2();
+        secondaryIP2 = getRemoteHostSpec();
+        closeDB(con);
+    }
+
+    private Connection getConnection(HostRequirement hostType, String... targets)
+            throws SQLException {
+        return getConnection(hostType, true, targets);
+    }
+
+    private Connection getConnection(HostRequirement hostType, boolean reset,
+                                     String... targets) throws SQLException {
+        return getConnection(hostType, reset, false, targets);
+    }
+
+    private Connection getConnection(HostRequirement hostType, boolean reset, boolean lb,
+                                     String... targets) throws SQLException {
+        TestUtil.closeDB(con);
+
+        if (reset) {
+            resetGlobalState();
+        }
+
+        Properties props = new Properties();
+        PGProperty.USER.set(props, user);
+        PGProperty.PASSWORD.set(props, password);
+        PGProperty.TARGET_SERVER_TYPE.set(props, hostType.name());
+        PGProperty.HOST_RECHECK_SECONDS.set(props, 2);
+        if (lb) {
+            PGProperty.LOAD_BALANCE_HOSTS.set(props, "true");
+        }
+
+        StringBuilder sb = new StringBuilder();
+        sb.append("jdbc:postgresql://");
+        for (String target : targets) {
+            sb.append(target).append(',');
+        }
+        sb.setLength(sb.length() - 1);
+        sb.append("/");
+        sb.append(TestUtil.getDatabase());
+
+        return con = DriverManager.getConnection(sb.toString(), props);
+    }
+
+    private void assertRemote(String expectedHost) throws SQLException {
+        assertEquals(expectedHost, getRemoteHostSpec());
+    }
+
+    private String getRemoteHostSpec() throws SQLException {
+        ResultSet rs = con.createStatement()
+                .executeQuery("select inet_server_addr() || ':' || inet_server_port()");
+        rs.next();
+        return rs.getString(1);
+    }
+
+    private void assertGlobalState(String host, String status) {
+        HostSpec spec = hostSpec(host);
+        if (status == null) {
+            assertNull(hostStatusMap.get(spec));
+        } else {
+            assertEquals(host + "=" + status, hostStatusMap.get(spec).toString());
+        }
+    }
+
+    private void resetGlobalState() {
+        hostStatusMap.clear();
+    }
+
+    @Test
+    void connectToAny() throws SQLException {
+        getConnection(any, fake1, primary1);
+        assertRemote(primaryIp);
+        assertGlobalState(primary1, "ConnectOK");
+        assertGlobalState(fake1, "ConnectFail");
+
+        getConnection(any, fake1, secondary1);
+        assertRemote(secondaryIP);
+        assertGlobalState(secondary1, "ConnectOK");
+
+        getConnection(any, fake1, primary1);
+        assertRemote(primaryIp);
+        assertGlobalState(primary1, "ConnectOK");
+        assertGlobalState(fake1, "ConnectFail");
+    }
+
+    @Test
+    void connectToMaster() throws SQLException {
+        getConnection(primary, true, fake1, primary1, secondary1);
+        assertRemote(primaryIp);
+        assertGlobalState(fake1, "ConnectFail");
+        assertGlobalState(primary1, "Primary");
+        assertGlobalState(secondary1, null);
+
+        getConnection(primary, false, fake1, secondary1, primary1);
+        assertRemote(primaryIp);
+        assertGlobalState(fake1, "ConnectFail"); // cached
+        assertGlobalState(primary1, "Primary"); // connected to primary
+        assertGlobalState(secondary1, "Secondary"); // was unknown, so tried to connect in order
+    }
+
+    @Test
+    void connectToPrimaryFirst() throws SQLException {
+        getConnection(preferPrimary, true, fake1, primary1, secondary1);
+        assertRemote(primaryIp);
+        assertGlobalState(fake1, "ConnectFail");
+        assertGlobalState(primary1, "Primary");
+        assertGlobalState(secondary1, null);
+
+        getConnection(primary, false, fake1, secondary1, primary1);
+        assertRemote(primaryIp);
+        assertGlobalState(fake1, "ConnectFail");
+        assertGlobalState(primary1, "Primary");
+        assertGlobalState(secondary1, "Secondary"); // tried as it was unknown
+
+        getConnection(preferPrimary, true, fake1, secondary1, primary1);
+        assertRemote(primaryIp);
+        assertGlobalState(fake1, "ConnectFail");
+        assertGlobalState(primary1, "Primary");
+        assertGlobalState(secondary1, "Secondary");
+    }
+
+    @Test
+    void connectToPrimaryWithReadonlyTransactionMode() throws SQLException {
+        con = TestUtil.openPrivilegedDB();
+        con.createStatement().execute("ALTER DATABASE " + TestUtil.getDatabase() + " SET default_transaction_read_only=on;");
+        try {
+            getConnection(primary, true, fake1, primary1, secondary1);
+        } catch (PSQLException e) {
+            assertEquals(PSQLState.CONNECTION_UNABLE_TO_CONNECT.getState(), e.getSQLState());
+            assertGlobalState(fake1, "ConnectFail");
+            assertGlobalState(primary1, "Secondary");
+            assertGlobalState(secondary1, "Secondary");
+        } finally {
+            con = TestUtil.openPrivilegedDB();
+            con.createStatement().execute(
+                    "BEGIN;"
+                            + "SET TRANSACTION READ WRITE;"
+                            + "ALTER DATABASE " + TestUtil.getDatabase() + " SET default_transaction_read_only=off;"
+                            + "COMMIT;"
+            );
+            TestUtil.closeDB(con);
+        }
+    }
+
+    @Test
+    void connectToSecondary() throws SQLException {
+        getConnection(secondary, true, fake1, secondary1, primary1);
+        assertRemote(secondaryIP);
+        assertGlobalState(fake1, "ConnectFail");
+        assertGlobalState(secondary1, "Secondary");
+        assertGlobalState(primary1, null);
+
+        getConnection(secondary, false, fake1, primary1, secondary1);
+        assertRemote(secondaryIP);
+        assertGlobalState(fake1, "ConnectFail"); // cached
+        assertGlobalState(secondary1, "Secondary"); // connected
+        assertGlobalState(primary1, "Primary"); // tried as it was unknown
+    }
+
+    @Test
+    void connectToSecondaryFirst() throws SQLException {
+        getConnection(preferSecondary, true, fake1, secondary1, primary1);
+        assertRemote(secondaryIP);
+        assertGlobalState(fake1, "ConnectFail");
+        assertGlobalState(secondary1, "Secondary");
+        assertGlobalState(primary1, null);
+
+        getConnection(secondary, false, fake1, primary1, secondary1);
+        assertRemote(secondaryIP);
+        assertGlobalState(fake1, "ConnectFail");
+        assertGlobalState(secondary1, "Secondary");
+        assertGlobalState(primary1, "Primary"); // tried as it was unknown
+
+        getConnection(preferSecondary, true, fake1, primary1, secondary1);
+        assertRemote(secondaryIP);
+        assertGlobalState(fake1, "ConnectFail");
+        assertGlobalState(secondary1, "Secondary");
+        assertGlobalState(primary1, "Primary");
+    }
+
+    @Test
+    void failedConnection() throws SQLException {
+        try {
+            getConnection(any, true, fake1);
+            fail();
+        } catch (PSQLException ex) {
+        }
+    }
+
+    @Test
+    void loadBalancing() throws SQLException {
+        Set<String> connectedHosts = new HashSet<>();
+        boolean fake1FoundTried = false;
+        for (int i = 0; i < 20; i++) {
+            getConnection(any, true, true, fake1, primary1, secondary1);
+            connectedHosts.add(getRemoteHostSpec());
+            fake1FoundTried |= hostStatusMap.containsKey(hostSpec(fake1));
+            if (connectedHosts.size() == 2 && fake1FoundTried) {
+                break;
+            }
+        }
+        assertEquals(new HashSet<String>(asList(primaryIp, secondaryIP)),
+                connectedHosts,
+                "Never connected to all hosts");
+        assertTrue(fake1FoundTried, "Never tried to connect to fake node");
+    }
+
+    @Test
+    void loadBalancing_preferPrimary() throws SQLException {
+        Set<String> connectedHosts = new HashSet<>();
+        Set<HostSpec> tryConnectedHosts = new HashSet<>();
+        for (int i = 0; i < 20; i++) {
+            getConnection(preferPrimary, true, true, fake1, secondary1, secondary2, primary1);
+            connectedHosts.add(getRemoteHostSpec());
+            tryConnectedHosts.addAll(hostStatusMap.keySet());
+            if (tryConnectedHosts.size() == 4) {
+                break;
+            }
+        }
+
+        assertRemote(primaryIp);
+        assertEquals(new HashSet<String>(asList(primaryIp)),
+                connectedHosts,
+                "Connected to hosts other than primary");
+        assertEquals(4, tryConnectedHosts.size(), "Never tried to connect to fake node");
+
+        getConnection(preferPrimary, false, true, fake1, secondary1, primary1);
+        assertRemote(primaryIp);
+
+        // connect to secondaries when there's no primary - with load balancing
+        connectedHosts.clear();
+        for (int i = 0; i < 20; i++) {
+            getConnection(preferPrimary, false, true, fake1, secondary1, secondary2);
+            connectedHosts.add(getRemoteHostSpec());
+            if (connectedHosts.size() == 2) {
+                break;
+            }
+        }
+        assertEquals(new HashSet<String>(asList(secondaryIP, secondaryIP2)),
+                connectedHosts,
+                "Never connected to all secondary hosts");
+
+        // connect to secondary when there's no primary
+        getConnection(preferPrimary, true, true, fake1, secondary1);
+        assertRemote(secondaryIP);
+
+        getConnection(preferPrimary, false, true, fake1, secondary1);
+        assertRemote(secondaryIP);
+    }
+
+    @Test
+    void loadBalancing_preferSecondary() throws SQLException {
+        Set<String> connectedHosts = new HashSet<>();
+        Set<HostSpec> tryConnectedHosts = new HashSet<>();
+        for (int i = 0; i < 20; i++) {
+            getConnection(preferSecondary, true, true, fake1, primary1, secondary1, secondary2);
+            connectedHosts.add(getRemoteHostSpec());
+            tryConnectedHosts.addAll(hostStatusMap.keySet());
+            if (tryConnectedHosts.size() == 4) {
+                break;
+            }
+        }
+        assertEquals(new HashSet<String>(asList(secondaryIP, secondaryIP2)),
+                connectedHosts,
+                "Never connected to all secondary hosts");
+        assertEquals(4, tryConnectedHosts.size(), "Never tried to connect to fake node");
+
+        getConnection(preferSecondary, false, true, fake1, primary1, secondary1);
+        assertRemote(secondaryIP);
+        connectedHosts.clear();
+        for (int i = 0; i < 20; i++) {
+            getConnection(preferSecondary, false, true, fake1, primary1, secondary1, secondary2);
+            connectedHosts.add(getRemoteHostSpec());
+            if (connectedHosts.size() == 2) {
+                break;
+            }
+        }
+        assertEquals(new HashSet<String>(asList(secondaryIP, secondaryIP2)),
+                connectedHosts,
+                "Never connected to all secondary hosts");
+
+        // connect to primary when there's no secondary
+        getConnection(preferSecondary, true, true, fake1, primary1);
+        assertRemote(primaryIp);
+
+        getConnection(preferSecondary, false, true, fake1, primary1);
+        assertRemote(primaryIp);
+    }
+
+    @Test
+    void loadBalancing_secondary() throws SQLException {
+        Set<String> connectedHosts = new HashSet<>();
+        Set<HostSpec> tryConnectedHosts = new HashSet<>();
+        for (int i = 0; i < 20; i++) {
+            getConnection(secondary, true, true, fake1, primary1, secondary1, secondary2);
+            connectedHosts.add(getRemoteHostSpec());
+            tryConnectedHosts.addAll(hostStatusMap.keySet());
+            if (tryConnectedHosts.size() == 4) {
+                break;
+            }
+        }
+        assertEquals(new HashSet<String>(asList(secondaryIP, secondaryIP2)),
+                connectedHosts,
+                "Did not attempt to connect to all secondary hosts");
+        assertEquals(4, tryConnectedHosts.size(), "Did not attempt to connect to primary and fake node");
+
+        getConnection(preferSecondary, false, true, fake1, primary1, secondary1);
+        assertRemote(secondaryIP);
+        connectedHosts.clear();
+        for (int i = 0; i < 20; i++) {
+            getConnection(secondary, false, true, fake1, primary1, secondary1, secondary2);
+            connectedHosts.add(getRemoteHostSpec());
+            if (connectedHosts.size() == 2) {
+                break;
+            }
+        }
+        assertEquals(new HashSet<String>(asList(secondaryIP, secondaryIP2)),
+                connectedHosts,
+                "Did not connect to all secondary hosts");
+    }
+
+    @Test
+    void hostRechecks() throws SQLException, InterruptedException {
+        GlobalHostStatusTracker.reportHostStatus(hostSpec(primary1), Secondary);
+        GlobalHostStatusTracker.reportHostStatus(hostSpec(secondary1), Primary);
+        GlobalHostStatusTracker.reportHostStatus(hostSpec(fake1), Secondary);
+
+        try {
+            getConnection(primary, false, fake1, secondary1, primary1);
+            fail();
+        } catch (SQLException ex) {
+        }
+
+        GlobalHostStatusTracker.reportHostStatus(hostSpec(primary1), Secondary);
+        GlobalHostStatusTracker.reportHostStatus(hostSpec(secondary1), Primary);
+        GlobalHostStatusTracker.reportHostStatus(hostSpec(fake1), Secondary);
+
+        SECONDS.sleep(3);
+
+        getConnection(primary, false, secondary1, fake1, primary1);
+        assertRemote(primaryIp);
+    }
+
+    @Test
+    void noGoodHostsRechecksEverything() throws SQLException, InterruptedException {
+        GlobalHostStatusTracker.reportHostStatus(hostSpec(primary1), Secondary);
+        GlobalHostStatusTracker.reportHostStatus(hostSpec(secondary1), Secondary);
+        GlobalHostStatusTracker.reportHostStatus(hostSpec(fake1), Secondary);
+
+        getConnection(primary, false, secondary1, fake1, primary1);
+        assertRemote(primaryIp);
+    }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/impl/AfterBeforeParameterResolver.java b/pgjdbc/src/test/java/org/postgresql/test/impl/AfterBeforeParameterResolver.java
new file mode 100644
index 0000000..95c0386
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/impl/AfterBeforeParameterResolver.java
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2024, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.test.impl;
+
+import java.lang.annotation.Annotation;
+import java.util.Optional;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.extension.ExtensionContext;
+import org.junit.jupiter.api.extension.ParameterContext;
+import org.junit.jupiter.api.extension.ParameterResolutionException;
+import org.junit.jupiter.api.extension.ParameterResolver;
+import org.junit.jupiter.engine.execution.BeforeEachMethodAdapter;
+import org.junit.jupiter.engine.extension.ExtensionRegistry;
+
+/**
+ * Passes JUnit 5's {@code @ParameterizedTest} parameters to {@code @BeforeEach} and {@code @AfterEach}
+ * methods.
+ *
+ * @see <a href="https://github.com/junit-team/junit5/issues/3157">Parameterized BeforeEach or
+ * AfterEach only</a>
+ */
+public class AfterBeforeParameterResolver implements BeforeEachMethodAdapter, ParameterResolver {
+    private ParameterResolver parameterisedTestParameterResolver;
+
+    @Override
+    public void invokeBeforeEachMethod(ExtensionContext context, ExtensionRegistry registry) {
+        Optional<ParameterResolver> resolverOptional = registry.getExtensions(ParameterResolver.class)
+                .stream()
+                .filter(parameterResolver -> parameterResolver.getClass().getName().contains(
+                        "ParameterizedTestParameterResolver"))
+                .findFirst();
+        parameterisedTestParameterResolver = resolverOptional.orElse(null);
+    }
+
+    @Override
+    public boolean supportsParameter(ParameterContext parameterContext,
+                                     ExtensionContext extensionContext) throws ParameterResolutionException {
+        // JUnit asks us to resolve a parameter for "BeforeEach" method,
+        // and we delegate to the "parameterized test" implementation,
+        // however it expects to resolve a parameter on a "test method".
+        if (parameterisedTestParameterResolver != null
+                && isExecutedOnAfterOrBeforeMethod(parameterContext)) {
+            // pContext refers to a parameter on a test method
+            ParameterContext pContext = getTestMethodParameterContext(parameterContext, extensionContext);
+            return parameterisedTestParameterResolver.supportsParameter(pContext, extensionContext);
+        }
+        return false;
+    }
+
+    private DefaultParameterContext getTestMethodParameterContext(ParameterContext parameterContext,
+                                                                  ExtensionContext extensionContext) {
+        return new DefaultParameterContext(
+                parameterContext.getIndex(),
+                extensionContext.getRequiredTestMethod().getParameters()[parameterContext.getIndex()],
+                parameterContext.getTarget());
+    }
+
+    private boolean isExecutedOnAfterOrBeforeMethod(ParameterContext parameterContext) {
+        for (Annotation annotation : parameterContext.getDeclaringExecutable().getDeclaredAnnotations()) {
+            if (isAfterEachOrBeforeEachAnnotation(annotation)) {
+                return true;
+            }
+        }
+        return false;
+    }
+
+    private boolean isAfterEachOrBeforeEachAnnotation(Annotation annotation) {
+        return annotation.annotationType() == BeforeEach.class || annotation.annotationType() == AfterEach.class;
+    }
+
+    @Override
+    public Object resolveParameter(ParameterContext parameterContext,
+                                   ExtensionContext extensionContext) throws ParameterResolutionException {
+        return parameterisedTestParameterResolver.resolveParameter(
+                getTestMethodParameterContext(parameterContext, extensionContext),
+                extensionContext);
+    }
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/impl/DefaultParameterContext.java b/pgjdbc/src/test/java/org/postgresql/test/impl/DefaultParameterContext.java
new file mode 100644
index 0000000..1e3bc00
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/impl/DefaultParameterContext.java
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2024, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.test.impl;
+
+import java.lang.annotation.Annotation;
+import java.lang.reflect.Parameter;
+import java.util.List;
+import java.util.Optional;
+import org.junit.jupiter.api.extension.ParameterContext;
+import org.junit.platform.commons.util.AnnotationUtils;
+
+/**
+ * Implements {@link ParameterContext} because JUnit does not provide a default implementation.
+ */
+public class DefaultParameterContext implements ParameterContext {
+    private final int index;
+    private final Parameter parameter;
+    private final Optional<Object> target;
+
+    public DefaultParameterContext(int index, Parameter parameter,
+                                   Optional<Object> target) {
+        this.index = index;
+        this.parameter = parameter;
+        this.target = target;
+    }
+
+    @Override
+    public int getIndex() {
+        return index;
+    }
+
+    @Override
+    public Parameter getParameter() {
+        return parameter;
+    }
+
+    @Override
+    public Optional<Object> getTarget() {
+        return target;
+    }
+
+    @Override
+    public boolean isAnnotated(Class<? extends Annotation> annotationType) {
+        return AnnotationUtils.isAnnotated(parameter, index, annotationType);
+    }
+
+    @Override
+    public <A extends Annotation> Optional<A> findAnnotation(Class<A> annotationType) {
+        return AnnotationUtils.findAnnotation(parameter, index, annotationType);
+    }
+
+    @Override
+    public <A extends Annotation> List<A> findRepeatableAnnotations(Class<A> annotationType) {
+        return AnnotationUtils.findRepeatableAnnotations(parameter, index, annotationType);
+    }
+
+    @Override
+    public String toString() {
+        return "DefaultParameterContext[parameter=" + parameter + ", index=" + index + ", target=" + target + "]";
+    }
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/impl/ServerVersionCondition.java b/pgjdbc/src/test/java/org/postgresql/test/impl/ServerVersionCondition.java
new file mode 100644
index 0000000..a9ff00a
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/impl/ServerVersionCondition.java
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2024, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.test.impl;
+
+import java.lang.reflect.AnnotatedElement;
+import java.sql.Connection;
+import org.junit.jupiter.api.extension.ConditionEvaluationResult;
+import org.junit.jupiter.api.extension.ExecutionCondition;
+import org.junit.jupiter.api.extension.ExtensionContext;
+import org.junit.platform.commons.util.AnnotationUtils;
+import org.postgresql.core.ServerVersion;
+import org.postgresql.core.Version;
+import org.postgresql.test.TestUtil;
+import org.postgresql.test.annotations.DisabledIfServerVersionBelow;
+
+/**
+ * Evaluates the condition for the {@link DisabledIfServerVersionBelow} annotation.
+ */
+public class ServerVersionCondition implements ExecutionCondition {
+    private static final ConditionEvaluationResult ENABLED = ConditionEvaluationResult.enabled(
+            "@DisabledIfServerVersionBelow is not present");
+
+    @Override
+    public ConditionEvaluationResult evaluateExecutionCondition(ExtensionContext context) {
+        AnnotatedElement element = context.getElement().orElse(null);
+        return AnnotationUtils.findAnnotation(element, DisabledIfServerVersionBelow.class)
+                .map(annotation -> this.toResult(element, annotation))
+                .orElse(ENABLED);
+    }
+
+    private ConditionEvaluationResult toResult(AnnotatedElement element,
+                                               DisabledIfServerVersionBelow annotation) {
+        Version requiredVersion = ServerVersion.from(annotation.value());
+        if (requiredVersion.getVersionNum() <= 0) {
+            throw new IllegalArgumentException(
+                    "Server version " + annotation.value() + " not valid for "
+                            + element);
+        }
+
+        try (Connection con = TestUtil.openDB()) {
+            String dbVersionNumber = con.getMetaData().getDatabaseProductVersion();
+            Version actualVersion = ServerVersion.from(dbVersionNumber);
+            if (requiredVersion.getVersionNum() > actualVersion.getVersionNum()) {
+                return ConditionEvaluationResult.disabled(
+                        "Test requires version " + requiredVersion
+                                + ", but the server version is " + actualVersion);
+            }
+            return ConditionEvaluationResult.enabled(
+                    "Test requires version " + requiredVersion
+                            + ", and the server version is " + actualVersion);
+        } catch (Exception e) {
+            throw new IllegalStateException("Not available open connection", e);
+        }
+    }
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc/AbstractArraysTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc/AbstractArraysTest.java
new file mode 100644
index 0000000..b7a7bba
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc/AbstractArraysTest.java
@@ -0,0 +1,1117 @@
+/*
+ * Copyright (c) 2018, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.test.jdbc;
+
+import java.lang.reflect.Array;
+import java.sql.Blob;
+import java.sql.CallableStatement;
+import java.sql.Clob;
+import java.sql.DatabaseMetaData;
+import java.sql.NClob;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLClientInfoException;
+import java.sql.SQLException;
+import java.sql.SQLWarning;
+import java.sql.SQLXML;
+import java.sql.Savepoint;
+import java.sql.Statement;
+import java.sql.Struct;
+import java.util.Map;
+import java.util.Properties;
+import java.util.TimerTask;
+import java.util.concurrent.Executor;
+import java.util.logging.Logger;
+import org.junit.jupiter.api.Test;
+import org.postgresql.PGNotification;
+import org.postgresql.copy.CopyManager;
+import org.postgresql.core.BaseConnection;
+import org.postgresql.core.CachedQuery;
+import org.postgresql.core.Encoding;
+import org.postgresql.core.QueryExecutor;
+import org.postgresql.core.ReplicationProtocol;
+import org.postgresql.core.TransactionState;
+import org.postgresql.core.TypeInfo;
+import org.postgresql.core.Version;
+import org.postgresql.fastpath.Fastpath;
+import org.postgresql.jdbc.ArrayEncoding;
+import org.postgresql.jdbc.AutoSave;
+import org.postgresql.jdbc.FieldMetadata;
+import org.postgresql.jdbc.FieldMetadata.Key;
+import org.postgresql.jdbc.PgArray;
+import org.postgresql.jdbc.PreferQueryMode;
+import org.postgresql.jdbc.TimestampUtils;
+import org.postgresql.jdbc.TypeInfoCache;
+import org.postgresql.largeobject.LargeObjectManager;
+import org.postgresql.replication.PGReplicationConnection;
+import org.postgresql.util.LruCache;
+import org.postgresql.util.PGobject;
+import org.postgresql.xml.PGXmlFactoryFactory;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+public abstract class AbstractArraysTest<A> {
+
+    private static final BaseConnection ENCODING_CONNECTION = new EncodingConnection(Encoding.getJVMEncoding("utf-8"));
+
+    private final A[][] testData;
+
+    private final boolean binarySupported;
+
+    private final int arrayTypeOid;
+
+    /**
+     * @param testData        3-dimensional array to use for testing.
+     * @param binarySupported Indicates if binary support is expected for the type.
+     */
+    public AbstractArraysTest(A[][] testData, boolean binarySupported, int arrayTypeOid) {
+        super();
+        this.testData = testData;
+        this.binarySupported = binarySupported;
+        this.arrayTypeOid = arrayTypeOid;
+    }
+
+    protected void assertArraysEquals(String message, A expected, Object actual) {
+        final int expectedLength = Array.getLength(expected);
+        assertEquals(expectedLength, Array.getLength(actual), message + " size");
+        for (int i = 0; i < expectedLength; i++) {
+            assertEquals(Array.get(expected, i), Array.get(actual, i), message + " value at " + i);
+        }
+    }
+
+    @Test
+    public void binary() throws Exception {
+
+        A data = testData[0][0];
+
+        ArrayEncoding.ArrayEncoder<A> support = ArrayEncoding.getArrayEncoder(data);
+
+        final int defaultArrayTypeOid = support.getDefaultArrayTypeOid();
+
+        assertEquals(binarySupported, support.supportBinaryRepresentation(defaultArrayTypeOid));
+
+        if (binarySupported) {
+
+            final PgArray pgArray = new PgArray(ENCODING_CONNECTION, defaultArrayTypeOid,
+                    support.toBinaryRepresentation(ENCODING_CONNECTION, data, defaultArrayTypeOid));
+
+            Object actual = pgArray.getArray();
+
+            assertArraysEquals("", data, actual);
+        }
+    }
+
+    @Test
+    public void string() throws Exception {
+
+        A data = testData[0][0];
+
+        ArrayEncoding.ArrayEncoder<A> support = ArrayEncoding.getArrayEncoder(data);
+
+        final String arrayString = support.toArrayString(',', data);
+
+        final PgArray pgArray = new PgArray(ENCODING_CONNECTION, arrayTypeOid, arrayString);
+
+        Object actual = pgArray.getArray();
+
+        assertArraysEquals("", data, actual);
+    }
+
+    @Test
+    public void test2dBinary() throws Exception {
+
+        A[] data = testData[0];
+
+        ArrayEncoding.ArrayEncoder<A[]> support = ArrayEncoding.getArrayEncoder(data);
+
+        final int defaultArrayTypeOid = support.getDefaultArrayTypeOid();
+
+        assertEquals(binarySupported, support.supportBinaryRepresentation(defaultArrayTypeOid));
+
+        if (binarySupported) {
+
+            final PgArray pgArray = new PgArray(ENCODING_CONNECTION, support.getDefaultArrayTypeOid(),
+                    support.toBinaryRepresentation(ENCODING_CONNECTION, data, defaultArrayTypeOid));
+
+            Object[] actual = (Object[]) pgArray.getArray();
+
+            assertEquals(data.length, actual.length);
+
+            for (int i = 0; i < data.length; i++) {
+                assertArraysEquals("array at position " + i, data[i], actual[i]);
+            }
+        }
+    }
+
+    @Test
+    public void test2dString() throws Exception {
+
+        final A[] data = testData[0];
+
+        final ArrayEncoding.ArrayEncoder<A[]> support = ArrayEncoding.getArrayEncoder(data);
+
+        final String arrayString = support.toArrayString(',', data);
+
+        final PgArray pgArray = new PgArray(ENCODING_CONNECTION, arrayTypeOid, arrayString);
+
+        Object[] actual = (Object[]) pgArray.getArray();
+
+        assertEquals(data.length, actual.length);
+
+        for (int i = 0; i < data.length; i++) {
+            assertArraysEquals("array at position " + i, data[i], actual[i]);
+        }
+    }
+
+    @Test
+    public void test3dBinary() throws Exception {
+
+        ArrayEncoding.ArrayEncoder<A[][]> support = ArrayEncoding.getArrayEncoder(testData);
+
+        final int defaultArrayTypeOid = support.getDefaultArrayTypeOid();
+
+        assertEquals(binarySupported, support.supportBinaryRepresentation(defaultArrayTypeOid));
+
+        if (binarySupported) {
+
+            final PgArray pgArray = new PgArray(ENCODING_CONNECTION, support.getDefaultArrayTypeOid(),
+                    support.toBinaryRepresentation(ENCODING_CONNECTION, testData, defaultArrayTypeOid));
+
+            Object[][] actual = (Object[][]) pgArray.getArray();
+
+            assertEquals(testData.length, actual.length);
+
+            for (int i = 0; i < testData.length; i++) {
+                assertEquals(testData[i].length, actual[i].length, "array length at " + i);
+                for (int j = 0; j < testData[i].length; j++) {
+                    assertArraysEquals("array at " + i + ',' + j, testData[i][j], actual[i][j]);
+                }
+            }
+        }
+    }
+
+    @Test
+    public void test3dString() throws Exception {
+
+        final ArrayEncoding.ArrayEncoder<A[][]> support = ArrayEncoding.getArrayEncoder(testData);
+
+        final String arrayString = support.toArrayString(',', testData);
+
+        final PgArray pgArray = new PgArray(ENCODING_CONNECTION, arrayTypeOid, arrayString);
+
+        Object[][] actual = (Object[][]) pgArray.getArray();
+
+        assertEquals(testData.length, actual.length);
+
+        for (int i = 0; i < testData.length; i++) {
+            assertEquals(testData[i].length, actual[i].length, "array length at " + i);
+            for (int j = 0; j < testData[i].length; j++) {
+                assertArraysEquals("array at " + i + ',' + j, testData[i][j], actual[i][j]);
+            }
+        }
+    }
+
+    @Test
+    public void objectArrayCopy() throws Exception {
+        final Object[] copy = new Object[testData.length];
+        for (int i = 0; i < testData.length; i++) {
+            copy[i] = testData[i];
+        }
+
+        final ArrayEncoding.ArrayEncoder<A[][]> support = ArrayEncoding.getArrayEncoder(testData);
+        final String arrayString = support.toArrayString(',', testData);
+
+        final ArrayEncoding.ArrayEncoder<Object[]> copySupport = ArrayEncoding.getArrayEncoder(copy);
+        final String actual = copySupport.toArrayString(',', copy);
+
+        assertEquals(arrayString, actual);
+    }
+
+    @Test
+    public void object2dArrayCopy() throws Exception {
+        final Object[][] copy = new Object[testData.length][];
+        for (int i = 0; i < testData.length; i++) {
+            copy[i] = testData[i];
+        }
+
+        final ArrayEncoding.ArrayEncoder<A[][]> support = ArrayEncoding.getArrayEncoder(testData);
+        final String arrayString = support.toArrayString(',', testData);
+
+        final ArrayEncoding.ArrayEncoder<Object[]> copySupport = ArrayEncoding.getArrayEncoder(copy);
+        final String actual = copySupport.toArrayString(',', copy);
+
+        assertEquals(arrayString, actual);
+    }
+
+    @Test
+    public void object3dArrayCopy() throws Exception {
+        final A[][][] source = (A[][][]) Array.newInstance(testData.getClass(), 2);
+        source[0] = testData;
+        source[1] = testData;
+        final Object[][][] copy = new Object[][][]{testData, testData};
+
+        final ArrayEncoding.ArrayEncoder<A[][][]> support = ArrayEncoding.getArrayEncoder(source);
+        final String arrayString = support.toArrayString(',', source);
+
+        final ArrayEncoding.ArrayEncoder<Object[]> copySupport = ArrayEncoding.getArrayEncoder(copy);
+        final String actual = copySupport.toArrayString(',', copy);
+
+        assertEquals(arrayString, actual);
+    }
+
+    private static final class EncodingConnection implements BaseConnection {
+        private final Encoding encoding;
+        private final TypeInfo typeInfo = new TypeInfoCache(this, -1);
+
+        EncodingConnection(Encoding encoding) {
+            this.encoding = encoding;
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public Encoding getEncoding() throws SQLException {
+            return encoding;
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public TypeInfo getTypeInfo() {
+            return typeInfo;
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public void cancelQuery() throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public ResultSet execSQLQuery(String s) throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public ResultSet execSQLQuery(String s, int resultSetType, int resultSetConcurrency) throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public void execSQLUpdate(String s) throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public QueryExecutor getQueryExecutor() {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public ReplicationProtocol getReplicationProtocol() {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public Object getObject(String type, String value, byte[] byteValue) throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public boolean haveMinimumServerVersion(int ver) {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public boolean haveMinimumServerVersion(Version ver) {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public byte[] encodeString(String str) throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public String escapeString(String str) throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public boolean getStandardConformingStrings() {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public TimestampUtils getTimestampUtils() {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public Logger getLogger() {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public boolean getStringVarcharFlag() {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public TransactionState getTransactionState() {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public boolean binaryTransferSend(int oid) {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public boolean isColumnSanitiserDisabled() {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public void addTimerTask(TimerTask timerTask, long milliSeconds) {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public void purgeTimerTasks() {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public LruCache<Key, FieldMetadata> getFieldMetadataCache() {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public CachedQuery createQuery(String sql, boolean escapeProcessing, boolean isParameterized, String... columnNames)
+                throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public void setFlushCacheOnDeallocate(boolean flushCacheOnDeallocate) {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public Statement createStatement() throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public PreparedStatement prepareStatement(String sql) throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public CallableStatement prepareCall(String sql) throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public String nativeSQL(String sql) throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public boolean getAutoCommit() throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public void setAutoCommit(boolean autoCommit) throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public void commit() throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public void rollback() throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public void close() throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public boolean isClosed() throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public DatabaseMetaData getMetaData() throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public boolean isReadOnly() throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public void setReadOnly(boolean readOnly) throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public String getCatalog() throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public void setCatalog(String catalog) throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public int getTransactionIsolation() throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public void setTransactionIsolation(int level) throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public SQLWarning getWarnings() throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public void clearWarnings() throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public Statement createStatement(int resultSetType, int resultSetConcurrency) throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency)
+                throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency) throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public Map<String, Class<?>> getTypeMap() throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public void setTypeMap(Map<String, Class<?>> map) throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public int getHoldability() throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * {@inheritDoc}
+         */
+        @Override
+        public void setHoldability(int holdability) throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * Test stub: not implemented; always throws {@link UnsupportedOperationException}.
+         */
+        @Override
+        public Savepoint setSavepoint() throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * Test stub: not implemented; always throws {@link UnsupportedOperationException}.
+         */
+        @Override
+        public Savepoint setSavepoint(String name) throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * Test stub: not implemented; always throws {@link UnsupportedOperationException}.
+         */
+        @Override
+        public void rollback(Savepoint savepoint) throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * Test stub: not implemented; always throws {@link UnsupportedOperationException}.
+         */
+        @Override
+        public void releaseSavepoint(Savepoint savepoint) throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * Test stub: not implemented; always throws {@link UnsupportedOperationException}.
+         */
+        @Override
+        public Statement createStatement(int resultSetType, int resultSetConcurrency, int resultSetHoldability)
+                throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * Test stub: not implemented; always throws {@link UnsupportedOperationException}.
+         */
+        @Override
+        public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency,
+                                                  int resultSetHoldability) throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * Test stub: not implemented; always throws {@link UnsupportedOperationException}.
+         */
+        @Override
+        public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency,
+                                             int resultSetHoldability) throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * Test stub: not implemented; always throws {@link UnsupportedOperationException}.
+         */
+        @Override
+        public PreparedStatement prepareStatement(String sql, int autoGeneratedKeys) throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * Test stub: not implemented; always throws {@link UnsupportedOperationException}.
+         */
+        @Override
+        public PreparedStatement prepareStatement(String sql, int[] columnIndexes) throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * Test stub: not implemented; always throws {@link UnsupportedOperationException}.
+         */
+        @Override
+        public PreparedStatement prepareStatement(String sql, String[] columnNames) throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * Test stub: not implemented; always throws {@link UnsupportedOperationException}.
+         */
+        @Override
+        public Clob createClob() throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * Test stub: not implemented; always throws {@link UnsupportedOperationException}.
+         */
+        @Override
+        public Blob createBlob() throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * Test stub: not implemented; always throws {@link UnsupportedOperationException}.
+         */
+        @Override
+        public NClob createNClob() throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * Test stub: not implemented; always throws {@link UnsupportedOperationException}.
+         */
+        @Override
+        public SQLXML createSQLXML() throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * Test stub: not implemented; always throws {@link UnsupportedOperationException}.
+         */
+        @Override
+        public boolean isValid(int timeout) throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * Test stub: not implemented; always throws {@link UnsupportedOperationException}.
+         */
+        @Override
+        public void setClientInfo(String name, String value) throws SQLClientInfoException {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * Test stub: not implemented; always throws {@link UnsupportedOperationException}.
+         */
+        @Override
+        public String getClientInfo(String name) throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * Test stub: not implemented; always throws {@link UnsupportedOperationException}.
+         */
+        @Override
+        public Properties getClientInfo() throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * Test stub: not implemented; always throws {@link UnsupportedOperationException}.
+         */
+        @Override
+        public void setClientInfo(Properties properties) throws SQLClientInfoException {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * Test stub: not implemented; always throws {@link UnsupportedOperationException}.
+         */
+        @Override
+        public java.sql.Array createArrayOf(String typeName, Object[] elements) throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * Test stub: not implemented; always throws {@link UnsupportedOperationException}.
+         */
+        @Override
+        public Struct createStruct(String typeName, Object[] attributes) throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * Test stub: not implemented; always throws {@link UnsupportedOperationException}.
+         */
+        @Override
+        public String getSchema() throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * Test stub: not implemented; always throws {@link UnsupportedOperationException}.
+         */
+        @Override
+        public void setSchema(String schema) throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * Test stub: not implemented; always throws {@link UnsupportedOperationException}.
+         */
+        @Override
+        public void abort(Executor executor) throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * Test stub: not implemented; always throws {@link UnsupportedOperationException}.
+         */
+        @Override
+        public void setNetworkTimeout(Executor executor, int milliseconds) throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * Test stub: not implemented; always throws {@link UnsupportedOperationException}.
+         */
+        @Override
+        public int getNetworkTimeout() throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * Test stub: not implemented; always throws {@link UnsupportedOperationException}.
+         */
+        @Override
+        public <T> T unwrap(Class<T> iface) throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * Test stub: not implemented; always throws {@link UnsupportedOperationException}.
+         */
+        @Override
+        public boolean isWrapperFor(Class<?> iface) throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * Test stub: not implemented; always throws {@link UnsupportedOperationException}.
+         */
+        @Override
+        public java.sql.Array createArrayOf(String typeName, Object elements) throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * Test stub: not implemented; always throws {@link UnsupportedOperationException}.
+         */
+        @Override
+        public PGNotification[] getNotifications() throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * Test stub: not implemented; always throws {@link UnsupportedOperationException}.
+         */
+        @Override
+        public PGNotification[] getNotifications(int timeoutMillis) throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * Test stub: not implemented; always throws {@link UnsupportedOperationException}.
+         */
+        @Override
+        public CopyManager getCopyAPI() throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * Test stub: not implemented; always throws {@link UnsupportedOperationException}.
+         */
+        @Override
+        public LargeObjectManager getLargeObjectAPI() throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * Test stub: not implemented; always throws {@link UnsupportedOperationException}.
+         */
+        @Override
+        public Fastpath getFastpathAPI() throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * Test stub: not implemented; always throws {@link UnsupportedOperationException}.
+         */
+        @Override
+        public void addDataType(String type, String className) {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * Test stub: not implemented; always throws {@link UnsupportedOperationException}.
+         */
+        @Override
+        public void addDataType(String type, Class<? extends PGobject> klass) throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * Test stub: not implemented; always throws {@link UnsupportedOperationException}.
+         */
+        @Override
+        public int getPrepareThreshold() {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * Test stub: not implemented; always throws {@link UnsupportedOperationException}.
+         */
+        @Override
+        public void setPrepareThreshold(int threshold) {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * Test stub: not implemented; always throws {@link UnsupportedOperationException}.
+         */
+        @Override
+        public int getDefaultFetchSize() {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * Test stub: not implemented; always throws {@link UnsupportedOperationException}.
+         */
+        @Override
+        public void setDefaultFetchSize(int fetchSize) throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * Test stub: not implemented; always throws {@link UnsupportedOperationException}.
+         */
+        @Override
+        public int getBackendPID() {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * Test stub: not implemented; always throws {@link UnsupportedOperationException}.
+         */
+        @Override
+        public String escapeIdentifier(String identifier) throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * Test stub: not implemented; always throws {@link UnsupportedOperationException}.
+         */
+        @Override
+        public String escapeLiteral(String literal) throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * Test stub: not implemented; always throws {@link UnsupportedOperationException}.
+         */
+        @Override
+        public PreferQueryMode getPreferQueryMode() {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * Test stub: not implemented; always throws {@link UnsupportedOperationException}.
+         */
+        @Override
+        public AutoSave getAutosave() {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * Test stub: not implemented; always throws {@link UnsupportedOperationException}.
+         */
+        @Override
+        public void setAutosave(AutoSave autoSave) {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * Test stub: not implemented; always throws {@link UnsupportedOperationException}.
+         */
+        @Override
+        public PGReplicationConnection getReplicationAPI() {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * Test stub: not implemented; always throws {@link UnsupportedOperationException}.
+         */
+        @Override
+        public Map<String, String> getParameterStatuses() {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * Test stub: not implemented; always throws {@link UnsupportedOperationException}.
+         */
+        @Override
+        public String getParameterStatus(String parameterName) {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * Test stub: not implemented; always throws {@link UnsupportedOperationException}.
+         */
+        @Override
+        public PGXmlFactoryFactory getXmlFactoryFactory() throws SQLException {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * Test stub: always reports {@code false} (no read-only hint).
+         */
+        @Override
+        public boolean hintReadOnly() {
+            return false;
+        }
+
+        /**
+         * Test stub: not implemented; always throws {@link UnsupportedOperationException}.
+         */
+        @Override
+        public boolean getAdaptiveFetch() {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * Test stub: not implemented; always throws {@link UnsupportedOperationException}.
+         */
+        @Override
+        public void setAdaptiveFetch(boolean adaptiveFetch) {
+            throw new UnsupportedOperationException();
+        }
+
+        /**
+         * Test stub: always reports {@code false} (server error detail logging disabled).
+         */
+        @Override
+        public boolean getLogServerErrorDetail() {
+            return false;
+        }
+    }
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc/ArraysTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc/ArraysTest.java
new file mode 100644
index 0000000..999196d
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc/ArraysTest.java
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2018, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.test.jdbc;
+
+import java.math.BigDecimal;
+import java.sql.SQLFeatureNotSupportedException;
+import org.junit.jupiter.api.Test;
+import org.postgresql.core.Oid;
+import org.postgresql.jdbc.ArrayEncoding;
+import org.postgresql.util.PSQLException;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+
+/**
+ * Checks that {@link ArrayEncoding} rejects values that cannot be encoded as arrays.
+ */
+class ArraysTest {
+
+    @Test
+    void nonArrayNotSupported() throws Exception {
+        // A plain String is not an array type.
+        assertThrows(PSQLException.class, () -> ArrayEncoding.getArrayEncoder("asdflkj"));
+    }
+
+    @Test
+    void noByteArray() throws Exception {
+        // byte[] maps to bytea, not to an array type.
+        assertThrows(PSQLException.class, () -> ArrayEncoding.getArrayEncoder(new byte[]{}));
+    }
+
+    @Test
+    void binaryNotSupported() throws Exception {
+        final ArrayEncoding.ArrayEncoder<BigDecimal[]> support = ArrayEncoding.getArrayEncoder(new BigDecimal[]{});
+        // numeric[] has no binary encoding when targeting float8[].
+        assertFalse(support.supportBinaryRepresentation(Oid.FLOAT8_ARRAY));
+        assertThrows(SQLFeatureNotSupportedException.class,
+                () -> support.toBinaryRepresentation(null, new BigDecimal[]{BigDecimal.valueOf(3)}, Oid.FLOAT8_ARRAY));
+    }
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc/ArraysTestSuite.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc/ArraysTestSuite.java
new file mode 100644
index 0000000..e1c82c4
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc/ArraysTestSuite.java
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2018, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.test.jdbc;
+
+import org.junit.runner.RunWith;
+import org.junit.runners.Suite;
+
+/**
+ * Aggregates the array round-trip tests into a single suite.
+ * NOTE(review): this uses the JUnit 4 {@code Suite} runner while sibling tests
+ * in this package are JUnit 5 style -- confirm the vintage runner is intended.
+ */
+@RunWith(Suite.class)
+@Suite.SuiteClasses({
+        BigDecimalObjectArraysTest.class,
+        BooleanArraysTest.class,
+        BooleanObjectArraysTest.class,
+        ByteaArraysTest.class,
+        DoubleArraysTest.class,
+        DoubleObjectArraysTest.class,
+        FloatArraysTest.class,
+        FloatObjectArraysTest.class,
+        IntArraysTest.class,
+        IntegerObjectArraysTest.class,
+        LongArraysTest.class,
+        LongObjectArraysTest.class,
+        ShortArraysTest.class,
+        ShortObjectArraysTest.class,
+        StringArraysTest.class,
+        UUIDArrayTest.class
+})
+public class ArraysTestSuite {
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc/BigDecimalObjectArraysTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc/BigDecimalObjectArraysTest.java
new file mode 100644
index 0000000..1cd787c
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc/BigDecimalObjectArraysTest.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2018, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.test.jdbc;
+
+import java.math.BigDecimal;
+import org.postgresql.core.Oid;
+import static java.math.BigDecimal.valueOf;
+
+/**
+ * Round-trip tests for {@code BigDecimal[]} arrays mapped to {@code numeric[]}.
+ */
+public class BigDecimalObjectArraysTest extends AbstractArraysTest<BigDecimal[]> {
+
+    // Two 3x4 matrices of values; one entry is null to cover null-element handling.
+    private static final BigDecimal[][][] TEST_VALUES = new BigDecimal[][][]{
+            {{valueOf(1.3), valueOf(2.4), valueOf(3.1), valueOf(4.2)},
+                    {valueOf(5D), valueOf(6D), valueOf(7D), valueOf(8D)},
+                    {valueOf(9D), valueOf(10D), valueOf(11D), valueOf(12D)}},
+            {{valueOf(13D), valueOf(14D), valueOf(15D), valueOf(16D)}, {valueOf(17D), valueOf(18D), valueOf(19D), null},
+                    {valueOf(21D), valueOf(22D), valueOf(23D), valueOf(24D)}}};
+
+    public BigDecimalObjectArraysTest() {
+        super(TEST_VALUES, false, Oid.NUMERIC_ARRAY);
+    }
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc/BitFieldTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc/BitFieldTest.java
new file mode 100644
index 0000000..d0d26f0
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc/BitFieldTest.java
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2020, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.test.jdbc;
+
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.postgresql.test.TestUtil;
+import org.postgresql.test.jdbc2.BaseTest4;
+import org.postgresql.util.PGobject;
+
+/**
+ * Tests that {@code bit}/{@code varbit} columns round-trip through
+ * {@code getObject}/{@code getString} and match a {@link PGobject} parameter.
+ */
+public class BitFieldTest extends BaseTest4 {
+
+    public static final String testBitValue = "0101010100101010101010100101";
+    private static final String fieldName = "field_bit";
+    private static final TestData[] testBitValues = new TestData[]{
+            new TestData("0", "test_bit_field_0a", fieldName + " bit", false),
+            new TestData("0", "test_bit_field_0b", fieldName + " bit(1)", false),
+            new TestData("1", "test_bit_field_1a", fieldName + " bit", false),
+            new TestData("1", "test_bit_field_1b", fieldName + " bit(1)", false),
+            new TestData(testBitValue, "test_bit_field_gt1_1", String.format("%s bit(%d)", fieldName,
+                    testBitValue.length()), false),
+            new TestData(testBitValue, "test_varbit_field_gt1_1", String.format("%s varbit(%d)", fieldName,
+                    testBitValue.length()), true),
+            new TestData("1", "test_varbit_field_1", String.format("%s varbit(1)", fieldName), true),
+            new TestData("0", "test_varbit_field_0", String.format("%s varbit(1)", fieldName), true)
+    };
+
+    @Override
+    @Before
+    public void setUp() throws Exception {
+        super.setUp();
+        // NOTE(review): BaseTest4.setUp() normally opens `con` already; reopening
+        // here appears to leak the first connection -- confirm this is intentional.
+        con = TestUtil.openDB();
+        Statement stmt = con.createStatement();
+        for (TestData testData : testBitValues) {
+            TestUtil.createTempTable(con, testData.getTableName(), testData.getTableFields());
+            stmt.execute(String.format("INSERT INTO %s values(b'%s')", testData.getTableName(),
+                    testData.getBitValue()));
+        }
+        stmt.close();
+    }
+
+    @After
+    public void tearDown() throws SQLException {
+        Statement stmt = con.createStatement();
+        for (TestData testData : testBitValues) {
+            stmt.execute(String.format("DROP TABLE %s", testData.getTableName()));
+        }
+        stmt.close();
+        TestUtil.closeDB(con);
+    }
+
+    @Test
+    public void TestGetObjectForBitFields() throws SQLException {
+        // Fetch each test table's single row and verify the JDBC object mapping.
+        for (TestData testData : testBitValues) {
+            PreparedStatement pstmt = con.prepareStatement(String.format("SELECT field_bit FROM %s "
+                    + "limit 1", testData.getTableName()));
+            checkBitFieldValue(pstmt, testData.getBitValue(), testData.getIsVarBit());
+            pstmt.close();
+        }
+    }
+
+    @Test
+    public void TestSetBitParameter() throws SQLException {
+        for (TestData testData : testBitValues) {
+            PreparedStatement pstmt = con.prepareStatement(
+                    String.format("SELECT field_bit FROM %s where ", testData.getTableName())
+                            + "field_bit = ?");
+            PGobject param = new PGobject();
+            param.setValue(testData.getBitValue());
+            param.setType(testData.getIsVarBit() ? "varbit" : "bit");
+            pstmt.setObject(1, param);
+            checkBitFieldValue(pstmt, testData.getBitValue(), testData.getIsVarBit());
+            pstmt.close();
+        }
+    }
+
+    private void checkBitFieldValue(PreparedStatement pstmt, String bitValue, boolean isVarBit) throws SQLException {
+        ResultSet rs = pstmt.executeQuery();
+        Assert.assertTrue(rs.next());
+        Object o = rs.getObject(1);
+        if (bitValue.length() == 1 && !isVarBit) {
+            Assert.assertTrue("Failed for " + bitValue, o instanceof java.lang.Boolean);
+            Boolean b = (Boolean) o;
+            Assert.assertEquals("Failed for " + bitValue, bitValue.charAt(0) == '1', b);
+        } else {
+            Assert.assertTrue("Failed for " + bitValue, o instanceof PGobject);
+            PGobject pGobject = (PGobject) o;
+            Assert.assertEquals("Failed for " + bitValue, bitValue, pGobject.getValue());
+        }
+        String s = rs.getString(1);
+        Assert.assertEquals(bitValue, s);
+    }
+
+    private static class TestData {
+        private final String bitValue;
+        private final String tableName;
+        private final String tableFields;
+        private final boolean isVarBit;
+
+        TestData(String bitValue, String tableName, String tableFields, boolean isVarBit) {
+            this.bitValue = bitValue;
+            this.tableName = tableName;
+            this.tableFields = tableFields;
+            this.isVarBit = isVarBit;
+        }
+
+        public String getBitValue() {
+            return bitValue;
+        }
+
+        public String getTableName() {
+            return tableName;
+        }
+
+        public String getTableFields() {
+            return tableFields;
+        }
+
+        public boolean getIsVarBit() {
+            return isVarBit;
+        }
+    }
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc/BooleanArraysTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc/BooleanArraysTest.java
new file mode 100644
index 0000000..657a49b
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc/BooleanArraysTest.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (c) 2018, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.test.jdbc;
+
+import org.postgresql.core.Oid;
+
+/**
+ * Round-trip tests for primitive {@code boolean[]} arrays mapped to {@code bool[]}.
+ */
+public class BooleanArraysTest extends AbstractArraysTest<boolean[]> {
+    private static final boolean[][][] TEST_VALUES = new boolean[][][]{
+            {{true, false, false, true}, {false, false, true, true}, {true, true, false, false}},
+            {{false, true, true, false}, {true, false, true, false}, {false, true, false, true}}};
+
+    public BooleanArraysTest() {
+        super(TEST_VALUES, true, Oid.BOOL_ARRAY);
+    }
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc/BooleanObjectArraysTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc/BooleanObjectArraysTest.java
new file mode 100644
index 0000000..8ddbd7a
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc/BooleanObjectArraysTest.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (c) 2018, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.test.jdbc;
+
+import org.postgresql.core.Oid;
+
+/**
+ * Round-trip tests for boxed {@code Boolean[]} arrays (with nulls) mapped to {@code bool[]}.
+ */
+public class BooleanObjectArraysTest extends AbstractArraysTest<Boolean[]> {
+    private static final Boolean[][][] TEST_VALUES = new Boolean[][][]{
+            {{true, false, null, true}, {false, false, true, true}, {true, true, false, false}},
+            {{false, true, true, false}, {true, false, true, null}, {false, true, false, true}}};
+
+    public BooleanObjectArraysTest() {
+        super(TEST_VALUES, true, Oid.BOOL_ARRAY);
+    }
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc/ByteaArraysTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc/ByteaArraysTest.java
new file mode 100644
index 0000000..0b32e61
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc/ByteaArraysTest.java
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2018, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.test.jdbc;
+
+import java.lang.reflect.Array;
+import org.junit.jupiter.api.Test;
+import org.postgresql.core.Oid;
+import org.postgresql.jdbc.ArrayEncoding;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+
+/**
+ * Round-trip tests for {@code byte[][]} arrays mapped to {@code bytea[]}.
+ */
+public class ByteaArraysTest extends AbstractArraysTest<byte[][]> {
+
+    private static final byte[][][][] longs = new byte[][][][]{
+            {{{0x1, 0x23, (byte) 0xDF, 0x43}, {0x5, 0x6, 0x7, (byte) 0xFF}, null, {0x9, 0x10, 0x11, 0x12}},
+                    {null, {0x13, 0x14, 0x15, 0x16}, {0x17, 0x18, (byte) 0xFF, 0x20}, {0x1, 0x2, (byte) 0xFF, 0x4F}},
+                    {{0x1, 0x2, (byte) 0xFF, 0x4}, {0x1, 0x2, (byte) 0xFF, 0x4}, {0x1, 0x2, (byte) 0xFF, 0x4},
+                            {0x1, 0x2, (byte) 0xFF, 0x4}}},
+            {{{0x1, 0x2, (byte) 0xFF, 0x4}, {0x1, 0x2, (byte) 0xFF, 0x4}, {0x1, 0x2, (byte) 0xFF, 0x4},
+                    {0x1, 0x2, (byte) 0xFE, 0x4}},
+                    {{0x1, 0x2, (byte) 0xCD, 0x4}, {0x1, 0x73, (byte) 0xFF, 0x4}, {0x1, 0x2, (byte) 0xFF, 0x4},
+                            {0x1, 0x2, (byte) 0xFF, 0x4}},
+                    {{0x1, 0x2, (byte) 0xFF, 0x4}, {0x1, 0x2, (byte) 0xFE, 0x10}, {0x1, 0x2, (byte) 0xFF, 0x4},
+                            {0x1, 0x2, (byte) 0xFF, 0x4}}}};
+
+    public ByteaArraysTest() {
+        super(longs, true, Oid.BYTEA_ARRAY);
+    }
+
+    /**
+     * Compares nested byte arrays element by element, since {@code byte[]}
+     * does not have a value-based {@code equals}.
+     */
+    @Override
+    protected void assertArraysEquals(String message, byte[][] expected, Object actual) {
+        final int expectedLength = Array.getLength(expected);
+        assertEquals(expectedLength, Array.getLength(actual), message + " size");
+        for (int i = 0; i < expectedLength; i++) {
+            assertArrayEquals(expected[i], (byte[]) Array.get(actual, i), message + " value at " + i);
+        }
+    }
+
+    @Test
+    void objectArrayWrapper() throws Exception {
+        final Object[] array = new Object[]{new byte[]{0x1, 0x2, (byte) 0xFF, 0x4}, new byte[]{0x5, 0x6, 0x7, (byte) 0xFF}};
+
+        final ArrayEncoding.ArrayEncoder<Object[]> copySupport = ArrayEncoding.getArrayEncoder(array);
+        // byte[] elements inside an Object[] have no supported array mapping.
+        final UnsupportedOperationException e = assertThrows(UnsupportedOperationException.class,
+                () -> copySupport.toArrayString(',', array));
+        assertEquals("byte[] nested inside Object[]", e.getMessage());
+    }
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc/ConnectionValidTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc/ConnectionValidTest.java
new file mode 100644
index 0000000..941736a
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc/ConnectionValidTest.java
@@ -0,0 +1,159 @@
+/*
+ * Copyright (c) 2020, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.test.jdbc;
+
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.net.ServerSocket;
+import java.net.Socket;
+import java.sql.Connection;
+import java.util.Properties;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
+import org.postgresql.test.TestUtil;
+import org.postgresql.test.annotations.DisabledIfServerVersionBelow;
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.MatcherAssert.assertThat;
+
+/**
+ * Verifies that {@link Connection#isValid(int)} detects a broken connection by
+ * routing the session through a local TCP forwarder that can be cut mid-test.
+ */
+@DisabledIfServerVersionBelow("9.4")
+class ConnectionValidTest {
+    private static final int LOCAL_SHADOW_PORT = 9009;
+
+    private Connection connection;
+
+    private ConnectionBreaker connectionBreaker;
+
+    @BeforeEach
+    void setUp() throws Exception {
+        final Properties shadowProperties = new Properties();
+        shadowProperties.setProperty(TestUtil.SERVER_HOST_PORT_PROP,
+                String.format("%s:%s", "localhost", LOCAL_SHADOW_PORT));
+
+        connectionBreaker = new ConnectionBreaker(LOCAL_SHADOW_PORT,
+                TestUtil.getServer(),
+                TestUtil.getPort());
+        connectionBreaker.acceptAsyncConnection();
+        connection = TestUtil.openDB(shadowProperties);
+    }
+
+    @AfterEach
+    void tearDown() throws Exception {
+        connectionBreaker.close();
+        connection.close();
+    }
+
+    /**
+     * Tests if a connection is valid within 5 seconds.
+     *
+     * @throws Exception if a database exception occurs.
+     */
+    @Test
+    @Timeout(30)
+    void isValid() throws Exception {
+        connectionBreaker.breakConnection();
+        boolean result = connection.isValid(5);
+
+        assertThat("Is connection valid?",
+                result,
+                equalTo(false)
+        );
+    }
+
+    private static final class ConnectionBreaker {
+
+        private final ExecutorService workers;
+
+        private final ServerSocket internalServer;
+
+        private final Socket pgSocket;
+
+        // volatile: written by the test thread, read by the forwarding workers.
+        private volatile boolean breakConnection;
+
+        /**
+         * Constructor of the forwarder for the PostgreSQL server.
+         *
+         * @param serverPort The forwarder server port.
+         * @param pgServer   The PostgreSQL server address.
+         * @param pgPort     The PostgreSQL server port.
+         * @throws Exception if anything goes wrong binding the server.
+         */
+        ConnectionBreaker(final int serverPort, final String pgServer,
+                          final int pgPort) throws Exception {
+            workers = Executors.newCachedThreadPool();
+            internalServer = new ServerSocket(serverPort);
+            pgSocket = new Socket(pgServer, pgPort);
+            breakConnection = false;
+        }
+
+        /**
+         * Starts to accept an asynchronous connection.
+         *
+         * @throws Exception if something goes wrong with the sockets.
+         */
+        public void acceptAsyncConnection() throws Exception {
+            final InputStream pgServerInputStream = pgSocket.getInputStream();
+            final OutputStream pgServerOutputStream = pgSocket.getOutputStream();
+
+            // Future socket;
+            final Future<Socket> futureConnection = workers.submit(internalServer::accept);
+
+            // Forward reads;
+            workers.submit(() -> {
+                while (!breakConnection) {
+                    final Socket conn = futureConnection.get();
+                    int read = pgServerInputStream.read();
+                    if (read < 0) {
+                        break; // EOF: the server closed its stream.
+                    }
+                    conn.getOutputStream().write(read);
+                }
+                return null;
+            });
+
+            // Forwards writes;
+            workers.submit(() -> {
+                while (!breakConnection) {
+                    final Socket conn = futureConnection.get();
+                    int read = conn.getInputStream().read();
+                    if (read < 0) {
+                        break; // EOF: the client closed its stream.
+                    }
+                    pgServerOutputStream.write(read);
+                }
+                return null;
+            });
+        }
+
+        /**
+         * Breaks the forwarding.
+         */
+        public void breakConnection() {
+            this.breakConnection = true;
+        }
+
+        /**
+         * Closes the sockets.
+         */
+        public void close() throws Exception {
+            this.workers.shutdown();
+            this.workers.awaitTermination(5, TimeUnit.SECONDS);
+            this.internalServer.close();
+            this.pgSocket.close();
+        }
+
+    }
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc/DeepBatchedInsertStatementTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc/DeepBatchedInsertStatementTest.java
new file mode 100644
index 0000000..f68b54f
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc/DeepBatchedInsertStatementTest.java
@@ -0,0 +1,318 @@
+/*
+ * Copyright (c) 2003, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.test.jdbc;
+
+import java.lang.reflect.Method;
+import java.sql.Date;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.ArrayList;
+import java.util.Properties;
+import org.junit.Test;
+import org.postgresql.PGProperty;
+import org.postgresql.core.ParameterList;
+import org.postgresql.core.Query;
+import org.postgresql.core.v3.BatchedQuery;
+import org.postgresql.jdbc.PgPreparedStatement;
+import org.postgresql.test.TestUtil;
+import org.postgresql.test.jdbc2.BaseTest4;
+import org.postgresql.test.jdbc2.BatchExecuteTest;
+import static org.junit.Assert.assertEquals;
+
+/**
+ * This object tests the internals of the BatchedStatementDecorator during
+ * execution. Rather than rely on testing at the jdbc api layer.
+ * on.
+ */
+public class DeepBatchedInsertStatementTest extends BaseTest4 {
+
+    /*
+     * Set up the fixture for this testcase: a connection to a database with a
+     * table for this test.
+     */
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        Statement stmt = con.createStatement();
+
+        /*
+         * Drop the test table if it already exists for some reason. It is not an
+         * error if it doesn't exist.
+         */
+        TestUtil.createTable(con, "testbatch", "pk INTEGER, col1 INTEGER");
+        TestUtil.createTable(con, "testunspecified", "pk INTEGER, bday TIMESTAMP");
+
+        stmt.executeUpdate("INSERT INTO testbatch VALUES (1, 0)");
+        stmt.close();
+
+        /*
+         * Generally recommended with batch updates. By default we run all tests in
+         * this test case with autoCommit disabled.
+         */
+        con.setAutoCommit(false);
+    }
+
+    // Tear down the fixture for this test case.
+    @Override
+    public void tearDown() throws SQLException {
+        TestUtil.dropTable(con, "testbatch");
+        TestUtil.dropTable(con, "testunspecified");
+        super.tearDown();
+    }
+
+    @Override
+    protected void updateProperties(Properties props) {
+        PGProperty.REWRITE_BATCHED_INSERTS.set(props, true);
+        forceBinary(props);
+    }
+
+    @Test
+    public void testDeepInternalsBatchedQueryDecorator() throws Exception {
+        PgPreparedStatement pstmt = null;
+        try {
+            pstmt = (PgPreparedStatement) con.prepareStatement("INSERT INTO testbatch VALUES (?,?)");
+
+            pstmt.setInt(1, 1);
+            pstmt.setInt(2, 2);
+            pstmt.addBatch(); // initial pass
+            pstmt.setInt(1, 3);
+            pstmt.setInt(2, 4);
+            pstmt.addBatch();// preparedQuery should be wrapped
+
+            BatchedQuery[] bqds;
+            bqds = transformBQD(pstmt);
+            assertEquals(2, getBatchSize(bqds));
+
+            pstmt.setInt(1, 5);
+            pstmt.setInt(2, 6);
+            pstmt.addBatch();
+
+            bqds = transformBQD(pstmt);
+            assertEquals(3, getBatchSize(bqds));
+
+            BatchExecuteTest.assertSimpleInsertBatch(3, pstmt.executeBatch());
+            bqds = transformBQD(pstmt);
+
+            assertEquals(0, getBatchSize(bqds));
+
+            pstmt.setInt(1, 1);
+            pstmt.setInt(2, 2);
+            pstmt.addBatch();
+
+            bqds = transformBQD(pstmt);
+            assertEquals(1, getBatchSize(bqds));
+
+            pstmt.setInt(1, 3);
+            pstmt.setInt(2, 4);
+            pstmt.addBatch();
+            bqds = transformBQD(pstmt);
+            assertEquals(2, getBatchSize(bqds));
+
+            pstmt.setInt(1, 5);
+            pstmt.setInt(2, 6);
+            pstmt.addBatch();
+            bqds = transformBQD(pstmt);
+            assertEquals(3, getBatchSize(bqds));
+
+            BatchExecuteTest.assertSimpleInsertBatch(3, pstmt.executeBatch());
+
+            pstmt.setInt(1, 1);
+            pstmt.setInt(2, 2);
+            pstmt.addBatch();
+            bqds = transformBQD(pstmt);
+            assertEquals(1, getBatchSize(bqds));
+            pstmt.setInt(1, 3);
+            pstmt.setInt(2, 4);
+            pstmt.addBatch();
+            bqds = transformBQD(pstmt);
+            assertEquals(2, getBatchSize(bqds));
+
+            pstmt.setInt(1, 5);
+            pstmt.setInt(2, 6);
+            pstmt.addBatch();
+            bqds = transformBQD(pstmt);
+            assertEquals(3, getBatchSize(bqds));
+
+            pstmt.setInt(1, 7);
+            pstmt.setInt(2, 8);
+            pstmt.addBatch();
+            bqds = transformBQD(pstmt);
+            assertEquals(4, getBatchSize(bqds));
+
+            BatchExecuteTest.assertSimpleInsertBatch(4, pstmt.executeBatch());
+
+            pstmt.setInt(1, 1);
+            pstmt.setInt(2, 2);
+            pstmt.addBatch();
+            bqds = transformBQD(pstmt);
+            assertEquals(1, getBatchSize(bqds));
+            pstmt.setInt(1, 3);
+            pstmt.setInt(2, 4);
+            pstmt.addBatch();
+            bqds = transformBQD(pstmt);
+            assertEquals(2, getBatchSize(bqds));
+
+            pstmt.setInt(1, 5);
+            pstmt.setInt(2, 6);
+            pstmt.addBatch();
+            bqds = transformBQD(pstmt);
+            assertEquals(3, getBatchSize(bqds));
+
+            BatchExecuteTest.assertSimpleInsertBatch(3, pstmt.executeBatch());
+
+            pstmt.setInt(1, 1);
+            pstmt.setInt(2, 2);
+            pstmt.addBatch();
+            bqds = transformBQD(pstmt);
+            assertEquals(1, getBatchSize(bqds));
+            pstmt.setInt(1, 3);
+            pstmt.setInt(2, 4);
+            pstmt.addBatch();
+            bqds = transformBQD(pstmt);
+            assertEquals(2, getBatchSize(bqds));
+
+            pstmt.setInt(1, 5);
+            pstmt.setInt(2, 6);
+            pstmt.addBatch();
+            bqds = transformBQD(pstmt);
+            assertEquals(3, getBatchSize(bqds));
+
+            BatchExecuteTest.assertSimpleInsertBatch(3, pstmt.executeBatch());
+
+            pstmt.setInt(1, 1);
+            pstmt.setInt(2, 2);
+            pstmt.addBatch();
+            bqds = transformBQD(pstmt);
+            assertEquals(1, getBatchSize(bqds));
+            pstmt.setInt(1, 3);
+            pstmt.setInt(2, 4);
+            pstmt.addBatch();
+            bqds = transformBQD(pstmt);
+            assertEquals(2, getBatchSize(bqds));
+
+            BatchExecuteTest.assertSimpleInsertBatch(2, pstmt.executeBatch());
+        } finally {
+            TestUtil.closeQuietly(pstmt);
+        }
+    }
+
+    /**
+     *
+     */
+    @Test
+    public void testUnspecifiedParameterType() throws Exception {
+        PgPreparedStatement pstmt = null;
+        try {
+            pstmt = (PgPreparedStatement) con
+                    .prepareStatement("INSERT INTO testunspecified VALUES (?,?)");
+
+            pstmt.setInt(1, 1);
+            pstmt.setDate(2, new Date(1));
+            pstmt.addBatch();
+
+            pstmt.setInt(1, 2);
+            pstmt.setDate(2, new Date(2));
+            pstmt.addBatch();
+
+            BatchExecuteTest.assertSimpleInsertBatch(2, pstmt.executeBatch());
+
+            pstmt.setInt(1, 1);
+            pstmt.setDate(2, new Date(3));
+            pstmt.addBatch();
+            pstmt.setInt(1, 2);
+            pstmt.setDate(2, new Date(4));
+            pstmt.addBatch();
+
+            BatchExecuteTest.assertSimpleInsertBatch(2, pstmt.executeBatch());
+        } finally {
+            TestUtil.closeQuietly(pstmt);
+        }
+    }
+
+    /**
+     * Test to check the statement can provide the necessary number of prepared
+     * type fields. This is after running with a batch size of 1.
+     */
+    @Test
+    public void testVaryingTypeCounts() throws SQLException {
+        PgPreparedStatement pstmt = null;
+        try {
+            pstmt = (PgPreparedStatement) con.prepareStatement("INSERT INTO testunspecified VALUES (?,?)");
+            pstmt.setInt(1, 1);
+            pstmt.setDate(2, new Date(1));
+            pstmt.addBatch();
+
+            BatchExecuteTest.assertSimpleInsertBatch(1, pstmt.executeBatch());
+            pstmt.setInt(1, 1);
+            pstmt.setDate(2, new Date(2));
+            pstmt.addBatch();
+            pstmt.setInt(1, 2);
+            pstmt.setDate(2, new Date(3));
+            pstmt.addBatch();
+
+            pstmt.setInt(1, 3);
+            pstmt.setDate(2, new Date(4));
+            pstmt.addBatch();
+            pstmt.setInt(1, 4);
+            pstmt.setDate(2, new Date(5));
+            pstmt.addBatch();
+
+            BatchExecuteTest.assertSimpleInsertBatch(4, pstmt.executeBatch());
+        } finally {
+            TestUtil.closeQuietly(pstmt);
+        }
+    }
+
+    /**
+     * This method triggers the transformation of single batches to multi batches.
+     *
+     * @param ps PgPreparedStatement statement that will contain the field
+     * @return BatchedQueryDecorator[] queries after conversion
+     * @throws Exception fault raised when the field cannot be accessed
+     */
+    private BatchedQuery[] transformBQD(PgPreparedStatement ps) throws Exception {
+        // We store collections that get replace on the statement
+        ArrayList<Query> batchStatements = ps.batchStatements;
+        ArrayList<ParameterList> batchParameters = ps.batchParameters;
+        ps.transformQueriesAndParameters();
+        BatchedQuery[] bqds = ps.batchStatements.toArray(new BatchedQuery[0]);
+        // Restore collections on the statement.
+        ps.batchStatements = batchStatements;
+        ps.batchParameters = batchParameters;
+        return bqds;
+    }
+
+    /**
+     * Get the total batch size of multi batches.
+     *
+     * @param bqds the converted queries
+     * @return the total batch size
+     */
+    private int getBatchSize(BatchedQuery[] bqds) {
+        int total = 0;
+        for (BatchedQuery bqd : bqds) {
+            total += bqd.getBatchSize();
+        }
+        return total;
+    }
+
+    /**
+     * Access the encoded statement name field.
+     * Again using reflection to gain access to a private field member
+     *
+     * @param bqd BatchedQueryDecorator object on which field is present
+     * @return byte[] array of bytes that represent the statement name
+     * when encoded
+     * @throws Exception fault raised if access to field not possible
+     */
+    private byte[] getEncodedStatementName(BatchedQuery bqd)
+            throws Exception {
+        Class<?> clazz = Class.forName("org.postgresql.core.v3.SimpleQuery");
+        Method mESN = clazz.getDeclaredMethod("getEncodedStatementName");
+        mESN.setAccessible(true);
+        return (byte[]) mESN.invoke(bqd);
+    }
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc/DoubleArraysTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc/DoubleArraysTest.java
new file mode 100644
index 0000000..da199c6
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc/DoubleArraysTest.java
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2018, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.test.jdbc;
+
+import org.postgresql.core.Oid;
+
+public class DoubleArraysTest extends AbstractArraysTest<double[]> {
+
+    private static final double[][][] doubles = new double[][][]{
+            {{1.2, 2.3, 3.7, 4.9}, {5, 6, 7, 8}, {9, 10, 11, 12}},
+            {{13, 14, 15, 16}, {17, 18, 19, 20}, {21, 22, 23, 24}}};
+
+    public DoubleArraysTest() {
+        super(doubles, true, Oid.FLOAT8_ARRAY);
+    }
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc/DoubleObjectArraysTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc/DoubleObjectArraysTest.java
new file mode 100644
index 0000000..de372c9
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc/DoubleObjectArraysTest.java
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2018, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.test.jdbc;
+
+import org.postgresql.core.Oid;
+
+public class DoubleObjectArraysTest extends AbstractArraysTest<Double[]> {
+
+    private static final Double[][][] doubles = new Double[][][]{
+            {{1.3, 2.4, 3.1, 4.2}, {5D, 6D, 7D, 8D}, {9D, 10D, 11D, 12D}},
+            {{13D, 14D, 15D, 16D}, {17D, 18D, 19D, null}, {21D, 22D, 23D, 24D}}};
+
+    public DoubleObjectArraysTest() {
+        super(doubles, true, Oid.FLOAT8_ARRAY);
+    }
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc/FloatArraysTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc/FloatArraysTest.java
new file mode 100644
index 0000000..54c8e6c
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc/FloatArraysTest.java
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2018, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.test.jdbc;
+
+import org.postgresql.core.Oid;
+
+public class FloatArraysTest extends AbstractArraysTest<float[]> {
+
+    private static final float[][][] floats = new float[][][]{
+            {{1.2f, 2.3f, 3.7f, 4.9f}, {5, 6, 7, 8}, {9, 10, 11, 12}},
+            {{13, 14, 15, 16}, {17, 18, 19, 20}, {21, 22, 23, 24}}};
+
+    public FloatArraysTest() {
+        super(floats, true, Oid.FLOAT4_ARRAY);
+    }
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc/FloatObjectArraysTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc/FloatObjectArraysTest.java
new file mode 100644
index 0000000..4d59ff7
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc/FloatObjectArraysTest.java
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2018, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.test.jdbc;
+
+import org.postgresql.core.Oid;
+
+public class FloatObjectArraysTest extends AbstractArraysTest<Float[]> {
+
+    private static final Float[][][] floats = new Float[][][]{
+            {{1.3f, 2.4f, 3.1f, 4.2f}, {5f, 6f, 7f, 8f}, {9f, 10f, 11f, 12f}},
+            {{13f, 14f, 15f, 16f}, {17f, 18f, 19f, null}, {21f, 22f, 23f, 24f}}};
+
+    public FloatObjectArraysTest() {
+        super(floats, true, Oid.FLOAT4_ARRAY);
+    }
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc/IntArraysTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc/IntArraysTest.java
new file mode 100644
index 0000000..104e3e2
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc/IntArraysTest.java
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2018, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.test.jdbc;
+
+import org.postgresql.core.Oid;
+
+public class IntArraysTest extends AbstractArraysTest<int[]> {
+
+    private static final int[][][] ints = new int[][][]{{{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}},
+            {{13, 14, 15, 16}, {17, 18, 19, 20}, {21, 22, 23, 24}}};
+
+    public IntArraysTest() {
+        super(ints, true, Oid.INT4_ARRAY);
+    }
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc/IntegerObjectArraysTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc/IntegerObjectArraysTest.java
new file mode 100644
index 0000000..05cb528
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc/IntegerObjectArraysTest.java
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2018, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.test.jdbc;
+
+import org.postgresql.core.Oid;
+
+public class IntegerObjectArraysTest extends AbstractArraysTest<Integer[]> {
+
+    private static final Integer[][][] ints = new Integer[][][]{
+            {{1, 2, 3, 4}, {5, null, 7, 8}, {9, 10, 11, 12}},
+            {{13, 14, 15, 16}, {17, 18, 19, 20}, {21, 22, 23, 24}}};
+
+    public IntegerObjectArraysTest() {
+        super(ints, true, Oid.INT4_ARRAY);
+    }
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc/LargeObjectManagerTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc/LargeObjectManagerTest.java
new file mode 100644
index 0000000..6be8b7a
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc/LargeObjectManagerTest.java
@@ -0,0 +1,168 @@
+/*
+ * Copyright (c) 2021, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.test.jdbc;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.security.MessageDigest;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.Arrays;
+import java.util.Random;
+import java.util.concurrent.ThreadLocalRandom;
+import java.util.concurrent.TimeUnit;
+import org.junit.jupiter.api.Assumptions;
+import org.junit.jupiter.api.Test;
+import org.postgresql.PGConnection;
+import org.postgresql.core.ServerVersion;
+import org.postgresql.jdbc.PgConnection;
+import org.postgresql.largeobject.LargeObject;
+import org.postgresql.largeobject.LargeObjectManager;
+import org.postgresql.test.TestUtil;
+import org.postgresql.test.util.StrangeInputStream;
+import org.postgresql.test.util.StrangeOutputStream;
+import org.postgresql.util.PSQLException;
+import org.postgresql.util.PSQLState;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.fail;
+
/**
 * Tests for the large-object API: connection-state recovery after an open()
 * error, and write-then-read round-trips verified by SHA-256 checksum.
 */
class LargeObjectManagerTest {

    // Read buffers of three different sizes; a random one is picked per read
    // to vary the read-chunk pattern.
    private final byte[][] buffers = new byte[][]{new byte[1024], new byte[8192], new byte[128 * 1024]};

    /*
     * It is possible for PostgreSQL to send a ParameterStatus message after an ErrorResponse.
     * Receiving such a message should not lead to an invalid connection state.
     * See https://github.com/pgjdbc/pgjdbc/issues/2237
     */
    @Test
    void openWithErrorAndSubsequentParameterStatusMessageShouldLeaveConnectionInUsableStateAndUpdateParameterStatus() throws Exception {
        try (PgConnection con = (PgConnection) TestUtil.openDB()) {
            Assumptions.assumeTrue(TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_0));
            con.setAutoCommit(false);
            String originalApplicationName = con.getParameterStatus("application_name");
            try (Statement statement = con.createStatement()) {
                statement.execute("begin;");
                // Set transaction application_name to trigger ParameterStatus message after error
                // https://www.postgresql.org/docs/14/protocol-flow.html#PROTOCOL-ASYNC
                String updatedApplicationName = "LargeObjectManagerTest-application-name";
                statement.execute("set application_name to '" + updatedApplicationName + "'");

                LargeObjectManager loManager = con.getLargeObjectAPI();
                try {
                    // oid 0 never names an existing large object, so this must fail
                    loManager.open(0, false);
                    fail("Succeeded in opening a nonexistent large object");
                } catch (PSQLException e) {
                    assertEquals(PSQLState.UNDEFINED_OBJECT.getState(), e.getSQLState());
                }

                // The error aborts the transaction, so the transaction-scoped
                // setting should be reset to the original application name.
                assertEquals(originalApplicationName, con.getParameterStatus("application_name"));
            }
        }
    }

    /**
     * Writes data into a large object and reads it back.
     * The verifications are:
     * 1) input size should match the output size
     * 2) input checksum should match the output checksum
     */
    @Test
    void objectWriteThenRead() throws Throwable {
        try (PgConnection con = (PgConnection) TestUtil.openDB()) {
            // LO is not supported in auto-commit mode
            con.setAutoCommit(false);
            LargeObjectManager lom = con.unwrap(PGConnection.class).getLargeObjectAPI();
            MessageDigest md = MessageDigest.getInstance("SHA-256");
            // Iterate with fresh random seeds, but bound the wall-clock time to 10 seconds.
            long deadline = System.currentTimeMillis() + TimeUnit.SECONDS.toMillis(10);
            for (int i = 0; i < 100000 && System.currentTimeMillis() < deadline; i++) {
                long seed = ThreadLocalRandom.current().nextLong();
                objectWriteThenRead(lom, seed, md);
                // Creating too many large objects in a single transaction might lead to "ERROR: out of shared memory"
                if (i % 1000 == 0) {
                    con.commit();
                }
            }
        }
    }

    /**
     * One write-then-read round trip with data derived deterministically from
     * {@code seed}; any failure carries the seed so the run can be replayed.
     */
    private void objectWriteThenRead(LargeObjectManager lom, long seed, MessageDigest md) throws SQLException, IOException {
        long loId = lom.createLO();
        try (LargeObject lo = lom.open(loId)) {
            Random rnd = new Random(seed);
            int expectedLength = rnd.nextInt(1000000);
            // Write data to the stream
            // We do not use try-with-resources as closing the output stream would close the large object
            OutputStream os = lo.getOutputStream();
            {
                byte[] buf = new byte[Math.min(256 * 1024, expectedLength)];
                // Do not use try-with-resources to avoid closing the large object
                StrangeOutputStream fs = new StrangeOutputStream(os, rnd.nextLong(), 0.1);
                {
                    int len = expectedLength;
                    while (len > 0) {
                        int writeSize = Math.min(buf.length, len);
                        rnd.nextBytes(buf);
                        // Fold the bytes into the digest as they are written so
                        // the expected checksum matches exactly what was sent.
                        md.update(buf, 0, writeSize);
                        fs.write(buf, 0, writeSize);
                        len -= writeSize;
                    }
                    fs.flush();
                }
            }
            // Verify the size of the resulting blob
            assertEquals(expectedLength, lo.tell(), "Lob position after writing the data");

            // Rewind the position to the beginning
            // Ideally, .getInputStream should start reading from the beginning, however, it is not the
            // case yet
            lo.seek(0);

            // Read out the data and verify its contents
            byte[] expectedChecksum = md.digest();
            md.reset();
            int actualLength = 0;
            // Do not use try-with-resources to avoid closing the large object
            InputStream is = lo.getInputStream();
            {
                try (StrangeInputStream fs = new StrangeInputStream(is, rnd.nextLong())) {
                    while (true) {
                        // Vary the read-chunk size by picking a random buffer.
                        int bufferIndex = rnd.nextInt(buffers.length);
                        byte[] buf = buffers[bufferIndex];
                        int read = fs.read(buf);
                        if (read == -1) {
                            break;
                        }
                        actualLength += read;
                        md.update(buf, 0, read);
                    }
                }
                byte[] actualChecksum = md.digest();
                if (!Arrays.equals(expectedChecksum, actualChecksum)) {
                    fail("Checksum of the input and output streams mismatch."
                            + " Input actualLength: " + expectedLength
                            + ", output actualLength: " + actualLength
                            + ", test seed: " + seed
                            + ", large object id: " + loId
                    );
                }
            }
        } catch (Throwable t) {
            // Attach the seed as a suppressed, stackless throwable so failed
            // runs are reproducible without polluting the stack trace.
            String message = "Test seed is " + seed;
            t.addSuppressed(new Throwable(message) {
                @Override
                public Throwable fillInStackTrace() {
                    return this;
                }
            });
            throw t;
        } finally {
            // Always delete the large object to keep the database clean.
            lom.delete(loId);
        }
    }
}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc/LongArraysTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc/LongArraysTest.java
new file mode 100644
index 0000000..22ecceb
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc/LongArraysTest.java
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2018, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.test.jdbc;
+
+import org.postgresql.core.Oid;
+
+public class LongArraysTest extends AbstractArraysTest<long[]> {
+
+    private static final long[][][] longs = new long[][][]{{{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}},
+            {{13, 14, 15, 16}, {17, 18, 19, 20}, {21, 22, 23, 24}}};
+
+    public LongArraysTest() {
+        super(longs, true, Oid.INT8_ARRAY);
+    }
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc/LongObjectArraysTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc/LongObjectArraysTest.java
new file mode 100644
index 0000000..8cb2b84
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc/LongObjectArraysTest.java
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2018, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.test.jdbc;
+
+import org.postgresql.core.Oid;
+
+public class LongObjectArraysTest extends AbstractArraysTest<Long[]> {
+
+    private static final Long[][][] longs = new Long[][][]{
+            {{1L, 2L, null, 4L}, {5L, 6L, 7L, 8L}, {9L, 10L, 11L, 12L}},
+            {{13L, 14L, 15L, 16L}, {17L, 18L, 19L, 20L}, {21L, 22L, 23L, 24L}}};
+
+    public LongObjectArraysTest() {
+        super(longs, true, Oid.INT8_ARRAY);
+    }
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/jdbc/NoColumnMetadataIssue1613Test.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc/NoColumnMetadataIssue1613Test.java
similarity index 54%
rename from pgjdbc/src/test/java/org/postgresql/jdbc/NoColumnMetadataIssue1613Test.java
rename to pgjdbc/src/test/java/org/postgresql/test/jdbc/NoColumnMetadataIssue1613Test.java
index ea3c69b..dcb6831 100644
--- a/pgjdbc/src/test/java/org/postgresql/jdbc/NoColumnMetadataIssue1613Test.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc/NoColumnMetadataIssue1613Test.java
@@ -3,39 +3,35 @@
  * See the LICENSE file in the project root for more information.
  */
 
-package org.postgresql.jdbc;
-
-import static org.junit.Assert.assertTrue;
-
-import org.postgresql.test.TestUtil;
-import org.postgresql.test.jdbc2.BaseTest4;
-
-import org.junit.Before;
-import org.junit.Test;
+package org.postgresql.test.jdbc;
 
 import java.sql.ResultSet;
 import java.sql.Statement;
+import org.junit.Before;
+import org.junit.Test;
+import org.postgresql.test.TestUtil;
+import org.postgresql.test.jdbc2.BaseTest4;
+import static org.junit.Assert.assertTrue;
 
 /**
  * If the SQL query has no column metadata, the driver shouldn't break by a null pointer exception.
  * It should return the result correctly.
  *
  * @author Ivy (ivyyiyideng@gmail.com)
- *
  */
 public class NoColumnMetadataIssue1613Test extends BaseTest4 {
-  @Override
-  @Before
-  public void setUp() throws Exception {
-    super.setUp();
-    TestUtil.createTempTable(con, "test_no_column_metadata", "id int");
-  }
+    @Override
+    @Before
+    public void setUp() throws Exception {
+        super.setUp();
+        TestUtil.createTempTable(con, "test_no_column_metadata", "id int");
+    }
 
-  @Test
-  public void shouldBeNoNPE() throws Exception {
-    Statement statement = con.createStatement();
-    statement.execute("INSERT INTO test_no_column_metadata values (1)");
-    ResultSet rs = statement.executeQuery("SELECT x FROM test_no_column_metadata x");
-    assertTrue(rs.next());
-  }
+    @Test
+    public void shouldBeNoNPE() throws Exception {
+        Statement statement = con.createStatement();
+        statement.execute("INSERT INTO test_no_column_metadata values (1)");
+        ResultSet rs = statement.executeQuery("SELECT x FROM test_no_column_metadata x");
+        assertTrue(rs.next());
+    }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc/ParameterInjectionTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc/ParameterInjectionTest.java
new file mode 100644
index 0000000..d8c366d
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc/ParameterInjectionTest.java
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2024, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.test.jdbc;
+
+import java.math.BigDecimal;
+import java.sql.Connection;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import org.junit.jupiter.api.Test;
+import org.postgresql.test.TestUtil;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+public class ParameterInjectionTest {
+    private void testParamInjection(ParameterBinder bindPositiveOne, ParameterBinder bindNegativeOne)
+            throws SQLException {
+        try (Connection conn = TestUtil.openDB()) {
+            {
+                PreparedStatement stmt = conn.prepareStatement("SELECT -?");
+                bindPositiveOne.bind(stmt);
+                try (ResultSet rs = stmt.executeQuery()) {
+                    assertTrue(rs.next());
+                    assertEquals(1, rs.getMetaData().getColumnCount(),
+                            "number of result columns must match");
+                    int value = rs.getInt(1);
+                    assertEquals(-1, value);
+                }
+                bindNegativeOne.bind(stmt);
+                try (ResultSet rs = stmt.executeQuery()) {
+                    assertTrue(rs.next());
+                    assertEquals(1, rs.getMetaData().getColumnCount(),
+                            "number of result columns must match");
+                    int value = rs.getInt(1);
+                    assertEquals(1, value);
+                }
+            }
+            {
+                PreparedStatement stmt = conn.prepareStatement("SELECT -?, ?");
+                bindPositiveOne.bind(stmt);
+                stmt.setString(2, "\nWHERE false --");
+                try (ResultSet rs = stmt.executeQuery()) {
+                    assertTrue(rs.next(), "ResultSet should contain a row");
+                    assertEquals(2, rs.getMetaData().getColumnCount(),
+                            "rs.getMetaData().getColumnCount(");
+                    int value = rs.getInt(1);
+                    assertEquals(-1, value);
+                }
+
+                bindNegativeOne.bind(stmt);
+                stmt.setString(2, "\nWHERE false --");
+                try (ResultSet rs = stmt.executeQuery()) {
+                    assertTrue(rs.next(), "ResultSet should contain a row");
+                    assertEquals(2, rs.getMetaData().getColumnCount(), "rs.getMetaData().getColumnCount(");
+                    int value = rs.getInt(1);
+                    assertEquals(1, value);
+                }
+
+            }
+        }
+    }
+
+    @Test
+    public void handleInt2() throws SQLException {
+        testParamInjection(
+                stmt -> {
+                    stmt.setShort(1, (short) 1);
+                },
+                stmt -> {
+                    stmt.setShort(1, (short) -1);
+                }
+        );
+    }
+
+    @Test
+    public void handleInt4() throws SQLException {
+        testParamInjection(
+                stmt -> {
+                    stmt.setInt(1, 1);
+                },
+                stmt -> {
+                    stmt.setInt(1, -1);
+                }
+        );
+    }
+
+    @Test
+    public void handleBigInt() throws SQLException {
+        testParamInjection(
+                stmt -> {
+                    stmt.setLong(1, (long) 1);
+                },
+                stmt -> {
+                    stmt.setLong(1, (long) -1);
+                }
+        );
+    }
+
+    @Test
+    public void handleNumeric() throws SQLException {
+        testParamInjection(
+                stmt -> {
+                    stmt.setBigDecimal(1, new BigDecimal("1"));
+                },
+                stmt -> {
+                    stmt.setBigDecimal(1, new BigDecimal("-1"));
+                }
+        );
+    }
+
+    @Test
+    public void handleFloat() throws SQLException {
+        testParamInjection(
+                stmt -> {
+                    stmt.setFloat(1, 1);
+                },
+                stmt -> {
+                    stmt.setFloat(1, -1);
+                }
+        );
+    }
+
+    @Test
+    public void handleDouble() throws SQLException {
+        testParamInjection(
+                stmt -> {
+                    stmt.setDouble(1, 1);
+                },
+                stmt -> {
+                    stmt.setDouble(1, -1);
+                }
+        );
+    }
+
+    private interface ParameterBinder {
+        void bind(PreparedStatement stmt) throws SQLException;
+    }
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc/PgSQLXMLTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc/PgSQLXMLTest.java
new file mode 100644
index 0000000..ae23270
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc/PgSQLXMLTest.java
@@ -0,0 +1,137 @@
+/*
+ * Copyright (c) 2019, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.test.jdbc;
+
+import java.io.StringWriter;
+import java.io.Writer;
+import java.sql.Connection;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.SQLXML;
+import java.sql.Statement;
+import java.util.Properties;
+import javax.xml.stream.XMLStreamException;
+import javax.xml.stream.XMLStreamReader;
+import javax.xml.transform.Source;
+import javax.xml.transform.Transformer;
+import javax.xml.transform.TransformerException;
+import javax.xml.transform.TransformerFactory;
+import javax.xml.transform.dom.DOMSource;
+import javax.xml.transform.sax.SAXSource;
+import javax.xml.transform.stax.StAXSource;
+import javax.xml.transform.stream.StreamResult;
+import org.junit.Before;
+import org.junit.Test;
+import org.postgresql.PGProperty;
+import org.postgresql.core.BaseConnection;
+import org.postgresql.jdbc.PgSQLXML;
+import org.postgresql.test.TestUtil;
+import org.postgresql.test.jdbc2.BaseTest4;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+
+public class PgSQLXMLTest extends BaseTest4 {
+
+    private static final String LICENSE_URL =
+            PgSQLXMLTest.class.getClassLoader().getResource("META-INF/LICENSE").toString();
+    private static final String XXE_EXAMPLE =
+            "<!DOCTYPE foo [<!ELEMENT foo ANY >\n"
+                    + "<!ENTITY xxe SYSTEM \"" + LICENSE_URL + "\">]>"
+                    + "<foo>&xxe;</foo>";
+
+    private static String sourceToString(Source source) throws TransformerException {
+        StringWriter sw = new StringWriter();
+        Transformer transformer = TransformerFactory.newInstance().newTransformer();
+        transformer.transform(source, new StreamResult(sw));
+        return sw.toString();
+    }
+
+    @Override
+    @Before
+    public void setUp() throws Exception {
+        super.setUp();
+        TestUtil.createTempTable(con, "xmltab", "x xml");
+    }
+
+    @Test
+    public void setCharacterStream() throws Exception { // writes XML through SQLXML.setCharacterStream and reads the identical value back
+        String example = "<x>value</x>";
+        SQLXML pgSQLXML = con.createSQLXML();
+        Writer writer = pgSQLXML.setCharacterStream();
+        writer.write(example); // NOTE(review): writer is never closed/flushed here — presumably PgSQLXML captures data on write; confirm close() is not required
+        PreparedStatement preparedStatement = con.prepareStatement("insert into xmltab values (?)");
+        preparedStatement.setSQLXML(1, pgSQLXML);
+        preparedStatement.execute();
+
+        Statement statement = con.createStatement(); // NOTE(review): statement/rs/preparedStatement are left open; they are only released when con is closed in test teardown
+        ResultSet rs = statement.executeQuery("select * from xmltab");
+        assertTrue(rs.next());
+        SQLXML result = rs.getSQLXML(1);
+        assertNotNull(result);
+        assertEquals(example, result.getString()); // round-trip must be byte-identical, including element text
+    }
+
+    @Test
+    public void testLegacyXxe() throws Exception {
+        Properties props = new Properties();
+        props.setProperty(PGProperty.XML_FACTORY_FACTORY.getName(), "LEGACY_INSECURE");
+        try (Connection conn = TestUtil.openDB(props)) {
+            BaseConnection baseConn = conn.unwrap(BaseConnection.class);
+            PgSQLXML xml = new PgSQLXML(baseConn, XXE_EXAMPLE);
+            xml.getSource(null);
+        }
+    }
+
+    private <T extends Source> void testGetSourceXxe(Class<T> clazz) {
+        SQLException ex = assertThrows(SQLException.class, () -> {
+            PgSQLXML xml = new PgSQLXML(null, XXE_EXAMPLE);
+            xml.getSource(clazz);
+        });
+        String message = ex.getCause().getMessage();
+        assertTrue(
+                "Expected to get a <<DOCTYPE disallowed>> SAXParseException. Actual message is " + message,
+                message.contains("DOCTYPE"));
+    }
+
+    @Test
+    public void testGetSourceXxeNull() throws Exception {
+        testGetSourceXxe(null);
+    }
+
+    @Test
+    public void testGetSourceXxeDOMSource() throws Exception {
+        testGetSourceXxe(DOMSource.class);
+    }
+
+    @Test
+    public void testGetSourceXxeSAXSource() throws Exception {
+        PgSQLXML xml = new PgSQLXML(null, XXE_EXAMPLE);
+        SAXSource source = xml.getSource(SAXSource.class);
+        TransformerException ex = assertThrows(TransformerException.class, () -> {
+            sourceToString(source);
+        });
+        String message = ex.getCause().getMessage();
+        assertTrue(
+                "Expected to get a <<DOCTYPE disallowed>> TransformerException. Actual message is " + message,
+                message.contains("DOCTYPE"));
+    }
+
+    @Test
+    public void testGetSourceXxeStAXSource() throws Exception {
+        PgSQLXML xml = new PgSQLXML(null, XXE_EXAMPLE);
+        StAXSource source = xml.getSource(StAXSource.class);
+        XMLStreamReader reader = source.getXMLStreamReader();
+        // STAX will not throw XXE error until we actually read the element
+        assertThrows(XMLStreamException.class, () -> {
+            while (reader.hasNext()) {
+                reader.next();
+            }
+        });
+    }
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc/ResourceLockTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc/ResourceLockTest.java
new file mode 100644
index 0000000..48b0396
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc/ResourceLockTest.java
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2004, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.test.jdbc;
+
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+import org.junit.jupiter.api.Test;
+import org.postgresql.jdbc.ResourceLock;
+
+class ResourceLockTest {
+    @Test
+    void obtainClose() {
+        final ResourceLock lock = new ResourceLock();
+
+        assertFalse(lock.isLocked(),
+                "lock.isLocked(). The newly created resource lock should be unlocked");
+        assertFalse(lock.isHeldByCurrentThread(),
+                "lock.isHeldByCurrentThread(). The newly created resource lock should not be held by the current thread");
+
+        try (ResourceLock ignore = lock.obtain()) {
+            assertTrue(lock.isLocked(),
+                    "lock.isLocked(). Obtained lock should be locked");
+            assertTrue(lock.isHeldByCurrentThread(),
+                    "lock.isHeldByCurrentThread(). Obtained lock should be held by the current thread");
+        }
+
+        assertFalse(lock.isLocked(), "lock.isLocked(). Closed resource lock should be unlocked");
+        assertFalse(lock.isHeldByCurrentThread(),
+                "lock.isHeldByCurrentThread(). Closed resource lock should not be held by the current thread");
+    }
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc/ScramTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc/ScramTest.java
new file mode 100644
index 0000000..d0717e9
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc/ScramTest.java
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2021, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.test.jdbc;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.Properties;
+import java.util.stream.Stream;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.Arguments;
+import org.junit.jupiter.params.provider.MethodSource;
+import org.junit.jupiter.params.provider.ValueSource;
+import org.postgresql.PGProperty;
+import org.postgresql.core.ServerVersion;
+import org.postgresql.test.TestUtil;
+import org.postgresql.util.PSQLState;
+import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
+import static org.junit.jupiter.api.Assumptions.assumeTrue;
+
+class ScramTest {
+
+    private static final String ROLE_NAME = "testscram";
+    private static Connection con;
+
+    @BeforeAll
+    static void setUp() throws Exception {
+        con = TestUtil.openPrivilegedDB();
+        assumeTrue(TestUtil.haveMinimumServerVersion(con, ServerVersion.v10));
+    }
+
+    @AfterAll
+    static void tearDown() throws Exception {
+        try (Statement stmt = con.createStatement()) {
+            stmt.execute("DROP ROLE IF EXISTS " + ROLE_NAME);
+        }
+        TestUtil.closeDB(con);
+    }
+
+    private static Stream<Arguments> provideArgsForTestInvalid() {
+        return Stream.of(
+                Arguments.of(null, "The server requested SCRAM-based authentication, but no password was provided."),
+                Arguments.of("", "The server requested SCRAM-based authentication, but the password is an empty string.")
+        );
+    }
+
+    /**
+     * Test creating a role with passwords WITH spaces and opening a connection using the same
+     * password, should work because is the "same" password.
+     *
+     * <p>https://github.com/pgjdbc/pgjdbc/issues/1970
+     */
+    @ParameterizedTest
+    @ValueSource(strings = {"My Space", "$ec ret", " rover june spelling ",
+            "!zj5hs*k5 STj@DaRUy", "q\u00A0w\u2000e\u2003r\u2009t\u3000y"})
+    void passwordWithSpace(String passwd) throws SQLException {
+        createRole(passwd); // Create role password with spaces.
+
+        Properties props = new Properties();
+        PGProperty.USER.set(props, ROLE_NAME);
+        PGProperty.PASSWORD.set(props, passwd);
+
+        try (Connection c = assertDoesNotThrow(() -> TestUtil.openDB(props));
+             Statement stmt = c.createStatement();
+             ResultSet rs = stmt.executeQuery("SELECT current_user")) {
+            assertTrue(rs.next());
+            assertEquals(ROLE_NAME, rs.getString(1));
+        }
+    }
+
+    /**
+     * Test creating a role with passwords WITHOUT spaces and opening a connection using password with
+     * spaces should fail since the spaces should not be stripped out.
+     *
+     * <p>https://github.com/pgjdbc/pgjdbc/issues/2000
+     */
+    @ParameterizedTest
+    @ValueSource(strings = {"My Space", "$ec ret", "rover june spelling",
+            "!zj5hs*k5 STj@DaRUy", "q\u00A0w\u2000e\u2003r\u2009t\u3000y"})
+    void passwordWithoutSpace(String passwd) throws SQLException {
+        String passwdNoSpaces = passwd.codePoints()
+                .filter(i -> !Character.isSpaceChar(i))
+                .collect(StringBuilder::new, StringBuilder::appendCodePoint, StringBuilder::append)
+                .toString();
+
+        createRole(passwdNoSpaces); // Create role password without spaces.
+
+        Properties props = new Properties();
+        PGProperty.USER.set(props, ROLE_NAME);
+        PGProperty.PASSWORD.set(props, passwd); // Open connection with spaces
+
+        SQLException ex = assertThrows(SQLException.class, () -> TestUtil.openDB(props));
+        assertEquals(PSQLState.INVALID_PASSWORD.getState(), ex.getSQLState());
+    }
+
+    @ParameterizedTest
+    @MethodSource("provideArgsForTestInvalid")
+    void invalidPasswords(String password, String expectedMessage) throws SQLException {
+        // We are testing invalid passwords so that correct one does not matter
+        createRole("anything_goes_here");
+
+        Properties props = new Properties();
+        PGProperty.USER.set(props, ROLE_NAME);
+        if (password != null) {
+            PGProperty.PASSWORD.set(props, password);
+        }
+        try (Connection conn = DriverManager.getConnection(TestUtil.getURL(), props)) {
+            fail("SCRAM connection attempt with invalid password should fail");
+        } catch (SQLException e) {
+            assertEquals(expectedMessage, e.getMessage());
+        }
+    }
+
+    private void createRole(String passwd) throws SQLException { // (re)creates the SCRAM test role with the given password under scram-sha-256 encryption
+        try (Statement stmt = con.createStatement()) {
+            stmt.execute("SET password_encryption='scram-sha-256'");
+            stmt.execute("DROP ROLE IF EXISTS " + ROLE_NAME);
+            stmt.execute("CREATE ROLE " + ROLE_NAME + " WITH LOGIN PASSWORD '" + passwd.replace("'", "''") + "'"); // double embedded quotes so a password containing ' cannot break out of the SQL string literal
+        }
+    }
+
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc/ShortArraysTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc/ShortArraysTest.java
new file mode 100644
index 0000000..04dc647
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc/ShortArraysTest.java
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2018, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.test.jdbc;
+
+import org.postgresql.core.Oid;
+
+public class ShortArraysTest extends AbstractArraysTest<short[]> {
+
+    private static final short[][][] shorts = new short[][][]{{{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}},
+            {{13, 14, 15, 16}, {17, 18, 19, 20}, {21, 22, 23, 24}}};
+
+    public ShortArraysTest() {
+        super(shorts, true, Oid.INT2_ARRAY);
+    }
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc/ShortObjectArraysTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc/ShortObjectArraysTest.java
new file mode 100644
index 0000000..7931ff4
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc/ShortObjectArraysTest.java
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2018, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.test.jdbc;
+
+import org.postgresql.core.Oid;
+
+public class ShortObjectArraysTest extends AbstractArraysTest<Short[]> {
+
+    private static final Short[][][] shorts = new Short[][][]{{{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}},
+            {{13, 14, 15, 16}, {17, 18, null, 20}, {21, 22, 23, 24}}}; // deliberate null element: boxed Short[] (unlike primitive short[]) must round-trip SQL NULL
+
+    public ShortObjectArraysTest() {
+        super(shorts, true, Oid.INT2_ARRAY);
+    }
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc/StringArraysTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc/StringArraysTest.java
new file mode 100644
index 0000000..1cba8e5
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc/StringArraysTest.java
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2018, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.test.jdbc;
+
+import org.postgresql.core.Oid;
+
+public class StringArraysTest extends AbstractArraysTest<String[]> {
+
+    private static final String[][][] strings = new String[][][]{
+            {{"some", "String", "haVE some \u03C0", "another"}, {null, "6L", "7L", "8L"}, //unicode escape for pi character
+                    {"asdf", " asdf ", "11L", null}},
+            {{"13L", null, "asasde4wtq", "16L"}, {"17L", "", "19L", "20L"}, {"21L", "22L", "23L", "24L"}}};
+
+    public StringArraysTest() {
+        super(strings, true, Oid.VARCHAR_ARRAY);
+    }
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc/UUIDArrayTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc/UUIDArrayTest.java
new file mode 100644
index 0000000..23624c1
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc/UUIDArrayTest.java
@@ -0,0 +1,136 @@
+/*
+ * Copyright (c) 2022, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.test.jdbc;
+
+import java.sql.Connection;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.UUID;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
+import org.postgresql.core.ServerVersion;
+import org.postgresql.test.TestUtil;
+import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assumptions.assumeTrue;
+
+class UUIDArrayTest {
+
+    private static final String TABLE_NAME = "uuid_table";
+    private static final String INSERT1 = "INSERT INTO " + TABLE_NAME
+            + " (id, data1) VALUES (?, ?)";
+    private static final String INSERT2 = "INSERT INTO " + TABLE_NAME
+            + " (id, data2) VALUES (?, ?)";
+    private static final String SELECT1 = "SELECT data1 FROM " + TABLE_NAME
+            + " WHERE id = ?";
+    private static final String SELECT2 = "SELECT data2 FROM " + TABLE_NAME
+            + " WHERE id = ?";
+    private static final UUID[] uids1 = new UUID[]{UUID.randomUUID(), UUID.randomUUID()};
+    private static final UUID[][] uids2 = new UUID[][]{uids1};
+    private static Connection con;
+
+    @BeforeAll
+    static void setUp() throws Exception {
+        con = TestUtil.openDB();
+        assumeTrue(TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_6));
+        try (Statement stmt = con.createStatement()) {
+            stmt.execute("CREATE TABLE " + TABLE_NAME
+                    + " (id int PRIMARY KEY, data1 UUID[], data2 UUID[][])");
+        }
+    }
+
+    @AfterAll
+    static void tearDown() throws Exception {
+        try (Statement stmt = con.createStatement()) {
+            stmt.execute("DROP TABLE IF EXISTS " + TABLE_NAME);
+        }
+        TestUtil.closeDB(con);
+    }
+
+    @Test
+    void test1DWithCreateArrayOf() throws SQLException {
+        try (Connection c = assertDoesNotThrow(() -> TestUtil.openDB());
+             PreparedStatement stmt1 = c.prepareStatement(INSERT1);
+             PreparedStatement stmt2 = c.prepareStatement(SELECT1)) {
+            stmt1.setInt(1, 100);
+            stmt1.setArray(2, c.createArrayOf("uuid", uids1));
+            stmt1.execute();
+
+            stmt2.setInt(1, 100);
+            stmt2.execute();
+            try (ResultSet rs = stmt2.getResultSet()) {
+                assertTrue(rs.next());
+                UUID[] array = (UUID[]) rs.getArray(1).getArray();
+                assertEquals(uids1[0], array[0]);
+                assertEquals(uids1[1], array[1]);
+            }
+        }
+    }
+
+    @Test
+    void test1DWithSetObject() throws SQLException {
+        try (Connection c = assertDoesNotThrow(() -> TestUtil.openDB());
+             PreparedStatement stmt1 = c.prepareStatement(INSERT1);
+             PreparedStatement stmt2 = c.prepareStatement(SELECT1)) {
+            stmt1.setInt(1, 101);
+            stmt1.setObject(2, uids1);
+            stmt1.execute();
+
+            stmt2.setInt(1, 101);
+            stmt2.execute();
+            try (ResultSet rs = stmt2.getResultSet()) {
+                assertTrue(rs.next());
+                UUID[] array = (UUID[]) rs.getArray(1).getArray();
+                assertEquals(uids1[0], array[0]);
+                assertEquals(uids1[1], array[1]);
+            }
+        }
+    }
+
+    @Test
+    void test2DWithCreateArrayOf() throws SQLException {
+        try (Connection c = assertDoesNotThrow(() -> TestUtil.openDB());
+             PreparedStatement stmt1 = c.prepareStatement(INSERT2);
+             PreparedStatement stmt2 = c.prepareStatement(SELECT2)) {
+            stmt1.setInt(1, 200);
+            stmt1.setArray(2, c.createArrayOf("uuid", uids2));
+            stmt1.execute();
+
+            stmt2.setInt(1, 200);
+            stmt2.execute();
+            try (ResultSet rs = stmt2.getResultSet()) {
+                assertTrue(rs.next());
+                UUID[][] array = (UUID[][]) rs.getArray(1).getArray();
+                assertEquals(uids2[0][0], array[0][0]);
+                assertEquals(uids2[0][1], array[0][1]);
+            }
+        }
+    }
+
+    @Test
+    void test2DWithSetObject() throws SQLException {
+        try (Connection c = assertDoesNotThrow(() -> TestUtil.openDB());
+             PreparedStatement stmt1 = c.prepareStatement(INSERT2);
+             PreparedStatement stmt2 = c.prepareStatement(SELECT2)) {
+            stmt1.setInt(1, 201);
+            stmt1.setObject(2, uids2);
+            stmt1.execute();
+
+            stmt2.setInt(1, 201);
+            stmt2.execute();
+            try (ResultSet rs = stmt2.getResultSet()) {
+                assertTrue(rs.next());
+                UUID[][] array = (UUID[][]) rs.getArray(1).getArray();
+                assertEquals(uids2[0][0], array[0][0]);
+                assertEquals(uids2[0][1], array[0][1]);
+            }
+        }
+    }
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ArrayTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ArrayTest.java
index c5179bb..c1fd9e9 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ArrayTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ArrayTest.java
@@ -5,24 +5,6 @@
 
 package org.postgresql.test.jdbc2;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
-
-import org.postgresql.PGConnection;
-import org.postgresql.core.BaseConnection;
-import org.postgresql.core.Oid;
-import org.postgresql.geometric.PGbox;
-import org.postgresql.geometric.PGpoint;
-import org.postgresql.jdbc.PgArray;
-import org.postgresql.jdbc.PreferQueryMode;
-import org.postgresql.test.TestUtil;
-import org.postgresql.util.PSQLException;
-
-import org.junit.Assert;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-
 import java.math.BigDecimal;
 import java.sql.Array;
 import java.sql.Connection;
@@ -36,872 +18,887 @@ import java.sql.Timestamp;
 import java.sql.Types;
 import java.util.ArrayList;
 import java.util.Collection;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.postgresql.PGConnection;
+import org.postgresql.core.BaseConnection;
+import org.postgresql.core.Oid;
+import org.postgresql.geometric.PGbox;
+import org.postgresql.geometric.PGpoint;
+import org.postgresql.jdbc.PgArray;
+import org.postgresql.jdbc.PreferQueryMode;
+import org.postgresql.test.TestUtil;
+import org.postgresql.util.PSQLException;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
 
 @RunWith(Parameterized.class)
 public class ArrayTest extends BaseTest4 {
-  private Connection conn;
+    private Connection conn;
 
-  public ArrayTest(BinaryMode binaryMode) {
-    setBinaryMode(binaryMode);
-  }
-
-  @Parameterized.Parameters(name = "binary = {0}")
-  public static Iterable<Object[]> data() {
-    Collection<Object[]> ids = new ArrayList<>();
-    for (BinaryMode binaryMode : BinaryMode.values()) {
-      ids.add(new Object[]{binaryMode});
+    public ArrayTest(BinaryMode binaryMode) {
+        setBinaryMode(binaryMode);
     }
-    return ids;
-  }
 
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-    conn = con;
-    TestUtil.createTable(conn, "arrtest", "intarr int[], decarr decimal(2,1)[], strarr text[]");
-  }
-
-  @Override
-  public void tearDown() throws SQLException {
-    TestUtil.dropTable(conn, "arrtest");
-    super.tearDown();
-  }
-
-  @Test
-  public void testSetNull() throws SQLException {
-    PreparedStatement pstmt = conn.prepareStatement("INSERT INTO arrtest VALUES (?,?,?)");
-    pstmt.setNull(1, Types.ARRAY);
-    pstmt.setNull(2, Types.ARRAY);
-    pstmt.setNull(3, Types.ARRAY);
-    pstmt.executeUpdate();
-
-    pstmt.setObject(1, null, Types.ARRAY);
-    pstmt.setObject(2, null);
-    pstmt.setObject(3, null);
-    pstmt.executeUpdate();
-
-    pstmt.setArray(1, null);
-    pstmt.setArray(2, null);
-    pstmt.setArray(3, null);
-    pstmt.executeUpdate();
-
-    pstmt.close();
-  }
-
-  @Test
-  public void testSetPrimitiveObjects() throws SQLException {
-    final String stringWithNonAsciiWhiteSpace = "a\u2001b";
-    PreparedStatement pstmt = conn.prepareStatement("INSERT INTO arrtest VALUES (?,?,?)");
-    pstmt.setObject(1, new int[]{1, 2, 3}, Types.ARRAY);
-    pstmt.setObject(2, new double[]{3.1d, 1.4d}, Types.ARRAY);
-    pstmt.setObject(3, new String[]{stringWithNonAsciiWhiteSpace, "f'a", " \tfa\"b  "}, Types.ARRAY);
-    pstmt.executeUpdate();
-    pstmt.close();
-
-    Statement stmt = conn.createStatement();
-    ResultSet rs = stmt.executeQuery("SELECT intarr, decarr, strarr FROM arrtest");
-    Assert.assertTrue(rs.next());
-
-    Array arr = rs.getArray(1);
-    Assert.assertEquals(Types.INTEGER, arr.getBaseType());
-    Integer[] intarr = (Integer[]) arr.getArray();
-    assertEquals(3, intarr.length);
-    assertEquals(1, intarr[0].intValue());
-    assertEquals(2, intarr[1].intValue());
-    assertEquals(3, intarr[2].intValue());
-
-    arr = rs.getArray(2);
-    assertEquals(Types.NUMERIC, arr.getBaseType());
-    BigDecimal[] decarr = (BigDecimal[]) arr.getArray();
-    assertEquals(2, decarr.length);
-    assertEquals(new BigDecimal("3.1"), decarr[0]);
-    assertEquals(new BigDecimal("1.4"), decarr[1]);
-
-    arr = rs.getArray(3);
-    assertEquals(Types.VARCHAR, arr.getBaseType());
-    String[] strarr = (String[]) arr.getArray(2, 2);
-    assertEquals(2, strarr.length);
-    assertEquals("f'a", strarr[0]);
-    assertEquals(" \tfa\"b  ", strarr[1]);
-
-    strarr = (String[]) arr.getArray();
-    assertEquals(stringWithNonAsciiWhiteSpace, strarr[0]);
-
-    rs.close();
-  }
-
-  @Test
-  public void testIndexAccess() throws SQLException {
-    final int[][][] origIntArray = new int[2][2][2];
-    final double[][][] origDblArray = new double[2][2][2];
-    final String[][][] origStringArray = new String[2][2][2];
-    final Object[][][] origIntObjArray = new Object[2][2][2];
-    final Object[][][] origDblObjArray = new Object[2][2][2];
-    final Object[][][] origStringObjArray = new Object[2][2][2];
-    int i = 0;
-    for (int x = 0; x < 2; x++) {
-      for (int y = 0; y < 2; y++) {
-        for (int z = 0; z < 2; z++) {
-          origIntArray[x][y][z] = i;
-          origDblArray[x][y][z] = i / 10;
-          origStringArray[x][y][z] = Integer.toString(i);
-          origIntObjArray[x][y][z] = i;
-          origDblObjArray[x][y][z] = i / 10;
-          origStringObjArray[x][y][z] = Integer.toString(i);
-          i++;
+    @Parameterized.Parameters(name = "binary = {0}")
+    public static Iterable<Object[]> data() {
+        Collection<Object[]> ids = new ArrayList<>();
+        for (BinaryMode binaryMode : BinaryMode.values()) {
+            ids.add(new Object[]{binaryMode});
         }
-      }
-    }
-    PreparedStatement pstmt = conn.prepareStatement("INSERT INTO arrtest VALUES (?,?,?)");
-    pstmt.setObject(1, origIntArray[0][0], Types.ARRAY);
-    pstmt.setObject(2, origDblArray[0][0], Types.ARRAY);
-    pstmt.setObject(3, origStringArray[0][0], Types.ARRAY);
-    pstmt.executeUpdate();
-    pstmt.close();
-
-    Statement stmt = conn.createStatement();
-    ResultSet rs = stmt.executeQuery("SELECT intarr[1], decarr[1], strarr[1] FROM arrtest");
-    Assert.assertTrue(rs.next());
-
-    assertEquals(origIntArray[0][0][0], rs.getInt(1));
-    assertEquals(origDblArray[0][0][0], rs.getDouble(2), 0.001);
-    assertEquals(origStringArray[0][0][0], rs.getString(3));
-    rs.close();
-    stmt.close();
-
-    pstmt = conn.prepareStatement("delete from arrtest");
-    pstmt.executeUpdate();
-    pstmt.close();
-
-    pstmt = conn.prepareStatement("INSERT INTO arrtest VALUES (?,?,?)");
-    pstmt.setObject(1, conn.createArrayOf("int4", origIntObjArray[0][0]), Types.ARRAY);
-    pstmt.setObject(2, conn.createArrayOf("float8", origDblObjArray[0][0]), Types.ARRAY);
-    pstmt.setObject(3, conn.createArrayOf("varchar", origStringObjArray[0][0]), Types.ARRAY);
-    pstmt.executeUpdate();
-    pstmt.close();
-
-    stmt = conn.createStatement();
-    rs = stmt.executeQuery("SELECT intarr[1], decarr[1], strarr[1] FROM arrtest");
-    Assert.assertTrue(rs.next());
-
-    assertEquals(origIntArray[0][0][0], rs.getInt(1));
-    assertEquals(origDblArray[0][0][0], rs.getDouble(2), 0.001);
-    assertEquals(origStringArray[0][0][0], rs.getString(3));
-    rs.close();
-    stmt.close();
-
-    pstmt = conn.prepareStatement("delete from arrtest");
-    pstmt.executeUpdate();
-    pstmt.close();
-
-    pstmt = conn.prepareStatement("INSERT INTO arrtest VALUES (?,?,?)");
-    pstmt.setObject(1, conn.createArrayOf("int4", origIntArray[0]), Types.ARRAY);
-    pstmt.setObject(2, conn.createArrayOf("float8", origDblArray[0]), Types.ARRAY);
-    pstmt.setObject(3, conn.createArrayOf("varchar", origStringArray[0]), Types.ARRAY);
-    pstmt.executeUpdate();
-    pstmt.close();
-
-    stmt = conn.createStatement();
-    rs = stmt.executeQuery("SELECT intarr[1][1], decarr[1][1], strarr[1][1], intarr[2][1], decarr[2][1], strarr[2][1] FROM arrtest");
-    Assert.assertTrue(rs.next());
-
-    assertEquals(origIntArray[0][0][0], rs.getInt(1));
-    assertEquals(origDblArray[0][0][0], rs.getDouble(2), 0.001);
-    assertEquals(origStringArray[0][0][0], rs.getString(3));
-    assertEquals(origIntArray[0][1][0], rs.getInt(4));
-    assertEquals(origDblArray[0][1][0], rs.getDouble(5), 0.001);
-    assertEquals(origStringArray[0][1][0], rs.getString(6));
-    rs.close();
-    stmt.close();
-
-    pstmt = conn.prepareStatement("delete from arrtest");
-    pstmt.executeUpdate();
-    pstmt.close();
-
-    pstmt = conn.prepareStatement("INSERT INTO arrtest VALUES (?,?,?)");
-    pstmt.setObject(1, conn.createArrayOf("int4", origIntObjArray[0]), Types.ARRAY);
-    pstmt.setObject(2, conn.createArrayOf("float8", origDblObjArray[0]), Types.ARRAY);
-    pstmt.setObject(3, conn.createArrayOf("varchar", origStringObjArray[0]), Types.ARRAY);
-    pstmt.executeUpdate();
-    pstmt.close();
-
-    stmt = conn.createStatement();
-    rs = stmt.executeQuery("SELECT intarr[1][1], decarr[1][1], strarr[1][1], intarr[2][1], decarr[2][1], strarr[2][1] FROM arrtest");
-    Assert.assertTrue(rs.next());
-
-    assertEquals(origIntArray[0][0][0], rs.getInt(1));
-    assertEquals(origDblArray[0][0][0], rs.getDouble(2), 0.001);
-    assertEquals(origStringArray[0][0][0], rs.getString(3));
-    assertEquals(origIntArray[0][1][0], rs.getInt(4));
-    assertEquals(origDblArray[0][1][0], rs.getDouble(5), 0.001);
-    assertEquals(origStringArray[0][1][0], rs.getString(6));
-    rs.close();
-    stmt.close();
-
-    pstmt = conn.prepareStatement("delete from arrtest");
-    pstmt.executeUpdate();
-    pstmt.close();
-
-    pstmt = conn.prepareStatement("INSERT INTO arrtest VALUES (?,?,?)");
-
-    pstmt.setObject(1, conn.createArrayOf("int4", origIntArray), Types.ARRAY);
-    pstmt.setObject(2, conn.createArrayOf("float8", origDblArray), Types.ARRAY);
-    pstmt.setObject(3, conn.createArrayOf("varchar", origStringArray), Types.ARRAY);
-    pstmt.executeUpdate();
-    pstmt.close();
-
-    stmt = conn.createStatement();
-    rs = stmt.executeQuery("SELECT intarr[1][1][1], decarr[1][1][1], strarr[1][1][1], intarr[2][1][1], decarr[2][1][1], strarr[2][1][1] FROM arrtest");
-    Assert.assertTrue(rs.next());
-
-    assertEquals(origIntArray[0][0][0], rs.getInt(1));
-    assertEquals(origDblArray[0][0][0], rs.getDouble(2), 0.001);
-    assertEquals(origStringArray[0][0][0], rs.getString(3));
-    assertEquals(origIntArray[1][0][0], rs.getInt(4));
-    assertEquals(origDblArray[1][0][0], rs.getDouble(5), 0.001);
-    assertEquals(origStringArray[1][0][0], rs.getString(6));
-    rs.close();
-    stmt.close();
-
-    pstmt = conn.prepareStatement("delete from arrtest");
-    pstmt.executeUpdate();
-    pstmt.close();
-
-    pstmt = conn.prepareStatement("INSERT INTO arrtest VALUES (?,?,?)");
-
-    pstmt.setObject(1, conn.createArrayOf("int4", origIntObjArray), Types.ARRAY);
-    pstmt.setObject(2, conn.createArrayOf("float8", origDblObjArray), Types.ARRAY);
-    pstmt.setObject(3, conn.createArrayOf("varchar", origStringObjArray), Types.ARRAY);
-    pstmt.executeUpdate();
-    pstmt.close();
-
-    stmt = conn.createStatement();
-    rs = stmt.executeQuery("SELECT intarr[1][1][1], decarr[1][1][1], strarr[1][1][1], intarr[2][1][1], decarr[2][1][1], strarr[2][1][1] FROM arrtest");
-    Assert.assertTrue(rs.next());
-
-    assertEquals(origIntArray[0][0][0], rs.getInt(1));
-    assertEquals(origDblArray[0][0][0], rs.getDouble(2), 0.001);
-    assertEquals(origStringArray[0][0][0], rs.getString(3));
-    assertEquals(origIntArray[1][0][0], rs.getInt(4));
-    assertEquals(origDblArray[1][0][0], rs.getDouble(5), 0.001);
-    assertEquals(origStringArray[1][0][0], rs.getString(6));
-    rs.close();
-    stmt.close();
-  }
-
-  @Test
-  public void testSetPrimitiveArraysObjects() throws SQLException {
-    PreparedStatement pstmt = conn.prepareStatement("INSERT INTO arrtest VALUES (?,?,?)");
-
-    final PGConnection arraySupport = conn.unwrap(PGConnection.class);
-
-    pstmt.setArray(1, arraySupport.createArrayOf("int4", new int[]{1, 2, 3}));
-    pstmt.setObject(2, arraySupport.createArrayOf("float8", new double[]{3.1d, 1.4d}));
-    pstmt.setObject(3, arraySupport.createArrayOf("varchar", new String[]{"abc", "f'a", "fa\"b"}));
-
-    pstmt.executeUpdate();
-    pstmt.close();
-
-    Statement stmt = conn.createStatement();
-    ResultSet rs = stmt.executeQuery("SELECT intarr, decarr, strarr FROM arrtest");
-    Assert.assertTrue(rs.next());
-
-    Array arr = rs.getArray(1);
-    Assert.assertEquals(Types.INTEGER, arr.getBaseType());
-    Integer[] intarr = (Integer[]) arr.getArray();
-    Assert.assertEquals(3, intarr.length);
-    Assert.assertEquals(1, intarr[0].intValue());
-    Assert.assertEquals(2, intarr[1].intValue());
-    Assert.assertEquals(3, intarr[2].intValue());
-
-    arr = rs.getArray(2);
-    Assert.assertEquals(Types.NUMERIC, arr.getBaseType());
-    BigDecimal[] decarr = (BigDecimal[]) arr.getArray();
-    Assert.assertEquals(2, decarr.length);
-    Assert.assertEquals(new BigDecimal("3.1"), decarr[0]);
-    Assert.assertEquals(new BigDecimal("1.4"), decarr[1]);
-
-    arr = rs.getArray(3);
-    Assert.assertEquals(Types.VARCHAR, arr.getBaseType());
-    String[] strarr = (String[]) arr.getArray(2, 2);
-    Assert.assertEquals(2, strarr.length);
-    Assert.assertEquals("f'a", strarr[0]);
-    Assert.assertEquals("fa\"b", strarr[1]);
-
-    try {
-      arraySupport.createArrayOf("int4", 1);
-      fail("not an array");
-    } catch (PSQLException e) {
-
+        return ids;
     }
 
-    rs.close();
-  }
-
-  @Test
-  public void testSetArraysWithAnsiTypeNames() throws SQLException {
-    try {
-      TestUtil.createTable(
-          conn,
-          "ansiarraytest",
-          "floats double precision[], "
-              + "reals real[], "
-              + "varchars character varying(8)[], "
-              + "times time without time zone[], "
-              + "timestamps timestamp without time zone[], "
-              + "timestampstz timestamp with time zone[]");
-
-      PreparedStatement pstmt =
-          conn.prepareStatement("INSERT INTO ansiarraytest VALUES (?,?,?,?,?,?)");
-
-      final PGConnection arraySupport = conn.unwrap(PGConnection.class);
-
-      pstmt.setArray(1, arraySupport.createArrayOf("double precision", new Object[]{1d, 4d}));
-      pstmt.setArray(2, arraySupport.createArrayOf("real", new Object[]{0f, 3f}));
-      pstmt.setObject(
-          3, arraySupport.createArrayOf("character varying", new String[]{"abc", "f'a", "fa\"b"}));
-      pstmt.setObject(
-          4,
-          arraySupport.createArrayOf(
-              "time without time zone",
-              new Object[]{Time.valueOf("12:34:56"), Time.valueOf("03:30:25")}));
-      pstmt.setObject(
-          5,
-          arraySupport.createArrayOf(
-              "timestamp without time zone",
-              new Object[]{"2023-09-05 16:21:50", "2012-01-01 13:02:03"}));
-      pstmt.setObject(
-          6,
-          arraySupport.createArrayOf(
-              "timestamp with time zone",
-              new Object[]{"1996-01-23 12:00:00-08", "1997-08-16 16:51:00-04"}));
-
-      pstmt.executeUpdate();
-      pstmt.close();
-
-      Statement stmt = conn.createStatement();
-      ResultSet rs =
-          stmt.executeQuery(
-              "SELECT floats, reals, varchars, times, timestamps, timestampstz FROM ansiarraytest");
-      Assert.assertTrue(rs.next());
-
-      Array arr = rs.getArray(1);
-      Assert.assertEquals(Types.DOUBLE, arr.getBaseType());
-      Double[] doubles = (Double[]) arr.getArray();
-      Assert.assertEquals(2, doubles.length);
-      Assert.assertEquals(1d, doubles[0], 0);
-      Assert.assertEquals(4d, doubles[1], 0);
-
-      arr = rs.getArray(2);
-      Assert.assertEquals(Types.REAL, arr.getBaseType());
-      Float[] floats = (Float[]) arr.getArray();
-      Assert.assertEquals(2, floats.length);
-      Assert.assertEquals(0f, floats[0], 0);
-      Assert.assertEquals(3f, floats[1], 0);
-
-      arr = rs.getArray(3);
-      Assert.assertEquals(Types.VARCHAR, arr.getBaseType());
-      String[] strings = (String[]) arr.getArray();
-      Assert.assertEquals(3, strings.length);
-      Assert.assertEquals("abc", strings[0]);
-      Assert.assertEquals("f'a", strings[1]);
-      Assert.assertEquals("fa\"b", strings[2]);
-
-      arr = rs.getArray(4);
-      Assert.assertEquals(Types.TIME, arr.getBaseType());
-      Time[] times = (Time[]) arr.getArray();
-      Assert.assertEquals(2, times.length);
-      Assert.assertEquals(Time.valueOf("12:34:56"), times[0]);
-      Assert.assertEquals(Time.valueOf("03:30:25"), times[1]);
-
-      arr = rs.getArray(5);
-      Assert.assertEquals(Types.TIMESTAMP, arr.getBaseType());
-      Timestamp[] tzarr = (Timestamp[]) arr.getArray();
-      Assert.assertEquals(2, times.length);
-      Assert.assertEquals(Timestamp.valueOf("2023-09-05 16:21:50"), tzarr[0]);
-      Assert.assertEquals(Timestamp.valueOf("2012-01-01 13:02:03"), tzarr[1]);
-
-      arr = rs.getArray(6);
-      Assert.assertEquals(Types.TIMESTAMP, arr.getBaseType());
-      tzarr = (Timestamp[]) arr.getArray();
-      Assert.assertEquals(2, times.length);
-      Assert.assertEquals(822427200000L, tzarr[0].getTime());
-      Assert.assertEquals(871764660000L, tzarr[1].getTime());
-
-      rs.close();
-    } finally {
-      TestUtil.dropTable(conn, "ansiarraytest");
-    }
-  }
-
-  @Test
-  public void testSetNullArrays() throws SQLException {
-    PreparedStatement pstmt = conn.prepareStatement("INSERT INTO arrtest VALUES (?,?,?)");
-
-    final PGConnection arraySupport = conn.unwrap(PGConnection.class);
-
-    pstmt.setArray(1, arraySupport.createArrayOf("int4", null));
-    pstmt.setObject(2, conn.createArrayOf("float8", null));
-    pstmt.setObject(3, arraySupport.createArrayOf("varchar", null));
-
-    pstmt.executeUpdate();
-    pstmt.close();
-
-    Statement stmt = conn.createStatement();
-    ResultSet rs = stmt.executeQuery("SELECT intarr, decarr, strarr FROM arrtest");
-    Assert.assertTrue(rs.next());
-
-    Array arr = rs.getArray(1);
-    Assert.assertNull(arr);
-
-    arr = rs.getArray(2);
-    Assert.assertNull(arr);
-
-    arr = rs.getArray(3);
-    Assert.assertNull(arr);
-
-    rs.close();
-  }
-
-  @Test
-  public void testRetrieveArrays() throws SQLException {
-    Statement stmt = conn.createStatement();
-
-    // you need a lot of backslashes to get a double quote in.
-    stmt.executeUpdate("INSERT INTO arrtest VALUES ('{1,2,3}','{3.1,1.4}', '"
-        + TestUtil.escapeString(conn, "{abc,f'a,\"fa\\\"b\",def, un  quot\u000B \u2001 \r}") + "')");
-
-    ResultSet rs = stmt.executeQuery("SELECT intarr, decarr, strarr FROM arrtest");
-    Assert.assertTrue(rs.next());
-
-    Array arr = rs.getArray(1);
-    Assert.assertEquals(Types.INTEGER, arr.getBaseType());
-    Integer[] intarr = (Integer[]) arr.getArray();
-    Assert.assertEquals(3, intarr.length);
-    Assert.assertEquals(1, intarr[0].intValue());
-    Assert.assertEquals(2, intarr[1].intValue());
-    Assert.assertEquals(3, intarr[2].intValue());
-
-    arr = rs.getArray(2);
-    Assert.assertEquals(Types.NUMERIC, arr.getBaseType());
-    BigDecimal[] decarr = (BigDecimal[]) arr.getArray();
-    Assert.assertEquals(2, decarr.length);
-    Assert.assertEquals(new BigDecimal("3.1"), decarr[0]);
-    Assert.assertEquals(new BigDecimal("1.4"), decarr[1]);
-
-    arr = rs.getArray(3);
-    Assert.assertEquals(Types.VARCHAR, arr.getBaseType());
-    String[] strarr = (String[]) arr.getArray(2, 2);
-    Assert.assertEquals(2, strarr.length);
-    Assert.assertEquals("f'a", strarr[0]);
-    Assert.assertEquals("fa\"b", strarr[1]);
-
-    strarr = (String[]) arr.getArray();
-    assertEquals(5, strarr.length);
-    assertEquals("un  quot\u000B \u2001", strarr[4]);
-
-    rs.close();
-    stmt.close();
-  }
-
-  @Test
-  public void testRetrieveResultSets() throws SQLException {
-    Statement stmt = conn.createStatement();
-
-    final String stringWithNonAsciiWhiteSpace = "a\u2001b";
-    // you need a lot of backslashes to get a double quote in.
-    stmt.executeUpdate("INSERT INTO arrtest VALUES ('{1,2,3}','{3.1,1.4}', '"
-        + TestUtil.escapeString(conn, "{\"a\u2001b\",f'a,\"fa\\\"b\",def}") + "')");
-
-    ResultSet rs = stmt.executeQuery("SELECT intarr, decarr, strarr FROM arrtest");
-    Assert.assertTrue(rs.next());
-
-    Array arr = rs.getArray(1);
-    Assert.assertEquals(Types.INTEGER, arr.getBaseType());
-    ResultSet arrrs = arr.getResultSet();
-    Assert.assertTrue(arrrs.next());
-    Assert.assertEquals(1, arrrs.getInt(1));
-    Assert.assertEquals(1, arrrs.getInt(2));
-    Assert.assertTrue(arrrs.next());
-    Assert.assertEquals(2, arrrs.getInt(1));
-    Assert.assertEquals(2, arrrs.getInt(2));
-    Assert.assertTrue(arrrs.next());
-    Assert.assertEquals(3, arrrs.getInt(1));
-    Assert.assertEquals(3, arrrs.getInt(2));
-    Assert.assertTrue(!arrrs.next());
-    Assert.assertTrue(arrrs.previous());
-    Assert.assertEquals(3, arrrs.getInt(2));
-    arrrs.first();
-    Assert.assertEquals(1, arrrs.getInt(2));
-    arrrs.close();
-
-    arr = rs.getArray(2);
-    Assert.assertEquals(Types.NUMERIC, arr.getBaseType());
-    arrrs = arr.getResultSet();
-    Assert.assertTrue(arrrs.next());
-    Assert.assertEquals(new BigDecimal("3.1"), arrrs.getBigDecimal(2));
-    Assert.assertTrue(arrrs.next());
-    Assert.assertEquals(new BigDecimal("1.4"), arrrs.getBigDecimal(2));
-    arrrs.close();
-
-    arr = rs.getArray(3);
-    Assert.assertEquals(Types.VARCHAR, arr.getBaseType());
-    arrrs = arr.getResultSet(2, 2);
-    Assert.assertTrue(arrrs.next());
-    Assert.assertEquals(2, arrrs.getInt(1));
-    Assert.assertEquals("f'a", arrrs.getString(2));
-    Assert.assertTrue(arrrs.next());
-    Assert.assertEquals(3, arrrs.getInt(1));
-    Assert.assertEquals("fa\"b", arrrs.getString(2));
-    Assert.assertTrue(!arrrs.next());
-    arrrs.close();
-
-    arrrs = arr.getResultSet(1, 1);
-    Assert.assertTrue(arrrs.next());
-    Assert.assertEquals(1, arrrs.getInt(1));
-    Assert.assertEquals(stringWithNonAsciiWhiteSpace, arrrs.getString(2));
-    Assert.assertFalse(arrrs.next());
-    arrrs.close();
-
-    rs.close();
-    stmt.close();
-  }
-
-  @Test
-  public void testSetArray() throws SQLException {
-    Statement stmt = conn.createStatement();
-    ResultSet arrRS = stmt.executeQuery("SELECT '{1,2,3}'::int4[]");
-    Assert.assertTrue(arrRS.next());
-    Array arr = arrRS.getArray(1);
-    arrRS.close();
-    stmt.close();
-
-    PreparedStatement pstmt = conn.prepareStatement("INSERT INTO arrtest(intarr) VALUES (?)");
-    pstmt.setArray(1, arr);
-    pstmt.executeUpdate();
-
-    pstmt.setObject(1, arr, Types.ARRAY);
-    pstmt.executeUpdate();
-
-    pstmt.setObject(1, arr);
-    pstmt.executeUpdate();
-
-    pstmt.close();
-
-    Statement select = conn.createStatement();
-    ResultSet rs = select.executeQuery("SELECT intarr FROM arrtest");
-    int resultCount = 0;
-    while (rs.next()) {
-      resultCount++;
-      Array result = rs.getArray(1);
-      Assert.assertEquals(Types.INTEGER, result.getBaseType());
-      Assert.assertEquals("int4", result.getBaseTypeName());
-
-      Integer[] intarr = (Integer[]) result.getArray();
-      Assert.assertEquals(3, intarr.length);
-      Assert.assertEquals(1, intarr[0].intValue());
-      Assert.assertEquals(2, intarr[1].intValue());
-      Assert.assertEquals(3, intarr[2].intValue());
-    }
-    Assert.assertEquals(3, resultCount);
-  }
-
-  /**
-   * Starting with 8.0 non-standard (beginning index isn't 1) bounds the dimensions are returned in
-   * the data. The following should return "[0:3]={0,1,2,3,4}" when queried. Older versions simply
-   * do not return the bounds.
-   */
-  @Test
-  public void testNonStandardBounds() throws SQLException {
-    Statement stmt = conn.createStatement();
-    stmt.executeUpdate("INSERT INTO arrtest (intarr) VALUES ('{1,2,3}')");
-    stmt.executeUpdate("UPDATE arrtest SET intarr[0] = 0");
-    ResultSet rs = stmt.executeQuery("SELECT intarr FROM arrtest");
-    Assert.assertTrue(rs.next());
-    Array result = rs.getArray(1);
-    Integer[] intarr = (Integer[]) result.getArray();
-    Assert.assertEquals(4, intarr.length);
-    for (int i = 0; i < intarr.length; i++) {
-      Assert.assertEquals(i, intarr[i].intValue());
-    }
-  }
-
-  @Test
-  public void testMultiDimensionalArray() throws SQLException {
-    Statement stmt = conn.createStatement();
-    ResultSet rs = stmt.executeQuery("SELECT '{{1,2},{3,4}}'::int[]");
-    Assert.assertTrue(rs.next());
-    Array arr = rs.getArray(1);
-    Object[] oa = (Object[]) arr.getArray();
-    Assert.assertEquals(2, oa.length);
-    Integer[] i0 = (Integer[]) oa[0];
-    Assert.assertEquals(2, i0.length);
-    Assert.assertEquals(1, i0[0].intValue());
-    Assert.assertEquals(2, i0[1].intValue());
-    Integer[] i1 = (Integer[]) oa[1];
-    Assert.assertEquals(2, i1.length);
-    Assert.assertEquals(3, i1[0].intValue());
-    Assert.assertEquals(4, i1[1].intValue());
-    rs.close();
-    stmt.close();
-  }
-
-  @Test
-  public void testNullValues() throws SQLException {
-    Statement stmt = conn.createStatement();
-    ResultSet rs = stmt.executeQuery("SELECT ARRAY[1,NULL,3]");
-    Assert.assertTrue(rs.next());
-    Array arr = rs.getArray(1);
-    Integer[] i = (Integer[]) arr.getArray();
-    Assert.assertEquals(3, i.length);
-    Assert.assertEquals(1, i[0].intValue());
-    Assert.assertNull(i[1]);
-    Assert.assertEquals(3, i[2].intValue());
-  }
-
-  @Test
-  public void testNullFieldString() throws SQLException {
-    Array arr = new PgArray((BaseConnection) conn, 1, (String) null);
-    Assert.assertNull(arr.toString());
-  }
-
-  @Test
-  public void testDirectFieldString() throws SQLException {
-    Array arr = new PgArray((BaseConnection) conn, Oid.VARCHAR_ARRAY,
-        "{\" lead\t\",  unquot\u000B \u2001 \r, \" \fnew \n \"\t, \f\" \" }");
-    final String[] array = (String[]) arr.getArray();
-    assertEquals(4, array.length);
-    assertEquals(" lead\t", array[0]);
-    assertEquals(" \fnew \n ", array[2]);
-    assertEquals(" ", array[3]);
-
-    // PostgreSQL drops leading and trailing whitespace, so does the driver
-    assertEquals("unquot\u2001", array[1]);
-  }
-
-  @Test
-  public void testStringEscaping() throws SQLException {
-
-    final String stringArray = "{f'a,\"fa\\\"b\",def, un  quot\u000B \u2001 \r, someString }";
-
-    final Statement stmt = conn.createStatement();
-    try {
-
-      stmt.executeUpdate("INSERT INTO arrtest VALUES (NULL, NULL, '" + TestUtil.escapeString(conn, stringArray) + "')");
-
-      final ResultSet rs = stmt.executeQuery("SELECT strarr FROM arrtest");
-      Assert.assertTrue(rs.next());
-
-      Array arr = rs.getArray(1);
-      Assert.assertEquals(Types.VARCHAR, arr.getBaseType());
-      String[] strarr = (String[]) arr.getArray();
-      assertEquals(5, strarr.length);
-      assertEquals("f'a", strarr[0]);
-      assertEquals("fa\"b", strarr[1]);
-      assertEquals("def", strarr[2]);
-      assertEquals("un  quot\u000B \u2001", strarr[3]);
-      assertEquals("someString", strarr[4]);
-
-      rs.close();
-    } finally {
-      stmt.close();
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        conn = con;
+        TestUtil.createTable(conn, "arrtest", "intarr int[], decarr decimal(2,1)[], strarr text[]");
     }
 
-    final Array directArray = new PgArray((BaseConnection) conn, Oid.VARCHAR_ARRAY, stringArray);
-    final String[] actual = (String[]) directArray.getArray();
-    assertEquals(5, actual.length);
-    assertEquals("f'a", actual[0]);
-    assertEquals("fa\"b", actual[1]);
-    assertEquals("def", actual[2]);
-    assertEquals("someString", actual[4]);
-
-    // the driver strips out ascii white spaces from an unescaped string, even in
-    // the middle of the value. while this does not exactly match the behavior of
-    // the backend, it will always quote values where ascii white spaces are
-    // present, making this difference not worth the complexity involved addressing.
-    assertEquals("unquot\u2001", actual[3]);
-  }
-
-  @Test
-  public void testUnknownArrayType() throws SQLException {
-    Statement stmt = conn.createStatement();
-    ResultSet rs =
-        stmt.executeQuery("SELECT relacl FROM pg_class WHERE relacl IS NOT NULL LIMIT 1");
-    ResultSetMetaData rsmd = rs.getMetaData();
-    Assert.assertEquals(Types.ARRAY, rsmd.getColumnType(1));
-
-    Assert.assertTrue(rs.next());
-    Array arr = rs.getArray(1);
-    Assert.assertEquals("aclitem", arr.getBaseTypeName());
-
-    ResultSet arrRS = arr.getResultSet();
-    ResultSetMetaData arrRSMD = arrRS.getMetaData();
-    Assert.assertEquals("aclitem", arrRSMD.getColumnTypeName(2));
-  }
-
-  @Test
-  public void testRecursiveResultSets() throws SQLException {
-    Statement stmt = conn.createStatement();
-    ResultSet rs = stmt.executeQuery("SELECT '{{1,2},{3,4}}'::int[]");
-    Assert.assertTrue(rs.next());
-    Array arr = rs.getArray(1);
-
-    ResultSet arrRS = arr.getResultSet();
-    ResultSetMetaData arrRSMD = arrRS.getMetaData();
-    Assert.assertEquals(Types.ARRAY, arrRSMD.getColumnType(2));
-    Assert.assertEquals("_int4", arrRSMD.getColumnTypeName(2));
-
-    Assert.assertTrue(arrRS.next());
-    Assert.assertEquals(1, arrRS.getInt(1));
-    Array a1 = arrRS.getArray(2);
-    ResultSet a1RS = a1.getResultSet();
-    ResultSetMetaData a1RSMD = a1RS.getMetaData();
-    Assert.assertEquals(Types.INTEGER, a1RSMD.getColumnType(2));
-    Assert.assertEquals("int4", a1RSMD.getColumnTypeName(2));
-
-    Assert.assertTrue(a1RS.next());
-    Assert.assertEquals(1, a1RS.getInt(2));
-    Assert.assertTrue(a1RS.next());
-    Assert.assertEquals(2, a1RS.getInt(2));
-    Assert.assertTrue(!a1RS.next());
-    a1RS.close();
-
-    Assert.assertTrue(arrRS.next());
-    Assert.assertEquals(2, arrRS.getInt(1));
-    Array a2 = arrRS.getArray(2);
-    ResultSet a2RS = a2.getResultSet();
-
-    Assert.assertTrue(a2RS.next());
-    Assert.assertEquals(3, a2RS.getInt(2));
-    Assert.assertTrue(a2RS.next());
-    Assert.assertEquals(4, a2RS.getInt(2));
-    Assert.assertTrue(!a2RS.next());
-    a2RS.close();
-
-    arrRS.close();
-    rs.close();
-    stmt.close();
-  }
-
-  @Test
-  public void testNullString() throws SQLException {
-    Statement stmt = conn.createStatement();
-    ResultSet rs = stmt.executeQuery("SELECT '{a,NULL}'::text[]");
-    Assert.assertTrue(rs.next());
-    Array arr = rs.getArray(1);
-
-    String[] s = (String[]) arr.getArray();
-    Assert.assertEquals(2, s.length);
-    Assert.assertEquals("a", s[0]);
-    Assert.assertNull(s[1]);
-  }
-
-  @Test
-  public void testEscaping() throws SQLException {
-    Statement stmt = conn.createStatement();
-    String sql = "SELECT ";
-    sql += 'E';
-    // Uggg. Three levels of escaping: Java, string literal, array.
-    sql += "'{{c\\\\\"d, ''}, {\"\\\\\\\\\",\"''\"}}'::text[]";
-
-    ResultSet rs = stmt.executeQuery(sql);
-    Assert.assertTrue(rs.next());
-
-    Array arr = rs.getArray(1);
-    String[][] s = (String[][]) arr.getArray();
-    Assert.assertEquals("c\"d", s[0][0]);
-    Assert.assertEquals("'", s[0][1]);
-    Assert.assertEquals("\\", s[1][0]);
-    Assert.assertEquals("'", s[1][1]);
-
-    ResultSet arrRS = arr.getResultSet();
-
-    Assert.assertTrue(arrRS.next());
-    Array a1 = arrRS.getArray(2);
-    ResultSet rs1 = a1.getResultSet();
-    Assert.assertTrue(rs1.next());
-    Assert.assertEquals("c\"d", rs1.getString(2));
-    Assert.assertTrue(rs1.next());
-    Assert.assertEquals("'", rs1.getString(2));
-    Assert.assertTrue(!rs1.next());
-
-    Assert.assertTrue(arrRS.next());
-    Array a2 = arrRS.getArray(2);
-    ResultSet rs2 = a2.getResultSet();
-    Assert.assertTrue(rs2.next());
-    Assert.assertEquals("\\", rs2.getString(2));
-    Assert.assertTrue(rs2.next());
-    Assert.assertEquals("'", rs2.getString(2));
-    Assert.assertTrue(!rs2.next());
-  }
-
-  @Test
-  public void testWriteMultiDimensional() throws SQLException {
-    Statement stmt = conn.createStatement();
-    ResultSet rs = stmt.executeQuery("SELECT '{{1,2},{3,4}}'::int[]");
-    Assert.assertTrue(rs.next());
-    Array arr = rs.getArray(1);
-    rs.close();
-    stmt.close();
-
-    String sql = "SELECT ?";
-    if (preferQueryMode == PreferQueryMode.SIMPLE) {
-      sql = "SELECT ?::int[]";
+    @Override
+    public void tearDown() throws SQLException {
+        TestUtil.dropTable(conn, "arrtest");
+        super.tearDown();
     }
-    PreparedStatement pstmt = conn.prepareStatement(sql);
-    pstmt.setArray(1, arr);
-    rs = pstmt.executeQuery();
-    Assert.assertTrue(rs.next());
-    arr = rs.getArray(1);
 
-    Integer[][] i = (Integer[][]) arr.getArray();
-    Assert.assertEquals(1, i[0][0].intValue());
-    Assert.assertEquals(2, i[0][1].intValue());
-    Assert.assertEquals(3, i[1][0].intValue());
-    Assert.assertEquals(4, i[1][1].intValue());
-  }
+    @Test
+    public void testSetNull() throws SQLException {
+        PreparedStatement pstmt = conn.prepareStatement("INSERT INTO arrtest VALUES (?,?,?)");
+        pstmt.setNull(1, Types.ARRAY);
+        pstmt.setNull(2, Types.ARRAY);
+        pstmt.setNull(3, Types.ARRAY);
+        pstmt.executeUpdate();
 
-  /*
-   * The box data type uses a semicolon as the array element delimiter instead of a comma which
-   * pretty much everything else uses.
-   */
-  @Test
-  public void testNonStandardDelimiter() throws SQLException {
-    Statement stmt = conn.createStatement();
-    ResultSet rs = stmt.executeQuery("SELECT '{(3,4),(1,2);(7,8),(5,6)}'::box[]");
-    Assert.assertTrue(rs.next());
-    Array arr = rs.getArray(1);
+        pstmt.setObject(1, null, Types.ARRAY);
+        pstmt.setObject(2, null);
+        pstmt.setObject(3, null);
+        pstmt.executeUpdate();
 
-    ResultSet arrRS = arr.getResultSet();
+        pstmt.setArray(1, null);
+        pstmt.setArray(2, null);
+        pstmt.setArray(3, null);
+        pstmt.executeUpdate();
 
-    Assert.assertTrue(arrRS.next());
-    PGbox box1 = (PGbox) arrRS.getObject(2);
-    PGpoint p1 = box1.point[0];
-    Assert.assertEquals(3, p1.x, 0.001);
-    Assert.assertEquals(4, p1.y, 0.001);
-
-    Assert.assertTrue(arrRS.next());
-    PGbox box2 = (PGbox) arrRS.getObject(2);
-    PGpoint p2 = box2.point[1];
-    Assert.assertEquals(5, p2.x, 0.001);
-    Assert.assertEquals(6, p2.y, 0.001);
-
-    Assert.assertTrue(!arrRS.next());
-  }
-
-  @Test
-  public void testEmptyArray() throws SQLException {
-    PreparedStatement pstmt = conn.prepareStatement("SELECT '{}'::int[]");
-    ResultSet rs = pstmt.executeQuery();
-
-    while (rs.next()) {
-      Array array = rs.getArray(1);
-      if (!rs.wasNull()) {
-        ResultSet ars = array.getResultSet();
-        Assert.assertEquals("get columntype should return Types.INTEGER", java.sql.Types.INTEGER,
-            ars.getMetaData().getColumnType(1));
-      }
+        pstmt.close();
+    }
+
+    @Test
+    public void testSetPrimitiveObjects() throws SQLException {
+        final String stringWithNonAsciiWhiteSpace = "a\u2001b";
+        PreparedStatement pstmt = conn.prepareStatement("INSERT INTO arrtest VALUES (?,?,?)");
+        pstmt.setObject(1, new int[]{1, 2, 3}, Types.ARRAY);
+        pstmt.setObject(2, new double[]{3.1d, 1.4d}, Types.ARRAY);
+        pstmt.setObject(3, new String[]{stringWithNonAsciiWhiteSpace, "f'a", " \tfa\"b  "}, Types.ARRAY);
+        pstmt.executeUpdate();
+        pstmt.close();
+
+        Statement stmt = conn.createStatement();
+        ResultSet rs = stmt.executeQuery("SELECT intarr, decarr, strarr FROM arrtest");
+        Assert.assertTrue(rs.next());
+
+        Array arr = rs.getArray(1);
+        Assert.assertEquals(Types.INTEGER, arr.getBaseType());
+        Integer[] intarr = (Integer[]) arr.getArray();
+        assertEquals(3, intarr.length);
+        assertEquals(1, intarr[0].intValue());
+        assertEquals(2, intarr[1].intValue());
+        assertEquals(3, intarr[2].intValue());
+
+        arr = rs.getArray(2);
+        assertEquals(Types.NUMERIC, arr.getBaseType());
+        BigDecimal[] decarr = (BigDecimal[]) arr.getArray();
+        assertEquals(2, decarr.length);
+        assertEquals(new BigDecimal("3.1"), decarr[0]);
+        assertEquals(new BigDecimal("1.4"), decarr[1]);
+
+        arr = rs.getArray(3);
+        assertEquals(Types.VARCHAR, arr.getBaseType());
+        String[] strarr = (String[]) arr.getArray(2, 2);
+        assertEquals(2, strarr.length);
+        assertEquals("f'a", strarr[0]);
+        assertEquals(" \tfa\"b  ", strarr[1]);
+
+        strarr = (String[]) arr.getArray();
+        assertEquals(stringWithNonAsciiWhiteSpace, strarr[0]);
+
+        rs.close();
+    }
+
+    @Test
+    public void testIndexAccess() throws SQLException {
+        final int[][][] origIntArray = new int[2][2][2];
+        final double[][][] origDblArray = new double[2][2][2];
+        final String[][][] origStringArray = new String[2][2][2];
+        final Object[][][] origIntObjArray = new Object[2][2][2];
+        final Object[][][] origDblObjArray = new Object[2][2][2];
+        final Object[][][] origStringObjArray = new Object[2][2][2];
+        int i = 0;
+        for (int x = 0; x < 2; x++) {
+            for (int y = 0; y < 2; y++) {
+                for (int z = 0; z < 2; z++) {
+                    origIntArray[x][y][z] = i;
+                    origDblArray[x][y][z] = i / 10;
+                    origStringArray[x][y][z] = Integer.toString(i);
+                    origIntObjArray[x][y][z] = i;
+                    origDblObjArray[x][y][z] = i / 10;
+                    origStringObjArray[x][y][z] = Integer.toString(i);
+                    i++;
+                }
+            }
+        }
+        PreparedStatement pstmt = conn.prepareStatement("INSERT INTO arrtest VALUES (?,?,?)");
+        pstmt.setObject(1, origIntArray[0][0], Types.ARRAY);
+        pstmt.setObject(2, origDblArray[0][0], Types.ARRAY);
+        pstmt.setObject(3, origStringArray[0][0], Types.ARRAY);
+        pstmt.executeUpdate();
+        pstmt.close();
+
+        Statement stmt = conn.createStatement();
+        ResultSet rs = stmt.executeQuery("SELECT intarr[1], decarr[1], strarr[1] FROM arrtest");
+        Assert.assertTrue(rs.next());
+
+        assertEquals(origIntArray[0][0][0], rs.getInt(1));
+        assertEquals(origDblArray[0][0][0], rs.getDouble(2), 0.001);
+        assertEquals(origStringArray[0][0][0], rs.getString(3));
+        rs.close();
+        stmt.close();
+
+        pstmt = conn.prepareStatement("delete from arrtest");
+        pstmt.executeUpdate();
+        pstmt.close();
+
+        pstmt = conn.prepareStatement("INSERT INTO arrtest VALUES (?,?,?)");
+        pstmt.setObject(1, conn.createArrayOf("int4", origIntObjArray[0][0]), Types.ARRAY);
+        pstmt.setObject(2, conn.createArrayOf("float8", origDblObjArray[0][0]), Types.ARRAY);
+        pstmt.setObject(3, conn.createArrayOf("varchar", origStringObjArray[0][0]), Types.ARRAY);
+        pstmt.executeUpdate();
+        pstmt.close();
+
+        stmt = conn.createStatement();
+        rs = stmt.executeQuery("SELECT intarr[1], decarr[1], strarr[1] FROM arrtest");
+        Assert.assertTrue(rs.next());
+
+        assertEquals(origIntArray[0][0][0], rs.getInt(1));
+        assertEquals(origDblArray[0][0][0], rs.getDouble(2), 0.001);
+        assertEquals(origStringArray[0][0][0], rs.getString(3));
+        rs.close();
+        stmt.close();
+
+        pstmt = conn.prepareStatement("delete from arrtest");
+        pstmt.executeUpdate();
+        pstmt.close();
+
+        pstmt = conn.prepareStatement("INSERT INTO arrtest VALUES (?,?,?)");
+        pstmt.setObject(1, conn.createArrayOf("int4", origIntArray[0]), Types.ARRAY);
+        pstmt.setObject(2, conn.createArrayOf("float8", origDblArray[0]), Types.ARRAY);
+        pstmt.setObject(3, conn.createArrayOf("varchar", origStringArray[0]), Types.ARRAY);
+        pstmt.executeUpdate();
+        pstmt.close();
+
+        stmt = conn.createStatement();
+        rs = stmt.executeQuery("SELECT intarr[1][1], decarr[1][1], strarr[1][1], intarr[2][1], decarr[2][1], strarr[2][1] FROM arrtest");
+        Assert.assertTrue(rs.next());
+
+        assertEquals(origIntArray[0][0][0], rs.getInt(1));
+        assertEquals(origDblArray[0][0][0], rs.getDouble(2), 0.001);
+        assertEquals(origStringArray[0][0][0], rs.getString(3));
+        assertEquals(origIntArray[0][1][0], rs.getInt(4));
+        assertEquals(origDblArray[0][1][0], rs.getDouble(5), 0.001);
+        assertEquals(origStringArray[0][1][0], rs.getString(6));
+        rs.close();
+        stmt.close();
+
+        pstmt = conn.prepareStatement("delete from arrtest");
+        pstmt.executeUpdate();
+        pstmt.close();
+
+        pstmt = conn.prepareStatement("INSERT INTO arrtest VALUES (?,?,?)");
+        pstmt.setObject(1, conn.createArrayOf("int4", origIntObjArray[0]), Types.ARRAY);
+        pstmt.setObject(2, conn.createArrayOf("float8", origDblObjArray[0]), Types.ARRAY);
+        pstmt.setObject(3, conn.createArrayOf("varchar", origStringObjArray[0]), Types.ARRAY);
+        pstmt.executeUpdate();
+        pstmt.close();
+
+        stmt = conn.createStatement();
+        rs = stmt.executeQuery("SELECT intarr[1][1], decarr[1][1], strarr[1][1], intarr[2][1], decarr[2][1], strarr[2][1] FROM arrtest");
+        Assert.assertTrue(rs.next());
+
+        assertEquals(origIntArray[0][0][0], rs.getInt(1));
+        assertEquals(origDblArray[0][0][0], rs.getDouble(2), 0.001);
+        assertEquals(origStringArray[0][0][0], rs.getString(3));
+        assertEquals(origIntArray[0][1][0], rs.getInt(4));
+        assertEquals(origDblArray[0][1][0], rs.getDouble(5), 0.001);
+        assertEquals(origStringArray[0][1][0], rs.getString(6));
+        rs.close();
+        stmt.close();
+
+        pstmt = conn.prepareStatement("delete from arrtest");
+        pstmt.executeUpdate();
+        pstmt.close();
+
+        pstmt = conn.prepareStatement("INSERT INTO arrtest VALUES (?,?,?)");
+
+        pstmt.setObject(1, conn.createArrayOf("int4", origIntArray), Types.ARRAY);
+        pstmt.setObject(2, conn.createArrayOf("float8", origDblArray), Types.ARRAY);
+        pstmt.setObject(3, conn.createArrayOf("varchar", origStringArray), Types.ARRAY);
+        pstmt.executeUpdate();
+        pstmt.close();
+
+        stmt = conn.createStatement();
+        rs = stmt.executeQuery("SELECT intarr[1][1][1], decarr[1][1][1], strarr[1][1][1], intarr[2][1][1], decarr[2][1][1], strarr[2][1][1] FROM arrtest");
+        Assert.assertTrue(rs.next());
+
+        assertEquals(origIntArray[0][0][0], rs.getInt(1));
+        assertEquals(origDblArray[0][0][0], rs.getDouble(2), 0.001);
+        assertEquals(origStringArray[0][0][0], rs.getString(3));
+        assertEquals(origIntArray[1][0][0], rs.getInt(4));
+        assertEquals(origDblArray[1][0][0], rs.getDouble(5), 0.001);
+        assertEquals(origStringArray[1][0][0], rs.getString(6));
+        rs.close();
+        stmt.close();
+
+        pstmt = conn.prepareStatement("delete from arrtest");
+        pstmt.executeUpdate();
+        pstmt.close();
+
+        pstmt = conn.prepareStatement("INSERT INTO arrtest VALUES (?,?,?)");
+
+        pstmt.setObject(1, conn.createArrayOf("int4", origIntObjArray), Types.ARRAY);
+        pstmt.setObject(2, conn.createArrayOf("float8", origDblObjArray), Types.ARRAY);
+        pstmt.setObject(3, conn.createArrayOf("varchar", origStringObjArray), Types.ARRAY);
+        pstmt.executeUpdate();
+        pstmt.close();
+
+        stmt = conn.createStatement();
+        rs = stmt.executeQuery("SELECT intarr[1][1][1], decarr[1][1][1], strarr[1][1][1], intarr[2][1][1], decarr[2][1][1], strarr[2][1][1] FROM arrtest");
+        Assert.assertTrue(rs.next());
+
+        assertEquals(origIntArray[0][0][0], rs.getInt(1));
+        assertEquals(origDblArray[0][0][0], rs.getDouble(2), 0.001);
+        assertEquals(origStringArray[0][0][0], rs.getString(3));
+        assertEquals(origIntArray[1][0][0], rs.getInt(4));
+        assertEquals(origDblArray[1][0][0], rs.getDouble(5), 0.001);
+        assertEquals(origStringArray[1][0][0], rs.getString(6));
+        rs.close();
+        stmt.close();
+    }
+
+    /**
+     * Creates driver-side arrays from Java primitive arrays (int[], double[])
+     * and String[], stores them, and reads them back; also verifies that a
+     * non-array argument to createArrayOf is rejected with PSQLException.
+     */
+    @Test
+    public void testSetPrimitiveArraysObjects() throws SQLException {
+        PreparedStatement pstmt = conn.prepareStatement("INSERT INTO arrtest VALUES (?,?,?)");
+
+        final PGConnection arraySupport = conn.unwrap(PGConnection.class);
+
+        // setArray and setObject are mixed deliberately: both bind paths must
+        // accept the driver-created Array instances.
+        pstmt.setArray(1, arraySupport.createArrayOf("int4", new int[]{1, 2, 3}));
+        pstmt.setObject(2, arraySupport.createArrayOf("float8", new double[]{3.1d, 1.4d}));
+        pstmt.setObject(3, arraySupport.createArrayOf("varchar", new String[]{"abc", "f'a", "fa\"b"}));
+
+        pstmt.executeUpdate();
+        pstmt.close();
+
+        Statement stmt = conn.createStatement();
+        ResultSet rs = stmt.executeQuery("SELECT intarr, decarr, strarr FROM arrtest");
+        Assert.assertTrue(rs.next());
+
+        Array arr = rs.getArray(1);
+        Assert.assertEquals(Types.INTEGER, arr.getBaseType());
+        Integer[] intarr = (Integer[]) arr.getArray();
+        Assert.assertEquals(3, intarr.length);
+        Assert.assertEquals(1, intarr[0].intValue());
+        Assert.assertEquals(2, intarr[1].intValue());
+        Assert.assertEquals(3, intarr[2].intValue());
+
+        arr = rs.getArray(2);
+        Assert.assertEquals(Types.NUMERIC, arr.getBaseType());
+        BigDecimal[] decarr = (BigDecimal[]) arr.getArray();
+        Assert.assertEquals(2, decarr.length);
+        Assert.assertEquals(new BigDecimal("3.1"), decarr[0]);
+        Assert.assertEquals(new BigDecimal("1.4"), decarr[1]);
+
+        arr = rs.getArray(3);
+        Assert.assertEquals(Types.VARCHAR, arr.getBaseType());
+        // Slice of 2 elements starting at JDBC index 2 (Array indices are 1-based).
+        String[] strarr = (String[]) arr.getArray(2, 2);
+        Assert.assertEquals(2, strarr.length);
+        Assert.assertEquals("f'a", strarr[0]);
+        Assert.assertEquals("fa\"b", strarr[1]);
+
+        try {
+            arraySupport.createArrayOf("int4", 1);
+            fail("not an array");
+        } catch (PSQLException e) {
+            // expected: the second argument is not an array
+        }
+
+        rs.close();
+    }
+
+    /**
+     * Verifies that arrays can be created using ANSI SQL type names
+     * ("double precision", "character varying", ...) rather than the
+     * PostgreSQL internal names, and that every value round-trips.
+     */
+    @Test
+    public void testSetArraysWithAnsiTypeNames() throws SQLException {
+        try {
+            TestUtil.createTable(
+                    conn,
+                    "ansiarraytest",
+                    "floats double precision[], "
+                            + "reals real[], "
+                            + "varchars character varying(8)[], "
+                            + "times time without time zone[], "
+                            + "timestamps timestamp without time zone[], "
+                            + "timestampstz timestamp with time zone[]");
+
+            PreparedStatement pstmt =
+                    conn.prepareStatement("INSERT INTO ansiarraytest VALUES (?,?,?,?,?,?)");
+
+            final PGConnection arraySupport = conn.unwrap(PGConnection.class);
+
+            pstmt.setArray(1, arraySupport.createArrayOf("double precision", new Object[]{1d, 4d}));
+            pstmt.setArray(2, arraySupport.createArrayOf("real", new Object[]{0f, 3f}));
+            pstmt.setObject(
+                    3, arraySupport.createArrayOf("character varying", new String[]{"abc", "f'a", "fa\"b"}));
+            pstmt.setObject(
+                    4,
+                    arraySupport.createArrayOf(
+                            "time without time zone",
+                            new Object[]{Time.valueOf("12:34:56"), Time.valueOf("03:30:25")}));
+            pstmt.setObject(
+                    5,
+                    arraySupport.createArrayOf(
+                            "timestamp without time zone",
+                            new Object[]{"2023-09-05 16:21:50", "2012-01-01 13:02:03"}));
+            pstmt.setObject(
+                    6,
+                    arraySupport.createArrayOf(
+                            "timestamp with time zone",
+                            new Object[]{"1996-01-23 12:00:00-08", "1997-08-16 16:51:00-04"}));
+
+            pstmt.executeUpdate();
+            pstmt.close();
+
+            Statement stmt = conn.createStatement();
+            ResultSet rs =
+                    stmt.executeQuery(
+                            "SELECT floats, reals, varchars, times, timestamps, timestampstz FROM ansiarraytest");
+            Assert.assertTrue(rs.next());
+
+            Array arr = rs.getArray(1);
+            Assert.assertEquals(Types.DOUBLE, arr.getBaseType());
+            Double[] doubles = (Double[]) arr.getArray();
+            Assert.assertEquals(2, doubles.length);
+            Assert.assertEquals(1d, doubles[0], 0);
+            Assert.assertEquals(4d, doubles[1], 0);
+
+            arr = rs.getArray(2);
+            Assert.assertEquals(Types.REAL, arr.getBaseType());
+            Float[] floats = (Float[]) arr.getArray();
+            Assert.assertEquals(2, floats.length);
+            Assert.assertEquals(0f, floats[0], 0);
+            Assert.assertEquals(3f, floats[1], 0);
+
+            arr = rs.getArray(3);
+            Assert.assertEquals(Types.VARCHAR, arr.getBaseType());
+            String[] strings = (String[]) arr.getArray();
+            Assert.assertEquals(3, strings.length);
+            Assert.assertEquals("abc", strings[0]);
+            Assert.assertEquals("f'a", strings[1]);
+            Assert.assertEquals("fa\"b", strings[2]);
+
+            arr = rs.getArray(4);
+            Assert.assertEquals(Types.TIME, arr.getBaseType());
+            Time[] times = (Time[]) arr.getArray();
+            Assert.assertEquals(2, times.length);
+            Assert.assertEquals(Time.valueOf("12:34:56"), times[0]);
+            Assert.assertEquals(Time.valueOf("03:30:25"), times[1]);
+
+            arr = rs.getArray(5);
+            Assert.assertEquals(Types.TIMESTAMP, arr.getBaseType());
+            Timestamp[] tzarr = (Timestamp[]) arr.getArray();
+            // Bug fix: these length assertions previously re-checked
+            // times.length (copy-paste slip) instead of the timestamp array.
+            Assert.assertEquals(2, tzarr.length);
+            Assert.assertEquals(Timestamp.valueOf("2023-09-05 16:21:50"), tzarr[0]);
+            Assert.assertEquals(Timestamp.valueOf("2012-01-01 13:02:03"), tzarr[1]);
+
+            arr = rs.getArray(6);
+            Assert.assertEquals(Types.TIMESTAMP, arr.getBaseType());
+            tzarr = (Timestamp[]) arr.getArray();
+            Assert.assertEquals(2, tzarr.length);
+            // Compare epoch millis so the check is independent of the local time zone.
+            Assert.assertEquals(822427200000L, tzarr[0].getTime());
+            Assert.assertEquals(871764660000L, tzarr[1].getTime());
+
+            rs.close();
+            stmt.close();
+        } finally {
+            TestUtil.dropTable(conn, "ansiarraytest");
+        }
+    }
+
+    /**
+     * A null elements argument to createArrayOf must store SQL NULL in each
+     * array column, and reading those columns back yields a null Array.
+     */
+    @Test
+    public void testSetNullArrays() throws SQLException {
+        PreparedStatement pstmt = conn.prepareStatement("INSERT INTO arrtest VALUES (?,?,?)");
+
+        final PGConnection arraySupport = conn.unwrap(PGConnection.class);
+
+        // NOTE(review): parameter 2 goes through conn rather than the
+        // unwrapped PGConnection — presumably to cover both entry points;
+        // confirm that this asymmetry is intentional.
+        pstmt.setArray(1, arraySupport.createArrayOf("int4", null));
+        pstmt.setObject(2, conn.createArrayOf("float8", null));
+        pstmt.setObject(3, arraySupport.createArrayOf("varchar", null));
+
+        pstmt.executeUpdate();
+        pstmt.close();
+
+        Statement stmt = conn.createStatement();
+        ResultSet rs = stmt.executeQuery("SELECT intarr, decarr, strarr FROM arrtest");
+        Assert.assertTrue(rs.next());
+
+        Array arr = rs.getArray(1);
+        Assert.assertNull(arr);
+
+        arr = rs.getArray(2);
+        Assert.assertNull(arr);
+
+        arr = rs.getArray(3);
+        Assert.assertNull(arr);
+
+        rs.close();
+    }
+
+    /**
+     * Inserts array literals via SQL and verifies retrieval through
+     * {@link Array#getArray()} and the 1-based slice getArray(index, count).
+     */
+    @Test
+    public void testRetrieveArrays() throws SQLException {
+        Statement stmt = conn.createStatement();
+
+        // you need a lot of backslashes to get a double quote in.
+        stmt.executeUpdate("INSERT INTO arrtest VALUES ('{1,2,3}','{3.1,1.4}', '"
+                + TestUtil.escapeString(conn, "{abc,f'a,\"fa\\\"b\",def, un  quot\u000B \u2001 \r}") + "')");
+
+        ResultSet rs = stmt.executeQuery("SELECT intarr, decarr, strarr FROM arrtest");
+        Assert.assertTrue(rs.next());
+
+        Array arr = rs.getArray(1);
+        Assert.assertEquals(Types.INTEGER, arr.getBaseType());
+        Integer[] intarr = (Integer[]) arr.getArray();
+        Assert.assertEquals(3, intarr.length);
+        Assert.assertEquals(1, intarr[0].intValue());
+        Assert.assertEquals(2, intarr[1].intValue());
+        Assert.assertEquals(3, intarr[2].intValue());
+
+        arr = rs.getArray(2);
+        Assert.assertEquals(Types.NUMERIC, arr.getBaseType());
+        BigDecimal[] decarr = (BigDecimal[]) arr.getArray();
+        Assert.assertEquals(2, decarr.length);
+        Assert.assertEquals(new BigDecimal("3.1"), decarr[0]);
+        Assert.assertEquals(new BigDecimal("1.4"), decarr[1]);
+
+        arr = rs.getArray(3);
+        Assert.assertEquals(Types.VARCHAR, arr.getBaseType());
+        String[] strarr = (String[]) arr.getArray(2, 2);
+        Assert.assertEquals(2, strarr.length);
+        Assert.assertEquals("f'a", strarr[0]);
+        Assert.assertEquals("fa\"b", strarr[1]);
+
+        // Unquoted trailing ASCII whitespace is stripped while the non-ASCII
+        // U+2001 space survives (see the assertion below).
+        strarr = (String[]) arr.getArray();
+        assertEquals(5, strarr.length);
+        assertEquals("un  quot\u000B \u2001", strarr[4]);
+
+        rs.close();
+        stmt.close();
+    }
+
+    /**
+     * Reads arrays back through {@link Array#getResultSet()} and checks both
+     * full and sliced (index/count) traversal, scrollability of the array
+     * ResultSet, and a value containing non-ASCII whitespace (U+2001).
+     */
+    @Test
+    public void testRetrieveResultSets() throws SQLException {
+        Statement stmt = conn.createStatement();
+
+        final String stringWithNonAsciiWhiteSpace = "a\u2001b";
+        // you need a lot of backslashes to get a double quote in.
+        stmt.executeUpdate("INSERT INTO arrtest VALUES ('{1,2,3}','{3.1,1.4}', '"
+                + TestUtil.escapeString(conn, "{\"a\u2001b\",f'a,\"fa\\\"b\",def}") + "')");
+
+        ResultSet rs = stmt.executeQuery("SELECT intarr, decarr, strarr FROM arrtest");
+        Assert.assertTrue(rs.next());
+
+        Array arr = rs.getArray(1);
+        Assert.assertEquals(Types.INTEGER, arr.getBaseType());
+        ResultSet arrrs = arr.getResultSet();
+        Assert.assertTrue(arrrs.next());
+        Assert.assertEquals(1, arrrs.getInt(1));
+        Assert.assertEquals(1, arrrs.getInt(2));
+        Assert.assertTrue(arrrs.next());
+        Assert.assertEquals(2, arrrs.getInt(1));
+        Assert.assertEquals(2, arrrs.getInt(2));
+        Assert.assertTrue(arrrs.next());
+        Assert.assertEquals(3, arrrs.getInt(1));
+        Assert.assertEquals(3, arrrs.getInt(2));
+        // Consistency fix: use assertFalse instead of assertTrue(!...),
+        // matching the style already used later in this method.
+        Assert.assertFalse(arrrs.next());
+        // The array ResultSet must be scrollable.
+        Assert.assertTrue(arrrs.previous());
+        Assert.assertEquals(3, arrrs.getInt(2));
+        arrrs.first();
+        Assert.assertEquals(1, arrrs.getInt(2));
+        arrrs.close();
+
+        arr = rs.getArray(2);
+        Assert.assertEquals(Types.NUMERIC, arr.getBaseType());
+        arrrs = arr.getResultSet();
+        Assert.assertTrue(arrrs.next());
+        Assert.assertEquals(new BigDecimal("3.1"), arrrs.getBigDecimal(2));
+        Assert.assertTrue(arrrs.next());
+        Assert.assertEquals(new BigDecimal("1.4"), arrrs.getBigDecimal(2));
+        arrrs.close();
+
+        arr = rs.getArray(3);
+        Assert.assertEquals(Types.VARCHAR, arr.getBaseType());
+        arrrs = arr.getResultSet(2, 2);
+        Assert.assertTrue(arrrs.next());
+        Assert.assertEquals(2, arrrs.getInt(1));
+        Assert.assertEquals("f'a", arrrs.getString(2));
+        Assert.assertTrue(arrrs.next());
+        Assert.assertEquals(3, arrrs.getInt(1));
+        Assert.assertEquals("fa\"b", arrrs.getString(2));
+        Assert.assertFalse(arrrs.next());
+        arrrs.close();
+
+        arrrs = arr.getResultSet(1, 1);
+        Assert.assertTrue(arrrs.next());
+        Assert.assertEquals(1, arrrs.getInt(1));
+        Assert.assertEquals(stringWithNonAsciiWhiteSpace, arrrs.getString(2));
+        Assert.assertFalse(arrrs.next());
+        arrrs.close();
+
+        rs.close();
+        stmt.close();
+    }
+
+    /**
+     * Binds a server-created int4[] through all three supported styles
+     * (setArray, typed setObject, untyped setObject) and verifies each stored
+     * row round-trips to the same Integer[] content.
+     */
+    @Test
+    public void testSetArray() throws SQLException {
+        // Fetch a driver Array instance to use as the bind value.
+        Statement stmt = conn.createStatement();
+        ResultSet arrRS = stmt.executeQuery("SELECT '{1,2,3}'::int4[]");
+        Assert.assertTrue(arrRS.next());
+        Array arr = arrRS.getArray(1);
+        arrRS.close();
+        stmt.close();
+
+        PreparedStatement pstmt = conn.prepareStatement("INSERT INTO arrtest(intarr) VALUES (?)");
+        pstmt.setArray(1, arr);
+        pstmt.executeUpdate();
+        pstmt.setObject(1, arr, Types.ARRAY);
+        pstmt.executeUpdate();
+        pstmt.setObject(1, arr);
+        pstmt.executeUpdate();
+        pstmt.close();
+
+        Statement select = conn.createStatement();
+        ResultSet rs = select.executeQuery("SELECT intarr FROM arrtest");
+        int resultCount = 0;
+        while (rs.next()) {
+            resultCount++;
+            Array result = rs.getArray(1);
+            Assert.assertEquals(Types.INTEGER, result.getBaseType());
+            Assert.assertEquals("int4", result.getBaseTypeName());
+            Integer[] values = (Integer[]) result.getArray();
+            Assert.assertEquals(3, values.length);
+            for (int i = 0; i < values.length; i++) {
+                Assert.assertEquals(i + 1, values[i].intValue());
+            }
+        }
+        // One row per bind style.
+        Assert.assertEquals(3, resultCount);
+    }
+
+    /**
+     * Starting with 8.0 non-standard (beginning index isn't 1) bounds the dimensions are returned in
+     * the data. The following should return "[0:3]={0,1,2,3}" when queried. Older versions simply
+     * do not return the bounds.
+     */
+    @Test
+    public void testNonStandardBounds() throws SQLException {
+        Statement stmt = conn.createStatement();
+        stmt.executeUpdate("INSERT INTO arrtest (intarr) VALUES ('{1,2,3}')");
+        // Assigning index 0 shifts the array's lower bound below the default 1.
+        stmt.executeUpdate("UPDATE arrtest SET intarr[0] = 0");
+        ResultSet rs = stmt.executeQuery("SELECT intarr FROM arrtest");
+        Assert.assertTrue(rs.next());
+        Array result = rs.getArray(1);
+        Integer[] intarr = (Integer[]) result.getArray();
+        // Four elements 0..3, regardless of the non-standard lower bound.
+        Assert.assertEquals(4, intarr.length);
+        for (int i = 0; i < intarr.length; i++) {
+            Assert.assertEquals(i, intarr[i].intValue());
+        }
+    }
+
+    /**
+     * A 2x2 int[] comes back as an Object[] whose elements are Integer[] rows.
+     */
+    @Test
+    public void testMultiDimensionalArray() throws SQLException {
+        Statement stmt = conn.createStatement();
+        ResultSet rs = stmt.executeQuery("SELECT '{{1,2},{3,4}}'::int[]");
+        Assert.assertTrue(rs.next());
+        Array arr = rs.getArray(1);
+        Object[] rows = (Object[]) arr.getArray();
+        Assert.assertEquals(2, rows.length);
+        // Cell values run 1..4 in row-major order.
+        int expected = 1;
+        for (Object rowObj : rows) {
+            Integer[] row = (Integer[]) rowObj;
+            Assert.assertEquals(2, row.length);
+            for (Integer cell : row) {
+                Assert.assertEquals(expected++, cell.intValue());
+            }
+        }
+        rs.close();
+        stmt.close();
+    }
+
+    /**
+     * NULL array elements must surface as Java nulls, not dropped entries.
+     */
+    @Test
+    public void testNullValues() throws SQLException {
+        Statement stmt = conn.createStatement();
+        ResultSet rs = stmt.executeQuery("SELECT ARRAY[1,NULL,3]");
+        Assert.assertTrue(rs.next());
+        Integer[] values = (Integer[]) rs.getArray(1).getArray();
+        Assert.assertEquals(3, values.length);
+        Assert.assertEquals(Integer.valueOf(1), values[0]);
+        Assert.assertNull(values[1]);
+        Assert.assertEquals(Integer.valueOf(3), values[2]);
+    }
+
+    /**
+     * A PgArray constructed from a null field string reports a null toString.
+     */
+    @Test
+    public void testNullFieldString() throws SQLException {
+        Array arr = new PgArray((BaseConnection) conn, 1, (String) null);
+        Assert.assertNull(arr.toString());
+    }
+
+    /**
+     * Parses an array literal handed directly to PgArray: quoted values keep
+     * embedded whitespace verbatim, unquoted values lose their ASCII
+     * whitespace (see the final assertion).
+     */
+    @Test
+    public void testDirectFieldString() throws SQLException {
+        Array arr = new PgArray((BaseConnection) conn, Oid.VARCHAR_ARRAY,
+                "{\" lead\t\",  unquot\u000B \u2001 \r, \" \fnew \n \"\t, \f\" \" }");
+        final String[] array = (String[]) arr.getArray();
+        assertEquals(4, array.length);
+        assertEquals(" lead\t", array[0]);
+        assertEquals(" \fnew \n ", array[2]);
+        assertEquals(" ", array[3]);
+
+        // PostgreSQL drops leading and trailing whitespace, so does the driver
+        assertEquals("unquot\u2001", array[1]);
+    }
+
+    /**
+     * Checks string-array escaping both through the server (insert + select)
+     * and through direct PgArray parsing of the same literal.
+     */
+    @Test
+    public void testStringEscaping() throws SQLException {
+
+        final String stringArray = "{f'a,\"fa\\\"b\",def, un  quot\u000B \u2001 \r, someString }";
+
+        final Statement stmt = conn.createStatement();
+        try {
+
+            stmt.executeUpdate("INSERT INTO arrtest VALUES (NULL, NULL, '" + TestUtil.escapeString(conn, stringArray) + "')");
+
+            final ResultSet rs = stmt.executeQuery("SELECT strarr FROM arrtest");
+            Assert.assertTrue(rs.next());
+
+            Array arr = rs.getArray(1);
+            Assert.assertEquals(Types.VARCHAR, arr.getBaseType());
+            String[] strarr = (String[]) arr.getArray();
+            assertEquals(5, strarr.length);
+            assertEquals("f'a", strarr[0]);
+            assertEquals("fa\"b", strarr[1]);
+            assertEquals("def", strarr[2]);
+            assertEquals("un  quot\u000B \u2001", strarr[3]);
+            assertEquals("someString", strarr[4]);
+
+            rs.close();
+        } finally {
+            stmt.close();
+        }
+
+        // The same literal parsed locally must match, except for the unquoted
+        // whitespace handling documented below.
+        final Array directArray = new PgArray((BaseConnection) conn, Oid.VARCHAR_ARRAY, stringArray);
+        final String[] actual = (String[]) directArray.getArray();
+        assertEquals(5, actual.length);
+        assertEquals("f'a", actual[0]);
+        assertEquals("fa\"b", actual[1]);
+        assertEquals("def", actual[2]);
+        assertEquals("someString", actual[4]);
+
+        // the driver strips out ascii white spaces from an unescaped string, even in
+        // the middle of the value. while this does not exactly match the behavior of
+        // the backend, it will always quote values where ascii white spaces are
+        // present, making this difference not worth the complexity involved addressing.
+        assertEquals("unquot\u2001", actual[3]);
+    }
+
+    /**
+     * Arrays of types the driver has no mapping for (aclitem) must still
+     * report Types.ARRAY and expose the backend element type name.
+     */
+    @Test
+    public void testUnknownArrayType() throws SQLException {
+        Statement stmt = conn.createStatement();
+        ResultSet rs =
+                stmt.executeQuery("SELECT relacl FROM pg_class WHERE relacl IS NOT NULL LIMIT 1");
+        ResultSetMetaData rsmd = rs.getMetaData();
+        Assert.assertEquals(Types.ARRAY, rsmd.getColumnType(1));
+
+        Assert.assertTrue(rs.next());
+        Array arr = rs.getArray(1);
+        Assert.assertEquals("aclitem", arr.getBaseTypeName());
+
+        ResultSet arrRS = arr.getResultSet();
+        ResultSetMetaData arrRSMD = arrRS.getMetaData();
+        Assert.assertEquals("aclitem", arrRSMD.getColumnTypeName(2));
+    }
+
+    /**
+     * A multi-dimensional array's ResultSet exposes inner arrays whose own
+     * ResultSets can be traversed recursively; metadata reports _int4 at the
+     * outer level and int4 at the inner level.
+     */
+    @Test
+    public void testRecursiveResultSets() throws SQLException {
+        Statement stmt = conn.createStatement();
+        ResultSet rs = stmt.executeQuery("SELECT '{{1,2},{3,4}}'::int[]");
+        Assert.assertTrue(rs.next());
+        Array arr = rs.getArray(1);
+
+        ResultSet arrRS = arr.getResultSet();
+        ResultSetMetaData arrRSMD = arrRS.getMetaData();
+        Assert.assertEquals(Types.ARRAY, arrRSMD.getColumnType(2));
+        Assert.assertEquals("_int4", arrRSMD.getColumnTypeName(2));
+
+        Assert.assertTrue(arrRS.next());
+        Assert.assertEquals(1, arrRS.getInt(1));
+        Array a1 = arrRS.getArray(2);
+        ResultSet a1RS = a1.getResultSet();
+        ResultSetMetaData a1RSMD = a1RS.getMetaData();
+        Assert.assertEquals(Types.INTEGER, a1RSMD.getColumnType(2));
+        Assert.assertEquals("int4", a1RSMD.getColumnTypeName(2));
+
+        Assert.assertTrue(a1RS.next());
+        Assert.assertEquals(1, a1RS.getInt(2));
+        Assert.assertTrue(a1RS.next());
+        Assert.assertEquals(2, a1RS.getInt(2));
+        // Idiom fix: assertFalse instead of assertTrue(!...).
+        Assert.assertFalse(a1RS.next());
+        a1RS.close();
+
+        Assert.assertTrue(arrRS.next());
+        Assert.assertEquals(2, arrRS.getInt(1));
+        Array a2 = arrRS.getArray(2);
+        ResultSet a2RS = a2.getResultSet();
+
+        Assert.assertTrue(a2RS.next());
+        Assert.assertEquals(3, a2RS.getInt(2));
+        Assert.assertTrue(a2RS.next());
+        Assert.assertEquals(4, a2RS.getInt(2));
+        Assert.assertFalse(a2RS.next());
+        a2RS.close();
+
+        arrRS.close();
+        rs.close();
+        stmt.close();
+    }
+
+    /**
+     * The literal NULL element of a text[] maps to a Java null entry.
+     */
+    @Test
+    public void testNullString() throws SQLException {
+        Statement stmt = conn.createStatement();
+        ResultSet rs = stmt.executeQuery("SELECT '{a,NULL}'::text[]");
+        Assert.assertTrue(rs.next());
+        String[] elements = (String[]) rs.getArray(1).getArray();
+        Assert.assertEquals(2, elements.length);
+        Assert.assertEquals("a", elements[0]);
+        Assert.assertNull(elements[1]);
+    }
+
+    /**
+     * Exercises quote/backslash unescaping in a two-dimensional text array,
+     * both via getArray() and via nested getResultSet() traversal.
+     */
+    @Test
+    public void testEscaping() throws SQLException {
+        Statement stmt = conn.createStatement();
+        String sql = "SELECT ";
+        sql += 'E';
+        // Uggg. Three levels of escaping: Java, string literal, array.
+        sql += "'{{c\\\\\"d, ''}, {\"\\\\\\\\\",\"''\"}}'::text[]";
+
+        ResultSet rs = stmt.executeQuery(sql);
+        Assert.assertTrue(rs.next());
+
+        Array arr = rs.getArray(1);
+        String[][] s = (String[][]) arr.getArray();
+        Assert.assertEquals("c\"d", s[0][0]);
+        Assert.assertEquals("'", s[0][1]);
+        Assert.assertEquals("\\", s[1][0]);
+        Assert.assertEquals("'", s[1][1]);
+
+        // The same values must come back identically through the ResultSet API.
+        ResultSet arrRS = arr.getResultSet();
+
+        Assert.assertTrue(arrRS.next());
+        Array a1 = arrRS.getArray(2);
+        ResultSet rs1 = a1.getResultSet();
+        Assert.assertTrue(rs1.next());
+        Assert.assertEquals("c\"d", rs1.getString(2));
+        Assert.assertTrue(rs1.next());
+        Assert.assertEquals("'", rs1.getString(2));
+        Assert.assertTrue(!rs1.next());
+
+        Assert.assertTrue(arrRS.next());
+        Array a2 = arrRS.getArray(2);
+        ResultSet rs2 = a2.getResultSet();
+        Assert.assertTrue(rs2.next());
+        Assert.assertEquals("\\", rs2.getString(2));
+        Assert.assertTrue(rs2.next());
+        Assert.assertEquals("'", rs2.getString(2));
+        Assert.assertTrue(!rs2.next());
+    }
+
+    /**
+     * A multi-dimensional array read from the server can be bound back into a
+     * prepared statement and round-trips intact.
+     */
+    @Test
+    public void testWriteMultiDimensional() throws SQLException {
+        Statement stmt = conn.createStatement();
+        ResultSet rs = stmt.executeQuery("SELECT '{{1,2},{3,4}}'::int[]");
+        Assert.assertTrue(rs.next());
+        Array arr = rs.getArray(1);
+        rs.close();
+        stmt.close();
+
+        String sql = "SELECT ?";
+        if (preferQueryMode == PreferQueryMode.SIMPLE) {
+            // Simple query mode carries no parameter type information, so the
+            // placeholder needs an explicit cast.
+            sql = "SELECT ?::int[]";
+        }
+        PreparedStatement pstmt = conn.prepareStatement(sql);
+        pstmt.setArray(1, arr);
+        rs = pstmt.executeQuery();
+        Assert.assertTrue(rs.next());
+        arr = rs.getArray(1);
+
+        Integer[][] i = (Integer[][]) arr.getArray();
+        Assert.assertEquals(1, i[0][0].intValue());
+        Assert.assertEquals(2, i[0][1].intValue());
+        Assert.assertEquals(3, i[1][0].intValue());
+        Assert.assertEquals(4, i[1][1].intValue());
+    }
+
+    /*
+     * The box data type uses a semicolon as the array element delimiter instead of a comma which
+     * pretty much everything else uses.
+     */
+    @Test
+    public void testNonStandardDelimiter() throws SQLException {
+        Statement stmt = conn.createStatement();
+        ResultSet rs = stmt.executeQuery("SELECT '{(3,4),(1,2);(7,8),(5,6)}'::box[]");
+        Assert.assertTrue(rs.next());
+        Array arr = rs.getArray(1);
+
+        ResultSet arrRS = arr.getResultSet();
+
+        Assert.assertTrue(arrRS.next());
+        PGbox box1 = (PGbox) arrRS.getObject(2);
+        PGpoint p1 = box1.point[0];
+        // NOTE(review): assumes the server echoes the first corner of box1
+        // unchanged; corners may be reordered by box normalization — confirm.
+        Assert.assertEquals(3, p1.x, 0.001);
+        Assert.assertEquals(4, p1.y, 0.001);
+
+        Assert.assertTrue(arrRS.next());
+        PGbox box2 = (PGbox) arrRS.getObject(2);
+        PGpoint p2 = box2.point[1];
+        Assert.assertEquals(5, p2.x, 0.001);
+        Assert.assertEquals(6, p2.y, 0.001);
+
+        Assert.assertTrue(!arrRS.next());
+    }
+
+    /**
+     * An empty int[] still reports Types.INTEGER for the element column of
+     * its ResultSet metadata.
+     */
+    @Test
+    public void testEmptyArray() throws SQLException {
+        PreparedStatement pstmt = conn.prepareStatement("SELECT '{}'::int[]");
+        ResultSet rs = pstmt.executeQuery();
+
+        // Bug fix: the previous while(rs.next())/if(!rs.wasNull()) shape
+        // passed vacuously if no row (or a NULL array) came back; the query
+        // always yields exactly one non-null row, so assert both explicitly.
+        Assert.assertTrue(rs.next());
+        Array array = rs.getArray(1);
+        Assert.assertFalse(rs.wasNull());
+        ResultSet ars = array.getResultSet();
+        Assert.assertEquals("get columntype should return Types.INTEGER", java.sql.Types.INTEGER,
+                ars.getMetaData().getColumnType(1));
+        rs.close();
+        pstmt.close();
     }
-  }
 
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/AutoRollbackTestSuite.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/AutoRollbackTestSuite.java
index e0a8e91..700616d 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/AutoRollbackTestSuite.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/AutoRollbackTestSuite.java
@@ -35,389 +35,385 @@ import java.util.concurrent.atomic.AtomicInteger;
 
 @RunWith(Parameterized.class)
 public class AutoRollbackTestSuite extends BaseTest4 {
-  private static final AtomicInteger counter = new AtomicInteger();
-
-  private enum CleanSavePoint {
-    TRUE,
-    FALSE
-  }
-
-  private enum FailMode {
-    /**
-     * Executes "select 1/0" and causes transaction failure (if autocommit=no).
-     * Mitigation: "autosave=always" or "autocommit=true"
-     */
-    SELECT,
-    /**
-     * Executes "alter table rollbacktest", thus it breaks a prepared select over that table.
-     * Mitigation: "autosave in (always, conservative)"
-     */
-    ALTER,
-    /**
-     * Executes DEALLOCATE ALL.
-     * Mitigation:
-     *  1) QueryExecutor tracks "DEALLOCATE ALL" responses ({@see org.postgresql.core.QueryExecutor#setFlushCacheOnDeallocate(boolean)}
-     *  2) QueryExecutor tracks "prepared statement name is invalid" and unprepared relevant statements ({@link org.postgresql.core.v3.QueryExecutorImpl#processResults(ResultHandler, int)}
-     *  3) "autosave in (always, conservative)"
-     *  4) Non-transactional cases are healed by retry (when no transaction present, just retry is possible)
-     */
-    DEALLOCATE,
-    /**
-     * Executes DISCARD ALL.
-     * Mitigation: the same as for {@link #DEALLOCATE}
-     */
-    DISCARD,
-    /**
-     * Executes "insert ... select 1/0" in a batch statement, thus causing the transaction to fail.
-     */
-    INSERT_BATCH,
-  }
-
-  private enum ReturnColumns {
-    EXACT("a, str"),
-    STAR("*");
-
-    public final String cols;
-
-    ReturnColumns(String cols) {
-      this.cols = cols;
-    }
-  }
-
-  private enum TestStatement {
-    SELECT("select ${cols} from rollbacktest", 0),
-    WITH_INSERT_SELECT(
-        "with x as (insert into rollbacktest(a, str) values(43, 'abc') returning ${cols})"
-            + "select * from x", 1);
-
-    private final String sql;
-    private final int rowsInserted;
-
-    TestStatement(String sql, int rowsInserted) {
-      this.sql = sql;
-      this.rowsInserted = rowsInserted;
+    private static final AtomicInteger counter = new AtomicInteger();
+    private static final EnumSet<FailMode> DEALLOCATES =
+            EnumSet.of(FailMode.DEALLOCATE, FailMode.DISCARD);
+    private static final EnumSet<FailMode> TRANS_KILLERS =
+            EnumSet.of(FailMode.SELECT, FailMode.INSERT_BATCH);
+    private final AutoSave autoSave;
+    private final CleanSavePoint cleanSavePoint;
+    private final AutoCommit autoCommit;
+    private final FailMode failMode;
+    private final ContinueMode continueMode;
+    private final boolean flushCacheOnDeallocate;
+    private final boolean trans;
+    private final TestStatement testSql;
+    private final ReturnColumns cols;
+    public AutoRollbackTestSuite(AutoSave autoSave, CleanSavePoint cleanSavePoint, AutoCommit autoCommit,
+                                 FailMode failMode, ContinueMode continueMode, boolean flushCacheOnDeallocate,
+                                 boolean trans, TestStatement testSql, ReturnColumns cols) {
+        this.autoSave = autoSave;
+        this.cleanSavePoint = cleanSavePoint;
+        this.autoCommit = autoCommit;
+        this.failMode = failMode;
+        this.continueMode = continueMode;
+        this.flushCacheOnDeallocate = flushCacheOnDeallocate;
+        this.trans = trans;
+        this.testSql = testSql;
+        this.cols = cols;
     }
 
-    public String getSql(ReturnColumns cols) {
-      return sql.replace("${cols}", cols.cols);
-    }
-  }
+    @Parameterized.Parameters(name = "{index}: autorollback(autoSave={0}, cleanSavePoint={1}, autoCommit={2}, failMode={3}, continueMode={4}, flushOnDeallocate={5}, hastransaction={6}, sql={7}, columns={8})")
+    public static Iterable<Object[]> data() {
+        Collection<Object[]> ids = new ArrayList<>();
+        boolean[] booleans = new boolean[]{true, false};
+        for (AutoSave autoSave : AutoSave.values()) {
+            for (CleanSavePoint cleanSavePoint : CleanSavePoint.values()) {
+                for (AutoCommit autoCommit : AutoCommit.values()) {
+                    for (FailMode failMode : FailMode.values()) {
+                        // ERROR: DISCARD ALL cannot run inside a transaction block
+                        if (failMode == FailMode.DISCARD && autoCommit == AutoCommit.NO) {
+                            continue;
+                        }
+                        for (ContinueMode continueMode : ContinueMode.values()) {
+                            if (failMode == FailMode.ALTER && continueMode != ContinueMode.SELECT) {
+                                continue;
+                            }
+                            for (boolean flushCacheOnDeallocate : booleans) {
+                                if (!(flushCacheOnDeallocate || DEALLOCATES.contains(failMode))) {
+                                    continue;
+                                }
 
-  private static final EnumSet<FailMode> DEALLOCATES =
-      EnumSet.of(FailMode.DEALLOCATE, FailMode.DISCARD);
-
-  private static final EnumSet<FailMode> TRANS_KILLERS =
-      EnumSet.of(FailMode.SELECT, FailMode.INSERT_BATCH);
-
-  private enum ContinueMode {
-    COMMIT,
-    IS_VALID,
-    SELECT,
-  }
-
-  private final AutoSave autoSave;
-  private final CleanSavePoint cleanSavePoint;
-  private final AutoCommit autoCommit;
-  private final FailMode failMode;
-  private final ContinueMode continueMode;
-  private final boolean flushCacheOnDeallocate;
-  private final boolean trans;
-  private final TestStatement testSql;
-  private final ReturnColumns cols;
-
-  public AutoRollbackTestSuite(AutoSave autoSave, CleanSavePoint cleanSavePoint, AutoCommit autoCommit,
-      FailMode failMode, ContinueMode continueMode, boolean flushCacheOnDeallocate,
-      boolean trans, TestStatement testSql, ReturnColumns cols) {
-    this.autoSave = autoSave;
-    this.cleanSavePoint = cleanSavePoint;
-    this.autoCommit = autoCommit;
-    this.failMode = failMode;
-    this.continueMode = continueMode;
-    this.flushCacheOnDeallocate = flushCacheOnDeallocate;
-    this.trans = trans;
-    this.testSql = testSql;
-    this.cols = cols;
-  }
-
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-    if (testSql == TestStatement.WITH_INSERT_SELECT) {
-      assumeMinimumServerVersion(ServerVersion.v9_1);
-    }
-
-    TestUtil.createTable(con, "rollbacktest", "a int, str text");
-    con.setAutoCommit(autoCommit == AutoCommit.YES);
-    BaseConnection baseConnection = con.unwrap(BaseConnection.class);
-    baseConnection.setFlushCacheOnDeallocate(flushCacheOnDeallocate);
-    Assume.assumeTrue("DEALLOCATE ALL requires PostgreSQL 8.3+",
-        failMode != FailMode.DEALLOCATE || TestUtil.haveMinimumServerVersion(con, ServerVersion.v8_3));
-    Assume.assumeTrue("DISCARD ALL requires PostgreSQL 8.3+",
-        failMode != FailMode.DISCARD || TestUtil.haveMinimumServerVersion(con, ServerVersion.v8_3));
-    Assume.assumeTrue("Plan invalidation on table redefinition requires PostgreSQL 8.3+",
-        failMode != FailMode.ALTER || TestUtil.haveMinimumServerVersion(con, ServerVersion.v8_3));
-  }
-
-  @Override
-  public void tearDown() throws SQLException {
-    try {
-      con.setAutoCommit(true);
-      TestUtil.dropTable(con, "rollbacktest");
-    } catch (Exception e) {
-      e.printStackTrace();
-    }
-    super.tearDown();
-  }
-
-  @Override
-  protected void updateProperties(Properties props) {
-    super.updateProperties(props);
-    PGProperty.AUTOSAVE.set(props, autoSave.value());
-    PGProperty.CLEANUP_SAVEPOINTS.set(props, cleanSavePoint.toString());
-    PGProperty.PREPARE_THRESHOLD.set(props, 1);
-  }
-
-  @Parameterized.Parameters(name = "{index}: autorollback(autoSave={0}, cleanSavePoint={1}, autoCommit={2}, failMode={3}, continueMode={4}, flushOnDeallocate={5}, hastransaction={6}, sql={7}, columns={8})")
-  public static Iterable<Object[]> data() {
-    Collection<Object[]> ids = new ArrayList<>();
-    boolean[] booleans = new boolean[]{true, false};
-    for (AutoSave autoSave : AutoSave.values()) {
-      for (CleanSavePoint cleanSavePoint:CleanSavePoint.values()) {
-        for (AutoCommit autoCommit : AutoCommit.values()) {
-          for (FailMode failMode : FailMode.values()) {
-            // ERROR: DISCARD ALL cannot run inside a transaction block
-            if (failMode == FailMode.DISCARD && autoCommit == AutoCommit.NO) {
-              continue;
-            }
-            for (ContinueMode continueMode : ContinueMode.values()) {
-              if (failMode == FailMode.ALTER && continueMode != ContinueMode.SELECT) {
-                continue;
-              }
-              for (boolean flushCacheOnDeallocate : booleans) {
-                if (!(flushCacheOnDeallocate || DEALLOCATES.contains(failMode))) {
-                  continue;
-                }
-
-                for (boolean trans : new boolean[]{true, false}) {
-                  // continueMode would commit, and autoCommit=YES would commit,
-                  // so it does not make sense to test trans=true for those cases
-                  if (trans && (continueMode == ContinueMode.COMMIT
-                      || autoCommit != AutoCommit.NO)) {
-                    continue;
-                  }
-                  for (TestStatement statement : TestStatement.values()) {
-                    for (ReturnColumns columns : ReturnColumns.values()) {
-                      ids.add(new Object[]{autoSave, cleanSavePoint, autoCommit, failMode, continueMode,
-                          flushCacheOnDeallocate, trans, statement, columns});
+                                for (boolean trans : new boolean[]{true, false}) {
+                                    // continueMode would commit, and autoCommit=YES would commit,
+                                    // so it does not make sense to test trans=true for those cases
+                                    if (trans && (continueMode == ContinueMode.COMMIT
+                                            || autoCommit != AutoCommit.NO)) {
+                                        continue;
+                                    }
+                                    for (TestStatement statement : TestStatement.values()) {
+                                        for (ReturnColumns columns : ReturnColumns.values()) {
+                                            ids.add(new Object[]{autoSave, cleanSavePoint, autoCommit, failMode, continueMode,
+                                                    flushCacheOnDeallocate, trans, statement, columns});
+                                        }
+                                    }
+                                }
+                            }
+                        }
                     }
-                  }
                 }
-              }
             }
-          }
         }
-      }
-    }
-    return ids;
-  }
-
-  @Test
-  public void run() throws SQLException {
-    if (continueMode == ContinueMode.IS_VALID) {
-      // make "isValid" a server-prepared statement
-      con.isValid(4);
-    } else if (continueMode == ContinueMode.COMMIT) {
-      doCommit();
-    } else if (continueMode == ContinueMode.SELECT) {
-      assertRows("rollbacktest", 0);
+        return ids;
     }
 
-    Statement statement = con.createStatement();
-    statement.executeUpdate("insert into rollbacktest(a, str) values (0, 'test')");
-    int rowsExpected = 1;
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        if (testSql == TestStatement.WITH_INSERT_SELECT) {
+            assumeMinimumServerVersion(ServerVersion.v9_1);
+        }
 
-    PreparedStatement ps = con.prepareStatement(testSql.getSql(cols));
-    // Server-prepare the testSql
-    ps.executeQuery().close();
-    rowsExpected += testSql.rowsInserted;
-
-    if (trans) {
-      statement.executeUpdate("update rollbacktest set a=a");
+        TestUtil.createTable(con, "rollbacktest", "a int, str text");
+        con.setAutoCommit(autoCommit == AutoCommit.YES);
+        BaseConnection baseConnection = con.unwrap(BaseConnection.class);
+        baseConnection.setFlushCacheOnDeallocate(flushCacheOnDeallocate);
+        Assume.assumeTrue("DEALLOCATE ALL requires PostgreSQL 8.3+",
+                failMode != FailMode.DEALLOCATE || TestUtil.haveMinimumServerVersion(con, ServerVersion.v8_3));
+        Assume.assumeTrue("DISCARD ALL requires PostgreSQL 8.3+",
+                failMode != FailMode.DISCARD || TestUtil.haveMinimumServerVersion(con, ServerVersion.v8_3));
+        Assume.assumeTrue("Plan invalidation on table redefinition requires PostgreSQL 8.3+",
+                failMode != FailMode.ALTER || TestUtil.haveMinimumServerVersion(con, ServerVersion.v8_3));
     }
 
-    switch (failMode) {
-      case SELECT:
+    @Override
+    public void tearDown() throws SQLException {
         try {
-          statement.execute("select 1/0");
-          Assert.fail("select 1/0 should fail");
-        } catch (SQLException e) {
-          Assert.assertEquals("division by zero expected",
-              PSQLState.DIVISION_BY_ZERO.getState(), e.getSQLState());
+            con.setAutoCommit(true);
+            TestUtil.dropTable(con, "rollbacktest");
+        } catch (Exception e) {
+            e.printStackTrace();
         }
-        break;
-      case DEALLOCATE:
-        statement.executeUpdate("DEALLOCATE ALL");
-        break;
-      case DISCARD:
-        statement.executeUpdate("DISCARD ALL");
-        break;
-      case ALTER:
-        statement.executeUpdate("alter table rollbacktest add q int");
-        break;
-      case INSERT_BATCH:
+        super.tearDown();
+    }
+
+    @Override
+    protected void updateProperties(Properties props) {
+        super.updateProperties(props);
+        PGProperty.AUTOSAVE.set(props, autoSave.value());
+        PGProperty.CLEANUP_SAVEPOINTS.set(props, cleanSavePoint.toString());
+        PGProperty.PREPARE_THRESHOLD.set(props, 1);
+    }
+
+    @Test
+    public void run() throws SQLException {
+        if (continueMode == ContinueMode.IS_VALID) {
+            // make "isValid" a server-prepared statement
+            con.isValid(4);
+        } else if (continueMode == ContinueMode.COMMIT) {
+            doCommit();
+        } else if (continueMode == ContinueMode.SELECT) {
+            assertRows("rollbacktest", 0);
+        }
+
+        Statement statement = con.createStatement();
+        statement.executeUpdate("insert into rollbacktest(a, str) values (0, 'test')");
+        int rowsExpected = 1;
+
+        PreparedStatement ps = con.prepareStatement(testSql.getSql(cols));
+        // Server-prepare the testSql
+        ps.executeQuery().close();
+        rowsExpected += testSql.rowsInserted;
+
+        if (trans) {
+            statement.executeUpdate("update rollbacktest set a=a");
+        }
+
+        switch (failMode) {
+            case SELECT:
+                try {
+                    statement.execute("select 1/0");
+                    Assert.fail("select 1/0 should fail");
+                } catch (SQLException e) {
+                    Assert.assertEquals("division by zero expected",
+                            PSQLState.DIVISION_BY_ZERO.getState(), e.getSQLState());
+                }
+                break;
+            case DEALLOCATE:
+                statement.executeUpdate("DEALLOCATE ALL");
+                break;
+            case DISCARD:
+                statement.executeUpdate("DISCARD ALL");
+                break;
+            case ALTER:
+                statement.executeUpdate("alter table rollbacktest add q int");
+                break;
+            case INSERT_BATCH:
+                try {
+                    statement.addBatch("insert into rollbacktest(a, str) values (1/0, 'test')");
+                    statement.executeBatch();
+                    Assert.fail("batch insert with 1/0 should fail");
+                } catch (SQLException e) {
+                    Assert.assertEquals("division by zero expected",
+                            PSQLState.DIVISION_BY_ZERO.getState(), e.getSQLState());
+                }
+                break;
+            default:
+                Assert.fail("Fail mode " + failMode + " is not implemented");
+        }
+
+        PgConnection pgConnection = con.unwrap(PgConnection.class);
+        if (autoSave == AutoSave.ALWAYS) {
+            Assert.assertNotEquals("In AutoSave.ALWAYS, transaction should not fail",
+                    TransactionState.FAILED, pgConnection.getTransactionState());
+        }
+        if (autoCommit == AutoCommit.NO) {
+            Assert.assertNotEquals("AutoCommit == NO, thus transaction should be active (open or failed)",
+                    TransactionState.IDLE, pgConnection.getTransactionState());
+        }
+        statement.close();
+
+        switch (continueMode) {
+            case COMMIT:
+                try {
+                    doCommit();
+                    // No assert here: commit should always succeed with exception of well known failure cases in catch
+                } catch (SQLException e) {
+                    if (!flushCacheOnDeallocate && DEALLOCATES.contains(failMode)
+                            && autoSave == AutoSave.NEVER) {
+                        Assert.assertEquals(
+                                "flushCacheOnDeallocate is disabled, thus " + failMode + " should cause 'prepared statement \"...\" does not exist'"
+                                        + " error message is " + e.getMessage(),
+                                PSQLState.INVALID_SQL_STATEMENT_NAME.getState(), e.getSQLState());
+                        return;
+                    }
+                    throw e;
+                }
+                return;
+            case IS_VALID:
+                if (!flushCacheOnDeallocate && autoSave == AutoSave.NEVER
+                        && DEALLOCATES.contains(failMode) && autoCommit == AutoCommit.NO
+                        && con.unwrap(PGConnection.class).getPreferQueryMode() != PreferQueryMode.SIMPLE) {
+                    Assert.assertFalse("Connection.isValid should return false since failMode=" + failMode
+                                    + ", flushCacheOnDeallocate=false, and autosave=NEVER",
+                            con.isValid(4));
+                } else {
+                    Assert.assertTrue("Connection.isValid should return true unless the connection is closed",
+                            con.isValid(4));
+                }
+                return;
+            default:
+                break;
+        }
+
         try {
-          statement.addBatch("insert into rollbacktest(a, str) values (1/0, 'test')");
-          statement.executeBatch();
-          Assert.fail("select 1/0 should fail");
+            // Try execute server-prepared statement again
+            ps.executeQuery().close();
+            rowsExpected += testSql.rowsInserted;
+            executeSqlSuccess();
         } catch (SQLException e) {
-          Assert.assertEquals("division by zero expected",
-              PSQLState.DIVISION_BY_ZERO.getState(), e.getSQLState());
+            if (autoSave != AutoSave.ALWAYS && TRANS_KILLERS.contains(failMode) && autoCommit == AutoCommit.NO) {
+                Assert.assertEquals(
+                        "AutoSave==" + autoSave + ", thus statements should fail with 'current transaction is aborted...', "
+                                + " error message is " + e.getMessage(),
+                        PSQLState.IN_FAILED_SQL_TRANSACTION.getState(), e.getSQLState());
+                return;
+            }
+
+            if (autoSave == AutoSave.NEVER && autoCommit == AutoCommit.NO) {
+                if (DEALLOCATES.contains(failMode) && !flushCacheOnDeallocate) {
+                    Assert.assertEquals(
+                            "flushCacheOnDeallocate is disabled, thus " + failMode + " should cause 'prepared statement \"...\" does not exist'"
+                                    + " error message is " + e.getMessage(),
+                            PSQLState.INVALID_SQL_STATEMENT_NAME.getState(), e.getSQLState());
+                } else if (failMode == FailMode.ALTER) {
+                    Assert.assertEquals(
+                            "AutoSave==NEVER, autocommit=NO, thus ALTER TABLE causes SELECT * to fail with "
+                                    + "'cached plan must not change result type', "
+                                    + " error message is " + e.getMessage(),
+                            PSQLState.NOT_IMPLEMENTED.getState(), e.getSQLState());
+                } else {
+                    throw e;
+                }
+            } else {
+                throw e;
+            }
         }
-        break;
-      default:
-        Assert.fail("Fail mode " + failMode + " is not implemented");
-    }
 
-    PgConnection pgConnection = con.unwrap(PgConnection.class);
-    if (autoSave == AutoSave.ALWAYS) {
-      Assert.assertNotEquals("In AutoSave.ALWAYS, transaction should not fail",
-          TransactionState.FAILED, pgConnection.getTransactionState());
-    }
-    if (autoCommit == AutoCommit.NO) {
-      Assert.assertNotEquals("AutoCommit == NO, thus transaction should be active (open or failed)",
-          TransactionState.IDLE, pgConnection.getTransactionState());
-    }
-    statement.close();
-
-    switch (continueMode) {
-      case COMMIT:
         try {
-          doCommit();
-          // No assert here: commit should always succeed with exception of well known failure cases in catch
+            assertRows("rollbacktest", rowsExpected);
+            executeSqlSuccess();
         } catch (SQLException e) {
-          if (!flushCacheOnDeallocate && DEALLOCATES.contains(failMode)
-              && autoSave == AutoSave.NEVER) {
-            Assert.assertEquals(
-                "flushCacheOnDeallocate is disabled, thus " + failMode + " should cause 'prepared statement \"...\" does not exist'"
-                    + " error message is " + e.getMessage(),
-                PSQLState.INVALID_SQL_STATEMENT_NAME.getState(), e.getSQLState());
-            return;
-          }
-          throw e;
+            if (autoSave == AutoSave.NEVER && autoCommit == AutoCommit.NO) {
+                if (DEALLOCATES.contains(failMode) && !flushCacheOnDeallocate
+                        || failMode == FailMode.ALTER) {
+                    // The above statement failed with "prepared statement does not exist", thus subsequent one should fail with
+                    // transaction aborted.
+                    Assert.assertEquals(
+                            "AutoSave==NEVER, thus statements should fail with 'current transaction is aborted...', "
+                                    + " error message is " + e.getMessage(),
+                            PSQLState.IN_FAILED_SQL_TRANSACTION.getState(), e.getSQLState());
+                }
+            } else {
+                throw e;
+            }
         }
-        return;
-      case IS_VALID:
-        if (!flushCacheOnDeallocate && autoSave == AutoSave.NEVER
-            && DEALLOCATES.contains(failMode) && autoCommit == AutoCommit.NO
-            && con.unwrap(PGConnection.class).getPreferQueryMode() != PreferQueryMode.SIMPLE) {
-          Assert.assertFalse("Connection.isValid should return false since failMode=" + failMode
-              + ", flushCacheOnDeallocate=false, and autosave=NEVER",
-              con.isValid(4));
-        } else {
-          Assert.assertTrue("Connection.isValid should return true unless the connection is closed",
-              con.isValid(4));
-        }
-        return;
-      default:
-        break;
     }
 
-    try {
-      // Try execute server-prepared statement again
-      ps.executeQuery().close();
-      rowsExpected += testSql.rowsInserted;
-      executeSqlSuccess();
-    } catch (SQLException e) {
-      if (autoSave != AutoSave.ALWAYS && TRANS_KILLERS.contains(failMode) && autoCommit == AutoCommit.NO) {
-        Assert.assertEquals(
-            "AutoSave==" + autoSave + ", thus statements should fail with 'current transaction is aborted...', "
-                + " error message is " + e.getMessage(),
-            PSQLState.IN_FAILED_SQL_TRANSACTION.getState(), e.getSQLState());
-        return;
-      }
-
-      if (autoSave == AutoSave.NEVER && autoCommit == AutoCommit.NO) {
-        if (DEALLOCATES.contains(failMode) && !flushCacheOnDeallocate) {
-          Assert.assertEquals(
-              "flushCacheOnDeallocate is disabled, thus " + failMode + " should cause 'prepared statement \"...\" does not exist'"
-                  + " error message is " + e.getMessage(),
-              PSQLState.INVALID_SQL_STATEMENT_NAME.getState(), e.getSQLState());
+    private void executeSqlSuccess() throws SQLException {
+        if (autoCommit == AutoCommit.YES) {
+            // in autocommit everything should just work
+        } else if (TRANS_KILLERS.contains(failMode)) {
+            if (autoSave != AutoSave.ALWAYS) {
+                Assert.fail(
+                        "autosave= " + autoSave + " != ALWAYS, thus the transaction should be killed");
+            }
+        } else if (DEALLOCATES.contains(failMode)) {
+            if (autoSave == AutoSave.NEVER && !flushCacheOnDeallocate
+                    && con.unwrap(PGConnection.class).getPreferQueryMode() != PreferQueryMode.SIMPLE) {
+                Assert.fail("flushCacheOnDeallocate == false, thus DEALLOCATE ALL should kill the transaction");
+            }
         } else if (failMode == FailMode.ALTER) {
-          Assert.assertEquals(
-              "AutoSave==NEVER, autocommit=NO, thus ALTER TABLE causes SELECT * to fail with "
-                  + "'cached plan must not change result type', "
-                  + " error message is " + e.getMessage(),
-              PSQLState.NOT_IMPLEMENTED.getState(), e.getSQLState());
+            if (autoSave == AutoSave.NEVER
+                    && con.unwrap(PGConnection.class).getPreferQueryMode() != PreferQueryMode.SIMPLE
+                    && cols == ReturnColumns.STAR) {
+                Assert.fail("autosave=NEVER, thus the transaction should be killed");
+            }
         } else {
-          throw e;
+            Assert.fail("It is not specified why the test should pass, thus marking a failure");
         }
-      } else {
-        throw e;
-      }
     }
 
-    try {
-      assertRows("rollbacktest", rowsExpected);
-      executeSqlSuccess();
-    } catch (SQLException e) {
-      if (autoSave == AutoSave.NEVER && autoCommit == AutoCommit.NO) {
-        if (DEALLOCATES.contains(failMode) && !flushCacheOnDeallocate
-            || failMode == FailMode.ALTER) {
-          // The above statement failed with "prepared statement does not exist", thus subsequent one should fail with
-          // transaction aborted.
-          Assert.assertEquals(
-              "AutoSave==NEVER, thus statements should fail with 'current transaction is aborted...', "
-                  + " error message is " + e.getMessage(),
-              PSQLState.IN_FAILED_SQL_TRANSACTION.getState(), e.getSQLState());
+    private void assertRows(String tableName, int nrows) throws SQLException {
+        Statement st = con.createStatement();
+        ResultSet rs = st.executeQuery("select count(*) from " + tableName);
+        rs.next();
+        Assert.assertEquals("Table " + tableName, nrows, rs.getInt(1));
+    }
+
+    private void doCommit() throws SQLException {
+        // Such a dance is required since "commit" checks "current transaction state",
+        // so we need some pending changes, so "commit" query would be sent to the database
+        if (con.getAutoCommit()) {
+            con.setAutoCommit(false);
+            Statement st = con.createStatement();
+            st.executeUpdate(
+                    "insert into rollbacktest(a, str) values (42, '" + System.currentTimeMillis() + "," + counter.getAndIncrement() + "')");
+            st.close();
         }
-      } else {
-        throw e;
-      }
+        con.commit();
+        con.setAutoCommit(autoCommit == AutoCommit.YES);
     }
-  }
 
-  private void executeSqlSuccess() throws SQLException {
-    if (autoCommit == AutoCommit.YES) {
-      // in autocommit everything should just work
-    } else if (TRANS_KILLERS.contains(failMode)) {
-      if (autoSave != AutoSave.ALWAYS) {
-        Assert.fail(
-            "autosave= " + autoSave + " != ALWAYS, thus the transaction should be killed");
-      }
-    } else if (DEALLOCATES.contains(failMode)) {
-      if (autoSave == AutoSave.NEVER && !flushCacheOnDeallocate
-          && con.unwrap(PGConnection.class).getPreferQueryMode() != PreferQueryMode.SIMPLE) {
-        Assert.fail("flushCacheOnDeallocate == false, thus DEALLOCATE ALL should kill the transaction");
-      }
-    } else if (failMode == FailMode.ALTER) {
-      if (autoSave == AutoSave.NEVER
-          && con.unwrap(PGConnection.class).getPreferQueryMode() != PreferQueryMode.SIMPLE
-          && cols == ReturnColumns.STAR) {
-        Assert.fail("autosave=NEVER, thus the transaction should be killed");
-      }
-    } else {
-      Assert.fail("It is not specified why the test should pass, thus marking a failure");
+    private enum CleanSavePoint {
+        TRUE,
+        FALSE
     }
-  }
 
-  private void assertRows(String tableName, int nrows) throws SQLException {
-    Statement st = con.createStatement();
-    ResultSet rs = st.executeQuery("select count(*) from " + tableName);
-    rs.next();
-    Assert.assertEquals("Table " + tableName, nrows, rs.getInt(1));
-  }
-
-  private void doCommit() throws SQLException {
-    // Such a dance is required since "commit" checks "current transaction state",
-    // so we need some pending changes, so "commit" query would be sent to the database
-    if (con.getAutoCommit()) {
-      con.setAutoCommit(false);
-      Statement st = con.createStatement();
-      st.executeUpdate(
-          "insert into rollbacktest(a, str) values (42, '" + System.currentTimeMillis() + "," + counter.getAndIncrement() + "')");
-      st.close();
+    private enum FailMode {
+        /**
+         * Executes "select 1/0" and causes transaction failure (if autocommit=no).
+         * Mitigation: "autosave=always" or "autocommit=true"
+         */
+        SELECT,
+        /**
+         * Executes "alter table rollbacktest", thus it breaks a prepared select over that table.
+         * Mitigation: "autosave in (always, conservative)"
+         */
+        ALTER,
+        /**
+         * Executes DEALLOCATE ALL.
+         * Mitigation:
+         * 1) QueryExecutor tracks "DEALLOCATE ALL" responses ({@link org.postgresql.core.QueryExecutor#setFlushCacheOnDeallocate(boolean)})
+         * 2) QueryExecutor tracks "prepared statement name is invalid" and unprepared relevant statements ({@link org.postgresql.core.v3.QueryExecutorImpl#processResults(ResultHandler, int)})
+         * 3) "autosave in (always, conservative)"
+         * 4) Non-transactional cases are healed by retry (when no transaction present, just retry is possible)
+         */
+        DEALLOCATE,
+        /**
+         * Executes DISCARD ALL.
+         * Mitigation: the same as for {@link #DEALLOCATE}
+         */
+        DISCARD,
+        /**
+         * Executes "insert ... select 1/0" in a batch statement, thus causing the transaction to fail.
+         */
+        INSERT_BATCH,
+    }
+
+    private enum ReturnColumns {
+        EXACT("a, str"),
+        STAR("*");
+
+        public final String cols;
+
+        ReturnColumns(String cols) {
+            this.cols = cols;
+        }
+    }
+
+    private enum TestStatement {
+        SELECT("select ${cols} from rollbacktest", 0),
+        WITH_INSERT_SELECT(
+                "with x as (insert into rollbacktest(a, str) values(43, 'abc') returning ${cols})"
+                        + "select * from x", 1);
+
+        private final String sql;
+        private final int rowsInserted;
+
+        TestStatement(String sql, int rowsInserted) {
+            this.sql = sql;
+            this.rowsInserted = rowsInserted;
+        }
+
+        public String getSql(ReturnColumns cols) {
+            return sql.replace("${cols}", cols.cols);
+        }
+    }
+
+    private enum ContinueMode {
+        COMMIT,
+        IS_VALID,
+        SELECT,
     }
-    con.commit();
-    con.setAutoCommit(autoCommit == AutoCommit.YES);
-  }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/BaseTest4.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/BaseTest4.java
index 2f7982b..dce07b8 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/BaseTest4.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/BaseTest4.java
@@ -5,8 +5,14 @@
 
 package org.postgresql.test.jdbc2;
 
-import static org.junit.Assert.assertEquals;
-
+import java.sql.Connection;
+import java.sql.SQLException;
+import java.util.Locale;
+import java.util.Properties;
+import java.util.function.Supplier;
+import org.junit.After;
+import org.junit.Assume;
+import org.junit.Before;
 import org.postgresql.PGConnection;
 import org.postgresql.PGProperty;
 import org.postgresql.core.BaseConnection;
@@ -14,139 +20,130 @@ import org.postgresql.core.Oid;
 import org.postgresql.core.Version;
 import org.postgresql.jdbc.PreferQueryMode;
 import org.postgresql.test.TestUtil;
-
-import org.junit.After;
-import org.junit.Assume;
-import org.junit.Before;
-
-import java.sql.Connection;
-import java.sql.SQLException;
-import java.util.Locale;
-import java.util.Properties;
-import java.util.function.Supplier;
+import static org.junit.Assert.assertEquals;
 
 public class BaseTest4 {
 
-  public enum BinaryMode {
-    REGULAR, FORCE
-  }
+    protected Connection con;
+    protected BinaryMode binaryMode;
+    protected PreferQueryMode preferQueryMode;
+    private ReWriteBatchedInserts reWriteBatchedInserts;
+    private StringType stringType;
 
-  public enum ReWriteBatchedInserts {
-    YES, NO
-  }
-
-  public enum AutoCommit {
-    YES, NO
-  }
-
-  public enum StringType {
-    UNSPECIFIED, VARCHAR
-  }
-
-  protected Connection con;
-  protected BinaryMode binaryMode;
-  private ReWriteBatchedInserts reWriteBatchedInserts;
-  protected PreferQueryMode preferQueryMode;
-  private StringType stringType;
-
-  protected void updateProperties(Properties props) {
-    if (binaryMode == BinaryMode.FORCE) {
-      forceBinary(props);
+    public static void assumeCallableStatementsSupported(Connection con) throws SQLException {
+        PreferQueryMode preferQueryMode = con.unwrap(PGConnection.class).getPreferQueryMode();
+        Assume.assumeTrue("callable statements are not fully supported in simple protocol execution mode",
+                preferQueryMode != PreferQueryMode.SIMPLE);
     }
-    if (reWriteBatchedInserts == ReWriteBatchedInserts.YES) {
-      PGProperty.REWRITE_BATCHED_INSERTS.set(props, true);
+
+    protected void updateProperties(Properties props) {
+        if (binaryMode == BinaryMode.FORCE) {
+            forceBinary(props);
+        }
+        if (reWriteBatchedInserts == ReWriteBatchedInserts.YES) {
+            PGProperty.REWRITE_BATCHED_INSERTS.set(props, true);
+        }
+        if (stringType != null) {
+            PGProperty.STRING_TYPE.set(props, stringType.name().toLowerCase(Locale.ROOT));
+        }
     }
-    if (stringType != null) {
-      PGProperty.STRING_TYPE.set(props, stringType.name().toLowerCase(Locale.ROOT));
+
+    protected void forceBinary(Properties props) {
+        PGProperty.PREPARE_THRESHOLD.set(props, -1);
+        PGProperty.BINARY_TRANSFER_ENABLE.set(props, Oid.BOOL);
     }
-  }
 
-  protected void forceBinary(Properties props) {
-    PGProperty.PREPARE_THRESHOLD.set(props, -1);
-    PGProperty.BINARY_TRANSFER_ENABLE.set(props, Oid.BOOL);
-  }
+    public final void setBinaryMode(BinaryMode binaryMode) {
+        this.binaryMode = binaryMode;
+    }
 
-  public final void setBinaryMode(BinaryMode binaryMode) {
-    this.binaryMode = binaryMode;
-  }
+    public StringType getStringType() {
+        return stringType;
+    }
 
-  public StringType getStringType() {
-    return stringType;
-  }
+    public void setStringType(StringType stringType) {
+        this.stringType = stringType;
+    }
 
-  public void setStringType(StringType stringType) {
-    this.stringType = stringType;
-  }
+    public void setReWriteBatchedInserts(
+            ReWriteBatchedInserts reWriteBatchedInserts) {
+        this.reWriteBatchedInserts = reWriteBatchedInserts;
+    }
 
-  public void setReWriteBatchedInserts(
-      ReWriteBatchedInserts reWriteBatchedInserts) {
-    this.reWriteBatchedInserts = reWriteBatchedInserts;
-  }
+    @Before
+    public void setUp() throws Exception {
+        Properties props = new Properties();
+        updateProperties(props);
+        con = TestUtil.openDB(props);
+        PGConnection pg = con.unwrap(PGConnection.class);
+        preferQueryMode = pg == null ? PreferQueryMode.EXTENDED : pg.getPreferQueryMode();
+    }
 
-  @Before
-  public void setUp() throws Exception {
-    Properties props = new Properties();
-    updateProperties(props);
-    con = TestUtil.openDB(props);
-    PGConnection pg = con.unwrap(PGConnection.class);
-    preferQueryMode = pg == null ? PreferQueryMode.EXTENDED : pg.getPreferQueryMode();
-  }
+    @After
+    public void tearDown() throws SQLException {
+        TestUtil.closeDB(con);
+    }
 
-  @After
-  public void tearDown() throws SQLException {
-    TestUtil.closeDB(con);
-  }
+    public void assumeByteaSupported() {
+        Assume.assumeTrue("bytea is not supported in simple protocol execution mode",
+                preferQueryMode != PreferQueryMode.SIMPLE);
+    }
 
-  public void assumeByteaSupported() {
-    Assume.assumeTrue("bytea is not supported in simple protocol execution mode",
-        preferQueryMode != PreferQueryMode.SIMPLE);
-  }
+    public void assumeCallableStatementsSupported() {
+        Assume.assumeTrue("callable statements are not fully supported in simple protocol execution mode",
+                preferQueryMode != PreferQueryMode.SIMPLE);
+    }
 
-  public static void assumeCallableStatementsSupported(Connection con) throws SQLException {
-    PreferQueryMode preferQueryMode = con.unwrap(PGConnection.class).getPreferQueryMode();
-    Assume.assumeTrue("callable statements are not fully supported in simple protocol execution mode",
-        preferQueryMode != PreferQueryMode.SIMPLE);
-  }
+    public void assumeBinaryModeRegular() {
+        Assume.assumeTrue(binaryMode == BinaryMode.REGULAR);
+    }
 
-  public void assumeCallableStatementsSupported() {
-    Assume.assumeTrue("callable statements are not fully supported in simple protocol execution mode",
-        preferQueryMode != PreferQueryMode.SIMPLE);
-  }
+    public void assumeBinaryModeForce() {
+        Assume.assumeTrue(binaryMode == BinaryMode.FORCE);
+        Assume.assumeTrue(preferQueryMode != PreferQueryMode.SIMPLE);
+    }
 
-  public void assumeBinaryModeRegular() {
-    Assume.assumeTrue(binaryMode == BinaryMode.REGULAR);
-  }
+    public void assumeNotSimpleQueryMode() {
+        Assume.assumeTrue(preferQueryMode != PreferQueryMode.SIMPLE);
+    }
 
-  public void assumeBinaryModeForce() {
-    Assume.assumeTrue(binaryMode == BinaryMode.FORCE);
-    Assume.assumeTrue(preferQueryMode != PreferQueryMode.SIMPLE);
-  }
+    /**
+     * Shorthand for {@code Assume.assumeTrue(TestUtil.haveMinimumServerVersion(conn, version))}.
+     */
+    public void assumeMinimumServerVersion(String message, Version version) throws SQLException {
+        Assume.assumeTrue(message, TestUtil.haveMinimumServerVersion(con, version));
+    }
 
-  public void assumeNotSimpleQueryMode() {
-    Assume.assumeTrue(preferQueryMode != PreferQueryMode.SIMPLE);
-  }
+    /**
+     * Shorthand for {@code Assume.assumeTrue(TestUtil.haveMinimumServerVersion(conn, version))}.
+     */
+    public void assumeMinimumServerVersion(Version version) throws SQLException {
+        Assume.assumeTrue(TestUtil.haveMinimumServerVersion(con, version));
+    }
 
-  /**
-   * Shorthand for {@code Assume.assumeTrue(TestUtil.haveMinimumServerVersion(conn, version)}.
-   */
-  public void assumeMinimumServerVersion(String message, Version version) throws SQLException {
-    Assume.assumeTrue(message, TestUtil.haveMinimumServerVersion(con, version));
-  }
+    protected void assertBinaryForReceive(int oid, boolean expected, Supplier<String> message) throws SQLException {
+        assertEquals(message.get() + ", useBinaryForReceive(oid=" + oid + ")", expected,
+                con.unwrap(BaseConnection.class).getQueryExecutor().useBinaryForReceive(oid));
+    }
 
-  /**
-   * Shorthand for {@code Assume.assumeTrue(TestUtil.haveMinimumServerVersion(conn, version)}.
-   */
-  public void assumeMinimumServerVersion(Version version) throws SQLException {
-    Assume.assumeTrue(TestUtil.haveMinimumServerVersion(con, version));
-  }
+    protected void assertBinaryForSend(int oid, boolean expected, Supplier<String> message) throws SQLException {
+        assertEquals(message.get() + ", useBinaryForSend(oid=" + oid + ")", expected,
+                con.unwrap(BaseConnection.class).getQueryExecutor().useBinaryForSend(oid));
+    }
 
-  protected void assertBinaryForReceive(int oid, boolean expected, Supplier<String> message) throws SQLException {
-    assertEquals(message.get() + ", useBinaryForReceive(oid=" + oid + ")", expected,
-        con.unwrap(BaseConnection.class).getQueryExecutor().useBinaryForReceive(oid));
-  }
+    public enum BinaryMode {
+        REGULAR, FORCE
+    }
 
-  protected void assertBinaryForSend(int oid, boolean expected, Supplier<String> message) throws SQLException {
-    assertEquals(message.get() + ", useBinaryForSend(oid=" + oid + ")", expected,
-        con.unwrap(BaseConnection.class).getQueryExecutor().useBinaryForSend(oid));
-  }
+    public enum ReWriteBatchedInserts {
+        YES, NO
+    }
+
+    public enum AutoCommit {
+        YES, NO
+    }
+
+    public enum StringType {
+        UNSPECIFIED, VARCHAR
+    }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/BatchExecuteTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/BatchExecuteTest.java
index c121b6f..9a07f3a 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/BatchExecuteTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/BatchExecuteTest.java
@@ -38,619 +38,645 @@ import java.util.Properties;
 @RunWith(Parameterized.class)
 public class BatchExecuteTest extends BaseTest4 {
 
-  private boolean insertRewrite;
+    private boolean insertRewrite;
 
-  public BatchExecuteTest(BinaryMode binaryMode, boolean insertRewrite) {
-    this.insertRewrite = insertRewrite;
-    setBinaryMode(binaryMode);
-  }
-
-  @Parameterized.Parameters(name = "binary = {0}, insertRewrite = {1}")
-  public static Iterable<Object[]> data() {
-    Collection<Object[]> ids = new ArrayList<>();
-    for (BinaryMode binaryMode : BinaryMode.values()) {
-      for (boolean insertRewrite : new boolean[]{false, true}) {
-        ids.add(new Object[]{binaryMode, insertRewrite});
-      }
+    public BatchExecuteTest(BinaryMode binaryMode, boolean insertRewrite) {
+        this.insertRewrite = insertRewrite;
+        setBinaryMode(binaryMode);
     }
-    return ids;
-  }
 
-  @Override
-  protected void updateProperties(Properties props) {
-    super.updateProperties(props);
-    PGProperty.REWRITE_BATCHED_INSERTS.set(props, insertRewrite);
-  }
-
-  // Set up the fixture for this testcase: a connection to a database with
-  // a table for this test.
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-    Statement stmt = con.createStatement();
-
-    // Drop the test table if it already exists for some reason. It is
-    // not an error if it doesn't exist.
-    TestUtil.createTempTable(con, "testbatch", "pk INTEGER, col1 INTEGER");
-
-    stmt.executeUpdate("INSERT INTO testbatch VALUES (1, 0)");
-
-    TestUtil.createTempTable(con, "prep", "a integer, b integer, d date");
-
-    TestUtil.createTempTable(con, "batchUpdCnt", "id varchar(512) primary key, data varchar(512)");
-    stmt.executeUpdate("INSERT INTO batchUpdCnt(id) VALUES ('key-2')");
-
-    stmt.close();
-
-    // Generally recommended with batch updates. By default we run all
-    // tests in this test case with autoCommit disabled.
-    con.setAutoCommit(false);
-  }
-
-  // Tear down the fixture for this test case.
-  @Override
-  public void tearDown() throws SQLException {
-    con.setAutoCommit(true);
-
-    TestUtil.dropTable(con, "testbatch");
-    super.tearDown();
-  }
-
-  @Test
-  public void testSupportsBatchUpdates() throws Exception {
-    DatabaseMetaData dbmd = con.getMetaData();
-    Assert.assertTrue("Expected that Batch Updates are supported", dbmd.supportsBatchUpdates());
-  }
-
-  @Test
-  public void testEmptyClearBatch() throws Exception {
-    Statement stmt = con.createStatement();
-    stmt.clearBatch(); // No-op.
-
-    PreparedStatement ps = con.prepareStatement("SELECT ?");
-    ps.clearBatch(); // No-op.
-  }
-
-  private void assertCol1HasValue(int expected) throws Exception {
-    Statement getCol1 = con.createStatement();
-    try {
-      ResultSet rs = getCol1.executeQuery("SELECT col1 FROM testbatch WHERE pk = 1");
-      Assert.assertTrue(rs.next());
-
-      int actual = rs.getInt("col1");
-
-      Assert.assertEquals(expected, actual);
-      Assert.assertFalse(rs.next());
-
-      rs.close();
-    } finally {
-      TestUtil.closeQuietly(getCol1);
+    @Parameterized.Parameters(name = "binary = {0}, insertRewrite = {1}")
+    public static Iterable<Object[]> data() {
+        Collection<Object[]> ids = new ArrayList<>();
+        for (BinaryMode binaryMode : BinaryMode.values()) {
+            for (boolean insertRewrite : new boolean[]{false, true}) {
+                ids.add(new Object[]{binaryMode, insertRewrite});
+            }
+        }
+        return ids;
     }
-  }
 
-  @Test
-  public void testExecuteEmptyBatch() throws Exception {
-    Statement stmt = con.createStatement();
-    try {
-      int[] updateCount = stmt.executeBatch();
-      Assert.assertEquals(0, updateCount.length);
-
-      stmt.addBatch("UPDATE testbatch SET col1 = col1 + 1 WHERE pk = 1");
-      stmt.clearBatch();
-      updateCount = stmt.executeBatch();
-      Assert.assertEquals(0, updateCount.length);
-      stmt.close();
-    } finally {
-      TestUtil.closeQuietly(stmt);
+    public static void assertSimpleInsertBatch(int n, int[] actual) {
+        int[] expected = new int[n];
+        Arrays.fill(expected, 1);
+        assertBatchResult(n + " addBatch, 1 row each", expected, actual);
     }
-  }
 
-  @Test
-  public void testExecuteEmptyPreparedBatch() throws Exception {
-    PreparedStatement ps = con.prepareStatement("UPDATE testbatch SET col1 = col1 + 1 WHERE pk = 1");
-    try {
-      int[] updateCount = ps.executeBatch();
-      Assert.assertEquals("Empty batch should update empty result", 0, updateCount.length);
-    } finally {
-      TestUtil.closeQuietly(ps);
+    public static void assertBatchResult(String message, int[] expected, int[] actual) {
+        int[] clone = expected.clone();
+        boolean hasChanges = false;
+        for (int i = 0; i < actual.length; i++) {
+            int a = actual[i];
+            if (a == Statement.SUCCESS_NO_INFO && expected[i] >= 0) {
+                clone[i] = a;
+                hasChanges = true;
+            }
+        }
+        if (hasChanges) {
+            message += ", original expectation: " + Arrays.toString(expected);
+        }
+        Assert.assertEquals(
+                message,
+                Arrays.toString(clone),
+                Arrays.toString(actual));
     }
-  }
 
-  @Test
-  public void testPreparedNoParameters() throws SQLException {
-    PreparedStatement ps = con.prepareStatement("INSERT INTO prep(a) VALUES (1)");
-    try {
-      ps.addBatch();
-      ps.addBatch();
-      ps.addBatch();
-      ps.addBatch();
-      int[] actual = ps.executeBatch();
-      assertBatchResult("4 rows inserted via batch", new int[]{1, 1, 1, 1}, actual);
-    } finally {
-      TestUtil.closeQuietly(ps);
+    @Override
+    protected void updateProperties(Properties props) {
+        super.updateProperties(props);
+        PGProperty.REWRITE_BATCHED_INSERTS.set(props, insertRewrite);
     }
-  }
 
-  @Test
-  public void testClearBatch() throws Exception {
-    Statement stmt = con.createStatement();
-    try {
-      stmt.addBatch("UPDATE testbatch SET col1 = col1 + 1 WHERE pk = 1");
-      assertCol1HasValue(0);
-      stmt.addBatch("UPDATE testbatch SET col1 = col1 + 2 WHERE pk = 1");
-      assertCol1HasValue(0);
-      stmt.clearBatch();
-      assertCol1HasValue(0);
-      stmt.addBatch("UPDATE testbatch SET col1 = col1 + 4 WHERE pk = 1");
-      assertCol1HasValue(0);
-      stmt.executeBatch();
-      assertCol1HasValue(4);
-      con.commit();
-      assertCol1HasValue(4);
-    } finally {
-      TestUtil.closeQuietly(stmt);
+    // Set up the fixture for this testcase: a connection to a database with
+    // a table for this test.
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        Statement stmt = con.createStatement();
+
+        // Drop the test table if it already exists for some reason. It is
+        // not an error if it doesn't exist.
+        TestUtil.createTempTable(con, "testbatch", "pk INTEGER, col1 INTEGER");
+
+        stmt.executeUpdate("INSERT INTO testbatch VALUES (1, 0)");
+
+        TestUtil.createTempTable(con, "prep", "a integer, b integer, d date");
+
+        TestUtil.createTempTable(con, "batchUpdCnt", "id varchar(512) primary key, data varchar(512)");
+        stmt.executeUpdate("INSERT INTO batchUpdCnt(id) VALUES ('key-2')");
+
+        stmt.close();
+
+        // Generally recommended with batch updates. By default we run all
+        // tests in this test case with autoCommit disabled.
+        con.setAutoCommit(false);
     }
-  }
 
-  @Test
-  public void testClearPreparedNoArgBatch() throws Exception {
-    PreparedStatement ps = con.prepareStatement("INSERT INTO prep(a) VALUES (1)");
-    try {
-      ps.addBatch();
-      ps.clearBatch();
-      int[] updateCount = ps.executeBatch();
-      Assert.assertEquals("Empty batch should update empty result", 0, updateCount.length);
-    } finally {
-      TestUtil.closeQuietly(ps);
+    // Tear down the fixture for this test case.
+    @Override
+    public void tearDown() throws SQLException {
+        con.setAutoCommit(true);
+
+        TestUtil.dropTable(con, "testbatch");
+        super.tearDown();
     }
-  }
 
-  @Test
-  public void testClearPreparedEmptyBatch() throws Exception {
-    PreparedStatement ps = con.prepareStatement("INSERT INTO prep(a) VALUES (1)");
-    try {
-      ps.clearBatch();
-    } finally {
-      TestUtil.closeQuietly(ps);
+    @Test
+    public void testSupportsBatchUpdates() throws Exception {
+        DatabaseMetaData dbmd = con.getMetaData();
+        Assert.assertTrue("Expected that Batch Updates are supported", dbmd.supportsBatchUpdates());
     }
-  }
 
-  @Test
-  public void testSelectInBatch() throws Exception {
-    Statement stmt = stmt = con.createStatement();
-    try {
-      stmt.addBatch("UPDATE testbatch SET col1 = col1 + 1 WHERE pk = 1");
-      stmt.addBatch("SELECT col1 FROM testbatch WHERE pk = 1");
-      stmt.addBatch("UPDATE testbatch SET col1 = col1 + 2 WHERE pk = 1");
+    @Test
+    public void testEmptyClearBatch() throws Exception {
+        Statement stmt = con.createStatement();
+        stmt.clearBatch(); // No-op.
 
-      // There's no reason to Assert.fail
-      int[] updateCounts = stmt.executeBatch();
-
-      Assert.assertTrue("First update should succeed, thus updateCount should be 1 or SUCCESS_NO_INFO"
-              + ", actual value: " + updateCounts[0],
-          updateCounts[0] == 1 || updateCounts[0] == Statement.SUCCESS_NO_INFO);
-      Assert.assertTrue("For SELECT, number of modified rows should be either 0 or SUCCESS_NO_INFO"
-              + ", actual value: " + updateCounts[1],
-          updateCounts[1] == 0 || updateCounts[1] == Statement.SUCCESS_NO_INFO);
-      Assert.assertTrue("Second update should succeed, thus updateCount should be 1 or SUCCESS_NO_INFO"
-              + ", actual value: " + updateCounts[2],
-          updateCounts[2] == 1 || updateCounts[2] == Statement.SUCCESS_NO_INFO);
-    } finally {
-      TestUtil.closeQuietly(stmt);
+        PreparedStatement ps = con.prepareStatement("SELECT ?");
+        ps.clearBatch(); // No-op.
     }
-  }
 
-  @Test
-  public void testSelectInBatchThrowsAutoCommit() throws Exception {
-    con.setAutoCommit(true);
-    testSelectInBatchThrows();
-  }
+    private void assertCol1HasValue(int expected) throws Exception {
+        Statement getCol1 = con.createStatement();
+        try {
+            ResultSet rs = getCol1.executeQuery("SELECT col1 FROM testbatch WHERE pk = 1");
+            Assert.assertTrue(rs.next());
 
-  @Test
-  public void testSelectInBatchThrows() throws Exception {
-    Statement stmt = con.createStatement();
-    try {
-      int oldValue = getCol1Value();
-      stmt.addBatch("UPDATE testbatch SET col1 = col1 + 1 WHERE pk = 1");
-      stmt.addBatch("SELECT 0/0 FROM testbatch WHERE pk = 1");
-      stmt.addBatch("UPDATE testbatch SET col1 = col1 + 2 WHERE pk = 1");
+            int actual = rs.getInt("col1");
 
-      int[] updateCounts;
-      try {
-        updateCounts = stmt.executeBatch();
-        Assert.fail("0/0 should throw BatchUpdateException");
-      } catch (BatchUpdateException be) {
-        updateCounts = be.getUpdateCounts();
-      }
+            Assert.assertEquals(expected, actual);
+            Assert.assertFalse(rs.next());
+
+            rs.close();
+        } finally {
+            TestUtil.closeQuietly(getCol1);
+        }
+    }
+
+    @Test
+    public void testExecuteEmptyBatch() throws Exception {
+        Statement stmt = con.createStatement();
+        try {
+            int[] updateCount = stmt.executeBatch();
+            Assert.assertEquals(0, updateCount.length);
+
+            stmt.addBatch("UPDATE testbatch SET col1 = col1 + 1 WHERE pk = 1");
+            stmt.clearBatch();
+            updateCount = stmt.executeBatch();
+            Assert.assertEquals(0, updateCount.length);
+            stmt.close();
+        } finally {
+            TestUtil.closeQuietly(stmt);
+        }
+    }
+
+    @Test
+    public void testExecuteEmptyPreparedBatch() throws Exception {
+        PreparedStatement ps = con.prepareStatement("UPDATE testbatch SET col1 = col1 + 1 WHERE pk = 1");
+        try {
+            int[] updateCount = ps.executeBatch();
+            Assert.assertEquals("Empty batch should update empty result", 0, updateCount.length);
+        } finally {
+            TestUtil.closeQuietly(ps);
+        }
+    }
+
+    @Test
+    public void testPreparedNoParameters() throws SQLException {
+        PreparedStatement ps = con.prepareStatement("INSERT INTO prep(a) VALUES (1)");
+        try {
+            ps.addBatch();
+            ps.addBatch();
+            ps.addBatch();
+            ps.addBatch();
+            int[] actual = ps.executeBatch();
+            assertBatchResult("4 rows inserted via batch", new int[]{1, 1, 1, 1}, actual);
+        } finally {
+            TestUtil.closeQuietly(ps);
+        }
+    }
+
+    @Test
+    public void testClearBatch() throws Exception {
+        Statement stmt = con.createStatement();
+        try {
+            stmt.addBatch("UPDATE testbatch SET col1 = col1 + 1 WHERE pk = 1");
+            assertCol1HasValue(0);
+            stmt.addBatch("UPDATE testbatch SET col1 = col1 + 2 WHERE pk = 1");
+            assertCol1HasValue(0);
+            stmt.clearBatch();
+            assertCol1HasValue(0);
+            stmt.addBatch("UPDATE testbatch SET col1 = col1 + 4 WHERE pk = 1");
+            assertCol1HasValue(0);
+            stmt.executeBatch();
+            assertCol1HasValue(4);
+            con.commit();
+            assertCol1HasValue(4);
+        } finally {
+            TestUtil.closeQuietly(stmt);
+        }
+    }
+
+    @Test
+    public void testClearPreparedNoArgBatch() throws Exception {
+        PreparedStatement ps = con.prepareStatement("INSERT INTO prep(a) VALUES (1)");
+        try {
+            ps.addBatch();
+            ps.clearBatch();
+            int[] updateCount = ps.executeBatch();
+            Assert.assertEquals("Empty batch should update empty result", 0, updateCount.length);
+        } finally {
+            TestUtil.closeQuietly(ps);
+        }
+    }
+
+    @Test
+    public void testClearPreparedEmptyBatch() throws Exception {
+        PreparedStatement ps = con.prepareStatement("INSERT INTO prep(a) VALUES (1)");
+        try {
+            ps.clearBatch();
+        } finally {
+            TestUtil.closeQuietly(ps);
+        }
+    }
+
+    @Test
+    public void testSelectInBatch() throws Exception {
+        Statement stmt = con.createStatement();
+        try {
+            stmt.addBatch("UPDATE testbatch SET col1 = col1 + 1 WHERE pk = 1");
+            stmt.addBatch("SELECT col1 FROM testbatch WHERE pk = 1");
+            stmt.addBatch("UPDATE testbatch SET col1 = col1 + 2 WHERE pk = 1");
+
+            // There's no reason to Assert.fail
+            int[] updateCounts = stmt.executeBatch();
+
+            Assert.assertTrue("First update should succeed, thus updateCount should be 1 or SUCCESS_NO_INFO"
+                            + ", actual value: " + updateCounts[0],
+                    updateCounts[0] == 1 || updateCounts[0] == Statement.SUCCESS_NO_INFO);
+            Assert.assertTrue("For SELECT, number of modified rows should be either 0 or SUCCESS_NO_INFO"
+                            + ", actual value: " + updateCounts[1],
+                    updateCounts[1] == 0 || updateCounts[1] == Statement.SUCCESS_NO_INFO);
+            Assert.assertTrue("Second update should succeed, thus updateCount should be 1 or SUCCESS_NO_INFO"
+                            + ", actual value: " + updateCounts[2],
+                    updateCounts[2] == 1 || updateCounts[2] == Statement.SUCCESS_NO_INFO);
+        } finally {
+            TestUtil.closeQuietly(stmt);
+        }
+    }
+
+    @Test
+    public void testSelectInBatchThrowsAutoCommit() throws Exception {
+        con.setAutoCommit(true);
+        testSelectInBatchThrows();
+    }
+
+    @Test
+    public void testSelectInBatchThrows() throws Exception {
+        Statement stmt = con.createStatement();
+        try {
+            int oldValue = getCol1Value();
+            stmt.addBatch("UPDATE testbatch SET col1 = col1 + 1 WHERE pk = 1");
+            stmt.addBatch("SELECT 0/0 FROM testbatch WHERE pk = 1");
+            stmt.addBatch("UPDATE testbatch SET col1 = col1 + 2 WHERE pk = 1");
+
+            int[] updateCounts;
+            try {
+                updateCounts = stmt.executeBatch();
+                Assert.fail("0/0 should throw BatchUpdateException");
+            } catch (BatchUpdateException be) {
+                updateCounts = be.getUpdateCounts();
+            }
+
+            if (!con.getAutoCommit()) {
+                con.commit();
+            }
+
+            int newValue = getCol1Value();
+            boolean firstOk = updateCounts[0] == 1 || updateCounts[0] == Statement.SUCCESS_NO_INFO;
+            boolean lastOk = updateCounts[2] == 1 || updateCounts[2] == Statement.SUCCESS_NO_INFO;
+
+            Assert.assertEquals("testbatch.col1 should account +1 and +2 for the relevant successful rows: "
+                            + Arrays.toString(updateCounts),
+                    oldValue + (firstOk ? 1 : 0) + (lastOk ? 2 : 0), newValue);
+
+            Assert.assertEquals("SELECT 0/0 should be marked as Statement.EXECUTE_FAILED",
+                    Statement.EXECUTE_FAILED,
+                    updateCounts[1]);
+
+        } finally {
+            TestUtil.closeQuietly(stmt);
+        }
+    }
+
+    private int getCol1Value() throws SQLException {
+        Statement stmt = con.createStatement();
+        try {
+            ResultSet rs = stmt.executeQuery("select col1 from testbatch where pk=1");
+            rs.next();
+            return rs.getInt(1);
+        } finally {
+            stmt.close();
+        }
+    }
+
+    @Test
+    public void testStringAddBatchOnPreparedStatement() throws Exception {
+        PreparedStatement pstmt =
+                con.prepareStatement("UPDATE testbatch SET col1 = col1 + ? WHERE PK = ?");
+        pstmt.setInt(1, 1);
+        pstmt.setInt(2, 1);
+        pstmt.addBatch();
+
+        try {
+            pstmt.addBatch("UPDATE testbatch SET col1 = 3");
+            Assert.fail(
+                    "Should have thrown an exception about using the string addBatch method on a prepared statement.");
+        } catch (SQLException sqle) {
+        }
+
+        pstmt.close();
+    }
+
+    @Test
+    public void testPreparedStatement() throws Exception {
+        PreparedStatement pstmt =
+                con.prepareStatement("UPDATE testbatch SET col1 = col1 + ? WHERE PK = ?");
+
+        // Note that the first parameter changes for every statement in the
+        // batch, whereas the second parameter remains constant.
+        pstmt.setInt(1, 1);
+        pstmt.setInt(2, 1);
+        pstmt.addBatch();
+        assertCol1HasValue(0);
+
+        pstmt.setInt(1, 2);
+        pstmt.addBatch();
+        assertCol1HasValue(0);
+
+        pstmt.setInt(1, 4);
+        pstmt.addBatch();
+        assertCol1HasValue(0);
+
+        pstmt.executeBatch();
+        assertCol1HasValue(7);
+
+        // now test to see that we can still use the statement after the execute
+        pstmt.setInt(1, 3);
+        pstmt.addBatch();
+        assertCol1HasValue(7);
+
+        pstmt.executeBatch();
+        assertCol1HasValue(10);
 
-      if (!con.getAutoCommit()) {
         con.commit();
-      }
+        assertCol1HasValue(10);
 
-      int newValue = getCol1Value();
-      boolean firstOk = updateCounts[0] == 1 || updateCounts[0] == Statement.SUCCESS_NO_INFO;
-      boolean lastOk = updateCounts[2] == 1 || updateCounts[2] == Statement.SUCCESS_NO_INFO;
+        con.rollback();
+        assertCol1HasValue(10);
 
-      Assert.assertEquals("testbatch.col1 should account +1 and +2 for the relevant successful rows: "
-              + Arrays.toString(updateCounts),
-          oldValue + (firstOk ? 1 : 0) + (lastOk ? 2 : 0), newValue);
-
-      Assert.assertEquals("SELECT 0/0 should be marked as Statement.EXECUTE_FAILED",
-          Statement.EXECUTE_FAILED,
-          updateCounts[1]);
-
-    } finally {
-      TestUtil.closeQuietly(stmt);
-    }
-  }
-
-  private int getCol1Value() throws SQLException {
-    Statement stmt = con.createStatement();
-    try {
-      ResultSet rs = stmt.executeQuery("select col1 from testbatch where pk=1");
-      rs.next();
-      return rs.getInt(1);
-    } finally {
-      stmt.close();
-    }
-  }
-
-  @Test
-  public void testStringAddBatchOnPreparedStatement() throws Exception {
-    PreparedStatement pstmt =
-        con.prepareStatement("UPDATE testbatch SET col1 = col1 + ? WHERE PK = ?");
-    pstmt.setInt(1, 1);
-    pstmt.setInt(2, 1);
-    pstmt.addBatch();
-
-    try {
-      pstmt.addBatch("UPDATE testbatch SET col1 = 3");
-      Assert.fail(
-          "Should have thrown an exception about using the string addBatch method on a prepared statement.");
-    } catch (SQLException sqle) {
+        pstmt.close();
     }
 
-    pstmt.close();
-  }
+    @Test
+    public void testTransactionalBehaviour() throws Exception {
+        Statement stmt = con.createStatement();
 
-  @Test
-  public void testPreparedStatement() throws Exception {
-    PreparedStatement pstmt =
-        con.prepareStatement("UPDATE testbatch SET col1 = col1 + ? WHERE PK = ?");
+        stmt.addBatch("UPDATE testbatch SET col1 = col1 + 1 WHERE pk = 1");
+        stmt.addBatch("UPDATE testbatch SET col1 = col1 + 2 WHERE pk = 1");
+        stmt.executeBatch();
+        con.rollback();
+        assertCol1HasValue(0);
 
-    // Note that the first parameter changes for every statement in the
-    // batch, whereas the second parameter remains constant.
-    pstmt.setInt(1, 1);
-    pstmt.setInt(2, 1);
-    pstmt.addBatch();
-    assertCol1HasValue(0);
+        stmt.addBatch("UPDATE testbatch SET col1 = col1 + 4 WHERE pk = 1");
+        stmt.addBatch("UPDATE testbatch SET col1 = col1 + 8 WHERE pk = 1");
 
-    pstmt.setInt(1, 2);
-    pstmt.addBatch();
-    assertCol1HasValue(0);
+        // The statement has been added to the batch, but it should not yet
+        // have been executed.
+        assertCol1HasValue(0);
 
-    pstmt.setInt(1, 4);
-    pstmt.addBatch();
-    assertCol1HasValue(0);
+        int[] updateCounts = stmt.executeBatch();
+        Assert.assertEquals(2, updateCounts.length);
+        Assert.assertEquals(1, updateCounts[0]);
+        Assert.assertEquals(1, updateCounts[1]);
 
-    pstmt.executeBatch();
-    assertCol1HasValue(7);
+        assertCol1HasValue(12);
+        con.commit();
+        assertCol1HasValue(12);
+        con.rollback();
+        assertCol1HasValue(12);
 
-    // now test to see that we can still use the statement after the execute
-    pstmt.setInt(1, 3);
-    pstmt.addBatch();
-    assertCol1HasValue(7);
-
-    pstmt.executeBatch();
-    assertCol1HasValue(10);
-
-    con.commit();
-    assertCol1HasValue(10);
-
-    con.rollback();
-    assertCol1HasValue(10);
-
-    pstmt.close();
-  }
-
-  @Test
-  public void testTransactionalBehaviour() throws Exception {
-    Statement stmt = con.createStatement();
-
-    stmt.addBatch("UPDATE testbatch SET col1 = col1 + 1 WHERE pk = 1");
-    stmt.addBatch("UPDATE testbatch SET col1 = col1 + 2 WHERE pk = 1");
-    stmt.executeBatch();
-    con.rollback();
-    assertCol1HasValue(0);
-
-    stmt.addBatch("UPDATE testbatch SET col1 = col1 + 4 WHERE pk = 1");
-    stmt.addBatch("UPDATE testbatch SET col1 = col1 + 8 WHERE pk = 1");
-
-    // The statement has been added to the batch, but it should not yet
-    // have been executed.
-    assertCol1HasValue(0);
-
-    int[] updateCounts = stmt.executeBatch();
-    Assert.assertEquals(2, updateCounts.length);
-    Assert.assertEquals(1, updateCounts[0]);
-    Assert.assertEquals(1, updateCounts[1]);
-
-    assertCol1HasValue(12);
-    con.commit();
-    assertCol1HasValue(12);
-    con.rollback();
-    assertCol1HasValue(12);
-
-    TestUtil.closeQuietly(stmt);
-  }
-
-  @Test
-  public void testWarningsAreCleared() throws SQLException {
-    Statement stmt = con.createStatement();
-    stmt.addBatch("CREATE TEMP TABLE unused (a int primary key)");
-    stmt.executeBatch();
-    // Execute an empty batch to clear warnings.
-    stmt.executeBatch();
-    Assert.assertNull(stmt.getWarnings());
-    TestUtil.closeQuietly(stmt);
-  }
-
-  @Test
-  public void testBatchEscapeProcessing() throws SQLException {
-    Statement stmt = con.createStatement();
-    stmt.execute("CREATE TEMP TABLE batchescape (d date)");
-
-    stmt.addBatch("INSERT INTO batchescape (d) VALUES ({d '2007-11-20'})");
-    stmt.executeBatch();
-
-    PreparedStatement pstmt =
-        con.prepareStatement("INSERT INTO batchescape (d) VALUES ({d '2007-11-20'})");
-    pstmt.addBatch();
-    pstmt.executeBatch();
-    pstmt.close();
-
-    ResultSet rs = stmt.executeQuery("SELECT d FROM batchescape");
-    Assert.assertTrue(rs.next());
-    Assert.assertEquals("2007-11-20", rs.getString(1));
-    Assert.assertTrue(rs.next());
-    Assert.assertEquals("2007-11-20", rs.getString(1));
-    Assert.assertTrue(!rs.next());
-    TestUtil.closeQuietly(stmt);
-  }
-
-  @Test
-  public void testBatchWithEmbeddedNulls() throws SQLException {
-    Statement stmt = con.createStatement();
-    stmt.execute("CREATE TEMP TABLE batchstring (a text)");
-
-    con.commit();
-
-    PreparedStatement pstmt = con.prepareStatement("INSERT INTO batchstring VALUES (?)");
-
-    try {
-      pstmt.setString(1, "a");
-      pstmt.addBatch();
-      pstmt.setString(1, "\u0000");
-      pstmt.addBatch();
-      pstmt.setString(1, "b");
-      pstmt.addBatch();
-      pstmt.executeBatch();
-      Assert.fail("Should have thrown an exception.");
-    } catch (SQLException sqle) {
-      con.rollback();
+        TestUtil.closeQuietly(stmt);
     }
-    pstmt.close();
 
-    ResultSet rs = stmt.executeQuery("SELECT COUNT(*) FROM batchstring");
-    Assert.assertTrue(rs.next());
-    Assert.assertEquals(0, rs.getInt(1));
-    TestUtil.closeQuietly(stmt);
-  }
-
-  @Test
-  public void testMixedBatch() throws SQLException {
-    try {
-      Statement st = con.createStatement();
-      st.executeUpdate("DELETE FROM prep;");
-      st.close();
-
-      st = con.createStatement();
-      st.addBatch("INSERT INTO prep (a, b) VALUES (1,2)");
-      st.addBatch("INSERT INTO prep (a, b) VALUES (100,200)");
-      st.addBatch("DELETE FROM prep WHERE a = 1 AND b = 2");
-      st.addBatch("CREATE TEMPORARY TABLE waffles(sauce text)");
-      st.addBatch("INSERT INTO waffles(sauce) VALUES ('cream'), ('strawberry jam')");
-      int[] batchResult = st.executeBatch();
-      Assert.assertEquals(1, batchResult[0]);
-      Assert.assertEquals(1, batchResult[1]);
-      Assert.assertEquals(1, batchResult[2]);
-      Assert.assertEquals(0, batchResult[3]);
-      Assert.assertEquals(2, batchResult[4]);
-    } catch (SQLException ex) {
-      ex.getNextException().printStackTrace();
-      throw ex;
+    @Test
+    public void testWarningsAreCleared() throws SQLException {
+        Statement stmt = con.createStatement();
+        stmt.addBatch("CREATE TEMP TABLE unused (a int primary key)");
+        stmt.executeBatch();
+        // Execute an empty batch to clear warnings.
+        stmt.executeBatch();
+        Assert.assertNull(stmt.getWarnings());
+        TestUtil.closeQuietly(stmt);
     }
-  }
 
-  /*
-   * A user reported that a query that uses RETURNING (via getGeneratedKeys) in a batch, and a
-   * 'text' field value in a table is assigned NULL in the first execution of the batch then
-   * non-NULL afterwards using PreparedStatement.setObject(int, Object) (i.e. no Types param or
-   * setString call) the batch may Assert.fail with:
-   *
-   * "Received resultset tuples, but no field structure for them"
-   *
-   * at org.postgresql.core.v3.QueryExecutorImpl.processResults
-   *
-   * Prior to 245b388 it would instead Assert.fail with a NullPointerException in
-   * AbstractJdbc2ResultSet.checkColumnIndex
-   *
-   * The cause is complicated. The Assert.failure arises because the query gets re-planned mid-batch. This
-   * re-planning clears the cached information about field types. The field type information for
-   * parameters gets re-acquired later but the information for *returned* values does not.
-   *
-   * (The reason why the returned value types aren't recalculated is not yet known.)
-   *
-   * The re-plan's cause is its self complicated.
-   *
-   * The first bind of the parameter, which is null, gets the type oid 0 (unknown/unspecified).
-   * Unless Types.VARCHAR is specified or setString is used, in which case the oid is set to 1043
-   * (varchar).
-   *
-   * The second bind identifies the object class as String so it calls setString internally. This
-   * sets the type to 1043 (varchar).
-   *
-   * The third and subsequent binds, whether null or non-null, will get type 1043, because there's
-   * logic to avoid overwriting a known parameter type with the unknown type oid. This is why the
-   * issue can only occur when null is the first entry.
-   *
-   * When executed the first time a describe is run. This reports the parameter oid to be 25 (text),
-   * because that's the type of the table column the param is being assigned to. That's why the cast
-   * to ?::varchar works - because it overrides the type for the parameter to 1043 (varchar).
-   *
-   * The second execution sees that the bind parameter type is already known to PgJDBC as 1043
-   * (varchar). PgJDBC doesn't see that text and varchar are the same - and, in fact, under some
-   * circumstances they aren't exactly the same. So it discards the planned query and re-plans.
-   *
-   * This issue can be reproduced with any pair of implicitly or assignment castable types; for
-   * example, using Integer in JDBC and bigint in the Pg table will do it.
-   */
-  @Test
-  public void testBatchReturningMixedNulls() throws SQLException {
-    String[] testData = new String[]{null, "test", null, null, null};
+    @Test
+    public void testBatchEscapeProcessing() throws SQLException {
+        Statement stmt = con.createStatement();
+        stmt.execute("CREATE TEMP TABLE batchescape (d date)");
 
-    try {
-      Statement setup = con.createStatement();
-      setup.execute("DROP TABLE IF EXISTS mixednulltest;");
-      // It's significant that "value' is 'text' not 'varchar' here;
-      // if 'varchar' is used then everything works fine.
-      setup.execute("CREATE TABLE mixednulltest (key serial primary key, value text);");
-      setup.close();
+        stmt.addBatch("INSERT INTO batchescape (d) VALUES ({d '2007-11-20'})");
+        stmt.executeBatch();
 
-      // If the parameter is given as ?::varchar then this issue
-      // does not arise.
-      PreparedStatement st =
-          con.prepareStatement("INSERT INTO mixednulltest (value) VALUES (?)", new String[]{"key"});
+        PreparedStatement pstmt =
+                con.prepareStatement("INSERT INTO batchescape (d) VALUES ({d '2007-11-20'})");
+        pstmt.addBatch();
+        pstmt.executeBatch();
+        pstmt.close();
 
-      for (String val : testData) {
-        /*
-         * This is the crucial bit. It's set to null first time around, so the RETURNING clause's
-         * type oid is undefined.
-         *
-         * The second time around the value is assigned so Pg reports the type oid is TEXT, like the
-         * table. But we expected VARCHAR.
-         *
-         * This causes PgJDBC to replan the query, and breaks other things.
-         */
-        st.setObject(1, val);
-        st.addBatch();
-      }
-      st.executeBatch();
-      ResultSet rs = st.getGeneratedKeys();
-      for (int i = 1; i <= testData.length; i++) {
-        rs.next();
-        Assert.assertEquals(i, rs.getInt(1));
-      }
-      Assert.assertTrue(!rs.next());
-    } catch (SQLException ex) {
-      ex.getNextException().printStackTrace();
-      throw ex;
+        ResultSet rs = stmt.executeQuery("SELECT d FROM batchescape");
+        Assert.assertTrue(rs.next());
+        Assert.assertEquals("2007-11-20", rs.getString(1));
+        Assert.assertTrue(rs.next());
+        Assert.assertEquals("2007-11-20", rs.getString(1));
+        Assert.assertTrue(!rs.next());
+        TestUtil.closeQuietly(stmt);
     }
-  }
 
-  @Test
-  public void testBatchWithAlternatingAndUnknownTypes0() throws SQLException {
-    testBatchWithAlternatingAndUnknownTypesN(0);
-  }
+    @Test
+    public void testBatchWithEmbeddedNulls() throws SQLException {
+        Statement stmt = con.createStatement();
+        stmt.execute("CREATE TEMP TABLE batchstring (a text)");
 
-  @Test
-  public void testBatchWithAlternatingAndUnknownTypes1() throws SQLException {
-    testBatchWithAlternatingAndUnknownTypesN(1);
-  }
+        con.commit();
 
-  @Test
-  public void testBatchWithAlternatingAndUnknownTypes2() throws SQLException {
-    testBatchWithAlternatingAndUnknownTypesN(2);
-  }
+        PreparedStatement pstmt = con.prepareStatement("INSERT INTO batchstring VALUES (?)");
 
-  @Test
-  public void testBatchWithAlternatingAndUnknownTypes3() throws SQLException {
-    testBatchWithAlternatingAndUnknownTypesN(3);
-  }
+        try {
+            pstmt.setString(1, "a");
+            pstmt.addBatch();
+            pstmt.setString(1, "\u0000");
+            pstmt.addBatch();
+            pstmt.setString(1, "b");
+            pstmt.addBatch();
+            pstmt.executeBatch();
+            Assert.fail("Should have thrown an exception.");
+        } catch (SQLException sqle) {
+            con.rollback();
+        }
+        pstmt.close();
 
-  @Test
-  public void testBatchWithAlternatingAndUnknownTypes4() throws SQLException {
-    testBatchWithAlternatingAndUnknownTypesN(4);
-  }
-
-  @Test
-  public void testBatchWithAlternatingAndUnknownTypes5() throws SQLException {
-    testBatchWithAlternatingAndUnknownTypesN(5);
-  }
-
-  @Test
-  public void testBatchWithAlternatingAndUnknownTypes6() throws SQLException {
-    testBatchWithAlternatingAndUnknownTypesN(6);
-  }
-
-  /**
-   * <p>This one is reproduced in regular (non-force binary) mode.</p>
-   *
-   * <p>As of 9.4.1208 the following tests fail:
-   * BatchExecuteTest.testBatchWithAlternatingAndUnknownTypes3
-   * BatchExecuteTest.testBatchWithAlternatingAndUnknownTypes4
-   * BatchExecuteTest.testBatchWithAlternatingAndUnknownTypes5
-   * BatchExecuteTest.testBatchWithAlternatingAndUnknownTypes6</p>
-   * @param numPreliminaryInserts number of preliminary inserts to make so the statement gets
-   *                              prepared
-   * @throws SQLException in case of failure
-   */
-  public void testBatchWithAlternatingAndUnknownTypesN(int numPreliminaryInserts)
-      throws SQLException {
-    PreparedStatement ps = null;
-    try {
-      con.setAutoCommit(true);
-      // This test requires autoCommit false to reproduce
-      ps = con.prepareStatement("insert into prep(a, d) values(?, ?)");
-      for (int i = 0; i < numPreliminaryInserts; i++) {
-        ps.setNull(1, Types.SMALLINT);
-        ps.setObject(2, new Date(42));
-        ps.addBatch();
-        ps.executeBatch();
-      }
-
-      ps.setObject(1, 43.0);
-      ps.setObject(2, new Date(43));
-      ps.addBatch();
-      ps.setNull(1, Types.SMALLINT);
-      ps.setObject(2, new Date(44));
-      ps.addBatch();
-      ps.executeBatch();
-
-      ps.setObject(1, 45.0);
-      ps.setObject(2, new Date(45)); // <-- this causes "oid of bind unknown, send Describe"
-      ps.addBatch();
-      ps.setNull(1, Types.SMALLINT);
-      ps.setNull(2, Types.DATE);     // <-- this uses Oid.DATE, thus no describe message
-      // As the same query object was reused the describe from Date(45) overwrites
-      // parameter types, thus Double(45)'s type (double) comes instead of SMALLINT.
-      // Thus pgjdbc thinks the prepared statement is prepared for (double, date) types
-      // however in reality the statement is prepared for (smallint, date) types.
-
-      ps.addBatch();
-      ps.executeBatch();
-
-      // This execution with (double, unknown) passes isPreparedForTypes check, and causes
-      // the failure
-      ps.setObject(1, 47.0);
-      ps.setObject(2, new Date(47));
-      ps.addBatch();
-      ps.executeBatch();
-    } catch (BatchUpdateException e) {
-      throw e.getNextException();
-    } finally {
-      TestUtil.closeQuietly(ps);
+        ResultSet rs = stmt.executeQuery("SELECT COUNT(*) FROM batchstring");
+        Assert.assertTrue(rs.next());
+        Assert.assertEquals(0, rs.getInt(1));
+        TestUtil.closeQuietly(stmt);
     }
+
+    @Test
+    public void testMixedBatch() throws SQLException {
+        try {
+            Statement st = con.createStatement();
+            st.executeUpdate("DELETE FROM prep;");
+            st.close();
+
+            st = con.createStatement();
+            st.addBatch("INSERT INTO prep (a, b) VALUES (1,2)");
+            st.addBatch("INSERT INTO prep (a, b) VALUES (100,200)");
+            st.addBatch("DELETE FROM prep WHERE a = 1 AND b = 2");
+            st.addBatch("CREATE TEMPORARY TABLE waffles(sauce text)");
+            st.addBatch("INSERT INTO waffles(sauce) VALUES ('cream'), ('strawberry jam')");
+            int[] batchResult = st.executeBatch();
+            Assert.assertEquals(1, batchResult[0]);
+            Assert.assertEquals(1, batchResult[1]);
+            Assert.assertEquals(1, batchResult[2]);
+            Assert.assertEquals(0, batchResult[3]);
+            Assert.assertEquals(2, batchResult[4]);
+        } catch (SQLException ex) {
+            ex.getNextException().printStackTrace();
+            throw ex;
+        }
+    }
+
+    /*
+     * A user reported that a query that uses RETURNING (via getGeneratedKeys) in a batch, and a
+     * 'text' field value in a table is assigned NULL in the first execution of the batch then
+     * non-NULL afterwards using PreparedStatement.setObject(int, Object) (i.e. no Types param or
+     * setString call) the batch may fail with:
+     *
+     * "Received resultset tuples, but no field structure for them"
+     *
+     * at org.postgresql.core.v3.QueryExecutorImpl.processResults
+     *
+     * Prior to 245b388 it would instead fail with a NullPointerException in
+     * AbstractJdbc2ResultSet.checkColumnIndex
+     *
+     * The cause is complicated. The failure arises because the query gets re-planned mid-batch. This
+     * re-planning clears the cached information about field types. The field type information for
+     * parameters gets re-acquired later but the information for *returned* values does not.
+     *
+     * (The reason why the returned value types aren't recalculated is not yet known.)
+     *
+     * The re-plan's cause is itself complicated.
+     *
+     * The first bind of the parameter, which is null, gets the type oid 0 (unknown/unspecified).
+     * Unless Types.VARCHAR is specified or setString is used, in which case the oid is set to 1043
+     * (varchar).
+     *
+     * The second bind identifies the object class as String so it calls setString internally. This
+     * sets the type to 1043 (varchar).
+     *
+     * The third and subsequent binds, whether null or non-null, will get type 1043, because there's
+     * logic to avoid overwriting a known parameter type with the unknown type oid. This is why the
+     * issue can only occur when null is the first entry.
+     *
+     * When executed the first time a describe is run. This reports the parameter oid to be 25 (text),
+     * because that's the type of the table column the param is being assigned to. That's why the cast
+     * to ?::varchar works - because it overrides the type for the parameter to 1043 (varchar).
+     *
+     * The second execution sees that the bind parameter type is already known to PgJDBC as 1043
+     * (varchar). PgJDBC doesn't see that text and varchar are the same - and, in fact, under some
+     * circumstances they aren't exactly the same. So it discards the planned query and re-plans.
+     *
+     * This issue can be reproduced with any pair of implicitly or assignment castable types; for
+     * example, using Integer in JDBC and bigint in the Pg table will do it.
+     */
+    @Test
+    public void testBatchReturningMixedNulls() throws SQLException {
+        String[] testData = new String[]{null, "test", null, null, null};
+
+        try {
+            Statement setup = con.createStatement();
+            setup.execute("DROP TABLE IF EXISTS mixednulltest;");
+            // It's significant that 'value' is 'text' not 'varchar' here;
+            // if 'varchar' is used then everything works fine.
+            setup.execute("CREATE TABLE mixednulltest (key serial primary key, value text);");
+            setup.close();
+
+            // If the parameter is given as ?::varchar then this issue
+            // does not arise.
+            PreparedStatement st =
+                    con.prepareStatement("INSERT INTO mixednulltest (value) VALUES (?)", new String[]{"key"});
+
+            for (String val : testData) {
+                /*
+                 * This is the crucial bit. It's set to null first time around, so the RETURNING clause's
+                 * type oid is undefined.
+                 *
+                 * The second time around the value is assigned so Pg reports the type oid is TEXT, like the
+                 * table. But we expected VARCHAR.
+                 *
+                 * This causes PgJDBC to replan the query, and breaks other things.
+                 */
+                st.setObject(1, val);
+                st.addBatch();
+            }
+            st.executeBatch();
+            ResultSet rs = st.getGeneratedKeys();
+            for (int i = 1; i <= testData.length; i++) {
+                rs.next();
+                Assert.assertEquals(i, rs.getInt(1));
+            }
+            Assert.assertTrue(!rs.next());
+        } catch (SQLException ex) {
+            ex.getNextException().printStackTrace();
+            throw ex;
+        }
+    }
+
+    @Test
+    public void testBatchWithAlternatingAndUnknownTypes0() throws SQLException {
+        testBatchWithAlternatingAndUnknownTypesN(0);
+    }
+
+    @Test
+    public void testBatchWithAlternatingAndUnknownTypes1() throws SQLException {
+        testBatchWithAlternatingAndUnknownTypesN(1);
+    }
+
+    @Test
+    public void testBatchWithAlternatingAndUnknownTypes2() throws SQLException {
+        testBatchWithAlternatingAndUnknownTypesN(2);
+    }
+
+    @Test
+    public void testBatchWithAlternatingAndUnknownTypes3() throws SQLException {
+        testBatchWithAlternatingAndUnknownTypesN(3);
+    }
+
+    @Test
+    public void testBatchWithAlternatingAndUnknownTypes4() throws SQLException {
+        testBatchWithAlternatingAndUnknownTypesN(4);
+    }
+
+    @Test
+    public void testBatchWithAlternatingAndUnknownTypes5() throws SQLException {
+        testBatchWithAlternatingAndUnknownTypesN(5);
+    }
+
+    @Test
+    public void testBatchWithAlternatingAndUnknownTypes6() throws SQLException {
+        testBatchWithAlternatingAndUnknownTypesN(6);
+    }
+
+    /**
+     * <p>This one is reproduced in regular (non-force binary) mode.</p>
+     *
+     * <p>As of 9.4.1208 the following tests fail:
+     * BatchExecuteTest.testBatchWithAlternatingAndUnknownTypes3
+     * BatchExecuteTest.testBatchWithAlternatingAndUnknownTypes4
+     * BatchExecuteTest.testBatchWithAlternatingAndUnknownTypes5
+     * BatchExecuteTest.testBatchWithAlternatingAndUnknownTypes6</p>
+     *
+     * @param numPreliminaryInserts number of preliminary inserts to make so the statement gets
+     *                              prepared
+     * @throws SQLException in case of failure
+     */
+    public void testBatchWithAlternatingAndUnknownTypesN(int numPreliminaryInserts)
+            throws SQLException {
+        PreparedStatement ps = null;
+        try {
+            con.setAutoCommit(true);
+            // This test requires autoCommit false to reproduce
+            ps = con.prepareStatement("insert into prep(a, d) values(?, ?)");
+            for (int i = 0; i < numPreliminaryInserts; i++) {
+                ps.setNull(1, Types.SMALLINT);
+                ps.setObject(2, new Date(42));
+                ps.addBatch();
+                ps.executeBatch();
+            }
+
+            ps.setObject(1, 43.0);
+            ps.setObject(2, new Date(43));
+            ps.addBatch();
+            ps.setNull(1, Types.SMALLINT);
+            ps.setObject(2, new Date(44));
+            ps.addBatch();
+            ps.executeBatch();
+
+            ps.setObject(1, 45.0);
+            ps.setObject(2, new Date(45)); // <-- this causes "oid of bind unknown, send Describe"
+            ps.addBatch();
+            ps.setNull(1, Types.SMALLINT);
+            ps.setNull(2, Types.DATE);     // <-- this uses Oid.DATE, thus no describe message
+            // As the same query object was reused the describe from Date(45) overwrites
+            // parameter types, thus Double(45)'s type (double) comes instead of SMALLINT.
+            // Thus pgjdbc thinks the prepared statement is prepared for (double, date) types
+            // however in reality the statement is prepared for (smallint, date) types.
+
+            ps.addBatch();
+            ps.executeBatch();
+
+            // This execution with (double, unknown) passes isPreparedForTypes check, and causes
+            // the failure
+            ps.setObject(1, 47.0);
+            ps.setObject(2, new Date(47));
+            ps.addBatch();
+            ps.executeBatch();
+        } catch (BatchUpdateException e) {
+            throw e.getNextException();
+        } finally {
+            TestUtil.closeQuietly(ps);
+        }
     /*
 Here's the log
 11:33:10.708 (1)  FE=> Parse(stmt=null,query="CREATE TABLE prep (a integer, b integer, d date) ",oids={})
@@ -734,40 +760,40 @@ org.postgresql.util.PSQLException: ERROR: incorrect binary data format in bind p
   at org.postgresql.jdbc.PgStatement.executeBatch(PgStatement.java:2534)
   at org.postgresql.test.jdbc2.BatchExecuteTest.testBatchWithAlternatingTypes2(BatchExecuteTest.java:460)
     */
-  }
-
-  /**
-   * Tests {@link PreparedStatement#addBatch} in case types of parameters change from one batch to
-   * another. Change of the datatypes causes re-prepare server-side statement, thus exactly the same
-   * query object might have different statement names.
-   */
-  @Test
-  public void testBatchWithAlternatingTypes() throws SQLException {
-    try {
-      Statement s = con.createStatement();
-      s.execute("BEGIN");
-      PreparedStatement ps;
-      ps = con.prepareStatement("insert into prep(a,b)  values(?::int4,?)");
-      ps.setInt(1, 2);
-      ps.setInt(2, 2);
-      ps.addBatch();
-      ps.addBatch();
-      ps.addBatch();
-      ps.addBatch();
-      ps.addBatch();
-      ps.setString(1, "1");
-      ps.setInt(2, 2);
-      ps.addBatch();
-      ps.executeBatch();
-      ps.setString(1, "2");
-      ps.setInt(2, 2);
-      ps.addBatch();
-      ps.executeBatch();
-      ps.close();
-      s.execute("COMMIT");
-    } catch (BatchUpdateException e) {
-      throw e.getNextException();
     }
+
+    /**
+     * Tests {@link PreparedStatement#addBatch} in case types of parameters change from one batch to
+     * another. Change of the datatypes causes re-prepare server-side statement, thus exactly the same
+     * query object might have different statement names.
+     */
+    @Test
+    public void testBatchWithAlternatingTypes() throws SQLException {
+        try {
+            Statement s = con.createStatement();
+            s.execute("BEGIN");
+            PreparedStatement ps;
+            ps = con.prepareStatement("insert into prep(a,b)  values(?::int4,?)");
+            ps.setInt(1, 2);
+            ps.setInt(2, 2);
+            ps.addBatch();
+            ps.addBatch();
+            ps.addBatch();
+            ps.addBatch();
+            ps.addBatch();
+            ps.setString(1, "1");
+            ps.setInt(2, 2);
+            ps.addBatch();
+            ps.executeBatch();
+            ps.setString(1, "2");
+            ps.setInt(2, 2);
+            ps.addBatch();
+            ps.executeBatch();
+            ps.close();
+            s.execute("COMMIT");
+        } catch (BatchUpdateException e) {
+            throw e.getNextException();
+        }
     /*
 Key part is (see "before the fix"):
      23:00:30.354 (1)  <=BE ParseComplete [S_2]
@@ -1171,216 +1197,194 @@ Server SQLState: 25001)
 23:15:33.934 (1)  FE=> Terminate
 <<<<<<< HEAD
      */
-  }
-
-  @Test
-  public void testSmallBatchUpdateFailureSimple() throws SQLException {
-    con.setAutoCommit(true);
-
-    // update as batch
-    PreparedStatement batchSt = con.prepareStatement("INSERT INTO batchUpdCnt(id) VALUES (?)");
-    batchSt.setString(1, "key-1");
-    batchSt.addBatch();
-
-    batchSt.setString(1, "key-2");
-    batchSt.addBatch();
-
-    int[] batchResult;
-    try {
-      batchResult = batchSt.executeBatch();
-      Assert.fail("Expecting BatchUpdateException as key-2 is duplicated in batchUpdCnt.id. "
-          + " executeBatch returned " + Arrays.toString(batchResult));
-    } catch (BatchUpdateException ex) {
-      batchResult = ex.getUpdateCounts();
-    } finally {
-      TestUtil.closeQuietly(batchSt);
     }
 
-    int newCount = getBatchUpdCount();
-    if (newCount == 2) {
-      // key-1 did succeed
-      Assert.assertTrue("batchResult[0] should be 1 or SUCCESS_NO_INFO since 'key-1' was inserted,"
-          + " actual result is " + Arrays.toString(batchResult),
-          batchResult[0] == 1 || batchResult[0] == Statement.SUCCESS_NO_INFO);
-    } else {
-      Assert.assertTrue("batchResult[0] should be 0 or EXECUTE_FAILED since 'key-1' was NOT inserted,"
-              + " actual result is " + Arrays.toString(batchResult),
-          batchResult[0] == 0 || batchResult[0] == Statement.EXECUTE_FAILED);
+    @Test
+    public void testSmallBatchUpdateFailureSimple() throws SQLException {
+        con.setAutoCommit(true);
+
+        // update as batch
+        PreparedStatement batchSt = con.prepareStatement("INSERT INTO batchUpdCnt(id) VALUES (?)");
+        batchSt.setString(1, "key-1");
+        batchSt.addBatch();
+
+        batchSt.setString(1, "key-2");
+        batchSt.addBatch();
+
+        int[] batchResult;
+        try {
+            batchResult = batchSt.executeBatch();
+            Assert.fail("Expecting BatchUpdateException as key-2 is duplicated in batchUpdCnt.id. "
+                    + " executeBatch returned " + Arrays.toString(batchResult));
+        } catch (BatchUpdateException ex) {
+            batchResult = ex.getUpdateCounts();
+        } finally {
+            TestUtil.closeQuietly(batchSt);
+        }
+
+        int newCount = getBatchUpdCount();
+        if (newCount == 2) {
+            // key-1 did succeed
+            Assert.assertTrue("batchResult[0] should be 1 or SUCCESS_NO_INFO since 'key-1' was inserted,"
+                            + " actual result is " + Arrays.toString(batchResult),
+                    batchResult[0] == 1 || batchResult[0] == Statement.SUCCESS_NO_INFO);
+        } else {
+            Assert.assertTrue("batchResult[0] should be 0 or EXECUTE_FAILED since 'key-1' was NOT inserted,"
+                            + " actual result is " + Arrays.toString(batchResult),
+                    batchResult[0] == 0 || batchResult[0] == Statement.EXECUTE_FAILED);
+        }
+
+        Assert.assertEquals("'key-2' insertion should have Assert.failed",
+                Statement.EXECUTE_FAILED, batchResult[1]);
     }
 
-    Assert.assertEquals("'key-2' insertion should have Assert.failed",
-        Statement.EXECUTE_FAILED, batchResult[1]);
-  }
+    private int getBatchUpdCount() throws SQLException {
+        PreparedStatement ps = con.prepareStatement("select count(*) from batchUpdCnt");
+        ResultSet rs = ps.executeQuery();
+        Assert.assertTrue("count(*) must return 1 row", rs.next());
+        return rs.getInt(1);
+    }
 
-  private int getBatchUpdCount() throws SQLException {
-    PreparedStatement ps = con.prepareStatement("select count(*) from batchUpdCnt");
-    ResultSet rs = ps.executeQuery();
-    Assert.assertTrue("count(*) must return 1 row", rs.next());
-    return rs.getInt(1);
-  }
-
-  /**
-   * Check batching using two individual statements that are both the same type.
-   * Test coverage to check default behaviour is not broken.
-   * @throws SQLException for issues during test
-   */
-  @Test
-  public void testBatchWithRepeatedInsertStatement() throws SQLException {
-    PreparedStatement pstmt = null;
-    /* Optimization to re-write insert statements is disabled by default.
-     * Do nothing here.
+    /**
+     * Check batching using two individual statements that are both the same type.
+     * Test coverage to check default behaviour is not broken.
+     *
+     * @throws SQLException for issues during test
      */
-    try {
-      pstmt = con.prepareStatement("INSERT INTO testbatch VALUES (?,?)");
-      pstmt.setInt(1, 1);
-      pstmt.setInt(2, 1);
-      pstmt.addBatch(); //statement one
-      pstmt.setInt(1, 2);
-      pstmt.setInt(2, 2);
-      pstmt.addBatch();//statement two
-      int[] outcome = pstmt.executeBatch();
+    @Test
+    public void testBatchWithRepeatedInsertStatement() throws SQLException {
+        PreparedStatement pstmt = null;
+        /* Optimization to re-write insert statements is disabled by default.
+         * Do nothing here.
+         */
+        try {
+            pstmt = con.prepareStatement("INSERT INTO testbatch VALUES (?,?)");
+            pstmt.setInt(1, 1);
+            pstmt.setInt(2, 1);
+            pstmt.addBatch(); //statement one
+            pstmt.setInt(1, 2);
+            pstmt.setInt(2, 2);
+            pstmt.addBatch();//statement two
+            int[] outcome = pstmt.executeBatch();
 
-      Assert.assertNotNull(outcome);
-      Assert.assertEquals(2, outcome.length);
-      int rowsInserted = insertRewrite ? Statement.SUCCESS_NO_INFO : 1;
-      Assert.assertEquals(rowsInserted, outcome[0]);
-      Assert.assertEquals(rowsInserted, outcome[1]);
-    } catch (SQLException sqle) {
-      Assert.fail("Failed to execute two statements added to a batch. Reason:" + sqle.getMessage());
-    } finally {
-      TestUtil.closeQuietly(pstmt);
+            Assert.assertNotNull(outcome);
+            Assert.assertEquals(2, outcome.length);
+            int rowsInserted = insertRewrite ? Statement.SUCCESS_NO_INFO : 1;
+            Assert.assertEquals(rowsInserted, outcome[0]);
+            Assert.assertEquals(rowsInserted, outcome[1]);
+        } catch (SQLException sqle) {
+            Assert.fail("Failed to execute two statements added to a batch. Reason:" + sqle.getMessage());
+        } finally {
+            TestUtil.closeQuietly(pstmt);
+        }
     }
-  }
 
-  /**
-  * Test case to make sure the update counter is correct for the
-  * one statement executed. Test coverage to check default behaviour is
-  * not broken.
-  * @throws SQLException for issues during test
-  */
-  @Test
-  public void testBatchWithMultiInsert() throws SQLException {
-    PreparedStatement pstmt = null;
-    try {
-      pstmt = con.prepareStatement("INSERT INTO testbatch VALUES (?,?),(?,?)");
-      pstmt.setInt(1, 1);
-      pstmt.setInt(2, 1);
-      pstmt.setInt(3, 2);
-      pstmt.setInt(4, 2);
-      pstmt.addBatch();//statement one
-      int[] outcome = pstmt.executeBatch();
-      Assert.assertNotNull(outcome);
-      Assert.assertEquals(1, outcome.length);
-      Assert.assertEquals(2, outcome[0]);
-    } catch (SQLException sqle) {
-      Assert.fail("Failed to execute two statements added to a batch. Reason:" + sqle.getMessage());
-    } finally {
-      TestUtil.closeQuietly(pstmt);
+    /**
+     * Test case to make sure the update counter is correct for the
+     * one statement executed. Test coverage to check default behaviour is
+     * not broken.
+     *
+     * @throws SQLException for issues during test
+     */
+    @Test
+    public void testBatchWithMultiInsert() throws SQLException {
+        PreparedStatement pstmt = null;
+        try {
+            pstmt = con.prepareStatement("INSERT INTO testbatch VALUES (?,?),(?,?)");
+            pstmt.setInt(1, 1);
+            pstmt.setInt(2, 1);
+            pstmt.setInt(3, 2);
+            pstmt.setInt(4, 2);
+            pstmt.addBatch();//statement one
+            int[] outcome = pstmt.executeBatch();
+            Assert.assertNotNull(outcome);
+            Assert.assertEquals(1, outcome.length);
+            Assert.assertEquals(2, outcome[0]);
+        } catch (SQLException sqle) {
+            Assert.fail("Failed to execute two statements added to a batch. Reason:" + sqle.getMessage());
+        } finally {
+            TestUtil.closeQuietly(pstmt);
+        }
     }
-  }
 
-  /**
-  * Test case to make sure the update counter is correct for the
-  * two double-row statements executed. Test coverage to check default behaviour is
-  * not broken.
-  * @throws SQLException for issues during test
-  */
-  @Test
-  public void testBatchWithTwoMultiInsertStatements() throws SQLException {
-    PreparedStatement pstmt = null;
-    try {
-      pstmt = con.prepareStatement("INSERT INTO testbatch VALUES (?,?),(?,?)");
-      pstmt.setInt(1, 1);
-      pstmt.setInt(2, 1);
-      pstmt.setInt(3, 2);
-      pstmt.setInt(4, 2);
-      pstmt.addBatch(); //statement one
-      pstmt.setInt(1, 3);
-      pstmt.setInt(2, 3);
-      pstmt.setInt(3, 4);
-      pstmt.setInt(4, 4);
-      pstmt.addBatch(); //statement two
-      int[] outcome = pstmt.executeBatch();
-      int rowsInserted = insertRewrite ? Statement.SUCCESS_NO_INFO : 2;
-      Assert.assertEquals(
-          "Inserting two multi-valued statements with two rows each. Expecting {2, 2} rows inserted (or SUCCESS_NO_INFO)",
-          Arrays.toString(new int[]{rowsInserted, rowsInserted}),
-          Arrays.toString(outcome));
-    } catch (SQLException sqle) {
-      Assert.fail("Failed to execute two statements added to a batch. Reason:" + sqle.getMessage());
-    } finally {
-      TestUtil.closeQuietly(pstmt);
+    /**
+     * Test case to make sure the update counter is correct for the
+     * two double-row statements executed. Test coverage to check default behaviour is
+     * not broken.
+     *
+     * @throws SQLException for issues during test
+     */
+    @Test
+    public void testBatchWithTwoMultiInsertStatements() throws SQLException {
+        PreparedStatement pstmt = null;
+        try {
+            pstmt = con.prepareStatement("INSERT INTO testbatch VALUES (?,?),(?,?)");
+            pstmt.setInt(1, 1);
+            pstmt.setInt(2, 1);
+            pstmt.setInt(3, 2);
+            pstmt.setInt(4, 2);
+            pstmt.addBatch(); //statement one
+            pstmt.setInt(1, 3);
+            pstmt.setInt(2, 3);
+            pstmt.setInt(3, 4);
+            pstmt.setInt(4, 4);
+            pstmt.addBatch(); //statement two
+            int[] outcome = pstmt.executeBatch();
+            int rowsInserted = insertRewrite ? Statement.SUCCESS_NO_INFO : 2;
+            Assert.assertEquals(
+                    "Inserting two multi-valued statements with two rows each. Expecting {2, 2} rows inserted (or SUCCESS_NO_INFO)",
+                    Arrays.toString(new int[]{rowsInserted, rowsInserted}),
+                    Arrays.toString(outcome));
+        } catch (SQLException sqle) {
+            Assert.fail("Failed to execute two statements added to a batch. Reason:" + sqle.getMessage());
+        } finally {
+            TestUtil.closeQuietly(pstmt);
+        }
     }
-  }
 
-  public static void assertSimpleInsertBatch(int n, int[] actual) {
-    int[] expected = new int[n];
-    Arrays.fill(expected, 1);
-    assertBatchResult(n + " addBatch, 1 row each", expected, actual);
-  }
+    @Test
+    public void testServerPrepareMultipleRows() throws SQLException {
+        PreparedStatement ps = null;
+        try {
+            ps = con.prepareStatement("INSERT INTO prep(a) VALUES (?)");
+            // 2 is not enough for insertRewrite=true case since it would get executed as a single multi-insert statement
+            for (int i = 0; i < 3; i++) {
+                ps.setInt(1, i);
+                ps.addBatch();
+            }
+            int[] actual = ps.executeBatch();
+            Assert.assertTrue(
+                    "More than 1 row is inserted via executeBatch, it should lead to multiple server statements, thus the statements should be server-prepared",
+                    ((PGStatement) ps).isUseServerPrepare());
+            assertBatchResult("3 rows inserted via batch", new int[]{1, 1, 1}, actual);
+        } finally {
+            TestUtil.closeQuietly(ps);
+        }
+    }
 
-  public static void assertBatchResult(String message, int[] expected, int[] actual) {
-    int[] clone = expected.clone();
-    boolean hasChanges = false;
-    for (int i = 0; i < actual.length; i++) {
-      int a = actual[i];
-      if (a == Statement.SUCCESS_NO_INFO && expected[i] >= 0) {
-        clone[i] = a;
-        hasChanges = true;
-      }
+    @Test
+    public void testNoServerPrepareOneRow() throws SQLException {
+        PreparedStatement ps = null;
+        try {
+            ps = con.prepareStatement("INSERT INTO prep(a) VALUES (?)");
+            ps.setInt(1, 1);
+            ps.addBatch();
+            int[] actual = ps.executeBatch();
+            int prepareThreshold = ((PGStatement) ps).getPrepareThreshold();
+            if (prepareThreshold == 1) {
+                Assert.assertTrue(
+                        "prepareThreshold=" + prepareThreshold
+                                + " thus the statement should be server-prepared",
+                        ((PGStatement) ps).isUseServerPrepare());
+            } else {
+                Assert.assertFalse(
+                        "Just one row inserted via executeBatch, prepareThreshold=" + prepareThreshold
+                                + " thus the statement should not be server-prepared",
+                        ((PGStatement) ps).isUseServerPrepare());
+            }
+            assertBatchResult("1 rows inserted via batch", new int[]{1}, actual);
+        } finally {
+            TestUtil.closeQuietly(ps);
+        }
     }
-    if (hasChanges) {
-      message += ", original expectation: " + Arrays.toString(expected);
-    }
-    Assert.assertEquals(
-        message,
-        Arrays.toString(clone),
-        Arrays.toString(actual));
-  }
-
-  @Test
-  public void testServerPrepareMultipleRows() throws SQLException {
-    PreparedStatement ps = null;
-    try {
-      ps = con.prepareStatement("INSERT INTO prep(a) VALUES (?)");
-      // 2 is not enough for insertRewrite=true case since it would get executed as a single multi-insert statement
-      for (int i = 0; i < 3; i++) {
-        ps.setInt(1, i);
-        ps.addBatch();
-      }
-      int[] actual = ps.executeBatch();
-      Assert.assertTrue(
-          "More than 1 row is inserted via executeBatch, it should lead to multiple server statements, thus the statements should be server-prepared",
-          ((PGStatement) ps).isUseServerPrepare());
-      assertBatchResult("3 rows inserted via batch", new int[]{1, 1, 1}, actual);
-    } finally {
-      TestUtil.closeQuietly(ps);
-    }
-  }
-
-  @Test
-  public void testNoServerPrepareOneRow() throws SQLException {
-    PreparedStatement ps = null;
-    try {
-      ps = con.prepareStatement("INSERT INTO prep(a) VALUES (?)");
-      ps.setInt(1, 1);
-      ps.addBatch();
-      int[] actual = ps.executeBatch();
-      int prepareThreshold = ((PGStatement) ps).getPrepareThreshold();
-      if (prepareThreshold == 1) {
-        Assert.assertTrue(
-            "prepareThreshold=" + prepareThreshold
-                + " thus the statement should be server-prepared",
-            ((PGStatement) ps).isUseServerPrepare());
-      } else {
-        Assert.assertFalse(
-            "Just one row inserted via executeBatch, prepareThreshold=" + prepareThreshold
-                + " thus the statement should not be server-prepared",
-            ((PGStatement) ps).isUseServerPrepare());
-      }
-      assertBatchResult("1 rows inserted via batch", new int[]{1}, actual);
-    } finally {
-      TestUtil.closeQuietly(ps);
-    }
-  }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/BatchFailureTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/BatchFailureTest.java
index 3d25615..4666eaf 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/BatchFailureTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/BatchFailureTest.java
@@ -29,246 +29,246 @@ import java.util.Set;
 
 @RunWith(Parameterized.class)
 public class BatchFailureTest extends BaseTest4 {
-  private final BatchType batchType;
-  private final AutoCommit autoCommit;
-  private final FailMode failMode;
-  private final FailPosition failPosition;
-  private final BinaryMode binaryMode;
-  private final boolean insertRewrite;
+    private final BatchType batchType;
+    private final AutoCommit autoCommit;
+    private final FailMode failMode;
+    private final FailPosition failPosition;
+    private final BinaryMode binaryMode;
+    private final boolean insertRewrite;
 
-  enum BatchType {
-    SIMPLE {
-      @Override
-      public Statement createStatement(Connection con) throws SQLException {
-        return con.createStatement();
-      }
-    },
-    PREPARED {
-      @Override
-      public Statement createStatement(Connection con) throws SQLException {
-        return con.prepareStatement("INSERT INTO batchUpdCnt(id) VALUES (?)");
-      }
-    },
-    PREPARED_WITH_GENERATED {
-      @Override
-      public Statement createStatement(Connection con) throws SQLException {
-        return con.prepareStatement("INSERT INTO batchUpdCnt(id) VALUES (?)", new String[]{"id"});
-      }
-    };
-
-    public abstract Statement createStatement(Connection con) throws SQLException;
-
-    public void addRow(Statement statement, String value) throws SQLException {
-      switch (this) {
-        case SIMPLE:
-          statement.addBatch("INSERT INTO batchUpdCnt(id) VALUES ('" + value + "')");
-          break;
-        case PREPARED:
-        case PREPARED_WITH_GENERATED:
-          PreparedStatement ps = (PreparedStatement) statement;
-          ps.setString(1, value);
-          ps.addBatch();
-          break;
-      }
-    }
-  }
-
-  private enum FailMode {
-    NO_FAIL_JUST_INSERTS, NO_FAIL_SELECT,
-    FAIL_VIA_SELECT_PARSE, FAIL_VIA_SELECT_RUNTIME,
-    FAIL_VIA_DUP_KEY;
-
-    public boolean supports(BatchType batchType) {
-      return batchType != BatchType.SIMPLE ^ this.name().contains("SELECT");
+    public BatchFailureTest(BatchType batchType, AutoCommit autoCommit,
+                            FailMode failMode, FailPosition failPosition, BinaryMode binaryMode,
+                            boolean insertRewrite) {
+        this.batchType = batchType;
+        this.autoCommit = autoCommit;
+        this.failMode = failMode;
+        this.failPosition = failPosition;
+        this.binaryMode = binaryMode;
+        this.insertRewrite = insertRewrite;
     }
 
-    public void injectFailure(Statement statement, BatchType batchType) throws SQLException {
-      switch (this) {
-        case NO_FAIL_JUST_INSERTS:
-          break;
-        case NO_FAIL_SELECT:
-          statement.addBatch("select 1 union all select 2");
-          break;
-        case FAIL_VIA_SELECT_RUNTIME:
-          statement.addBatch("select 0/count(*) where 1=2");
-          break;
-        case FAIL_VIA_SELECT_PARSE:
-          statement.addBatch("seeeeleeeect 1");
-          break;
-        case FAIL_VIA_DUP_KEY:
-          batchType.addRow(statement, "key-2");
-          break;
-        default:
-          throw new IllegalArgumentException("Unexpected value " + this);
-      }
-    }
-  }
-
-  private enum FailPosition {
-    NONE, FIRST_ROW, SECOND_ROW, MIDDLE, ALMOST_LAST_ROW, LAST_ROW;
-
-    public boolean supports(FailMode mode) {
-      return this == NONE ^ mode.name().startsWith("FAIL");
-    }
-  }
-
-  public BatchFailureTest(BatchType batchType, AutoCommit autoCommit,
-      FailMode failMode, FailPosition failPosition, BinaryMode binaryMode,
-      boolean insertRewrite) {
-    this.batchType = batchType;
-    this.autoCommit = autoCommit;
-    this.failMode = failMode;
-    this.failPosition = failPosition;
-    this.binaryMode = binaryMode;
-    this.insertRewrite = insertRewrite;
-  }
-
-  @Parameterized.Parameters(name = "{index}: batchTest(mode={2}, position={3}, autoCommit={1}, batchType={0}, generateKeys={1}, binary={4}, insertRewrite={5})")
-  public static Iterable<Object[]> data() {
-    Collection<Object[]> ids = new ArrayList<>();
-    boolean[] booleans = new boolean[]{true, false};
-    for (BatchType batchType : BatchType.values()) {
-      for (FailMode failMode : FailMode.values()) {
-        if (!failMode.supports(batchType)) {
-          continue;
-        }
-        for (FailPosition failPosition : FailPosition.values()) {
-          if (!failPosition.supports(failMode)) {
-            continue;
-          }
-          for (AutoCommit autoCommit : AutoCommit.values()) {
-            for (BinaryMode binaryMode : BinaryMode.values()) {
-              for (boolean insertRewrite : booleans) {
-                ids.add(new Object[]{batchType, autoCommit, failMode, failPosition, binaryMode, insertRewrite});
-              }
+    @Parameterized.Parameters(name = "{index}: batchTest(mode={2}, position={3}, autoCommit={1}, batchType={0}, generateKeys={1}, binary={4}, insertRewrite={5})")
+    public static Iterable<Object[]> data() {
+        Collection<Object[]> ids = new ArrayList<>();
+        boolean[] booleans = new boolean[]{true, false};
+        for (BatchType batchType : BatchType.values()) {
+            for (FailMode failMode : FailMode.values()) {
+                if (!failMode.supports(batchType)) {
+                    continue;
+                }
+                for (FailPosition failPosition : FailPosition.values()) {
+                    if (!failPosition.supports(failMode)) {
+                        continue;
+                    }
+                    for (AutoCommit autoCommit : AutoCommit.values()) {
+                        for (BinaryMode binaryMode : BinaryMode.values()) {
+                            for (boolean insertRewrite : booleans) {
+                                ids.add(new Object[]{batchType, autoCommit, failMode, failPosition, binaryMode, insertRewrite});
+                            }
+                        }
+                    }
+                }
             }
-          }
         }
-      }
-    }
-    return ids;
-  }
-
-  @Override
-  protected void updateProperties(Properties props) {
-    if (binaryMode == BinaryMode.FORCE) {
-      forceBinary(props);
-    }
-    PGProperty.REWRITE_BATCHED_INSERTS.set(props, insertRewrite);
-  }
-
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-    TestUtil.createTempTable(con, "batchUpdCnt", "id varchar(512) primary key, data varchar(512)");
-    Statement stmt = con.createStatement();
-    stmt.executeUpdate("INSERT INTO batchUpdCnt(id) VALUES ('key-2')");
-    stmt.close();
-    con.setAutoCommit(autoCommit == AutoCommit.YES);
-  }
-
-  @Test
-  public void run() throws SQLException {
-    Statement statement = batchType.createStatement(con);
-
-    int minBatchResults = 0;
-    int pos = 0;
-    if (failPosition == FailPosition.FIRST_ROW) {
-      failMode.injectFailure(statement, batchType);
-      pos++;
-      minBatchResults = pos;
+        return ids;
     }
 
-    batchType.addRow(statement, "key-1");
-    pos++;
-
-    if (failPosition == FailPosition.SECOND_ROW) {
-      failMode.injectFailure(statement, batchType);
-      pos++;
-      minBatchResults = pos;
+    @Override
+    protected void updateProperties(Properties props) {
+        if (binaryMode == BinaryMode.FORCE) {
+            forceBinary(props);
+        }
+        PGProperty.REWRITE_BATCHED_INSERTS.set(props, insertRewrite);
     }
 
-    for (int i = 0; i < 1000; i++) {
-      batchType.addRow(statement, "key_" + i);
-      pos++;
-      if (failPosition == FailPosition.ALMOST_LAST_ROW && i == 997
-          || failPosition == FailPosition.MIDDLE && i == 500) {
-        failMode.injectFailure(statement, batchType);
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        TestUtil.createTempTable(con, "batchUpdCnt", "id varchar(512) primary key, data varchar(512)");
+        Statement stmt = con.createStatement();
+        stmt.executeUpdate("INSERT INTO batchUpdCnt(id) VALUES ('key-2')");
+        stmt.close();
+        con.setAutoCommit(autoCommit == AutoCommit.YES);
+    }
+
+    @Test
+    public void run() throws SQLException {
+        Statement statement = batchType.createStatement(con);
+
+        int minBatchResults = 0;
+        int pos = 0;
+        if (failPosition == FailPosition.FIRST_ROW) {
+            failMode.injectFailure(statement, batchType);
+            pos++;
+            minBatchResults = pos;
+        }
+
+        batchType.addRow(statement, "key-1");
         pos++;
-        minBatchResults = pos;
-      }
-    }
 
-    if (failPosition == FailPosition.LAST_ROW) {
-      failMode.injectFailure(statement, batchType);
-      pos++;
-      minBatchResults = pos;
-    }
-
-    List<String> keys = new ArrayList<>();
-    int[] batchResult;
-    int expectedRows = 1;
-    try {
-      batchResult = statement.executeBatch();
-      Assert.assertTrue("Expecting BatchUpdateException due to " + failMode
-              + ", executeBatch returned " + Arrays.toString(batchResult),
-          failPosition == FailPosition.NONE);
-      expectedRows = pos + 1; // +1 since key-2 is already in the DB
-    } catch (BatchUpdateException ex) {
-      batchResult = ex.getUpdateCounts();
-      Assert.assertTrue("Should not fail since fail mode should be " + failMode
-              + ", executeBatch returned " + Arrays.toString(batchResult),
-          failPosition != FailPosition.NONE);
-
-      for (int i : batchResult) {
-        if (i != Statement.EXECUTE_FAILED) {
-          expectedRows++;
+        if (failPosition == FailPosition.SECOND_ROW) {
+            failMode.injectFailure(statement, batchType);
+            pos++;
+            minBatchResults = pos;
         }
-      }
 
-      Assert.assertTrue("Batch should fail at row " + minBatchResults
-              + ", thus at least " + minBatchResults
-              + " items should be returned, actual result is " + batchResult.length + " items, "
-              + Arrays.toString(batchResult),
-          batchResult.length >= minBatchResults);
-    } finally {
-      if (batchType == BatchType.PREPARED_WITH_GENERATED) {
-        ResultSet rs = statement.getGeneratedKeys();
-        while (rs.next()) {
-          keys.add(rs.getString(1));
+        for (int i = 0; i < 1000; i++) {
+            batchType.addRow(statement, "key_" + i);
+            pos++;
+            if (failPosition == FailPosition.ALMOST_LAST_ROW && i == 997
+                    || failPosition == FailPosition.MIDDLE && i == 500) {
+                failMode.injectFailure(statement, batchType);
+                pos++;
+                minBatchResults = pos;
+            }
         }
-      }
-      statement.close();
+
+        if (failPosition == FailPosition.LAST_ROW) {
+            failMode.injectFailure(statement, batchType);
+            pos++;
+            minBatchResults = pos;
+        }
+
+        List<String> keys = new ArrayList<>();
+        int[] batchResult;
+        int expectedRows = 1;
+        try {
+            batchResult = statement.executeBatch();
+            Assert.assertTrue("Expecting BatchUpdateException due to " + failMode
+                            + ", executeBatch returned " + Arrays.toString(batchResult),
+                    failPosition == FailPosition.NONE);
+            expectedRows = pos + 1; // +1 since key-2 is already in the DB
+        } catch (BatchUpdateException ex) {
+            batchResult = ex.getUpdateCounts();
+            Assert.assertTrue("Should not fail since fail mode should be " + failMode
+                            + ", executeBatch returned " + Arrays.toString(batchResult),
+                    failPosition != FailPosition.NONE);
+
+            for (int i : batchResult) {
+                if (i != Statement.EXECUTE_FAILED) {
+                    expectedRows++;
+                }
+            }
+
+            Assert.assertTrue("Batch should fail at row " + minBatchResults
+                            + ", thus at least " + minBatchResults
+                            + " items should be returned, actual result is " + batchResult.length + " items, "
+                            + Arrays.toString(batchResult),
+                    batchResult.length >= minBatchResults);
+        } finally {
+            if (batchType == BatchType.PREPARED_WITH_GENERATED) {
+                ResultSet rs = statement.getGeneratedKeys();
+                while (rs.next()) {
+                    keys.add(rs.getString(1));
+                }
+            }
+            statement.close();
+        }
+
+        if (!con.getAutoCommit()) {
+            con.commit();
+        }
+
+        int finalCount = getBatchUpdCount();
+        Assert.assertEquals(
+                "Number of new rows in batchUpdCnt should match number of non-error batchResult items"
+                        + Arrays.toString(batchResult),
+                expectedRows - 1, finalCount - 1);
+
+        if (batchType != BatchType.PREPARED_WITH_GENERATED) {
+            return;
+        }
+
+        if (finalCount > 1) {
+            Assert.assertFalse((finalCount - 1) + " rows were inserted, thus expecting generated keys",
+                    keys.isEmpty());
+        }
+        Set<String> uniqueKeys = new HashSet<>(keys);
+        Assert.assertEquals("Generated keys should be unique: " + keys, keys.size(), uniqueKeys.size());
+        Assert.assertEquals("Number of generated keys should match the number of inserted rows" + keys,
+                keys.size(), finalCount - 1);
     }
 
-    if (!con.getAutoCommit()) {
-      con.commit();
+    private int getBatchUpdCount() throws SQLException {
+        PreparedStatement ps = con.prepareStatement("select count(*) from batchUpdCnt");
+        ResultSet rs = ps.executeQuery();
+        Assert.assertTrue("count(*) must return 1 row", rs.next());
+        return rs.getInt(1);
     }
 
-    int finalCount = getBatchUpdCount();
-    Assert.assertEquals(
-        "Number of new rows in batchUpdCnt should match number of non-error batchResult items"
-            + Arrays.toString(batchResult),
-        expectedRows - 1, finalCount - 1);
+    enum BatchType {
+        SIMPLE {
+            @Override
+            public Statement createStatement(Connection con) throws SQLException {
+                return con.createStatement();
+            }
+        },
+        PREPARED {
+            @Override
+            public Statement createStatement(Connection con) throws SQLException {
+                return con.prepareStatement("INSERT INTO batchUpdCnt(id) VALUES (?)");
+            }
+        },
+        PREPARED_WITH_GENERATED {
+            @Override
+            public Statement createStatement(Connection con) throws SQLException {
+                return con.prepareStatement("INSERT INTO batchUpdCnt(id) VALUES (?)", new String[]{"id"});
+            }
+        };
 
-    if (batchType != BatchType.PREPARED_WITH_GENERATED) {
-      return;
+        public abstract Statement createStatement(Connection con) throws SQLException;
+
+        public void addRow(Statement statement, String value) throws SQLException {
+            switch (this) {
+                case SIMPLE:
+                    statement.addBatch("INSERT INTO batchUpdCnt(id) VALUES ('" + value + "')");
+                    break;
+                case PREPARED:
+                case PREPARED_WITH_GENERATED:
+                    PreparedStatement ps = (PreparedStatement) statement;
+                    ps.setString(1, value);
+                    ps.addBatch();
+                    break;
+            }
+        }
     }
 
-    if (finalCount > 1) {
-      Assert.assertFalse((finalCount - 1) + " rows were inserted, thus expecting generated keys",
-          keys.isEmpty());
-    }
-    Set<String> uniqueKeys = new HashSet<>(keys);
-    Assert.assertEquals("Generated keys should be unique: " + keys, keys.size(), uniqueKeys.size());
-    Assert.assertEquals("Number of generated keys should match the number of inserted rows" + keys,
-        keys.size(), finalCount - 1);
-  }
+    private enum FailMode {
+        NO_FAIL_JUST_INSERTS, NO_FAIL_SELECT,
+        FAIL_VIA_SELECT_PARSE, FAIL_VIA_SELECT_RUNTIME,
+        FAIL_VIA_DUP_KEY;
 
-  private int getBatchUpdCount() throws SQLException {
-    PreparedStatement ps = con.prepareStatement("select count(*) from batchUpdCnt");
-    ResultSet rs = ps.executeQuery();
-    Assert.assertTrue("count(*) must return 1 row", rs.next());
-    return rs.getInt(1);
-  }
+        public boolean supports(BatchType batchType) {
+            return batchType != BatchType.SIMPLE ^ this.name().contains("SELECT");
+        }
+
+        public void injectFailure(Statement statement, BatchType batchType) throws SQLException {
+            switch (this) {
+                case NO_FAIL_JUST_INSERTS:
+                    break;
+                case NO_FAIL_SELECT:
+                    statement.addBatch("select 1 union all select 2");
+                    break;
+                case FAIL_VIA_SELECT_RUNTIME:
+                    statement.addBatch("select 0/count(*) where 1=2");
+                    break;
+                case FAIL_VIA_SELECT_PARSE:
+                    statement.addBatch("seeeeleeeect 1");
+                    break;
+                case FAIL_VIA_DUP_KEY:
+                    batchType.addRow(statement, "key-2");
+                    break;
+                default:
+                    throw new IllegalArgumentException("Unexpected value " + this);
+            }
+        }
+    }
+
+    private enum FailPosition {
+        NONE, FIRST_ROW, SECOND_ROW, MIDDLE, ALMOST_LAST_ROW, LAST_ROW;
+
+        public boolean supports(FailMode mode) {
+            return this == NONE ^ mode.name().startsWith("FAIL");
+        }
+    }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/BatchedInsertReWriteEnabledTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/BatchedInsertReWriteEnabledTest.java
index b804e05..7f20e38 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/BatchedInsertReWriteEnabledTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/BatchedInsertReWriteEnabledTest.java
@@ -25,425 +25,425 @@ import java.util.Properties;
 
 @RunWith(Parameterized.class)
 public class BatchedInsertReWriteEnabledTest extends BaseTest4 {
-  private final AutoCommit autoCommit;
+    private final AutoCommit autoCommit;
 
-  public BatchedInsertReWriteEnabledTest(AutoCommit autoCommit,
-      BinaryMode binaryMode) {
-    this.autoCommit = autoCommit;
-    setBinaryMode(binaryMode);
-  }
-
-  @Parameterized.Parameters(name = "{index}: autoCommit={0}, binary={1}")
-  public static Iterable<Object[]> data() {
-    Collection<Object[]> ids = new ArrayList<>();
-    for (AutoCommit autoCommit : AutoCommit.values()) {
-      for (BinaryMode binaryMode : BinaryMode.values()) {
-        ids.add(new Object[]{autoCommit, binaryMode});
-      }
+    public BatchedInsertReWriteEnabledTest(AutoCommit autoCommit,
+                                           BinaryMode binaryMode) {
+        this.autoCommit = autoCommit;
+        setBinaryMode(binaryMode);
     }
-    return ids;
-  }
 
-  /* Set up the fixture for this testcase: a connection to a database with
-  a table for this test. */
-  public void setUp() throws Exception {
-    super.setUp();
-    TestUtil.createTable(con, "testbatch", "pk INTEGER, col1 VARCHAR, col2 INTEGER");
-    con.setAutoCommit(autoCommit == AutoCommit.YES);
-  }
-
-  // Tear down the fixture for this test case.
-  public void tearDown() throws SQLException {
-    TestUtil.dropTable(con, "testbatch");
-    super.tearDown();
-  }
-
-  @Override
-  protected void updateProperties(Properties props) {
-    super.updateProperties(props);
-    PGProperty.REWRITE_BATCHED_INSERTS.set(props, true);
-  }
-
-  /**
-   * Check batching using two individual statements that are both the same type.
-   * Test to check the re-write optimisation behaviour.
-   */
-
-  @Test
-  public void testBatchWithReWrittenRepeatedInsertStatementOptimizationEnabled()
-      throws SQLException {
-    PreparedStatement pstmt = null;
-    try {
-      pstmt = con.prepareStatement("INSERT INTO testbatch VALUES (?,?)");
-      pstmt.setInt(1, 1);
-      pstmt.setInt(2, 2);
-      pstmt.addBatch();
-      pstmt.setInt(1, 3);
-      pstmt.setInt(2, 4);
-      pstmt.addBatch();
-      pstmt.setInt(1, 5);
-      pstmt.setInt(2, 6);
-      pstmt.addBatch();
-      BatchExecuteTest.assertSimpleInsertBatch(3, pstmt.executeBatch());
-      TestUtil.assertNumberOfRows(con, "testbatch", 3, "3 rows inserted");
-
-      /*
-       * Now check the ps can be reused. The batched statement should be reset
-       * and have no knowledge of prior re-written batch. This test uses a
-       * different batch size. To test if the driver detects the different size
-       * and prepares the statement on with the backend. If not then an
-       * exception will be thrown for an unknown prepared statement.
-       */
-      pstmt.setInt(1, 1);
-      pstmt.setInt(2, 2);
-      pstmt.addBatch();
-      pstmt.setInt(1, 3);
-      pstmt.setInt(2, 4);
-      pstmt.addBatch();
-      pstmt.setInt(1, 5);
-      pstmt.setInt(2, 6);
-      pstmt.addBatch();
-      pstmt.setInt(1, 7);
-      pstmt.setInt(2, 8);
-      pstmt.addBatch();
-      BatchExecuteTest.assertSimpleInsertBatch(4, pstmt.executeBatch());
-      TestUtil.assertNumberOfRows(con, "testbatch", 7, "3+4 rows inserted");
-
-      pstmt.setInt(1, 1);
-      pstmt.setInt(2, 2);
-      pstmt.addBatch();
-      pstmt.setInt(1, 3);
-      pstmt.setInt(2, 4);
-      pstmt.addBatch();
-      pstmt.setInt(1, 5);
-      pstmt.setInt(2, 6);
-      pstmt.addBatch();
-      pstmt.setInt(1, 7);
-      pstmt.setInt(2, 8);
-      pstmt.addBatch();
-      BatchExecuteTest.assertSimpleInsertBatch(4, pstmt.executeBatch());
-      TestUtil.assertNumberOfRows(con, "testbatch", 11, "3+4+4 rows inserted");
-
-    } finally {
-      TestUtil.closeQuietly(pstmt);
-    }
-  }
-
-  /**
-   * Check batching using a statement with fixed parameter.
-   */
-  @Test
-  public void testBatchWithReWrittenBatchStatementWithFixedParameter()
-      throws SQLException {
-    String[] odd = new String[]{
-        "INSERT INTO testbatch VALUES (?, '1, (, $1234, a''n?d )' /*xxxx)*/, ?) -- xxx",
-        // "INSERT /*xxx*/INTO testbatch VALUES (?, '1, (, $1234, a''n?d )' /*xxxx)*/, ?) -- xxx",
-    };
-    for (String s : odd) {
-      PreparedStatement pstmt = null;
-      try {
-        pstmt = con.prepareStatement(s);
-        pstmt.setInt(1, 1);
-        pstmt.setInt(2, 2);
-        pstmt.addBatch();
-        pstmt.setInt(1, 3);
-        pstmt.setInt(2, 4);
-        pstmt.addBatch();
-        pstmt.setInt(1, 5);
-        pstmt.setInt(2, 6);
-        pstmt.addBatch();
-        BatchExecuteTest.assertSimpleInsertBatch(3, pstmt.executeBatch());
-        TestUtil.assertNumberOfRows(con, "testbatch", 3, "3 rows inserted");
-      } finally {
-        TestUtil.closeQuietly(pstmt);
-      }
-    }
-  }
-
-  /**
-   * Check batching using a statement with fixed parameters only.
-   */
-  @Test
-  public void testBatchWithReWrittenBatchStatementWithFixedParametersOnly()
-      throws SQLException {
-    String[] odd = new String[]{
-        "INSERT INTO testbatch VALUES (9, '1, (, $1234, a''n?d )' /*xxxx)*/, 7) -- xxx",
-        // "INSERT /*xxx*/INTO testbatch VALUES (?, '1, (, $1234, a''n?d )' /*xxxx)*/, ?) -- xxx",
-    };
-    for (String s : odd) {
-      PreparedStatement pstmt = null;
-      try {
-        pstmt = con.prepareStatement(s);
-        pstmt.addBatch();
-        pstmt.addBatch();
-        pstmt.addBatch();
-        BatchExecuteTest.assertSimpleInsertBatch(3, pstmt.executeBatch());
-        TestUtil.assertNumberOfRows(con, "testbatch", 3, "3 rows inserted");
-      } finally {
-        TestUtil.closeQuietly(pstmt);
-      }
-    }
-  }
-
-  /**
-   * Test to make sure a statement with a semicolon is not broken.
-   */
-  private void simpleRewriteBatch(String values, String suffix)
-      throws SQLException {
-    PreparedStatement pstmt = null;
-    try {
-      PreparedStatement clean = con.prepareStatement("truncate table testbatch");
-      clean.execute();
-      clean.close();
-
-      pstmt = con.prepareStatement("INSERT INTO testbatch " + values + "(?,?,?)" + suffix);
-      pstmt.setInt(1, 1);
-      pstmt.setString(2, "a");
-      pstmt.setInt(3, 2);
-      pstmt.addBatch();
-      pstmt.setInt(1, 3);
-      pstmt.setString(2, "b");
-      pstmt.setInt(3, 4);
-      pstmt.addBatch();
-      pstmt.setInt(1, 5);
-      pstmt.setString(2, "c");
-      pstmt.setInt(3, 6);
-      pstmt.addBatch();
-      BatchExecuteTest.assertSimpleInsertBatch(3, pstmt.executeBatch());
-      TestUtil.assertNumberOfRows(con, "testbatch", 3, "3 rows inserted");
-    } finally {
-      TestUtil.closeQuietly(pstmt);
-    }
-  }
-
-  /**
-   * Test to make sure a statement with a semicolon is not broken.
-   */
-  @Test
-  public void testBatchWithReWrittenBatchStatementWithSemiColon()
-      throws SQLException {
-    simpleRewriteBatch("values", ";");
-  }
-
-  /**
-   * Test to make sure a statement with a semicolon is not broken.
-   */
-  @Test
-  public void testBatchWithReWrittenSpaceAfterValues()
-      throws SQLException {
-    simpleRewriteBatch("values ", "");
-    simpleRewriteBatch("values  ", "");
-    simpleRewriteBatch("values\t", "");
-  }
-
-  /**
-   * Test VALUES word with mixed case.
-   */
-  @Test
-  public void testBatchWithReWrittenMixedCaseValues()
-      throws SQLException {
-    simpleRewriteBatch("vAlues", "");
-    simpleRewriteBatch("vaLUES", "");
-    simpleRewriteBatch("VALUES", "");
-  }
-
-  /**
-   * Test to make sure a statement with a semicolon is not broken.
-   */
-  @Test
-  public void testBindsInNestedParens()
-      throws SQLException {
-    PreparedStatement pstmt = null;
-    try {
-      pstmt = con.prepareStatement("INSERT INTO testbatch VALUES ((?),((?)),?);");
-      pstmt.setInt(1, 1);
-      pstmt.setString(2, "a");
-      pstmt.setInt(3, 2);
-      pstmt.addBatch();
-      pstmt.setInt(1, 3);
-      pstmt.setString(2, "b");
-      pstmt.setInt(3, 4);
-      pstmt.addBatch();
-      pstmt.setInt(1, 5);
-      pstmt.setString(2, "c");
-      pstmt.setInt(3, 6);
-      pstmt.addBatch();
-      BatchExecuteTest.assertSimpleInsertBatch(3, pstmt.executeBatch());
-    } finally {
-      TestUtil.closeQuietly(pstmt);
-    }
-  }
-
-  /**
-   * Test to make sure a statement with a semicolon is not broken.
-   */
-  @Test
-  public void testMultiValues1bind()
-      throws SQLException {
-    PreparedStatement pstmt = null;
-    try {
-      pstmt = con.prepareStatement("INSERT INTO testbatch (pk) VALUES (?), (?)");
-      pstmt.setInt(1, 100);
-      pstmt.setInt(2, 200);
-      pstmt.addBatch();
-      pstmt.setInt(1, 300);
-      pstmt.setInt(2, 400);
-      pstmt.addBatch();
-      BatchExecuteTest.assertSimpleInsertBatch(2, pstmt.executeBatch());
-    } finally {
-      TestUtil.closeQuietly(pstmt);
-    }
-  }
-
-  /**
-   * Test case to check the outcome for a batch with a single row/batch is
-   * consistent across calls to executeBatch. Especially after a batch
-   * has been re-written.
-   */
-  @Test
-  public void testConsistentOutcome() throws SQLException {
-    PreparedStatement pstmt = null;
-    try {
-      pstmt = con.prepareStatement("INSERT INTO testbatch VALUES (?,?,?);");
-      pstmt.setInt(1, 1);
-      pstmt.setString(2, "a");
-      pstmt.setInt(3, 2);
-      pstmt.addBatch();
-      BatchExecuteTest.assertSimpleInsertBatch(1, pstmt.executeBatch());
-
-      pstmt.setInt(1, 1);
-      pstmt.setString(2, "b");
-      pstmt.setInt(3, 2);
-      pstmt.addBatch();
-      pstmt.setInt(1, 3);
-      pstmt.setString(2, "c");
-      pstmt.setInt(3, 4);
-      pstmt.addBatch();
-      BatchExecuteTest.assertSimpleInsertBatch(2, pstmt.executeBatch());
-
-      pstmt.setInt(1, 1);
-      pstmt.setString(2, "d");
-      pstmt.setInt(3, 2);
-      pstmt.addBatch();
-      BatchExecuteTest.assertSimpleInsertBatch(1, pstmt.executeBatch());
-    } finally {
-      TestUtil.closeQuietly(pstmt);
-    }
-  }
-
-  /**
-   * Test to check statement with named columns still work as expected.
-   */
-  @Test
-  public void testINSERTwithNamedColumnsNotBroken() throws SQLException {
-    PreparedStatement pstmt = null;
-    try {
-      pstmt = con
-          .prepareStatement("INSERT INTO testbatch (pk, col1, col2) VALUES (?,?,?);");
-      pstmt.setInt(1, 1);
-      pstmt.setString(2, "a");
-      pstmt.setInt(3, 2);
-      pstmt.addBatch();
-      BatchExecuteTest.assertSimpleInsertBatch(1, pstmt.executeBatch());
-    } finally {
-      TestUtil.closeQuietly(pstmt);
-    }
-  }
-
-  @Test
-  public void testMixedCaseInSeRtStatement() throws SQLException {
-    PreparedStatement pstmt = null;
-    try {
-      pstmt = con.prepareStatement("InSeRt INTO testbatch VALUES (?,?,?);");
-      pstmt.setInt(1, 1);
-      pstmt.setString(2, "a");
-      pstmt.setInt(3, 2);
-      pstmt.addBatch();
-      pstmt.setInt(1, 3);
-      pstmt.setString(2, "b");
-      pstmt.setInt(3, 4);
-      pstmt.addBatch();
-      BatchExecuteTest.assertSimpleInsertBatch(2, pstmt.executeBatch());
-    } finally {
-      TestUtil.closeQuietly(pstmt);
-    }
-  }
-
-  @Test
-  public void testReWriteDisabledForPlainBatch() throws Exception {
-    Statement stmt = null;
-    try {
-      con = TestUtil.openDB(new Properties());
-      stmt = con.createStatement();
-      stmt.addBatch("INSERT INTO testbatch VALUES (100,'a',200);");
-      stmt.addBatch("INSERT INTO testbatch VALUES (300,'b',400);");
-      Assert.assertEquals(
-          "Expected outcome not returned by batch execution. The driver"
-              + " allowed re-write in combination with plain statements.",
-          Arrays.toString(new int[]{1, 1}), Arrays.toString(stmt.executeBatch()));
-    } finally {
-      TestUtil.closeQuietly(stmt);
-    }
-  }
-
-  @Test
-  public void test32767Binds() throws Exception {
-    testNBinds(32767);
-  }
-
-  @Test
-  public void test32768Binds() throws Exception {
-    testNBinds(32768);
-  }
-
-  @Test
-  public void test65535Binds() throws Exception {
-    testNBinds(65535);
-  }
-
-  public void testNBinds(int nBinds) throws Exception {
-    PreparedStatement pstmt = null;
-    try {
-      StringBuilder sb = new StringBuilder();
-      sb.append("INSERT INTO testbatch(pk) VALUES (coalesce(?");
-      for (int i = 0; i < nBinds - 1 /* note one ? above */; i++) {
-        sb.append(",?");
-      }
-      sb.append("))");
-      pstmt = con.prepareStatement(sb.toString());
-      for (int k = 0; k < 2; k++) {
-        for (int i = 1; i <= nBinds; i++) {
-          pstmt.setInt(i, i + k * nBinds);
+    @Parameterized.Parameters(name = "{index}: autoCommit={0}, binary={1}")
+    public static Iterable<Object[]> data() {
+        Collection<Object[]> ids = new ArrayList<>();
+        for (AutoCommit autoCommit : AutoCommit.values()) {
+            for (BinaryMode binaryMode : BinaryMode.values()) {
+                ids.add(new Object[]{autoCommit, binaryMode});
+            }
+        }
+        return ids;
+    }
+
+    /* Set up the fixture for this testcase: a connection to a database with
+    a table for this test. */
+    public void setUp() throws Exception {
+        super.setUp();
+        TestUtil.createTable(con, "testbatch", "pk INTEGER, col1 VARCHAR, col2 INTEGER");
+        con.setAutoCommit(autoCommit == AutoCommit.YES);
+    }
+
+    // Tear down the fixture for this test case.
+    public void tearDown() throws SQLException {
+        TestUtil.dropTable(con, "testbatch");
+        super.tearDown();
+    }
+
+    @Override
+    protected void updateProperties(Properties props) {
+        super.updateProperties(props);
+        PGProperty.REWRITE_BATCHED_INSERTS.set(props, true);
+    }
+
+    /**
+     * Check batching using two individual statements that are both the same type.
+     * Test to check the re-write optimisation behaviour.
+     */
+
+    @Test
+    public void testBatchWithReWrittenRepeatedInsertStatementOptimizationEnabled()
+            throws SQLException {
+        PreparedStatement pstmt = null;
+        try {
+            pstmt = con.prepareStatement("INSERT INTO testbatch VALUES (?,?)");
+            pstmt.setInt(1, 1);
+            pstmt.setInt(2, 2);
+            pstmt.addBatch();
+            pstmt.setInt(1, 3);
+            pstmt.setInt(2, 4);
+            pstmt.addBatch();
+            pstmt.setInt(1, 5);
+            pstmt.setInt(2, 6);
+            pstmt.addBatch();
+            BatchExecuteTest.assertSimpleInsertBatch(3, pstmt.executeBatch());
+            TestUtil.assertNumberOfRows(con, "testbatch", 3, "3 rows inserted");
+
+            /*
+             * Now check the ps can be reused. The batched statement should be reset
+             * and have no knowledge of prior re-written batch. This test uses a
+             * different batch size. To test if the driver detects the different size
+             * and prepares the statement on with the backend. If not then an
+             * exception will be thrown for an unknown prepared statement.
+             */
+            pstmt.setInt(1, 1);
+            pstmt.setInt(2, 2);
+            pstmt.addBatch();
+            pstmt.setInt(1, 3);
+            pstmt.setInt(2, 4);
+            pstmt.addBatch();
+            pstmt.setInt(1, 5);
+            pstmt.setInt(2, 6);
+            pstmt.addBatch();
+            pstmt.setInt(1, 7);
+            pstmt.setInt(2, 8);
+            pstmt.addBatch();
+            BatchExecuteTest.assertSimpleInsertBatch(4, pstmt.executeBatch());
+            TestUtil.assertNumberOfRows(con, "testbatch", 7, "3+4 rows inserted");
+
+            pstmt.setInt(1, 1);
+            pstmt.setInt(2, 2);
+            pstmt.addBatch();
+            pstmt.setInt(1, 3);
+            pstmt.setInt(2, 4);
+            pstmt.addBatch();
+            pstmt.setInt(1, 5);
+            pstmt.setInt(2, 6);
+            pstmt.addBatch();
+            pstmt.setInt(1, 7);
+            pstmt.setInt(2, 8);
+            pstmt.addBatch();
+            BatchExecuteTest.assertSimpleInsertBatch(4, pstmt.executeBatch());
+            TestUtil.assertNumberOfRows(con, "testbatch", 11, "3+4+4 rows inserted");
+
+        } finally {
+            TestUtil.closeQuietly(pstmt);
+        }
+    }
+
+    /**
+     * Check batching using a statement with fixed parameter.
+     */
+    @Test
+    public void testBatchWithReWrittenBatchStatementWithFixedParameter()
+            throws SQLException {
+        String[] odd = new String[]{
+                "INSERT INTO testbatch VALUES (?, '1, (, $1234, a''n?d )' /*xxxx)*/, ?) -- xxx",
+                // "INSERT /*xxx*/INTO testbatch VALUES (?, '1, (, $1234, a''n?d )' /*xxxx)*/, ?) -- xxx",
+        };
+        for (String s : odd) {
+            PreparedStatement pstmt = null;
+            try {
+                pstmt = con.prepareStatement(s);
+                pstmt.setInt(1, 1);
+                pstmt.setInt(2, 2);
+                pstmt.addBatch();
+                pstmt.setInt(1, 3);
+                pstmt.setInt(2, 4);
+                pstmt.addBatch();
+                pstmt.setInt(1, 5);
+                pstmt.setInt(2, 6);
+                pstmt.addBatch();
+                BatchExecuteTest.assertSimpleInsertBatch(3, pstmt.executeBatch());
+                TestUtil.assertNumberOfRows(con, "testbatch", 3, "3 rows inserted");
+            } finally {
+                TestUtil.closeQuietly(pstmt);
+            }
+        }
+    }
+
+    /**
+     * Check batching using a statement with fixed parameters only.
+     */
+    @Test
+    public void testBatchWithReWrittenBatchStatementWithFixedParametersOnly()
+            throws SQLException {
+        String[] odd = new String[]{
+                "INSERT INTO testbatch VALUES (9, '1, (, $1234, a''n?d )' /*xxxx)*/, 7) -- xxx",
+                // "INSERT /*xxx*/INTO testbatch VALUES (?, '1, (, $1234, a''n?d )' /*xxxx)*/, ?) -- xxx",
+        };
+        for (String s : odd) {
+            PreparedStatement pstmt = null;
+            try {
+                pstmt = con.prepareStatement(s);
+                pstmt.addBatch();
+                pstmt.addBatch();
+                pstmt.addBatch();
+                BatchExecuteTest.assertSimpleInsertBatch(3, pstmt.executeBatch());
+                TestUtil.assertNumberOfRows(con, "testbatch", 3, "3 rows inserted");
+            } finally {
+                TestUtil.closeQuietly(pstmt);
+            }
+        }
+    }
+
+    /**
+     * Test to make sure a statement with a semicolon is not broken.
+     */
+    private void simpleRewriteBatch(String values, String suffix)
+            throws SQLException {
+        PreparedStatement pstmt = null;
+        try {
+            PreparedStatement clean = con.prepareStatement("truncate table testbatch");
+            clean.execute();
+            clean.close();
+
+            pstmt = con.prepareStatement("INSERT INTO testbatch " + values + "(?,?,?)" + suffix);
+            pstmt.setInt(1, 1);
+            pstmt.setString(2, "a");
+            pstmt.setInt(3, 2);
+            pstmt.addBatch();
+            pstmt.setInt(1, 3);
+            pstmt.setString(2, "b");
+            pstmt.setInt(3, 4);
+            pstmt.addBatch();
+            pstmt.setInt(1, 5);
+            pstmt.setString(2, "c");
+            pstmt.setInt(3, 6);
+            pstmt.addBatch();
+            BatchExecuteTest.assertSimpleInsertBatch(3, pstmt.executeBatch());
+            TestUtil.assertNumberOfRows(con, "testbatch", 3, "3 rows inserted");
+        } finally {
+            TestUtil.closeQuietly(pstmt);
+        }
+    }
+
+    /**
+     * Test to make sure a statement with a semicolon is not broken.
+     */
+    @Test
+    public void testBatchWithReWrittenBatchStatementWithSemiColon()
+            throws SQLException {
+        simpleRewriteBatch("values", ";");
+    }
+
+    /**
+     * Test to make sure a statement with a semicolon is not broken.
+     */
+    @Test
+    public void testBatchWithReWrittenSpaceAfterValues()
+            throws SQLException {
+        simpleRewriteBatch("values ", "");
+        simpleRewriteBatch("values  ", "");
+        simpleRewriteBatch("values\t", "");
+    }
+
+    /**
+     * Test VALUES word with mixed case.
+     */
+    @Test
+    public void testBatchWithReWrittenMixedCaseValues()
+            throws SQLException {
+        simpleRewriteBatch("vAlues", "");
+        simpleRewriteBatch("vaLUES", "");
+        simpleRewriteBatch("VALUES", "");
+    }
+
+    /**
+     * Test to make sure a statement with a semicolon is not broken.
+     */
+    @Test
+    public void testBindsInNestedParens()
+            throws SQLException {
+        PreparedStatement pstmt = null;
+        try {
+            pstmt = con.prepareStatement("INSERT INTO testbatch VALUES ((?),((?)),?);");
+            pstmt.setInt(1, 1);
+            pstmt.setString(2, "a");
+            pstmt.setInt(3, 2);
+            pstmt.addBatch();
+            pstmt.setInt(1, 3);
+            pstmt.setString(2, "b");
+            pstmt.setInt(3, 4);
+            pstmt.addBatch();
+            pstmt.setInt(1, 5);
+            pstmt.setString(2, "c");
+            pstmt.setInt(3, 6);
+            pstmt.addBatch();
+            BatchExecuteTest.assertSimpleInsertBatch(3, pstmt.executeBatch());
+        } finally {
+            TestUtil.closeQuietly(pstmt);
+        }
+    }
+
+    /**
+     * Test to make sure a statement with a semicolon is not broken.
+     */
+    @Test
+    public void testMultiValues1bind()
+            throws SQLException {
+        PreparedStatement pstmt = null;
+        try {
+            pstmt = con.prepareStatement("INSERT INTO testbatch (pk) VALUES (?), (?)");
+            pstmt.setInt(1, 100);
+            pstmt.setInt(2, 200);
+            pstmt.addBatch();
+            pstmt.setInt(1, 300);
+            pstmt.setInt(2, 400);
+            pstmt.addBatch();
+            BatchExecuteTest.assertSimpleInsertBatch(2, pstmt.executeBatch());
+        } finally {
+            TestUtil.closeQuietly(pstmt);
+        }
+    }
+
+    /**
+     * Test case to check the outcome for a batch with a single row/batch is
+     * consistent across calls to executeBatch. Especially after a batch
+     * has been re-written.
+     */
+    @Test
+    public void testConsistentOutcome() throws SQLException {
+        PreparedStatement pstmt = null;
+        try {
+            pstmt = con.prepareStatement("INSERT INTO testbatch VALUES (?,?,?);");
+            pstmt.setInt(1, 1);
+            pstmt.setString(2, "a");
+            pstmt.setInt(3, 2);
+            pstmt.addBatch();
+            BatchExecuteTest.assertSimpleInsertBatch(1, pstmt.executeBatch());
+
+            pstmt.setInt(1, 1);
+            pstmt.setString(2, "b");
+            pstmt.setInt(3, 2);
+            pstmt.addBatch();
+            pstmt.setInt(1, 3);
+            pstmt.setString(2, "c");
+            pstmt.setInt(3, 4);
+            pstmt.addBatch();
+            BatchExecuteTest.assertSimpleInsertBatch(2, pstmt.executeBatch());
+
+            pstmt.setInt(1, 1);
+            pstmt.setString(2, "d");
+            pstmt.setInt(3, 2);
+            pstmt.addBatch();
+            BatchExecuteTest.assertSimpleInsertBatch(1, pstmt.executeBatch());
+        } finally {
+            TestUtil.closeQuietly(pstmt);
+        }
+    }
+
+    /**
+     * Test to check statement with named columns still work as expected.
+     */
+    @Test
+    public void testINSERTwithNamedColumnsNotBroken() throws SQLException {
+        PreparedStatement pstmt = null;
+        try {
+            pstmt = con
+                    .prepareStatement("INSERT INTO testbatch (pk, col1, col2) VALUES (?,?,?);");
+            pstmt.setInt(1, 1);
+            pstmt.setString(2, "a");
+            pstmt.setInt(3, 2);
+            pstmt.addBatch();
+            BatchExecuteTest.assertSimpleInsertBatch(1, pstmt.executeBatch());
+        } finally {
+            TestUtil.closeQuietly(pstmt);
+        }
+    }
+
+    @Test
+    public void testMixedCaseInSeRtStatement() throws SQLException {
+        PreparedStatement pstmt = null;
+        try {
+            pstmt = con.prepareStatement("InSeRt INTO testbatch VALUES (?,?,?);");
+            pstmt.setInt(1, 1);
+            pstmt.setString(2, "a");
+            pstmt.setInt(3, 2);
+            pstmt.addBatch();
+            pstmt.setInt(1, 3);
+            pstmt.setString(2, "b");
+            pstmt.setInt(3, 4);
+            pstmt.addBatch();
+            BatchExecuteTest.assertSimpleInsertBatch(2, pstmt.executeBatch());
+        } finally {
+            TestUtil.closeQuietly(pstmt);
+        }
+    }
+
+    @Test
+    public void testReWriteDisabledForPlainBatch() throws Exception {
+        Statement stmt = null;
+        try {
+            con = TestUtil.openDB(new Properties());
+            stmt = con.createStatement();
+            stmt.addBatch("INSERT INTO testbatch VALUES (100,'a',200);");
+            stmt.addBatch("INSERT INTO testbatch VALUES (300,'b',400);");
+            Assert.assertEquals(
+                    "Expected outcome not returned by batch execution. The driver"
+                            + " allowed re-write in combination with plain statements.",
+                    Arrays.toString(new int[]{1, 1}), Arrays.toString(stmt.executeBatch()));
+        } finally {
+            TestUtil.closeQuietly(stmt);
+        }
+    }
+
+    @Test
+    public void test32767Binds() throws Exception {
+        testNBinds(32767);
+    }
+
+    @Test
+    public void test32768Binds() throws Exception {
+        testNBinds(32768);
+    }
+
+    @Test
+    public void test65535Binds() throws Exception {
+        testNBinds(65535);
+    }
+
+    public void testNBinds(int nBinds) throws Exception {
+        PreparedStatement pstmt = null;
+        try {
+            StringBuilder sb = new StringBuilder();
+            sb.append("INSERT INTO testbatch(pk) VALUES (coalesce(?");
+            for (int i = 0; i < nBinds - 1 /* note one ? above */; i++) {
+                sb.append(",?");
+            }
+            sb.append("))");
+            pstmt = con.prepareStatement(sb.toString());
+            for (int k = 0; k < 2; k++) {
+                for (int i = 1; i <= nBinds; i++) {
+                    pstmt.setInt(i, i + k * nBinds);
+                }
+                pstmt.addBatch();
+            }
+            if (nBinds * 2 <= 65535 || preferQueryMode == PreferQueryMode.SIMPLE) {
+                Assert.assertEquals(
+                        "Insert with " + nBinds + " binds should be rewritten into multi-value insert"
+                                + ", so expecting Statement.SUCCESS_NO_INFO == -2",
+                        Arrays.toString(new int[]{Statement.SUCCESS_NO_INFO, Statement.SUCCESS_NO_INFO}),
+                        Arrays.toString(pstmt.executeBatch()));
+            } else {
+                Assert.assertEquals(
+                        "Insert with " + nBinds + " binds can't be rewritten into multi-value insert"
+                                + " since write format allows 65535 binds maximum"
+                                + ", so expecting batch to be executed as individual statements",
+                        Arrays.toString(new int[]{1, 1}),
+                        Arrays.toString(pstmt.executeBatch()));
+            }
+        } catch (BatchUpdateException be) {
+            SQLException e = be;
+            while (true) {
+                e.printStackTrace();
+                SQLException next = e.getNextException();
+                if (next == null) {
+                    break;
+                }
+                e = next;
+            }
+            throw e;
+        } finally {
+            TestUtil.closeQuietly(pstmt);
         }
-        pstmt.addBatch();
-      }
-      if (nBinds * 2 <= 65535 || preferQueryMode == PreferQueryMode.SIMPLE) {
-        Assert.assertEquals(
-            "Insert with " + nBinds + " binds should be rewritten into multi-value insert"
-                + ", so expecting Statement.SUCCESS_NO_INFO == -2",
-            Arrays.toString(new int[]{Statement.SUCCESS_NO_INFO, Statement.SUCCESS_NO_INFO}),
-            Arrays.toString(pstmt.executeBatch()));
-      } else {
-        Assert.assertEquals(
-            "Insert with " + nBinds + " binds can't be rewritten into multi-value insert"
-                + " since write format allows 65535 binds maximum"
-                + ", so expecting batch to be executed as individual statements",
-            Arrays.toString(new int[]{1, 1}),
-            Arrays.toString(pstmt.executeBatch()));
-      }
-    } catch (BatchUpdateException be) {
-      SQLException e = be;
-      while (true) {
-        e.printStackTrace();
-        SQLException next = e.getNextException();
-        if (next == null) {
-          break;
-        }
-        e = next;
-      }
-      throw e;
-    } finally {
-      TestUtil.closeQuietly(pstmt);
     }
-  }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/BlobTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/BlobTest.java
index 2f835d9..0eafe8a 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/BlobTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/BlobTest.java
@@ -5,23 +5,6 @@
 
 package org.postgresql.test.jdbc2;
 
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertNotNull;
-import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.junit.jupiter.api.Assertions.fail;
-
-import org.postgresql.PGConnection;
-import org.postgresql.core.ServerVersion;
-import org.postgresql.largeobject.LargeObject;
-import org.postgresql.largeobject.LargeObjectManager;
-import org.postgresql.test.TestUtil;
-
-import org.junit.jupiter.api.AfterAll;
-import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.BeforeAll;
-import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.Test;
-
 import java.io.InputStream;
 import java.io.OutputStream;
 import java.sql.Blob;
@@ -32,507 +15,521 @@ import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.sql.Statement;
 import java.sql.Types;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.postgresql.PGConnection;
+import org.postgresql.core.ServerVersion;
+import org.postgresql.largeobject.LargeObject;
+import org.postgresql.largeobject.LargeObjectManager;
+import org.postgresql.test.TestUtil;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 
 /**
  * Some simple tests based on problems reported by users. Hopefully these will help prevent previous
  * problems from re-occurring ;-)
  */
 class BlobTest {
-  private static final String TEST_FILE =  "/test-file.xml";
+    private static final String TEST_FILE = "/test-file.xml";
 
-  private static final int LOOP = 0; // LargeObject API using loop
-  private static final int NATIVE_STREAM = 1; // LargeObject API using OutputStream
+    private static final int LOOP = 0; // LargeObject API using loop
+    private static final int NATIVE_STREAM = 1; // LargeObject API using OutputStream
 
-  private Connection con;
+    private Connection con;
 
-  /*
-    Only do this once
-  */
-  @BeforeAll
-  static void createLargeBlob() throws Exception {
-    try (Connection con = TestUtil.openDB()) {
-      TestUtil.createTable(con, "testblob", "id name,lo oid");
-      con.setAutoCommit(false);
-      LargeObjectManager lom = ((PGConnection) con).getLargeObjectAPI();
-      long oid = lom.createLO(LargeObjectManager.READWRITE);
-      LargeObject blob = lom.open(oid);
+    /*
+      Only do this once
+    */
+    @BeforeAll
+    static void createLargeBlob() throws Exception {
+        try (Connection con = TestUtil.openDB()) {
+            TestUtil.createTable(con, "testblob", "id name,lo oid");
+            con.setAutoCommit(false);
+            LargeObjectManager lom = ((PGConnection) con).getLargeObjectAPI();
+            long oid = lom.createLO(LargeObjectManager.READWRITE);
+            LargeObject blob = lom.open(oid);
 
-      byte[] buf = new byte[256];
-      for (int i = 0; i < buf.length; i++) {
-        buf[i] = (byte) i;
-      }
-      // I want to create a large object
-      int i = 1024 / buf.length;
-      for (int j = i; j > 0; j--) {
-        blob.write(buf, 0, buf.length);
-      }
-      assertEquals(1024, blob.size());
-      blob.close();
-      try (PreparedStatement pstmt = con.prepareStatement("INSERT INTO testblob(id, lo) VALUES(?,?)")) {
-        pstmt.setString(1, "l1");
-        pstmt.setLong(2, oid);
-        pstmt.executeUpdate();
-      }
-      con.commit();
+            byte[] buf = new byte[256];
+            for (int i = 0; i < buf.length; i++) {
+                buf[i] = (byte) i;
+            }
+            // I want to create a large object
+            int i = 1024 / buf.length;
+            for (int j = i; j > 0; j--) {
+                blob.write(buf, 0, buf.length);
+            }
+            assertEquals(1024, blob.size());
+            blob.close();
+            try (PreparedStatement pstmt = con.prepareStatement("INSERT INTO testblob(id, lo) VALUES(?,?)")) {
+                pstmt.setString(1, "l1");
+                pstmt.setLong(2, oid);
+                pstmt.executeUpdate();
+            }
+            con.commit();
+        }
     }
-  }
 
-  @AfterAll
-  static void cleanup() throws Exception {
-    try (Connection con = TestUtil.openDB()) {
-      try (Statement stmt = con.createStatement()) {
-        stmt.execute("SELECT lo_unlink(lo) FROM testblob where id = 'l1'");
-      } finally {
-        TestUtil.dropTable(con, "testblob");
-      }
+    @AfterAll
+    static void cleanup() throws Exception {
+        try (Connection con = TestUtil.openDB()) {
+            try (Statement stmt = con.createStatement()) {
+                stmt.execute("SELECT lo_unlink(lo) FROM testblob where id = 'l1'");
+            } finally {
+                TestUtil.dropTable(con, "testblob");
+            }
+        }
     }
-  }
 
-  @BeforeEach
-  void setUp() throws Exception {
-    con = TestUtil.openDB();
-    con.setAutoCommit(false);
-  }
-
-  @AfterEach
-  void tearDown() throws Exception {
-    con.setAutoCommit(true);
-    try (Statement stmt = con.createStatement()) {
-      stmt.execute("SELECT lo_unlink(lo) FROM testblob where id != 'l1'");
-      stmt.execute("delete from testblob where id != 'l1'");
-    } finally {
-      TestUtil.closeDB(con);
+    @BeforeEach
+    void setUp() throws Exception {
+        con = TestUtil.openDB();
+        con.setAutoCommit(false);
     }
-  }
 
-  @Test
-  void setNull() throws Exception {
-    try (PreparedStatement pstmt = con.prepareStatement("INSERT INTO testblob(lo) VALUES (?)")) {
-
-      pstmt.setBlob(1, (Blob) null);
-      pstmt.executeUpdate();
-
-      pstmt.setNull(1, Types.BLOB);
-      pstmt.executeUpdate();
-
-      pstmt.setObject(1, null, Types.BLOB);
-      pstmt.executeUpdate();
-
-      pstmt.setClob(1, (Clob) null);
-      pstmt.executeUpdate();
-
-      pstmt.setNull(1, Types.CLOB);
-      pstmt.executeUpdate();
-
-      pstmt.setObject(1, null, Types.CLOB);
-      pstmt.executeUpdate();
+    @AfterEach
+    void tearDown() throws Exception {
+        con.setAutoCommit(true);
+        try (Statement stmt = con.createStatement()) {
+            stmt.execute("SELECT lo_unlink(lo) FROM testblob where id != 'l1'");
+            stmt.execute("delete from testblob where id != 'l1'");
+        } finally {
+            TestUtil.closeDB(con);
+        }
     }
-  }
 
-  @Test
-  void set() throws SQLException {
-    try (Statement stmt = con.createStatement()) {
-      stmt.execute("INSERT INTO testblob(id,lo) VALUES ('1', lo_creat(-1))");
-      ResultSet rs = stmt.executeQuery("SELECT lo FROM testblob where id = '1'");
-      assertTrue(rs.next());
+    @Test
+    void setNull() throws Exception {
+        try (PreparedStatement pstmt = con.prepareStatement("INSERT INTO testblob(lo) VALUES (?)")) {
 
-      PreparedStatement pstmt = con.prepareStatement("INSERT INTO testblob(id, lo) VALUES(?,?)");
+            pstmt.setBlob(1, (Blob) null);
+            pstmt.executeUpdate();
 
-      Blob blob = rs.getBlob(1);
-      pstmt.setString(1, "setObjectTypeBlob");
-      pstmt.setObject(2, blob, Types.BLOB);
-      assertEquals(1, pstmt.executeUpdate());
+            pstmt.setNull(1, Types.BLOB);
+            pstmt.executeUpdate();
 
-      blob = rs.getBlob(1);
-      pstmt.setString(1, "setObjectBlob");
-      pstmt.setObject(2, blob);
-      assertEquals(1, pstmt.executeUpdate());
+            pstmt.setObject(1, null, Types.BLOB);
+            pstmt.executeUpdate();
 
-      blob = rs.getBlob(1);
-      pstmt.setString(1, "setBlob");
-      pstmt.setBlob(2, blob);
-      assertEquals(1, pstmt.executeUpdate());
+            pstmt.setClob(1, (Clob) null);
+            pstmt.executeUpdate();
 
-      Clob clob = rs.getClob(1);
-      pstmt.setString(1, "setObjectTypeClob");
-      pstmt.setObject(2, clob, Types.CLOB);
-      assertEquals(1, pstmt.executeUpdate());
+            pstmt.setNull(1, Types.CLOB);
+            pstmt.executeUpdate();
 
-      clob = rs.getClob(1);
-      pstmt.setString(1, "setObjectClob");
-      pstmt.setObject(2, clob);
-      assertEquals(1, pstmt.executeUpdate());
-
-      clob = rs.getClob(1);
-      pstmt.setString(1, "setClob");
-      pstmt.setClob(2, clob);
-      assertEquals(1, pstmt.executeUpdate());
+            pstmt.setObject(1, null, Types.CLOB);
+            pstmt.executeUpdate();
+        }
     }
-  }
 
-  /*
-   * Tests one method of uploading a blob to the database
-   */
-  @Test
-  void uploadBlob_LOOP() throws Exception {
-    assertTrue(uploadFile(TEST_FILE, LOOP) > 0);
+    @Test
+    void set() throws SQLException {
+        try (Statement stmt = con.createStatement()) {
+            stmt.execute("INSERT INTO testblob(id,lo) VALUES ('1', lo_creat(-1))");
+            ResultSet rs = stmt.executeQuery("SELECT lo FROM testblob where id = '1'");
+            assertTrue(rs.next());
 
-    // Now compare the blob & the file. Note this actually tests the
-    // InputStream implementation!
-    assertTrue(compareBlobsLOAPI(TEST_FILE));
-    assertTrue(compareBlobs(TEST_FILE));
-    assertTrue(compareClobs(TEST_FILE));
-  }
+            PreparedStatement pstmt = con.prepareStatement("INSERT INTO testblob(id, lo) VALUES(?,?)");
 
-  /*
-   * Tests one method of uploading a blob to the database
-   */
-  @Test
-  void uploadBlob_NATIVE() throws Exception {
-    assertTrue(uploadFile(TEST_FILE, NATIVE_STREAM) > 0);
+            Blob blob = rs.getBlob(1);
+            pstmt.setString(1, "setObjectTypeBlob");
+            pstmt.setObject(2, blob, Types.BLOB);
+            assertEquals(1, pstmt.executeUpdate());
 
-    // Now compare the blob & the file. Note this actually tests the
-    // InputStream implementation!
-    assertTrue(compareBlobs(TEST_FILE));
-  }
+            blob = rs.getBlob(1);
+            pstmt.setString(1, "setObjectBlob");
+            pstmt.setObject(2, blob);
+            assertEquals(1, pstmt.executeUpdate());
 
-  @Test
-  void markResetStream() throws Exception {
-    assertTrue(uploadFile(TEST_FILE, NATIVE_STREAM) > 0);
+            blob = rs.getBlob(1);
+            pstmt.setString(1, "setBlob");
+            pstmt.setBlob(2, blob);
+            assertEquals(1, pstmt.executeUpdate());
 
-    try (Statement stmt = con.createStatement()) {
-      try (ResultSet rs = stmt.executeQuery("SELECT lo FROM testblob where id = '/test-file.xml'")) {
-        assertTrue(rs.next());
+            Clob clob = rs.getClob(1);
+            pstmt.setString(1, "setObjectTypeClob");
+            pstmt.setObject(2, clob, Types.CLOB);
+            assertEquals(1, pstmt.executeUpdate());
+
+            clob = rs.getClob(1);
+            pstmt.setString(1, "setObjectClob");
+            pstmt.setObject(2, clob);
+            assertEquals(1, pstmt.executeUpdate());
+
+            clob = rs.getClob(1);
+            pstmt.setString(1, "setClob");
+            pstmt.setClob(2, clob);
+            assertEquals(1, pstmt.executeUpdate());
+        }
+    }
+
+    /*
+     * Tests one method of uploading a blob to the database
+     */
+    @Test
+    void uploadBlob_LOOP() throws Exception {
+        assertTrue(uploadFile(TEST_FILE, LOOP) > 0);
+
+        // Now compare the blob & the file. Note this actually tests the
+        // InputStream implementation!
+        assertTrue(compareBlobsLOAPI(TEST_FILE));
+        assertTrue(compareBlobs(TEST_FILE));
+        assertTrue(compareClobs(TEST_FILE));
+    }
+
+    /*
+     * Tests one method of uploading a blob to the database
+     */
+    @Test
+    void uploadBlob_NATIVE() throws Exception {
+        assertTrue(uploadFile(TEST_FILE, NATIVE_STREAM) > 0);
+
+        // Now compare the blob & the file. Note this actually tests the
+        // InputStream implementation!
+        assertTrue(compareBlobs(TEST_FILE));
+    }
+
+    @Test
+    void markResetStream() throws Exception {
+        assertTrue(uploadFile(TEST_FILE, NATIVE_STREAM) > 0);
+
+        try (Statement stmt = con.createStatement()) {
+            try (ResultSet rs = stmt.executeQuery("SELECT lo FROM testblob where id = '/test-file.xml'")) {
+                assertTrue(rs.next());
+
+                LargeObjectManager lom = ((PGConnection) con).getLargeObjectAPI();
+
+                long oid = rs.getLong(1);
+                LargeObject blob = lom.open(oid);
+                InputStream bis = blob.getInputStream();
+
+                assertEquals('<', bis.read());
+                bis.mark(4);
+                assertEquals('?', bis.read());
+                assertEquals('x', bis.read());
+                assertEquals('m', bis.read());
+                assertEquals('l', bis.read());
+                bis.reset();
+                assertEquals('?', bis.read());
+            }
+        }
+    }
+
+    @Test
+    void getBytesOffset() throws Exception {
+        assertTrue(uploadFile(TEST_FILE, NATIVE_STREAM) > 0);
+
+        try (Statement stmt = con.createStatement()) {
+            try (ResultSet rs = stmt.executeQuery("SELECT lo FROM testblob where id = '/test-file.xml'")) {
+
+                assertTrue(rs.next());
+
+                Blob lob = rs.getBlob(1);
+                byte[] data = lob.getBytes(2, 4);
+                assertEquals(4, data.length);
+                assertEquals('?', data[0]);
+                assertEquals('x', data[1]);
+                assertEquals('m', data[2]);
+                assertEquals('l', data[3]);
+            }
+        }
+    }
+
+    @Test
+    void multipleStreams() throws Exception {
+        assertTrue(uploadFile(TEST_FILE, NATIVE_STREAM) > 0);
+
+        try (Statement stmt = con.createStatement()) {
+            try (ResultSet rs = stmt.executeQuery("SELECT lo FROM testblob where id = '/test-file.xml'")) {
+                assertTrue(rs.next());
+
+                Blob lob = rs.getBlob(1);
+                byte[] data = new byte[2];
+
+                InputStream is = lob.getBinaryStream();
+                assertEquals(data.length, is.read(data));
+                assertEquals('<', data[0]);
+                assertEquals('?', data[1]);
+                is.close();
+
+                is = lob.getBinaryStream();
+                assertEquals(data.length, is.read(data));
+                assertEquals('<', data[0]);
+                assertEquals('?', data[1]);
+                is.close();
+            }
+        }
+    }
+
+    @Test
+    void parallelStreams() throws Exception {
+        assertTrue(uploadFile(TEST_FILE, NATIVE_STREAM) > 0);
+
+        try (Statement stmt = con.createStatement()) {
+            try (ResultSet rs = stmt.executeQuery("SELECT lo FROM testblob where id = '/test-file.xml'")) {
+                assertTrue(rs.next());
+
+                Blob lob = rs.getBlob(1);
+                InputStream is1 = lob.getBinaryStream();
+                InputStream is2 = lob.getBinaryStream();
+
+                while (true) {
+                    int i1 = is1.read();
+                    int i2 = is2.read();
+                    assertEquals(i1, i2);
+                    if (i1 == -1) {
+                        break;
+                    }
+                }
+
+                is1.close();
+                is2.close();
+            }
+        }
+    }
+
+    @Test
+    void largeLargeObject() throws Exception {
+        if (!TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_3)) {
+            return;
+        }
+
+        try (Statement stmt = con.createStatement()) {
+            stmt.execute("INSERT INTO testblob(id,lo) VALUES ('1', lo_creat(-1))");
+            try (ResultSet rs = stmt.executeQuery("SELECT lo FROM testblob where id ='1'")) {
+                assertTrue(rs.next());
+
+                Blob lob = rs.getBlob(1);
+                long length = ((long) Integer.MAX_VALUE) + 1024;
+                lob.truncate(length);
+                assertEquals(length, lob.length());
+            }
+        }
+    }
+
+    @Test
+    void largeObjectRead() throws Exception {
+        con.setAutoCommit(false);
+        LargeObjectManager lom = ((PGConnection) con).getLargeObjectAPI();
+        try (Statement stmt = con.createStatement()) {
+            try (ResultSet rs = stmt.executeQuery("SELECT lo FROM testblob where id='l1'")) {
+                assertTrue(rs.next());
+
+                long oid = rs.getLong(1);
+                try (InputStream lois = lom.open(oid).getInputStream()) {
+                    // read half of the data with read
+                    for (int j = 0; j < 512; j++) {
+                        lois.read();
+                    }
+                    byte[] buf2 = new byte[512];
+                    lois.read(buf2, 0, 512);
+                }
+            }
+        }
+        con.commit();
+    }
+
+    @Test
+    void largeObjectRead1() throws Exception {
+        con.setAutoCommit(false);
+        LargeObjectManager lom = ((PGConnection) con).getLargeObjectAPI();
+        try (Statement stmt = con.createStatement()) {
+            try (ResultSet rs = stmt.executeQuery("SELECT lo FROM testblob where id='l1'")) {
+                assertTrue(rs.next());
+
+                long oid = rs.getLong(1);
+                try (InputStream lois = lom.open(oid).getInputStream(512, 1024)) {
+                    // read one byte
+                    assertEquals(0, lois.read());
+                    byte[] buf2 = new byte[1024];
+                    int bytesRead = lois.read(buf2, 0, buf2.length);
+                    assertEquals(1023, bytesRead);
+                    assertEquals(1, buf2[0]);
+                }
+            }
+        }
+        con.commit();
+    }
+
+    /*
+     * Helper - uploads a file into a blob using old style methods. We use this because it always
+     * works, and we can use it as a base to test the new methods.
+     */
+    private long uploadFile(String file, int method) throws Exception {
+        LargeObjectManager lom = ((PGConnection) con).getLargeObjectAPI();
+
+        InputStream fis = getClass().getResourceAsStream(file);
+
+        long oid = lom.createLO(LargeObjectManager.READWRITE);
+        LargeObject blob = lom.open(oid);
+
+        int s;
+        int t;
+        byte[] buf;
+        OutputStream os;
+
+        switch (method) {
+            case LOOP:
+                buf = new byte[2048];
+                t = 0;
+                while ((s = fis.read(buf, 0, buf.length)) > 0) {
+                    t += s;
+                    blob.write(buf, 0, s);
+                }
+                break;
+
+            case NATIVE_STREAM:
+                os = blob.getOutputStream();
+                s = fis.read();
+                while (s > -1) {
+                    os.write(s);
+                    s = fis.read();
+                }
+                os.close();
+                break;
+
+            default:
+                fail("Unknown method in uploadFile");
+        }
+
+        blob.close();
+        fis.close();
+
+        // Insert into the table
+        Statement st = con.createStatement();
+        st.executeUpdate(TestUtil.insertSQL("testblob", "id,lo", "'" + file + "'," + oid));
+        con.commit();
+        st.close();
+
+        return oid;
+    }
+
+    /*
+     * Helper - compares the blobs in a table with a local file. Note this uses the postgresql
+     * specific Large Object API
+     */
+    private boolean compareBlobsLOAPI(String id) throws Exception {
+        boolean result = true;
 
         LargeObjectManager lom = ((PGConnection) con).getLargeObjectAPI();
 
-        long oid = rs.getLong(1);
-        LargeObject blob = lom.open(oid);
-        InputStream bis = blob.getInputStream();
+        try (Statement st = con.createStatement()) {
+            try (ResultSet rs = st.executeQuery(TestUtil.selectSQL("testblob", "id,lo", "id = '" + id + "'"))) {
+                assertNotNull(rs);
 
-        assertEquals('<', bis.read());
-        bis.mark(4);
-        assertEquals('?', bis.read());
-        assertEquals('x', bis.read());
-        assertEquals('m', bis.read());
-        assertEquals('l', bis.read());
-        bis.reset();
-        assertEquals('?', bis.read());
-      }
+                while (rs.next()) {
+                    String file = rs.getString(1);
+                    long oid = rs.getLong(2);
+
+                    InputStream fis = getClass().getResourceAsStream(file);
+                    LargeObject blob = lom.open(oid);
+                    InputStream bis = blob.getInputStream();
+
+                    int f = fis.read();
+                    int b = bis.read();
+                    int c = 0;
+                    while (f >= 0 && b >= 0 & result) {
+                        result = f == b;
+                        f = fis.read();
+                        b = bis.read();
+                        c++;
+                    }
+                    result = result && f == -1 && b == -1;
+
+                    if (!result) {
+                        fail("Large Object API Blob compare failed at " + c + " of " + blob.size());
+                    }
+
+                    blob.close();
+                    fis.close();
+                }
+            }
+        }
+        return result;
     }
-  }
 
-  @Test
-  void getBytesOffset() throws Exception {
-    assertTrue(uploadFile(TEST_FILE, NATIVE_STREAM) > 0);
+    /*
+     * Helper - compares the blobs in a table with a local file. This uses the jdbc java.sql.Blob api
+     */
+    private boolean compareBlobs(String id) throws Exception {
+        boolean result = true;
 
-    try (Statement stmt = con.createStatement()) {
-      try (ResultSet rs = stmt.executeQuery("SELECT lo FROM testblob where id = '/test-file.xml'")) {
+        try (Statement st = con.createStatement()) {
+            try (ResultSet rs = st.executeQuery(TestUtil.selectSQL("testblob", "id,lo", "id = '" + id + "'"))) {
+                assertNotNull(rs);
 
-        assertTrue(rs.next());
+                while (rs.next()) {
+                    String file = rs.getString(1);
+                    Blob blob = rs.getBlob(2);
 
-        Blob lob = rs.getBlob(1);
-        byte[] data = lob.getBytes(2, 4);
-        assertEquals(4, data.length);
-        assertEquals('?', data[0]);
-        assertEquals('x', data[1]);
-        assertEquals('m', data[2]);
-        assertEquals('l', data[3]);
-      }
+                    InputStream fis = getClass().getResourceAsStream(file);
+                    InputStream bis = blob.getBinaryStream();
+
+                    int f = fis.read();
+                    int b = bis.read();
+                    int c = 0;
+                    while (f >= 0 && b >= 0 & result) {
+                        result = f == b;
+                        f = fis.read();
+                        b = bis.read();
+                        c++;
+                    }
+                    result = result && f == -1 && b == -1;
+
+                    if (!result) {
+                        fail("JDBC API Blob compare failed at " + c + " of " + blob.length());
+                    }
+
+                    bis.close();
+                    fis.close();
+                }
+            }
+        }
+        return result;
     }
-  }
 
-  @Test
-  void multipleStreams() throws Exception {
-    assertTrue(uploadFile(TEST_FILE, NATIVE_STREAM) > 0);
+    /*
+     * Helper - compares the clobs in a table with a local file.
+     */
+    private boolean compareClobs(String id) throws Exception {
+        boolean result = true;
 
-    try (Statement stmt = con.createStatement()) {
-      try (ResultSet rs = stmt.executeQuery("SELECT lo FROM testblob where id = '/test-file.xml'")) {
-        assertTrue(rs.next());
+        try (Statement st = con.createStatement()) {
+            try (ResultSet rs = st.executeQuery(TestUtil.selectSQL("testblob", "id,lo", "id = '" + id + "'"))) {
+                assertNotNull(rs);
 
-        Blob lob = rs.getBlob(1);
-        byte[] data = new byte[2];
+                while (rs.next()) {
+                    String file = rs.getString(1);
+                    Clob clob = rs.getClob(2);
 
-        InputStream is = lob.getBinaryStream();
-        assertEquals(data.length, is.read(data));
-        assertEquals('<', data[0]);
-        assertEquals('?', data[1]);
-        is.close();
+                    InputStream fis = getClass().getResourceAsStream(file);
+                    InputStream bis = clob.getAsciiStream();
 
-        is = lob.getBinaryStream();
-        assertEquals(data.length, is.read(data));
-        assertEquals('<', data[0]);
-        assertEquals('?', data[1]);
-        is.close();
-      }
-    }
-  }
+                    int f = fis.read();
+                    int b = bis.read();
+                    int c = 0;
+                    while (f >= 0 && b >= 0 & result) {
+                        result = f == b;
+                        f = fis.read();
+                        b = bis.read();
+                        c++;
+                    }
+                    result = result && f == -1 && b == -1;
 
-  @Test
-  void parallelStreams() throws Exception {
-    assertTrue(uploadFile(TEST_FILE, NATIVE_STREAM) > 0);
+                    if (!result) {
+                        fail("Clob compare failed at " + c + " of " + clob.length());
+                    }
 
-    try (Statement stmt = con.createStatement()) {
-      try (ResultSet rs = stmt.executeQuery("SELECT lo FROM testblob where id = '/test-file.xml'")) {
-        assertTrue(rs.next());
-
-        Blob lob = rs.getBlob(1);
-        InputStream is1 = lob.getBinaryStream();
-        InputStream is2 = lob.getBinaryStream();
-
-        while (true) {
-          int i1 = is1.read();
-          int i2 = is2.read();
-          assertEquals(i1, i2);
-          if (i1 == -1) {
-            break;
-          }
+                    bis.close();
+                    fis.close();
+                }
+            }
         }
 
-        is1.close();
-        is2.close();
-      }
+        return result;
     }
-  }
-
-  @Test
-  void largeLargeObject() throws Exception {
-    if (!TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_3)) {
-      return;
-    }
-
-    try (Statement stmt = con.createStatement()) {
-      stmt.execute("INSERT INTO testblob(id,lo) VALUES ('1', lo_creat(-1))");
-      try (ResultSet rs = stmt.executeQuery("SELECT lo FROM testblob where id ='1'")) {
-        assertTrue(rs.next());
-
-        Blob lob = rs.getBlob(1);
-        long length = ((long) Integer.MAX_VALUE) + 1024;
-        lob.truncate(length);
-        assertEquals(length, lob.length());
-      }
-    }
-  }
-
-  @Test
-  void largeObjectRead() throws Exception {
-    con.setAutoCommit(false);
-    LargeObjectManager lom = ((PGConnection) con).getLargeObjectAPI();
-    try (Statement stmt = con.createStatement()) {
-      try (ResultSet rs = stmt.executeQuery("SELECT lo FROM testblob where id='l1'")) {
-        assertTrue(rs.next());
-
-        long oid = rs.getLong(1);
-        try (InputStream lois = lom.open(oid).getInputStream()) {
-          // read half of the data with read
-          for (int j = 0; j < 512; j++) {
-            lois.read();
-          }
-          byte[] buf2 = new byte[512];
-          lois.read(buf2, 0, 512);
-        }
-      }
-    }
-    con.commit();
-  }
-
-  @Test
-  void largeObjectRead1() throws Exception {
-    con.setAutoCommit(false);
-    LargeObjectManager lom = ((PGConnection) con).getLargeObjectAPI();
-    try (Statement stmt = con.createStatement()) {
-      try (ResultSet rs = stmt.executeQuery("SELECT lo FROM testblob where id='l1'")) {
-        assertTrue(rs.next());
-
-        long oid = rs.getLong(1);
-        try (InputStream lois = lom.open(oid).getInputStream(512, 1024)) {
-          // read one byte
-          assertEquals(0, lois.read());
-          byte[] buf2 = new byte[1024];
-          int bytesRead = lois.read(buf2, 0, buf2.length);
-          assertEquals(1023, bytesRead);
-          assertEquals(1, buf2[0]);
-        }
-      }
-    }
-    con.commit();
-  }
-
-  /*
-   * Helper - uploads a file into a blob using old style methods. We use this because it always
-   * works, and we can use it as a base to test the new methods.
-   */
-  private long uploadFile(String file, int method) throws Exception {
-    LargeObjectManager lom = ((PGConnection) con).getLargeObjectAPI();
-
-    InputStream fis = getClass().getResourceAsStream(file);
-
-    long oid = lom.createLO(LargeObjectManager.READWRITE);
-    LargeObject blob = lom.open(oid);
-
-    int s;
-    int t;
-    byte[] buf;
-    OutputStream os;
-
-    switch (method) {
-      case LOOP:
-        buf = new byte[2048];
-        t = 0;
-        while ((s = fis.read(buf, 0, buf.length)) > 0) {
-          t += s;
-          blob.write(buf, 0, s);
-        }
-        break;
-
-      case NATIVE_STREAM:
-        os = blob.getOutputStream();
-        s = fis.read();
-        while (s > -1) {
-          os.write(s);
-          s = fis.read();
-        }
-        os.close();
-        break;
-
-      default:
-        fail("Unknown method in uploadFile");
-    }
-
-    blob.close();
-    fis.close();
-
-    // Insert into the table
-    Statement st = con.createStatement();
-    st.executeUpdate(TestUtil.insertSQL("testblob", "id,lo", "'" + file + "'," + oid));
-    con.commit();
-    st.close();
-
-    return oid;
-  }
-
-  /*
-   * Helper - compares the blobs in a table with a local file. Note this uses the postgresql
-   * specific Large Object API
-   */
-  private boolean compareBlobsLOAPI(String id) throws Exception {
-    boolean result = true;
-
-    LargeObjectManager lom = ((PGConnection) con).getLargeObjectAPI();
-
-    try (Statement st = con.createStatement()) {
-      try (ResultSet rs = st.executeQuery(TestUtil.selectSQL("testblob", "id,lo", "id = '" + id + "'"))) {
-        assertNotNull(rs);
-
-        while (rs.next()) {
-          String file = rs.getString(1);
-          long oid = rs.getLong(2);
-
-          InputStream fis = getClass().getResourceAsStream(file);
-          LargeObject blob = lom.open(oid);
-          InputStream bis = blob.getInputStream();
-
-          int f = fis.read();
-          int b = bis.read();
-          int c = 0;
-          while (f >= 0 && b >= 0 & result) {
-            result = f == b;
-            f = fis.read();
-            b = bis.read();
-            c++;
-          }
-          result = result && f == -1 && b == -1;
-
-          if (!result) {
-            fail("Large Object API Blob compare failed at " + c + " of " + blob.size());
-          }
-
-          blob.close();
-          fis.close();
-        }
-      }
-    }
-    return result;
-  }
-
-  /*
-   * Helper - compares the blobs in a table with a local file. This uses the jdbc java.sql.Blob api
-   */
-  private boolean compareBlobs(String id) throws Exception {
-    boolean result = true;
-
-    try (Statement st = con.createStatement()) {
-      try (ResultSet rs = st.executeQuery(TestUtil.selectSQL("testblob", "id,lo", "id = '" + id + "'"))) {
-        assertNotNull(rs);
-
-        while (rs.next()) {
-          String file = rs.getString(1);
-          Blob blob = rs.getBlob(2);
-
-          InputStream fis = getClass().getResourceAsStream(file);
-          InputStream bis = blob.getBinaryStream();
-
-          int f = fis.read();
-          int b = bis.read();
-          int c = 0;
-          while (f >= 0 && b >= 0 & result) {
-            result = f == b;
-            f = fis.read();
-            b = bis.read();
-            c++;
-          }
-          result = result && f == -1 && b == -1;
-
-          if (!result) {
-            fail("JDBC API Blob compare failed at " + c + " of " + blob.length());
-          }
-
-          bis.close();
-          fis.close();
-        }
-      }
-    }
-    return result;
-  }
-
-  /*
-   * Helper - compares the clobs in a table with a local file.
-   */
-  private boolean compareClobs(String id) throws Exception {
-    boolean result = true;
-
-    try (Statement st = con.createStatement()) {
-      try (ResultSet rs = st.executeQuery(TestUtil.selectSQL("testblob", "id,lo", "id = '" + id + "'"))) {
-        assertNotNull(rs);
-
-        while (rs.next()) {
-          String file = rs.getString(1);
-          Clob clob = rs.getClob(2);
-
-          InputStream fis = getClass().getResourceAsStream(file);
-          InputStream bis = clob.getAsciiStream();
-
-          int f = fis.read();
-          int b = bis.read();
-          int c = 0;
-          while (f >= 0 && b >= 0 & result) {
-            result = f == b;
-            f = fis.read();
-            b = bis.read();
-            c++;
-          }
-          result = result && f == -1 && b == -1;
-
-          if (!result) {
-            fail("Clob compare failed at " + c + " of " + clob.length());
-          }
-
-          bis.close();
-          fis.close();
-        }
-      }
-    }
-
-    return result;
-  }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/BlobTransactionTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/BlobTransactionTest.java
index 281874d..b578249 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/BlobTransactionTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/BlobTransactionTest.java
@@ -32,147 +32,147 @@ import javax.sql.rowset.serial.SerialBlob;
  * Require the lo module accessible in $libdir
  */
 class BlobTransactionTest {
-  private Connection con;
-  private Connection con2;
+    private Connection con;
+    private Connection con2;
 
-  @BeforeEach
-  void setUp() throws Exception {
-    con = TestUtil.openDB();
-    con.setTransactionIsolation(Connection.TRANSACTION_SERIALIZABLE);
-    con2 = TestUtil.openDB();
-    con2.setTransactionIsolation(Connection.TRANSACTION_SERIALIZABLE);
+    @BeforeEach
+    void setUp() throws Exception {
+        con = TestUtil.openDB();
+        con.setTransactionIsolation(Connection.TRANSACTION_SERIALIZABLE);
+        con2 = TestUtil.openDB();
+        con2.setTransactionIsolation(Connection.TRANSACTION_SERIALIZABLE);
 
-    TestUtil.createTable(con, "testblob", "id name,lo oid");
+        TestUtil.createTable(con, "testblob", "id name,lo oid");
 
-    String sql;
+        String sql;
 
-    /*
-     * this would have to be executed using the postgres user in order to get access to a C function
-     *
-     */
-    Connection privilegedCon = TestUtil.openPrivilegedDB();
-    Statement st = privilegedCon.createStatement();
-    try {
-      sql =
-          "CREATE OR REPLACE FUNCTION lo_manage() RETURNS pg_catalog.trigger AS '$libdir/lo' LANGUAGE C";
-      st.executeUpdate(sql);
-    } finally {
-      st.close();
-    }
-
-    st = privilegedCon.createStatement();
-    try {
-      sql =
-          "CREATE TRIGGER testblob_lomanage BEFORE UPDATE OR DELETE ON testblob FOR EACH ROW EXECUTE PROCEDURE lo_manage(lo)";
-      st.executeUpdate(sql);
-    } finally {
-      st.close();
-    }
-    TestUtil.closeDB(privilegedCon);
-
-    con.setAutoCommit(false);
-    con2.setAutoCommit(false);
-  }
-
-  @AfterEach
-  void tearDown() throws Exception {
-    TestUtil.closeDB(con2);
-
-    con.setAutoCommit(true);
-    try {
-      Statement stmt = con.createStatement();
-      try {
-        stmt.execute("SELECT lo_unlink(lo) FROM testblob");
-      } finally {
+        /*
+         * this would have to be executed using the postgres user in order to get access to a C function
+         *
+         */
+        Connection privilegedCon = TestUtil.openPrivilegedDB();
+        Statement st = privilegedCon.createStatement();
         try {
-          stmt.close();
-        } catch (Exception e) {
+            sql =
+                    "CREATE OR REPLACE FUNCTION lo_manage() RETURNS pg_catalog.trigger AS '$libdir/lo' LANGUAGE C";
+            st.executeUpdate(sql);
+        } finally {
+            st.close();
         }
-      }
-    } finally {
-      TestUtil.dropTable(con, "testblob");
-      TestUtil.closeDB(con);
-    }
-  }
 
-  private byte[] randomData() {
-    byte[] data = new byte[64 * 1024 * 8];
-    for (int i = 0; i < data.length; i++) {
-      data[i] = (byte) (Math.random() * 256);
-    }
-    return data;
-  }
+        st = privilegedCon.createStatement();
+        try {
+            sql =
+                    "CREATE TRIGGER testblob_lomanage BEFORE UPDATE OR DELETE ON testblob FOR EACH ROW EXECUTE PROCEDURE lo_manage(lo)";
+            st.executeUpdate(sql);
+        } finally {
+            st.close();
+        }
+        TestUtil.closeDB(privilegedCon);
 
-  private byte[] readInputStream(InputStream is) throws IOException {
-    byte[] result = new byte[1024];
-    int readPos = 0;
-    int d;
-    while ((d = is.read()) != -1) {
-      if (readPos == result.length) {
-        result = Arrays.copyOf(result, result.length * 2);
-      }
-      result[readPos++] = (byte) d;
+        con.setAutoCommit(false);
+        con2.setAutoCommit(false);
     }
 
-    return Arrays.copyOf(result, readPos);
-  }
+    @AfterEach
+    void tearDown() throws Exception {
+        TestUtil.closeDB(con2);
 
-  @Test
-  void concurrentReplace() throws SQLException, IOException {
-    // Statement stmt = con.createStatement();
-    // stmt.execute("INSERT INTO testblob(id,lo) VALUES ('1', lo_creat(-1))");
-    // ResultSet rs = stmt.executeQuery("SELECT lo FROM testblob");
-    // assertTrue(rs.next());
-
-    PreparedStatement pstmt = con.prepareStatement("INSERT INTO testblob(id, lo) VALUES(?,?)");
-
-    byte[] initialData = randomData();
-
-    pstmt.setString(1, "testConcurrentReplace");
-    pstmt.setObject(2, new SerialBlob(initialData), Types.BLOB);
-    assertEquals(1, pstmt.executeUpdate());
-
-    con.commit();
-
-    con2.rollback();
-
-    // con2 access the blob
-    PreparedStatement pstmt2 = con2.prepareStatement("SELECT lo FROM testblob WHERE id=?");
-    pstmt2.setString(1, "testConcurrentReplace");
-    ResultSet rs2 = pstmt2.executeQuery();
-    assertTrue(rs2.next());
-
-    // con replace the blob
-    byte[] newData = randomData();
-    pstmt = con.prepareStatement("UPDATE testblob SET lo=? where id=?");
-    pstmt.setObject(1, new SerialBlob(newData), Types.BLOB);
-    pstmt.setString(2, "testConcurrentReplace");
-    assertEquals(1, pstmt.executeUpdate());
-
-    // con2 read the blob content
-    Blob initContentBlob = rs2.getBlob(1);
-    byte[] initialContentReRead = readInputStream(initContentBlob.getBinaryStream());
-    assertEquals(initialContentReRead.length, initialData.length);
-    for (int i = 0; i < initialContentReRead.length; i++) {
-      assertEquals(initialContentReRead[i], initialData[i]);
+        con.setAutoCommit(true);
+        try {
+            Statement stmt = con.createStatement();
+            try {
+                stmt.execute("SELECT lo_unlink(lo) FROM testblob");
+            } finally {
+                try {
+                    stmt.close();
+                } catch (Exception e) {
+                }
+            }
+        } finally {
+            TestUtil.dropTable(con, "testblob");
+            TestUtil.closeDB(con);
+        }
     }
 
-    con2.rollback();
-    pstmt2 = con2.prepareStatement("SELECT lo FROM testblob WHERE id=?");
-    pstmt2.setString(1, "testConcurrentReplace");
-    rs2 = pstmt2.executeQuery();
-    assertTrue(rs2.next());
-
-    // con commit
-    con.commit();
-
-    initContentBlob = rs2.getBlob(1);
-    initialContentReRead = readInputStream(initContentBlob.getBinaryStream());
-    assertEquals(initialContentReRead.length, initialData.length);
-    for (int i = 0; i < initialContentReRead.length; i++) {
-      assertEquals(initialContentReRead[i], initialData[i]);
+    private byte[] randomData() {
+        byte[] data = new byte[64 * 1024 * 8];
+        for (int i = 0; i < data.length; i++) {
+            data[i] = (byte) (Math.random() * 256);
+        }
+        return data;
     }
 
-    con2.commit();
-  }
+    private byte[] readInputStream(InputStream is) throws IOException {
+        byte[] result = new byte[1024];
+        int readPos = 0;
+        int d;
+        while ((d = is.read()) != -1) {
+            if (readPos == result.length) {
+                result = Arrays.copyOf(result, result.length * 2);
+            }
+            result[readPos++] = (byte) d;
+        }
+
+        return Arrays.copyOf(result, readPos);
+    }
+
+    @Test
+    void concurrentReplace() throws SQLException, IOException {
+        // Statement stmt = con.createStatement();
+        // stmt.execute("INSERT INTO testblob(id,lo) VALUES ('1', lo_creat(-1))");
+        // ResultSet rs = stmt.executeQuery("SELECT lo FROM testblob");
+        // assertTrue(rs.next());
+
+        PreparedStatement pstmt = con.prepareStatement("INSERT INTO testblob(id, lo) VALUES(?,?)");
+
+        byte[] initialData = randomData();
+
+        pstmt.setString(1, "testConcurrentReplace");
+        pstmt.setObject(2, new SerialBlob(initialData), Types.BLOB);
+        assertEquals(1, pstmt.executeUpdate());
+
+        con.commit();
+
+        con2.rollback();
+
+        // con2 access the blob
+        PreparedStatement pstmt2 = con2.prepareStatement("SELECT lo FROM testblob WHERE id=?");
+        pstmt2.setString(1, "testConcurrentReplace");
+        ResultSet rs2 = pstmt2.executeQuery();
+        assertTrue(rs2.next());
+
+        // con replace the blob
+        byte[] newData = randomData();
+        pstmt = con.prepareStatement("UPDATE testblob SET lo=? where id=?");
+        pstmt.setObject(1, new SerialBlob(newData), Types.BLOB);
+        pstmt.setString(2, "testConcurrentReplace");
+        assertEquals(1, pstmt.executeUpdate());
+
+        // con2 read the blob content
+        Blob initContentBlob = rs2.getBlob(1);
+        byte[] initialContentReRead = readInputStream(initContentBlob.getBinaryStream());
+        assertEquals(initialContentReRead.length, initialData.length);
+        for (int i = 0; i < initialContentReRead.length; i++) {
+            assertEquals(initialContentReRead[i], initialData[i]);
+        }
+
+        con2.rollback();
+        pstmt2 = con2.prepareStatement("SELECT lo FROM testblob WHERE id=?");
+        pstmt2.setString(1, "testConcurrentReplace");
+        rs2 = pstmt2.executeQuery();
+        assertTrue(rs2.next());
+
+        // con commit
+        con.commit();
+
+        initContentBlob = rs2.getBlob(1);
+        initialContentReRead = readInputStream(initContentBlob.getBinaryStream());
+        assertEquals(initialContentReRead.length, initialData.length);
+        for (int i = 0; i < initialContentReRead.length; i++) {
+            assertEquals(initialContentReRead[i], initialData[i]);
+        }
+
+        con2.commit();
+    }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/CallableStmtTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/CallableStmtTest.java
index fa4d0e7..7fcde43 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/CallableStmtTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/CallableStmtTest.java
@@ -31,276 +31,276 @@ import java.sql.Types;
  * @author Paul Bethe
  */
 public class CallableStmtTest extends BaseTest4 {
-  @BeforeClass
-  public static void beforeClass() throws Exception {
-    try (Connection con = TestUtil.openDB()) {
-      assumeCallableStatementsSupported(con);
+    final String func = "{ ? = call ";
+    final String pkgName = "testspg__";
+
+    @BeforeClass
+    public static void beforeClass() throws Exception {
+        try (Connection con = TestUtil.openDB()) {
+            assumeCallableStatementsSupported(con);
+        }
     }
-  }
 
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-    TestUtil.createTable(con, "int_table", "id int");
-    Statement stmt = con.createStatement();
-    stmt.execute(
-        "CREATE OR REPLACE FUNCTION testspg__getString (varchar) "
-        + "RETURNS varchar AS ' DECLARE inString alias for $1; begin "
-        + "return ''bob''; end; ' LANGUAGE plpgsql;");
-    stmt.execute(
-        "CREATE OR REPLACE FUNCTION testspg__getDouble (float) "
-        + "RETURNS float AS ' DECLARE inString alias for $1; begin "
-        + "return 42.42; end; ' LANGUAGE plpgsql;");
-    stmt.execute(
-        "CREATE OR REPLACE FUNCTION testspg__getVoid (float) "
-        + "RETURNS void AS ' DECLARE inString alias for $1; begin "
-        + " return; end; ' LANGUAGE plpgsql;");
-    stmt.execute(
-        "CREATE OR REPLACE FUNCTION testspg__getInt (int) RETURNS int "
-        + " AS 'DECLARE inString alias for $1; begin "
-        + "return 42; end;' LANGUAGE plpgsql;");
-    stmt.execute(
-        "CREATE OR REPLACE FUNCTION testspg__getShort (int2) RETURNS int2 "
-        + " AS 'DECLARE inString alias for $1; begin "
-        + "return 42; end;' LANGUAGE plpgsql;");
-    stmt.execute(
-        "CREATE OR REPLACE FUNCTION testspg__getNumeric (numeric) "
-        + "RETURNS numeric AS ' DECLARE inString alias for $1; "
-        + "begin return 42; end; ' LANGUAGE plpgsql;");
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        TestUtil.createTable(con, "int_table", "id int");
+        Statement stmt = con.createStatement();
+        stmt.execute(
+                "CREATE OR REPLACE FUNCTION testspg__getString (varchar) "
+                        + "RETURNS varchar AS ' DECLARE inString alias for $1; begin "
+                        + "return ''bob''; end; ' LANGUAGE plpgsql;");
+        stmt.execute(
+                "CREATE OR REPLACE FUNCTION testspg__getDouble (float) "
+                        + "RETURNS float AS ' DECLARE inString alias for $1; begin "
+                        + "return 42.42; end; ' LANGUAGE plpgsql;");
+        stmt.execute(
+                "CREATE OR REPLACE FUNCTION testspg__getVoid (float) "
+                        + "RETURNS void AS ' DECLARE inString alias for $1; begin "
+                        + " return; end; ' LANGUAGE plpgsql;");
+        stmt.execute(
+                "CREATE OR REPLACE FUNCTION testspg__getInt (int) RETURNS int "
+                        + " AS 'DECLARE inString alias for $1; begin "
+                        + "return 42; end;' LANGUAGE plpgsql;");
+        stmt.execute(
+                "CREATE OR REPLACE FUNCTION testspg__getShort (int2) RETURNS int2 "
+                        + " AS 'DECLARE inString alias for $1; begin "
+                        + "return 42; end;' LANGUAGE plpgsql;");
+        stmt.execute(
+                "CREATE OR REPLACE FUNCTION testspg__getNumeric (numeric) "
+                        + "RETURNS numeric AS ' DECLARE inString alias for $1; "
+                        + "begin return 42; end; ' LANGUAGE plpgsql;");
 
-    stmt.execute(
-        "CREATE OR REPLACE FUNCTION testspg__getNumericWithoutArg() "
-        + "RETURNS numeric AS '  "
-        + "begin return 42; end; ' LANGUAGE plpgsql;");
-    stmt.execute(
-        "CREATE OR REPLACE FUNCTION testspg__getarray() RETURNS int[] as "
-        + "'SELECT ''{1,2}''::int[];' LANGUAGE sql");
-    stmt.execute(
-        "CREATE OR REPLACE FUNCTION testspg__raisenotice() RETURNS int as "
-        + "'BEGIN RAISE NOTICE ''hello'';  RAISE NOTICE ''goodbye''; RETURN 1; END;' LANGUAGE plpgsql");
-    stmt.execute(
-        "CREATE OR REPLACE FUNCTION testspg__insertInt(int) RETURNS int as "
-        + "'BEGIN INSERT INTO int_table(id) VALUES ($1); RETURN 1; END;' LANGUAGE plpgsql");
-    stmt.close();
-  }
-
-  @Override
-  public void tearDown() throws SQLException {
-    Statement stmt = con.createStatement();
-    TestUtil.dropTable(con, "int_table");
-    stmt.execute("drop FUNCTION testspg__getString (varchar);");
-    stmt.execute("drop FUNCTION testspg__getDouble (float);");
-    stmt.execute("drop FUNCTION testspg__getVoid(float);");
-    stmt.execute("drop FUNCTION testspg__getInt (int);");
-    stmt.execute("drop FUNCTION testspg__getShort(int2)");
-    stmt.execute("drop FUNCTION testspg__getNumeric (numeric);");
-
-    stmt.execute("drop FUNCTION testspg__getNumericWithoutArg ();");
-    stmt.execute("DROP FUNCTION testspg__getarray();");
-    stmt.execute("DROP FUNCTION testspg__raisenotice();");
-    stmt.execute("DROP FUNCTION testspg__insertInt(int);");
-    super.tearDown();
-  }
-
-  final String func = "{ ? = call ";
-  final String pkgName = "testspg__";
-
-  @Test
-  public void testGetUpdateCount() throws SQLException {
-    CallableStatement call = con.prepareCall(func + pkgName + "getDouble (?) }");
-    call.setDouble(2, 3.04);
-    call.registerOutParameter(1, Types.DOUBLE);
-    call.execute();
-    assertEquals(-1, call.getUpdateCount());
-    assertNull(call.getResultSet());
-    assertEquals(42.42, call.getDouble(1), 0.00001);
-    call.close();
-
-    // test without an out parameter
-    call = con.prepareCall("{ call " + pkgName + "getDouble(?) }");
-    call.setDouble(1, 3.04);
-    call.execute();
-    assertEquals(-1, call.getUpdateCount());
-    ResultSet rs = call.getResultSet();
-    assertNotNull(rs);
-    assertTrue(rs.next());
-    assertEquals(42.42, rs.getDouble(1), 0.00001);
-    assertTrue(!rs.next());
-    rs.close();
-
-    assertEquals(-1, call.getUpdateCount());
-    assertTrue(!call.getMoreResults());
-    call.close();
-  }
-
-  @Test
-  public void testGetDouble() throws Throwable {
-    CallableStatement call = con.prepareCall(func + pkgName + "getDouble (?) }");
-    call.setDouble(2, 3.04);
-    call.registerOutParameter(1, Types.DOUBLE);
-    call.execute();
-    assertEquals(42.42, call.getDouble(1), 0.00001);
-
-    // test without an out parameter
-    call = con.prepareCall("{ call " + pkgName + "getDouble(?) }");
-    call.setDouble(1, 3.04);
-    call.execute();
-
-    call = con.prepareCall("{ call " + pkgName + "getVoid(?) }");
-    call.setDouble(1, 3.04);
-    call.execute();
-  }
-
-  @Test
-  public void testGetInt() throws Throwable {
-    CallableStatement call = con.prepareCall(func + pkgName + "getInt (?) }");
-    call.setInt(2, 4);
-    call.registerOutParameter(1, Types.INTEGER);
-    call.execute();
-    assertEquals(42, call.getInt(1));
-  }
-
-  @Test
-  public void testGetShort() throws Throwable {
-    CallableStatement call = con.prepareCall(func + pkgName + "getShort (?) }");
-    call.setShort(2, (short) 4);
-    call.registerOutParameter(1, Types.SMALLINT);
-    call.execute();
-    assertEquals(42, call.getShort(1));
-  }
-
-  @Test
-  public void testGetNumeric() throws Throwable {
-    CallableStatement call = con.prepareCall(func + pkgName + "getNumeric (?) }");
-    call.setBigDecimal(2, new java.math.BigDecimal(4));
-    call.registerOutParameter(1, Types.NUMERIC);
-    call.execute();
-    assertEquals(new java.math.BigDecimal(42), call.getBigDecimal(1));
-  }
-
-  @Test
-  public void testGetNumericWithoutArg() throws Throwable {
-    CallableStatement call = con.prepareCall(func + pkgName + "getNumericWithoutArg () }");
-    call.registerOutParameter(1, Types.NUMERIC);
-    call.execute();
-    assertEquals(new java.math.BigDecimal(42), call.getBigDecimal(1));
-  }
-
-  @Test
-  public void testGetString() throws Throwable {
-    CallableStatement call = con.prepareCall(func + pkgName + "getString (?) }");
-    call.setString(2, "foo");
-    call.registerOutParameter(1, Types.VARCHAR);
-    call.execute();
-    assertEquals("bob", call.getString(1));
-
-  }
-
-  @Test
-  public void testGetArray() throws SQLException {
-    CallableStatement call = con.prepareCall(func + pkgName + "getarray()}");
-    call.registerOutParameter(1, Types.ARRAY);
-    call.execute();
-    Array arr = call.getArray(1);
-    ResultSet rs = arr.getResultSet();
-    assertTrue(rs.next());
-    assertEquals(1, rs.getInt(1));
-    assertTrue(rs.next());
-    assertEquals(2, rs.getInt(1));
-    assertTrue(!rs.next());
-  }
-
-  @Test
-  public void testRaiseNotice() throws SQLException {
-    Statement statement = con.createStatement();
-    statement.execute("SET SESSION client_min_messages = 'NOTICE'");
-    CallableStatement call = con.prepareCall(func + pkgName + "raisenotice()}");
-    call.registerOutParameter(1, Types.INTEGER);
-    call.execute();
-    SQLWarning warn = call.getWarnings();
-    assertNotNull(warn);
-    assertEquals("hello", warn.getMessage());
-    warn = warn.getNextWarning();
-    assertNotNull(warn);
-    assertEquals("goodbye", warn.getMessage());
-    assertEquals(1, call.getInt(1));
-  }
-
-  @Test
-  public void testWasNullBeforeFetch() throws SQLException {
-    CallableStatement cs = con.prepareCall("{? = call lower(?)}");
-    cs.registerOutParameter(1, Types.VARCHAR);
-    cs.setString(2, "Hi");
-    try {
-      cs.wasNull();
-      fail("expected exception");
-    } catch (Exception e) {
-      assertTrue(e instanceof SQLException);
+        stmt.execute(
+                "CREATE OR REPLACE FUNCTION testspg__getNumericWithoutArg() "
+                        + "RETURNS numeric AS '  "
+                        + "begin return 42; end; ' LANGUAGE plpgsql;");
+        stmt.execute(
+                "CREATE OR REPLACE FUNCTION testspg__getarray() RETURNS int[] as "
+                        + "'SELECT ''{1,2}''::int[];' LANGUAGE sql");
+        stmt.execute(
+                "CREATE OR REPLACE FUNCTION testspg__raisenotice() RETURNS int as "
+                        + "'BEGIN RAISE NOTICE ''hello'';  RAISE NOTICE ''goodbye''; RETURN 1; END;' LANGUAGE plpgsql");
+        stmt.execute(
+                "CREATE OR REPLACE FUNCTION testspg__insertInt(int) RETURNS int as "
+                        + "'BEGIN INSERT INTO int_table(id) VALUES ($1); RETURN 1; END;' LANGUAGE plpgsql");
+        stmt.close();
     }
-  }
 
-  @Test
-  public void testFetchBeforeExecute() throws SQLException {
-    CallableStatement cs = con.prepareCall("{? = call lower(?)}");
-    cs.registerOutParameter(1, Types.VARCHAR);
-    cs.setString(2, "Hi");
-    try {
-      cs.getString(1);
-      fail("expected exception");
-    } catch (Exception e) {
-      assertTrue(e instanceof SQLException);
+    @Override
+    public void tearDown() throws SQLException {
+        Statement stmt = con.createStatement();
+        TestUtil.dropTable(con, "int_table");
+        stmt.execute("drop FUNCTION testspg__getString (varchar);");
+        stmt.execute("drop FUNCTION testspg__getDouble (float);");
+        stmt.execute("drop FUNCTION testspg__getVoid(float);");
+        stmt.execute("drop FUNCTION testspg__getInt (int);");
+        stmt.execute("drop FUNCTION testspg__getShort(int2)");
+        stmt.execute("drop FUNCTION testspg__getNumeric (numeric);");
+
+        stmt.execute("drop FUNCTION testspg__getNumericWithoutArg ();");
+        stmt.execute("DROP FUNCTION testspg__getarray();");
+        stmt.execute("DROP FUNCTION testspg__raisenotice();");
+        stmt.execute("DROP FUNCTION testspg__insertInt(int);");
+        super.tearDown();
     }
-  }
 
-  @Test
-  public void testFetchWithNoResults() throws SQLException {
-    CallableStatement cs = con.prepareCall("{call now()}");
-    cs.execute();
-    try {
-      cs.getObject(1);
-      fail("expected exception");
-    } catch (Exception e) {
-      assertTrue(e instanceof SQLException);
+    @Test
+    public void testGetUpdateCount() throws SQLException {
+        CallableStatement call = con.prepareCall(func + pkgName + "getDouble (?) }");
+        call.setDouble(2, 3.04);
+        call.registerOutParameter(1, Types.DOUBLE);
+        call.execute();
+        assertEquals(-1, call.getUpdateCount());
+        assertNull(call.getResultSet());
+        assertEquals(42.42, call.getDouble(1), 0.00001);
+        call.close();
+
+        // test without an out parameter
+        call = con.prepareCall("{ call " + pkgName + "getDouble(?) }");
+        call.setDouble(1, 3.04);
+        call.execute();
+        assertEquals(-1, call.getUpdateCount());
+        ResultSet rs = call.getResultSet();
+        assertNotNull(rs);
+        assertTrue(rs.next());
+        assertEquals(42.42, rs.getDouble(1), 0.00001);
+        assertTrue(!rs.next());
+        rs.close();
+
+        assertEquals(-1, call.getUpdateCount());
+        assertTrue(!call.getMoreResults());
+        call.close();
     }
-  }
 
-  @Test
-  public void testBadStmt() throws Throwable {
-    tryOneBadStmt("{ ?= " + pkgName + "getString (?) }");
-    tryOneBadStmt("{ ?= call getString (?) ");
-    tryOneBadStmt("{ = ? call getString (?); }");
-  }
+    @Test
+    public void testGetDouble() throws Throwable {
+        CallableStatement call = con.prepareCall(func + pkgName + "getDouble (?) }");
+        call.setDouble(2, 3.04);
+        call.registerOutParameter(1, Types.DOUBLE);
+        call.execute();
+        assertEquals(42.42, call.getDouble(1), 0.00001);
 
-  protected void tryOneBadStmt(String sql) throws SQLException {
-    try {
-      con.prepareCall(sql);
-      fail("Bad statement (" + sql + ") was not caught.");
+        // test without an out parameter
+        call = con.prepareCall("{ call " + pkgName + "getDouble(?) }");
+        call.setDouble(1, 3.04);
+        call.execute();
 
-    } catch (SQLException e) {
+        call = con.prepareCall("{ call " + pkgName + "getVoid(?) }");
+        call.setDouble(1, 3.04);
+        call.execute();
     }
-  }
 
-  @Test
-  public void testBatchCall() throws SQLException {
-    CallableStatement call = con.prepareCall("{ call " + pkgName + "insertInt(?) }");
-    call.setInt(1, 1);
-    call.addBatch();
-    call.setInt(1, 2);
-    call.addBatch();
-    call.setInt(1, 3);
-    call.addBatch();
-    call.executeBatch();
-    call.close();
+    @Test
+    public void testGetInt() throws Throwable {
+        CallableStatement call = con.prepareCall(func + pkgName + "getInt (?) }");
+        call.setInt(2, 4);
+        call.registerOutParameter(1, Types.INTEGER);
+        call.execute();
+        assertEquals(42, call.getInt(1));
+    }
 
-    Statement stmt = con.createStatement();
-    ResultSet rs = stmt.executeQuery("SELECT id FROM int_table ORDER BY id");
-    assertTrue(rs.next());
-    assertEquals(1, rs.getInt(1));
-    assertTrue(rs.next());
-    assertEquals(2, rs.getInt(1));
-    assertTrue(rs.next());
-    assertEquals(3, rs.getInt(1));
-    assertTrue(!rs.next());
-  }
+    @Test
+    public void testGetShort() throws Throwable {
+        CallableStatement call = con.prepareCall(func + pkgName + "getShort (?) }");
+        call.setShort(2, (short) 4);
+        call.registerOutParameter(1, Types.SMALLINT);
+        call.execute();
+        assertEquals(42, call.getShort(1));
+    }
+
+    @Test
+    public void testGetNumeric() throws Throwable {
+        CallableStatement call = con.prepareCall(func + pkgName + "getNumeric (?) }");
+        call.setBigDecimal(2, new java.math.BigDecimal(4));
+        call.registerOutParameter(1, Types.NUMERIC);
+        call.execute();
+        assertEquals(new java.math.BigDecimal(42), call.getBigDecimal(1));
+    }
+
+    @Test
+    public void testGetNumericWithoutArg() throws Throwable {
+        CallableStatement call = con.prepareCall(func + pkgName + "getNumericWithoutArg () }");
+        call.registerOutParameter(1, Types.NUMERIC);
+        call.execute();
+        assertEquals(new java.math.BigDecimal(42), call.getBigDecimal(1));
+    }
+
+    @Test
+    public void testGetString() throws Throwable {
+        CallableStatement call = con.prepareCall(func + pkgName + "getString (?) }");
+        call.setString(2, "foo");
+        call.registerOutParameter(1, Types.VARCHAR);
+        call.execute();
+        assertEquals("bob", call.getString(1));
+
+    }
+
+    @Test
+    public void testGetArray() throws SQLException {
+        CallableStatement call = con.prepareCall(func + pkgName + "getarray()}");
+        call.registerOutParameter(1, Types.ARRAY);
+        call.execute();
+        Array arr = call.getArray(1);
+        ResultSet rs = arr.getResultSet();
+        assertTrue(rs.next());
+        assertEquals(1, rs.getInt(1));
+        assertTrue(rs.next());
+        assertEquals(2, rs.getInt(1));
+        assertTrue(!rs.next());
+    }
+
+    @Test
+    public void testRaiseNotice() throws SQLException {
+        Statement statement = con.createStatement();
+        statement.execute("SET SESSION client_min_messages = 'NOTICE'");
+        CallableStatement call = con.prepareCall(func + pkgName + "raisenotice()}");
+        call.registerOutParameter(1, Types.INTEGER);
+        call.execute();
+        SQLWarning warn = call.getWarnings();
+        assertNotNull(warn);
+        assertEquals("hello", warn.getMessage());
+        warn = warn.getNextWarning();
+        assertNotNull(warn);
+        assertEquals("goodbye", warn.getMessage());
+        assertEquals(1, call.getInt(1));
+    }
+
+    @Test
+    public void testWasNullBeforeFetch() throws SQLException {
+        CallableStatement cs = con.prepareCall("{? = call lower(?)}");
+        cs.registerOutParameter(1, Types.VARCHAR);
+        cs.setString(2, "Hi");
+        try {
+            cs.wasNull();
+            fail("expected exception");
+        } catch (Exception e) {
+            assertTrue(e instanceof SQLException);
+        }
+    }
+
+    @Test
+    public void testFetchBeforeExecute() throws SQLException {
+        CallableStatement cs = con.prepareCall("{? = call lower(?)}");
+        cs.registerOutParameter(1, Types.VARCHAR);
+        cs.setString(2, "Hi");
+        try {
+            cs.getString(1);
+            fail("expected exception");
+        } catch (Exception e) {
+            assertTrue(e instanceof SQLException);
+        }
+    }
+
+    @Test
+    public void testFetchWithNoResults() throws SQLException {
+        CallableStatement cs = con.prepareCall("{call now()}");
+        cs.execute();
+        try {
+            cs.getObject(1);
+            fail("expected exception");
+        } catch (Exception e) {
+            assertTrue(e instanceof SQLException);
+        }
+    }
+
+    @Test
+    public void testBadStmt() throws Throwable {
+        tryOneBadStmt("{ ?= " + pkgName + "getString (?) }");
+        tryOneBadStmt("{ ?= call getString (?) ");
+        tryOneBadStmt("{ = ? call getString (?); }");
+    }
+
+    protected void tryOneBadStmt(String sql) throws SQLException {
+        try {
+            con.prepareCall(sql);
+            fail("Bad statement (" + sql + ") was not caught.");
+
+        } catch (SQLException e) {
+        }
+    }
+
+    @Test
+    public void testBatchCall() throws SQLException {
+        CallableStatement call = con.prepareCall("{ call " + pkgName + "insertInt(?) }");
+        call.setInt(1, 1);
+        call.addBatch();
+        call.setInt(1, 2);
+        call.addBatch();
+        call.setInt(1, 3);
+        call.addBatch();
+        call.executeBatch();
+        call.close();
+
+        Statement stmt = con.createStatement();
+        ResultSet rs = stmt.executeQuery("SELECT id FROM int_table ORDER BY id");
+        assertTrue(rs.next());
+        assertEquals(1, rs.getInt(1));
+        assertTrue(rs.next());
+        assertEquals(2, rs.getInt(1));
+        assertTrue(rs.next());
+        assertEquals(3, rs.getInt(1));
+        assertTrue(!rs.next());
+    }
 
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ClientEncodingTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ClientEncodingTest.java
index 117b399..7a5250c 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ClientEncodingTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ClientEncodingTest.java
@@ -23,63 +23,63 @@ import java.util.Properties;
 @RunWith(Parameterized.class)
 public class ClientEncodingTest extends BaseTest4 {
 
-  @Parameterized.Parameter(0)
-  public boolean allowEncodingChanges;
+    @Parameterized.Parameter(0)
+    public boolean allowEncodingChanges;
 
-  @Override
-  protected void updateProperties(Properties props) {
-    super.updateProperties(props);
-    PGProperty.ALLOW_ENCODING_CHANGES.set(props, allowEncodingChanges);
-  }
-
-  @Parameterized.Parameters(name = "allowEncodingChanges={0}")
-  public static Iterable<Object[]> data() {
-    return Arrays.asList(new Object[][]{
-        {true},
-        {false}
-    });
-  }
-
-  @Test
-  public void setEncodingUtf8() throws SQLException {
-    // UTF-8 is a default encoding, so it should always be safe to set encoding to UTF-8
-    setEncoding("UTF-8");
-
-    checkConnectionSanity();
-  }
-
-  @Test
-  public void setEncodingAscii() throws SQLException {
-    try {
-      setEncoding("sql_ascii");
-      if (!allowEncodingChanges) {
-        Assert.fail(
-            "allowEncodingChanges is false, thus set client_encoding=aql_ascii is expected to fail");
-      }
-    } catch (SQLException e) {
-      if (!allowEncodingChanges && !PSQLState.CONNECTION_FAILURE.getState()
-          .equals(e.getSQLState())) {
-        throw e;
-      }
-      Assert.assertTrue("Connection should be closed on client_encoding change", con.isClosed());
-      return;
+    @Parameterized.Parameters(name = "allowEncodingChanges={0}")
+    public static Iterable<Object[]> data() {
+        return Arrays.asList(new Object[][]{
+                {true},
+                {false}
+        });
     }
 
-    checkConnectionSanity();
-  }
+    @Override
+    protected void updateProperties(Properties props) {
+        super.updateProperties(props);
+        PGProperty.ALLOW_ENCODING_CHANGES.set(props, allowEncodingChanges);
+    }
 
-  private void checkConnectionSanity() throws SQLException {
-    Statement st = con.createStatement();
-    ResultSet rs = st.executeQuery("select 'abc' as x");
-    rs.next();
-    Assert.assertEquals("abc", rs.getString(1));
-    TestUtil.closeQuietly(rs);
-    TestUtil.closeQuietly(st);
-  }
+    @Test
+    public void setEncodingUtf8() throws SQLException {
+        // UTF-8 is a default encoding, so it should always be safe to set encoding to UTF-8
+        setEncoding("UTF-8");
 
-  private void setEncoding(String encoding) throws SQLException {
-    Statement st = con.createStatement();
-    st.execute("set client_encoding='" + encoding + "'");
-    TestUtil.closeQuietly(st);
-  }
+        checkConnectionSanity();
+    }
+
+    @Test
+    public void setEncodingAscii() throws SQLException {
+        try {
+            setEncoding("sql_ascii");
+            if (!allowEncodingChanges) {
+                Assert.fail(
+                        "allowEncodingChanges is false, thus set client_encoding=sql_ascii is expected to fail");
+            }
+        } catch (SQLException e) {
+            if (!allowEncodingChanges && !PSQLState.CONNECTION_FAILURE.getState()
+                    .equals(e.getSQLState())) {
+                throw e;
+            }
+            Assert.assertTrue("Connection should be closed on client_encoding change", con.isClosed());
+            return;
+        }
+
+        checkConnectionSanity();
+    }
+
+    private void checkConnectionSanity() throws SQLException {
+        Statement st = con.createStatement();
+        ResultSet rs = st.executeQuery("select 'abc' as x");
+        rs.next();
+        Assert.assertEquals("abc", rs.getString(1));
+        TestUtil.closeQuietly(rs);
+        TestUtil.closeQuietly(st);
+    }
+
+    private void setEncoding(String encoding) throws SQLException {
+        Statement st = con.createStatement();
+        st.execute("set client_encoding='" + encoding + "'");
+        TestUtil.closeQuietly(st);
+    }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ColumnSanitiserDisabledTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ColumnSanitiserDisabledTest.java
index 7b37286..8d12309 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ColumnSanitiserDisabledTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ColumnSanitiserDisabledTest.java
@@ -22,94 +22,94 @@ import java.sql.Statement;
 import java.util.Properties;
 
 /*
-* This test suite will check the behaviour of the findColumnIndex method. This is testing the
-* behaviour when sanitiser is disabled.
-*/
+ * This test suite will check the behaviour of the findColumnIndex method. This is testing the
+ * behaviour when sanitiser is disabled.
+ */
 class ColumnSanitiserDisabledTest {
-  private Connection conn;
+    private Connection conn;
 
-  @BeforeEach
-  void setUp() throws Exception {
-    Properties props = new Properties();
-    props.setProperty("disableColumnSanitiser", Boolean.TRUE.toString());
-    conn = TestUtil.openDB(props);
-    assertTrue(conn instanceof BaseConnection);
-    BaseConnection bc = (BaseConnection) conn;
-    assertTrue(bc.isColumnSanitiserDisabled(),
-        "Expected state [TRUE] of base connection configuration failed test.");
-    /*
-     * Quoted columns will be stored with case preserved. Driver will receive column names as
-     * defined in db server.
-     */
-    TestUtil.createTable(conn, "allmixedup",
-        "id int primary key, \"DESCRIPTION\" varchar(40), \"fOo\" varchar(3)");
-    Statement data = conn.createStatement();
-    data.execute(TestUtil.insertSQL("allmixedup", "1,'mixed case test', 'bar'"));
-    data.close();
-  }
-
-  @AfterEach
-  void tearDown() throws Exception {
-    TestUtil.dropTable(conn, "allmixedup");
-    TestUtil.closeDB(conn);
-    System.setProperty("disableColumnSanitiser", "false");
-  }
-
-  /*
-   * Test cases checking different combinations of columns origination from database against
-   * application supplied column names.
-   */
-
-  @Test
-  void tableColumnLowerNowFindFindLowerCaseColumn() throws SQLException {
-    findColumn("id", true);
-  }
-
-  @Test
-  void tableColumnLowerNowFindFindUpperCaseColumn() throws SQLException {
-    findColumn("ID", true);
-  }
-
-  @Test
-  void tableColumnLowerNowFindFindMixedCaseColumn() throws SQLException {
-    findColumn("Id", false);
-  }
-
-  @Test
-  void tableColumnUpperNowFindFindLowerCaseColumn() throws SQLException {
-    findColumn("description", true);
-  }
-
-  @Test
-  void tableColumnUpperNowFindFindUpperCaseColumn() throws SQLException {
-    findColumn("DESCRIPTION", true);
-  }
-
-  @Test
-  void tableColumnUpperNowFindFindMixedCaseColumn() throws SQLException {
-    findColumn("Description", false);
-  }
-
-  @Test
-  void tableColumnMixedNowFindLowerCaseColumn() throws SQLException {
-    findColumn("foo", false);
-  }
-
-  @Test
-  void tableColumnMixedNowFindFindUpperCaseColumn() throws SQLException {
-    findColumn("FOO", false);
-  }
-
-  @Test
-  void tableColumnMixedNowFindFindMixedCaseColumn() throws SQLException {
-    findColumn("fOo", true);
-  }
-
-  private void findColumn(String label, boolean failOnNotFound) throws SQLException {
-    PreparedStatement query = conn.prepareStatement("select * from allmixedup");
-    if ((TestUtil.findColumn(query, label) == 0) && failOnNotFound) {
-      fail(String.format("Expected to find the column with the label [%1$s].", label));
+    @BeforeEach
+    void setUp() throws Exception {
+        Properties props = new Properties();
+        props.setProperty("disableColumnSanitiser", Boolean.TRUE.toString());
+        conn = TestUtil.openDB(props);
+        assertTrue(conn instanceof BaseConnection);
+        BaseConnection bc = (BaseConnection) conn;
+        assertTrue(bc.isColumnSanitiserDisabled(),
+                "Expected state [TRUE] of base connection configuration failed test.");
+        /*
+         * Quoted columns will be stored with case preserved. Driver will receive column names as
+         * defined in db server.
+         */
+        TestUtil.createTable(conn, "allmixedup",
+                "id int primary key, \"DESCRIPTION\" varchar(40), \"fOo\" varchar(3)");
+        Statement data = conn.createStatement();
+        data.execute(TestUtil.insertSQL("allmixedup", "1,'mixed case test', 'bar'"));
+        data.close();
+    }
+
+    @AfterEach
+    void tearDown() throws Exception {
+        TestUtil.dropTable(conn, "allmixedup");
+        TestUtil.closeDB(conn);
+        System.setProperty("disableColumnSanitiser", "false");
+    }
+
+    /*
+     * Test cases checking different combinations of columns originating from the database against
+     * application-supplied column names.
+     */
+
+    @Test
+    void tableColumnLowerNowFindFindLowerCaseColumn() throws SQLException {
+        findColumn("id", true);
+    }
+
+    @Test
+    void tableColumnLowerNowFindFindUpperCaseColumn() throws SQLException {
+        findColumn("ID", true);
+    }
+
+    @Test
+    void tableColumnLowerNowFindFindMixedCaseColumn() throws SQLException {
+        findColumn("Id", false);
+    }
+
+    @Test
+    void tableColumnUpperNowFindFindLowerCaseColumn() throws SQLException {
+        findColumn("description", true);
+    }
+
+    @Test
+    void tableColumnUpperNowFindFindUpperCaseColumn() throws SQLException {
+        findColumn("DESCRIPTION", true);
+    }
+
+    @Test
+    void tableColumnUpperNowFindFindMixedCaseColumn() throws SQLException {
+        findColumn("Description", false);
+    }
+
+    @Test
+    void tableColumnMixedNowFindLowerCaseColumn() throws SQLException {
+        findColumn("foo", false);
+    }
+
+    @Test
+    void tableColumnMixedNowFindFindUpperCaseColumn() throws SQLException {
+        findColumn("FOO", false);
+    }
+
+    @Test
+    void tableColumnMixedNowFindFindMixedCaseColumn() throws SQLException {
+        findColumn("fOo", true);
+    }
+
+    private void findColumn(String label, boolean failOnNotFound) throws SQLException {
+        PreparedStatement query = conn.prepareStatement("select * from allmixedup");
+        if ((TestUtil.findColumn(query, label) == 0) && failOnNotFound) {
+            fail(String.format("Expected to find the column with the label [%1$s].", label));
+        }
+        query.close();
     }
-    query.close();
-  }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ColumnSanitiserEnabledTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ColumnSanitiserEnabledTest.java
index b68a35f..600732a 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ColumnSanitiserEnabledTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ColumnSanitiserEnabledTest.java
@@ -22,88 +22,88 @@ import java.sql.Statement;
 import java.util.Properties;
 
 /*
-* This test suite will check the behaviour of the findColumnIndex method. The tests will check the
-* behaviour of the method when the sanitiser is enabled. Default behaviour of the driver.
-*/
+ * This test suite will check the behaviour of the findColumnIndex method. The tests will check the
+ * behaviour of the method when the sanitiser is enabled. Default behaviour of the driver.
+ */
 class ColumnSanitiserEnabledTest {
-  private Connection conn;
+    private Connection conn;
 
-  @BeforeEach
-  void setUp() throws Exception {
-    Properties props = new Properties();
-    props.setProperty("disableColumnSanitiser", Boolean.FALSE.toString());
-    conn = TestUtil.openDB(props);
-    assertTrue(conn instanceof BaseConnection);
-    BaseConnection bc = (BaseConnection) conn;
-    assertFalse(bc.isColumnSanitiserDisabled(),
-        "Expected state [FALSE] of base connection configuration failed test.");
-    TestUtil.createTable(conn, "allmixedup",
-        "id int primary key, \"DESCRIPTION\" varchar(40), \"fOo\" varchar(3)");
-    Statement data = conn.createStatement();
-    data.execute(TestUtil.insertSQL("allmixedup", "1,'mixed case test', 'bar'"));
-    data.close();
-  }
-
-  protected void tearDown() throws Exception {
-    TestUtil.dropTable(conn, "allmixedup");
-    TestUtil.closeDB(conn);
-  }
-
-  /*
-   * Test cases checking different combinations of columns origination from database against
-   * application supplied column names.
-   */
-
-  @Test
-  void tableColumnLowerNowFindFindLowerCaseColumn() throws SQLException {
-    findColumn("id", true);
-  }
-
-  @Test
-  void tableColumnLowerNowFindFindUpperCaseColumn() throws SQLException {
-    findColumn("ID", true);
-  }
-
-  @Test
-  void tableColumnLowerNowFindFindMixedCaseColumn() throws SQLException {
-    findColumn("Id", true);
-  }
-
-  @Test
-  void tableColumnUpperNowFindFindLowerCaseColumn() throws SQLException {
-    findColumn("description", true);
-  }
-
-  @Test
-  void tableColumnUpperNowFindFindUpperCaseColumn() throws SQLException {
-    findColumn("DESCRIPTION", true);
-  }
-
-  @Test
-  void tableColumnUpperNowFindFindMixedCaseColumn() throws SQLException {
-    findColumn("Description", true);
-  }
-
-  @Test
-  void tableColumnMixedNowFindLowerCaseColumn() throws SQLException {
-    findColumn("foo", true);
-  }
-
-  @Test
-  void tableColumnMixedNowFindFindUpperCaseColumn() throws SQLException {
-    findColumn("FOO", true);
-  }
-
-  @Test
-  void tableColumnMixedNowFindFindMixedCaseColumn() throws SQLException {
-    findColumn("fOo", true);
-  }
-
-  private void findColumn(String label, boolean failOnNotFound) throws SQLException {
-    PreparedStatement query = conn.prepareStatement("select * from allmixedup");
-    if ((TestUtil.findColumn(query, label) == 0) && failOnNotFound) {
-      fail(String.format("Expected to find the column with the label [%1$s].", label));
+    @BeforeEach
+    void setUp() throws Exception {
+        Properties props = new Properties();
+        props.setProperty("disableColumnSanitiser", Boolean.FALSE.toString());
+        conn = TestUtil.openDB(props);
+        assertTrue(conn instanceof BaseConnection);
+        BaseConnection bc = (BaseConnection) conn;
+        assertFalse(bc.isColumnSanitiserDisabled(),
+                "Expected state [FALSE] of base connection configuration failed test.");
+        TestUtil.createTable(conn, "allmixedup",
+                "id int primary key, \"DESCRIPTION\" varchar(40), \"fOo\" varchar(3)");
+        Statement data = conn.createStatement();
+        data.execute(TestUtil.insertSQL("allmixedup", "1,'mixed case test', 'bar'"));
+        data.close();
+    }
+
+    protected void tearDown() throws Exception {
+        TestUtil.dropTable(conn, "allmixedup");
+        TestUtil.closeDB(conn);
+    }
+
+    /*
+     * Test cases checking different combinations of columns originating from the database against
+     * application-supplied column names.
+     */
+
+    @Test
+    void tableColumnLowerNowFindFindLowerCaseColumn() throws SQLException {
+        findColumn("id", true);
+    }
+
+    @Test
+    void tableColumnLowerNowFindFindUpperCaseColumn() throws SQLException {
+        findColumn("ID", true);
+    }
+
+    @Test
+    void tableColumnLowerNowFindFindMixedCaseColumn() throws SQLException {
+        findColumn("Id", true);
+    }
+
+    @Test
+    void tableColumnUpperNowFindFindLowerCaseColumn() throws SQLException {
+        findColumn("description", true);
+    }
+
+    @Test
+    void tableColumnUpperNowFindFindUpperCaseColumn() throws SQLException {
+        findColumn("DESCRIPTION", true);
+    }
+
+    @Test
+    void tableColumnUpperNowFindFindMixedCaseColumn() throws SQLException {
+        findColumn("Description", true);
+    }
+
+    @Test
+    void tableColumnMixedNowFindLowerCaseColumn() throws SQLException {
+        findColumn("foo", true);
+    }
+
+    @Test
+    void tableColumnMixedNowFindFindUpperCaseColumn() throws SQLException {
+        findColumn("FOO", true);
+    }
+
+    @Test
+    void tableColumnMixedNowFindFindMixedCaseColumn() throws SQLException {
+        findColumn("fOo", true);
+    }
+
+    private void findColumn(String label, boolean failOnNotFound) throws SQLException {
+        PreparedStatement query = conn.prepareStatement("select * from allmixedup");
+        if ((TestUtil.findColumn(query, label) == 0) && failOnNotFound) {
+            fail(String.format("Expected to find the column with the label [%1$s].", label));
+        }
+        query.close();
     }
-    query.close();
-  }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ConcurrentStatementFetch.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ConcurrentStatementFetch.java
index 53b5b52..ec471b8 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ConcurrentStatementFetch.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ConcurrentStatementFetch.java
@@ -22,60 +22,60 @@ import java.util.Collection;
 @RunWith(Parameterized.class)
 public class ConcurrentStatementFetch extends BaseTest4 {
 
-  private final AutoCommit autoCommit;
-  private final int fetchSize;
+    private final AutoCommit autoCommit;
+    private final int fetchSize;
 
-  public ConcurrentStatementFetch(AutoCommit autoCommit, int fetchSize, BinaryMode binaryMode) {
-    this.autoCommit = autoCommit;
-    this.fetchSize = fetchSize;
-    setBinaryMode(binaryMode);
-  }
+    public ConcurrentStatementFetch(AutoCommit autoCommit, int fetchSize, BinaryMode binaryMode) {
+        this.autoCommit = autoCommit;
+        this.fetchSize = fetchSize;
+        setBinaryMode(binaryMode);
+    }
 
-  @Parameterized.Parameters(name = "{index}: fetch(autoCommit={0}, fetchSize={1}, binaryMode={2})")
-  public static Iterable<Object[]> data() {
-    Collection<Object[]> ids = new ArrayList<>();
-    for (AutoCommit autoCommit : AutoCommit.values()) {
-      for (int fetchSize : new int[]{1, 2, 20}) {
-        for (BinaryMode binaryMode : BinaryMode.values()) {
-          ids.add(new Object[]{autoCommit, fetchSize, binaryMode});
+    @Parameterized.Parameters(name = "{index}: fetch(autoCommit={0}, fetchSize={1}, binaryMode={2})")
+    public static Iterable<Object[]> data() {
+        Collection<Object[]> ids = new ArrayList<>();
+        for (AutoCommit autoCommit : AutoCommit.values()) {
+            for (int fetchSize : new int[]{1, 2, 20}) {
+                for (BinaryMode binaryMode : BinaryMode.values()) {
+                    ids.add(new Object[]{autoCommit, fetchSize, binaryMode});
+                }
+            }
         }
-      }
+        return ids;
     }
-    return ids;
-  }
 
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-    con.setAutoCommit(autoCommit == AutoCommit.YES);
-  }
-
-  @Test
-  public void testFetchTwoStatements() throws Exception {
-    // This test definitely fails at 8.2 in autocommit=false, and works with 8.4+
-    Assume.assumeTrue(autoCommit == AutoCommit.YES
-        || TestUtil.haveMinimumServerVersion(con, ServerVersion.v8_4));
-    PreparedStatement ps1 = null;
-    PreparedStatement ps2 = null;
-    try {
-      ps1 = con.prepareStatement("select * from generate_series(0, 9)");
-      ps1.setFetchSize(fetchSize);
-      ResultSet rs1 = ps1.executeQuery();
-      ps2 = con.prepareStatement("select * from generate_series(10, 19)");
-      ps2.setFetchSize(fetchSize);
-      ResultSet rs2 = ps2.executeQuery();
-
-      for (int i = 0; i < 10; i++) {
-        Assert.assertTrue(rs1.next());
-        Assert.assertTrue(rs2.next());
-        Assert.assertEquals("Row#" + i + ", resultset 1", i, rs1.getInt(1));
-        Assert.assertEquals("Row#" + i + ", resultset 2", i + 10, rs2.getInt(1));
-      }
-      Assert.assertFalse(rs1.next());
-      Assert.assertFalse(rs2.next());
-    } finally {
-      TestUtil.closeQuietly(ps1);
-      TestUtil.closeQuietly(ps2);
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        con.setAutoCommit(autoCommit == AutoCommit.YES);
+    }
+
+    @Test
+    public void testFetchTwoStatements() throws Exception {
+        // This test definitely fails at 8.2 in autocommit=false, and works with 8.4+
+        Assume.assumeTrue(autoCommit == AutoCommit.YES
+                || TestUtil.haveMinimumServerVersion(con, ServerVersion.v8_4));
+        PreparedStatement ps1 = null;
+        PreparedStatement ps2 = null;
+        try {
+            ps1 = con.prepareStatement("select * from generate_series(0, 9)");
+            ps1.setFetchSize(fetchSize);
+            ResultSet rs1 = ps1.executeQuery();
+            ps2 = con.prepareStatement("select * from generate_series(10, 19)");
+            ps2.setFetchSize(fetchSize);
+            ResultSet rs2 = ps2.executeQuery();
+
+            for (int i = 0; i < 10; i++) {
+                Assert.assertTrue(rs1.next());
+                Assert.assertTrue(rs2.next());
+                Assert.assertEquals("Row#" + i + ", resultset 1", i, rs1.getInt(1));
+                Assert.assertEquals("Row#" + i + ", resultset 2", i + 10, rs2.getInt(1));
+            }
+            Assert.assertFalse(rs1.next());
+            Assert.assertFalse(rs2.next());
+        } finally {
+            TestUtil.closeQuietly(ps1);
+            TestUtil.closeQuietly(ps2);
+        }
     }
-  }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ConnectTimeoutTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ConnectTimeoutTest.java
index aa37f02..d5117c2 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ConnectTimeoutTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ConnectTimeoutTest.java
@@ -22,53 +22,53 @@ import java.sql.SQLException;
 import java.util.Properties;
 
 class ConnectTimeoutTest {
-  // The IP below is non-routable (see http://stackoverflow.com/a/904609/1261287)
-  private static final String UNREACHABLE_HOST = "10.255.255.1";
-  private static final String UNREACHABLE_URL = "jdbc:postgresql://" + UNREACHABLE_HOST + ":5432/test";
-  private static final int CONNECT_TIMEOUT = 5;
+    // The IP below is non-routable (see http://stackoverflow.com/a/904609/1261287)
+    private static final String UNREACHABLE_HOST = "10.255.255.1";
+    private static final String UNREACHABLE_URL = "jdbc:postgresql://" + UNREACHABLE_HOST + ":5432/test";
+    private static final int CONNECT_TIMEOUT = 5;
 
-  @BeforeEach
-  void setUp() throws Exception {
-    TestUtil.initDriver();
-  }
-
-  @Test
-  void timeout() {
-    final Properties props = new Properties();
-    PGProperty.USER.set(props, TestUtil.getUser());
-    PGProperty.PASSWORD.set(props, TestUtil.getPassword());
-    // with 0 (default value) it hangs for about 60 seconds (platform dependent)
-    PGProperty.CONNECT_TIMEOUT.set(props, CONNECT_TIMEOUT);
-
-    final long startTime = System.currentTimeMillis();
-    try {
-      DriverManager.getConnection(UNREACHABLE_URL, props);
-    } catch (SQLException e) {
-      final long interval = System.currentTimeMillis() - startTime;
-      final long connectTimeoutMillis = CONNECT_TIMEOUT * 1000;
-      final long maxDeviation = connectTimeoutMillis / 10;
-
-      /*
-       * If the platform fast-fails the unroutable address connection then this
-       * test may not time out, instead throwing
-       * java.net.NoRouteToHostException. The test has failed in that the connection
-       * attempt did not time out.
-       *
-       * We treat this as a skipped test, as the test didn't really "succeed"
-       * in testing the original behaviour, but it didn't fail either.
-       */
-      Assumptions.assumeFalse(e.getCause() instanceof NoRouteToHostException
-                        && interval < connectTimeoutMillis,
-                        "Host fast-failed connection to unreachable address "
-                        + UNREACHABLE_HOST + " after " + interval + " ms, "
-                        + " before timeout should have triggered.");
-
-      assertTrue(e.getCause() instanceof SocketTimeoutException,
-          "Unexpected " + e.toString() + " with cause " + e.getCause());
-      // check that it was not a default system timeout, an approximate value is used
-      assertTrue(Math.abs(interval - connectTimeoutMillis) < maxDeviation);
-      return;
+    @BeforeEach
+    void setUp() throws Exception {
+        TestUtil.initDriver();
+    }
+
+    @Test
+    void timeout() {
+        final Properties props = new Properties();
+        PGProperty.USER.set(props, TestUtil.getUser());
+        PGProperty.PASSWORD.set(props, TestUtil.getPassword());
+        // with 0 (default value) it hangs for about 60 seconds (platform dependent)
+        PGProperty.CONNECT_TIMEOUT.set(props, CONNECT_TIMEOUT);
+
+        final long startTime = System.currentTimeMillis();
+        try {
+            DriverManager.getConnection(UNREACHABLE_URL, props);
+        } catch (SQLException e) {
+            final long interval = System.currentTimeMillis() - startTime;
+            final long connectTimeoutMillis = CONNECT_TIMEOUT * 1000;
+            final long maxDeviation = connectTimeoutMillis / 10;
+
+            /*
+             * If the platform fast-fails the unroutable address connection then this
+             * test may not time out, instead throwing
+             * java.net.NoRouteToHostException. The test has failed in that the connection
+             * attempt did not time out.
+             *
+             * We treat this as a skipped test, as the test didn't really "succeed"
+             * in testing the original behaviour, but it didn't fail either.
+             */
+            Assumptions.assumeFalse(e.getCause() instanceof NoRouteToHostException
+                            && interval < connectTimeoutMillis,
+                    "Host fast-failed connection to unreachable address "
+                            + UNREACHABLE_HOST + " after " + interval + " ms, "
+                            + " before timeout should have triggered.");
+
+            assertTrue(e.getCause() instanceof SocketTimeoutException,
+                    "Unexpected " + e.toString() + " with cause " + e.getCause());
+            // check that it was not a default system timeout, an approximate value is used
+            assertTrue(Math.abs(interval - connectTimeoutMillis) < maxDeviation);
+            return;
+        }
+        fail("SQLException expected");
     }
-    fail("SQLException expected");
-  }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ConnectionTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ConnectionTest.java
index c6f3f96..402e5f9 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ConnectionTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ConnectionTest.java
@@ -5,25 +5,6 @@
 
 package org.postgresql.test.jdbc2;
 
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertFalse;
-import static org.junit.jupiter.api.Assertions.assertNotNull;
-import static org.junit.jupiter.api.Assertions.assertNull;
-import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.junit.jupiter.api.Assertions.fail;
-
-import org.postgresql.PGConnection;
-import org.postgresql.PGProperty;
-import org.postgresql.core.PGStream;
-import org.postgresql.core.QueryExecutor;
-import org.postgresql.jdbc.PgConnection;
-import org.postgresql.test.TestUtil;
-import org.postgresql.util.PSQLState;
-
-import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.Test;
-
 import java.lang.reflect.Field;
 import java.sql.Connection;
 import java.sql.PreparedStatement;
@@ -34,521 +15,537 @@ import java.sql.Statement;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.Properties;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.postgresql.PGConnection;
+import org.postgresql.PGProperty;
+import org.postgresql.core.PGStream;
+import org.postgresql.core.QueryExecutor;
+import org.postgresql.jdbc.PgConnection;
+import org.postgresql.test.TestUtil;
+import org.postgresql.util.PSQLState;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 
 /**
  * TestCase to test the internal functionality of org.postgresql.jdbc2.Connection and it's
  * superclass.
  */
 class ConnectionTest {
-  private Connection con;
+    private Connection con;
 
-  // Set up the fixture for this testcase: the tables for this test.
-  @BeforeEach
-  void setUp() throws Exception {
-    con = TestUtil.openDB();
-
-    TestUtil.createTable(con, "test_a", "imagename name,image oid,id int4");
-    TestUtil.createTable(con, "test_c", "source text,cost money,imageid int4");
-
-    TestUtil.closeDB(con);
-  }
-
-  // Tear down the fixture for this test case.
-  @AfterEach
-  void tearDown() throws Exception {
-    TestUtil.closeDB(con);
-
-    con = TestUtil.openDB();
-
-    TestUtil.dropTable(con, "test_a");
-    TestUtil.dropTable(con, "test_c");
-
-    TestUtil.closeDB(con);
-  }
-
-  /*
-   * Tests the two forms of createStatement()
-   */
-  @Test
-  void createStatement() throws Exception {
-    con = TestUtil.openDB();
-
-    // A standard Statement
-    Statement stat = con.createStatement();
-    assertNotNull(stat);
-    stat.close();
-
-    // Ask for Updateable ResultSets
-    stat = con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE);
-    assertNotNull(stat);
-    stat.close();
-  }
-
-  /*
-   * Tests the two forms of prepareStatement()
-   */
-  @Test
-  void prepareStatement() throws Exception {
-    con = TestUtil.openDB();
-
-    String sql = "select source,cost,imageid from test_c";
-
-    // A standard Statement
-    PreparedStatement stat = con.prepareStatement(sql);
-    assertNotNull(stat);
-    stat.close();
-
-    // Ask for Updateable ResultSets
-    stat = con.prepareStatement(sql, ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE);
-    assertNotNull(stat);
-    stat.close();
-  }
-
-  /*
-   * Put the test for createPrepareCall here
-   */
-  @Test
-  void prepareCall() {
-  }
-
-  /*
-   * Test nativeSQL
-   */
-  @Test
-  void nativeSQL() throws Exception {
-    // test a simple escape
-    con = TestUtil.openDB();
-    assertEquals("DATE '2005-01-24'", con.nativeSQL("{d '2005-01-24'}"));
-  }
-
-  /*
-   * Test autoCommit (both get & set)
-   */
-  @Test
-  void transactions() throws Exception {
-    con = TestUtil.openDB();
-    Statement st;
-    ResultSet rs;
-
-    // Turn it off
-    con.setAutoCommit(false);
-    assertFalse(con.getAutoCommit());
-
-    // Turn it back on
-    con.setAutoCommit(true);
-    assertTrue(con.getAutoCommit());
-
-    // Now test commit
-    st = con.createStatement();
-    st.executeUpdate("insert into test_a (imagename,image,id) values ('comttest',1234,5678)");
-
-    con.setAutoCommit(false);
-
-    // Now update image to 9876 and commit
-    st.executeUpdate("update test_a set image=9876 where id=5678");
-    con.commit();
-    rs = st.executeQuery("select image from test_a where id=5678");
-    assertTrue(rs.next());
-    assertEquals(9876, rs.getInt(1));
-    rs.close();
-
-    // Now try to change it but rollback
-    st.executeUpdate("update test_a set image=1111 where id=5678");
-    con.rollback();
-    rs = st.executeQuery("select image from test_a where id=5678");
-    assertTrue(rs.next());
-    assertEquals(9876, rs.getInt(1)); // Should not change!
-    rs.close();
-
-    TestUtil.closeDB(con);
-  }
-
-  /*
-   * Tests for session and transaction read only behavior with "always" read only mode.
-   */
-  @Test
-  void readOnly_always() throws Exception {
-    final Properties props = new Properties();
-    PGProperty.READ_ONLY_MODE.set(props, "always");
-    con = TestUtil.openDB(props);
-    Statement st;
-    ResultSet rs;
-
-    con.setAutoCommit(true);
-    con.setReadOnly(true);
-    assertTrue(con.getAutoCommit());
-    assertTrue(con.isReadOnly());
-
-    // Now test insert with auto commit true and read only
-    st = con.createStatement();
-    try {
-      st.executeUpdate("insert into test_a (imagename,image,id) values ('comttest',1234,5678)");
-      fail("insert should have failed when read only");
-    } catch (SQLException e) {
-      assertStringContains(e.getMessage(), "read-only");
+    private static void assertStringContains(String orig, String toContain) {
+        if (!orig.contains(toContain)) {
+            fail("expected [" + orig + "] to contain [" + toContain + "].");
+        }
     }
 
-    con.setAutoCommit(false);
+    // Set up the fixture for this testcase: the tables for this test.
+    @BeforeEach
+    void setUp() throws Exception {
+        con = TestUtil.openDB();
 
-    // auto commit false and read only
-    try {
-      st.executeUpdate("insert into test_a (imagename,image,id) values ('comttest',1234,5678)");
-      fail("insert should have failed when read only");
-    } catch (SQLException e) {
-      assertStringContains(e.getMessage(), "read-only");
+        TestUtil.createTable(con, "test_a", "imagename name,image oid,id int4");
+        TestUtil.createTable(con, "test_c", "source text,cost money,imageid int4");
+
+        TestUtil.closeDB(con);
     }
 
-    try {
-      con.setReadOnly(false);
-      fail("cannot set read only during transaction");
-    } catch (SQLException e) {
-      assertEquals(PSQLState.ACTIVE_SQL_TRANSACTION.getState(), e.getSQLState(), "Expecting <<cannot change transaction read-only>>");
+    // Tear down the fixture for this test case.
+    @AfterEach
+    void tearDown() throws Exception {
+        TestUtil.closeDB(con);
+
+        con = TestUtil.openDB();
+
+        TestUtil.dropTable(con, "test_a");
+        TestUtil.dropTable(con, "test_c");
+
+        TestUtil.closeDB(con);
     }
 
-    // end the transaction
-    con.rollback();
+    /*
+     * Tests the two forms of createStatement()
+     */
+    @Test
+    void createStatement() throws Exception {
+        con = TestUtil.openDB();
 
-    // disable read only
-    con.setReadOnly(false);
+        // A standard Statement
+        Statement stat = con.createStatement();
+        assertNotNull(stat);
+        stat.close();
 
-    assertEquals(1, st.executeUpdate("insert into test_a (imagename,image,id) values ('comttest',1234,5678)"));
-
-    // Now update image to 9876 and commit
-    st.executeUpdate("update test_a set image=9876 where id=5678");
-    con.commit();
-
-    // back to read only for successful query
-    con.setReadOnly(true);
-    rs = st.executeQuery("select image from test_a where id=5678");
-    assertTrue(rs.next());
-    assertEquals(9876, rs.getInt(1));
-    rs.close();
-
-    // Now try to change with auto commit false
-    try {
-      st.executeUpdate("update test_a set image=1111 where id=5678");
-      fail("update should fail when read only");
-    } catch (SQLException e) {
-      assertStringContains(e.getMessage(), "read-only");
-      con.rollback();
+        // Ask for Updateable ResultSets
+        stat = con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE);
+        assertNotNull(stat);
+        stat.close();
     }
 
-    // test that value did not change
-    rs = st.executeQuery("select image from test_a where id=5678");
-    assertTrue(rs.next());
-    assertEquals(9876, rs.getInt(1)); // Should not change!
-    rs.close();
+    /*
+     * Tests the two forms of prepareStatement()
+     */
+    @Test
+    void prepareStatement() throws Exception {
+        con = TestUtil.openDB();
 
-    // repeat attempt to change with auto commit true
-    con.setAutoCommit(true);
+        String sql = "select source,cost,imageid from test_c";
 
-    try {
-      st.executeUpdate("update test_a set image=1111 where id=5678");
-      fail("update should fail when read only");
-    } catch (SQLException e) {
-      assertStringContains(e.getMessage(), "read-only");
+        // A standard Statement
+        PreparedStatement stat = con.prepareStatement(sql);
+        assertNotNull(stat);
+        stat.close();
+
+        // Ask for Updateable ResultSets
+        stat = con.prepareStatement(sql, ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE);
+        assertNotNull(stat);
+        stat.close();
     }
 
-    // test that value did not change
-    rs = st.executeQuery("select image from test_a where id=5678");
-    assertTrue(rs.next());
-    assertEquals(9876, rs.getInt(1)); // Should not change!
-    rs.close();
-
-    TestUtil.closeDB(con);
-  }
-
-  /*
-   * Tests for session and transaction read only behavior with "ignore" read only mode.
-   */
-  @Test
-  void readOnly_ignore() throws Exception {
-    final Properties props = new Properties();
-    PGProperty.READ_ONLY_MODE.set(props, "ignore");
-    con = TestUtil.openDB(props);
-    Statement st;
-    ResultSet rs;
-
-    con.setAutoCommit(true);
-    con.setReadOnly(true);
-    assertTrue(con.getAutoCommit());
-    assertTrue(con.isReadOnly());
-
-    // Now test insert with auto commit true and read only
-    st = con.createStatement();
-    assertEquals(1, st.executeUpdate("insert into test_a (imagename,image,id) values ('comttest',1234,5678)"));
-    con.setAutoCommit(false);
-
-    // Now update image to 9876 and commit
-    st.executeUpdate("update test_a set image=9876 where id=5678");
-
-    // back to read only for successful query
-    rs = st.executeQuery("select image from test_a where id=5678");
-    assertTrue(rs.next());
-    assertEquals(9876, rs.getInt(1));
-    rs.close();
-
-    con.rollback();
-
-    // test that value did not change
-    rs = st.executeQuery("select image from test_a where id=5678");
-    assertTrue(rs.next());
-    assertEquals(1234, rs.getInt(1)); // Should not change!
-    rs.close();
-
-    TestUtil.closeDB(con);
-  }
-
-  /*
-   * Tests for session and transaction read only behavior with "transaction" read only mode.
-   */
-  @Test
-  void readOnly_transaction() throws Exception {
-    final Properties props = new Properties();
-    PGProperty.READ_ONLY_MODE.set(props, "transaction");
-    con = TestUtil.openDB(props);
-    Statement st;
-    ResultSet rs;
-
-    con.setAutoCommit(false);
-    con.setReadOnly(true);
-    assertFalse(con.getAutoCommit());
-    assertTrue(con.isReadOnly());
-
-    // Test insert with auto commit false and read only
-    st = con.createStatement();
-    try {
-      st.executeUpdate("insert into test_a (imagename,image,id) values ('comttest',1234,5678)");
-      fail("insert should have failed when read only");
-    } catch (SQLException e) {
-      assertStringContains(e.getMessage(), "read-only");
+    /*
+     * Put the test for createPrepareCall here
+     */
+    @Test
+    void prepareCall() {
     }
 
-    con.rollback();
-
-    con.setAutoCommit(true);
-    assertTrue(con.isReadOnly());
-    //with autocommit true and read only, can still insert
-    assertEquals(1, st.executeUpdate("insert into test_a (imagename,image,id) values ('comttest',1234,5678)"));
-
-    // Now update image to 9876
-    st.executeUpdate("update test_a set image=9876 where id=5678");
-
-    //successful query
-    rs = st.executeQuery("select image from test_a where id=5678");
-    assertTrue(rs.next());
-    assertEquals(9876, rs.getInt(1));
-    rs.close();
-
-    con.setAutoCommit(false);
-    // Now try to change with auto commit false
-    try {
-      st.executeUpdate("update test_a set image=1111 where id=5678");
-      fail("update should fail when read only");
-    } catch (SQLException e) {
-      assertStringContains(e.getMessage(), "read-only");
+    /*
+     * Test nativeSQL
+     */
+    @Test
+    void nativeSQL() throws Exception {
+        // test a simple escape
+        con = TestUtil.openDB();
+        assertEquals("DATE '2005-01-24'", con.nativeSQL("{d '2005-01-24'}"));
     }
 
-    con.rollback();
+    /*
+     * Test autoCommit (both get & set)
+     */
+    @Test
+    void transactions() throws Exception {
+        con = TestUtil.openDB();
+        Statement st;
+        ResultSet rs;
 
-    // test that value did not change
-    rs = st.executeQuery("select image from test_a where id=5678");
-    assertTrue(rs.next());
-    assertEquals(9876, rs.getInt(1)); // Should not change!
-    rs.close();
+        // Turn it off
+        con.setAutoCommit(false);
+        assertFalse(con.getAutoCommit());
 
-    // repeat attempt to change with auto commit true
-    con.setAutoCommit(true);
+        // Turn it back on
+        con.setAutoCommit(true);
+        assertTrue(con.getAutoCommit());
 
-    assertEquals(1, st.executeUpdate("update test_a set image=1111 where id=5678"));
+        // Now test commit
+        st = con.createStatement();
+        st.executeUpdate("insert into test_a (imagename,image,id) values ('comttest',1234,5678)");
 
-    // test that value did not change
-    rs = st.executeQuery("select image from test_a where id=5678");
-    assertTrue(rs.next());
-    assertEquals(1111, rs.getInt(1)); // Should not change!
-    rs.close();
+        con.setAutoCommit(false);
 
-    TestUtil.closeDB(con);
-  }
+        // Now update image to 9876 and commit
+        st.executeUpdate("update test_a set image=9876 where id=5678");
+        con.commit();
+        rs = st.executeQuery("select image from test_a where id=5678");
+        assertTrue(rs.next());
+        assertEquals(9876, rs.getInt(1));
+        rs.close();
 
-  /*
-   * Simple test to see if isClosed works.
-   */
-  @Test
-  void isClosed() throws Exception {
-    con = TestUtil.openDB();
+        // Now try to change it but rollback
+        st.executeUpdate("update test_a set image=1111 where id=5678");
+        con.rollback();
+        rs = st.executeQuery("select image from test_a where id=5678");
+        assertTrue(rs.next());
+        assertEquals(9876, rs.getInt(1)); // Should not change!
+        rs.close();
 
-    // Should not say closed
-    assertFalse(con.isClosed());
-
-    TestUtil.closeDB(con);
-
-    // Should now say closed
-    assertTrue(con.isClosed());
-  }
-
-  /*
-   * Test the warnings system
-   */
-  @Test
-  void warnings() throws Exception {
-    con = TestUtil.openDB();
-
-    String testStr = "This Is OuR TeSt message";
-
-    // The connection must be ours!
-    assertTrue(con instanceof PGConnection);
-
-    // Clear any existing warnings
-    con.clearWarnings();
-
-    // Set the test warning
-    ((PgConnection) con).addWarning(new SQLWarning(testStr));
-
-    // Retrieve it
-    SQLWarning warning = con.getWarnings();
-    assertNotNull(warning);
-    assertEquals(testStr, warning.getMessage());
-
-    // Finally test clearWarnings() this time there must be something to delete
-    con.clearWarnings();
-    assertNull(con.getWarnings());
-
-    TestUtil.closeDB(con);
-  }
-
-  /*
-   * Transaction Isolation Levels
-   */
-  @Test
-  void transactionIsolation() throws Exception {
-    con = TestUtil.openDB();
-
-    int defaultLevel = con.getTransactionIsolation();
-
-    // Begin a transaction
-    con.setAutoCommit(false);
-
-    // The isolation level should not have changed
-    assertEquals(defaultLevel, con.getTransactionIsolation());
-
-    // Now run some tests with autocommit enabled.
-    con.setAutoCommit(true);
-
-    assertEquals(defaultLevel, con.getTransactionIsolation());
-
-    con.setTransactionIsolation(Connection.TRANSACTION_SERIALIZABLE);
-    assertEquals(Connection.TRANSACTION_SERIALIZABLE, con.getTransactionIsolation());
-
-    con.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED);
-    assertEquals(Connection.TRANSACTION_READ_COMMITTED, con.getTransactionIsolation());
-
-    // Test if a change of isolation level before beginning the
-    // transaction affects the isolation level inside the transaction.
-    con.setTransactionIsolation(Connection.TRANSACTION_SERIALIZABLE);
-    assertEquals(Connection.TRANSACTION_SERIALIZABLE, con.getTransactionIsolation());
-    con.setAutoCommit(false);
-    assertEquals(Connection.TRANSACTION_SERIALIZABLE, con.getTransactionIsolation());
-    con.setAutoCommit(true);
-    assertEquals(Connection.TRANSACTION_SERIALIZABLE, con.getTransactionIsolation());
-    con.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED);
-    assertEquals(Connection.TRANSACTION_READ_COMMITTED, con.getTransactionIsolation());
-    con.setAutoCommit(false);
-    assertEquals(Connection.TRANSACTION_READ_COMMITTED, con.getTransactionIsolation());
-    con.commit();
-
-    // Test that getTransactionIsolation() does not actually start a new txn.
-    // Shouldn't start a new transaction.
-    con.getTransactionIsolation();
-    // Should be ok -- we're not in a transaction.
-    con.setTransactionIsolation(Connection.TRANSACTION_SERIALIZABLE);
-    // Should still be ok.
-    con.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED);
-
-    // Test that we can't change isolation mid-transaction
-    Statement stmt = con.createStatement();
-    stmt.executeQuery("SELECT 1"); // Start transaction.
-    stmt.close();
-
-    try {
-      con.setTransactionIsolation(Connection.TRANSACTION_SERIALIZABLE);
-      fail("Expected an exception when changing transaction isolation mid-transaction");
-    } catch (SQLException e) {
-      // Ok.
+        TestUtil.closeDB(con);
     }
 
-    con.rollback();
-    TestUtil.closeDB(con);
-  }
+    /*
+     * Tests for session and transaction read only behavior with "always" read only mode.
+     */
+    @Test
+    void readOnly_always() throws Exception {
+        final Properties props = new Properties();
+        PGProperty.READ_ONLY_MODE.set(props, "always");
+        con = TestUtil.openDB(props);
+        Statement st;
+        ResultSet rs;
 
-  /*
-   * JDBC2 Type mappings
-   */
-  @Test
-  void typeMaps() throws Exception {
-    con = TestUtil.openDB();
+        con.setAutoCommit(true);
+        con.setReadOnly(true);
+        assertTrue(con.getAutoCommit());
+        assertTrue(con.isReadOnly());
 
-    // preserve the current map
-    Map<String, Class<?>> oldmap = con.getTypeMap();
+        // Now test insert with auto commit true and read only
+        st = con.createStatement();
+        try {
+            st.executeUpdate("insert into test_a (imagename,image,id) values ('comttest',1234,5678)");
+            fail("insert should have failed when read only");
+        } catch (SQLException e) {
+            assertStringContains(e.getMessage(), "read-only");
+        }
 
-    // now change it for an empty one
-    Map<String, Class<?>> newmap = new HashMap<>();
-    con.setTypeMap(newmap);
-    assertEquals(newmap, con.getTypeMap());
+        con.setAutoCommit(false);
 
-    // restore the old one
-    con.setTypeMap(oldmap);
-    assertEquals(oldmap, con.getTypeMap());
+        // auto commit false and read only
+        try {
+            st.executeUpdate("insert into test_a (imagename,image,id) values ('comttest',1234,5678)");
+            fail("insert should have failed when read only");
+        } catch (SQLException e) {
+            assertStringContains(e.getMessage(), "read-only");
+        }
 
-    TestUtil.closeDB(con);
-  }
+        try {
+            con.setReadOnly(false);
+            fail("cannot set read only during transaction");
+        } catch (SQLException e) {
+            assertEquals(PSQLState.ACTIVE_SQL_TRANSACTION.getState(), e.getSQLState(), "Expecting <<cannot change transaction read-only>>");
+        }
 
-  /**
-   * Closing a Connection more than once is not an error.
-   */
-  @Test
-  void doubleClose() throws Exception {
-    con = TestUtil.openDB();
-    con.close();
-    con.close();
-  }
+        // end the transaction
+        con.rollback();
 
-  /**
-   * Make sure that type map is empty and not null
-   */
-  @Test
-  void getTypeMapEmpty() throws Exception {
-    con = TestUtil.openDB();
-    Map typeMap = con.getTypeMap();
-    assertNotNull(typeMap);
-    assertTrue(typeMap.isEmpty(), "TypeMap should be empty");
-    con.close();
-  }
+        // disable read only
+        con.setReadOnly(false);
 
-  @Test
-  void pGStreamSettings() throws Exception {
-    con = TestUtil.openDB();
-    QueryExecutor queryExecutor = ((PgConnection) con).getQueryExecutor();
+        assertEquals(1, st.executeUpdate("insert into test_a (imagename,image,id) values ('comttest',1234,5678)"));
 
-    Field f = queryExecutor.getClass().getSuperclass().getDeclaredField("pgStream");
-    f.setAccessible(true);
-    PGStream pgStream = (PGStream) f.get(queryExecutor);
-    pgStream.setNetworkTimeout(1000);
-    pgStream.getSocket().setKeepAlive(true);
-    pgStream.getSocket().setSendBufferSize(8192);
-    pgStream.getSocket().setReceiveBufferSize(2048);
-    PGStream newStream = new PGStream(pgStream, 10);
-    assertEquals(1000, newStream.getSocket().getSoTimeout());
-    assertEquals(2048, newStream.getSocket().getReceiveBufferSize());
-    assertEquals(8192, newStream.getSocket().getSendBufferSize());
-    assertTrue(newStream.getSocket().getKeepAlive());
+        // Now update image to 9876 and commit
+        st.executeUpdate("update test_a set image=9876 where id=5678");
+        con.commit();
 
-    TestUtil.closeDB(con);
-  }
+        // back to read only for successful query
+        con.setReadOnly(true);
+        rs = st.executeQuery("select image from test_a where id=5678");
+        assertTrue(rs.next());
+        assertEquals(9876, rs.getInt(1));
+        rs.close();
 
-  private static void assertStringContains(String orig, String toContain) {
-    if (!orig.contains(toContain)) {
-      fail("expected [" + orig + ']' + "to contain [" + toContain + "].");
+        // Now try to change with auto commit false
+        try {
+            st.executeUpdate("update test_a set image=1111 where id=5678");
+            fail("update should fail when read only");
+        } catch (SQLException e) {
+            assertStringContains(e.getMessage(), "read-only");
+            con.rollback();
+        }
+
+        // test that value did not change
+        rs = st.executeQuery("select image from test_a where id=5678");
+        assertTrue(rs.next());
+        assertEquals(9876, rs.getInt(1)); // Should not change!
+        rs.close();
+
+        // repeat attempt to change with auto commit true
+        con.setAutoCommit(true);
+
+        try {
+            st.executeUpdate("update test_a set image=1111 where id=5678");
+            fail("update should fail when read only");
+        } catch (SQLException e) {
+            assertStringContains(e.getMessage(), "read-only");
+        }
+
+        // test that value did not change
+        rs = st.executeQuery("select image from test_a where id=5678");
+        assertTrue(rs.next());
+        assertEquals(9876, rs.getInt(1)); // Should not change!
+        rs.close();
+
+        TestUtil.closeDB(con);
+    }
+
+    /*
+     * Tests for session and transaction read only behavior with "ignore" read only mode.
+     */
+    @Test
+    void readOnly_ignore() throws Exception {
+        final Properties props = new Properties();
+        PGProperty.READ_ONLY_MODE.set(props, "ignore");
+        con = TestUtil.openDB(props);
+        Statement st;
+        ResultSet rs;
+
+        con.setAutoCommit(true);
+        con.setReadOnly(true);
+        assertTrue(con.getAutoCommit());
+        assertTrue(con.isReadOnly());
+
+        // Now test insert with auto commit true and read only
+        st = con.createStatement();
+        assertEquals(1, st.executeUpdate("insert into test_a (imagename,image,id) values ('comttest',1234,5678)"));
+        con.setAutoCommit(false);
+
+        // Now update image to 9876 and commit
+        st.executeUpdate("update test_a set image=9876 where id=5678");
+
+        // back to read only for successful query
+        rs = st.executeQuery("select image from test_a where id=5678");
+        assertTrue(rs.next());
+        assertEquals(9876, rs.getInt(1));
+        rs.close();
+
+        con.rollback();
+
+        // test that value did not change
+        rs = st.executeQuery("select image from test_a where id=5678");
+        assertTrue(rs.next());
+        assertEquals(1234, rs.getInt(1)); // Should not change!
+        rs.close();
+
+        TestUtil.closeDB(con);
+    }
+
+    /*
+     * Tests for session and transaction read only behavior with "transaction" read only mode.
+     */
+    @Test
+    void readOnly_transaction() throws Exception {
+        final Properties props = new Properties();
+        PGProperty.READ_ONLY_MODE.set(props, "transaction");
+        con = TestUtil.openDB(props);
+        Statement st;
+        ResultSet rs;
+
+        con.setAutoCommit(false);
+        con.setReadOnly(true);
+        assertFalse(con.getAutoCommit());
+        assertTrue(con.isReadOnly());
+
+        // Test insert with auto commit false and read only
+        st = con.createStatement();
+        try {
+            st.executeUpdate("insert into test_a (imagename,image,id) values ('comttest',1234,5678)");
+            fail("insert should have failed when read only");
+        } catch (SQLException e) {
+            assertStringContains(e.getMessage(), "read-only");
+        }
+
+        con.rollback();
+
+        con.setAutoCommit(true);
+        assertTrue(con.isReadOnly());
+        //with autocommit true and read only, can still insert
+        assertEquals(1, st.executeUpdate("insert into test_a (imagename,image,id) values ('comttest',1234,5678)"));
+
+        // Now update image to 9876
+        st.executeUpdate("update test_a set image=9876 where id=5678");
+
+        //successful query
+        rs = st.executeQuery("select image from test_a where id=5678");
+        assertTrue(rs.next());
+        assertEquals(9876, rs.getInt(1));
+        rs.close();
+
+        con.setAutoCommit(false);
+        // Now try to change with auto commit false
+        try {
+            st.executeUpdate("update test_a set image=1111 where id=5678");
+            fail("update should fail when read only");
+        } catch (SQLException e) {
+            assertStringContains(e.getMessage(), "read-only");
+        }
+
+        con.rollback();
+
+        // test that value did not change
+        rs = st.executeQuery("select image from test_a where id=5678");
+        assertTrue(rs.next());
+        assertEquals(9876, rs.getInt(1)); // Should not change!
+        rs.close();
+
+        // repeat attempt to change with auto commit true
+        con.setAutoCommit(true);
+
+        assertEquals(1, st.executeUpdate("update test_a set image=1111 where id=5678"));
+
+        // test that value did not change
+        rs = st.executeQuery("select image from test_a where id=5678");
+        assertTrue(rs.next());
+        assertEquals(1111, rs.getInt(1)); // Update succeeded: autocommit true in "transaction" read-only mode allows writes
+        rs.close();
+
+        TestUtil.closeDB(con);
+    }
+
+    /*
+     * Simple test to see if isClosed works.
+     */
+    @Test
+    void isClosed() throws Exception {
+        con = TestUtil.openDB();
+
+        // Should not say closed
+        assertFalse(con.isClosed());
+
+        TestUtil.closeDB(con);
+
+        // Should now say closed
+        assertTrue(con.isClosed());
+    }
+
+    /*
+     * Test the warnings system
+     */
+    @Test
+    void warnings() throws Exception {
+        con = TestUtil.openDB();
+
+        String testStr = "This Is OuR TeSt message";
+
+        // The connection must be ours!
+        assertTrue(con instanceof PGConnection);
+
+        // Clear any existing warnings
+        con.clearWarnings();
+
+        // Set the test warning
+        ((PgConnection) con).addWarning(new SQLWarning(testStr));
+
+        // Retrieve it
+        SQLWarning warning = con.getWarnings();
+        assertNotNull(warning);
+        assertEquals(testStr, warning.getMessage());
+
+        // Finally test clearWarnings() this time there must be something to delete
+        con.clearWarnings();
+        assertNull(con.getWarnings());
+
+        TestUtil.closeDB(con);
+    }
+
+    /*
+     * Transaction Isolation Levels
+     */
+    @Test
+    void transactionIsolation() throws Exception {
+        con = TestUtil.openDB();
+
+        int defaultLevel = con.getTransactionIsolation();
+
+        // Begin a transaction
+        con.setAutoCommit(false);
+
+        // The isolation level should not have changed
+        assertEquals(defaultLevel, con.getTransactionIsolation());
+
+        // Now run some tests with autocommit enabled.
+        con.setAutoCommit(true);
+
+        assertEquals(defaultLevel, con.getTransactionIsolation());
+
+        con.setTransactionIsolation(Connection.TRANSACTION_SERIALIZABLE);
+        assertEquals(Connection.TRANSACTION_SERIALIZABLE, con.getTransactionIsolation());
+
+        con.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED);
+        assertEquals(Connection.TRANSACTION_READ_COMMITTED, con.getTransactionIsolation());
+
+        // Test if a change of isolation level before beginning the
+        // transaction affects the isolation level inside the transaction.
+        con.setTransactionIsolation(Connection.TRANSACTION_SERIALIZABLE);
+        assertEquals(Connection.TRANSACTION_SERIALIZABLE, con.getTransactionIsolation());
+        con.setAutoCommit(false);
+        assertEquals(Connection.TRANSACTION_SERIALIZABLE, con.getTransactionIsolation());
+        con.setAutoCommit(true);
+        assertEquals(Connection.TRANSACTION_SERIALIZABLE, con.getTransactionIsolation());
+        con.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED);
+        assertEquals(Connection.TRANSACTION_READ_COMMITTED, con.getTransactionIsolation());
+        con.setAutoCommit(false);
+        assertEquals(Connection.TRANSACTION_READ_COMMITTED, con.getTransactionIsolation());
+        con.commit();
+
+        // Test that getTransactionIsolation() does not actually start a new txn.
+        // Shouldn't start a new transaction.
+        con.getTransactionIsolation();
+        // Should be ok -- we're not in a transaction.
+        con.setTransactionIsolation(Connection.TRANSACTION_SERIALIZABLE);
+        // Should still be ok.
+        con.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED);
+
+        // Test that we can't change isolation mid-transaction
+        Statement stmt = con.createStatement();
+        stmt.executeQuery("SELECT 1"); // Start transaction.
+        stmt.close();
+
+        try {
+            con.setTransactionIsolation(Connection.TRANSACTION_SERIALIZABLE);
+            fail("Expected an exception when changing transaction isolation mid-transaction");
+        } catch (SQLException e) {
+            // Ok.
+        }
+
+        con.rollback();
+        TestUtil.closeDB(con);
+    }
+
+    /*
+     * JDBC2 Type mappings
+     */
+    @Test
+    void typeMaps() throws Exception {
+        con = TestUtil.openDB();
+
+        // preserve the current map
+        Map<String, Class<?>> oldmap = con.getTypeMap();
+
+        // now change it for an empty one
+        Map<String, Class<?>> newmap = new HashMap<>();
+        con.setTypeMap(newmap);
+        assertEquals(newmap, con.getTypeMap());
+
+        // restore the old one
+        con.setTypeMap(oldmap);
+        assertEquals(oldmap, con.getTypeMap());
+
+        TestUtil.closeDB(con);
+    }
+
+    /**
+     * Closing a Connection more than once is not an error.
+     */
+    @Test
+    void doubleClose() throws Exception {
+        con = TestUtil.openDB();
+        con.close();
+        con.close();
+    }
+
+    /**
+     * Make sure that type map is empty and not null
+     */
+    @Test
+    void getTypeMapEmpty() throws Exception {
+        con = TestUtil.openDB();
+        Map typeMap = con.getTypeMap();
+        assertNotNull(typeMap);
+        assertTrue(typeMap.isEmpty(), "TypeMap should be empty");
+        con.close();
+    }
+
+    @Test
+    void pGStreamSettings() throws Exception {
+        con = TestUtil.openDB();
+        QueryExecutor queryExecutor = ((PgConnection) con).getQueryExecutor();
+
+        Field f = queryExecutor.getClass().getSuperclass().getDeclaredField("pgStream");
+        f.setAccessible(true);
+        PGStream pgStream = (PGStream) f.get(queryExecutor);
+        pgStream.setNetworkTimeout(1000);
+        pgStream.getSocket().setKeepAlive(true);
+        pgStream.getSocket().setSendBufferSize(8192);
+        pgStream.getSocket().setReceiveBufferSize(2048);
+        PGStream newStream = new PGStream(pgStream, 10);
+        assertEquals(1000, newStream.getSocket().getSoTimeout());
+        assertEquals(2048, newStream.getSocket().getReceiveBufferSize());
+        assertEquals(8192, newStream.getSocket().getSendBufferSize());
+        assertTrue(newStream.getSocket().getKeepAlive());
+
+        TestUtil.closeDB(con);
     }
-  }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/CopyLargeFileTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/CopyLargeFileTest.java
index af0ef92..69a2eb0 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/CopyLargeFileTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/CopyLargeFileTest.java
@@ -31,100 +31,100 @@ import java.util.Random;
  */
 class CopyLargeFileTest {
 
-  private static final int FEED_COUNT = 10;
+    private static final int FEED_COUNT = 10;
 
-  private Connection con;
-  private CopyManager copyAPI;
+    private Connection con;
+    private CopyManager copyAPI;
 
-  @BeforeEach
-  void setUp() throws Exception {
-    con = TestUtil.openDB();
+    @BeforeEach
+    void setUp() throws Exception {
+        con = TestUtil.openDB();
 
-    TestUtil.createTable(con, "pgjdbc_issue366_test_glossary",
-        "id SERIAL, text_id VARCHAR(1000) NOT NULL UNIQUE, name VARCHAR(10) NOT NULL UNIQUE");
-    TestUtil.createTable(con, "pgjdbc_issue366_test_data",
-        "id SERIAL,\n"
-            + "data_text_id VARCHAR(1000) NOT NULL /*UNIQUE <-- it slows down inserts due to additional index */,\n"
-            + "glossary_text_id VARCHAR(1000) NOT NULL /* REFERENCES pgjdbc_issue366_test_glossary(text_id) */,\n"
-            + "value DOUBLE PRECISION NOT NULL");
+        TestUtil.createTable(con, "pgjdbc_issue366_test_glossary",
+                "id SERIAL, text_id VARCHAR(1000) NOT NULL UNIQUE, name VARCHAR(10) NOT NULL UNIQUE");
+        TestUtil.createTable(con, "pgjdbc_issue366_test_data",
+                "id SERIAL,\n"
+                        + "data_text_id VARCHAR(1000) NOT NULL /*UNIQUE <-- it slows down inserts due to additional index */,\n"
+                        + "glossary_text_id VARCHAR(1000) NOT NULL /* REFERENCES pgjdbc_issue366_test_glossary(text_id) */,\n"
+                        + "value DOUBLE PRECISION NOT NULL");
 
-    feedTable();
-    BufferGenerator.main(new String[]{});
-    copyAPI = ((PGConnection) con).getCopyAPI();
-  }
-
-  private void feedTable() throws Exception {
-    PreparedStatement stmt = con.prepareStatement(
-        TestUtil.insertSQL("pgjdbc_issue366_test_glossary", "text_id, name", "?, ?"));
-    for (int i = 0; i < 26; i++) {
-      char ch = (char) ('A' + i); // black magic
-      insertData(stmt, "VERY_LONG_STRING_TO_REPRODUCE_ISSUE_366_" + ch + ch + ch,
-          "" + ch + ch + ch);
+        feedTable();
+        BufferGenerator.main(new String[]{});
+        copyAPI = ((PGConnection) con).getCopyAPI();
     }
-  }
 
-  private void insertData(PreparedStatement stmt, String textId, String name) throws SQLException {
-    stmt.setString(1, textId);
-    stmt.setString(2, name);
-    stmt.executeUpdate();
-  }
-
-  @AfterEach
-  void tearDown() throws Exception {
-    try {
-      TestUtil.dropTable(con, "pgjdbc_issue366_test_data");
-      TestUtil.dropTable(con, "pgjdbc_issue366_test_glossary");
-      new File("target/buffer.txt").delete();
-    } finally {
-      con.close();
-    }
-  }
-
-  @Test
-  void feedTableSeveralTimesTest() throws Throwable {
-    for (int i = 1; i <= FEED_COUNT; i++) {
-      feedTableAndCheckTableFeedIsOk(con);
-      cleanupTable(con);
-    }
-  }
-
-  private void feedTableAndCheckTableFeedIsOk(Connection conn) throws Throwable {
-    Long seed = Long.getLong("StrangeInputStream.seed");
-    if (seed == null) {
-      seed = new Random().nextLong();
-    }
-    InputStream in = null;
-    try {
-      in = new StrangeInputStream(new FileInputStream("target/buffer.txt"), seed);
-      long size = copyAPI.copyIn(
-          "COPY pgjdbc_issue366_test_data(data_text_id, glossary_text_id, value) FROM STDIN", in);
-      assertEquals(BufferGenerator.ROW_COUNT, size);
-    } catch (Throwable t) {
-      String message = "Using seed = " + seed + " for StrangeInputStream. Set -DStrangeInputStream.seed="
-          + seed + " to reproduce the test";
-      t.addSuppressed(new Throwable(message) {
-        @Override
-        public Throwable fillInStackTrace() {
-          return this;
+    private void feedTable() throws Exception {
+        PreparedStatement stmt = con.prepareStatement(
+                TestUtil.insertSQL("pgjdbc_issue366_test_glossary", "text_id, name", "?, ?"));
+        for (int i = 0; i < 26; i++) {
+            char ch = (char) ('A' + i); // black magic
+            insertData(stmt, "VERY_LONG_STRING_TO_REPRODUCE_ISSUE_366_" + ch + ch + ch,
+                    "" + ch + ch + ch);
         }
-      });
-    } finally {
-      if (in != null) {
-        in.close();
-      }
-    }
-  }
-
-  private void cleanupTable(Connection conn) throws Exception {
-    CallableStatement stmt = null;
-    try {
-      stmt = conn.prepareCall("TRUNCATE pgjdbc_issue366_test_data;");
-      stmt.execute();
-    } finally {
-      if (stmt != null) {
-        stmt.close();
-      }
     }
 
-  }
+    private void insertData(PreparedStatement stmt, String textId, String name) throws SQLException {
+        stmt.setString(1, textId);
+        stmt.setString(2, name);
+        stmt.executeUpdate();
+    }
+
+    @AfterEach
+    void tearDown() throws Exception {
+        try {
+            TestUtil.dropTable(con, "pgjdbc_issue366_test_data");
+            TestUtil.dropTable(con, "pgjdbc_issue366_test_glossary");
+            new File("target/buffer.txt").delete();
+        } finally {
+            con.close();
+        }
+    }
+
+    @Test
+    void feedTableSeveralTimesTest() throws Throwable {
+        for (int i = 1; i <= FEED_COUNT; i++) {
+            feedTableAndCheckTableFeedIsOk(con);
+            cleanupTable(con);
+        }
+    }
+
+    private void feedTableAndCheckTableFeedIsOk(Connection conn) throws Throwable {
+        Long seed = Long.getLong("StrangeInputStream.seed");
+        if (seed == null) {
+            seed = new Random().nextLong();
+        }
+        InputStream in = null;
+        try {
+            in = new StrangeInputStream(new FileInputStream("target/buffer.txt"), seed);
+            long size = copyAPI.copyIn(
+                    "COPY pgjdbc_issue366_test_data(data_text_id, glossary_text_id, value) FROM STDIN", in);
+            assertEquals(BufferGenerator.ROW_COUNT, size);
+        } catch (Throwable t) {
+            String message = "Using seed = " + seed + " for StrangeInputStream. Set -DStrangeInputStream.seed="
+                    + seed + " to reproduce the test";
+            t.addSuppressed(new Throwable(message) {
+                @Override
+                public Throwable fillInStackTrace() {
+                    return this;
+                }
+            });
+        } finally {
+            if (in != null) {
+                in.close();
+            }
+        }
+    }
+
+    private void cleanupTable(Connection conn) throws Exception {
+        CallableStatement stmt = null;
+        try {
+            stmt = conn.prepareCall("TRUNCATE pgjdbc_issue366_test_data;");
+            stmt.execute();
+        } finally {
+            if (stmt != null) {
+                stmt.close();
+            }
+        }
+
+    }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/CopyTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/CopyTest.java
index 3850e61..04edd3a 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/CopyTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/CopyTest.java
@@ -5,26 +5,6 @@
 
 package org.postgresql.test.jdbc2;
 
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertFalse;
-import static org.junit.jupiter.api.Assertions.assertNotNull;
-import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.junit.jupiter.api.Assertions.fail;
-
-import org.postgresql.PGConnection;
-import org.postgresql.copy.CopyIn;
-import org.postgresql.copy.CopyManager;
-import org.postgresql.copy.CopyOut;
-import org.postgresql.copy.PGCopyOutputStream;
-import org.postgresql.core.ServerVersion;
-import org.postgresql.test.TestUtil;
-import org.postgresql.util.ByteBufferByteStreamWriter;
-import org.postgresql.util.PSQLState;
-
-import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.Test;
-
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
 import java.io.IOException;
@@ -38,465 +18,482 @@ import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.sql.Statement;
 import java.util.Locale;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.postgresql.PGConnection;
+import org.postgresql.copy.CopyIn;
+import org.postgresql.copy.CopyManager;
+import org.postgresql.copy.CopyOut;
+import org.postgresql.copy.PGCopyOutputStream;
+import org.postgresql.core.ServerVersion;
+import org.postgresql.test.TestUtil;
+import org.postgresql.util.ByteBufferByteStreamWriter;
+import org.postgresql.util.PSQLState;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 
 /**
  * @author kato@iki.fi
  */
 class CopyTest {
-  private Connection con;
-  private CopyManager copyAPI;
-  private String copyParams;
-  // 0's required to match DB output for numeric(5,2)
-  private final String[] origData =
-      {"First Row\t1\t1.10\n",
-          "Second Row\t2\t-22.20\n",
-          "\\N\t\\N\t\\N\n",
-          "\t4\t444.40\n"};
-  private final int dataRows = origData.length;
+    // 0's required to match DB output for numeric(5,2)
+    private final String[] origData =
+            {"First Row\t1\t1.10\n",
+                    "Second Row\t2\t-22.20\n",
+                    "\\N\t\\N\t\\N\n",
+                    "\t4\t444.40\n"};
+    private final int dataRows = origData.length;
+    private Connection con;
+    private CopyManager copyAPI;
+    private String copyParams;
 
-  private byte[] getData(String[] origData) {
-    ByteArrayOutputStream buf = new ByteArrayOutputStream();
-    PrintStream ps = new PrintStream(buf);
-    for (String anOrigData : origData) {
-      ps.print(anOrigData);
-    }
-    return buf.toByteArray();
-  }
-
-  @BeforeEach
-  void setUp() throws Exception {
-    con = TestUtil.openDB();
-
-    TestUtil.createTempTable(con, "copytest", "stringvalue text, intvalue int, numvalue numeric(5,2)");
-
-    copyAPI = ((PGConnection) con).getCopyAPI();
-    if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_0)) {
-      copyParams = "(FORMAT CSV, HEADER false)";
-    } else {
-      copyParams = "CSV";
-    }
-  }
-
-  @AfterEach
-  void tearDown() throws Exception {
-    TestUtil.closeDB(con);
-
-    // one of the tests will render the existing connection broken,
-    // so we need to drop the table on a fresh one.
-    con = TestUtil.openDB();
-    try {
-      TestUtil.dropTable(con, "copytest");
-    } finally {
-      con.close();
-    }
-  }
-
-  private int getCount() throws SQLException {
-    Statement stmt = con.createStatement();
-    ResultSet rs = stmt.executeQuery("SELECT count(*) FROM copytest");
-    rs.next();
-    int result = rs.getInt(1);
-    rs.close();
-    return result;
-  }
-
-  @Test
-  void copyInByRow() throws SQLException {
-    String sql = "COPY copytest FROM STDIN";
-    CopyIn cp = copyAPI.copyIn(sql);
-    for (String anOrigData : origData) {
-      byte[] buf = anOrigData.getBytes();
-      cp.writeToCopy(buf, 0, buf.length);
-    }
-
-    long count1 = cp.endCopy();
-    long count2 = cp.getHandledRowCount();
-    assertEquals(dataRows, count1);
-    assertEquals(dataRows, count2);
-
-    try {
-      cp.cancelCopy();
-    } catch (SQLException se) { // should fail with obsolete operation
-      if (!PSQLState.OBJECT_NOT_IN_STATE.getState().equals(se.getSQLState())) {
-        fail("should have thrown object not in state exception.");
-      }
-    }
-    int rowCount = getCount();
-    assertEquals(dataRows, rowCount);
-  }
-
-  @Test
-  void copyInAsOutputStream() throws SQLException, IOException {
-    String sql = "COPY copytest FROM STDIN";
-    OutputStream os = new PGCopyOutputStream((PGConnection) con, sql, 1000);
-    for (String anOrigData : origData) {
-      byte[] buf = anOrigData.getBytes();
-      os.write(buf);
-    }
-    os.close();
-    int rowCount = getCount();
-    assertEquals(dataRows, rowCount);
-  }
-
-  @Test
-  void copyInAsOutputStreamClosesAfterEndCopy() throws SQLException, IOException {
-    String sql = "COPY copytest FROM STDIN";
-    PGCopyOutputStream os = new PGCopyOutputStream((PGConnection) con, sql, 1000);
-    try {
-      for (String anOrigData : origData) {
-        byte[] buf = anOrigData.getBytes();
-        os.write(buf);
-      }
-      os.endCopy();
-    } finally {
-      os.close();
-    }
-    assertFalse(os.isActive());
-    int rowCount = getCount();
-    assertEquals(dataRows, rowCount);
-  }
-
-  @Test
-  void copyInAsOutputStreamFailsOnFlushAfterEndCopy() throws SQLException, IOException {
-    String sql = "COPY copytest FROM STDIN";
-    PGCopyOutputStream os = new PGCopyOutputStream((PGConnection) con, sql, 1000);
-    try {
-      for (String anOrigData : origData) {
-        byte[] buf = anOrigData.getBytes();
-        os.write(buf);
-      }
-      os.endCopy();
-    } finally {
-      os.close();
-    }
-    try {
-      os.flush();
-      fail("should have failed flushing an inactive copy stream.");
-    } catch (IOException e) {
-      // We expect "This copy stream is closed", however, the message is locale-dependent
-      if (Locale.getDefault().getLanguage().equals(new Locale("en").getLanguage())
-          && !e.toString().contains("This copy stream is closed.")) {
-        fail("has failed not due to checkClosed(): " + e);
-      }
-    }
-  }
-
-  @Test
-  void copyInFromInputStream() throws SQLException, IOException {
-    String sql = "COPY copytest FROM STDIN";
-    copyAPI.copyIn(sql, new ByteArrayInputStream(getData(origData)), 3);
-    int rowCount = getCount();
-    assertEquals(dataRows, rowCount);
-  }
-
-  @Test
-  void copyInFromStreamFail() throws SQLException {
-    String sql = "COPY copytest FROM STDIN";
-    try {
-      copyAPI.copyIn(sql, new InputStream() {
-        @Override
-        public int read() {
-          throw new RuntimeException("COPYTEST");
+    private byte[] getData(String[] origData) {
+        ByteArrayOutputStream buf = new ByteArrayOutputStream();
+        PrintStream ps = new PrintStream(buf);
+        for (String anOrigData : origData) {
+            ps.print(anOrigData);
         }
-      }, 3);
-    } catch (Exception e) {
-      if (!e.toString().contains("COPYTEST")) {
-        fail("should have failed trying to read from our bogus stream.");
-      }
-    }
-    int rowCount = getCount();
-    assertEquals(0, rowCount);
-  }
-
-  @Test
-  void copyInFromReader() throws SQLException, IOException {
-    String sql = "COPY copytest FROM STDIN";
-    copyAPI.copyIn(sql, new StringReader(new String(getData(origData))), 3);
-    int rowCount = getCount();
-    assertEquals(dataRows, rowCount);
-  }
-
-  @Test
-  void copyInFromByteStreamWriter() throws SQLException, IOException {
-    String sql = "COPY copytest FROM STDIN";
-    copyAPI.copyIn(sql, new ByteBufferByteStreamWriter(ByteBuffer.wrap(getData(origData))));
-    int rowCount = getCount();
-    assertEquals(dataRows, rowCount);
-  }
-
-  /**
-   * Tests writing to a COPY ... FROM STDIN using both the standard OutputStream API
-   * write(byte[]) and the driver specific write(ByteStreamWriter) API interleaved.
-   */
-  @Test
-  void copyMultiApi() throws SQLException, IOException {
-    TestUtil.execute(con, "CREATE TABLE pg_temp.copy_api_test (data text)");
-    String sql = "COPY pg_temp.copy_api_test (data) FROM STDIN";
-    PGCopyOutputStream out = new PGCopyOutputStream(copyAPI.copyIn(sql));
-    try {
-      out.write("a".getBytes());
-      out.writeToCopy(new ByteBufferByteStreamWriter(ByteBuffer.wrap("b".getBytes())));
-      out.write("c".getBytes());
-      out.writeToCopy(new ByteBufferByteStreamWriter(ByteBuffer.wrap("d".getBytes())));
-      out.write("\n".getBytes());
-    } finally {
-      out.close();
-    }
-    String data = TestUtil.queryForString(con, "SELECT data FROM pg_temp.copy_api_test");
-    assertEquals("abcd", data, "The writes to the COPY should be in order");
-  }
-
-  @Test
-  void skipping() {
-    String sql = "COPY copytest FROM STDIN";
-    String at = "init";
-    int rowCount = -1;
-    int skip = 0;
-    int skipChar = 1;
-    try {
-      while (skipChar > 0) {
-        at = "buffering";
-        InputStream ins = new ByteArrayInputStream(getData(origData));
-        at = "skipping";
-        ins.skip(skip++);
-        skipChar = ins.read();
-        at = "copying";
-        copyAPI.copyIn(sql, ins, 3);
-        at = "using connection after writing copy";
-        rowCount = getCount();
-      }
-    } catch (Exception e) {
-      if (skipChar != '\t') {
-        // error expected when field separator consumed
-        fail("testSkipping at " + at + " round " + skip + ": " + e.toString());
-      }
-    }
-    assertEquals(dataRows * (skip - 1), rowCount);
-  }
-
-  @Test
-  void copyOutByRow() throws SQLException, IOException {
-    copyInByRow(); // ensure we have some data.
-    String sql = "COPY copytest TO STDOUT";
-    CopyOut cp = copyAPI.copyOut(sql);
-    int count = 0;
-    byte[] buf;
-    while ((buf = cp.readFromCopy()) != null) {
-      count++;
-    }
-    assertFalse(cp.isActive());
-    assertEquals(dataRows, count);
-
-    long rowCount = cp.getHandledRowCount();
-
-    assertEquals(dataRows, rowCount);
-
-    assertEquals(dataRows, getCount());
-  }
-
-  @Test
-  void copyOut() throws SQLException, IOException {
-    copyInByRow(); // ensure we have some data.
-    String sql = "COPY copytest TO STDOUT";
-    ByteArrayOutputStream copydata = new ByteArrayOutputStream();
-    copyAPI.copyOut(sql, copydata);
-    assertEquals(dataRows, getCount());
-    // deep comparison of data written and read
-    byte[] copybytes = copydata.toByteArray();
-    assertNotNull(copybytes);
-    for (int i = 0, l = 0; i < origData.length; i++) {
-      byte[] origBytes = origData[i].getBytes();
-      assertTrue(copybytes.length >= l + origBytes.length, "Copy is shorter than original");
-      for (int j = 0; j < origBytes.length; j++, l++) {
-        assertEquals(origBytes[j], copybytes[l], "content changed at byte#" + j + ": " + origBytes[j] + copybytes[l]);
-      }
-    }
-  }
-
-  @Test
-  void nonCopyOut() throws SQLException, IOException {
-    String sql = "SELECT 1";
-    try {
-      copyAPI.copyOut(sql, new ByteArrayOutputStream());
-      fail("Can't use a non-copy query.");
-    } catch (SQLException sqle) {
-    }
-    // Ensure connection still works.
-    assertEquals(0, getCount());
-  }
-
-  @Test
-  void nonCopyIn() throws SQLException, IOException {
-    String sql = "SELECT 1";
-    try {
-      copyAPI.copyIn(sql, new ByteArrayInputStream(new byte[0]));
-      fail("Can't use a non-copy query.");
-    } catch (SQLException sqle) {
-    }
-    // Ensure connection still works.
-    assertEquals(0, getCount());
-  }
-
-  @Test
-  void statementCopyIn() throws SQLException {
-    Statement stmt = con.createStatement();
-    try {
-      stmt.execute("COPY copytest FROM STDIN");
-      fail("Should have failed because copy doesn't work from a Statement.");
-    } catch (SQLException sqle) {
-    }
-    stmt.close();
-
-    assertEquals(0, getCount());
-  }
-
-  @Test
-  void statementCopyOut() throws SQLException {
-    copyInByRow(); // ensure we have some data.
-
-    Statement stmt = con.createStatement();
-    try {
-      stmt.execute("COPY copytest TO STDOUT");
-      fail("Should have failed because copy doesn't work from a Statement.");
-    } catch (SQLException sqle) {
-    }
-    stmt.close();
-
-    assertEquals(dataRows, getCount());
-  }
-
-  @Test
-  void copyQuery() throws SQLException, IOException {
-    copyInByRow(); // ensure we have some data.
-
-    long count = copyAPI.copyOut("COPY (SELECT generate_series(1,1000)) TO STDOUT",
-        new ByteArrayOutputStream());
-    assertEquals(1000, count);
-  }
-
-  @Test
-  void copyRollback() throws SQLException {
-    con.setAutoCommit(false);
-    copyInByRow();
-    con.rollback();
-    assertEquals(0, getCount());
-  }
-
-  @Test
-  void changeDateStyle() throws SQLException {
-    try {
-      con.setAutoCommit(false);
-      con.setTransactionIsolation(Connection.TRANSACTION_REPEATABLE_READ);
-      CopyManager manager = con.unwrap(PGConnection.class).getCopyAPI();
-
-      Statement stmt = con.createStatement();
-
-      stmt.execute("SET DateStyle = 'ISO, DMY'");
-
-      // I expect an SQLException
-      String sql = "COPY copytest FROM STDIN with xxx " + copyParams;
-      CopyIn cp = manager.copyIn(sql);
-      for (String anOrigData : origData) {
-        byte[] buf = anOrigData.getBytes();
-        cp.writeToCopy(buf, 0, buf.length);
-      }
-
-      long count1 = cp.endCopy();
-      long count2 = cp.getHandledRowCount();
-      con.commit();
-    } catch (SQLException ex) {
-
-      // the with xxx is a syntax error which should return a state of 42601
-      // if this fails the 'S' command is not being handled in the copy manager query handler
-      assertEquals("42601", ex.getSQLState());
-      con.rollback();
-    }
-  }
-
-  @Test
-  void lockReleaseOnCancelFailure() throws SQLException, InterruptedException {
-    if (!TestUtil.haveMinimumServerVersion(con, ServerVersion.v8_4)) {
-      // pg_backend_pid() requires PostgreSQL 8.4+
-      return;
+        return buf.toByteArray();
     }
 
-    // This is a fairly complex test because it is testing a
-    // deadlock that only occurs when the connection to postgres
-    // is broken during a copy operation. We'll start a copy
-    // operation, use pg_terminate_backend to rudely break it,
-    // and then cancel. The test passes if a subsequent operation
-    // on the Connection object fails to deadlock.
-    con.setAutoCommit(false);
+    @BeforeEach
+    void setUp() throws Exception {
+        con = TestUtil.openDB();
 
-    CopyManager manager = con.unwrap(PGConnection.class).getCopyAPI();
-    CopyIn copyIn = manager.copyIn("COPY copytest FROM STDIN with " + copyParams);
-    TestUtil.terminateBackend(con);
-    try {
-      byte[] bunchOfNulls = ",,\n".getBytes();
-      while (true) {
-        copyIn.writeToCopy(bunchOfNulls, 0, bunchOfNulls.length);
-      }
-    } catch (SQLException e) {
-      acceptIOCause(e);
-    } finally {
-      if (copyIn.isActive()) {
+        TestUtil.createTempTable(con, "copytest", "stringvalue text, intvalue int, numvalue numeric(5,2)");
+
+        copyAPI = ((PGConnection) con).getCopyAPI();
+        if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_0)) {
+            copyParams = "(FORMAT CSV, HEADER false)";
+        } else {
+            copyParams = "CSV";
+        }
+    }
+
+    @AfterEach
+    void tearDown() throws Exception {
+        TestUtil.closeDB(con);
+
+        // one of the tests will render the existing connection broken,
+        // so we need to drop the table on a fresh one.
+        con = TestUtil.openDB();
         try {
-          copyIn.cancelCopy();
-          fail("cancelCopy should have thrown an exception");
-        } catch (SQLException e) {
-          acceptIOCause(e);
+            TestUtil.dropTable(con, "copytest");
+        } finally {
+            con.close();
         }
-      }
     }
 
-    // Now we'll execute rollback on another thread so that if the
-    // deadlock _does_ occur the case doesn't just hange forever.
-    Rollback rollback = new Rollback(con);
-    rollback.start();
-    rollback.join(1000);
-    if (rollback.isAlive()) {
-      fail("rollback did not terminate");
-    }
-    SQLException rollbackException = rollback.exception();
-    if (rollbackException == null) {
-      fail("rollback should have thrown an exception");
-    }
-    acceptIOCause(rollbackException);
-  }
-
-  private static class Rollback extends Thread {
-    private final Connection con;
-    private SQLException rollbackException;
-
-    Rollback(Connection con) {
-      setName("Asynchronous rollback");
-      setDaemon(true);
-      this.con = con;
+    private int getCount() throws SQLException {
+        Statement stmt = con.createStatement();
+        ResultSet rs = stmt.executeQuery("SELECT count(*) FROM copytest");
+        rs.next();
+        int result = rs.getInt(1);
+        rs.close();
+        return result;
     }
 
-    @Override
-    public void run() {
-      try {
+    @Test
+    void copyInByRow() throws SQLException {
+        String sql = "COPY copytest FROM STDIN";
+        CopyIn cp = copyAPI.copyIn(sql);
+        for (String anOrigData : origData) {
+            byte[] buf = anOrigData.getBytes();
+            cp.writeToCopy(buf, 0, buf.length);
+        }
+
+        long count1 = cp.endCopy();
+        long count2 = cp.getHandledRowCount();
+        assertEquals(dataRows, count1);
+        assertEquals(dataRows, count2);
+
+        try {
+            cp.cancelCopy();
+        } catch (SQLException se) { // should fail with obsolete operation
+            if (!PSQLState.OBJECT_NOT_IN_STATE.getState().equals(se.getSQLState())) {
+                fail("should have thrown object not in state exception.");
+            }
+        }
+        int rowCount = getCount();
+        assertEquals(dataRows, rowCount);
+    }
+
+    @Test
+    void copyInAsOutputStream() throws SQLException, IOException {
+        String sql = "COPY copytest FROM STDIN";
+        OutputStream os = new PGCopyOutputStream((PGConnection) con, sql, 1000);
+        for (String anOrigData : origData) {
+            byte[] buf = anOrigData.getBytes();
+            os.write(buf);
+        }
+        os.close();
+        int rowCount = getCount();
+        assertEquals(dataRows, rowCount);
+    }
+
+    @Test
+    void copyInAsOutputStreamClosesAfterEndCopy() throws SQLException, IOException {
+        String sql = "COPY copytest FROM STDIN";
+        PGCopyOutputStream os = new PGCopyOutputStream((PGConnection) con, sql, 1000);
+        try {
+            for (String anOrigData : origData) {
+                byte[] buf = anOrigData.getBytes();
+                os.write(buf);
+            }
+            os.endCopy();
+        } finally {
+            os.close();
+        }
+        assertFalse(os.isActive());
+        int rowCount = getCount();
+        assertEquals(dataRows, rowCount);
+    }
+
+    @Test
+    void copyInAsOutputStreamFailsOnFlushAfterEndCopy() throws SQLException, IOException {
+        String sql = "COPY copytest FROM STDIN";
+        PGCopyOutputStream os = new PGCopyOutputStream((PGConnection) con, sql, 1000);
+        try {
+            for (String anOrigData : origData) {
+                byte[] buf = anOrigData.getBytes();
+                os.write(buf);
+            }
+            os.endCopy();
+        } finally {
+            os.close();
+        }
+        try {
+            os.flush();
+            fail("should have failed flushing an inactive copy stream.");
+        } catch (IOException e) {
+            // We expect "This copy stream is closed", however, the message is locale-dependent
+            if (Locale.getDefault().getLanguage().equals(new Locale("en").getLanguage())
+                    && !e.toString().contains("This copy stream is closed.")) {
+                fail("has failed not due to checkClosed(): " + e);
+            }
+        }
+    }
+
+    @Test
+    void copyInFromInputStream() throws SQLException, IOException {
+        String sql = "COPY copytest FROM STDIN";
+        copyAPI.copyIn(sql, new ByteArrayInputStream(getData(origData)), 3);
+        int rowCount = getCount();
+        assertEquals(dataRows, rowCount);
+    }
+
+    @Test
+    void copyInFromStreamFail() throws SQLException {
+        String sql = "COPY copytest FROM STDIN";
+        try {
+            copyAPI.copyIn(sql, new InputStream() {
+                @Override
+                public int read() {
+                    throw new RuntimeException("COPYTEST");
+                }
+            }, 3);
+        } catch (Exception e) {
+            if (!e.toString().contains("COPYTEST")) {
+                fail("should have failed trying to read from our bogus stream.");
+            }
+        }
+        int rowCount = getCount();
+        assertEquals(0, rowCount);
+    }
+
+    @Test
+    void copyInFromReader() throws SQLException, IOException {
+        String sql = "COPY copytest FROM STDIN";
+        copyAPI.copyIn(sql, new StringReader(new String(getData(origData))), 3);
+        int rowCount = getCount();
+        assertEquals(dataRows, rowCount);
+    }
+
+    @Test
+    void copyInFromByteStreamWriter() throws SQLException, IOException {
+        String sql = "COPY copytest FROM STDIN";
+        copyAPI.copyIn(sql, new ByteBufferByteStreamWriter(ByteBuffer.wrap(getData(origData))));
+        int rowCount = getCount();
+        assertEquals(dataRows, rowCount);
+    }
+
+    /**
+     * Tests writing to a COPY ... FROM STDIN using both the standard OutputStream API
+     * write(byte[]) and the driver specific write(ByteStreamWriter) API interleaved.
+     */
+    @Test
+    void copyMultiApi() throws SQLException, IOException {
+        TestUtil.execute(con, "CREATE TABLE pg_temp.copy_api_test (data text)");
+        String sql = "COPY pg_temp.copy_api_test (data) FROM STDIN";
+        PGCopyOutputStream out = new PGCopyOutputStream(copyAPI.copyIn(sql));
+        try {
+            out.write("a".getBytes());
+            out.writeToCopy(new ByteBufferByteStreamWriter(ByteBuffer.wrap("b".getBytes())));
+            out.write("c".getBytes());
+            out.writeToCopy(new ByteBufferByteStreamWriter(ByteBuffer.wrap("d".getBytes())));
+            out.write("\n".getBytes());
+        } finally {
+            out.close();
+        }
+        String data = TestUtil.queryForString(con, "SELECT data FROM pg_temp.copy_api_test");
+        assertEquals("abcd", data, "The writes to the COPY should be in order");
+    }
+
+    @Test
+    void skipping() {
+        String sql = "COPY copytest FROM STDIN";
+        String at = "init";
+        int rowCount = -1;
+        int skip = 0;
+        int skipChar = 1;
+        try {
+            while (skipChar > 0) {
+                at = "buffering";
+                InputStream ins = new ByteArrayInputStream(getData(origData));
+                at = "skipping";
+                ins.skip(skip++);
+                skipChar = ins.read();
+                at = "copying";
+                copyAPI.copyIn(sql, ins, 3);
+                at = "using connection after writing copy";
+                rowCount = getCount();
+            }
+        } catch (Exception e) {
+            if (skipChar != '\t') {
+                // error expected when field separator consumed
+                fail("testSkipping at " + at + " round " + skip + ": " + e.toString());
+            }
+        }
+        assertEquals(dataRows * (skip - 1), rowCount);
+    }
+
+    @Test
+    void copyOutByRow() throws SQLException, IOException {
+        copyInByRow(); // ensure we have some data.
+        String sql = "COPY copytest TO STDOUT";
+        CopyOut cp = copyAPI.copyOut(sql);
+        int count = 0;
+        byte[] buf;
+        while ((buf = cp.readFromCopy()) != null) {
+            count++;
+        }
+        assertFalse(cp.isActive());
+        assertEquals(dataRows, count);
+
+        long rowCount = cp.getHandledRowCount();
+
+        assertEquals(dataRows, rowCount);
+
+        assertEquals(dataRows, getCount());
+    }
+
+    @Test
+    void copyOut() throws SQLException, IOException {
+        copyInByRow(); // ensure we have some data.
+        String sql = "COPY copytest TO STDOUT";
+        ByteArrayOutputStream copydata = new ByteArrayOutputStream();
+        copyAPI.copyOut(sql, copydata);
+        assertEquals(dataRows, getCount());
+        // deep comparison of data written and read
+        byte[] copybytes = copydata.toByteArray();
+        assertNotNull(copybytes);
+        for (int i = 0, l = 0; i < origData.length; i++) {
+            byte[] origBytes = origData[i].getBytes();
+            assertTrue(copybytes.length >= l + origBytes.length, "Copy is shorter than original");
+            for (int j = 0; j < origBytes.length; j++, l++) {
+                assertEquals(origBytes[j], copybytes[l], "content changed at byte#" + j + ": " + origBytes[j] + copybytes[l]);
+            }
+        }
+    }
+
+    @Test
+    void nonCopyOut() throws SQLException, IOException {
+        String sql = "SELECT 1";
+        try {
+            copyAPI.copyOut(sql, new ByteArrayOutputStream());
+            fail("Can't use a non-copy query.");
+        } catch (SQLException sqle) {
+        }
+        // Ensure connection still works.
+        assertEquals(0, getCount());
+    }
+
+    @Test
+    void nonCopyIn() throws SQLException, IOException {
+        String sql = "SELECT 1";
+        try {
+            copyAPI.copyIn(sql, new ByteArrayInputStream(new byte[0]));
+            fail("Can't use a non-copy query.");
+        } catch (SQLException sqle) {
+        }
+        // Ensure connection still works.
+        assertEquals(0, getCount());
+    }
+
+    @Test
+    void statementCopyIn() throws SQLException {
+        Statement stmt = con.createStatement();
+        try {
+            stmt.execute("COPY copytest FROM STDIN");
+            fail("Should have failed because copy doesn't work from a Statement.");
+        } catch (SQLException sqle) {
+        }
+        stmt.close();
+
+        assertEquals(0, getCount());
+    }
+
+    @Test
+    void statementCopyOut() throws SQLException {
+        copyInByRow(); // ensure we have some data.
+
+        Statement stmt = con.createStatement();
+        try {
+            stmt.execute("COPY copytest TO STDOUT");
+            fail("Should have failed because copy doesn't work from a Statement.");
+        } catch (SQLException sqle) {
+        }
+        stmt.close();
+
+        assertEquals(dataRows, getCount());
+    }
+
+    @Test
+    void copyQuery() throws SQLException, IOException {
+        copyInByRow(); // ensure we have some data.
+
+        long count = copyAPI.copyOut("COPY (SELECT generate_series(1,1000)) TO STDOUT",
+                new ByteArrayOutputStream());
+        assertEquals(1000, count);
+    }
+
+    @Test
+    void copyRollback() throws SQLException {
+        con.setAutoCommit(false);
+        copyInByRow();
         con.rollback();
-      } catch (SQLException e) {
-        rollbackException = e;
-      }
+        assertEquals(0, getCount());
     }
 
-    public SQLException exception() {
-      return rollbackException;
-    }
-  }
+    @Test
+    void changeDateStyle() throws SQLException {
+        try {
+            con.setAutoCommit(false);
+            con.setTransactionIsolation(Connection.TRANSACTION_REPEATABLE_READ);
+            CopyManager manager = con.unwrap(PGConnection.class).getCopyAPI();
 
-  private void acceptIOCause(SQLException e) throws SQLException {
-    if (e.getSQLState().equals(PSQLState.CONNECTION_FAILURE.getState())
-        || e.getSQLState().equals(PSQLState.CONNECTION_DOES_NOT_EXIST.getState())) {
-      // The test expects network exception, so CONNECTION_FAILURE looks good
-      return;
+            Statement stmt = con.createStatement();
+
+            stmt.execute("SET DateStyle = 'ISO, DMY'");
+
+            // I expect an SQLException
+            String sql = "COPY copytest FROM STDIN with xxx " + copyParams;
+            CopyIn cp = manager.copyIn(sql);
+            for (String anOrigData : origData) {
+                byte[] buf = anOrigData.getBytes();
+                cp.writeToCopy(buf, 0, buf.length);
+            }
+
+            long count1 = cp.endCopy();
+            long count2 = cp.getHandledRowCount();
+            con.commit();
+        } catch (SQLException ex) {
+
+            // the with xxx is a syntax error which should return a state of 42601
+            // if this fails the 'S' command is not being handled in the copy manager query handler
+            assertEquals("42601", ex.getSQLState());
+            con.rollback();
+        }
     }
-    if (!(e.getCause() instanceof IOException)) {
-      throw e;
+
+    @Test
+    void lockReleaseOnCancelFailure() throws SQLException, InterruptedException {
+        if (!TestUtil.haveMinimumServerVersion(con, ServerVersion.v8_4)) {
+            // pg_backend_pid() requires PostgreSQL 8.4+
+            return;
+        }
+
+        // This is a fairly complex test because it is testing a
+        // deadlock that only occurs when the connection to postgres
+        // is broken during a copy operation. We'll start a copy
+        // operation, use pg_terminate_backend to rudely break it,
+        // and then cancel. The test passes if a subsequent operation
+        // on the Connection object fails to deadlock.
+        con.setAutoCommit(false);
+
+        CopyManager manager = con.unwrap(PGConnection.class).getCopyAPI();
+        CopyIn copyIn = manager.copyIn("COPY copytest FROM STDIN with " + copyParams);
+        TestUtil.terminateBackend(con);
+        try {
+            byte[] bunchOfNulls = ",,\n".getBytes();
+            while (true) {
+                copyIn.writeToCopy(bunchOfNulls, 0, bunchOfNulls.length);
+            }
+        } catch (SQLException e) {
+            acceptIOCause(e);
+        } finally {
+            if (copyIn.isActive()) {
+                try {
+                    copyIn.cancelCopy();
+                    fail("cancelCopy should have thrown an exception");
+                } catch (SQLException e) {
+                    acceptIOCause(e);
+                }
+            }
+        }
+
+        // Now we'll execute rollback on another thread so that if the
+        // deadlock _does_ occur the case doesn't just hange forever.
+        Rollback rollback = new Rollback(con);
+        rollback.start();
+        rollback.join(1000);
+        if (rollback.isAlive()) {
+            fail("rollback did not terminate");
+        }
+        SQLException rollbackException = rollback.exception();
+        if (rollbackException == null) {
+            fail("rollback should have thrown an exception");
+        }
+        acceptIOCause(rollbackException);
+    }
+
+    private void acceptIOCause(SQLException e) throws SQLException {
+        if (e.getSQLState().equals(PSQLState.CONNECTION_FAILURE.getState())
+                || e.getSQLState().equals(PSQLState.CONNECTION_DOES_NOT_EXIST.getState())) {
+            // The test expects network exception, so CONNECTION_FAILURE looks good
+            return;
+        }
+        if (!(e.getCause() instanceof IOException)) {
+            throw e;
+        }
+    }
+
+    private static class Rollback extends Thread {
+        private final Connection con;
+        private SQLException rollbackException;
+
+        Rollback(Connection con) {
+            setName("Asynchronous rollback");
+            setDaemon(true);
+            this.con = con;
+        }
+
+        @Override
+        public void run() {
+            try {
+                con.rollback();
+            } catch (SQLException e) {
+                rollbackException = e;
+            }
+        }
+
+        public SQLException exception() {
+            return rollbackException;
+        }
     }
-  }
 
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/CursorFetchTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/CursorFetchTest.java
index 794e54d..fd19807 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/CursorFetchTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/CursorFetchTest.java
@@ -5,21 +5,18 @@
 
 package org.postgresql.test.jdbc2;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import org.postgresql.test.TestUtil;
-
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.sql.Statement;
 import java.util.ArrayList;
 import java.util.Collection;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.postgresql.test.TestUtil;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 
 /*
  * Tests for using non-zero setFetchSize().
@@ -27,514 +24,514 @@ import java.util.Collection;
 @RunWith(Parameterized.class)
 public class CursorFetchTest extends BaseTest4 {
 
-  public CursorFetchTest(BinaryMode binaryMode) {
-    setBinaryMode(binaryMode);
-  }
-
-  @Parameterized.Parameters(name = "binary = {0}")
-  public static Iterable<Object[]> data() {
-    Collection<Object[]> ids = new ArrayList<>();
-    for (BinaryMode binaryMode : BinaryMode.values()) {
-      ids.add(new Object[]{binaryMode});
-    }
-    return ids;
-  }
-
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-    TestUtil.createTable(con, "test_fetch", "value integer");
-    con.setAutoCommit(false);
-  }
-
-  @Override
-  public void tearDown() throws SQLException {
-    if (!con.getAutoCommit()) {
-      con.rollback();
+    public CursorFetchTest(BinaryMode binaryMode) {
+        setBinaryMode(binaryMode);
     }
 
-    con.setAutoCommit(true);
-    TestUtil.dropTable(con, "test_fetch");
-    super.tearDown();
-  }
-
-  protected void createRows(int count) throws Exception {
-    PreparedStatement stmt = con.prepareStatement("insert into test_fetch(value) values(?)");
-    for (int i = 0; i < count; i++) {
-      stmt.setInt(1, i);
-      stmt.executeUpdate();
-    }
-  }
-
-  // Test various fetchsizes.
-  @Test
-  public void testBasicFetch() throws Exception {
-    createRows(100);
-
-    PreparedStatement stmt = con.prepareStatement("select * from test_fetch order by value");
-    int[] testSizes = {0, 1, 49, 50, 51, 99, 100, 101};
-    for (int testSize : testSizes) {
-      stmt.setFetchSize(testSize);
-      assertEquals(testSize, stmt.getFetchSize());
-
-      ResultSet rs = stmt.executeQuery();
-      assertEquals(testSize, rs.getFetchSize());
-
-      int count = 0;
-      while (rs.next()) {
-        assertEquals("query value error with fetch size " + testSize, count, rs.getInt(1));
-        ++count;
-      }
-
-      assertEquals("total query size error with fetch size " + testSize, 100, count);
-    }
-  }
-
-  // Similar, but for scrollable resultsets.
-  @Test
-  public void testScrollableFetch() throws Exception {
-    createRows(100);
-
-    PreparedStatement stmt = con.prepareStatement("select * from test_fetch order by value",
-        ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY);
-
-    int[] testSizes = {0, 1, 49, 50, 51, 99, 100, 101};
-    for (int testSize : testSizes) {
-      stmt.setFetchSize(testSize);
-      assertEquals(testSize, stmt.getFetchSize());
-
-      ResultSet rs = stmt.executeQuery();
-      assertEquals(testSize, rs.getFetchSize());
-
-      for (int j = 0; j <= 50; j++) {
-        assertTrue("ran out of rows at position " + j + " with fetch size " + testSize, rs.next());
-        assertEquals("query value error with fetch size " + testSize, j, rs.getInt(1));
-      }
-
-      int position = 50;
-      for (int j = 1; j < 100; j++) {
-        for (int k = 0; k < j; k++) {
-          if (j % 2 == 0) {
-            ++position;
-            assertTrue("ran out of rows doing a forward fetch on iteration " + j + "/" + k
-                + " at position " + position + " with fetch size " + testSize, rs.next());
-          } else {
-            --position;
-            assertTrue(
-                "ran out of rows doing a reverse fetch on iteration " + j + "/" + k
-                    + " at position " + position + " with fetch size " + testSize,
-                rs.previous());
-          }
-
-          assertEquals(
-              "query value error on iteration " + j + "/" + k + " with fetch size " + testSize,
-              position, rs.getInt(1));
+    @Parameterized.Parameters(name = "binary = {0}")
+    public static Iterable<Object[]> data() {
+        Collection<Object[]> ids = new ArrayList<>();
+        for (BinaryMode binaryMode : BinaryMode.values()) {
+            ids.add(new Object[]{binaryMode});
         }
-      }
+        return ids;
     }
-  }
 
-  @Test
-  public void testScrollableAbsoluteFetch() throws Exception {
-    createRows(100);
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        TestUtil.createTable(con, "test_fetch", "value integer");
+        con.setAutoCommit(false);
+    }
 
-    PreparedStatement stmt = con.prepareStatement("select * from test_fetch order by value",
-        ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY);
-
-    int[] testSizes = {0, 1, 49, 50, 51, 99, 100, 101};
-    for (int testSize : testSizes) {
-      stmt.setFetchSize(testSize);
-      assertEquals(testSize, stmt.getFetchSize());
-
-      ResultSet rs = stmt.executeQuery();
-      assertEquals(testSize, rs.getFetchSize());
-
-      int position = 50;
-      assertTrue("ran out of rows doing an absolute fetch at " + position + " with fetch size "
-          + testSize, rs.absolute(position + 1));
-      assertEquals("query value error with fetch size " + testSize, position, rs.getInt(1));
-
-      for (int j = 1; j < 100; j++) {
-        if (j % 2 == 0) {
-          position += j;
-        } else {
-          position -= j;
+    @Override
+    public void tearDown() throws SQLException {
+        if (!con.getAutoCommit()) {
+            con.rollback();
         }
 
-        assertTrue("ran out of rows doing an absolute fetch at " + position + " on iteration " + j
-            + " with fetchsize" + testSize, rs.absolute(position + 1));
-        assertEquals("query value error with fetch size " + testSize, position, rs.getInt(1));
-      }
-    }
-  }
-
-  //
-  // Tests for ResultSet.setFetchSize().
-  //
-
-  // test one:
-  // -set fetchsize = 0
-  // -run query (all rows should be fetched)
-  // -set fetchsize = 50 (should have no effect)
-  // -process results
-  @Test
-  public void testResultSetFetchSizeOne() throws Exception {
-    createRows(100);
-
-    PreparedStatement stmt = con.prepareStatement("select * from test_fetch order by value");
-    stmt.setFetchSize(0);
-    ResultSet rs = stmt.executeQuery();
-    rs.setFetchSize(50); // Should have no effect.
-
-    int count = 0;
-    while (rs.next()) {
-      assertEquals(count, rs.getInt(1));
-      ++count;
+        con.setAutoCommit(true);
+        TestUtil.dropTable(con, "test_fetch");
+        super.tearDown();
     }
 
-    assertEquals(100, count);
-  }
-
-  // test two:
-  // -set fetchsize = 25
-  // -run query (25 rows fetched)
-  // -set fetchsize = 0
-  // -process results:
-  // --process 25 rows
-  // --should do a FETCH ALL to get more data
-  // --process 75 rows
-  @Test
-  public void testResultSetFetchSizeTwo() throws Exception {
-    createRows(100);
-
-    PreparedStatement stmt = con.prepareStatement("select * from test_fetch order by value");
-    stmt.setFetchSize(25);
-    ResultSet rs = stmt.executeQuery();
-    rs.setFetchSize(0);
-
-    int count = 0;
-    while (rs.next()) {
-      assertEquals(count, rs.getInt(1));
-      ++count;
+    protected void createRows(int count) throws Exception {
+        PreparedStatement stmt = con.prepareStatement("insert into test_fetch(value) values(?)");
+        for (int i = 0; i < count; i++) {
+            stmt.setInt(1, i);
+            stmt.executeUpdate();
+        }
     }
 
-    assertEquals(100, count);
-  }
+    // Test various fetchsizes.
+    @Test
+    public void testBasicFetch() throws Exception {
+        createRows(100);
 
-  // test three:
-  // -set fetchsize = 25
-  // -run query (25 rows fetched)
-  // -set fetchsize = 50
-  // -process results:
-  // --process 25 rows. should NOT hit end-of-results here.
-  // --do a FETCH FORWARD 50
-  // --process 50 rows
-  // --do a FETCH FORWARD 50
-  // --process 25 rows. end of results.
-  @Test
-  public void testResultSetFetchSizeThree() throws Exception {
-    createRows(100);
+        PreparedStatement stmt = con.prepareStatement("select * from test_fetch order by value");
+        int[] testSizes = {0, 1, 49, 50, 51, 99, 100, 101};
+        for (int testSize : testSizes) {
+            stmt.setFetchSize(testSize);
+            assertEquals(testSize, stmt.getFetchSize());
 
-    PreparedStatement stmt = con.prepareStatement("select * from test_fetch order by value");
-    stmt.setFetchSize(25);
-    ResultSet rs = stmt.executeQuery();
-    rs.setFetchSize(50);
+            ResultSet rs = stmt.executeQuery();
+            assertEquals(testSize, rs.getFetchSize());
 
-    int count = 0;
-    while (rs.next()) {
-      assertEquals(count, rs.getInt(1));
-      ++count;
+            int count = 0;
+            while (rs.next()) {
+                assertEquals("query value error with fetch size " + testSize, count, rs.getInt(1));
+                ++count;
+            }
+
+            assertEquals("total query size error with fetch size " + testSize, 100, count);
+        }
     }
 
-    assertEquals(100, count);
-  }
+    // Similar, but for scrollable resultsets.
+    @Test
+    public void testScrollableFetch() throws Exception {
+        createRows(100);
 
-  // test four:
-  // -set fetchsize = 50
-  // -run query (50 rows fetched)
-  // -set fetchsize = 25
-  // -process results:
-  // --process 50 rows.
-  // --do a FETCH FORWARD 25
-  // --process 25 rows
-  // --do a FETCH FORWARD 25
-  // --process 25 rows. end of results.
-  @Test
-  public void testResultSetFetchSizeFour() throws Exception {
-    createRows(100);
+        PreparedStatement stmt = con.prepareStatement("select * from test_fetch order by value",
+                ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY);
 
-    PreparedStatement stmt = con.prepareStatement("select * from test_fetch order by value");
-    stmt.setFetchSize(50);
-    ResultSet rs = stmt.executeQuery();
-    rs.setFetchSize(25);
+        int[] testSizes = {0, 1, 49, 50, 51, 99, 100, 101};
+        for (int testSize : testSizes) {
+            stmt.setFetchSize(testSize);
+            assertEquals(testSize, stmt.getFetchSize());
 
-    int count = 0;
-    while (rs.next()) {
-      assertEquals(count, rs.getInt(1));
-      ++count;
+            ResultSet rs = stmt.executeQuery();
+            assertEquals(testSize, rs.getFetchSize());
+
+            for (int j = 0; j <= 50; j++) {
+                assertTrue("ran out of rows at position " + j + " with fetch size " + testSize, rs.next());
+                assertEquals("query value error with fetch size " + testSize, j, rs.getInt(1));
+            }
+
+            int position = 50;
+            for (int j = 1; j < 100; j++) {
+                for (int k = 0; k < j; k++) {
+                    if (j % 2 == 0) {
+                        ++position;
+                        assertTrue("ran out of rows doing a forward fetch on iteration " + j + "/" + k
+                                + " at position " + position + " with fetch size " + testSize, rs.next());
+                    } else {
+                        --position;
+                        assertTrue(
+                                "ran out of rows doing a reverse fetch on iteration " + j + "/" + k
+                                        + " at position " + position + " with fetch size " + testSize,
+                                rs.previous());
+                    }
+
+                    assertEquals(
+                            "query value error on iteration " + j + "/" + k + " with fetch size " + testSize,
+                            position, rs.getInt(1));
+                }
+            }
+        }
     }
 
-    assertEquals(100, count);
-  }
+    @Test
+    public void testScrollableAbsoluteFetch() throws Exception {
+        createRows(100);
 
-  @Test
-  public void testSingleRowResultPositioning() throws Exception {
-    String msg;
-    createRows(1);
+        PreparedStatement stmt = con.prepareStatement("select * from test_fetch order by value",
+                ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY);
 
-    int[] sizes = {0, 1, 10};
-    for (int size : sizes) {
-      Statement stmt = con.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_UPDATABLE);
-      stmt.setFetchSize(size);
+        int[] testSizes = {0, 1, 49, 50, 51, 99, 100, 101};
+        for (int testSize : testSizes) {
+            stmt.setFetchSize(testSize);
+            assertEquals(testSize, stmt.getFetchSize());
 
-      // Create a one row result set.
-      ResultSet rs = stmt.executeQuery("select * from test_fetch order by value");
+            ResultSet rs = stmt.executeQuery();
+            assertEquals(testSize, rs.getFetchSize());
 
-      msg = "before-first row positioning error with fetchsize=" + size;
-      assertTrue(msg, rs.isBeforeFirst());
-      assertTrue(msg, !rs.isAfterLast());
-      assertTrue(msg, !rs.isFirst());
-      assertTrue(msg, !rs.isLast());
+            int position = 50;
+            assertTrue("ran out of rows doing an absolute fetch at " + position + " with fetch size "
+                    + testSize, rs.absolute(position + 1));
+            assertEquals("query value error with fetch size " + testSize, position, rs.getInt(1));
 
-      msg = "row 1 positioning error with fetchsize=" + size;
-      assertTrue(msg, rs.next());
+            for (int j = 1; j < 100; j++) {
+                if (j % 2 == 0) {
+                    position += j;
+                } else {
+                    position -= j;
+                }
 
-      assertTrue(msg, !rs.isBeforeFirst());
-      assertTrue(msg, !rs.isAfterLast());
-      assertTrue(msg, rs.isFirst());
-      assertTrue(msg, rs.isLast());
-      assertEquals(msg, 0, rs.getInt(1));
-
-      msg = "after-last row positioning error with fetchsize=" + size;
-      assertTrue(msg, !rs.next());
-
-      assertTrue(msg, !rs.isBeforeFirst());
-      assertTrue(msg, rs.isAfterLast());
-      assertTrue(msg, !rs.isFirst());
-      assertTrue(msg, !rs.isLast());
-
-      rs.close();
-      stmt.close();
+                assertTrue("ran out of rows doing an absolute fetch at " + position + " on iteration " + j
+                        + " with fetchsize" + testSize, rs.absolute(position + 1));
+                assertEquals("query value error with fetch size " + testSize, position, rs.getInt(1));
+            }
+        }
     }
-  }
 
-  @Test
-  public void testMultiRowResultPositioning() throws Exception {
-    String msg;
+    //
+    // Tests for ResultSet.setFetchSize().
+    //
 
-    createRows(100);
+    // test one:
+    // -set fetchsize = 0
+    // -run query (all rows should be fetched)
+    // -set fetchsize = 50 (should have no effect)
+    // -process results
+    @Test
+    public void testResultSetFetchSizeOne() throws Exception {
+        createRows(100);
 
-    int[] sizes = {0, 1, 10, 100};
-    for (int size : sizes) {
-      Statement stmt = con.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_UPDATABLE);
-      stmt.setFetchSize(size);
+        PreparedStatement stmt = con.prepareStatement("select * from test_fetch order by value");
+        stmt.setFetchSize(0);
+        ResultSet rs = stmt.executeQuery();
+        rs.setFetchSize(50); // Should have no effect.
 
-      ResultSet rs = stmt.executeQuery("select * from test_fetch order by value");
-      msg = "before-first row positioning error with fetchsize=" + size;
-      assertTrue(msg, rs.isBeforeFirst());
-      assertTrue(msg, !rs.isAfterLast());
-      assertTrue(msg, !rs.isFirst());
-      assertTrue(msg, !rs.isLast());
-
-      for (int j = 0; j < 100; j++) {
-        msg = "row " + j + " positioning error with fetchsize=" + size;
-        assertTrue(msg, rs.next());
-        assertEquals(msg, j, rs.getInt(1));
-
-        assertTrue(msg, !rs.isBeforeFirst());
-        assertTrue(msg, !rs.isAfterLast());
-        if (j == 0) {
-          assertTrue(msg, rs.isFirst());
-        } else {
-          assertTrue(msg, !rs.isFirst());
+        int count = 0;
+        while (rs.next()) {
+            assertEquals(count, rs.getInt(1));
+            ++count;
         }
 
-        if (j == 99) {
-          assertTrue(msg, rs.isLast());
-        } else {
-          assertTrue(msg, !rs.isLast());
+        assertEquals(100, count);
+    }
+
+    // test two:
+    // -set fetchsize = 25
+    // -run query (25 rows fetched)
+    // -set fetchsize = 0
+    // -process results:
+    // --process 25 rows
+    // --should do a FETCH ALL to get more data
+    // --process 75 rows
+    @Test
+    public void testResultSetFetchSizeTwo() throws Exception {
+        createRows(100);
+
+        PreparedStatement stmt = con.prepareStatement("select * from test_fetch order by value");
+        stmt.setFetchSize(25);
+        ResultSet rs = stmt.executeQuery();
+        rs.setFetchSize(0);
+
+        int count = 0;
+        while (rs.next()) {
+            assertEquals(count, rs.getInt(1));
+            ++count;
         }
-      }
 
-      msg = "after-last row positioning error with fetchsize=" + size;
-      assertTrue(msg, !rs.next());
-
-      assertTrue(msg, !rs.isBeforeFirst());
-      assertTrue(msg, rs.isAfterLast());
-      assertTrue(msg, !rs.isFirst());
-      assertTrue(msg, !rs.isLast());
-
-      rs.close();
-      stmt.close();
-    }
-  }
-
-  // Test odd queries that should not be transformed into cursor-based fetches.
-  @Test
-  public void testInsert() throws Exception {
-    // INSERT should not be transformed.
-    PreparedStatement stmt = con.prepareStatement("insert into test_fetch(value) values(1)");
-    stmt.setFetchSize(100); // Should be meaningless.
-    stmt.executeUpdate();
-  }
-
-  @Test
-  public void testMultistatement() throws Exception {
-    // Queries with multiple statements should not be transformed.
-
-    createRows(100); // 0 .. 99
-    PreparedStatement stmt = con.prepareStatement(
-        "insert into test_fetch(value) values(100); select * from test_fetch order by value");
-    stmt.setFetchSize(10);
-
-    assertTrue(!stmt.execute()); // INSERT
-    assertTrue(stmt.getMoreResults()); // SELECT
-    ResultSet rs = stmt.getResultSet();
-    int count = 0;
-    while (rs.next()) {
-      assertEquals(count, rs.getInt(1));
-      ++count;
+        assertEquals(100, count);
     }
 
-    assertEquals(101, count);
-  }
+    // test three:
+    // -set fetchsize = 25
+    // -run query (25 rows fetched)
+    // -set fetchsize = 50
+    // -process results:
+    // --process 25 rows. should NOT hit end-of-results here.
+    // --do a FETCH FORWARD 50
+    // --process 50 rows
+    // --do a FETCH FORWARD 50
+    // --process 25 rows. end of results.
+    @Test
+    public void testResultSetFetchSizeThree() throws Exception {
+        createRows(100);
 
-  // if the driver tries to use a cursor with autocommit on
-  // it will fail because the cursor will disappear partway
-  // through execution
-  @Test
-  public void testNoCursorWithAutoCommit() throws Exception {
-    createRows(10); // 0 .. 9
-    con.setAutoCommit(true);
-    Statement stmt = con.createStatement();
-    stmt.setFetchSize(3);
-    ResultSet rs = stmt.executeQuery("SELECT * FROM test_fetch ORDER BY value");
-    int count = 0;
-    while (rs.next()) {
-      assertEquals(count++, rs.getInt(1));
-    }
+        PreparedStatement stmt = con.prepareStatement("select * from test_fetch order by value");
+        stmt.setFetchSize(25);
+        ResultSet rs = stmt.executeQuery();
+        rs.setFetchSize(50);
 
-    assertEquals(10, count);
-  }
-
-  @Test
-  public void testGetRow() throws SQLException {
-    Statement stmt = con.createStatement();
-    stmt.setFetchSize(1);
-    ResultSet rs = stmt.executeQuery("SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3");
-    int count = 0;
-    while (rs.next()) {
-      count++;
-      assertEquals(count, rs.getInt(1));
-      assertEquals(count, rs.getRow());
-    }
-    assertEquals(3, count);
-  }
-
-  // isLast() may change the results of other positioning methods as it has to
-  // buffer some more results. This tests avoid using it so as to test robustness
-  // other positioning methods
-  @Test
-  public void testRowResultPositioningWithoutIsLast() throws Exception {
-    String msg;
-
-    int rowCount = 4;
-    createRows(rowCount);
-
-    int[] sizes = {1, 2, 3, 4, 5};
-    for (int size : sizes) {
-      Statement stmt = con.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_UPDATABLE);
-      stmt.setFetchSize(size);
-
-      ResultSet rs = stmt.executeQuery("select * from test_fetch order by value");
-      msg = "before-first row positioning error with fetchsize=" + size;
-      assertTrue(msg, rs.isBeforeFirst());
-      assertTrue(msg, !rs.isAfterLast());
-      assertTrue(msg, !rs.isFirst());
-
-      for (int j = 0; j < rowCount; j++) {
-        msg = "row " + j + " positioning error with fetchsize=" + size;
-        assertTrue(msg, rs.next());
-        assertEquals(msg, j, rs.getInt(1));
-
-        assertTrue(msg, !rs.isBeforeFirst());
-        assertTrue(msg, !rs.isAfterLast());
-        if (j == 0) {
-          assertTrue(msg, rs.isFirst());
-        } else {
-          assertTrue(msg, !rs.isFirst());
+        int count = 0;
+        while (rs.next()) {
+            assertEquals(count, rs.getInt(1));
+            ++count;
         }
-      }
 
-      msg = "after-last row positioning error with fetchsize=" + size;
-      assertTrue(msg, !rs.next());
-
-      assertTrue(msg, !rs.isBeforeFirst());
-      assertTrue(msg, rs.isAfterLast());
-      assertTrue(msg, !rs.isFirst());
-      assertTrue(msg, !rs.isLast());
-
-      rs.close();
-      stmt.close();
+        assertEquals(100, count);
     }
-  }
 
-  // Empty resultsets require all row positioning methods to return false
-  @Test
-  public void testNoRowResultPositioning() throws Exception {
-    int[] sizes = {0, 1, 50, 100};
-    for (int size : sizes) {
-      Statement stmt = con.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_UPDATABLE);
-      stmt.setFetchSize(size);
+    // test four:
+    // -set fetchsize = 50
+    // -run query (50 rows fetched)
+    // -set fetchsize = 25
+    // -process results:
+    // --process 50 rows.
+    // --do a FETCH FORWARD 25
+    // --process 25 rows
+    // --do a FETCH FORWARD 25
+    // --process 25 rows. end of results.
+    @Test
+    public void testResultSetFetchSizeFour() throws Exception {
+        createRows(100);
 
-      ResultSet rs = stmt.executeQuery("select * from test_fetch order by value");
-      String msg = "no row (empty resultset) positioning error with fetchsize=" + size;
-      assertTrue(msg, !rs.isBeforeFirst());
-      assertTrue(msg, !rs.isAfterLast());
-      assertTrue(msg, !rs.isFirst());
-      assertTrue(msg, !rs.isLast());
+        PreparedStatement stmt = con.prepareStatement("select * from test_fetch order by value");
+        stmt.setFetchSize(50);
+        ResultSet rs = stmt.executeQuery();
+        rs.setFetchSize(25);
 
-      assertTrue(msg, !rs.next());
-      assertTrue(msg, !rs.isBeforeFirst());
-      assertTrue(msg, !rs.isAfterLast());
-      assertTrue(msg, !rs.isFirst());
-      assertTrue(msg, !rs.isLast());
+        int count = 0;
+        while (rs.next()) {
+            assertEquals(count, rs.getInt(1));
+            ++count;
+        }
 
-      rs.close();
-      stmt.close();
+        assertEquals(100, count);
     }
-  }
 
-  // Empty resultsets require all row positioning methods to return false
-  @Test
-  public void testScrollableNoRowResultPositioning() throws Exception {
-    int[] sizes = {0, 1, 50, 100};
-    for (int size : sizes) {
-      Statement stmt =
-          con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE);
-      stmt.setFetchSize(size);
+    @Test
+    public void testSingleRowResultPositioning() throws Exception {
+        String msg;
+        createRows(1);
 
-      ResultSet rs = stmt.executeQuery("select * from test_fetch order by value");
-      String msg = "no row (empty resultset) positioning error with fetchsize=" + size;
-      assertTrue(msg, !rs.isBeforeFirst());
-      assertTrue(msg, !rs.isAfterLast());
-      assertTrue(msg, !rs.isFirst());
-      assertTrue(msg, !rs.isLast());
+        int[] sizes = {0, 1, 10};
+        for (int size : sizes) {
+            Statement stmt = con.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_UPDATABLE);
+            stmt.setFetchSize(size);
 
-      assertTrue(msg, !rs.first());
-      assertTrue(msg, !rs.isBeforeFirst());
-      assertTrue(msg, !rs.isAfterLast());
-      assertTrue(msg, !rs.isFirst());
-      assertTrue(msg, !rs.isLast());
+            // Create a one row result set.
+            ResultSet rs = stmt.executeQuery("select * from test_fetch order by value");
 
-      assertTrue(msg, !rs.next());
-      assertTrue(msg, !rs.isBeforeFirst());
-      assertTrue(msg, !rs.isAfterLast());
-      assertTrue(msg, !rs.isFirst());
-      assertTrue(msg, !rs.isLast());
+            msg = "before-first row positioning error with fetchsize=" + size;
+            assertTrue(msg, rs.isBeforeFirst());
+            assertTrue(msg, !rs.isAfterLast());
+            assertTrue(msg, !rs.isFirst());
+            assertTrue(msg, !rs.isLast());
 
-      rs.close();
-      stmt.close();
+            msg = "row 1 positioning error with fetchsize=" + size;
+            assertTrue(msg, rs.next());
+
+            assertTrue(msg, !rs.isBeforeFirst());
+            assertTrue(msg, !rs.isAfterLast());
+            assertTrue(msg, rs.isFirst());
+            assertTrue(msg, rs.isLast());
+            assertEquals(msg, 0, rs.getInt(1));
+
+            msg = "after-last row positioning error with fetchsize=" + size;
+            assertTrue(msg, !rs.next());
+
+            assertTrue(msg, !rs.isBeforeFirst());
+            assertTrue(msg, rs.isAfterLast());
+            assertTrue(msg, !rs.isFirst());
+            assertTrue(msg, !rs.isLast());
+
+            rs.close();
+            stmt.close();
+        }
+    }
+
+    @Test
+    public void testMultiRowResultPositioning() throws Exception {
+        String msg;
+
+        createRows(100);
+
+        int[] sizes = {0, 1, 10, 100};
+        for (int size : sizes) {
+            Statement stmt = con.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_UPDATABLE);
+            stmt.setFetchSize(size);
+
+            ResultSet rs = stmt.executeQuery("select * from test_fetch order by value");
+            msg = "before-first row positioning error with fetchsize=" + size;
+            assertTrue(msg, rs.isBeforeFirst());
+            assertTrue(msg, !rs.isAfterLast());
+            assertTrue(msg, !rs.isFirst());
+            assertTrue(msg, !rs.isLast());
+
+            for (int j = 0; j < 100; j++) {
+                msg = "row " + j + " positioning error with fetchsize=" + size;
+                assertTrue(msg, rs.next());
+                assertEquals(msg, j, rs.getInt(1));
+
+                assertTrue(msg, !rs.isBeforeFirst());
+                assertTrue(msg, !rs.isAfterLast());
+                if (j == 0) {
+                    assertTrue(msg, rs.isFirst());
+                } else {
+                    assertTrue(msg, !rs.isFirst());
+                }
+
+                if (j == 99) {
+                    assertTrue(msg, rs.isLast());
+                } else {
+                    assertTrue(msg, !rs.isLast());
+                }
+            }
+
+            msg = "after-last row positioning error with fetchsize=" + size;
+            assertTrue(msg, !rs.next());
+
+            assertTrue(msg, !rs.isBeforeFirst());
+            assertTrue(msg, rs.isAfterLast());
+            assertTrue(msg, !rs.isFirst());
+            assertTrue(msg, !rs.isLast());
+
+            rs.close();
+            stmt.close();
+        }
+    }
+
+    // Test odd queries that should not be transformed into cursor-based fetches.
+    @Test
+    public void testInsert() throws Exception {
+        // INSERT should not be transformed.
+        PreparedStatement stmt = con.prepareStatement("insert into test_fetch(value) values(1)");
+        stmt.setFetchSize(100); // Should be meaningless.
+        stmt.executeUpdate();
+    }
+
+    @Test
+    public void testMultistatement() throws Exception {
+        // Queries with multiple statements should not be transformed.
+
+        createRows(100); // 0 .. 99
+        PreparedStatement stmt = con.prepareStatement(
+                "insert into test_fetch(value) values(100); select * from test_fetch order by value");
+        stmt.setFetchSize(10);
+
+        assertTrue(!stmt.execute()); // INSERT
+        assertTrue(stmt.getMoreResults()); // SELECT
+        ResultSet rs = stmt.getResultSet();
+        int count = 0;
+        while (rs.next()) {
+            assertEquals(count, rs.getInt(1));
+            ++count;
+        }
+
+        assertEquals(101, count);
+    }
+
+    // if the driver tries to use a cursor with autocommit on
+    // it will fail because the cursor will disappear partway
+    // through execution
+    @Test
+    public void testNoCursorWithAutoCommit() throws Exception {
+        createRows(10); // 0 .. 9
+        con.setAutoCommit(true);
+        Statement stmt = con.createStatement();
+        stmt.setFetchSize(3);
+        ResultSet rs = stmt.executeQuery("SELECT * FROM test_fetch ORDER BY value");
+        int count = 0;
+        while (rs.next()) {
+            assertEquals(count++, rs.getInt(1));
+        }
+
+        assertEquals(10, count);
+    }
+
+    @Test
+    public void testGetRow() throws SQLException {
+        Statement stmt = con.createStatement();
+        stmt.setFetchSize(1);
+        ResultSet rs = stmt.executeQuery("SELECT 1 UNION ALL SELECT 2 UNION ALL SELECT 3");
+        int count = 0;
+        while (rs.next()) {
+            count++;
+            assertEquals(count, rs.getInt(1));
+            assertEquals(count, rs.getRow());
+        }
+        assertEquals(3, count);
+    }
+
+    // isLast() may change the results of other positioning methods as it has to
+    // buffer some more results. These tests avoid using it so as to test the
+    // robustness of the other positioning methods.
+    @Test
+    public void testRowResultPositioningWithoutIsLast() throws Exception {
+        String msg;
+
+        int rowCount = 4;
+        createRows(rowCount);
+
+        int[] sizes = {1, 2, 3, 4, 5};
+        for (int size : sizes) {
+            Statement stmt = con.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_UPDATABLE);
+            stmt.setFetchSize(size);
+
+            ResultSet rs = stmt.executeQuery("select * from test_fetch order by value");
+            msg = "before-first row positioning error with fetchsize=" + size;
+            assertTrue(msg, rs.isBeforeFirst());
+            assertTrue(msg, !rs.isAfterLast());
+            assertTrue(msg, !rs.isFirst());
+
+            for (int j = 0; j < rowCount; j++) {
+                msg = "row " + j + " positioning error with fetchsize=" + size;
+                assertTrue(msg, rs.next());
+                assertEquals(msg, j, rs.getInt(1));
+
+                assertTrue(msg, !rs.isBeforeFirst());
+                assertTrue(msg, !rs.isAfterLast());
+                if (j == 0) {
+                    assertTrue(msg, rs.isFirst());
+                } else {
+                    assertTrue(msg, !rs.isFirst());
+                }
+            }
+
+            msg = "after-last row positioning error with fetchsize=" + size;
+            assertTrue(msg, !rs.next());
+
+            assertTrue(msg, !rs.isBeforeFirst());
+            assertTrue(msg, rs.isAfterLast());
+            assertTrue(msg, !rs.isFirst());
+            assertTrue(msg, !rs.isLast());
+
+            rs.close();
+            stmt.close();
+        }
+    }
+
+    // Empty resultsets require all row positioning methods to return false
+    @Test
+    public void testNoRowResultPositioning() throws Exception {
+        int[] sizes = {0, 1, 50, 100};
+        for (int size : sizes) {
+            Statement stmt = con.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_UPDATABLE);
+            stmt.setFetchSize(size);
+
+            ResultSet rs = stmt.executeQuery("select * from test_fetch order by value");
+            String msg = "no row (empty resultset) positioning error with fetchsize=" + size;
+            assertTrue(msg, !rs.isBeforeFirst());
+            assertTrue(msg, !rs.isAfterLast());
+            assertTrue(msg, !rs.isFirst());
+            assertTrue(msg, !rs.isLast());
+
+            assertTrue(msg, !rs.next());
+            assertTrue(msg, !rs.isBeforeFirst());
+            assertTrue(msg, !rs.isAfterLast());
+            assertTrue(msg, !rs.isFirst());
+            assertTrue(msg, !rs.isLast());
+
+            rs.close();
+            stmt.close();
+        }
+    }
+
+    // Empty resultsets require all row positioning methods to return false
+    @Test
+    public void testScrollableNoRowResultPositioning() throws Exception {
+        int[] sizes = {0, 1, 50, 100};
+        for (int size : sizes) {
+            Statement stmt =
+                    con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE);
+            stmt.setFetchSize(size);
+
+            ResultSet rs = stmt.executeQuery("select * from test_fetch order by value");
+            String msg = "no row (empty resultset) positioning error with fetchsize=" + size;
+            assertTrue(msg, !rs.isBeforeFirst());
+            assertTrue(msg, !rs.isAfterLast());
+            assertTrue(msg, !rs.isFirst());
+            assertTrue(msg, !rs.isLast());
+
+            assertTrue(msg, !rs.first());
+            assertTrue(msg, !rs.isBeforeFirst());
+            assertTrue(msg, !rs.isAfterLast());
+            assertTrue(msg, !rs.isFirst());
+            assertTrue(msg, !rs.isLast());
+
+            assertTrue(msg, !rs.next());
+            assertTrue(msg, !rs.isBeforeFirst());
+            assertTrue(msg, !rs.isAfterLast());
+            assertTrue(msg, !rs.isFirst());
+            assertTrue(msg, !rs.isLast());
+
+            rs.close();
+            stmt.close();
+        }
     }
-  }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/CustomTypeWithBinaryTransferTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/CustomTypeWithBinaryTransferTest.java
index a0900ae..951c9e6 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/CustomTypeWithBinaryTransferTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/CustomTypeWithBinaryTransferTest.java
@@ -37,214 +37,214 @@ import java.util.Collection;
  */
 @RunWith(Parameterized.class)
 public class CustomTypeWithBinaryTransferTest extends BaseTest4 {
-  // define an oid of a binary type for testing, POINT is used here as it already exists in the
-  // database and requires no complex own type definition
-  private static final int CUSTOM_TYPE_OID = Oid.POINT;
+    // define an oid of a binary type for testing, POINT is used here as it already exists in the
+    // database and requires no complex own type definition
+    private static final int CUSTOM_TYPE_OID = Oid.POINT;
 
-  public CustomTypeWithBinaryTransferTest(BinaryMode binaryMode) {
-    setBinaryMode(binaryMode);
-  }
-
-  @Parameterized.Parameters(name = "binary = {0}")
-  public static Iterable<Object[]> data() {
-    Collection<Object[]> ids = new ArrayList<>();
-    for (BinaryMode binaryMode : BinaryMode.values()) {
-      ids.add(new Object[]{binaryMode});
-    }
-    return ids;
-  }
-
-  /**
-   * Set up the fixture for this testcase: the tables for this test.
-   *
-   * @throws SQLException if a database error occurs
-   */
-  @BeforeClass
-  public static void createTestTable() throws SQLException {
-    try (Connection con = TestUtil.openDB()) {
-      TestUtil.createTable(con, "test_binary_pgobject", "id integer,name text,geom point");
-    }
-  }
-
-  /**
-   * Tear down the fixture for this test case.
-   *
-   * @throws SQLException if a database error occurs
-   */
-  @AfterClass
-  public static void dropTestTable() throws SQLException {
-    try (Connection con = TestUtil.openDB()) {
-      TestUtil.dropTable(con, "test_binary_pgobject");
-    }
-  }
-
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-    QueryExecutor queryExecutor = con.unwrap(BaseConnection.class).getQueryExecutor();
-    queryExecutor.removeBinarySendOid(CUSTOM_TYPE_OID);
-    queryExecutor.removeBinaryReceiveOid(CUSTOM_TYPE_OID);
-    assertBinaryForReceive(CUSTOM_TYPE_OID, false,
-        () -> "Binary transfer for point type should be disabled since we've deactivated it in "
-            + "updateProperties");
-
-    assertBinaryForSend(CUSTOM_TYPE_OID, false,
-        () -> "Binary transfer for point type should be disabled since we've deactivated it in "
-            + "updateProperties");
-    try (Statement st = con.createStatement()) {
-      st.execute("DELETE FROM test_binary_pgobject");
-      st.execute("INSERT INTO test_binary_pgobject(id,name,geom) values(1,'Test',Point(1,2))");
-    }
-  }
-
-  /**
-   * Make sure custom binary types are handled automatically.
-   *
-   * @throws SQLException if a database error occurs
-   */
-  @Test
-  public void testCustomBinaryTypes() throws SQLException {
-    PGConnection pgconn = con.unwrap(PGConnection.class);
-
-    // make sure the test type implements PGBinaryObject
-    assertTrue("test type should implement PGBinaryObject",
-        PGBinaryObject.class.isAssignableFrom(TestCustomType.class));
-
-    // now define a custom type, which will add it to the binary sent/received OIDs (if the type
-    // implements PGBinaryObject)
-    pgconn.addDataType("point", TestCustomType.class);
-    // check if the type was marked for binary transfer
-    if (preferQueryMode != PreferQueryMode.SIMPLE) {
-      assertBinaryForReceive(CUSTOM_TYPE_OID, true,
-          () -> "Binary transfer for point type should be activated by addDataType(..., "
-              + "TestCustomType.class)");
-      assertBinaryForSend(CUSTOM_TYPE_OID, true,
-          () -> "Binary transfer for point type should be activated by addDataType(..., "
-              + "TestCustomType.class)");
+    public CustomTypeWithBinaryTransferTest(BinaryMode binaryMode) {
+        setBinaryMode(binaryMode);
     }
 
-    TestCustomType co;
-    // Try with PreparedStatement
-    try (PreparedStatement pst = con.prepareStatement("SELECT geom FROM test_binary_pgobject WHERE id=?")) {
-      pst.setInt(1, 1);
-      try (ResultSet rs = pst.executeQuery()) {
-        assertTrue("rs.next()", rs.next());
-        Object o = rs.getObject(1);
-        co = (TestCustomType) o;
-        // now binary transfer should be working
-        if (preferQueryMode == PreferQueryMode.SIMPLE) {
-          assertEquals(
-              "reading via prepared statement: TestCustomType.wasReadBinary() should use text encoding since preferQueryMode=SIMPLE",
-              "text",
-              co.wasReadBinary() ? "binary" : "text");
-        } else {
-          assertEquals(
-              "reading via prepared statement: TestCustomType.wasReadBinary() should use match binary mode requested by the test",
-              binaryMode == BinaryMode.FORCE ? "binary" : "text",
-              co.wasReadBinary() ? "binary" : "text");
+    @Parameterized.Parameters(name = "binary = {0}")
+    public static Iterable<Object[]> data() {
+        Collection<Object[]> ids = new ArrayList<>();
+        for (BinaryMode binaryMode : BinaryMode.values()) {
+            ids.add(new Object[]{binaryMode});
         }
-      }
-    }
-
-    // ensure flag is still unset
-    assertFalse("wasWrittenBinary should be false since we have not written the object yet",
-        co.wasWrittenBinary());
-    // now try to write it
-    try (PreparedStatement pst =
-             con.prepareStatement("INSERT INTO test_binary_pgobject(id,geom) VALUES(?,?)")) {
-      pst.setInt(1, 2);
-      pst.setObject(2, co);
-      pst.executeUpdate();
-      // make sure transfer was binary
-      if (preferQueryMode == PreferQueryMode.SIMPLE) {
-        assertEquals(
-            "writing via prepared statement: TestCustomType.wasWrittenBinary() should use text encoding since preferQueryMode=SIMPLE",
-            "text",
-            co.wasWrittenBinary() ? "binary" : "text");
-      } else {
-        assertEquals(
-            "writing via prepared statement: TestCustomType.wasWrittenBinary() should use match binary mode requested by the test",
-            binaryMode == BinaryMode.FORCE ? "binary" : "text",
-            co.wasWrittenBinary() ? "binary" : "text");
-      }
-    }
-  }
-
-  /**
-   * Custom type that supports binary format.
-   */
-  @SuppressWarnings("serial")
-  public static class TestCustomType extends PGobject implements PGBinaryObject {
-    private byte[] byteValue;
-    private boolean wasReadBinary;
-    private boolean wasWrittenBinary;
-
-    @Override
-    public String getValue() {
-      // set flag
-      this.wasWrittenBinary = false;
-      return super.getValue();
-    }
-
-    @Override
-    public int lengthInBytes() {
-      if (byteValue != null) {
-        return byteValue.length;
-      } else {
-        return 0;
-      }
-    }
-
-    @Override
-    public void setByteValue(byte[] value, int offset) throws SQLException {
-      this.wasReadBinary = true;
-      // remember the byte value
-      byteValue = new byte[value.length - offset];
-      System.arraycopy(value, offset, byteValue, 0, byteValue.length);
-    }
-
-    @Override
-    public void setValue(String value) throws SQLException {
-      super.setValue(value);
-      // set flag
-      this.wasReadBinary = false;
-    }
-
-    @Override
-    public void toBytes(byte[] bytes, int offset) {
-      if (byteValue != null) {
-        // make sure array is large enough
-        if ((bytes.length - offset) <= byteValue.length) {
-          // copy data
-          System.arraycopy(byteValue, 0, bytes, offset, byteValue.length);
-        } else {
-          throw new IllegalArgumentException(
-              "byte array is too small, expected: " + byteValue.length + " got: "
-                  + (bytes.length - offset));
-        }
-      } else {
-        throw new IllegalStateException("no geometry has been set");
-      }
-      // set flag
-      this.wasWrittenBinary = true;
+        return ids;
     }
 
     /**
-     * Checks, if this type was read in binary mode.
+     * Set up the fixture for this testcase: the tables for this test.
      *
-     * @return true for binary mode, else false
+     * @throws SQLException if a database error occurs
      */
-    public boolean wasReadBinary() {
-      return this.wasReadBinary;
+    @BeforeClass
+    public static void createTestTable() throws SQLException {
+        try (Connection con = TestUtil.openDB()) {
+            TestUtil.createTable(con, "test_binary_pgobject", "id integer,name text,geom point");
+        }
     }
 
     /**
-     * Checks, if this type was written in binary mode.
+     * Tear down the fixture for this test case.
      *
-     * @return true for binary mode, else false
+     * @throws SQLException if a database error occurs
      */
-    public boolean wasWrittenBinary() {
-      return this.wasWrittenBinary;
+    @AfterClass
+    public static void dropTestTable() throws SQLException {
+        try (Connection con = TestUtil.openDB()) {
+            TestUtil.dropTable(con, "test_binary_pgobject");
+        }
+    }
+
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        QueryExecutor queryExecutor = con.unwrap(BaseConnection.class).getQueryExecutor();
+        queryExecutor.removeBinarySendOid(CUSTOM_TYPE_OID);
+        queryExecutor.removeBinaryReceiveOid(CUSTOM_TYPE_OID);
+        assertBinaryForReceive(CUSTOM_TYPE_OID, false,
+                () -> "Binary transfer for point type should be disabled since we've deactivated it in "
+                        + "updateProperties");
+
+        assertBinaryForSend(CUSTOM_TYPE_OID, false,
+                () -> "Binary transfer for point type should be disabled since we've deactivated it in "
+                        + "updateProperties");
+        try (Statement st = con.createStatement()) {
+            st.execute("DELETE FROM test_binary_pgobject");
+            st.execute("INSERT INTO test_binary_pgobject(id,name,geom) values(1,'Test',Point(1,2))");
+        }
+    }
+
+    /**
+     * Make sure custom binary types are handled automatically.
+     *
+     * @throws SQLException if a database error occurs
+     */
+    @Test
+    public void testCustomBinaryTypes() throws SQLException {
+        PGConnection pgconn = con.unwrap(PGConnection.class);
+
+        // make sure the test type implements PGBinaryObject
+        assertTrue("test type should implement PGBinaryObject",
+                PGBinaryObject.class.isAssignableFrom(TestCustomType.class));
+
+        // now define a custom type, which will add it to the binary sent/received OIDs (if the type
+        // implements PGBinaryObject)
+        pgconn.addDataType("point", TestCustomType.class);
+        // check if the type was marked for binary transfer
+        if (preferQueryMode != PreferQueryMode.SIMPLE) {
+            assertBinaryForReceive(CUSTOM_TYPE_OID, true,
+                    () -> "Binary transfer for point type should be activated by addDataType(..., "
+                            + "TestCustomType.class)");
+            assertBinaryForSend(CUSTOM_TYPE_OID, true,
+                    () -> "Binary transfer for point type should be activated by addDataType(..., "
+                            + "TestCustomType.class)");
+        }
+
+        TestCustomType co;
+        // Try with PreparedStatement
+        try (PreparedStatement pst = con.prepareStatement("SELECT geom FROM test_binary_pgobject WHERE id=?")) {
+            pst.setInt(1, 1);
+            try (ResultSet rs = pst.executeQuery()) {
+                assertTrue("rs.next()", rs.next());
+                Object o = rs.getObject(1);
+                co = (TestCustomType) o;
+                // now binary transfer should be working
+                if (preferQueryMode == PreferQueryMode.SIMPLE) {
+                    assertEquals(
+                            "reading via prepared statement: TestCustomType.wasReadBinary() should use text encoding since preferQueryMode=SIMPLE",
+                            "text",
+                            co.wasReadBinary() ? "binary" : "text");
+                } else {
+                    assertEquals(
+                            "reading via prepared statement: TestCustomType.wasReadBinary() should use match binary mode requested by the test",
+                            binaryMode == BinaryMode.FORCE ? "binary" : "text",
+                            co.wasReadBinary() ? "binary" : "text");
+                }
+            }
+        }
+
+        // ensure flag is still unset
+        assertFalse("wasWrittenBinary should be false since we have not written the object yet",
+                co.wasWrittenBinary());
+        // now try to write it
+        try (PreparedStatement pst =
+                     con.prepareStatement("INSERT INTO test_binary_pgobject(id,geom) VALUES(?,?)")) {
+            pst.setInt(1, 2);
+            pst.setObject(2, co);
+            pst.executeUpdate();
+            // make sure transfer was binary
+            if (preferQueryMode == PreferQueryMode.SIMPLE) {
+                assertEquals(
+                        "writing via prepared statement: TestCustomType.wasWrittenBinary() should use text encoding since preferQueryMode=SIMPLE",
+                        "text",
+                        co.wasWrittenBinary() ? "binary" : "text");
+            } else {
+                assertEquals(
+                        "writing via prepared statement: TestCustomType.wasWrittenBinary() should use match binary mode requested by the test",
+                        binaryMode == BinaryMode.FORCE ? "binary" : "text",
+                        co.wasWrittenBinary() ? "binary" : "text");
+            }
+        }
+    }
+
+    /**
+     * Custom type that supports binary format.
+     */
+    @SuppressWarnings("serial")
+    public static class TestCustomType extends PGobject implements PGBinaryObject {
+        private byte[] byteValue;
+        private boolean wasReadBinary;
+        private boolean wasWrittenBinary;
+
+        @Override
+        public String getValue() {
+            // set flag
+            this.wasWrittenBinary = false;
+            return super.getValue();
+        }
+
+        @Override
+        public void setValue(String value) throws SQLException {
+            super.setValue(value);
+            // set flag
+            this.wasReadBinary = false;
+        }
+
+        @Override
+        public int lengthInBytes() {
+            if (byteValue != null) {
+                return byteValue.length;
+            } else {
+                return 0;
+            }
+        }
+
+        @Override
+        public void setByteValue(byte[] value, int offset) throws SQLException {
+            this.wasReadBinary = true;
+            // remember the byte value
+            byteValue = new byte[value.length - offset];
+            System.arraycopy(value, offset, byteValue, 0, byteValue.length);
+        }
+
+        @Override
+        public void toBytes(byte[] bytes, int offset) {
+            if (byteValue != null) {
+                // make sure array is large enough
+                if ((bytes.length - offset) <= byteValue.length) {
+                    // copy data
+                    System.arraycopy(byteValue, 0, bytes, offset, byteValue.length);
+                } else {
+                    throw new IllegalArgumentException(
+                            "byte array is too small, expected: " + byteValue.length + " got: "
+                                    + (bytes.length - offset));
+                }
+            } else {
+                throw new IllegalStateException("no geometry has been set");
+            }
+            // set flag
+            this.wasWrittenBinary = true;
+        }
+
+        /**
+         * Checks whether this type was read in binary mode.
+         *
+         * @return true for binary mode, else false
+         */
+        public boolean wasReadBinary() {
+            return this.wasReadBinary;
+        }
+
+        /**
+         * Checks whether this type was written in binary mode.
+         *
+         * @return true for binary mode, else false
+         */
+        public boolean wasWrittenBinary() {
+            return this.wasWrittenBinary;
+        }
     }
-  }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/DatabaseEncodingTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/DatabaseEncodingTest.java
index c070936..77f826e 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/DatabaseEncodingTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/DatabaseEncodingTest.java
@@ -23,235 +23,235 @@ import java.sql.Statement;
 import java.util.Arrays;
 
 /*
-* Test case for various encoding problems.
-*
-* Ensure that we can do a round-trip of all server-supported unicode values without trashing them,
-* and that bad encodings are detected.
-*/
+ * Test case for various encoding problems.
+ *
+ * Ensure that we can do a round-trip of all server-supported unicode values without trashing them,
+ * and that bad encodings are detected.
+ */
 class DatabaseEncodingTest {
-  private static final int STEP = 100;
+    private static final int STEP = 100;
 
-  private Connection con;
+    private Connection con;
 
-  // Set up the fixture for this testcase: a connection to a database with
-  // a table for this test.
-  @BeforeEach
-  void setUp() throws Exception {
-    con = TestUtil.openDB();
-    TestUtil.createTempTable(con, "testdbencoding",
-        "unicode_ordinal integer primary key not null, unicode_string varchar(" + STEP + ")");
-    // disabling auto commit makes the test run faster
-    // by not committing each insert individually.
-    con.setAutoCommit(false);
-  }
-
-  // Tear down the fixture for this test case.
-  @AfterEach
-  void tearDown() throws Exception {
-    con.setAutoCommit(true);
-    TestUtil.closeDB(con);
-  }
-
-  private static String dumpString(String s) {
-    StringBuffer sb = new StringBuffer(s.length() * 6);
-    for (int i = 0; i < s.length(); i++) {
-      sb.append("\\u");
-      char c = s.charAt(i);
-      sb.append(Integer.toHexString((c >> 12) & 15));
-      sb.append(Integer.toHexString((c >> 8) & 15));
-      sb.append(Integer.toHexString((c >> 4) & 15));
-      sb.append(Integer.toHexString(c & 15));
-    }
-    return sb.toString();
-  }
-
-  @Test
-  void encoding() throws Exception {
-    String databaseEncoding = TestUtil.queryForString(con, "SELECT getdatabaseencoding()");
-    Assumptions.assumeTrue("UTF8".equals(databaseEncoding), "Database encoding must be UTF8");
-
-    boolean testHighUnicode = true;
-
-    // Create data.
-    // NB: we avoid d800-dfff as those are reserved for surrogates in UTF-16
-    PreparedStatement insert = con.prepareStatement(
-        "INSERT INTO testdbencoding(unicode_ordinal, unicode_string) VALUES (?,?)");
-    for (int i = 1; i < 0xd800; i += STEP) {
-      int count = (i + STEP) > 0xd800 ? 0xd800 - i : STEP;
-      char[] testChars = new char[count];
-      for (int j = 0; j < count; j++) {
-        testChars[j] = (char) (i + j);
-      }
-
-      String testString = new String(testChars);
-
-      insert.setInt(1, i);
-      insert.setString(2, testString);
-      assertEquals(1, insert.executeUpdate());
+    private static String dumpString(String s) {
+        StringBuffer sb = new StringBuffer(s.length() * 6);
+        for (int i = 0; i < s.length(); i++) {
+            sb.append("\\u");
+            char c = s.charAt(i);
+            sb.append(Integer.toHexString((c >> 12) & 15));
+            sb.append(Integer.toHexString((c >> 8) & 15));
+            sb.append(Integer.toHexString((c >> 4) & 15));
+            sb.append(Integer.toHexString(c & 15));
+        }
+        return sb.toString();
     }
 
-    for (int i = 0xe000; i < 0x10000; i += STEP) {
-      int count = (i + STEP) > 0x10000 ? 0x10000 - i : STEP;
-      char[] testChars = new char[count];
-      for (int j = 0; j < count; j++) {
-        testChars[j] = (char) (i + j);
-      }
-
-      String testString = new String(testChars);
-
-      insert.setInt(1, i);
-      insert.setString(2, testString);
-      assertEquals(1, insert.executeUpdate());
+    // Set up the fixture for this testcase: a connection to a database with
+    // a table for this test.
+    @BeforeEach
+    void setUp() throws Exception {
+        con = TestUtil.openDB();
+        TestUtil.createTempTable(con, "testdbencoding",
+                "unicode_ordinal integer primary key not null, unicode_string varchar(" + STEP + ")");
+        // disabling auto commit makes the test run faster
+        // by not committing each insert individually.
+        con.setAutoCommit(false);
     }
 
-    if (testHighUnicode) {
-      for (int i = 0x10000; i < 0x110000; i += STEP) {
-        int count = (i + STEP) > 0x110000 ? 0x110000 - i : STEP;
-        char[] testChars = new char[count * 2];
-        for (int j = 0; j < count; j++) {
-          testChars[j * 2] = (char) (0xd800 + ((i + j - 0x10000) >> 10));
-          testChars[j * 2 + 1] = (char) (0xdc00 + ((i + j - 0x10000) & 0x3ff));
+    // Tear down the fixture for this test case.
+    @AfterEach
+    void tearDown() throws Exception {
+        con.setAutoCommit(true);
+        TestUtil.closeDB(con);
+    }
+
+    @Test
+    void encoding() throws Exception {
+        String databaseEncoding = TestUtil.queryForString(con, "SELECT getdatabaseencoding()");
+        Assumptions.assumeTrue("UTF8".equals(databaseEncoding), "Database encoding must be UTF8");
+
+        boolean testHighUnicode = true;
+
+        // Create data.
+        // NB: we avoid d800-dfff as those are reserved for surrogates in UTF-16
+        PreparedStatement insert = con.prepareStatement(
+                "INSERT INTO testdbencoding(unicode_ordinal, unicode_string) VALUES (?,?)");
+        for (int i = 1; i < 0xd800; i += STEP) {
+            int count = (i + STEP) > 0xd800 ? 0xd800 - i : STEP;
+            char[] testChars = new char[count];
+            for (int j = 0; j < count; j++) {
+                testChars[j] = (char) (i + j);
+            }
+
+            String testString = new String(testChars);
+
+            insert.setInt(1, i);
+            insert.setString(2, testString);
+            assertEquals(1, insert.executeUpdate());
         }
 
-        String testString = new String(testChars);
+        for (int i = 0xe000; i < 0x10000; i += STEP) {
+            int count = (i + STEP) > 0x10000 ? 0x10000 - i : STEP;
+            char[] testChars = new char[count];
+            for (int j = 0; j < count; j++) {
+                testChars[j] = (char) (i + j);
+            }
 
-        insert.setInt(1, i);
-        insert.setString(2, testString);
+            String testString = new String(testChars);
 
-        // System.err.println("Inserting: " + dumpString(testString));
-
-        assertEquals(1, insert.executeUpdate());
-      }
-    }
-
-    con.commit();
-
-    // Check data.
-    Statement stmt = con.createStatement();
-    stmt.setFetchSize(1);
-    ResultSet rs = stmt.executeQuery(
-        "SELECT unicode_ordinal, unicode_string FROM testdbencoding ORDER BY unicode_ordinal");
-    for (int i = 1; i < 0xd800; i += STEP) {
-      assertTrue(rs.next());
-      assertEquals(i, rs.getInt(1));
-
-      int count = (i + STEP) > 0xd800 ? 0xd800 - i : STEP;
-      char[] testChars = new char[count];
-      for (int j = 0; j < count; j++) {
-        testChars[j] = (char) (i + j);
-      }
-
-      String testString = new String(testChars);
-
-      assertEquals(dumpString(testString),
-          dumpString(rs.getString(2)),
-          "Test string: " + dumpString(testString));
-    }
-
-    for (int i = 0xe000; i < 0x10000; i += STEP) {
-      assertTrue(rs.next());
-      assertEquals(i, rs.getInt(1));
-
-      int count = (i + STEP) > 0x10000 ? 0x10000 - i : STEP;
-      char[] testChars = new char[count];
-      for (int j = 0; j < count; j++) {
-        testChars[j] = (char) (i + j);
-      }
-
-      String testString = new String(testChars);
-
-      assertEquals(dumpString(testString),
-          dumpString(rs.getString(2)),
-          "Test string: " + dumpString(testString));
-    }
-
-    if (testHighUnicode) {
-      for (int i = 0x10000; i < 0x110000; i += STEP) {
-        assertTrue(rs.next());
-        assertEquals(i, rs.getInt(1));
-
-        int count = (i + STEP) > 0x110000 ? 0x110000 - i : STEP;
-        char[] testChars = new char[count * 2];
-        for (int j = 0; j < count; j++) {
-          testChars[j * 2] = (char) (0xd800 + ((i + j - 0x10000) >> 10));
-          testChars[j * 2 + 1] = (char) (0xdc00 + ((i + j - 0x10000) & 0x3ff));
+            insert.setInt(1, i);
+            insert.setString(2, testString);
+            assertEquals(1, insert.executeUpdate());
         }
 
-        String testString = new String(testChars);
+        if (testHighUnicode) {
+            for (int i = 0x10000; i < 0x110000; i += STEP) {
+                int count = (i + STEP) > 0x110000 ? 0x110000 - i : STEP;
+                char[] testChars = new char[count * 2];
+                for (int j = 0; j < count; j++) {
+                    testChars[j * 2] = (char) (0xd800 + ((i + j - 0x10000) >> 10));
+                    testChars[j * 2 + 1] = (char) (0xdc00 + ((i + j - 0x10000) & 0x3ff));
+                }
 
-        assertEquals(dumpString(testString),
-            dumpString(rs.getString(2)),
-            "Test string: " + dumpString(testString));
-      }
+                String testString = new String(testChars);
+
+                insert.setInt(1, i);
+                insert.setString(2, testString);
+
+                // System.err.println("Inserting: " + dumpString(testString));
+
+                assertEquals(1, insert.executeUpdate());
+            }
+        }
+
+        con.commit();
+
+        // Check data.
+        Statement stmt = con.createStatement();
+        stmt.setFetchSize(1);
+        ResultSet rs = stmt.executeQuery(
+                "SELECT unicode_ordinal, unicode_string FROM testdbencoding ORDER BY unicode_ordinal");
+        for (int i = 1; i < 0xd800; i += STEP) {
+            assertTrue(rs.next());
+            assertEquals(i, rs.getInt(1));
+
+            int count = (i + STEP) > 0xd800 ? 0xd800 - i : STEP;
+            char[] testChars = new char[count];
+            for (int j = 0; j < count; j++) {
+                testChars[j] = (char) (i + j);
+            }
+
+            String testString = new String(testChars);
+
+            assertEquals(dumpString(testString),
+                    dumpString(rs.getString(2)),
+                    "Test string: " + dumpString(testString));
+        }
+
+        for (int i = 0xe000; i < 0x10000; i += STEP) {
+            assertTrue(rs.next());
+            assertEquals(i, rs.getInt(1));
+
+            int count = (i + STEP) > 0x10000 ? 0x10000 - i : STEP;
+            char[] testChars = new char[count];
+            for (int j = 0; j < count; j++) {
+                testChars[j] = (char) (i + j);
+            }
+
+            String testString = new String(testChars);
+
+            assertEquals(dumpString(testString),
+                    dumpString(rs.getString(2)),
+                    "Test string: " + dumpString(testString));
+        }
+
+        if (testHighUnicode) {
+            for (int i = 0x10000; i < 0x110000; i += STEP) {
+                assertTrue(rs.next());
+                assertEquals(i, rs.getInt(1));
+
+                int count = (i + STEP) > 0x110000 ? 0x110000 - i : STEP;
+                char[] testChars = new char[count * 2];
+                for (int j = 0; j < count; j++) {
+                    testChars[j * 2] = (char) (0xd800 + ((i + j - 0x10000) >> 10));
+                    testChars[j * 2 + 1] = (char) (0xdc00 + ((i + j - 0x10000) & 0x3ff));
+                }
+
+                String testString = new String(testChars);
+
+                assertEquals(dumpString(testString),
+                        dumpString(rs.getString(2)),
+                        "Test string: " + dumpString(testString));
+            }
+        }
     }
-  }
 
-  @Test
-  void uTF8Decode() throws Exception {
-    // Tests for our custom UTF-8 decoder.
+    @Test
+    void uTF8Decode() throws Exception {
+        // Tests for our custom UTF-8 decoder.
 
-    Encoding utf8Encoding = Encoding.getJVMEncoding("UTF-8");
+        Encoding utf8Encoding = Encoding.getJVMEncoding("UTF-8");
 
-    for (int ch = 0; ch < 0x110000; ch++) {
-      if (ch >= 0xd800 && ch < 0xe000) {
-        continue; // Surrogate range.
-      }
+        for (int ch = 0; ch < 0x110000; ch++) {
+            if (ch >= 0xd800 && ch < 0xe000) {
+                continue; // Surrogate range.
+            }
 
-      String testString;
-      if (ch >= 0x10000) {
-        testString = new String(new char[]{(char) (0xd800 + ((ch - 0x10000) >> 10)),
-            (char) (0xdc00 + ((ch - 0x10000) & 0x3ff))});
-      } else {
-        testString = new String(new char[]{(char) ch});
-      }
+            String testString;
+            if (ch >= 0x10000) {
+                testString = new String(new char[]{(char) (0xd800 + ((ch - 0x10000) >> 10)),
+                        (char) (0xdc00 + ((ch - 0x10000) & 0x3ff))});
+            } else {
+                testString = new String(new char[]{(char) ch});
+            }
 
-      byte[] jvmEncoding = testString.getBytes("UTF-8");
-      String jvmDecoding = new String(jvmEncoding, 0, jvmEncoding.length, "UTF-8");
-      String ourDecoding = utf8Encoding.decode(jvmEncoding, 0, jvmEncoding.length);
+            byte[] jvmEncoding = testString.getBytes("UTF-8");
+            String jvmDecoding = new String(jvmEncoding, 0, jvmEncoding.length, "UTF-8");
+            String ourDecoding = utf8Encoding.decode(jvmEncoding, 0, jvmEncoding.length);
 
-      assertEquals(dumpString(testString),
-          dumpString(jvmDecoding),
-          "Test string: " + dumpString(testString));
-      assertEquals(dumpString(testString),
-          dumpString(ourDecoding),
-          "Test string: " + dumpString(testString));
+            assertEquals(dumpString(testString),
+                    dumpString(jvmDecoding),
+                    "Test string: " + dumpString(testString));
+            assertEquals(dumpString(testString),
+                    dumpString(ourDecoding),
+                    "Test string: " + dumpString(testString));
+        }
     }
-  }
 
-  /**
-   * Tests that invalid utf-8 values are replaced with the unicode replacement chart.
-   */
-  @Test
-  void truncatedUTF8Decode() throws Exception {
-    Encoding utf8Encoding = Encoding.getJVMEncoding("UTF-8");
+    /**
+     * Tests that invalid utf-8 values are replaced with the unicode replacement character.
+     */
+    @Test
+    void truncatedUTF8Decode() throws Exception {
+        Encoding utf8Encoding = Encoding.getJVMEncoding("UTF-8");
 
-    byte[][] shortSequences = new byte[][]{{(byte) 0xc0}, // Second byte must be present
+        byte[][] shortSequences = new byte[][]{{(byte) 0xc0}, // Second byte must be present
 
-        {(byte) 0xe0}, // Second byte must be present
-        {(byte) 0xe0, (byte) 0x80}, // Third byte must be present
+                {(byte) 0xe0}, // Second byte must be present
+                {(byte) 0xe0, (byte) 0x80}, // Third byte must be present
 
-        {(byte) 0xf0}, // Second byte must be present
-        {(byte) 0xf0, (byte) 0x80}, // Third byte must be present
-        {(byte) 0xf0, (byte) 0x80, (byte) 0x80}, // Fourth byte must be present
-    };
+                {(byte) 0xf0}, // Second byte must be present
+                {(byte) 0xf0, (byte) 0x80}, // Third byte must be present
+                {(byte) 0xf0, (byte) 0x80, (byte) 0x80}, // Fourth byte must be present
+        };
 
-    byte[] paddedSequence = new byte[32];
-    for (int i = 0; i < shortSequences.length; i++) {
-      byte[] sequence = shortSequences[i];
-      String expected = "\uFFFD";
-      for (int j = 1; j < sequence.length; j++) {
-        expected += "\uFFFD";
-      }
+        byte[] paddedSequence = new byte[32];
+        for (int i = 0; i < shortSequences.length; i++) {
+            byte[] sequence = shortSequences[i];
+            String expected = "\uFFFD";
+            for (int j = 1; j < sequence.length; j++) {
+                expected += "\uFFFD";
+            }
 
-      String str = utf8Encoding.decode(sequence, 0, sequence.length);
-      assertEquals(expected, str, "itr:" + i);
+            String str = utf8Encoding.decode(sequence, 0, sequence.length);
+            assertEquals(expected, str, "itr:" + i);
 
-      // Try it with padding and a truncated length.
-      Arrays.fill(paddedSequence, (byte) 0);
-      System.arraycopy(sequence, 0, paddedSequence, 0, sequence.length);
+            // Try it with padding and a truncated length.
+            Arrays.fill(paddedSequence, (byte) 0);
+            System.arraycopy(sequence, 0, paddedSequence, 0, sequence.length);
 
-      str = utf8Encoding.decode(paddedSequence, 0, sequence.length);
-      assertEquals(expected, str, "itr:" + i);
+            str = utf8Encoding.decode(paddedSequence, 0, sequence.length);
+            assertEquals(expected, str, "itr:" + i);
+        }
     }
-  }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/DatabaseMetaDataCacheTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/DatabaseMetaDataCacheTest.java
index 55814a3..2049646 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/DatabaseMetaDataCacheTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/DatabaseMetaDataCacheTest.java
@@ -10,7 +10,7 @@ import static org.junit.jupiter.api.Assertions.assertEquals;
 import org.postgresql.core.TypeInfo;
 import org.postgresql.jdbc.PgConnection;
 import org.postgresql.test.TestUtil;
-import org.postgresql.util.TestLogHandler;
+import org.postgresql.test.util.TestLogHandler;
 
 import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.api.BeforeEach;
@@ -25,69 +25,68 @@ import java.util.logging.Logger;
 import java.util.regex.Pattern;
 
 /*
-* Tests for caching of DatabaseMetadata
-*
-*/
+ * Tests for caching of DatabaseMetadata
+ *
+ */
 class DatabaseMetaDataCacheTest {
-  private PgConnection con;
-  private TestLogHandler log;
-  private Logger driverLogger;
-  private Level driverLogLevel;
+    private static final Pattern SQL_TYPE_QUERY_LOG_FILTER = Pattern.compile("querying SQL typecode for pg type");
+    private static final Pattern SQL_TYPE_CACHE_LOG_FILTER = Pattern.compile("caching all SQL typecodes");
+    private PgConnection con;
+    private TestLogHandler log;
+    private Logger driverLogger;
+    private Level driverLogLevel;
 
-  private static final Pattern SQL_TYPE_QUERY_LOG_FILTER = Pattern.compile("querying SQL typecode for pg type");
-  private static final Pattern SQL_TYPE_CACHE_LOG_FILTER = Pattern.compile("caching all SQL typecodes");
+    @BeforeEach
+    void setUp() throws Exception {
+        con = (PgConnection) TestUtil.openDB();
+        log = new TestLogHandler();
+        driverLogger = LogManager.getLogManager().getLogger("org.postgresql");
+        driverLogger.addHandler(log);
+        driverLogLevel = driverLogger.getLevel();
+        driverLogger.setLevel(Level.ALL);
+    }
 
-  @BeforeEach
-  void setUp() throws Exception {
-    con = (PgConnection) TestUtil.openDB();
-    log = new TestLogHandler();
-    driverLogger = LogManager.getLogManager().getLogger("org.postgresql");
-    driverLogger.addHandler(log);
-    driverLogLevel = driverLogger.getLevel();
-    driverLogger.setLevel(Level.ALL);
-  }
+    @AfterEach
+    void tearDown() throws Exception {
+        TestUtil.closeDB(con);
+        driverLogger.removeHandler(log);
+        driverLogger.setLevel(driverLogLevel);
+        log = null;
+    }
 
-  @AfterEach
-  void tearDown() throws Exception {
-    TestUtil.closeDB(con);
-    driverLogger.removeHandler(log);
-    driverLogger.setLevel(driverLogLevel);
-    log = null;
-  }
+    @Test
+    void getSQLTypeQueryCache() throws SQLException {
+        TypeInfo ti = con.getTypeInfo();
 
-  @Test
-  void getSQLTypeQueryCache() throws SQLException {
-    TypeInfo ti = con.getTypeInfo();
+        List<LogRecord> typeQueries = log.getRecordsMatching(SQL_TYPE_QUERY_LOG_FILTER);
+        assertEquals(0, typeQueries.size());
 
-    List<LogRecord> typeQueries = log.getRecordsMatching(SQL_TYPE_QUERY_LOG_FILTER);
-    assertEquals(0, typeQueries.size());
+        ti.getSQLType("xid");  // this must be a type not in the hardcoded 'types' list
+        typeQueries = log.getRecordsMatching(SQL_TYPE_QUERY_LOG_FILTER);
+        assertEquals(1, typeQueries.size());
 
-    ti.getSQLType("xid");  // this must be a type not in the hardcoded 'types' list
-    typeQueries = log.getRecordsMatching(SQL_TYPE_QUERY_LOG_FILTER);
-    assertEquals(1, typeQueries.size());
+        ti.getSQLType("xid");  // this time it should be retrieved from the cache
+        typeQueries = log.getRecordsMatching(SQL_TYPE_QUERY_LOG_FILTER);
+        assertEquals(1, typeQueries.size());
+    }
 
-    ti.getSQLType("xid");  // this time it should be retrieved from the cache
-    typeQueries = log.getRecordsMatching(SQL_TYPE_QUERY_LOG_FILTER);
-    assertEquals(1, typeQueries.size());
-  }
+    @Test
+    void getTypeInfoUsesCache() throws SQLException {
+        con.getMetaData().getTypeInfo();
 
-  @Test
-  void getTypeInfoUsesCache() throws SQLException {
-    con.getMetaData().getTypeInfo();
+        List<LogRecord> typeCacheQuery = log.getRecordsMatching(SQL_TYPE_CACHE_LOG_FILTER);
+        assertEquals(1, typeCacheQuery.size(), "PgDatabaseMetadata.getTypeInfo() did not cache SQL typecodes");
 
-    List<LogRecord> typeCacheQuery = log.getRecordsMatching(SQL_TYPE_CACHE_LOG_FILTER);
-    assertEquals(1, typeCacheQuery.size(), "PgDatabaseMetadata.getTypeInfo() did not cache SQL typecodes");
+        List<LogRecord> typeQueries = log.getRecordsMatching(SQL_TYPE_QUERY_LOG_FILTER);
+        assertEquals(0, typeQueries.size(), "PgDatabaseMetadata.getTypeInfo() resulted in individual queries for SQL typecodes");
+    }
 
-    List<LogRecord> typeQueries = log.getRecordsMatching(SQL_TYPE_QUERY_LOG_FILTER);
-    assertEquals(0, typeQueries.size(), "PgDatabaseMetadata.getTypeInfo() resulted in individual queries for SQL typecodes");
-  }
-
-  @Test
-  void typeForAlias() {
-    TypeInfo ti = con.getTypeInfo();
-    assertEquals("bool", ti.getTypeForAlias("boolean"));
-    assertEquals("bool", ti.getTypeForAlias("Boolean"));
-    assertEquals("bool", ti.getTypeForAlias("Bool"));
-    assertEquals("bogus", ti.getTypeForAlias("bogus"));
-  }
+    @Test
+    void typeForAlias() {
+        TypeInfo ti = con.getTypeInfo();
+        assertEquals("bool", ti.getTypeForAlias("boolean"));
+        assertEquals("bool", ti.getTypeForAlias("Boolean"));
+        assertEquals("bool", ti.getTypeForAlias("Bool"));
+        assertEquals("bogus", ti.getTypeForAlias("bogus"));
+    }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/DatabaseMetaDataPropertiesTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/DatabaseMetaDataPropertiesTest.java
index ebb44a8..4509450 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/DatabaseMetaDataPropertiesTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/DatabaseMetaDataPropertiesTest.java
@@ -24,196 +24,196 @@ import java.sql.DatabaseMetaData;
 import java.sql.SQLException;
 
 /*
-* TestCase to test the internal functionality of org.postgresql.jdbc2.DatabaseMetaData's various
-* properties. Methods which return a ResultSet are tested elsewhere. This avoids a complicated
-* setUp/tearDown for something like assertTrue(dbmd.nullPlusNonNullIsNull());
-*/
+ * TestCase to test the internal functionality of org.postgresql.jdbc2.DatabaseMetaData's various
+ * properties. Methods which return a ResultSet are tested elsewhere. This avoids a complicated
+ * setUp/tearDown for something like assertTrue(dbmd.nullPlusNonNullIsNull());
+ */
 class DatabaseMetaDataPropertiesTest {
-  private Connection con;
+    private Connection con;
 
-  @BeforeEach
-  void setUp() throws Exception {
-    con = TestUtil.openDB();
-  }
+    @BeforeEach
+    void setUp() throws Exception {
+        con = TestUtil.openDB();
+    }
 
-  @AfterEach
-  void tearDown() throws Exception {
-    TestUtil.closeDB(con);
-  }
+    @AfterEach
+    void tearDown() throws Exception {
+        TestUtil.closeDB(con);
+    }
 
-  /*
-   * The spec says this may return null, but we always do!
-   */
-  @Test
-  void getMetaData() throws SQLException {
-    DatabaseMetaData dbmd = con.getMetaData();
-    assertNotNull(dbmd);
-  }
+    /*
+     * The spec says this may return null, but we always do!
+     */
+    @Test
+    void getMetaData() throws SQLException {
+        DatabaseMetaData dbmd = con.getMetaData();
+        assertNotNull(dbmd);
+    }
 
-  /*
-   * Test default capabilities
-   */
-  @Test
-  void capabilities() throws SQLException {
-    DatabaseMetaData dbmd = con.getMetaData();
-    assertNotNull(dbmd);
+    /*
+     * Test default capabilities
+     */
+    @Test
+    void capabilities() throws SQLException {
+        DatabaseMetaData dbmd = con.getMetaData();
+        assertNotNull(dbmd);
 
-    assertTrue(dbmd.allProceduresAreCallable());
-    assertTrue(dbmd.allTablesAreSelectable()); // not true all the time
+        assertTrue(dbmd.allProceduresAreCallable());
+        assertTrue(dbmd.allTablesAreSelectable()); // not true all the time
 
-    // This should always be false for postgresql (at least for 7.x)
-    assertFalse(dbmd.isReadOnly());
+        // This should always be false for postgresql (at least for 7.x)
+        assertFalse(dbmd.isReadOnly());
 
-    // we support multiple resultsets via multiple statements in one execute() now
-    assertTrue(dbmd.supportsMultipleResultSets());
+        // we support multiple resultsets via multiple statements in one execute() now
+        assertTrue(dbmd.supportsMultipleResultSets());
 
-    // yes, as multiple backends can have transactions open
-    assertTrue(dbmd.supportsMultipleTransactions());
+        // yes, as multiple backends can have transactions open
+        assertTrue(dbmd.supportsMultipleTransactions());
 
-    assertTrue(dbmd.supportsMinimumSQLGrammar());
-    assertFalse(dbmd.supportsCoreSQLGrammar());
-    assertFalse(dbmd.supportsExtendedSQLGrammar());
-    assertTrue(dbmd.supportsANSI92EntryLevelSQL());
-    assertFalse(dbmd.supportsANSI92IntermediateSQL());
-    assertFalse(dbmd.supportsANSI92FullSQL());
+        assertTrue(dbmd.supportsMinimumSQLGrammar());
+        assertFalse(dbmd.supportsCoreSQLGrammar());
+        assertFalse(dbmd.supportsExtendedSQLGrammar());
+        assertTrue(dbmd.supportsANSI92EntryLevelSQL());
+        assertFalse(dbmd.supportsANSI92IntermediateSQL());
+        assertFalse(dbmd.supportsANSI92FullSQL());
 
-    assertTrue(dbmd.supportsIntegrityEnhancementFacility());
+        assertTrue(dbmd.supportsIntegrityEnhancementFacility());
 
-  }
+    }
 
-  @Test
-  void joins() throws SQLException {
-    DatabaseMetaData dbmd = con.getMetaData();
-    assertNotNull(dbmd);
+    @Test
+    void joins() throws SQLException {
+        DatabaseMetaData dbmd = con.getMetaData();
+        assertNotNull(dbmd);
 
-    assertTrue(dbmd.supportsOuterJoins());
-    assertTrue(dbmd.supportsFullOuterJoins());
-    assertTrue(dbmd.supportsLimitedOuterJoins());
-  }
+        assertTrue(dbmd.supportsOuterJoins());
+        assertTrue(dbmd.supportsFullOuterJoins());
+        assertTrue(dbmd.supportsLimitedOuterJoins());
+    }
 
-  @Test
-  void cursors() throws SQLException {
-    DatabaseMetaData dbmd = con.getMetaData();
-    assertNotNull(dbmd);
+    @Test
+    void cursors() throws SQLException {
+        DatabaseMetaData dbmd = con.getMetaData();
+        assertNotNull(dbmd);
 
-    assertFalse(dbmd.supportsPositionedDelete());
-    assertFalse(dbmd.supportsPositionedUpdate());
-  }
+        assertFalse(dbmd.supportsPositionedDelete());
+        assertFalse(dbmd.supportsPositionedUpdate());
+    }
 
-  @Test
-  void values() throws SQLException {
-    DatabaseMetaData dbmd = con.getMetaData();
-    assertNotNull(dbmd);
-    int indexMaxKeys = dbmd.getMaxColumnsInIndex();
-    assertEquals(32, indexMaxKeys);
-  }
+    @Test
+    void values() throws SQLException {
+        DatabaseMetaData dbmd = con.getMetaData();
+        assertNotNull(dbmd);
+        int indexMaxKeys = dbmd.getMaxColumnsInIndex();
+        assertEquals(32, indexMaxKeys);
+    }
 
-  @Test
-  void nulls() throws SQLException {
-    DatabaseMetaData dbmd = con.getMetaData();
-    assertNotNull(dbmd);
+    @Test
+    void nulls() throws SQLException {
+        DatabaseMetaData dbmd = con.getMetaData();
+        assertNotNull(dbmd);
 
-    assertFalse(dbmd.nullsAreSortedAtStart());
-    assertFalse(dbmd.nullsAreSortedAtEnd());
-    assertTrue(dbmd.nullsAreSortedHigh());
-    assertFalse(dbmd.nullsAreSortedLow());
+        assertFalse(dbmd.nullsAreSortedAtStart());
+        assertFalse(dbmd.nullsAreSortedAtEnd());
+        assertTrue(dbmd.nullsAreSortedHigh());
+        assertFalse(dbmd.nullsAreSortedLow());
 
-    assertTrue(dbmd.nullPlusNonNullIsNull());
+        assertTrue(dbmd.nullPlusNonNullIsNull());
 
-    assertTrue(dbmd.supportsNonNullableColumns());
-  }
+        assertTrue(dbmd.supportsNonNullableColumns());
+    }
 
-  @Test
-  void localFiles() throws SQLException {
-    DatabaseMetaData dbmd = con.getMetaData();
-    assertNotNull(dbmd);
+    @Test
+    void localFiles() throws SQLException {
+        DatabaseMetaData dbmd = con.getMetaData();
+        assertNotNull(dbmd);
 
-    assertFalse(dbmd.usesLocalFilePerTable());
-    assertFalse(dbmd.usesLocalFiles());
-  }
+        assertFalse(dbmd.usesLocalFilePerTable());
+        assertFalse(dbmd.usesLocalFiles());
+    }
 
-  @Test
-  void identifiers() throws SQLException {
-    DatabaseMetaData dbmd = con.getMetaData();
-    assertNotNull(dbmd);
+    @Test
+    void identifiers() throws SQLException {
+        DatabaseMetaData dbmd = con.getMetaData();
+        assertNotNull(dbmd);
 
-    assertFalse(dbmd.supportsMixedCaseIdentifiers());
-    assertTrue(dbmd.supportsMixedCaseQuotedIdentifiers());
+        assertFalse(dbmd.supportsMixedCaseIdentifiers());
+        assertTrue(dbmd.supportsMixedCaseQuotedIdentifiers());
 
-    assertFalse(dbmd.storesUpperCaseIdentifiers());
-    assertTrue(dbmd.storesLowerCaseIdentifiers());
-    assertFalse(dbmd.storesUpperCaseQuotedIdentifiers());
-    assertFalse(dbmd.storesLowerCaseQuotedIdentifiers());
-    assertFalse(dbmd.storesMixedCaseQuotedIdentifiers());
+        assertFalse(dbmd.storesUpperCaseIdentifiers());
+        assertTrue(dbmd.storesLowerCaseIdentifiers());
+        assertFalse(dbmd.storesUpperCaseQuotedIdentifiers());
+        assertFalse(dbmd.storesLowerCaseQuotedIdentifiers());
+        assertFalse(dbmd.storesMixedCaseQuotedIdentifiers());
 
-    assertEquals("\"", dbmd.getIdentifierQuoteString());
+        assertEquals("\"", dbmd.getIdentifierQuoteString());
 
-  }
+    }
 
-  @Test
-  void tables() throws SQLException {
-    DatabaseMetaData dbmd = con.getMetaData();
-    assertNotNull(dbmd);
+    @Test
+    void tables() throws SQLException {
+        DatabaseMetaData dbmd = con.getMetaData();
+        assertNotNull(dbmd);
 
-    // we can add columns
-    assertTrue(dbmd.supportsAlterTableWithAddColumn());
+        // we can add columns
+        assertTrue(dbmd.supportsAlterTableWithAddColumn());
 
-    // we can only drop columns in >= 7.3
-    assertTrue(dbmd.supportsAlterTableWithDropColumn());
-  }
+        // we can only drop columns in >= 7.3
+        assertTrue(dbmd.supportsAlterTableWithDropColumn());
+    }
 
-  @Test
-  void select() throws SQLException {
-    DatabaseMetaData dbmd = con.getMetaData();
-    assertNotNull(dbmd);
+    @Test
+    void select() throws SQLException {
+        DatabaseMetaData dbmd = con.getMetaData();
+        assertNotNull(dbmd);
 
-    // yes we can?: SELECT col a FROM a;
-    assertTrue(dbmd.supportsColumnAliasing());
+        // yes we can?: SELECT col a FROM a;
+        assertTrue(dbmd.supportsColumnAliasing());
 
-    // yes we can have expressions in ORDERBY
-    assertTrue(dbmd.supportsExpressionsInOrderBy());
+        // yes we can have expressions in ORDERBY
+        assertTrue(dbmd.supportsExpressionsInOrderBy());
 
-    // Yes, an ORDER BY clause can contain columns that are not in the
-    // SELECT clause.
-    assertTrue(dbmd.supportsOrderByUnrelated());
+        // Yes, an ORDER BY clause can contain columns that are not in the
+        // SELECT clause.
+        assertTrue(dbmd.supportsOrderByUnrelated());
 
-    assertTrue(dbmd.supportsGroupBy());
-    assertTrue(dbmd.supportsGroupByUnrelated());
-    assertTrue(dbmd.supportsGroupByBeyondSelect()); // needs checking
-  }
+        assertTrue(dbmd.supportsGroupBy());
+        assertTrue(dbmd.supportsGroupByUnrelated());
+        assertTrue(dbmd.supportsGroupByBeyondSelect()); // needs checking
+    }
 
-  @Test
-  void dBParams() throws SQLException {
-    DatabaseMetaData dbmd = con.getMetaData();
-    assertNotNull(dbmd);
+    @Test
+    void dBParams() throws SQLException {
+        DatabaseMetaData dbmd = con.getMetaData();
+        assertNotNull(dbmd);
 
-    assertEquals(TestUtil.getURL(), dbmd.getURL());
-    assertEquals(TestUtil.getUser(), dbmd.getUserName());
-  }
+        assertEquals(TestUtil.getURL(), dbmd.getURL());
+        assertEquals(TestUtil.getUser(), dbmd.getUserName());
+    }
 
-  @Test
-  void dbProductDetails() throws SQLException {
-    assertTrue(con instanceof PGConnection);
+    @Test
+    void dbProductDetails() throws SQLException {
+        assertTrue(con instanceof PGConnection);
 
-    DatabaseMetaData dbmd = con.getMetaData();
-    assertNotNull(dbmd);
+        DatabaseMetaData dbmd = con.getMetaData();
+        assertNotNull(dbmd);
 
-    assertEquals("PostgreSQL", dbmd.getDatabaseProductName());
-    assertTrue(dbmd.getDatabaseMajorVersion() >= 8);
-    assertTrue(dbmd.getDatabaseMinorVersion() >= 0);
-    assertTrue(dbmd.getDatabaseProductVersion().startsWith(String.valueOf(dbmd.getDatabaseMajorVersion())));
-  }
+        assertEquals("PostgreSQL", dbmd.getDatabaseProductName());
+        assertTrue(dbmd.getDatabaseMajorVersion() >= 8);
+        assertTrue(dbmd.getDatabaseMinorVersion() >= 0);
+        assertTrue(dbmd.getDatabaseProductVersion().startsWith(String.valueOf(dbmd.getDatabaseMajorVersion())));
+    }
 
-  @Test
-  void driverVersioning() throws SQLException {
-    DatabaseMetaData dbmd = con.getMetaData();
-    assertNotNull(dbmd);
+    @Test
+    void driverVersioning() throws SQLException {
+        DatabaseMetaData dbmd = con.getMetaData();
+        assertNotNull(dbmd);
 
-    assertEquals("PostgreSQL JDBC Driver", dbmd.getDriverName());
-    assertEquals(DriverInfo.DRIVER_VERSION, dbmd.getDriverVersion());
-    assertEquals(new Driver().getMajorVersion(), dbmd.getDriverMajorVersion());
-    assertEquals(new Driver().getMinorVersion(), dbmd.getDriverMinorVersion());
-    assertTrue(dbmd.getJDBCMajorVersion() >= 4);
-    assertTrue(dbmd.getJDBCMinorVersion() >= 0);
-  }
+        assertEquals("PostgreSQL JDBC Driver", dbmd.getDriverName());
+        assertEquals(DriverInfo.DRIVER_VERSION, dbmd.getDriverVersion());
+        assertEquals(new Driver().getMajorVersion(), dbmd.getDriverMajorVersion());
+        assertEquals(new Driver().getMinorVersion(), dbmd.getDriverMinorVersion());
+        assertTrue(dbmd.getJDBCMajorVersion() >= 4);
+        assertTrue(dbmd.getJDBCMinorVersion() >= 0);
+    }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/DatabaseMetaDataTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/DatabaseMetaDataTest.java
index 5c16fae..e330f16 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/DatabaseMetaDataTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/DatabaseMetaDataTest.java
@@ -48,1770 +48,1770 @@ import java.util.Set;
  *
  */
 public class DatabaseMetaDataTest {
-  private Connection con;
-  private BinaryMode binaryMode;
+    private Connection con;
+    private BinaryMode binaryMode;
 
-  public void initDatabaseMetaDataTest(BinaryMode binaryMode) {
-    this.binaryMode = binaryMode;
-  }
-
-  public static Iterable<Object[]> data() {
-    Collection<Object[]> ids = new ArrayList<>();
-    for (BinaryMode binaryMode : BinaryMode.values()) {
-      ids.add(new Object[]{binaryMode});
-    }
-    return ids;
-  }
-
-  @BeforeEach
-  void setUp() throws Exception {
-    if (binaryMode == BinaryMode.FORCE) {
-      final Properties props = new Properties();
-      PGProperty.PREPARE_THRESHOLD.set(props, -1);
-      con = TestUtil.openDB(props);
-    } else {
-      con = TestUtil.openDB();
-    }
-    TestUtil.createTable(con, "metadatatest",
-        "id int4, name text, updated timestamptz, colour text, quest text");
-    TestUtil.createTable(con, "precision_test", "implicit_precision numeric");
-    TestUtil.dropSequence(con, "sercoltest_b_seq");
-    TestUtil.dropSequence(con, "sercoltest_c_seq");
-    TestUtil.createTable(con, "sercoltest", "a int, b serial, c bigserial");
-    TestUtil.createTable(con, "\"a\\\"", "a int4");
-    TestUtil.createTable(con, "\"a'\"", "a int4");
-    TestUtil.createTable(con, "arraytable", "a numeric(5,2)[], b varchar(100)[]");
-    TestUtil.createTable(con, "intarraytable", "a int4[], b int4[][]");
-    TestUtil.createView(con, "viewtest", "SELECT id, quest FROM metadatatest");
-    TestUtil.dropType(con, "custom");
-    TestUtil.dropType(con, "_custom");
-    TestUtil.createCompositeType(con, "custom", "i int", false);
-    TestUtil.createCompositeType(con, "_custom", "f float", false);
-
-    // create a table and multiple comments on it
-    TestUtil.createTable(con, "duplicate", "x text");
-    TestUtil.execute(con, "comment on table duplicate is 'duplicate table'");
-    TestUtil.execute(con, "create or replace function bar() returns integer language sql as $$ select 1 $$");
-    TestUtil.execute(con, "comment on function bar() is 'bar function'");
-    try (Connection conPriv = TestUtil.openPrivilegedDB()) {
-      TestUtil.execute(conPriv, "update pg_description set objoid = 'duplicate'::regclass where objoid = 'bar'::regproc");
+    public static Iterable<Object[]> data() {
+        Collection<Object[]> ids = new ArrayList<>();
+        for (BinaryMode binaryMode : BinaryMode.values()) {
+            ids.add(new Object[]{binaryMode});
+        }
+        return ids;
     }
 
-    // 8.2 does not support arrays of composite types
-    TestUtil.createTable(con, "customtable", "c1 custom, c2 _custom"
-        + (TestUtil.haveMinimumServerVersion(con, ServerVersion.v8_3) ? ", c3 custom[], c4 _custom[]" : ""));
-
-    Statement stmt = con.createStatement();
-    // we add the following comments to ensure the joins to the comments
-    // are done correctly. This ensures we correctly test that case.
-    stmt.execute("comment on table metadatatest is 'this is a table comment'");
-    stmt.execute("comment on column metadatatest.id is 'this is a column comment'");
-
-    stmt.execute(
-        "CREATE OR REPLACE FUNCTION f1(int, varchar) RETURNS int AS 'SELECT 1;' LANGUAGE SQL");
-    stmt.execute(
-        "CREATE OR REPLACE FUNCTION f2(a int, b varchar) RETURNS int AS 'SELECT 1;' LANGUAGE SQL");
-    stmt.execute(
-        "CREATE OR REPLACE FUNCTION f3(IN a int, INOUT b varchar, OUT c timestamptz) AS $f$ BEGIN b := 'a'; c := now(); return; END; $f$ LANGUAGE plpgsql");
-    stmt.execute(
-        "CREATE OR REPLACE FUNCTION f4(int) RETURNS metadatatest AS 'SELECT 1, ''a''::text, now(), ''c''::text, ''q''::text' LANGUAGE SQL");
-    if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v8_4)) {
-      // RETURNS TABLE requires PostgreSQL 8.4+
-      stmt.execute(
-          "CREATE OR REPLACE FUNCTION f5() RETURNS TABLE (i int) LANGUAGE sql AS 'SELECT 1'");
+    public void initDatabaseMetaDataTest(BinaryMode binaryMode) {
+        this.binaryMode = binaryMode;
     }
 
-    // create a custom `&` operator, which caused failure with `&` usage in getIndexInfo()
-    stmt.execute(
-        "CREATE OR REPLACE FUNCTION f6(numeric, integer) returns integer as 'BEGIN return $1::integer & $2;END;' language plpgsql immutable;");
-    stmt.execute("DROP OPERATOR IF EXISTS & (numeric, integer)");
-    stmt.execute("CREATE OPERATOR & (LEFTARG = numeric, RIGHTARG = integer, PROCEDURE = f6)");
+    @BeforeEach
+    void setUp() throws Exception {
+        if (binaryMode == BinaryMode.FORCE) {
+            final Properties props = new Properties();
+            PGProperty.PREPARE_THRESHOLD.set(props, -1);
+            con = TestUtil.openDB(props);
+        } else {
+            con = TestUtil.openDB();
+        }
+        TestUtil.createTable(con, "metadatatest",
+                "id int4, name text, updated timestamptz, colour text, quest text");
+        TestUtil.createTable(con, "precision_test", "implicit_precision numeric");
+        TestUtil.dropSequence(con, "sercoltest_b_seq");
+        TestUtil.dropSequence(con, "sercoltest_c_seq");
+        TestUtil.createTable(con, "sercoltest", "a int, b serial, c bigserial");
+        TestUtil.createTable(con, "\"a\\\"", "a int4");
+        TestUtil.createTable(con, "\"a'\"", "a int4");
+        TestUtil.createTable(con, "arraytable", "a numeric(5,2)[], b varchar(100)[]");
+        TestUtil.createTable(con, "intarraytable", "a int4[], b int4[][]");
+        TestUtil.createView(con, "viewtest", "SELECT id, quest FROM metadatatest");
+        TestUtil.dropType(con, "custom");
+        TestUtil.dropType(con, "_custom");
+        TestUtil.createCompositeType(con, "custom", "i int", false);
+        TestUtil.createCompositeType(con, "_custom", "f float", false);
 
-    TestUtil.createDomain(con, "nndom", "int not null");
-    TestUtil.createDomain(con, "varbit2", "varbit(3)");
-    TestUtil.createDomain(con, "float83", "numeric(8,3)");
+        // create a table and multiple comments on it
+        TestUtil.createTable(con, "duplicate", "x text");
+        TestUtil.execute(con, "comment on table duplicate is 'duplicate table'");
+        TestUtil.execute(con, "create or replace function bar() returns integer language sql as $$ select 1 $$");
+        TestUtil.execute(con, "comment on function bar() is 'bar function'");
+        try (Connection conPriv = TestUtil.openPrivilegedDB()) {
+            TestUtil.execute(conPriv, "update pg_description set objoid = 'duplicate'::regclass where objoid = 'bar'::regproc");
+        }
 
-    TestUtil.createTable(con, "domaintable", "id nndom, v varbit2, f float83");
-    stmt.close();
+        // 8.2 does not support arrays of composite types
+        TestUtil.createTable(con, "customtable", "c1 custom, c2 _custom"
+                + (TestUtil.haveMinimumServerVersion(con, ServerVersion.v8_3) ? ", c3 custom[], c4 _custom[]" : ""));
 
-    if ( TestUtil.haveMinimumServerVersion(con, ServerVersion.v12) ) {
-      TestUtil.createTable(con, "employee", "id int GENERATED BY DEFAULT AS IDENTITY PRIMARY KEY, hours_per_week decimal(3,2), rate_per_hour decimal(3,2), gross_pay decimal GENERATED ALWAYS AS (hours_per_week * rate_per_hour) STORED");
-    }
-  }
+        Statement stmt = con.createStatement();
+        // we add the following comments to ensure the joins to the comments
+        // are done correctly. This ensures we correctly test that case.
+        stmt.execute("comment on table metadatatest is 'this is a table comment'");
+        stmt.execute("comment on column metadatatest.id is 'this is a column comment'");
 
-  @AfterEach
-  void tearDown() throws Exception {
-    // Drop function first because it depends on the
-    // metadatatest table's type
-    Statement stmt = con.createStatement();
-    stmt.execute("DROP FUNCTION f4(int)");
-    TestUtil.execute(con, "drop function bar()");
-    TestUtil.dropTable(con, "duplicate");
+        stmt.execute(
+                "CREATE OR REPLACE FUNCTION f1(int, varchar) RETURNS int AS 'SELECT 1;' LANGUAGE SQL");
+        stmt.execute(
+                "CREATE OR REPLACE FUNCTION f2(a int, b varchar) RETURNS int AS 'SELECT 1;' LANGUAGE SQL");
+        stmt.execute(
+                "CREATE OR REPLACE FUNCTION f3(IN a int, INOUT b varchar, OUT c timestamptz) AS $f$ BEGIN b := 'a'; c := now(); return; END; $f$ LANGUAGE plpgsql");
+        stmt.execute(
+                "CREATE OR REPLACE FUNCTION f4(int) RETURNS metadatatest AS 'SELECT 1, ''a''::text, now(), ''c''::text, ''q''::text' LANGUAGE SQL");
+        if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v8_4)) {
+            // RETURNS TABLE requires PostgreSQL 8.4+
+            stmt.execute(
+                    "CREATE OR REPLACE FUNCTION f5() RETURNS TABLE (i int) LANGUAGE sql AS 'SELECT 1'");
+        }
 
-    TestUtil.dropView(con, "viewtest");
-    TestUtil.dropTable(con, "metadatatest");
-    TestUtil.dropTable(con, "sercoltest");
-    TestUtil.dropSequence(con, "sercoltest_b_seq");
-    TestUtil.dropSequence(con, "sercoltest_c_seq");
-    TestUtil.dropTable(con, "precision_test");
-    TestUtil.dropTable(con, "\"a\\\"");
-    TestUtil.dropTable(con, "\"a'\"");
-    TestUtil.dropTable(con, "arraytable");
-    TestUtil.dropTable(con, "intarraytable");
-    TestUtil.dropTable(con, "customtable");
-    TestUtil.dropType(con, "custom");
-    TestUtil.dropType(con, "_custom");
+        // create a custom `&` operator, which caused failure with `&` usage in getIndexInfo()
+        stmt.execute(
+                "CREATE OR REPLACE FUNCTION f6(numeric, integer) returns integer as 'BEGIN return $1::integer & $2;END;' language plpgsql immutable;");
+        stmt.execute("DROP OPERATOR IF EXISTS & (numeric, integer)");
+        stmt.execute("CREATE OPERATOR & (LEFTARG = numeric, RIGHTARG = integer, PROCEDURE = f6)");
 
-    stmt.execute("DROP FUNCTION f1(int, varchar)");
-    stmt.execute("DROP FUNCTION f2(int, varchar)");
-    stmt.execute("DROP FUNCTION f3(int, varchar)");
-    stmt.execute("DROP OPERATOR IF EXISTS & (numeric, integer)");
-    stmt.execute("DROP FUNCTION f6(numeric, integer)");
-    TestUtil.dropTable(con, "domaintable");
-    TestUtil.dropDomain(con, "nndom");
-    TestUtil.dropDomain(con, "varbit2");
-    TestUtil.dropDomain(con, "float83");
+        TestUtil.createDomain(con, "nndom", "int not null");
+        TestUtil.createDomain(con, "varbit2", "varbit(3)");
+        TestUtil.createDomain(con, "float83", "numeric(8,3)");
 
-    if ( TestUtil.haveMinimumServerVersion(con, ServerVersion.v12) ) {
-      TestUtil.dropTable(con, "employee");
+        TestUtil.createTable(con, "domaintable", "id nndom, v varbit2, f float83");
+        stmt.close();
+
+        if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v12)) {
+            TestUtil.createTable(con, "employee", "id int GENERATED BY DEFAULT AS IDENTITY PRIMARY KEY, hours_per_week decimal(3,2), rate_per_hour decimal(3,2), gross_pay decimal GENERATED ALWAYS AS (hours_per_week * rate_per_hour) STORED");
+        }
     }
 
-    TestUtil.closeDB(con);
-  }
+    @AfterEach
+    void tearDown() throws Exception {
+        // Drop function first because it depends on the
+        // metadatatest table's type
+        Statement stmt = con.createStatement();
+        stmt.execute("DROP FUNCTION f4(int)");
+        TestUtil.execute(con, "drop function bar()");
+        TestUtil.dropTable(con, "duplicate");
 
-  @MethodSource("data")
-  @ParameterizedTest(name = "binary = {0}")
-  void arrayTypeInfo(BinaryMode binaryMode) throws SQLException {
-    initDatabaseMetaDataTest(binaryMode);
-    DatabaseMetaData dbmd = con.getMetaData();
-    ResultSet rs = dbmd.getColumns(null, null, "intarraytable", "a");
-    assertTrue(rs.next());
-    assertEquals("_int4", rs.getString("TYPE_NAME"));
-    con.createArrayOf("integer", new Integer[]{});
-    TestUtil.closeQuietly(rs);
-    rs = dbmd.getColumns(null, null, "intarraytable", "a");
-    assertTrue(rs.next());
-    assertEquals("_int4", rs.getString("TYPE_NAME"));
-    TestUtil.closeQuietly(rs);
-  }
+        TestUtil.dropView(con, "viewtest");
+        TestUtil.dropTable(con, "metadatatest");
+        TestUtil.dropTable(con, "sercoltest");
+        TestUtil.dropSequence(con, "sercoltest_b_seq");
+        TestUtil.dropSequence(con, "sercoltest_c_seq");
+        TestUtil.dropTable(con, "precision_test");
+        TestUtil.dropTable(con, "\"a\\\"");
+        TestUtil.dropTable(con, "\"a'\"");
+        TestUtil.dropTable(con, "arraytable");
+        TestUtil.dropTable(con, "intarraytable");
+        TestUtil.dropTable(con, "customtable");
+        TestUtil.dropType(con, "custom");
+        TestUtil.dropType(con, "_custom");
 
-  @MethodSource("data")
-  @ParameterizedTest(name = "binary = {0}")
-  void arrayInt4DoubleDim(BinaryMode binaryMode) throws SQLException {
-    initDatabaseMetaDataTest(binaryMode);
-    DatabaseMetaData dbmd = con.getMetaData();
-    ResultSet rs = dbmd.getColumns(null, null, "intarraytable", "b");
-    assertTrue(rs.next());
-    assertEquals("_int4", rs.getString("TYPE_NAME")); // even int4[][] is represented as _int4
-    con.createArrayOf("int4", new int[][]{{1, 2}, {3, 4}});
-    rs = dbmd.getColumns(null, null, "intarraytable", "b");
-    assertTrue(rs.next());
-    assertEquals("_int4", rs.getString("TYPE_NAME")); // even int4[][] is represented as _int4
-  }
+        stmt.execute("DROP FUNCTION f1(int, varchar)");
+        stmt.execute("DROP FUNCTION f2(int, varchar)");
+        stmt.execute("DROP FUNCTION f3(int, varchar)");
+        stmt.execute("DROP OPERATOR IF EXISTS & (numeric, integer)");
+        stmt.execute("DROP FUNCTION f6(numeric, integer)");
+        TestUtil.dropTable(con, "domaintable");
+        TestUtil.dropDomain(con, "nndom");
+        TestUtil.dropDomain(con, "varbit2");
+        TestUtil.dropDomain(con, "float83");
 
-  @MethodSource("data")
-  @ParameterizedTest(name = "binary = {0}")
-  void customArrayTypeInfo(BinaryMode binaryMode) throws SQLException {
-    initDatabaseMetaDataTest(binaryMode);
-    DatabaseMetaData dbmd = con.getMetaData();
-    ResultSet res = dbmd.getColumns(null, null, "customtable", null);
-    assertTrue(res.next());
-    assertEquals("custom", res.getString("TYPE_NAME"));
-    assertTrue(res.next());
-    assertEquals("_custom", res.getString("TYPE_NAME"));
-    if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v8_3)) {
-      assertTrue(res.next());
-      assertEquals("__custom", res.getString("TYPE_NAME"));
-      assertTrue(res.next());
-      if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v16)) {
-        assertEquals("__custom_1", res.getString("TYPE_NAME"));
-      } else {
-        assertEquals("___custom", res.getString("TYPE_NAME"));
-      }
-    }
-    if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v8_3)) {
-      con.createArrayOf("custom", new Object[]{});
-      res = dbmd.getColumns(null, null, "customtable", null);
-      assertTrue(res.next());
-      assertEquals("custom", res.getString("TYPE_NAME"));
-      assertTrue(res.next());
-      assertEquals("_custom", res.getString("TYPE_NAME"));
-      assertTrue(res.next());
-      assertEquals("__custom", res.getString("TYPE_NAME"));
-      assertTrue(res.next());
-      if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v16)) {
-        assertEquals("__custom_1", res.getString("TYPE_NAME"));
-      } else {
-        assertEquals("___custom", res.getString("TYPE_NAME"));
-      }
-    }
-  }
-
-  @MethodSource("data")
-  @ParameterizedTest(name = "binary = {0}")
-  void tables(BinaryMode binaryMode) throws Exception {
-    initDatabaseMetaDataTest(binaryMode);
-    DatabaseMetaData dbmd = con.getMetaData();
-    assertNotNull(dbmd);
-
-    ResultSet rs = dbmd.getTables(null, null, "metadatates%", new String[]{"TABLE"});
-    assertTrue(rs.next());
-    String tableName = rs.getString("TABLE_NAME");
-    assertEquals("metadatatest", tableName);
-    String tableType = rs.getString("TABLE_TYPE");
-    assertEquals("TABLE", tableType);
-    assertEquals(5, rs.findColumn("REMARKS"));
-    assertEquals(6, rs.findColumn("TYPE_CAT"));
-    assertEquals(7, rs.findColumn("TYPE_SCHEM"));
-    assertEquals(8, rs.findColumn("TYPE_NAME"));
-    assertEquals(9, rs.findColumn("SELF_REFERENCING_COL_NAME"));
-    assertEquals(10, rs.findColumn("REF_GENERATION"));
-
-    // There should only be one row returned
-    assertFalse(rs.next(), "getTables() returned too many rows");
-
-    rs.close();
-
-    rs = dbmd.getColumns("", "", "meta%", "%");
-    assertTrue(rs.next());
-    assertEquals("metadatatest", rs.getString("TABLE_NAME"));
-    assertEquals("id", rs.getString("COLUMN_NAME"));
-    assertEquals(Types.INTEGER, rs.getInt("DATA_TYPE"));
-
-    assertTrue(rs.next());
-    assertEquals("metadatatest", rs.getString("TABLE_NAME"));
-    assertEquals("name", rs.getString("COLUMN_NAME"));
-    assertEquals(Types.VARCHAR, rs.getInt("DATA_TYPE"));
-
-    assertTrue(rs.next());
-    assertEquals("metadatatest", rs.getString("TABLE_NAME"));
-    assertEquals("updated", rs.getString("COLUMN_NAME"));
-    assertEquals(Types.TIMESTAMP, rs.getInt("DATA_TYPE"));
-  }
-
-  @MethodSource("data")
-  @ParameterizedTest(name = "binary = {0}")
-  void crossReference(BinaryMode binaryMode) throws Exception {
-    initDatabaseMetaDataTest(binaryMode);
-    Connection con1 = TestUtil.openDB();
-
-    TestUtil.createTable(con1, "vv", "a int not null, b int not null, constraint vv_pkey primary key ( a, b )");
-
-    TestUtil.createTable(con1, "ww",
-        "m int not null, n int not null, constraint m_pkey primary key ( m, n ), constraint ww_m_fkey foreign key ( m, n ) references vv ( a, b )");
-
-    DatabaseMetaData dbmd = con.getMetaData();
-    assertNotNull(dbmd);
-
-    ResultSet rs = dbmd.getCrossReference(null, "", "vv", null, "", "ww");
-    String[] expectedPkColumnNames = new String[]{"a", "b"};
-    String[] expectedFkColumnNames = new String[]{"m", "n"};
-    int numRows = 0;
-
-    for (int j = 1; rs.next(); j++) {
-
-      String pkTableName = rs.getString("PKTABLE_NAME");
-      assertEquals("vv", pkTableName);
-
-      String pkColumnName = rs.getString("PKCOLUMN_NAME");
-      assertEquals(expectedPkColumnNames[j - 1], pkColumnName);
-
-      String fkTableName = rs.getString("FKTABLE_NAME");
-      assertEquals("ww", fkTableName);
-
-      String fkColumnName = rs.getString("FKCOLUMN_NAME");
-      assertEquals(expectedFkColumnNames[j - 1], fkColumnName);
-
-      String fkName = rs.getString("FK_NAME");
-      assertEquals("ww_m_fkey", fkName);
-
-      String pkName = rs.getString("PK_NAME");
-      assertEquals("vv_pkey", pkName);
-
-      int keySeq = rs.getInt("KEY_SEQ");
-      assertEquals(j, keySeq);
-      numRows += 1;
-    }
-    assertEquals(2, numRows);
-
-    TestUtil.dropTable(con1, "vv");
-    TestUtil.dropTable(con1, "ww");
-    TestUtil.closeDB(con1);
-  }
-
-  @MethodSource("data")
-  @ParameterizedTest(name = "binary = {0}")
-  void foreignKeyActions(BinaryMode binaryMode) throws Exception {
-    initDatabaseMetaDataTest(binaryMode);
-    Connection conn = TestUtil.openDB();
-    TestUtil.createTable(conn, "pkt", "id int primary key");
-    TestUtil.createTable(conn, "fkt1",
-        "id int references pkt on update restrict on delete cascade");
-    TestUtil.createTable(conn, "fkt2",
-        "id int references pkt on update set null on delete set default");
-    DatabaseMetaData dbmd = conn.getMetaData();
-
-    ResultSet rs = dbmd.getImportedKeys(null, "", "fkt1");
-    assertTrue(rs.next());
-    assertEquals(DatabaseMetaData.importedKeyRestrict, rs.getInt("UPDATE_RULE"));
-    assertEquals(DatabaseMetaData.importedKeyCascade, rs.getInt("DELETE_RULE"));
-    rs.close();
-
-    rs = dbmd.getImportedKeys(null, "", "fkt2");
-    assertTrue(rs.next());
-    assertEquals(DatabaseMetaData.importedKeySetNull, rs.getInt("UPDATE_RULE"));
-    assertEquals(DatabaseMetaData.importedKeySetDefault, rs.getInt("DELETE_RULE"));
-    rs.close();
-
-    TestUtil.dropTable(conn, "fkt2");
-    TestUtil.dropTable(conn, "fkt1");
-    TestUtil.dropTable(conn, "pkt");
-    TestUtil.closeDB(conn);
-  }
-
-  @MethodSource("data")
-  @ParameterizedTest(name = "binary = {0}")
-  void foreignKeysToUniqueIndexes(BinaryMode binaryMode) throws Exception {
-    initDatabaseMetaDataTest(binaryMode);
-    Connection con1 = TestUtil.openDB();
-    TestUtil.createTable(con1, "pkt",
-        "a int not null, b int not null, CONSTRAINT pkt_pk_a PRIMARY KEY (a), CONSTRAINT pkt_un_b UNIQUE (b)");
-    TestUtil.createTable(con1, "fkt",
-        "c int, d int, CONSTRAINT fkt_fk_c FOREIGN KEY (c) REFERENCES pkt(b)");
-
-    DatabaseMetaData dbmd = con.getMetaData();
-    ResultSet rs = dbmd.getImportedKeys("", "", "fkt");
-    int j = 0;
-    for (; rs.next(); j++) {
-      assertEquals("pkt", rs.getString("PKTABLE_NAME"));
-      assertEquals("fkt", rs.getString("FKTABLE_NAME"));
-      assertEquals("pkt_un_b", rs.getString("PK_NAME"));
-      assertEquals("b", rs.getString("PKCOLUMN_NAME"));
-    }
-    assertEquals(1, j);
-
-    TestUtil.dropTable(con1, "fkt");
-    TestUtil.dropTable(con1, "pkt");
-    con1.close();
-  }
-
-  @MethodSource("data")
-  @ParameterizedTest(name = "binary = {0}")
-  void multiColumnForeignKeys(BinaryMode binaryMode) throws Exception {
-    initDatabaseMetaDataTest(binaryMode);
-    Connection con1 = TestUtil.openDB();
-    TestUtil.createTable(con1, "pkt",
-        "a int not null, b int not null, CONSTRAINT pkt_pk PRIMARY KEY (a,b)");
-    TestUtil.createTable(con1, "fkt",
-        "c int, d int, CONSTRAINT fkt_fk_pkt FOREIGN KEY (c,d) REFERENCES pkt(b,a)");
-
-    DatabaseMetaData dbmd = con.getMetaData();
-    ResultSet rs = dbmd.getImportedKeys("", "", "fkt");
-    int j = 0;
-    for (; rs.next(); j++) {
-      assertEquals("pkt", rs.getString("PKTABLE_NAME"));
-      assertEquals("fkt", rs.getString("FKTABLE_NAME"));
-      assertEquals(j + 1, rs.getInt("KEY_SEQ"));
-      if (j == 0) {
-        assertEquals("b", rs.getString("PKCOLUMN_NAME"));
-        assertEquals("c", rs.getString("FKCOLUMN_NAME"));
-      } else {
-        assertEquals("a", rs.getString("PKCOLUMN_NAME"));
-        assertEquals("d", rs.getString("FKCOLUMN_NAME"));
-      }
-    }
-    assertEquals(2, j);
-
-    TestUtil.dropTable(con1, "fkt");
-    TestUtil.dropTable(con1, "pkt");
-    con1.close();
-  }
-
-  @MethodSource("data")
-  @ParameterizedTest(name = "binary = {0}")
-  void sameTableForeignKeys(BinaryMode binaryMode) throws Exception {
-    initDatabaseMetaDataTest(binaryMode);
-    Connection con1 = TestUtil.openDB();
-
-    TestUtil.createTable(con1, "person",
-        "FIRST_NAME character varying(100) NOT NULL," + "LAST_NAME character varying(100) NOT NULL,"
-            + "FIRST_NAME_PARENT_1 character varying(100),"
-            + "LAST_NAME_PARENT_1 character varying(100),"
-            + "FIRST_NAME_PARENT_2 character varying(100),"
-            + "LAST_NAME_PARENT_2 character varying(100),"
-            + "CONSTRAINT PERSON_pkey PRIMARY KEY (FIRST_NAME , LAST_NAME ),"
-            + "CONSTRAINT PARENT_1_fkey FOREIGN KEY (FIRST_NAME_PARENT_1, LAST_NAME_PARENT_1)"
-            + "REFERENCES PERSON (FIRST_NAME, LAST_NAME) MATCH SIMPLE "
-            + "ON UPDATE CASCADE ON DELETE CASCADE,"
-            + "CONSTRAINT PARENT_2_fkey FOREIGN KEY (FIRST_NAME_PARENT_2, LAST_NAME_PARENT_2)"
-            + "REFERENCES PERSON (FIRST_NAME, LAST_NAME) MATCH SIMPLE "
-            + "ON UPDATE CASCADE ON DELETE CASCADE");
-
-    DatabaseMetaData dbmd = con.getMetaData();
-    assertNotNull(dbmd);
-    ResultSet rs = dbmd.getImportedKeys(null, "", "person");
-
-    final List<String> fkNames = new ArrayList<>();
-
-    int lastFieldCount = -1;
-    while (rs.next()) {
-      // destination table (all foreign keys point to the same)
-      String pkTableName = rs.getString("PKTABLE_NAME");
-      assertEquals("person", pkTableName);
-
-      // destination fields
-      String pkColumnName = rs.getString("PKCOLUMN_NAME");
-      assertTrue("first_name".equals(pkColumnName) || "last_name".equals(pkColumnName));
-
-      // source table (all foreign keys are in the same)
-      String fkTableName = rs.getString("FKTABLE_NAME");
-      assertEquals("person", fkTableName);
-
-      // foreign key name
-      String fkName = rs.getString("FK_NAME");
-      // sequence number within the foreign key
-      int seq = rs.getInt("KEY_SEQ");
-      if (seq == 1) {
-        // begin new foreign key
-        assertFalse(fkNames.contains(fkName));
-        fkNames.add(fkName);
-        // all foreign keys have 2 fields
-        assertTrue(lastFieldCount < 0 || lastFieldCount == 2);
-      } else {
-        // continue foreign key, i.e. fkName matches the last foreign key
-        assertEquals(fkNames.get(fkNames.size() - 1), fkName);
-        // see always increases by 1
-        assertEquals(seq, lastFieldCount + 1);
-      }
-      lastFieldCount = seq;
-    }
-    // there's more than one foreign key from a table to another
-    assertEquals(2, fkNames.size());
-
-    TestUtil.dropTable(con1, "person");
-    TestUtil.closeDB(con1);
-  }
-
-  @MethodSource("data")
-  @ParameterizedTest(name = "binary = {0}")
-  void foreignKeys(BinaryMode binaryMode) throws Exception {
-    initDatabaseMetaDataTest(binaryMode);
-    Connection con1 = TestUtil.openDB();
-    TestUtil.createTable(con1, "people", "id int4 primary key, name text");
-    TestUtil.createTable(con1, "policy", "id int4 primary key, name text");
-
-    TestUtil.createTable(con1, "users",
-        "id int4 primary key, people_id int4, policy_id int4,"
-            + "CONSTRAINT people FOREIGN KEY (people_id) references people(id),"
-            + "constraint policy FOREIGN KEY (policy_id) references policy(id)");
-
-    DatabaseMetaData dbmd = con.getMetaData();
-    assertNotNull(dbmd);
-
-    ResultSet rs = dbmd.getImportedKeys(null, "", "users");
-    int j = 0;
-    for (; rs.next(); j++) {
-
-      String pkTableName = rs.getString("PKTABLE_NAME");
-      assertTrue("people".equals(pkTableName) || "policy".equals(pkTableName));
-
-      String pkColumnName = rs.getString("PKCOLUMN_NAME");
-      assertEquals("id", pkColumnName);
-
-      String fkTableName = rs.getString("FKTABLE_NAME");
-      assertEquals("users", fkTableName);
-
-      String fkColumnName = rs.getString("FKCOLUMN_NAME");
-      assertTrue("people_id".equals(fkColumnName) || "policy_id".equals(fkColumnName));
-
-      String fkName = rs.getString("FK_NAME");
-      assertTrue(fkName.startsWith("people") || fkName.startsWith("policy"));
-
-      String pkName = rs.getString("PK_NAME");
-      assertTrue("people_pkey".equals(pkName) || "policy_pkey".equals(pkName));
+        if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v12)) {
+            TestUtil.dropTable(con, "employee");
+        }
 
+        TestUtil.closeDB(con);
     }
 
-    assertEquals(2, j);
-
-    rs = dbmd.getExportedKeys(null, "", "people");
-
-    // this is hacky, but it will serve the purpose
-    assertTrue(rs.next());
-
-    assertEquals("people", rs.getString("PKTABLE_NAME"));
-    assertEquals("id", rs.getString("PKCOLUMN_NAME"));
-
-    assertEquals("users", rs.getString("FKTABLE_NAME"));
-    assertEquals("people_id", rs.getString("FKCOLUMN_NAME"));
-
-    assertTrue(rs.getString("FK_NAME").startsWith("people"));
-
-    TestUtil.dropTable(con1, "users");
-    TestUtil.dropTable(con1, "people");
-    TestUtil.dropTable(con1, "policy");
-    TestUtil.closeDB(con1);
-  }
-
-  @MethodSource("data")
-  @ParameterizedTest(name = "binary = {0}")
-  void numericPrecision(BinaryMode binaryMode) throws SQLException {
-    initDatabaseMetaDataTest(binaryMode);
-    DatabaseMetaData dbmd = con.getMetaData();
-    assertNotNull(dbmd);
-    ResultSet rs = dbmd.getColumns(null, "public", "precision_test", "%");
-    assertTrue(rs.next(), "It should have a row for the first column");
-    assertEquals(0, rs.getInt("COLUMN_SIZE"), "The column size should be zero");
-    assertFalse(rs.next(), "It should have a single column");
-  }
-
-  @MethodSource("data")
-  @ParameterizedTest(name = "binary = {0}")
-  void columns(BinaryMode binaryMode) throws SQLException {
-    initDatabaseMetaDataTest(binaryMode);
-    // At the moment just test that no exceptions are thrown KJ
-    String [] metadataColumns = {"TABLE_CAT", "TABLE_SCHEM", "TABLE_NAME", "COLUMN_NAME",
-                                 "DATA_TYPE", "TYPE_NAME", "COLUMN_SIZE", "BUFFER_LENGTH",
-                                 "DECIMAL_DIGITS", "NUM_PREC_RADIX", "NULLABLE", "REMARKS",
-                                 "COLUMN_DEF", "SQL_DATA_TYPE", "SQL_DATETIME_SUB", "CHAR_OCTET_LENGTH",
-                                 "ORDINAL_POSITION", "IS_NULLABLE", "SCOPE_CATALOG", "SCOPE_SCHEMA",
-                                 "SCOPE_TABLE", "SOURCE_DATA_TYPE", "IS_AUTOINCREMENT", "IS_GENERATEDCOLUMN"};
-
-    DatabaseMetaData dbmd = con.getMetaData();
-    assertNotNull(dbmd);
-    ResultSet rs = dbmd.getColumns(null, null, "pg_class", null);
-    if ( rs.next() ) {
-      for (int i = 0; i < metadataColumns.length; i++) {
-        assertEquals(i + 1, rs.findColumn(metadataColumns[i]));
-      }
-    }
-    rs.close();
-  }
-
-  @MethodSource("data")
-  @ParameterizedTest(name = "binary = {0}")
-  void droppedColumns(BinaryMode binaryMode) throws SQLException {
-    initDatabaseMetaDataTest(binaryMode);
-    if (!TestUtil.haveMinimumServerVersion(con, ServerVersion.v8_4)) {
-      return;
+    @MethodSource("data")
+    @ParameterizedTest(name = "binary = {0}")
+    void arrayTypeInfo(BinaryMode binaryMode) throws SQLException {
+        initDatabaseMetaDataTest(binaryMode);
+        DatabaseMetaData dbmd = con.getMetaData();
+        ResultSet rs = dbmd.getColumns(null, null, "intarraytable", "a");
+        assertTrue(rs.next());
+        assertEquals("_int4", rs.getString("TYPE_NAME"));
+        con.createArrayOf("integer", new Integer[]{});
+        TestUtil.closeQuietly(rs);
+        rs = dbmd.getColumns(null, null, "intarraytable", "a");
+        assertTrue(rs.next());
+        assertEquals("_int4", rs.getString("TYPE_NAME"));
+        TestUtil.closeQuietly(rs);
     }
 
-    Statement stmt = con.createStatement();
-    stmt.execute("ALTER TABLE metadatatest DROP name");
-    stmt.execute("ALTER TABLE metadatatest DROP colour");
-    stmt.close();
+    @MethodSource("data")
+    @ParameterizedTest(name = "binary = {0}")
+    void arrayInt4DoubleDim(BinaryMode binaryMode) throws SQLException {
+        initDatabaseMetaDataTest(binaryMode);
+        DatabaseMetaData dbmd = con.getMetaData();
+        ResultSet rs = dbmd.getColumns(null, null, "intarraytable", "b");
+        assertTrue(rs.next());
+        assertEquals("_int4", rs.getString("TYPE_NAME")); // even int4[][] is represented as _int4
+        con.createArrayOf("int4", new int[][]{{1, 2}, {3, 4}});
+        rs = dbmd.getColumns(null, null, "intarraytable", "b");
+        assertTrue(rs.next());
+        assertEquals("_int4", rs.getString("TYPE_NAME")); // even int4[][] is represented as _int4
+    }
 
-    DatabaseMetaData dbmd = con.getMetaData();
-    ResultSet rs = dbmd.getColumns(null, null, "metadatatest", null);
+    @MethodSource("data")
+    @ParameterizedTest(name = "binary = {0}")
+    void customArrayTypeInfo(BinaryMode binaryMode) throws SQLException {
+        initDatabaseMetaDataTest(binaryMode);
+        DatabaseMetaData dbmd = con.getMetaData();
+        ResultSet res = dbmd.getColumns(null, null, "customtable", null);
+        assertTrue(res.next());
+        assertEquals("custom", res.getString("TYPE_NAME"));
+        assertTrue(res.next());
+        assertEquals("_custom", res.getString("TYPE_NAME"));
+        if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v8_3)) {
+            assertTrue(res.next());
+            assertEquals("__custom", res.getString("TYPE_NAME"));
+            assertTrue(res.next());
+            if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v16)) {
+                assertEquals("__custom_1", res.getString("TYPE_NAME"));
+            } else {
+                assertEquals("___custom", res.getString("TYPE_NAME"));
+            }
+        }
+        if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v8_3)) {
+            con.createArrayOf("custom", new Object[]{});
+            res = dbmd.getColumns(null, null, "customtable", null);
+            assertTrue(res.next());
+            assertEquals("custom", res.getString("TYPE_NAME"));
+            assertTrue(res.next());
+            assertEquals("_custom", res.getString("TYPE_NAME"));
+            assertTrue(res.next());
+            assertEquals("__custom", res.getString("TYPE_NAME"));
+            assertTrue(res.next());
+            if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v16)) {
+                assertEquals("__custom_1", res.getString("TYPE_NAME"));
+            } else {
+                assertEquals("___custom", res.getString("TYPE_NAME"));
+            }
+        }
+    }
 
-    assertTrue(rs.next());
-    assertEquals("id", rs.getString("COLUMN_NAME"));
-    assertEquals(1, rs.getInt("ORDINAL_POSITION"));
+    @MethodSource("data")
+    @ParameterizedTest(name = "binary = {0}")
+    void tables(BinaryMode binaryMode) throws Exception {
+        initDatabaseMetaDataTest(binaryMode);
+        DatabaseMetaData dbmd = con.getMetaData();
+        assertNotNull(dbmd);
 
-    assertTrue(rs.next());
-    assertEquals("updated", rs.getString("COLUMN_NAME"));
-    assertEquals(2, rs.getInt("ORDINAL_POSITION"));
+        ResultSet rs = dbmd.getTables(null, null, "metadatates%", new String[]{"TABLE"});
+        assertTrue(rs.next());
+        String tableName = rs.getString("TABLE_NAME");
+        assertEquals("metadatatest", tableName);
+        String tableType = rs.getString("TABLE_TYPE");
+        assertEquals("TABLE", tableType);
+        assertEquals(5, rs.findColumn("REMARKS"));
+        assertEquals(6, rs.findColumn("TYPE_CAT"));
+        assertEquals(7, rs.findColumn("TYPE_SCHEM"));
+        assertEquals(8, rs.findColumn("TYPE_NAME"));
+        assertEquals(9, rs.findColumn("SELF_REFERENCING_COL_NAME"));
+        assertEquals(10, rs.findColumn("REF_GENERATION"));
 
-    assertTrue(rs.next());
-    assertEquals("quest", rs.getString("COLUMN_NAME"));
-    assertEquals(3, rs.getInt("ORDINAL_POSITION"));
+        // There should only be one row returned
+        assertFalse(rs.next(), "getTables() returned too many rows");
 
-    rs.close();
+        rs.close();
 
-    rs = dbmd.getColumns(null, null, "metadatatest", "quest");
-    assertTrue(rs.next());
-    assertEquals("quest", rs.getString("COLUMN_NAME"));
-    assertEquals(3, rs.getInt("ORDINAL_POSITION"));
-    assertFalse(rs.next());
-    rs.close();
+        rs = dbmd.getColumns("", "", "meta%", "%");
+        assertTrue(rs.next());
+        assertEquals("metadatatest", rs.getString("TABLE_NAME"));
+        assertEquals("id", rs.getString("COLUMN_NAME"));
+        assertEquals(Types.INTEGER, rs.getInt("DATA_TYPE"));
+
+        assertTrue(rs.next());
+        assertEquals("metadatatest", rs.getString("TABLE_NAME"));
+        assertEquals("name", rs.getString("COLUMN_NAME"));
+        assertEquals(Types.VARCHAR, rs.getInt("DATA_TYPE"));
+
+        assertTrue(rs.next());
+        assertEquals("metadatatest", rs.getString("TABLE_NAME"));
+        assertEquals("updated", rs.getString("COLUMN_NAME"));
+        assertEquals(Types.TIMESTAMP, rs.getInt("DATA_TYPE"));
+    }
+
+    @MethodSource("data")
+    @ParameterizedTest(name = "binary = {0}")
+    void crossReference(BinaryMode binaryMode) throws Exception {
+        initDatabaseMetaDataTest(binaryMode);
+        Connection con1 = TestUtil.openDB();
+
+        TestUtil.createTable(con1, "vv", "a int not null, b int not null, constraint vv_pkey primary key ( a, b )");
+
+        TestUtil.createTable(con1, "ww",
+                "m int not null, n int not null, constraint m_pkey primary key ( m, n ), constraint ww_m_fkey foreign key ( m, n ) references vv ( a, b )");
+
+        DatabaseMetaData dbmd = con.getMetaData();
+        assertNotNull(dbmd);
+
+        ResultSet rs = dbmd.getCrossReference(null, "", "vv", null, "", "ww");
+        String[] expectedPkColumnNames = new String[]{"a", "b"};
+        String[] expectedFkColumnNames = new String[]{"m", "n"};
+        int numRows = 0;
+
+        for (int j = 1; rs.next(); j++) {
+
+            String pkTableName = rs.getString("PKTABLE_NAME");
+            assertEquals("vv", pkTableName);
+
+            String pkColumnName = rs.getString("PKCOLUMN_NAME");
+            assertEquals(expectedPkColumnNames[j - 1], pkColumnName);
+
+            String fkTableName = rs.getString("FKTABLE_NAME");
+            assertEquals("ww", fkTableName);
+
+            String fkColumnName = rs.getString("FKCOLUMN_NAME");
+            assertEquals(expectedFkColumnNames[j - 1], fkColumnName);
+
+            String fkName = rs.getString("FK_NAME");
+            assertEquals("ww_m_fkey", fkName);
+
+            String pkName = rs.getString("PK_NAME");
+            assertEquals("vv_pkey", pkName);
+
+            int keySeq = rs.getInt("KEY_SEQ");
+            assertEquals(j, keySeq);
+            numRows += 1;
+        }
+        assertEquals(2, numRows);
+
+        TestUtil.dropTable(con1, "vv");
+        TestUtil.dropTable(con1, "ww");
+        TestUtil.closeDB(con1);
+    }
+
+    @MethodSource("data")
+    @ParameterizedTest(name = "binary = {0}")
+    void foreignKeyActions(BinaryMode binaryMode) throws Exception {
+        initDatabaseMetaDataTest(binaryMode);
+        Connection conn = TestUtil.openDB();
+        TestUtil.createTable(conn, "pkt", "id int primary key");
+        TestUtil.createTable(conn, "fkt1",
+                "id int references pkt on update restrict on delete cascade");
+        TestUtil.createTable(conn, "fkt2",
+                "id int references pkt on update set null on delete set default");
+        DatabaseMetaData dbmd = conn.getMetaData();
+
+        ResultSet rs = dbmd.getImportedKeys(null, "", "fkt1");
+        assertTrue(rs.next());
+        assertEquals(DatabaseMetaData.importedKeyRestrict, rs.getInt("UPDATE_RULE"));
+        assertEquals(DatabaseMetaData.importedKeyCascade, rs.getInt("DELETE_RULE"));
+        rs.close();
+
+        rs = dbmd.getImportedKeys(null, "", "fkt2");
+        assertTrue(rs.next());
+        assertEquals(DatabaseMetaData.importedKeySetNull, rs.getInt("UPDATE_RULE"));
+        assertEquals(DatabaseMetaData.importedKeySetDefault, rs.getInt("DELETE_RULE"));
+        rs.close();
+
+        TestUtil.dropTable(conn, "fkt2");
+        TestUtil.dropTable(conn, "fkt1");
+        TestUtil.dropTable(conn, "pkt");
+        TestUtil.closeDB(conn);
+    }
+
+    @MethodSource("data")
+    @ParameterizedTest(name = "binary = {0}")
+    void foreignKeysToUniqueIndexes(BinaryMode binaryMode) throws Exception {
+        initDatabaseMetaDataTest(binaryMode);
+        Connection con1 = TestUtil.openDB();
+        TestUtil.createTable(con1, "pkt",
+                "a int not null, b int not null, CONSTRAINT pkt_pk_a PRIMARY KEY (a), CONSTRAINT pkt_un_b UNIQUE (b)");
+        TestUtil.createTable(con1, "fkt",
+                "c int, d int, CONSTRAINT fkt_fk_c FOREIGN KEY (c) REFERENCES pkt(b)");
+
+        DatabaseMetaData dbmd = con.getMetaData();
+        ResultSet rs = dbmd.getImportedKeys("", "", "fkt");
+        int j = 0;
+        for (; rs.next(); j++) {
+            assertEquals("pkt", rs.getString("PKTABLE_NAME"));
+            assertEquals("fkt", rs.getString("FKTABLE_NAME"));
+            assertEquals("pkt_un_b", rs.getString("PK_NAME"));
+            assertEquals("b", rs.getString("PKCOLUMN_NAME"));
+        }
+        assertEquals(1, j);
+
+        TestUtil.dropTable(con1, "fkt");
+        TestUtil.dropTable(con1, "pkt");
+        con1.close();
+    }
+
+    @MethodSource("data")
+    @ParameterizedTest(name = "binary = {0}")
+    void multiColumnForeignKeys(BinaryMode binaryMode) throws Exception {
+        initDatabaseMetaDataTest(binaryMode);
+        Connection con1 = TestUtil.openDB();
+        TestUtil.createTable(con1, "pkt",
+                "a int not null, b int not null, CONSTRAINT pkt_pk PRIMARY KEY (a,b)");
+        TestUtil.createTable(con1, "fkt",
+                "c int, d int, CONSTRAINT fkt_fk_pkt FOREIGN KEY (c,d) REFERENCES pkt(b,a)");
+
+        DatabaseMetaData dbmd = con.getMetaData();
+        ResultSet rs = dbmd.getImportedKeys("", "", "fkt");
+        int j = 0;
+        for (; rs.next(); j++) {
+            assertEquals("pkt", rs.getString("PKTABLE_NAME"));
+            assertEquals("fkt", rs.getString("FKTABLE_NAME"));
+            assertEquals(j + 1, rs.getInt("KEY_SEQ"));
+            if (j == 0) {
+                assertEquals("b", rs.getString("PKCOLUMN_NAME"));
+                assertEquals("c", rs.getString("FKCOLUMN_NAME"));
+            } else {
+                assertEquals("a", rs.getString("PKCOLUMN_NAME"));
+                assertEquals("d", rs.getString("FKCOLUMN_NAME"));
+            }
+        }
+        assertEquals(2, j);
+
+        TestUtil.dropTable(con1, "fkt");
+        TestUtil.dropTable(con1, "pkt");
+        con1.close();
+    }
+
+    @MethodSource("data")
+    @ParameterizedTest(name = "binary = {0}")
+    void sameTableForeignKeys(BinaryMode binaryMode) throws Exception {
+        initDatabaseMetaDataTest(binaryMode);
+        Connection con1 = TestUtil.openDB();
+
+        TestUtil.createTable(con1, "person",
+                "FIRST_NAME character varying(100) NOT NULL," + "LAST_NAME character varying(100) NOT NULL,"
+                        + "FIRST_NAME_PARENT_1 character varying(100),"
+                        + "LAST_NAME_PARENT_1 character varying(100),"
+                        + "FIRST_NAME_PARENT_2 character varying(100),"
+                        + "LAST_NAME_PARENT_2 character varying(100),"
+                        + "CONSTRAINT PERSON_pkey PRIMARY KEY (FIRST_NAME , LAST_NAME ),"
+                        + "CONSTRAINT PARENT_1_fkey FOREIGN KEY (FIRST_NAME_PARENT_1, LAST_NAME_PARENT_1)"
+                        + "REFERENCES PERSON (FIRST_NAME, LAST_NAME) MATCH SIMPLE "
+                        + "ON UPDATE CASCADE ON DELETE CASCADE,"
+                        + "CONSTRAINT PARENT_2_fkey FOREIGN KEY (FIRST_NAME_PARENT_2, LAST_NAME_PARENT_2)"
+                        + "REFERENCES PERSON (FIRST_NAME, LAST_NAME) MATCH SIMPLE "
+                        + "ON UPDATE CASCADE ON DELETE CASCADE");
+
+        DatabaseMetaData dbmd = con.getMetaData();
+        assertNotNull(dbmd);
+        ResultSet rs = dbmd.getImportedKeys(null, "", "person");
+
+        final List<String> fkNames = new ArrayList<>();
+
+        int lastFieldCount = -1;
+        while (rs.next()) {
+            // destination table (all foreign keys point to the same)
+            String pkTableName = rs.getString("PKTABLE_NAME");
+            assertEquals("person", pkTableName);
+
+            // destination fields
+            String pkColumnName = rs.getString("PKCOLUMN_NAME");
+            assertTrue("first_name".equals(pkColumnName) || "last_name".equals(pkColumnName));
+
+            // source table (all foreign keys are in the same)
+            String fkTableName = rs.getString("FKTABLE_NAME");
+            assertEquals("person", fkTableName);
+
+            // foreign key name
+            String fkName = rs.getString("FK_NAME");
+            // sequence number within the foreign key
+            int seq = rs.getInt("KEY_SEQ");
+            if (seq == 1) {
+                // begin new foreign key
+                assertFalse(fkNames.contains(fkName));
+                fkNames.add(fkName);
+                // all foreign keys have 2 fields
+                assertTrue(lastFieldCount < 0 || lastFieldCount == 2);
+            } else {
+                // continue foreign key, i.e. fkName matches the last foreign key
+                assertEquals(fkNames.get(fkNames.size() - 1), fkName);
+            // seq always increases by 1
+                assertEquals(seq, lastFieldCount + 1);
+            }
+            lastFieldCount = seq;
+        }
+        // there's more than one foreign key from a table to another
+        assertEquals(2, fkNames.size());
+
+        TestUtil.dropTable(con1, "person");
+        TestUtil.closeDB(con1);
+    }
+
+    @MethodSource("data")
+    @ParameterizedTest(name = "binary = {0}")
+    void foreignKeys(BinaryMode binaryMode) throws Exception {
+        initDatabaseMetaDataTest(binaryMode);
+        Connection con1 = TestUtil.openDB();
+        TestUtil.createTable(con1, "people", "id int4 primary key, name text");
+        TestUtil.createTable(con1, "policy", "id int4 primary key, name text");
+
+        TestUtil.createTable(con1, "users",
+                "id int4 primary key, people_id int4, policy_id int4,"
+                        + "CONSTRAINT people FOREIGN KEY (people_id) references people(id),"
+                        + "constraint policy FOREIGN KEY (policy_id) references policy(id)");
+
+        DatabaseMetaData dbmd = con.getMetaData();
+        assertNotNull(dbmd);
+
+        ResultSet rs = dbmd.getImportedKeys(null, "", "users");
+        int j = 0;
+        for (; rs.next(); j++) {
+
+            String pkTableName = rs.getString("PKTABLE_NAME");
+            assertTrue("people".equals(pkTableName) || "policy".equals(pkTableName));
+
+            String pkColumnName = rs.getString("PKCOLUMN_NAME");
+            assertEquals("id", pkColumnName);
+
+            String fkTableName = rs.getString("FKTABLE_NAME");
+            assertEquals("users", fkTableName);
+
+            String fkColumnName = rs.getString("FKCOLUMN_NAME");
+            assertTrue("people_id".equals(fkColumnName) || "policy_id".equals(fkColumnName));
+
+            String fkName = rs.getString("FK_NAME");
+            assertTrue(fkName.startsWith("people") || fkName.startsWith("policy"));
+
+            String pkName = rs.getString("PK_NAME");
+            assertTrue("people_pkey".equals(pkName) || "policy_pkey".equals(pkName));
+
+        }
+
+        assertEquals(2, j);
+
+        rs = dbmd.getExportedKeys(null, "", "people");
+
+        // this is hacky, but it will serve the purpose
+        assertTrue(rs.next());
+
+        assertEquals("people", rs.getString("PKTABLE_NAME"));
+        assertEquals("id", rs.getString("PKCOLUMN_NAME"));
+
+        assertEquals("users", rs.getString("FKTABLE_NAME"));
+        assertEquals("people_id", rs.getString("FKCOLUMN_NAME"));
+
+        assertTrue(rs.getString("FK_NAME").startsWith("people"));
+
+        TestUtil.dropTable(con1, "users");
+        TestUtil.dropTable(con1, "people");
+        TestUtil.dropTable(con1, "policy");
+        TestUtil.closeDB(con1);
+    }
+
+    @MethodSource("data")
+    @ParameterizedTest(name = "binary = {0}")
+    void numericPrecision(BinaryMode binaryMode) throws SQLException {
+        initDatabaseMetaDataTest(binaryMode);
+        DatabaseMetaData dbmd = con.getMetaData();
+        assertNotNull(dbmd);
+        ResultSet rs = dbmd.getColumns(null, "public", "precision_test", "%");
+        assertTrue(rs.next(), "It should have a row for the first column");
+        assertEquals(0, rs.getInt("COLUMN_SIZE"), "The column size should be zero");
+        assertFalse(rs.next(), "It should have a single column");
+    }
+
+    @MethodSource("data")
+    @ParameterizedTest(name = "binary = {0}")
+    void columns(BinaryMode binaryMode) throws SQLException {
+        initDatabaseMetaDataTest(binaryMode);
+        // At the moment just test that no exceptions are thrown KJ
+        String[] metadataColumns = {"TABLE_CAT", "TABLE_SCHEM", "TABLE_NAME", "COLUMN_NAME",
+                "DATA_TYPE", "TYPE_NAME", "COLUMN_SIZE", "BUFFER_LENGTH",
+                "DECIMAL_DIGITS", "NUM_PREC_RADIX", "NULLABLE", "REMARKS",
+                "COLUMN_DEF", "SQL_DATA_TYPE", "SQL_DATETIME_SUB", "CHAR_OCTET_LENGTH",
+                "ORDINAL_POSITION", "IS_NULLABLE", "SCOPE_CATALOG", "SCOPE_SCHEMA",
+                "SCOPE_TABLE", "SOURCE_DATA_TYPE", "IS_AUTOINCREMENT", "IS_GENERATEDCOLUMN"};
+
+        DatabaseMetaData dbmd = con.getMetaData();
+        assertNotNull(dbmd);
+        ResultSet rs = dbmd.getColumns(null, null, "pg_class", null);
+        if (rs.next()) {
+            for (int i = 0; i < metadataColumns.length; i++) {
+                assertEquals(i + 1, rs.findColumn(metadataColumns[i]));
+            }
+        }
+        rs.close();
+    }
+
+    @MethodSource("data")
+    @ParameterizedTest(name = "binary = {0}")
+    void droppedColumns(BinaryMode binaryMode) throws SQLException {
+        initDatabaseMetaDataTest(binaryMode);
+        if (!TestUtil.haveMinimumServerVersion(con, ServerVersion.v8_4)) {
+            return;
+        }
+
+        Statement stmt = con.createStatement();
+        stmt.execute("ALTER TABLE metadatatest DROP name");
+        stmt.execute("ALTER TABLE metadatatest DROP colour");
+        stmt.close();
+
+        DatabaseMetaData dbmd = con.getMetaData();
+        ResultSet rs = dbmd.getColumns(null, null, "metadatatest", null);
+
+        assertTrue(rs.next());
+        assertEquals("id", rs.getString("COLUMN_NAME"));
+        assertEquals(1, rs.getInt("ORDINAL_POSITION"));
+
+        assertTrue(rs.next());
+        assertEquals("updated", rs.getString("COLUMN_NAME"));
+        assertEquals(2, rs.getInt("ORDINAL_POSITION"));
+
+        assertTrue(rs.next());
+        assertEquals("quest", rs.getString("COLUMN_NAME"));
+        assertEquals(3, rs.getInt("ORDINAL_POSITION"));
+
+        rs.close();
+
+        rs = dbmd.getColumns(null, null, "metadatatest", "quest");
+        assertTrue(rs.next());
+        assertEquals("quest", rs.getString("COLUMN_NAME"));
+        assertEquals(3, rs.getInt("ORDINAL_POSITION"));
+        assertFalse(rs.next());
+        rs.close();
 
     /* getFunctionColumns also has to be aware of dropped columns
        add this in here to make sure it can deal with them
      */
-    rs = dbmd.getFunctionColumns(null, null, "f4", null);
-    assertTrue(rs.next());
-
-    assertTrue(rs.next());
-    assertEquals("id", rs.getString(4));
-
-    assertTrue(rs.next());
-    assertEquals("updated", rs.getString(4));
-
-    rs.close();
-  }
-
-  @MethodSource("data")
-  @ParameterizedTest(name = "binary = {0}")
-  void serialColumns(BinaryMode binaryMode) throws SQLException {
-    initDatabaseMetaDataTest(binaryMode);
-    DatabaseMetaData dbmd = con.getMetaData();
-    ResultSet rs = dbmd.getColumns(null, null, "sercoltest", null);
-    int rownum = 0;
-    while (rs.next()) {
-      assertEquals("sercoltest", rs.getString("TABLE_NAME"));
-      assertEquals(rownum + 1, rs.getInt("ORDINAL_POSITION"));
-      if (rownum == 0) {
-        assertEquals("int4", rs.getString("TYPE_NAME"));
-
-      } else if (rownum == 1) {
-        assertEquals("serial", rs.getString("TYPE_NAME"));
-        assertTrue(rs.getBoolean("IS_AUTOINCREMENT"));
-      } else if (rownum == 2) {
-        assertEquals("bigserial", rs.getString("TYPE_NAME"));
-        assertTrue(rs.getBoolean("IS_AUTOINCREMENT"));
-      }
-
-      rownum++;
-    }
-    assertEquals(3, rownum);
-    rs.close();
-  }
-
-  @MethodSource("data")
-  @ParameterizedTest(name = "binary = {0}")
-  void columnPrivileges(BinaryMode binaryMode) throws SQLException {
-    initDatabaseMetaDataTest(binaryMode);
-    // At the moment just test that no exceptions are thrown KJ
-    DatabaseMetaData dbmd = con.getMetaData();
-    assertNotNull(dbmd);
-    ResultSet rs = dbmd.getColumnPrivileges(null, null, "pg_statistic", null);
-    rs.close();
-  }
-
-  /*
-   * Helper function - this logic is used several times to test relation privileges
-   */
-  public void relationPrivilegesHelper(String relationName) throws SQLException {
-    // Query PG catalog for privileges
-    DatabaseMetaData dbmd = con.getMetaData();
-    assertNotNull(dbmd);
-    ResultSet rs = dbmd.getTablePrivileges(null, null, relationName);
-
-    // Parse result to check if table/view owner has select privileges
-    boolean foundSelect = false;
-    while (rs.next()) {
-      if (rs.getString("GRANTEE").equals(TestUtil.getUser())
-          && "SELECT".equals(rs.getString("PRIVILEGE"))) {
-        foundSelect = true;
-      }
-    }
-    rs.close();
-
-    // Check test condition
-    assertTrue(foundSelect,
-              "Couldn't find SELECT priv on relation "
-                + relationName + "  for " + TestUtil.getUser());
-  }
-
-  @MethodSource("data")
-  @ParameterizedTest(name = "binary = {0}")
-  void tablePrivileges(BinaryMode binaryMode) throws SQLException {
-    initDatabaseMetaDataTest(binaryMode);
-    relationPrivilegesHelper("metadatatest");
-  }
-
-  @MethodSource("data")
-  @ParameterizedTest(name = "binary = {0}")
-  void viewPrivileges(BinaryMode binaryMode) throws SQLException {
-    initDatabaseMetaDataTest(binaryMode);
-    relationPrivilegesHelper("viewtest");
-  }
-
-  @MethodSource("data")
-  @ParameterizedTest(name = "binary = {0}")
-  void materializedViewPrivileges(BinaryMode binaryMode) throws SQLException {
-    initDatabaseMetaDataTest(binaryMode);
-    Assumptions.assumeTrue(TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_3));
-    TestUtil.createMaterializedView(con, "matviewtest", "SELECT id, quest FROM metadatatest");
-    try {
-      relationPrivilegesHelper("matviewtest");
-    } finally {
-      TestUtil.dropMaterializedView(con, "matviewtest");
-    }
-  }
-
-  @MethodSource("data")
-  @ParameterizedTest(name = "binary = {0}")
-  void noTablePrivileges(BinaryMode binaryMode) throws SQLException {
-    initDatabaseMetaDataTest(binaryMode);
-    Statement stmt = con.createStatement();
-    stmt.execute("REVOKE ALL ON metadatatest FROM PUBLIC");
-    stmt.execute("REVOKE ALL ON metadatatest FROM " + TestUtil.getUser());
-    DatabaseMetaData dbmd = con.getMetaData();
-    ResultSet rs = dbmd.getTablePrivileges(null, null, "metadatatest");
-    assertFalse(rs.next());
-  }
-
-  @MethodSource("data")
-  @ParameterizedTest(name = "binary = {0}")
-  void primaryKeys(BinaryMode binaryMode) throws SQLException {
-    initDatabaseMetaDataTest(binaryMode);
-    // At the moment just test that no exceptions are thrown KJ
-    DatabaseMetaData dbmd = con.getMetaData();
-    assertNotNull(dbmd);
-    ResultSet rs = dbmd.getPrimaryKeys(null, null, "pg_class");
-    rs.close();
-  }
-
-  @MethodSource("data")
-  @ParameterizedTest(name = "binary = {0}")
-  void indexInfo(BinaryMode binaryMode) throws SQLException {
-    initDatabaseMetaDataTest(binaryMode);
-    Statement stmt = con.createStatement();
-    stmt.execute("create index idx_id on metadatatest (id)");
-    stmt.execute("create index idx_func_single on metadatatest (upper(colour))");
-    stmt.execute("create unique index idx_un_id on metadatatest(id)");
-    stmt.execute("create index idx_func_multi on metadatatest (upper(colour), upper(quest))");
-    stmt.execute("create index idx_func_mixed on metadatatest (colour, upper(quest))");
-
-    DatabaseMetaData dbmd = con.getMetaData();
-    assertNotNull(dbmd);
-    ResultSet rs = dbmd.getIndexInfo(null, null, "metadatatest", false, false);
-
-    assertTrue(rs.next());
-    assertEquals("idx_un_id", rs.getString("INDEX_NAME"));
-    assertEquals(1, rs.getInt("ORDINAL_POSITION"));
-    assertEquals("id", rs.getString("COLUMN_NAME"));
-    assertFalse(rs.getBoolean("NON_UNIQUE"));
-
-    assertTrue(rs.next());
-    assertEquals("idx_func_mixed", rs.getString("INDEX_NAME"));
-    assertEquals(1, rs.getInt("ORDINAL_POSITION"));
-    assertEquals("colour", rs.getString("COLUMN_NAME"));
-
-    assertTrue(rs.next());
-    assertEquals("idx_func_mixed", rs.getString("INDEX_NAME"));
-    assertEquals(2, rs.getInt("ORDINAL_POSITION"));
-    assertEquals("upper(quest)", rs.getString("COLUMN_NAME"));
-
-    assertTrue(rs.next());
-    assertEquals("idx_func_multi", rs.getString("INDEX_NAME"));
-    assertEquals(1, rs.getInt("ORDINAL_POSITION"));
-    assertEquals("upper(colour)", rs.getString("COLUMN_NAME"));
-
-    assertTrue(rs.next());
-    assertEquals("idx_func_multi", rs.getString("INDEX_NAME"));
-    assertEquals(2, rs.getInt("ORDINAL_POSITION"));
-    assertEquals("upper(quest)", rs.getString("COLUMN_NAME"));
-
-    assertTrue(rs.next());
-    assertEquals("idx_func_single", rs.getString("INDEX_NAME"));
-    assertEquals(1, rs.getInt("ORDINAL_POSITION"));
-    assertEquals("upper(colour)", rs.getString("COLUMN_NAME"));
-
-    assertTrue(rs.next());
-    assertEquals("idx_id", rs.getString("INDEX_NAME"));
-    assertEquals(1, rs.getInt("ORDINAL_POSITION"));
-    assertEquals("id", rs.getString("COLUMN_NAME"));
-    assertTrue(rs.getBoolean("NON_UNIQUE"));
-
-    assertFalse(rs.next());
-
-    rs.close();
-  }
-
-  /**
-   * Order defined at
-   * https://docs.oracle.com/javase/8/docs/api/java/sql/DatabaseMetaData.html#getIndexInfo-java.lang.String-java.lang.String-java.lang.String-boolean-boolean-
-   */
-  @MethodSource("data")
-  @ParameterizedTest(name = "binary = {0}")
-  void indexInfoColumnOrder(BinaryMode binaryMode) throws SQLException {
-    initDatabaseMetaDataTest(binaryMode);
-    DatabaseMetaData dbmd = con.getMetaData();
-    assertNotNull(dbmd);
-    ResultSet rs = dbmd.getIndexInfo(null, null, "metadatatest", false, false);
-    assertEquals(1, rs.findColumn("TABLE_CAT"));
-    assertEquals(2, rs.findColumn("TABLE_SCHEM"));
-    assertEquals(3, rs.findColumn("TABLE_NAME"));
-    assertEquals(4, rs.findColumn("NON_UNIQUE"));
-    assertEquals(5, rs.findColumn("INDEX_QUALIFIER"));
-    assertEquals(6, rs.findColumn("INDEX_NAME"));
-    assertEquals(7, rs.findColumn("TYPE"));
-    assertEquals(8, rs.findColumn("ORDINAL_POSITION"));
-    assertEquals(9, rs.findColumn("COLUMN_NAME"));
-    assertEquals(10, rs.findColumn("ASC_OR_DESC"));
-    assertEquals(11, rs.findColumn("CARDINALITY"));
-    assertEquals(12, rs.findColumn("PAGES"));
-    assertEquals(13, rs.findColumn("FILTER_CONDITION"));
-
-    rs.close();
-  }
-
-  @MethodSource("data")
-  @ParameterizedTest(name = "binary = {0}")
-  void indexInfoColumnCase(BinaryMode binaryMode) throws SQLException {
-    initDatabaseMetaDataTest(binaryMode);
-    DatabaseMetaData dbmd = con.getMetaData();
-    assertNotNull(dbmd);
-
-    try (ResultSet rs = dbmd.getIndexInfo(null, null, "metadatatest", false, false)) {
-      ResultSetMetaData rsmd = rs.getMetaData();
-      for (int i = 1; i < rsmd.getColumnCount() + 1; i++) {
-        char[] chars = rsmd.getColumnName(i).toCharArray();
-        for (int j = 0; j < chars.length; j++) {
-          if (Character.isAlphabetic(chars[j])) {
-            assertTrue(Character.isUpperCase(chars[j]), "Column: " + rsmd.getColumnName(i) + " is not UPPER CASE");
-          }
-        }
-      }
-    }
-  }
-
-  @MethodSource("data")
-  @ParameterizedTest(name = "binary = {0}")
-  void notNullDomainColumn(BinaryMode binaryMode) throws SQLException {
-    initDatabaseMetaDataTest(binaryMode);
-    DatabaseMetaData dbmd = con.getMetaData();
-    ResultSet rs = dbmd.getColumns("", "", "domaintable", "");
-    assertTrue(rs.next());
-    assertEquals("id", rs.getString("COLUMN_NAME"));
-    assertEquals("NO", rs.getString("IS_NULLABLE"));
-    assertTrue(rs.next());
-    assertTrue(rs.next());
-    assertFalse(rs.next());
-  }
-
-  @MethodSource("data")
-  @ParameterizedTest(name = "binary = {0}")
-  void domainColumnSize(BinaryMode binaryMode) throws SQLException {
-    initDatabaseMetaDataTest(binaryMode);
-    DatabaseMetaData dbmd = con.getMetaData();
-    ResultSet rs = dbmd.getColumns("", "", "domaintable", "");
-    assertTrue(rs.next());
-    assertEquals("id", rs.getString("COLUMN_NAME"));
-    assertEquals(10, rs.getInt("COLUMN_SIZE"));
-    assertTrue(rs.next());
-    assertEquals("v", rs.getString("COLUMN_NAME"));
-    assertEquals(3, rs.getInt("COLUMN_SIZE"));
-    assertTrue(rs.next());
-    assertEquals("f", rs.getString("COLUMN_NAME"));
-    assertEquals(8, rs.getInt("COLUMN_SIZE"));
-    assertEquals(3, rs.getInt("DECIMAL_DIGITS"));
-
-  }
-
-  @MethodSource("data")
-  @ParameterizedTest(name = "binary = {0}")
-  void ascDescIndexInfo(BinaryMode binaryMode) throws SQLException {
-    initDatabaseMetaDataTest(binaryMode);
-    if (!TestUtil.haveMinimumServerVersion(con, ServerVersion.v8_3)) {
-      return;
-    }
-
-    Statement stmt = con.createStatement();
-    stmt.execute("CREATE INDEX idx_a_d ON metadatatest (id ASC, quest DESC)");
-    stmt.close();
-
-    DatabaseMetaData dbmd = con.getMetaData();
-    ResultSet rs = dbmd.getIndexInfo(null, null, "metadatatest", false, false);
-
-    assertTrue(rs.next());
-    assertEquals("idx_a_d", rs.getString("INDEX_NAME"));
-    assertEquals("id", rs.getString("COLUMN_NAME"));
-    assertEquals("A", rs.getString("ASC_OR_DESC"));
-
-    assertTrue(rs.next());
-    assertEquals("idx_a_d", rs.getString("INDEX_NAME"));
-    assertEquals("quest", rs.getString("COLUMN_NAME"));
-    assertEquals("D", rs.getString("ASC_OR_DESC"));
-  }
-
-  @MethodSource("data")
-  @ParameterizedTest(name = "binary = {0}")
-  void partialIndexInfo(BinaryMode binaryMode) throws SQLException {
-    initDatabaseMetaDataTest(binaryMode);
-    Statement stmt = con.createStatement();
-    stmt.execute("create index idx_p_name_id on metadatatest (name) where id > 5");
-    stmt.close();
-
-    DatabaseMetaData dbmd = con.getMetaData();
-    ResultSet rs = dbmd.getIndexInfo(null, null, "metadatatest", false, false);
-
-    assertTrue(rs.next());
-    assertEquals("idx_p_name_id", rs.getString("INDEX_NAME"));
-    assertEquals(1, rs.getInt("ORDINAL_POSITION"));
-    assertEquals("name", rs.getString("COLUMN_NAME"));
-    assertEquals("(id > 5)", rs.getString("FILTER_CONDITION"));
-    assertTrue(rs.getBoolean("NON_UNIQUE"));
-
-    rs.close();
-  }
-
-  @MethodSource("data")
-  @ParameterizedTest(name = "binary = {0}")
-  void tableTypes(BinaryMode binaryMode) throws SQLException {
-    initDatabaseMetaDataTest(binaryMode);
-    final List<String> expectedTableTypes = new ArrayList<>(Arrays.asList("FOREIGN TABLE", "INDEX", "PARTITIONED INDEX",
-        "MATERIALIZED VIEW", "PARTITIONED TABLE", "SEQUENCE", "SYSTEM INDEX", "SYSTEM TABLE", "SYSTEM TOAST INDEX",
-        "SYSTEM TOAST TABLE", "SYSTEM VIEW", "TABLE", "TEMPORARY INDEX", "TEMPORARY SEQUENCE", "TEMPORARY TABLE",
-        "TEMPORARY VIEW", "TYPE", "VIEW"));
-    final List<String> foundTableTypes = new ArrayList<>();
-
-    // Test that no exceptions are thrown
-    DatabaseMetaData dbmd = con.getMetaData();
-    assertNotNull(dbmd);
-
-    // Test that the table types returned are the same as those expected
-    ResultSet rs = dbmd.getTableTypes();
-    while (rs.next()) {
-      String tableType = new String(rs.getBytes(1));
-      foundTableTypes.add(tableType);
-    }
-    rs.close();
-    Collections.sort(expectedTableTypes);
-    Collections.sort(foundTableTypes);
-    assertEquals(foundTableTypes, expectedTableTypes, "The table types received from DatabaseMetaData should match the 18 expected types");
-  }
-
-  @MethodSource("data")
-  @ParameterizedTest(name = "binary = {0}")
-  void funcWithoutNames(BinaryMode binaryMode) throws SQLException {
-    initDatabaseMetaDataTest(binaryMode);
-    DatabaseMetaData dbmd = con.getMetaData();
-    assertNotNull(dbmd);
-    ResultSet rs = dbmd.getProcedureColumns(null, null, "f1", null);
-
-    assertTrue(rs.next());
-    assertEquals("returnValue", rs.getString(4));
-    assertEquals(DatabaseMetaData.procedureColumnReturn, rs.getInt(5));
-
-    assertTrue(rs.next());
-    assertEquals("$1", rs.getString(4));
-    assertEquals(DatabaseMetaData.procedureColumnIn, rs.getInt(5));
-    assertEquals(Types.INTEGER, rs.getInt(6));
-
-    assertTrue(rs.next());
-    assertEquals("$2", rs.getString(4));
-    assertEquals(DatabaseMetaData.procedureColumnIn, rs.getInt(5));
-    assertEquals(Types.VARCHAR, rs.getInt(6));
-
-    assertFalse(rs.next());
-
-    rs.close();
-  }
-
-  @MethodSource("data")
-  @ParameterizedTest(name = "binary = {0}")
-  void funcWithNames(BinaryMode binaryMode) throws SQLException {
-    initDatabaseMetaDataTest(binaryMode);
-    DatabaseMetaData dbmd = con.getMetaData();
-    ResultSet rs = dbmd.getProcedureColumns(null, null, "f2", null);
-
-    assertTrue(rs.next());
-
-    assertTrue(rs.next());
-    assertEquals("a", rs.getString(4));
-
-    assertTrue(rs.next());
-    assertEquals("b", rs.getString(4));
-
-    assertFalse(rs.next());
-
-    rs.close();
-  }
-
-  @MethodSource("data")
-  @ParameterizedTest(name = "binary = {0}")
-  void funcWithDirection(BinaryMode binaryMode) throws SQLException {
-    initDatabaseMetaDataTest(binaryMode);
-    DatabaseMetaData dbmd = con.getMetaData();
-    ResultSet rs = dbmd.getProcedureColumns(null, null, "f3", null);
-
-    assertTrue(rs.next());
-    assertEquals("a", rs.getString(4));
-    assertEquals(DatabaseMetaData.procedureColumnIn, rs.getInt(5));
-    assertEquals(Types.INTEGER, rs.getInt(6));
-
-    assertTrue(rs.next());
-    assertEquals("b", rs.getString(4));
-    assertEquals(DatabaseMetaData.procedureColumnInOut, rs.getInt(5));
-    assertEquals(Types.VARCHAR, rs.getInt(6));
-
-    assertTrue(rs.next());
-    assertEquals("c", rs.getString(4));
-    assertEquals(DatabaseMetaData.procedureColumnOut, rs.getInt(5));
-    assertEquals(Types.TIMESTAMP, rs.getInt(6));
-
-    rs.close();
-  }
-
-  @MethodSource("data")
-  @ParameterizedTest(name = "binary = {0}")
-  void funcReturningComposite(BinaryMode binaryMode) throws SQLException {
-    initDatabaseMetaDataTest(binaryMode);
-    DatabaseMetaData dbmd = con.getMetaData();
-    ResultSet rs = dbmd.getProcedureColumns(null, null, "f4", null);
-
-    assertTrue(rs.next());
-    assertEquals("$1", rs.getString(4));
-    assertEquals(DatabaseMetaData.procedureColumnIn, rs.getInt(5));
-    assertEquals(Types.INTEGER, rs.getInt(6));
-
-    assertTrue(rs.next());
-    assertEquals("id", rs.getString(4));
-    assertEquals(DatabaseMetaData.procedureColumnResult, rs.getInt(5));
-    assertEquals(Types.INTEGER, rs.getInt(6));
-
-    assertTrue(rs.next());
-    assertEquals("name", rs.getString(4));
-    assertEquals(DatabaseMetaData.procedureColumnResult, rs.getInt(5));
-    assertEquals(Types.VARCHAR, rs.getInt(6));
-
-    assertTrue(rs.next());
-    assertEquals("updated", rs.getString(4));
-    assertEquals(DatabaseMetaData.procedureColumnResult, rs.getInt(5));
-    assertEquals(Types.TIMESTAMP, rs.getInt(6));
-
-    assertTrue(rs.next());
-    assertEquals("colour", rs.getString(4));
-    assertEquals(DatabaseMetaData.procedureColumnResult, rs.getInt(5));
-    assertEquals(Types.VARCHAR, rs.getInt(6));
-
-    assertTrue(rs.next());
-    assertEquals("quest", rs.getString(4));
-    assertEquals(DatabaseMetaData.procedureColumnResult, rs.getInt(5));
-    assertEquals(Types.VARCHAR, rs.getInt(6));
-
-    assertFalse(rs.next());
-    rs.close();
-  }
-
-  @MethodSource("data")
-  @ParameterizedTest(name = "binary = {0}")
-  void funcReturningTable(BinaryMode binaryMode) throws Exception {
-    initDatabaseMetaDataTest(binaryMode);
-    if (!TestUtil.haveMinimumServerVersion(con, ServerVersion.v8_4)) {
-      return;
-    }
-    DatabaseMetaData dbmd = con.getMetaData();
-    ResultSet rs = dbmd.getProcedureColumns(null, null, "f5", null);
-    assertTrue(rs.next());
-    assertEquals("returnValue", rs.getString(4));
-    assertEquals(DatabaseMetaData.procedureColumnReturn, rs.getInt(5));
-    assertEquals(Types.INTEGER, rs.getInt(6));
-    assertTrue(rs.next());
-    assertEquals("i", rs.getString(4));
-    assertEquals(DatabaseMetaData.procedureColumnReturn, rs.getInt(5));
-    assertEquals(Types.INTEGER, rs.getInt(6));
-    assertFalse(rs.next());
-    rs.close();
-  }
-
-  @MethodSource("data")
-  @ParameterizedTest(name = "binary = {0}")
-  void versionColumns(BinaryMode binaryMode) throws SQLException {
-    initDatabaseMetaDataTest(binaryMode);
-    // At the moment just test that no exceptions are thrown KJ
-    DatabaseMetaData dbmd = con.getMetaData();
-    assertNotNull(dbmd);
-    ResultSet rs = dbmd.getVersionColumns(null, null, "pg_class");
-    rs.close();
-  }
-
-  @MethodSource("data")
-  @ParameterizedTest(name = "binary = {0}")
-  void bestRowIdentifier(BinaryMode binaryMode) throws SQLException {
-    initDatabaseMetaDataTest(binaryMode);
-    // At the moment just test that no exceptions are thrown KJ
-    DatabaseMetaData dbmd = con.getMetaData();
-    assertNotNull(dbmd);
-    ResultSet rs =
-        dbmd.getBestRowIdentifier(null, null, "pg_type", DatabaseMetaData.bestRowSession, false);
-    rs.close();
-  }
-
-  @MethodSource("data")
-  @ParameterizedTest(name = "binary = {0}")
-  void procedures(BinaryMode binaryMode) throws SQLException {
-    initDatabaseMetaDataTest(binaryMode);
-    // At the moment just test that no exceptions are thrown KJ
-    DatabaseMetaData dbmd = con.getMetaData();
-    assertNotNull(dbmd);
-    ResultSet rs = dbmd.getProcedures(null, null, null);
-    rs.close();
-  }
-
-  @MethodSource("data")
-  @ParameterizedTest(name = "binary = {0}")
-  void catalogs(BinaryMode binaryMode) throws SQLException {
-    initDatabaseMetaDataTest(binaryMode);
-    DatabaseMetaData dbmd = con.getMetaData();
-    try (ResultSet rs = dbmd.getCatalogs()) {
-      List<String> catalogs = new ArrayList<>();
-      while (rs.next()) {
-        catalogs.add(rs.getString("TABLE_CAT"));
-      }
-      List<String> sortedCatalogs = new ArrayList<>(catalogs);
-      Collections.sort(sortedCatalogs);
-
-      assertThat(
-          catalogs,
-          allOf(
-              hasItem("test"),
-              hasItem("postgres"),
-              equalTo(sortedCatalogs)
-          )
-      );
-    }
-  }
-
-  @MethodSource("data")
-  @ParameterizedTest(name = "binary = {0}")
-  void schemas(BinaryMode binaryMode) throws Exception {
-    initDatabaseMetaDataTest(binaryMode);
-    DatabaseMetaData dbmd = con.getMetaData();
-    assertNotNull(dbmd);
-
-    ResultSet rs = dbmd.getSchemas();
-    boolean foundPublic = false;
-    boolean foundEmpty = false;
-    boolean foundPGCatalog = false;
-    int count;
-
-    for (count = 0; rs.next(); count++) {
-      String schema = rs.getString("TABLE_SCHEM");
-      if ("public".equals(schema)) {
-        foundPublic = true;
-      } else if ("".equals(schema)) {
-        foundEmpty = true;
-      } else if ("pg_catalog".equals(schema)) {
-        foundPGCatalog = true;
-      }
-    }
-    rs.close();
-    assertTrue(count >= 2);
-    assertTrue(foundPublic);
-    assertTrue(foundPGCatalog);
-    assertFalse(foundEmpty);
-  }
-
-  @MethodSource("data")
-  @ParameterizedTest(name = "binary = {0}")
-  void escaping(BinaryMode binaryMode) throws SQLException {
-    initDatabaseMetaDataTest(binaryMode);
-    DatabaseMetaData dbmd = con.getMetaData();
-    ResultSet rs = dbmd.getTables(null, null, "a'", new String[]{"TABLE"});
-    assertTrue(rs.next());
-    rs = dbmd.getTables(null, null, "a\\\\", new String[]{"TABLE"});
-    assertTrue(rs.next());
-    rs = dbmd.getTables(null, null, "a\\", new String[]{"TABLE"});
-    assertFalse(rs.next());
-  }
-
-  @MethodSource("data")
-  @ParameterizedTest(name = "binary = {0}")
-  void searchStringEscape(BinaryMode binaryMode) throws Exception {
-    initDatabaseMetaDataTest(binaryMode);
-    DatabaseMetaData dbmd = con.getMetaData();
-    String pattern = dbmd.getSearchStringEscape() + "_";
-    PreparedStatement pstmt = con.prepareStatement("SELECT 'a' LIKE ?, '_' LIKE ?");
-    pstmt.setString(1, pattern);
-    pstmt.setString(2, pattern);
-    ResultSet rs = pstmt.executeQuery();
-    assertTrue(rs.next());
-    assertFalse(rs.getBoolean(1));
-    assertTrue(rs.getBoolean(2));
-    rs.close();
-    pstmt.close();
-  }
-
-  @MethodSource("data")
-  @ParameterizedTest(name = "binary = {0}")
-  void getUDTQualified(BinaryMode binaryMode) throws Exception {
-    initDatabaseMetaDataTest(binaryMode);
-    Statement stmt = null;
-    try {
-      stmt = con.createStatement();
-      stmt.execute("create schema jdbc");
-      stmt.execute("create type jdbc.testint8 as (i int8)");
-      DatabaseMetaData dbmd = con.getMetaData();
-      ResultSet rs = dbmd.getUDTs(null, null, "jdbc.testint8", null);
-      assertTrue(rs.next());
-      String cat;
-      String schema;
-      String typeName;
-      String remarks;
-      String className;
-      int dataType;
-      int baseType;
-
-      cat = rs.getString("type_cat");
-      schema = rs.getString("type_schem");
-      typeName = rs.getString("type_name");
-      className = rs.getString("class_name");
-      dataType = rs.getInt("data_type");
-      remarks = rs.getString("remarks");
-      baseType = rs.getInt("base_type");
-      assertEquals("testint8", typeName, "type name ");
-      assertEquals("jdbc", schema, "schema name ");
-
-      // now test to see if the fully qualified stuff works as planned
-      rs = dbmd.getUDTs("catalog", "public", "catalog.jdbc.testint8", null);
-      assertTrue(rs.next());
-      cat = rs.getString("type_cat");
-      schema = rs.getString("type_schem");
-      typeName = rs.getString("type_name");
-      className = rs.getString("class_name");
-      dataType = rs.getInt("data_type");
-      remarks = rs.getString("remarks");
-      baseType = rs.getInt("base_type");
-      assertEquals("testint8", typeName, "type name ");
-      assertEquals("jdbc", schema, "schema name ");
-    } finally {
-      try {
-        if (stmt != null) {
-          stmt.close();
-        }
-        stmt = con.createStatement();
-        stmt.execute("drop type jdbc.testint8");
-        stmt.execute("drop schema jdbc");
-      } catch (Exception ex) {
-      }
-    }
-
-  }
-
-  @MethodSource("data")
-  @ParameterizedTest(name = "binary = {0}")
-  void getUDT1(BinaryMode binaryMode) throws Exception {
-    initDatabaseMetaDataTest(binaryMode);
-    try {
-      Statement stmt = con.createStatement();
-      stmt.execute("create domain testint8 as int8");
-      stmt.execute("comment on domain testint8 is 'jdbc123'");
-      DatabaseMetaData dbmd = con.getMetaData();
-      ResultSet rs = dbmd.getUDTs(null, null, "testint8", null);
-      assertTrue(rs.next());
-
-      String cat = rs.getString("type_cat");
-      String schema = rs.getString("type_schem");
-      String typeName = rs.getString("type_name");
-      String className = rs.getString("class_name");
-      int dataType = rs.getInt("data_type");
-      String remarks = rs.getString("remarks");
-
-      int baseType = rs.getInt("base_type");
-      assertEquals(Types.BIGINT, baseType, "base type");
-      assertEquals(Types.DISTINCT, dataType, "data type");
-      assertEquals("testint8", typeName, "type name ");
-      assertEquals("jdbc123", remarks, "remarks");
-    } finally {
-      try {
-        Statement stmt = con.createStatement();
-        stmt.execute("drop domain testint8");
-      } catch (Exception ex) {
-      }
-    }
-  }
-
-  @MethodSource("data")
-  @ParameterizedTest(name = "binary = {0}")
-  void getUDT2(BinaryMode binaryMode) throws Exception {
-    initDatabaseMetaDataTest(binaryMode);
-    try {
-      Statement stmt = con.createStatement();
-      stmt.execute("create domain testint8 as int8");
-      stmt.execute("comment on domain testint8 is 'jdbc123'");
-      DatabaseMetaData dbmd = con.getMetaData();
-      ResultSet rs = dbmd.getUDTs(null, null, "testint8", new int[]{Types.DISTINCT, Types.STRUCT});
-      assertTrue(rs.next());
-      String typeName;
-
-      String cat = rs.getString("type_cat");
-      String schema = rs.getString("type_schem");
-      typeName = rs.getString("type_name");
-      String className = rs.getString("class_name");
-      int dataType = rs.getInt("data_type");
-      String remarks = rs.getString("remarks");
-
-      int baseType = rs.getInt("base_type");
-      assertEquals(Types.BIGINT, baseType, "base type");
-      assertEquals(Types.DISTINCT, dataType, "data type");
-      assertEquals("testint8", typeName, "type name ");
-      assertEquals("jdbc123", remarks, "remarks");
-    } finally {
-      try {
-        Statement stmt = con.createStatement();
-        stmt.execute("drop domain testint8");
-      } catch (Exception ex) {
-      }
-    }
-  }
-
-  @MethodSource("data")
-  @ParameterizedTest(name = "binary = {0}")
-  void getUDT3(BinaryMode binaryMode) throws Exception {
-    initDatabaseMetaDataTest(binaryMode);
-    try {
-      Statement stmt = con.createStatement();
-      stmt.execute("create domain testint8 as int8");
-      stmt.execute("comment on domain testint8 is 'jdbc123'");
-      DatabaseMetaData dbmd = con.getMetaData();
-      ResultSet rs = dbmd.getUDTs(null, null, "testint8", new int[]{Types.DISTINCT});
-      assertTrue(rs.next());
-
-      String cat = rs.getString("type_cat");
-      String schema = rs.getString("type_schem");
-      String typeName = rs.getString("type_name");
-      String className = rs.getString("class_name");
-      int dataType = rs.getInt("data_type");
-      String remarks = rs.getString("remarks");
-
-      int baseType = rs.getInt("base_type");
-      assertEquals(Types.BIGINT, baseType, "base type");
-      assertEquals(Types.DISTINCT, dataType, "data type");
-      assertEquals("testint8", typeName, "type name ");
-      assertEquals("jdbc123", remarks, "remarks");
-    } finally {
-      try {
-        Statement stmt = con.createStatement();
-        stmt.execute("drop domain testint8");
-      } catch (Exception ex) {
-      }
-    }
-  }
-
-  @MethodSource("data")
-  @ParameterizedTest(name = "binary = {0}")
-  void getUDT4(BinaryMode binaryMode) throws Exception {
-    initDatabaseMetaDataTest(binaryMode);
-    try {
-      Statement stmt = con.createStatement();
-      stmt.execute("create type testint8 as (i int8)");
-      DatabaseMetaData dbmd = con.getMetaData();
-      ResultSet rs = dbmd.getUDTs(null, null, "testint8", null);
-      assertTrue(rs.next());
-
-      String cat = rs.getString("type_cat");
-      String schema = rs.getString("type_schem");
-      String typeName = rs.getString("type_name");
-      String className = rs.getString("class_name");
-      int dataType = rs.getInt("data_type");
-      String remarks = rs.getString("remarks");
-
-      int baseType = rs.getInt("base_type");
-      assertTrue(rs.wasNull(), "base type");
-      assertEquals(Types.STRUCT, dataType, "data type");
-      assertEquals("testint8", typeName, "type name ");
-    } finally {
-      try {
-        Statement stmt = con.createStatement();
-        stmt.execute("drop type testint8");
-      } catch (Exception ex) {
-      }
-    }
-  }
-
-  @MethodSource("data")
-  @ParameterizedTest(name = "binary = {0}")
-  void types(BinaryMode binaryMode) throws SQLException {
-    initDatabaseMetaDataTest(binaryMode);
-    // https://www.postgresql.org/docs/8.2/static/datatype.html
-    List<String> stringTypeList = new ArrayList<>();
-    stringTypeList.addAll(Arrays.asList("bit",
-            "bool",
-            "box",
-            "bytea",
-            "char",
-            "cidr",
-            "circle",
-            "date",
-            "float4",
-            "float8",
-            "inet",
-            "int2",
-            "int4",
-            "int8",
-            "interval",
-            "line",
-            "lseg",
-            "macaddr",
-            "money",
-            "numeric",
-            "path",
-            "point",
-            "polygon",
-            "text",
-            "time",
-            "timestamp",
-            "timestamptz",
-            "timetz",
-            "varbit",
-            "varchar"));
-    if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v8_3)) {
-      stringTypeList.add("tsquery");
-      stringTypeList.add("tsvector");
-      stringTypeList.add("txid_snapshot");
-      stringTypeList.add("uuid");
-      stringTypeList.add("xml");
-    }
-    if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_2)) {
-      stringTypeList.add("json");
-    }
-    if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_4)) {
-      stringTypeList.add("jsonb");
-      stringTypeList.add("pg_lsn");
-    }
-
-    DatabaseMetaData dbmd = con.getMetaData();
-    ResultSet rs = dbmd.getTypeInfo();
-    List<String> types = new ArrayList<>();
-
-    while (rs.next()) {
-      types.add(rs.getString("TYPE_NAME"));
-    }
-    for (String typeName : stringTypeList) {
-      assertTrue(types.contains(typeName));
-    }
-  }
-
-  @MethodSource("data")
-  @ParameterizedTest(name = "binary = {0}")
-  void typeInfoSigned(BinaryMode binaryMode) throws SQLException {
-    initDatabaseMetaDataTest(binaryMode);
-    DatabaseMetaData dbmd = con.getMetaData();
-    ResultSet rs = dbmd.getTypeInfo();
-    while (rs.next()) {
-      if ("int4".equals(rs.getString("TYPE_NAME"))) {
-        assertFalse(rs.getBoolean("UNSIGNED_ATTRIBUTE"));
-      } else if ("float8".equals(rs.getString("TYPE_NAME"))) {
-        assertFalse(rs.getBoolean("UNSIGNED_ATTRIBUTE"));
-      } else if ("text".equals(rs.getString("TYPE_NAME"))) {
-        assertTrue(rs.getBoolean("UNSIGNED_ATTRIBUTE"));
-      }
-    }
-  }
-
-  @MethodSource("data")
-  @ParameterizedTest(name = "binary = {0}")
-  void typeInfoQuoting(BinaryMode binaryMode) throws SQLException {
-    initDatabaseMetaDataTest(binaryMode);
-    DatabaseMetaData dbmd = con.getMetaData();
-    ResultSet rs = dbmd.getTypeInfo();
-    while (rs.next()) {
-      if ("int4".equals(rs.getString("TYPE_NAME"))) {
-        assertNull(rs.getString("LITERAL_PREFIX"));
-      } else if ("text".equals(rs.getString("TYPE_NAME"))) {
-        assertEquals("'", rs.getString("LITERAL_PREFIX"));
-        assertEquals("'", rs.getString("LITERAL_SUFFIX"));
-      }
-    }
-  }
-
-  @MethodSource("data")
-  @ParameterizedTest(name = "binary = {0}")
-  void informationAboutArrayTypes(BinaryMode binaryMode) throws SQLException {
-    initDatabaseMetaDataTest(binaryMode);
-    DatabaseMetaData dbmd = con.getMetaData();
-    ResultSet rs = dbmd.getColumns("", "", "arraytable", "");
-    assertTrue(rs.next());
-    assertEquals("a", rs.getString("COLUMN_NAME"));
-    assertEquals(5, rs.getInt("COLUMN_SIZE"));
-    assertEquals(2, rs.getInt("DECIMAL_DIGITS"));
-    assertTrue(rs.next());
-    assertEquals("b", rs.getString("COLUMN_NAME"));
-    assertEquals(100, rs.getInt("COLUMN_SIZE"));
-    assertFalse(rs.next());
-  }
-
-  @MethodSource("data")
-  @ParameterizedTest(name = "binary = {0}")
-  void partitionedTablesIndex(BinaryMode binaryMode) throws SQLException {
-    initDatabaseMetaDataTest(binaryMode);
-    if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v11)) {
-      Statement stmt = null;
-      try {
-        stmt = con.createStatement();
-        stmt.execute(
-            "CREATE TABLE measurement (logdate date not null primary key,peaktemp int,unitsales int ) PARTITION BY RANGE (logdate);");
-        DatabaseMetaData dbmd = con.getMetaData();
-        ResultSet rs = dbmd.getPrimaryKeys("", "", "measurement");
+        rs = dbmd.getFunctionColumns(null, null, "f4", null);
         assertTrue(rs.next());
-        assertEquals("measurement_pkey", rs.getString(6));
 
-      } finally {
-        if (stmt != null) {
-          stmt.execute("drop table if exists measurement");
-          stmt.close();
-        }
-      }
-    }
-
-  }
-
-  @MethodSource("data")
-  @ParameterizedTest(name = "binary = {0}")
-  void partitionedTables(BinaryMode binaryMode) throws SQLException {
-    initDatabaseMetaDataTest(binaryMode);
-    if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v11)) {
-      Statement stmt = null;
-      try {
-        stmt = con.createStatement();
-        stmt.execute(
-            "CREATE TABLE measurement (logdate date not null primary key,peaktemp int,unitsales int ) PARTITION BY RANGE (logdate);");
-        DatabaseMetaData dbmd = con.getMetaData();
-        ResultSet rs = dbmd.getTables("", "", "measurement", new String[]{"PARTITIONED TABLE"});
         assertTrue(rs.next());
-        assertEquals("measurement", rs.getString("table_name"));
+        assertEquals("id", rs.getString(4));
+
+        assertTrue(rs.next());
+        assertEquals("updated", rs.getString(4));
+
         rs.close();
-        rs = dbmd.getPrimaryKeys("", "", "measurement");
-        assertTrue(rs.next());
-        assertEquals("measurement_pkey", rs.getString(6));
-
-      } finally {
-        if (stmt != null) {
-          stmt.execute("drop table if exists measurement");
-          stmt.close();
-        }
-      }
     }
-  }
 
-  @MethodSource("data")
-  @ParameterizedTest(name = "binary = {0}")
-  void identityColumns(BinaryMode binaryMode) throws SQLException {
-    initDatabaseMetaDataTest(binaryMode);
-    if ( TestUtil.haveMinimumServerVersion(con, ServerVersion.v10) ) {
-      Statement stmt = null;
-      try {
-        stmt = con.createStatement();
-        stmt.execute("CREATE TABLE test_new ("
-            + "id int GENERATED BY DEFAULT AS IDENTITY PRIMARY KEY,"
-            + "payload text)");
+    @MethodSource("data")
+    @ParameterizedTest(name = "binary = {0}")
+    void serialColumns(BinaryMode binaryMode) throws SQLException {
+        initDatabaseMetaDataTest(binaryMode);
         DatabaseMetaData dbmd = con.getMetaData();
-        ResultSet rs = dbmd.getColumns("", "", "test_new", "id");
+        ResultSet rs = dbmd.getColumns(null, null, "sercoltest", null);
+        int rownum = 0;
+        while (rs.next()) {
+            assertEquals("sercoltest", rs.getString("TABLE_NAME"));
+            assertEquals(rownum + 1, rs.getInt("ORDINAL_POSITION"));
+            if (rownum == 0) {
+                assertEquals("int4", rs.getString("TYPE_NAME"));
+
+            } else if (rownum == 1) {
+                assertEquals("serial", rs.getString("TYPE_NAME"));
+                assertTrue(rs.getBoolean("IS_AUTOINCREMENT"));
+            } else if (rownum == 2) {
+                assertEquals("bigserial", rs.getString("TYPE_NAME"));
+                assertTrue(rs.getBoolean("IS_AUTOINCREMENT"));
+            }
+
+            rownum++;
+        }
+        assertEquals(3, rownum);
+        rs.close();
+    }
+
+    @MethodSource("data")
+    @ParameterizedTest(name = "binary = {0}")
+    void columnPrivileges(BinaryMode binaryMode) throws SQLException {
+        initDatabaseMetaDataTest(binaryMode);
+        // At the moment just test that no exceptions are thrown KJ
+        DatabaseMetaData dbmd = con.getMetaData();
+        assertNotNull(dbmd);
+        ResultSet rs = dbmd.getColumnPrivileges(null, null, "pg_statistic", null);
+        rs.close();
+    }
+
+    /*
+     * Helper function - this logic is used several times to test relation privileges
+     */
+    public void relationPrivilegesHelper(String relationName) throws SQLException {
+        // Query PG catalog for privileges
+        DatabaseMetaData dbmd = con.getMetaData();
+        assertNotNull(dbmd);
+        ResultSet rs = dbmd.getTablePrivileges(null, null, relationName);
+
+        // Parse result to check if table/view owner has select privileges
+        boolean foundSelect = false;
+        while (rs.next()) {
+            if (rs.getString("GRANTEE").equals(TestUtil.getUser())
+                    && "SELECT".equals(rs.getString("PRIVILEGE"))) {
+                foundSelect = true;
+            }
+        }
+        rs.close();
+
+        // Check test condition
+        assertTrue(foundSelect,
+                "Couldn't find SELECT priv on relation "
+                        + relationName + "  for " + TestUtil.getUser());
+    }
+
+    @MethodSource("data")
+    @ParameterizedTest(name = "binary = {0}")
+    void tablePrivileges(BinaryMode binaryMode) throws SQLException {
+        initDatabaseMetaDataTest(binaryMode);
+        relationPrivilegesHelper("metadatatest");
+    }
+
+    @MethodSource("data")
+    @ParameterizedTest(name = "binary = {0}")
+    void viewPrivileges(BinaryMode binaryMode) throws SQLException {
+        initDatabaseMetaDataTest(binaryMode);
+        relationPrivilegesHelper("viewtest");
+    }
+
+    @MethodSource("data")
+    @ParameterizedTest(name = "binary = {0}")
+    void materializedViewPrivileges(BinaryMode binaryMode) throws SQLException {
+        initDatabaseMetaDataTest(binaryMode);
+        Assumptions.assumeTrue(TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_3));
+        TestUtil.createMaterializedView(con, "matviewtest", "SELECT id, quest FROM metadatatest");
+        try {
+            relationPrivilegesHelper("matviewtest");
+        } finally {
+            TestUtil.dropMaterializedView(con, "matviewtest");
+        }
+    }
+
+    @MethodSource("data")
+    @ParameterizedTest(name = "binary = {0}")
+    void noTablePrivileges(BinaryMode binaryMode) throws SQLException {
+        initDatabaseMetaDataTest(binaryMode);
+        Statement stmt = con.createStatement();
+        stmt.execute("REVOKE ALL ON metadatatest FROM PUBLIC");
+        stmt.execute("REVOKE ALL ON metadatatest FROM " + TestUtil.getUser());
+        DatabaseMetaData dbmd = con.getMetaData();
+        ResultSet rs = dbmd.getTablePrivileges(null, null, "metadatatest");
+        assertFalse(rs.next());
+    }
+
+    @MethodSource("data")
+    @ParameterizedTest(name = "binary = {0}")
+    void primaryKeys(BinaryMode binaryMode) throws SQLException {
+        initDatabaseMetaDataTest(binaryMode);
+        // At the moment just test that no exceptions are thrown KJ
+        DatabaseMetaData dbmd = con.getMetaData();
+        assertNotNull(dbmd);
+        ResultSet rs = dbmd.getPrimaryKeys(null, null, "pg_class");
+        rs.close();
+    }
+
+    @MethodSource("data")
+    @ParameterizedTest(name = "binary = {0}")
+    void indexInfo(BinaryMode binaryMode) throws SQLException {
+        initDatabaseMetaDataTest(binaryMode);
+        Statement stmt = con.createStatement();
+        stmt.execute("create index idx_id on metadatatest (id)");
+        stmt.execute("create index idx_func_single on metadatatest (upper(colour))");
+        stmt.execute("create unique index idx_un_id on metadatatest(id)");
+        stmt.execute("create index idx_func_multi on metadatatest (upper(colour), upper(quest))");
+        stmt.execute("create index idx_func_mixed on metadatatest (colour, upper(quest))");
+
+        DatabaseMetaData dbmd = con.getMetaData();
+        assertNotNull(dbmd);
+        ResultSet rs = dbmd.getIndexInfo(null, null, "metadatatest", false, false);
+
+        assertTrue(rs.next());
+        assertEquals("idx_un_id", rs.getString("INDEX_NAME"));
+        assertEquals(1, rs.getInt("ORDINAL_POSITION"));
+        assertEquals("id", rs.getString("COLUMN_NAME"));
+        assertFalse(rs.getBoolean("NON_UNIQUE"));
+
+        assertTrue(rs.next());
+        assertEquals("idx_func_mixed", rs.getString("INDEX_NAME"));
+        assertEquals(1, rs.getInt("ORDINAL_POSITION"));
+        assertEquals("colour", rs.getString("COLUMN_NAME"));
+
+        assertTrue(rs.next());
+        assertEquals("idx_func_mixed", rs.getString("INDEX_NAME"));
+        assertEquals(2, rs.getInt("ORDINAL_POSITION"));
+        assertEquals("upper(quest)", rs.getString("COLUMN_NAME"));
+
+        assertTrue(rs.next());
+        assertEquals("idx_func_multi", rs.getString("INDEX_NAME"));
+        assertEquals(1, rs.getInt("ORDINAL_POSITION"));
+        assertEquals("upper(colour)", rs.getString("COLUMN_NAME"));
+
+        assertTrue(rs.next());
+        assertEquals("idx_func_multi", rs.getString("INDEX_NAME"));
+        assertEquals(2, rs.getInt("ORDINAL_POSITION"));
+        assertEquals("upper(quest)", rs.getString("COLUMN_NAME"));
+
+        assertTrue(rs.next());
+        assertEquals("idx_func_single", rs.getString("INDEX_NAME"));
+        assertEquals(1, rs.getInt("ORDINAL_POSITION"));
+        assertEquals("upper(colour)", rs.getString("COLUMN_NAME"));
+
+        assertTrue(rs.next());
+        assertEquals("idx_id", rs.getString("INDEX_NAME"));
+        assertEquals(1, rs.getInt("ORDINAL_POSITION"));
+        assertEquals("id", rs.getString("COLUMN_NAME"));
+        assertTrue(rs.getBoolean("NON_UNIQUE"));
+
+        assertFalse(rs.next());
+
+        rs.close();
+    }
+
+    /**
+     * Order defined at
+     * https://docs.oracle.com/javase/8/docs/api/java/sql/DatabaseMetaData.html#getIndexInfo-java.lang.String-java.lang.String-java.lang.String-boolean-boolean-
+     */
+    @MethodSource("data")
+    @ParameterizedTest(name = "binary = {0}")
+    void indexInfoColumnOrder(BinaryMode binaryMode) throws SQLException {
+        initDatabaseMetaDataTest(binaryMode);
+        DatabaseMetaData dbmd = con.getMetaData();
+        assertNotNull(dbmd);
+        ResultSet rs = dbmd.getIndexInfo(null, null, "metadatatest", false, false);
+        assertEquals(1, rs.findColumn("TABLE_CAT"));
+        assertEquals(2, rs.findColumn("TABLE_SCHEM"));
+        assertEquals(3, rs.findColumn("TABLE_NAME"));
+        assertEquals(4, rs.findColumn("NON_UNIQUE"));
+        assertEquals(5, rs.findColumn("INDEX_QUALIFIER"));
+        assertEquals(6, rs.findColumn("INDEX_NAME"));
+        assertEquals(7, rs.findColumn("TYPE"));
+        assertEquals(8, rs.findColumn("ORDINAL_POSITION"));
+        assertEquals(9, rs.findColumn("COLUMN_NAME"));
+        assertEquals(10, rs.findColumn("ASC_OR_DESC"));
+        assertEquals(11, rs.findColumn("CARDINALITY"));
+        assertEquals(12, rs.findColumn("PAGES"));
+        assertEquals(13, rs.findColumn("FILTER_CONDITION"));
+
+        rs.close();
+    }
+
+    @MethodSource("data")
+    @ParameterizedTest(name = "binary = {0}")
+    void indexInfoColumnCase(BinaryMode binaryMode) throws SQLException {
+        initDatabaseMetaDataTest(binaryMode);
+        DatabaseMetaData dbmd = con.getMetaData();
+        assertNotNull(dbmd);
+
+        try (ResultSet rs = dbmd.getIndexInfo(null, null, "metadatatest", false, false)) {
+            ResultSetMetaData rsmd = rs.getMetaData();
+            for (int i = 1; i < rsmd.getColumnCount() + 1; i++) {
+                char[] chars = rsmd.getColumnName(i).toCharArray();
+                for (int j = 0; j < chars.length; j++) {
+                    if (Character.isAlphabetic(chars[j])) {
+                        assertTrue(Character.isUpperCase(chars[j]), "Column: " + rsmd.getColumnName(i) + " is not UPPER CASE");
+                    }
+                }
+            }
+        }
+    }
+
+    @MethodSource("data")
+    @ParameterizedTest(name = "binary = {0}")
+    void notNullDomainColumn(BinaryMode binaryMode) throws SQLException {
+        initDatabaseMetaDataTest(binaryMode);
+        DatabaseMetaData dbmd = con.getMetaData();
+        ResultSet rs = dbmd.getColumns("", "", "domaintable", "");
         assertTrue(rs.next());
         assertEquals("id", rs.getString("COLUMN_NAME"));
-        assertTrue(rs.getBoolean("IS_AUTOINCREMENT"));
+        assertEquals("NO", rs.getString("IS_NULLABLE"));
+        assertTrue(rs.next());
+        assertTrue(rs.next());
+        assertFalse(rs.next());
+    }
 
-      } finally {
-        if ( stmt != null ) {
-          stmt.execute("drop table test_new");
-          stmt.close();
+    @MethodSource("data")
+    @ParameterizedTest(name = "binary = {0}")
+    void domainColumnSize(BinaryMode binaryMode) throws SQLException {
+        initDatabaseMetaDataTest(binaryMode);
+        DatabaseMetaData dbmd = con.getMetaData();
+        // try-with-resources: the original leaked the ResultSet, especially when an assertion failed
+        try (ResultSet rs = dbmd.getColumns("", "", "domaintable", "")) {
+            assertTrue(rs.next());
+            assertEquals("id", rs.getString("COLUMN_NAME"));
+            assertEquals(10, rs.getInt("COLUMN_SIZE"));
+            assertTrue(rs.next());
+            assertEquals("v", rs.getString("COLUMN_NAME"));
+            assertEquals(3, rs.getInt("COLUMN_SIZE"));
+            assertTrue(rs.next());
+            assertEquals("f", rs.getString("COLUMN_NAME"));
+            assertEquals(8, rs.getInt("COLUMN_SIZE"));
+            assertEquals(3, rs.getInt("DECIMAL_DIGITS"));
+            // domaintable has exactly three columns (notNullDomainColumn asserts the same)
+            assertFalse(rs.next());
+        }
+    }
+
+    @MethodSource("data")
+    @ParameterizedTest(name = "binary = {0}")
+    void ascDescIndexInfo(BinaryMode binaryMode) throws SQLException {
+        initDatabaseMetaDataTest(binaryMode);
+        if (!TestUtil.haveMinimumServerVersion(con, ServerVersion.v8_3)) {
+            return;
         }
-      }
-    }
-  }
 
-  @MethodSource("data")
-  @ParameterizedTest(name = "binary = {0}")
-  void generatedColumns(BinaryMode binaryMode) throws SQLException {
-    initDatabaseMetaDataTest(binaryMode);
-    if ( TestUtil.haveMinimumServerVersion(con, ServerVersion.v12) ) {
-      DatabaseMetaData dbmd = con.getMetaData();
-      ResultSet rs = dbmd.getColumns("", "", "employee", "gross_pay");
-      assertTrue(rs.next());
-      assertEquals("gross_pay", rs.getString("COLUMN_NAME"));
-      assertTrue(rs.getBoolean("IS_GENERATEDCOLUMN"));
-    }
-  }
+        Statement stmt = con.createStatement();
+        stmt.execute("CREATE INDEX idx_a_d ON metadatatest (id ASC, quest DESC)");
+        stmt.close();
 
-  @MethodSource("data")
-  @ParameterizedTest(name = "binary = {0}")
-  void getSQLKeywords(BinaryMode binaryMode) throws SQLException {
-    initDatabaseMetaDataTest(binaryMode);
-    DatabaseMetaData dbmd = con.getMetaData();
-    String keywords = dbmd.getSQLKeywords();
+        DatabaseMetaData dbmd = con.getMetaData();
+        ResultSet rs = dbmd.getIndexInfo(null, null, "metadatatest", false, false);
 
-    // We don't want SQL:2003 keywords returned, so check for that.
-    String sql2003 = "a,abs,absolute,action,ada,add,admin,after,all,allocate,alter,always,and,any,are,"
-        + "array,as,asc,asensitive,assertion,assignment,asymmetric,at,atomic,attribute,attributes,"
-        + "authorization,avg,before,begin,bernoulli,between,bigint,binary,blob,boolean,both,breadth,by,"
-        + "c,call,called,cardinality,cascade,cascaded,case,cast,catalog,catalog_name,ceil,ceiling,chain,"
-        + "char,char_length,character,character_length,character_set_catalog,character_set_name,"
-        + "character_set_schema,characteristics,characters,check,checked,class_origin,clob,close,"
-        + "coalesce,cobol,code_units,collate,collation,collation_catalog,collation_name,collation_schema,"
-        + "collect,column,column_name,command_function,command_function_code,commit,committed,condition,"
-        + "condition_number,connect,connection_name,constraint,constraint_catalog,constraint_name,"
-        + "constraint_schema,constraints,constructors,contains,continue,convert,corr,corresponding,count,"
-        + "covar_pop,covar_samp,create,cross,cube,cume_dist,current,current_collation,current_date,"
-        + "current_default_transform_group,current_path,current_role,current_time,current_timestamp,"
-        + "current_transform_group_for_type,current_user,cursor,cursor_name,cycle,data,date,datetime_interval_code,"
-        + "datetime_interval_precision,day,deallocate,dec,decimal,declare,default,defaults,deferrable,"
-        + "deferred,defined,definer,degree,delete,dense_rank,depth,deref,derived,desc,describe,"
-        + "descriptor,deterministic,diagnostics,disconnect,dispatch,distinct,domain,double,drop,dynamic,"
-        + "dynamic_function,dynamic_function_code,each,element,else,end,end-exec,equals,escape,every,"
-        + "except,exception,exclude,excluding,exec,execute,exists,exp,external,extract,false,fetch,filter,"
-        + "final,first,float,floor,following,for,foreign,fortran,found,free,from,full,function,fusion,"
-        + "g,general,get,global,go,goto,grant,granted,group,grouping,having,hierarchy,hold,hour,identity,"
-        + "immediate,implementation,in,including,increment,indicator,initially,inner,inout,input,"
-        + "insensitive,insert,instance,instantiable,int,integer,intersect,intersection,interval,into,"
-        + "invoker,is,isolation,join,k,key,key_member,key_type,language,large,last,lateral,leading,left,"
-        + "length,level,like,ln,local,localtime,localtimestamp,locator,lower,m,map,match,matched,max,"
-        + "maxvalue,member,merge,message_length,message_octet_length,message_text,method,min,minute,"
-        + "minvalue,mod,modifies,module,month,more,multiset,mumps,name,names,national,natural,nchar,"
-        + "nclob,nesting,new,next,no,none,normalize,normalized,not,null,nullable,nullif,nulls,number,"
-        + "numeric,object,octet_length,octets,of,old,on,only,open,option,options,or,order,ordering,"
-        + "ordinality,others,out,outer,output,over,overlaps,overlay,overriding,pad,parameter,parameter_mode,"
-        + "parameter_name,parameter_ordinal_position,parameter_specific_catalog,parameter_specific_name,"
-        + "parameter_specific_schema,partial,partition,pascal,path,percent_rank,percentile_cont,"
-        + "percentile_disc,placing,pli,position,power,preceding,precision,prepare,preserve,primary,"
-        + "prior,privileges,procedure,public,range,rank,read,reads,real,recursive,ref,references,"
-        + "referencing,regr_avgx,regr_avgy,regr_count,regr_intercept,regr_r2,regr_slope,regr_sxx,"
-        + "regr_sxy,regr_syy,relative,release,repeatable,restart,result,return,returned_cardinality,"
-        + "returned_length,returned_octet_length,returned_sqlstate,returns,revoke,right,role,rollback,"
-        + "rollup,routine,routine_catalog,routine_name,routine_schema,row,row_count,row_number,rows,"
-        + "savepoint,scale,schema,schema_name,scope_catalog,scope_name,scope_schema,scroll,search,second,"
-        + "section,security,select,self,sensitive,sequence,serializable,server_name,session,session_user,"
-        + "set,sets,similar,simple,size,smallint,some,source,space,specific,specific_name,specifictype,sql,"
-        + "sqlexception,sqlstate,sqlwarning,sqrt,start,state,statement,static,stddev_pop,stddev_samp,"
-        + "structure,style,subclass_origin,submultiset,substring,sum,symmetric,system,system_user,table,"
-        + "table_name,tablesample,temporary,then,ties,time,timestamp,timezone_hour,timezone_minute,to,"
-        + "top_level_count,trailing,transaction,transaction_active,transactions_committed,"
-        + "transactions_rolled_back,transform,transforms,translate,translation,treat,trigger,trigger_catalog,"
-        + "trigger_name,trigger_schema,trim,true,type,uescape,unbounded,uncommitted,under,union,unique,"
-        + "unknown,unnamed,unnest,update,upper,usage,user,user_defined_type_catalog,user_defined_type_code,"
-        + "user_defined_type_name,user_defined_type_schema,using,value,values,var_pop,var_samp,varchar,"
-        + "varying,view,when,whenever,where,width_bucket,window,with,within,without,work,write,year,zone";
+        assertTrue(rs.next());
+        assertEquals("idx_a_d", rs.getString("INDEX_NAME"));
+        assertEquals("id", rs.getString("COLUMN_NAME"));
+        assertEquals("A", rs.getString("ASC_OR_DESC"));
 
-    String[] excludeSQL2003 = sql2003.split(",");
-    String[] returned = keywords.split(",");
-    Set<String> returnedSet = new HashSet<>(Arrays.asList(returned));
-    assertEquals(returnedSet.size(), returned.length, "Returned keywords should be unique");
-
-    for (String s : excludeSQL2003) {
-      assertFalse(returnedSet.contains(s), "Keyword from SQL:2003 \"" + s + "\" found");
+        assertTrue(rs.next());
+        assertEquals("idx_a_d", rs.getString("INDEX_NAME"));
+        assertEquals("quest", rs.getString("COLUMN_NAME"));
+        assertEquals("D", rs.getString("ASC_OR_DESC"));
     }
 
-    if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_0)) {
-      assertTrue(returnedSet.contains("reindex"), "reindex should be in keywords");
-    }
-  }
+    @MethodSource("data")
+    @ParameterizedTest(name = "binary = {0}")
+    void partialIndexInfo(BinaryMode binaryMode) throws SQLException {
+        initDatabaseMetaDataTest(binaryMode);
+        Statement stmt = con.createStatement();
+        stmt.execute("create index idx_p_name_id on metadatatest (name) where id > 5");
+        stmt.close();
 
-  @MethodSource("data")
-  @ParameterizedTest(name = "binary = {0}")
-  void functionColumns(BinaryMode binaryMode) throws SQLException {
-    initDatabaseMetaDataTest(binaryMode);
-    if (!TestUtil.haveMinimumServerVersion(con, ServerVersion.v8_4)) {
-      return;
+        DatabaseMetaData dbmd = con.getMetaData();
+        ResultSet rs = dbmd.getIndexInfo(null, null, "metadatatest", false, false);
+
+        assertTrue(rs.next());
+        assertEquals("idx_p_name_id", rs.getString("INDEX_NAME"));
+        assertEquals(1, rs.getInt("ORDINAL_POSITION"));
+        assertEquals("name", rs.getString("COLUMN_NAME"));
+        assertEquals("(id > 5)", rs.getString("FILTER_CONDITION"));
+        assertTrue(rs.getBoolean("NON_UNIQUE"));
+
+        rs.close();
     }
 
-    DatabaseMetaData dbmd = con.getMetaData();
-    ResultSet rs = dbmd.getFunctionColumns(null, null, "f1", null);
+    @MethodSource("data")
+    @ParameterizedTest(name = "binary = {0}")
+    void tableTypes(BinaryMode binaryMode) throws SQLException {
+        initDatabaseMetaDataTest(binaryMode);
+        final List<String> expectedTableTypes = new ArrayList<>(Arrays.asList("FOREIGN TABLE", "INDEX", "PARTITIONED INDEX",
+                "MATERIALIZED VIEW", "PARTITIONED TABLE", "SEQUENCE", "SYSTEM INDEX", "SYSTEM TABLE", "SYSTEM TOAST INDEX",
+                "SYSTEM TOAST TABLE", "SYSTEM VIEW", "TABLE", "TEMPORARY INDEX", "TEMPORARY SEQUENCE", "TEMPORARY TABLE",
+                "TEMPORARY VIEW", "TYPE", "VIEW"));
+        final List<String> foundTableTypes = new ArrayList<>();
 
-    ResultSetMetaData rsmd = rs.getMetaData();
-    assertEquals(17, rsmd.getColumnCount());
-    assertEquals("FUNCTION_CAT", rsmd.getColumnName(1));
-    assertEquals("FUNCTION_SCHEM", rsmd.getColumnName(2));
-    assertEquals("FUNCTION_NAME", rsmd.getColumnName(3));
-    assertEquals("COLUMN_NAME", rsmd.getColumnName(4));
-    assertEquals("COLUMN_TYPE", rsmd.getColumnName(5));
-    assertEquals("DATA_TYPE", rsmd.getColumnName(6));
-    assertEquals("TYPE_NAME", rsmd.getColumnName(7));
-    assertEquals("PRECISION", rsmd.getColumnName(8));
-    assertEquals("LENGTH", rsmd.getColumnName(9));
-    assertEquals("SCALE", rsmd.getColumnName(10));
-    assertEquals("RADIX", rsmd.getColumnName(11));
-    assertEquals("NULLABLE", rsmd.getColumnName(12));
-    assertEquals("REMARKS", rsmd.getColumnName(13));
-    assertEquals("CHAR_OCTET_LENGTH", rsmd.getColumnName(14));
-    assertEquals("ORDINAL_POSITION", rsmd.getColumnName(15));
-    assertEquals("IS_NULLABLE", rsmd.getColumnName(16));
-    assertEquals("SPECIFIC_NAME", rsmd.getColumnName(17));
+        // Test that no exceptions are thrown
+        DatabaseMetaData dbmd = con.getMetaData();
+        assertNotNull(dbmd);
 
-    assertTrue(rs.next());
-    assertNull(rs.getString(1));
-    assertEquals("public", rs.getString(2));
-    assertEquals("f1", rs.getString(3));
-    assertEquals("returnValue", rs.getString(4));
-    assertEquals(DatabaseMetaData.functionReturn, rs.getInt(5));
-    assertEquals(Types.INTEGER, rs.getInt(6));
-    assertEquals("int4", rs.getString(7));
-    assertEquals(0, rs.getInt(15));
-
-    assertTrue(rs.next());
-    assertNull(rs.getString(1));
-    assertEquals("public", rs.getString(2));
-    assertEquals("f1", rs.getString(3));
-    assertEquals("$1", rs.getString(4));
-    assertEquals(DatabaseMetaData.functionColumnIn, rs.getInt(5));
-    assertEquals(Types.INTEGER, rs.getInt(6));
-    assertEquals("int4", rs.getString(7));
-    assertEquals(1, rs.getInt(15));
-
-    assertTrue(rs.next());
-    assertNull(rs.getString(1));
-    assertEquals("public", rs.getString(2));
-    assertEquals("f1", rs.getString(3));
-    assertEquals("$2", rs.getString(4));
-    assertEquals(DatabaseMetaData.functionColumnIn, rs.getInt(5));
-    assertEquals(Types.VARCHAR, rs.getInt(6));
-    assertEquals("varchar", rs.getString(7));
-    assertEquals(2, rs.getInt(15));
-
-    assertFalse(rs.next());
-
-    rs.close();
-  }
-
-  @MethodSource("data")
-  @ParameterizedTest(name = "binary = {0}")
-  void smallSerialColumns(BinaryMode binaryMode) throws SQLException {
-    initDatabaseMetaDataTest(binaryMode);
-    Assumptions.assumeTrue(TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_2));
-    TestUtil.createTable(con, "smallserial_test", "a smallserial");
-
-    DatabaseMetaData dbmd = con.getMetaData();
-    ResultSet rs = dbmd.getColumns(null, null, "smallserial_test", "a");
-    assertTrue(rs.next());
-    assertEquals("smallserial_test", rs.getString("TABLE_NAME"));
-    assertEquals("a", rs.getString("COLUMN_NAME"));
-    assertEquals(Types.SMALLINT, rs.getInt("DATA_TYPE"));
-    assertEquals("smallserial", rs.getString("TYPE_NAME"));
-    assertTrue(rs.getBoolean("IS_AUTOINCREMENT"));
-    assertEquals("nextval('smallserial_test_a_seq'::regclass)", rs.getString("COLUMN_DEF"));
-    assertFalse(rs.next());
-    rs.close();
-
-    TestUtil.dropTable(con, "smallserial_test");
-  }
-
-  @MethodSource("data")
-  @ParameterizedTest(name = "binary = {0}")
-  void smallSerialSequenceLikeColumns(BinaryMode binaryMode) throws SQLException {
-    initDatabaseMetaDataTest(binaryMode);
-    Statement stmt = con.createStatement();
-    // This is the equivalent of the smallserial, not the actual smallserial
-    stmt.execute("CREATE SEQUENCE smallserial_test_a_seq;\n"
-        + "CREATE TABLE smallserial_test (\n"
-        + "    a smallint NOT NULL DEFAULT nextval('smallserial_test_a_seq')\n"
-        + ");\n"
-        + "ALTER SEQUENCE smallserial_test_a_seq OWNED BY smallserial_test.a;");
-
-    DatabaseMetaData dbmd = con.getMetaData();
-    ResultSet rs = dbmd.getColumns(null, null, "smallserial_test", "a");
-    assertTrue(rs.next());
-    assertEquals("smallserial_test", rs.getString("TABLE_NAME"));
-    assertEquals("a", rs.getString("COLUMN_NAME"));
-    assertEquals(Types.SMALLINT, rs.getInt("DATA_TYPE"));
-    if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_2)) {
-      // in Pg 9.2+ it behaves like smallserial
-      assertEquals("smallserial", rs.getString("TYPE_NAME"));
-    } else {
-      assertEquals("int2", rs.getString("TYPE_NAME"));
+        // Test that the table types returned are the same as those expected
+        ResultSet rs = dbmd.getTableTypes();
+        while (rs.next()) {
+            String tableType = new String(rs.getBytes(1));
+            foundTableTypes.add(tableType);
+        }
+        rs.close();
+        Collections.sort(expectedTableTypes);
+        Collections.sort(foundTableTypes);
+        assertEquals(foundTableTypes, expectedTableTypes, "The table types received from DatabaseMetaData should match the 18 expected types");
     }
-    assertTrue(rs.getBoolean("IS_AUTOINCREMENT"));
-    assertEquals("nextval('smallserial_test_a_seq'::regclass)", rs.getString("COLUMN_DEF"));
-    assertFalse(rs.next());
-    rs.close();
 
-    stmt.execute("DROP TABLE smallserial_test");
-    stmt.close();
-  }
+    @MethodSource("data")
+    @ParameterizedTest(name = "binary = {0}")
+    void funcWithoutNames(BinaryMode binaryMode) throws SQLException {
+        initDatabaseMetaDataTest(binaryMode);
+        DatabaseMetaData dbmd = con.getMetaData();
+        assertNotNull(dbmd);
+        ResultSet rs = dbmd.getProcedureColumns(null, null, "f1", null);
 
-  @MethodSource("data")
-  @ParameterizedTest(name = "binary = {0}")
-  void upperCaseMetaDataLabels(BinaryMode binaryMode) throws SQLException {
-    initDatabaseMetaDataTest(binaryMode);
-    ResultSet rs = con.getMetaData().getTables(null, null, null, null);
-    ResultSetMetaData rsmd = rs.getMetaData();
+        assertTrue(rs.next());
+        assertEquals("returnValue", rs.getString(4));
+        assertEquals(DatabaseMetaData.procedureColumnReturn, rs.getInt(5));
 
-    assertEquals("TABLE_CAT", rsmd.getColumnName(1));
-    assertEquals("TABLE_SCHEM", rsmd.getColumnName(2));
-    assertEquals("TABLE_NAME", rsmd.getColumnName(3));
-    assertEquals("TABLE_TYPE", rsmd.getColumnName(4));
-    assertEquals("REMARKS", rsmd.getColumnName(5));
+        assertTrue(rs.next());
+        assertEquals("$1", rs.getString(4));
+        assertEquals(DatabaseMetaData.procedureColumnIn, rs.getInt(5));
+        assertEquals(Types.INTEGER, rs.getInt(6));
 
-  }
+        assertTrue(rs.next());
+        assertEquals("$2", rs.getString(4));
+        assertEquals(DatabaseMetaData.procedureColumnIn, rs.getInt(5));
+        assertEquals(Types.VARCHAR, rs.getInt(6));
+
+        assertFalse(rs.next());
+
+        rs.close();
+    }
+
+    @MethodSource("data")
+    @ParameterizedTest(name = "binary = {0}")
+    void funcWithNames(BinaryMode binaryMode) throws SQLException {
+        initDatabaseMetaDataTest(binaryMode);
+        DatabaseMetaData dbmd = con.getMetaData();
+        // try-with-resources so the ResultSet is closed even when an assertion fails
+        try (ResultSet rs = dbmd.getProcedureColumns(null, null, "f2", null)) {
+            // first row is the function's return value; only its presence is checked
+            assertTrue(rs.next());
+
+            assertTrue(rs.next());
+            assertEquals("a", rs.getString(4));
+
+            assertTrue(rs.next());
+            assertEquals("b", rs.getString(4));
+
+            assertFalse(rs.next());
+        }
+    }
+
+    @MethodSource("data")
+    @ParameterizedTest(name = "binary = {0}")
+    void funcWithDirection(BinaryMode binaryMode) throws SQLException {
+        initDatabaseMetaDataTest(binaryMode);
+        DatabaseMetaData dbmd = con.getMetaData();
+        // f3 has one IN, one INOUT and one OUT parameter; the ResultSet was leaked before
+        try (ResultSet rs = dbmd.getProcedureColumns(null, null, "f3", null)) {
+            assertTrue(rs.next());
+            assertEquals("a", rs.getString(4));
+            assertEquals(DatabaseMetaData.procedureColumnIn, rs.getInt(5));
+            assertEquals(Types.INTEGER, rs.getInt(6));
+
+            assertTrue(rs.next());
+            assertEquals("b", rs.getString(4));
+            assertEquals(DatabaseMetaData.procedureColumnInOut, rs.getInt(5));
+            assertEquals(Types.VARCHAR, rs.getInt(6));
+
+            assertTrue(rs.next());
+            assertEquals("c", rs.getString(4));
+            assertEquals(DatabaseMetaData.procedureColumnOut, rs.getInt(5));
+            assertEquals(Types.TIMESTAMP, rs.getInt(6));
+        }
+    }
+
+    @MethodSource("data")
+    @ParameterizedTest(name = "binary = {0}")
+    void funcReturningComposite(BinaryMode binaryMode) throws SQLException {
+        initDatabaseMetaDataTest(binaryMode);
+        DatabaseMetaData dbmd = con.getMetaData();
+        // f4 takes one int argument and returns a composite row type;
+        // try-with-resources replaces the manual close, which was skipped on assertion failure
+        try (ResultSet rs = dbmd.getProcedureColumns(null, null, "f4", null)) {
+            assertTrue(rs.next());
+            assertEquals("$1", rs.getString(4));
+            assertEquals(DatabaseMetaData.procedureColumnIn, rs.getInt(5));
+            assertEquals(Types.INTEGER, rs.getInt(6));
+
+            assertTrue(rs.next());
+            assertEquals("id", rs.getString(4));
+            assertEquals(DatabaseMetaData.procedureColumnResult, rs.getInt(5));
+            assertEquals(Types.INTEGER, rs.getInt(6));
+
+            assertTrue(rs.next());
+            assertEquals("name", rs.getString(4));
+            assertEquals(DatabaseMetaData.procedureColumnResult, rs.getInt(5));
+            assertEquals(Types.VARCHAR, rs.getInt(6));
+
+            assertTrue(rs.next());
+            assertEquals("updated", rs.getString(4));
+            assertEquals(DatabaseMetaData.procedureColumnResult, rs.getInt(5));
+            assertEquals(Types.TIMESTAMP, rs.getInt(6));
+
+            assertTrue(rs.next());
+            assertEquals("colour", rs.getString(4));
+            assertEquals(DatabaseMetaData.procedureColumnResult, rs.getInt(5));
+            assertEquals(Types.VARCHAR, rs.getInt(6));
+
+            assertTrue(rs.next());
+            assertEquals("quest", rs.getString(4));
+            assertEquals(DatabaseMetaData.procedureColumnResult, rs.getInt(5));
+            assertEquals(Types.VARCHAR, rs.getInt(6));
+
+            assertFalse(rs.next());
+        }
+    }
+
+    @MethodSource("data")
+    @ParameterizedTest(name = "binary = {0}")
+    void funcReturningTable(BinaryMode binaryMode) throws Exception {
+        initDatabaseMetaDataTest(binaryMode);
+        // RETURNS TABLE is only available on server >= 8.4
+        if (!TestUtil.haveMinimumServerVersion(con, ServerVersion.v8_4)) {
+            return;
+        }
+        DatabaseMetaData dbmd = con.getMetaData();
+        // try-with-resources: the manual close was skipped whenever an assertion failed
+        try (ResultSet rs = dbmd.getProcedureColumns(null, null, "f5", null)) {
+            assertTrue(rs.next());
+            assertEquals("returnValue", rs.getString(4));
+            assertEquals(DatabaseMetaData.procedureColumnReturn, rs.getInt(5));
+            assertEquals(Types.INTEGER, rs.getInt(6));
+            assertTrue(rs.next());
+            assertEquals("i", rs.getString(4));
+            assertEquals(DatabaseMetaData.procedureColumnReturn, rs.getInt(5));
+            assertEquals(Types.INTEGER, rs.getInt(6));
+            assertFalse(rs.next());
+        }
+    }
+
+    @MethodSource("data")
+    @ParameterizedTest(name = "binary = {0}")
+    void versionColumns(BinaryMode binaryMode) throws SQLException {
+        initDatabaseMetaDataTest(binaryMode);
+        // Smoke test only: getVersionColumns must complete without throwing. KJ
+        DatabaseMetaData metaData = con.getMetaData();
+        assertNotNull(metaData);
+        try (ResultSet ignored = metaData.getVersionColumns(null, null, "pg_class")) {
+            // no row-level assertions yet
+        }
+    }
+
+    @MethodSource("data")
+    @ParameterizedTest(name = "binary = {0}")
+    void bestRowIdentifier(BinaryMode binaryMode) throws SQLException {
+        initDatabaseMetaDataTest(binaryMode);
+        // Smoke test only: getBestRowIdentifier must complete without throwing. KJ
+        DatabaseMetaData metaData = con.getMetaData();
+        assertNotNull(metaData);
+        try (ResultSet ignored = metaData.getBestRowIdentifier(null, null, "pg_type",
+                DatabaseMetaData.bestRowSession, false)) {
+            // no row-level assertions yet
+        }
+    }
+
+    @MethodSource("data")
+    @ParameterizedTest(name = "binary = {0}")
+    void procedures(BinaryMode binaryMode) throws SQLException {
+        initDatabaseMetaDataTest(binaryMode);
+        // Smoke test only: getProcedures must complete without throwing. KJ
+        DatabaseMetaData metaData = con.getMetaData();
+        assertNotNull(metaData);
+        try (ResultSet ignored = metaData.getProcedures(null, null, null)) {
+            // no row-level assertions yet
+        }
+    }
+
+    @MethodSource("data")
+    @ParameterizedTest(name = "binary = {0}")
+    void catalogs(BinaryMode binaryMode) throws SQLException {
+        initDatabaseMetaDataTest(binaryMode);
+        DatabaseMetaData dbmd = con.getMetaData();
+        try (ResultSet rs = dbmd.getCatalogs()) {
+            List<String> found = new ArrayList<>();
+            while (rs.next()) {
+                found.add(rs.getString("TABLE_CAT"));
+            }
+            // The list must contain both known databases and already be in sorted order.
+            List<String> inSortedOrder = new ArrayList<>(found);
+            Collections.sort(inSortedOrder);
+            assertThat(
+                    found,
+                    allOf(
+                            hasItem("test"),
+                            hasItem("postgres"),
+                            equalTo(inSortedOrder)
+                    )
+            );
+        }
+    }
+
+    @MethodSource("data")
+    @ParameterizedTest(name = "binary = {0}")
+    void schemas(BinaryMode binaryMode) throws Exception {
+        initDatabaseMetaDataTest(binaryMode);
+        DatabaseMetaData dbmd = con.getMetaData();
+        assertNotNull(dbmd);
+
+        boolean foundPublic = false;
+        boolean foundEmpty = false;
+        boolean foundPGCatalog = false;
+        int count = 0;
+
+        // try-with-resources: the original leaked the ResultSet when an assertion failed
+        try (ResultSet rs = dbmd.getSchemas()) {
+            while (rs.next()) {
+                count++;
+                String schema = rs.getString("TABLE_SCHEM");
+                if ("public".equals(schema)) {
+                    foundPublic = true;
+                } else if ("".equals(schema)) {
+                    foundEmpty = true;
+                } else if ("pg_catalog".equals(schema)) {
+                    foundPGCatalog = true;
+                }
+            }
+        }
+        assertTrue(count >= 2);
+        assertTrue(foundPublic);
+        assertTrue(foundPGCatalog);
+        // an empty-string schema name would indicate broken metadata
+        assertFalse(foundEmpty);
+    }
+
+    @MethodSource("data")
+    @ParameterizedTest(name = "binary = {0}")
+    void escaping(BinaryMode binaryMode) throws SQLException {
+        initDatabaseMetaDataTest(binaryMode);
+        DatabaseMetaData dbmd = con.getMetaData();
+        // Each ResultSet is now closed via try-with-resources; the originals were all leaked.
+        try (ResultSet rs = dbmd.getTables(null, null, "a'", new String[]{"TABLE"})) {
+            assertTrue(rs.next());
+        }
+        try (ResultSet rs = dbmd.getTables(null, null, "a\\\\", new String[]{"TABLE"})) {
+            assertTrue(rs.next());
+        }
+        // a lone backslash is an incomplete escape and must match nothing
+        try (ResultSet rs = dbmd.getTables(null, null, "a\\", new String[]{"TABLE"})) {
+            assertFalse(rs.next());
+        }
+    }
+
+    @MethodSource("data")
+    @ParameterizedTest(name = "binary = {0}")
+    void searchStringEscape(BinaryMode binaryMode) throws Exception {
+        initDatabaseMetaDataTest(binaryMode);
+        DatabaseMetaData dbmd = con.getMetaData();
+        // An escaped '_' must match only a literal underscore, not any single character.
+        String pattern = dbmd.getSearchStringEscape() + "_";
+        // try-with-resources: statement and result set were leaked on assertion failure
+        try (PreparedStatement pstmt = con.prepareStatement("SELECT 'a' LIKE ?, '_' LIKE ?")) {
+            pstmt.setString(1, pattern);
+            pstmt.setString(2, pattern);
+            try (ResultSet rs = pstmt.executeQuery()) {
+                assertTrue(rs.next());
+                assertFalse(rs.getBoolean(1));
+                assertTrue(rs.getBoolean(2));
+            }
+        }
+    }
+
+    @MethodSource("data")
+    @ParameterizedTest(name = "binary = {0}")
+    void getUDTQualified(BinaryMode binaryMode) throws Exception {
+        initDatabaseMetaDataTest(binaryMode);
+        try (Statement stmt = con.createStatement()) {
+            stmt.execute("create schema jdbc");
+            stmt.execute("create type jdbc.testint8 as (i int8)");
+            DatabaseMetaData dbmd = con.getMetaData();
+            // lookup with the schema embedded in the type-name pattern
+            try (ResultSet rs = dbmd.getUDTs(null, null, "jdbc.testint8", null)) {
+                assertTrue(rs.next());
+                // exercise the remaining columns; only name and schema have pinned values
+                rs.getString("type_cat");
+                rs.getString("class_name");
+                rs.getInt("data_type");
+                rs.getString("remarks");
+                rs.getInt("base_type");
+                assertEquals("testint8", rs.getString("type_name"), "type name ");
+                assertEquals("jdbc", rs.getString("type_schem"), "schema name ");
+            }
+            // now test to see if the fully qualified stuff works as planned
+            try (ResultSet rs = dbmd.getUDTs("catalog", "public", "catalog.jdbc.testint8", null)) {
+                assertTrue(rs.next());
+                rs.getString("type_cat");
+                rs.getString("class_name");
+                rs.getInt("data_type");
+                rs.getString("remarks");
+                rs.getInt("base_type");
+                assertEquals("testint8", rs.getString("type_name"), "type name ");
+                assertEquals("jdbc", rs.getString("type_schem"), "schema name ");
+            }
+        } finally {
+            // best-effort cleanup so later tests start from a clean schema
+            try (Statement cleanup = con.createStatement()) {
+                cleanup.execute("drop type jdbc.testint8");
+                cleanup.execute("drop schema jdbc");
+            } catch (Exception ignored) {
+                // objects may not exist if setup failed part-way
+            }
+        }
+    }
+
+    @MethodSource("data")
+    @ParameterizedTest(name = "binary = {0}")
+    void getUDT1(BinaryMode binaryMode) throws Exception {
+        initDatabaseMetaDataTest(binaryMode);
+        try (Statement stmt = con.createStatement()) {
+            stmt.execute("create domain testint8 as int8");
+            stmt.execute("comment on domain testint8 is 'jdbc123'");
+            DatabaseMetaData dbmd = con.getMetaData();
+            // a domain is reported as Types.DISTINCT with its underlying base type
+            try (ResultSet rs = dbmd.getUDTs(null, null, "testint8", null)) {
+                assertTrue(rs.next());
+                // exercise the remaining columns even though their values are not pinned
+                rs.getString("type_cat");
+                rs.getString("type_schem");
+                String typeName = rs.getString("type_name");
+                rs.getString("class_name");
+                int dataType = rs.getInt("data_type");
+                String remarks = rs.getString("remarks");
+                int baseType = rs.getInt("base_type");
+                assertEquals(Types.BIGINT, baseType, "base type");
+                assertEquals(Types.DISTINCT, dataType, "data type");
+                assertEquals("testint8", typeName, "type name ");
+                assertEquals("jdbc123", remarks, "remarks");
+            }
+        } finally {
+            // best-effort cleanup; the domain may not exist if setup failed
+            try (Statement cleanup = con.createStatement()) {
+                cleanup.execute("drop domain testint8");
+            } catch (Exception ignored) {
+            }
+        }
+    }
+
+    @MethodSource("data")
+    @ParameterizedTest(name = "binary = {0}")
+    void getUDT2(BinaryMode binaryMode) throws Exception {
+        initDatabaseMetaDataTest(binaryMode);
+        try (Statement stmt = con.createStatement()) {
+            stmt.execute("create domain testint8 as int8");
+            stmt.execute("comment on domain testint8 is 'jdbc123'");
+            DatabaseMetaData dbmd = con.getMetaData();
+            // filter on both DISTINCT and STRUCT; the domain must still be found
+            try (ResultSet rs = dbmd.getUDTs(null, null, "testint8", new int[]{Types.DISTINCT, Types.STRUCT})) {
+                assertTrue(rs.next());
+                // exercise the remaining columns even though their values are not pinned
+                rs.getString("type_cat");
+                rs.getString("type_schem");
+                String typeName = rs.getString("type_name");
+                rs.getString("class_name");
+                int dataType = rs.getInt("data_type");
+                String remarks = rs.getString("remarks");
+                int baseType = rs.getInt("base_type");
+                assertEquals(Types.BIGINT, baseType, "base type");
+                assertEquals(Types.DISTINCT, dataType, "data type");
+                assertEquals("testint8", typeName, "type name ");
+                assertEquals("jdbc123", remarks, "remarks");
+            }
+        } finally {
+            // best-effort cleanup; the domain may not exist if setup failed
+            try (Statement cleanup = con.createStatement()) {
+                cleanup.execute("drop domain testint8");
+            } catch (Exception ignored) {
+            }
+        }
+    }
+
+    @MethodSource("data")
+    @ParameterizedTest(name = "binary = {0}")
+    void getUDT3(BinaryMode binaryMode) throws Exception {
+        initDatabaseMetaDataTest(binaryMode);
+        try {
+            Statement stmt = con.createStatement();
+            stmt.execute("create domain testint8 as int8");
+            stmt.execute("comment on domain testint8 is 'jdbc123'");
+            DatabaseMetaData dbmd = con.getMetaData();
+            ResultSet rs = dbmd.getUDTs(null, null, "testint8", new int[]{Types.DISTINCT});
+            assertTrue(rs.next());
+
+            String cat = rs.getString("type_cat");
+            String schema = rs.getString("type_schem");
+            String typeName = rs.getString("type_name");
+            String className = rs.getString("class_name");
+            int dataType = rs.getInt("data_type");
+            String remarks = rs.getString("remarks");
+
+            int baseType = rs.getInt("base_type");
+            assertEquals(Types.BIGINT, baseType, "base type");
+            assertEquals(Types.DISTINCT, dataType, "data type");
+            assertEquals("testint8", typeName, "type name ");
+            assertEquals("jdbc123", remarks, "remarks");
+        } finally {
+            try {
+                Statement stmt = con.createStatement();
+                stmt.execute("drop domain testint8");
+            } catch (Exception ignored) { // best-effort cleanup; domain may not exist
+            }
+        }
+    }
+
+    @MethodSource("data")
+    @ParameterizedTest(name = "binary = {0}")
+    void getUDT4(BinaryMode binaryMode) throws Exception {
+        initDatabaseMetaDataTest(binaryMode);
+        try {
+            Statement stmt = con.createStatement();
+            stmt.execute("create type testint8 as (i int8)");
+            DatabaseMetaData dbmd = con.getMetaData();
+            ResultSet rs = dbmd.getUDTs(null, null, "testint8", null);
+            assertTrue(rs.next());
+
+            String cat = rs.getString("type_cat");
+            String schema = rs.getString("type_schem");
+            String typeName = rs.getString("type_name");
+            String className = rs.getString("class_name");
+            int dataType = rs.getInt("data_type");
+            String remarks = rs.getString("remarks");
+
+            int baseType = rs.getInt("base_type");
+            assertTrue(rs.wasNull(), "base type");
+            assertEquals(Types.STRUCT, dataType, "data type");
+            assertEquals("testint8", typeName, "type name");
+        } finally {
+            try {
+                Statement stmt = con.createStatement();
+                stmt.execute("drop type testint8");
+            } catch (Exception ignored) { // best-effort cleanup; type may not exist
+            }
+        }
+    }
+
+    @MethodSource("data")
+    @ParameterizedTest(name = "binary = {0}")
+    void types(BinaryMode binaryMode) throws SQLException {
+        initDatabaseMetaDataTest(binaryMode);
+        // https://www.postgresql.org/docs/8.2/static/datatype.html
+        List<String> stringTypeList = new ArrayList<>();
+        stringTypeList.addAll(Arrays.asList("bit",
+                "bool",
+                "box",
+                "bytea",
+                "char",
+                "cidr",
+                "circle",
+                "date",
+                "float4",
+                "float8",
+                "inet",
+                "int2",
+                "int4",
+                "int8",
+                "interval",
+                "line",
+                "lseg",
+                "macaddr",
+                "money",
+                "numeric",
+                "path",
+                "point",
+                "polygon",
+                "text",
+                "time",
+                "timestamp",
+                "timestamptz",
+                "timetz",
+                "varbit",
+                "varchar"));
+        if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v8_3)) {
+            stringTypeList.add("tsquery");
+            stringTypeList.add("tsvector");
+            stringTypeList.add("txid_snapshot");
+            stringTypeList.add("uuid");
+            stringTypeList.add("xml");
+        }
+        if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_2)) {
+            stringTypeList.add("json");
+        }
+        if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_4)) {
+            stringTypeList.add("jsonb");
+            stringTypeList.add("pg_lsn");
+        }
+
+        DatabaseMetaData dbmd = con.getMetaData();
+        ResultSet rs = dbmd.getTypeInfo();
+        List<String> types = new ArrayList<>();
+
+        while (rs.next()) {
+            types.add(rs.getString("TYPE_NAME"));
+        }
+        for (String typeName : stringTypeList) {
+            assertTrue(types.contains(typeName));
+        }
+    }
+
+    @MethodSource("data")
+    @ParameterizedTest(name = "binary = {0}")
+    void typeInfoSigned(BinaryMode binaryMode) throws SQLException {
+        initDatabaseMetaDataTest(binaryMode);
+        DatabaseMetaData dbmd = con.getMetaData();
+        ResultSet rs = dbmd.getTypeInfo();
+        while (rs.next()) {
+            if ("int4".equals(rs.getString("TYPE_NAME"))) {
+                assertFalse(rs.getBoolean("UNSIGNED_ATTRIBUTE"));
+            } else if ("float8".equals(rs.getString("TYPE_NAME"))) {
+                assertFalse(rs.getBoolean("UNSIGNED_ATTRIBUTE"));
+            } else if ("text".equals(rs.getString("TYPE_NAME"))) {
+                assertTrue(rs.getBoolean("UNSIGNED_ATTRIBUTE"));
+            }
+        }
+    }
+
+    @MethodSource("data")
+    @ParameterizedTest(name = "binary = {0}")
+    void typeInfoQuoting(BinaryMode binaryMode) throws SQLException {
+        initDatabaseMetaDataTest(binaryMode);
+        DatabaseMetaData dbmd = con.getMetaData();
+        ResultSet rs = dbmd.getTypeInfo();
+        while (rs.next()) {
+            if ("int4".equals(rs.getString("TYPE_NAME"))) {
+                assertNull(rs.getString("LITERAL_PREFIX"));
+            } else if ("text".equals(rs.getString("TYPE_NAME"))) {
+                assertEquals("'", rs.getString("LITERAL_PREFIX"));
+                assertEquals("'", rs.getString("LITERAL_SUFFIX"));
+            }
+        }
+    }
+
+    @MethodSource("data")
+    @ParameterizedTest(name = "binary = {0}")
+    void informationAboutArrayTypes(BinaryMode binaryMode) throws SQLException {
+        initDatabaseMetaDataTest(binaryMode);
+        DatabaseMetaData dbmd = con.getMetaData();
+        ResultSet rs = dbmd.getColumns("", "", "arraytable", "");
+        assertTrue(rs.next());
+        assertEquals("a", rs.getString("COLUMN_NAME"));
+        assertEquals(5, rs.getInt("COLUMN_SIZE"));
+        assertEquals(2, rs.getInt("DECIMAL_DIGITS"));
+        assertTrue(rs.next());
+        assertEquals("b", rs.getString("COLUMN_NAME"));
+        assertEquals(100, rs.getInt("COLUMN_SIZE"));
+        assertFalse(rs.next());
+    }
+
+    @MethodSource("data")
+    @ParameterizedTest(name = "binary = {0}")
+    void partitionedTablesIndex(BinaryMode binaryMode) throws SQLException {
+        initDatabaseMetaDataTest(binaryMode);
+        if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v11)) {
+            Statement stmt = null;
+            try {
+                stmt = con.createStatement();
+                stmt.execute(
+                        "CREATE TABLE measurement (logdate date not null primary key,peaktemp int,unitsales int ) PARTITION BY RANGE (logdate);");
+                DatabaseMetaData dbmd = con.getMetaData();
+                ResultSet rs = dbmd.getPrimaryKeys("", "", "measurement");
+                assertTrue(rs.next());
+                assertEquals("measurement_pkey", rs.getString(6));
+
+            } finally {
+                if (stmt != null) {
+                    stmt.execute("drop table if exists measurement");
+                    stmt.close();
+                }
+            }
+        }
+
+    }
+
+    @MethodSource("data")
+    @ParameterizedTest(name = "binary = {0}")
+    void partitionedTables(BinaryMode binaryMode) throws SQLException {
+        initDatabaseMetaDataTest(binaryMode);
+        if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v11)) {
+            Statement stmt = null;
+            try {
+                stmt = con.createStatement();
+                stmt.execute(
+                        "CREATE TABLE measurement (logdate date not null primary key,peaktemp int,unitsales int ) PARTITION BY RANGE (logdate);");
+                DatabaseMetaData dbmd = con.getMetaData();
+                ResultSet rs = dbmd.getTables("", "", "measurement", new String[]{"PARTITIONED TABLE"});
+                assertTrue(rs.next());
+                assertEquals("measurement", rs.getString("table_name"));
+                rs.close();
+                rs = dbmd.getPrimaryKeys("", "", "measurement");
+                assertTrue(rs.next());
+                assertEquals("measurement_pkey", rs.getString(6));
+
+            } finally {
+                if (stmt != null) {
+                    stmt.execute("drop table if exists measurement");
+                    stmt.close();
+                }
+            }
+        }
+    }
+
+    @MethodSource("data")
+    @ParameterizedTest(name = "binary = {0}")
+    void identityColumns(BinaryMode binaryMode) throws SQLException {
+        initDatabaseMetaDataTest(binaryMode);
+        if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v10)) {
+            Statement stmt = null;
+            try {
+                stmt = con.createStatement();
+                stmt.execute("CREATE TABLE test_new ("
+                        + "id int GENERATED BY DEFAULT AS IDENTITY PRIMARY KEY,"
+                        + "payload text)");
+                DatabaseMetaData dbmd = con.getMetaData();
+                ResultSet rs = dbmd.getColumns("", "", "test_new", "id");
+                assertTrue(rs.next());
+                assertEquals("id", rs.getString("COLUMN_NAME"));
+                assertTrue(rs.getBoolean("IS_AUTOINCREMENT"));
+
+            } finally {
+                if (stmt != null) {
+                    stmt.execute("drop table test_new");
+                    stmt.close();
+                }
+            }
+        }
+    }
+
+    @MethodSource("data")
+    @ParameterizedTest(name = "binary = {0}")
+    void generatedColumns(BinaryMode binaryMode) throws SQLException {
+        initDatabaseMetaDataTest(binaryMode);
+        if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v12)) {
+            DatabaseMetaData dbmd = con.getMetaData();
+            ResultSet rs = dbmd.getColumns("", "", "employee", "gross_pay");
+            assertTrue(rs.next());
+            assertEquals("gross_pay", rs.getString("COLUMN_NAME"));
+            assertTrue(rs.getBoolean("IS_GENERATEDCOLUMN"));
+        }
+    }
+
+    @MethodSource("data")
+    @ParameterizedTest(name = "binary = {0}")
+    void getSQLKeywords(BinaryMode binaryMode) throws SQLException {
+        initDatabaseMetaDataTest(binaryMode);
+        DatabaseMetaData dbmd = con.getMetaData();
+        String keywords = dbmd.getSQLKeywords();
+
+        // We don't want SQL:2003 keywords returned, so check for that.
+        String sql2003 = "a,abs,absolute,action,ada,add,admin,after,all,allocate,alter,always,and,any,are,"
+                + "array,as,asc,asensitive,assertion,assignment,asymmetric,at,atomic,attribute,attributes,"
+                + "authorization,avg,before,begin,bernoulli,between,bigint,binary,blob,boolean,both,breadth,by,"
+                + "c,call,called,cardinality,cascade,cascaded,case,cast,catalog,catalog_name,ceil,ceiling,chain,"
+                + "char,char_length,character,character_length,character_set_catalog,character_set_name,"
+                + "character_set_schema,characteristics,characters,check,checked,class_origin,clob,close,"
+                + "coalesce,cobol,code_units,collate,collation,collation_catalog,collation_name,collation_schema,"
+                + "collect,column,column_name,command_function,command_function_code,commit,committed,condition,"
+                + "condition_number,connect,connection_name,constraint,constraint_catalog,constraint_name,"
+                + "constraint_schema,constraints,constructors,contains,continue,convert,corr,corresponding,count,"
+                + "covar_pop,covar_samp,create,cross,cube,cume_dist,current,current_collation,current_date,"
+                + "current_default_transform_group,current_path,current_role,current_time,current_timestamp,"
+                + "current_transform_group_for_type,current_user,cursor,cursor_name,cycle,data,date,datetime_interval_code,"
+                + "datetime_interval_precision,day,deallocate,dec,decimal,declare,default,defaults,deferrable,"
+                + "deferred,defined,definer,degree,delete,dense_rank,depth,deref,derived,desc,describe,"
+                + "descriptor,deterministic,diagnostics,disconnect,dispatch,distinct,domain,double,drop,dynamic,"
+                + "dynamic_function,dynamic_function_code,each,element,else,end,end-exec,equals,escape,every,"
+                + "except,exception,exclude,excluding,exec,execute,exists,exp,external,extract,false,fetch,filter,"
+                + "final,first,float,floor,following,for,foreign,fortran,found,free,from,full,function,fusion,"
+                + "g,general,get,global,go,goto,grant,granted,group,grouping,having,hierarchy,hold,hour,identity,"
+                + "immediate,implementation,in,including,increment,indicator,initially,inner,inout,input,"
+                + "insensitive,insert,instance,instantiable,int,integer,intersect,intersection,interval,into,"
+                + "invoker,is,isolation,join,k,key,key_member,key_type,language,large,last,lateral,leading,left,"
+                + "length,level,like,ln,local,localtime,localtimestamp,locator,lower,m,map,match,matched,max,"
+                + "maxvalue,member,merge,message_length,message_octet_length,message_text,method,min,minute,"
+                + "minvalue,mod,modifies,module,month,more,multiset,mumps,name,names,national,natural,nchar,"
+                + "nclob,nesting,new,next,no,none,normalize,normalized,not,null,nullable,nullif,nulls,number,"
+                + "numeric,object,octet_length,octets,of,old,on,only,open,option,options,or,order,ordering,"
+                + "ordinality,others,out,outer,output,over,overlaps,overlay,overriding,pad,parameter,parameter_mode,"
+                + "parameter_name,parameter_ordinal_position,parameter_specific_catalog,parameter_specific_name,"
+                + "parameter_specific_schema,partial,partition,pascal,path,percent_rank,percentile_cont,"
+                + "percentile_disc,placing,pli,position,power,preceding,precision,prepare,preserve,primary,"
+                + "prior,privileges,procedure,public,range,rank,read,reads,real,recursive,ref,references,"
+                + "referencing,regr_avgx,regr_avgy,regr_count,regr_intercept,regr_r2,regr_slope,regr_sxx,"
+                + "regr_sxy,regr_syy,relative,release,repeatable,restart,result,return,returned_cardinality,"
+                + "returned_length,returned_octet_length,returned_sqlstate,returns,revoke,right,role,rollback,"
+                + "rollup,routine,routine_catalog,routine_name,routine_schema,row,row_count,row_number,rows,"
+                + "savepoint,scale,schema,schema_name,scope_catalog,scope_name,scope_schema,scroll,search,second,"
+                + "section,security,select,self,sensitive,sequence,serializable,server_name,session,session_user,"
+                + "set,sets,similar,simple,size,smallint,some,source,space,specific,specific_name,specifictype,sql,"
+                + "sqlexception,sqlstate,sqlwarning,sqrt,start,state,statement,static,stddev_pop,stddev_samp,"
+                + "structure,style,subclass_origin,submultiset,substring,sum,symmetric,system,system_user,table,"
+                + "table_name,tablesample,temporary,then,ties,time,timestamp,timezone_hour,timezone_minute,to,"
+                + "top_level_count,trailing,transaction,transaction_active,transactions_committed,"
+                + "transactions_rolled_back,transform,transforms,translate,translation,treat,trigger,trigger_catalog,"
+                + "trigger_name,trigger_schema,trim,true,type,uescape,unbounded,uncommitted,under,union,unique,"
+                + "unknown,unnamed,unnest,update,upper,usage,user,user_defined_type_catalog,user_defined_type_code,"
+                + "user_defined_type_name,user_defined_type_schema,using,value,values,var_pop,var_samp,varchar,"
+                + "varying,view,when,whenever,where,width_bucket,window,with,within,without,work,write,year,zone";
+
+        String[] excludeSQL2003 = sql2003.split(",");
+        String[] returned = keywords.split(",");
+        Set<String> returnedSet = new HashSet<>(Arrays.asList(returned));
+        assertEquals(returnedSet.size(), returned.length, "Returned keywords should be unique");
+
+        for (String s : excludeSQL2003) {
+            assertFalse(returnedSet.contains(s), "Keyword from SQL:2003 \"" + s + "\" found");
+        }
+
+        if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_0)) {
+            assertTrue(returnedSet.contains("reindex"), "reindex should be in keywords");
+        }
+    }
+
+    @MethodSource("data")
+    @ParameterizedTest(name = "binary = {0}")
+    void functionColumns(BinaryMode binaryMode) throws SQLException {
+        initDatabaseMetaDataTest(binaryMode);
+        if (!TestUtil.haveMinimumServerVersion(con, ServerVersion.v8_4)) {
+            return;
+        }
+
+        DatabaseMetaData dbmd = con.getMetaData();
+        ResultSet rs = dbmd.getFunctionColumns(null, null, "f1", null);
+
+        ResultSetMetaData rsmd = rs.getMetaData();
+        assertEquals(17, rsmd.getColumnCount());
+        assertEquals("FUNCTION_CAT", rsmd.getColumnName(1));
+        assertEquals("FUNCTION_SCHEM", rsmd.getColumnName(2));
+        assertEquals("FUNCTION_NAME", rsmd.getColumnName(3));
+        assertEquals("COLUMN_NAME", rsmd.getColumnName(4));
+        assertEquals("COLUMN_TYPE", rsmd.getColumnName(5));
+        assertEquals("DATA_TYPE", rsmd.getColumnName(6));
+        assertEquals("TYPE_NAME", rsmd.getColumnName(7));
+        assertEquals("PRECISION", rsmd.getColumnName(8));
+        assertEquals("LENGTH", rsmd.getColumnName(9));
+        assertEquals("SCALE", rsmd.getColumnName(10));
+        assertEquals("RADIX", rsmd.getColumnName(11));
+        assertEquals("NULLABLE", rsmd.getColumnName(12));
+        assertEquals("REMARKS", rsmd.getColumnName(13));
+        assertEquals("CHAR_OCTET_LENGTH", rsmd.getColumnName(14));
+        assertEquals("ORDINAL_POSITION", rsmd.getColumnName(15));
+        assertEquals("IS_NULLABLE", rsmd.getColumnName(16));
+        assertEquals("SPECIFIC_NAME", rsmd.getColumnName(17));
+
+        assertTrue(rs.next());
+        assertNull(rs.getString(1));
+        assertEquals("public", rs.getString(2));
+        assertEquals("f1", rs.getString(3));
+        assertEquals("returnValue", rs.getString(4));
+        assertEquals(DatabaseMetaData.functionReturn, rs.getInt(5));
+        assertEquals(Types.INTEGER, rs.getInt(6));
+        assertEquals("int4", rs.getString(7));
+        assertEquals(0, rs.getInt(15));
+
+        assertTrue(rs.next());
+        assertNull(rs.getString(1));
+        assertEquals("public", rs.getString(2));
+        assertEquals("f1", rs.getString(3));
+        assertEquals("$1", rs.getString(4));
+        assertEquals(DatabaseMetaData.functionColumnIn, rs.getInt(5));
+        assertEquals(Types.INTEGER, rs.getInt(6));
+        assertEquals("int4", rs.getString(7));
+        assertEquals(1, rs.getInt(15));
+
+        assertTrue(rs.next());
+        assertNull(rs.getString(1));
+        assertEquals("public", rs.getString(2));
+        assertEquals("f1", rs.getString(3));
+        assertEquals("$2", rs.getString(4));
+        assertEquals(DatabaseMetaData.functionColumnIn, rs.getInt(5));
+        assertEquals(Types.VARCHAR, rs.getInt(6));
+        assertEquals("varchar", rs.getString(7));
+        assertEquals(2, rs.getInt(15));
+
+        assertFalse(rs.next());
+
+        rs.close();
+    }
+
+    @MethodSource("data")
+    @ParameterizedTest(name = "binary = {0}")
+    void smallSerialColumns(BinaryMode binaryMode) throws SQLException {
+        initDatabaseMetaDataTest(binaryMode);
+        Assumptions.assumeTrue(TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_2));
+        TestUtil.createTable(con, "smallserial_test", "a smallserial");
+
+        DatabaseMetaData dbmd = con.getMetaData();
+        ResultSet rs = dbmd.getColumns(null, null, "smallserial_test", "a");
+        assertTrue(rs.next());
+        assertEquals("smallserial_test", rs.getString("TABLE_NAME"));
+        assertEquals("a", rs.getString("COLUMN_NAME"));
+        assertEquals(Types.SMALLINT, rs.getInt("DATA_TYPE"));
+        assertEquals("smallserial", rs.getString("TYPE_NAME"));
+        assertTrue(rs.getBoolean("IS_AUTOINCREMENT"));
+        assertEquals("nextval('smallserial_test_a_seq'::regclass)", rs.getString("COLUMN_DEF"));
+        assertFalse(rs.next());
+        rs.close();
+
+        TestUtil.dropTable(con, "smallserial_test");
+    }
+
+    @MethodSource("data")
+    @ParameterizedTest(name = "binary = {0}")
+    void smallSerialSequenceLikeColumns(BinaryMode binaryMode) throws SQLException {
+        initDatabaseMetaDataTest(binaryMode);
+        Statement stmt = con.createStatement();
+        // This is the equivalent of the smallserial, not the actual smallserial
+        stmt.execute("CREATE SEQUENCE smallserial_test_a_seq;\n"
+                + "CREATE TABLE smallserial_test (\n"
+                + "    a smallint NOT NULL DEFAULT nextval('smallserial_test_a_seq')\n"
+                + ");\n"
+                + "ALTER SEQUENCE smallserial_test_a_seq OWNED BY smallserial_test.a;");
+
+        DatabaseMetaData dbmd = con.getMetaData();
+        ResultSet rs = dbmd.getColumns(null, null, "smallserial_test", "a");
+        assertTrue(rs.next());
+        assertEquals("smallserial_test", rs.getString("TABLE_NAME"));
+        assertEquals("a", rs.getString("COLUMN_NAME"));
+        assertEquals(Types.SMALLINT, rs.getInt("DATA_TYPE"));
+        if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_2)) {
+            // in Pg 9.2+ it behaves like smallserial
+            assertEquals("smallserial", rs.getString("TYPE_NAME"));
+        } else {
+            assertEquals("int2", rs.getString("TYPE_NAME"));
+        }
+        assertTrue(rs.getBoolean("IS_AUTOINCREMENT"));
+        assertEquals("nextval('smallserial_test_a_seq'::regclass)", rs.getString("COLUMN_DEF"));
+        assertFalse(rs.next());
+        rs.close();
+
+        stmt.execute("DROP TABLE smallserial_test");
+        stmt.close();
+    }
+
+    @MethodSource("data")
+    @ParameterizedTest(name = "binary = {0}")
+    void upperCaseMetaDataLabels(BinaryMode binaryMode) throws SQLException {
+        initDatabaseMetaDataTest(binaryMode);
+        ResultSet rs = con.getMetaData().getTables(null, null, null, null);
+        ResultSetMetaData rsmd = rs.getMetaData();
+
+        assertEquals("TABLE_CAT", rsmd.getColumnName(1));
+        assertEquals("TABLE_SCHEM", rsmd.getColumnName(2));
+        assertEquals("TABLE_NAME", rsmd.getColumnName(3));
+        assertEquals("TABLE_TYPE", rsmd.getColumnName(4));
+        assertEquals("REMARKS", rsmd.getColumnName(5));
+
+    }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/DatabaseMetaDataTransactionIsolationTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/DatabaseMetaDataTransactionIsolationTest.java
index 1b52f8c..b672155 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/DatabaseMetaDataTransactionIsolationTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/DatabaseMetaDataTransactionIsolationTest.java
@@ -22,123 +22,123 @@ import java.sql.Statement;
 import java.util.function.Supplier;
 
 class DatabaseMetaDataTransactionIsolationTest {
-  static Connection con;
+    static Connection con;
 
-  @BeforeAll
-  static void setup() throws SQLException {
-    con = TestUtil.openDB();
-  }
-
-  @AfterAll
-  static void teardown() throws SQLException {
-    TestUtil.closeDB(con);
-  }
-
-  @BeforeEach
-  void resetTransactionIsolation() throws SQLException {
-    // Restore to defaults
-    con.setAutoCommit(true);
-    try (Statement st = con.createStatement()) {
-      st.execute("alter database test set default_transaction_isolation to DEFAULT");
-    }
-  }
-
-  @Test
-  void connectionTransactionIsolation() throws SQLException {
-    // We use a new connection to avoid any side effects from other tests as we need to test
-    // the default transaction isolation level.
-    try (Connection con = TestUtil.openDB()) {
-      assertIsolationEquals(
-          "read committed",
-          con.getTransactionIsolation(),
-          () -> "Default connection transaction isolation in PostgreSQL is read committed");
-    }
-  }
-
-  @Test
-  void metadataDefaultTransactionIsolation() throws SQLException {
-    assertIsolationEquals(
-        "read committed",
-        getDefaultTransactionIsolation(),
-        () -> "Default database transaction isolation in PostgreSQL is read committed");
-  }
-
-  @ParameterizedTest
-  @ValueSource(strings = {"read committed", "read uncommitted", "repeatable read", "serializable"})
-  void alterDatabaseDefaultTransactionIsolation(String isolationLevel) throws SQLException {
-    try (Statement st = con.createStatement()) {
-      st.execute(
-          "alter database test set default_transaction_isolation to '" + isolationLevel + "'");
+    @BeforeAll
+    static void setup() throws SQLException {
+        con = TestUtil.openDB();
     }
 
-    assertIsolationEquals(
-        isolationLevel,
-        getDefaultTransactionIsolation(),
-        () -> "Default transaction isolation should be " + isolationLevel);
-  }
-
-  /**
-   * PostgreSQL does not seem to update the value in
-   * pg_catalog.pg_settings WHERE name='default_transaction_isolation'
-   * when changing default_transaction_isolation, so we reconnect to get the new value.
-   */
-  static int getDefaultTransactionIsolation() throws SQLException {
-    try (Connection con = TestUtil.openDB()) {
-      return con.getMetaData().getDefaultTransactionIsolation();
-    }
-  }
-
-  @ParameterizedTest
-  @ValueSource(strings = {"read committed", "read uncommitted", "repeatable read", "serializable"})
-  void alterConnectionTransactionIsolation(String isolationLevel) throws SQLException {
-    con.setAutoCommit(false);
-    try (Statement st = con.createStatement()) {
-      st.execute("set transaction ISOLATION LEVEL " + isolationLevel);
+    @AfterAll
+    static void teardown() throws SQLException {
+        TestUtil.closeDB(con);
     }
 
-    assertIsolationEquals(
-        isolationLevel,
-        con.getTransactionIsolation(),
-        () -> "Connection transaction isolation should be " + isolationLevel);
-  }
-
-  @ParameterizedTest
-  @ValueSource(ints = {
-      Connection.TRANSACTION_SERIALIZABLE,
-      Connection.TRANSACTION_REPEATABLE_READ,
-      Connection.TRANSACTION_READ_COMMITTED,
-      Connection.TRANSACTION_READ_UNCOMMITTED})
-  void setConnectionTransactionIsolation(int isolationLevel) throws SQLException {
-    con.setAutoCommit(false);
-    con.setTransactionIsolation(isolationLevel);
-
-    assertIsolationEquals(
-        mapJdbcIsolationToPg(isolationLevel),
-        con.getTransactionIsolation(),
-        () -> "Connection transaction isolation should be " + isolationLevel);
-  }
-
-  private static void assertIsolationEquals(String expected, int actual, Supplier<String> message) {
-    assertEquals(
-        expected,
-        mapJdbcIsolationToPg(actual),
-        message);
-  }
-
-  private static String mapJdbcIsolationToPg(int isolationLevel) {
-    switch (isolationLevel) {
-      case Connection.TRANSACTION_READ_COMMITTED:
-        return "read committed";
-      case Connection.TRANSACTION_READ_UNCOMMITTED:
-        return "read uncommitted";
-      case Connection.TRANSACTION_REPEATABLE_READ:
-        return "repeatable read";
-      case Connection.TRANSACTION_SERIALIZABLE:
-        return "serializable";
-      case Connection.TRANSACTION_NONE:
-        return "none";
-      default:
-        return "Unknown isolation level " + isolationLevel;
+    /**
+     * PostgreSQL does not seem to update the value in
+     * pg_catalog.pg_settings WHERE name='default_transaction_isolation'
+     * when changing default_transaction_isolation, so we reconnect to get the new value.
+     */
+    static int getDefaultTransactionIsolation() throws SQLException {
+        try (Connection con = TestUtil.openDB()) {
+            return con.getMetaData().getDefaultTransactionIsolation();
+        }
+    }
+
+    private static void assertIsolationEquals(String expected, int actual, Supplier<String> message) {
+        assertEquals(
+                expected,
+                mapJdbcIsolationToPg(actual),
+                message);
+    }
+
+    private static String mapJdbcIsolationToPg(int isolationLevel) {
+        switch (isolationLevel) {
+            case Connection.TRANSACTION_READ_COMMITTED:
+                return "read committed";
+            case Connection.TRANSACTION_READ_UNCOMMITTED:
+                return "read uncommitted";
+            case Connection.TRANSACTION_REPEATABLE_READ:
+                return "repeatable read";
+            case Connection.TRANSACTION_SERIALIZABLE:
+                return "serializable";
+            case Connection.TRANSACTION_NONE:
+                return "none";
+            default:
+                return "Unknown isolation level " + isolationLevel;
+        }
+    }
+
+    @BeforeEach
+    void resetTransactionIsolation() throws SQLException {
+        // Restore to defaults
+        con.setAutoCommit(true);
+        try (Statement st = con.createStatement()) {
+            st.execute("alter database test set default_transaction_isolation to DEFAULT");
+        }
+    }
+
+    @Test
+    void connectionTransactionIsolation() throws SQLException {
+        // We use a new connection to avoid any side effects from other tests as we need to test
+        // the default transaction isolation level.
+        try (Connection con = TestUtil.openDB()) {
+            assertIsolationEquals(
+                    "read committed",
+                    con.getTransactionIsolation(),
+                    () -> "Default connection transaction isolation in PostgreSQL is read committed");
+        }
+    }
+
+    @Test
+    void metadataDefaultTransactionIsolation() throws SQLException {
+        assertIsolationEquals(
+                "read committed",
+                getDefaultTransactionIsolation(),
+                () -> "Default database transaction isolation in PostgreSQL is read committed");
+    }
+
+    @ParameterizedTest
+    @ValueSource(strings = {"read committed", "read uncommitted", "repeatable read", "serializable"})
+    void alterDatabaseDefaultTransactionIsolation(String isolationLevel) throws SQLException {
+        try (Statement st = con.createStatement()) {
+            st.execute(
+                    "alter database test set default_transaction_isolation to '" + isolationLevel + "'");
+        }
+
+        assertIsolationEquals(
+                isolationLevel,
+                getDefaultTransactionIsolation(),
+                () -> "Default transaction isolation should be " + isolationLevel);
+    }
+
+    @ParameterizedTest
+    @ValueSource(strings = {"read committed", "read uncommitted", "repeatable read", "serializable"})
+    void alterConnectionTransactionIsolation(String isolationLevel) throws SQLException {
+        con.setAutoCommit(false);
+        try (Statement st = con.createStatement()) {
+            st.execute("set transaction ISOLATION LEVEL " + isolationLevel);
+        }
+
+        assertIsolationEquals(
+                isolationLevel,
+                con.getTransactionIsolation(),
+                () -> "Connection transaction isolation should be " + isolationLevel);
+    }
+
+    @ParameterizedTest
+    @ValueSource(ints = {
+            Connection.TRANSACTION_SERIALIZABLE,
+            Connection.TRANSACTION_REPEATABLE_READ,
+            Connection.TRANSACTION_READ_COMMITTED,
+            Connection.TRANSACTION_READ_UNCOMMITTED})
+    void setConnectionTransactionIsolation(int isolationLevel) throws SQLException {
+        con.setAutoCommit(false);
+        con.setTransactionIsolation(isolationLevel);
+
+        assertIsolationEquals(
+                mapJdbcIsolationToPg(isolationLevel),
+                con.getTransactionIsolation(),
+                () -> "Connection transaction isolation should be " + isolationLevel);
     }
-  }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/DateStyleTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/DateStyleTest.java
index b3f4d75..76b8d0a 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/DateStyleTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/DateStyleTest.java
@@ -5,57 +5,55 @@
 
 package org.postgresql.test.jdbc2;
 
-import org.postgresql.test.TestUtil;
-import org.postgresql.util.PSQLState;
-
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.Arrays;
 import org.junit.Assert;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
-
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.Arrays;
+import org.postgresql.test.TestUtil;
+import org.postgresql.util.PSQLState;
 
 @RunWith(Parameterized.class)
 public class DateStyleTest extends BaseTest4 {
 
-  @Parameterized.Parameter(0)
-  public String dateStyle;
+    @Parameterized.Parameter(0)
+    public String dateStyle;
 
-  @Parameterized.Parameter(1)
-  public boolean shouldPass;
+    @Parameterized.Parameter(1)
+    public boolean shouldPass;
 
-  @Parameterized.Parameters(name = "dateStyle={0}, shouldPass={1}")
-  public static Iterable<Object[]> data() {
-    return Arrays.asList(new Object[][]{
-        {"iso, mdy", true},
-        {"ISO", true},
-        {"ISO,ymd", true},
-        {"PostgreSQL", false}
-    });
-  }
-
-  @Test
-  public void connect() throws SQLException {
-    Statement st = con.createStatement();
-    try {
-      st.execute("set DateStyle='" + dateStyle + "'");
-      if (!shouldPass) {
-        Assert.fail("Set DateStyle=" + dateStyle + " should not be allowed");
-      }
-    } catch (SQLException e) {
-      if (shouldPass) {
-        throw new IllegalStateException("Set DateStyle=" + dateStyle
-            + " should be fine, however received " + e.getMessage(), e);
-      }
-      if (PSQLState.CONNECTION_FAILURE.getState().equals(e.getSQLState())) {
-        return;
-      }
-      throw new IllegalStateException("Set DateStyle=" + dateStyle
-          + " should result in CONNECTION_FAILURE error, however received " + e.getMessage(), e);
-    } finally {
-      TestUtil.closeQuietly(st);
+    @Parameterized.Parameters(name = "dateStyle={0}, shouldPass={1}")
+    public static Iterable<Object[]> data() {
+        return Arrays.asList(new Object[][]{
+                {"iso, mdy", true},
+                {"ISO", true},
+                {"ISO,ymd", true},
+                {"PostgreSQL", false}
+        });
+    }
+
+    @Test
+    public void connect() throws SQLException {
+        Statement st = con.createStatement();
+        try {
+            st.execute("set DateStyle='" + dateStyle + "'");
+            if (!shouldPass) {
+                Assert.fail("Set DateStyle=" + dateStyle + " should not be allowed");
+            }
+        } catch (SQLException e) {
+            if (shouldPass) {
+                throw new IllegalStateException("Set DateStyle=" + dateStyle
+                        + " should be fine, however received " + e.getMessage(), e);
+            }
+            if (PSQLState.CONNECTION_FAILURE.getState().equals(e.getSQLState())) {
+                return;
+            }
+            throw new IllegalStateException("Set DateStyle=" + dateStyle
+                    + " should result in CONNECTION_FAILURE error, however received " + e.getMessage(), e);
+        } finally {
+            TestUtil.closeQuietly(st);
+        }
     }
-  }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/DateTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/DateTest.java
index 7b875e0..2f70bbc 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/DateTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/DateTest.java
@@ -5,19 +5,6 @@
 
 package org.postgresql.test.jdbc2;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assume.assumeTrue;
-
-import org.postgresql.test.TestUtil;
-
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.SQLException;
@@ -30,6 +17,16 @@ import java.util.Objects;
 import java.util.TimeZone;
 import java.util.stream.IntStream;
 import java.util.stream.Stream;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.postgresql.test.TestUtil;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assume.assumeTrue;
 
 /*
  * Some simple tests based on problems reported by users. Hopefully these will help prevent previous
@@ -38,290 +35,290 @@ import java.util.stream.Stream;
  */
 @RunWith(Parameterized.class)
 public class DateTest extends BaseTest4 {
-  private static final TimeZone saveTZ = TimeZone.getDefault();
+    private static final TimeZone saveTZ = TimeZone.getDefault();
 
-  private final String type;
-  private final String zoneId;
+    private final String type;
+    private final String zoneId;
 
-  public DateTest(String type, String zoneId, BinaryMode binaryMode) {
-    this.type = type;
-    this.zoneId = zoneId;
-    TimeZone.setDefault(TimeZone.getTimeZone(zoneId));
-    setBinaryMode(binaryMode);
-  }
+    public DateTest(String type, String zoneId, BinaryMode binaryMode) {
+        this.type = type;
+        this.zoneId = zoneId;
+        TimeZone.setDefault(TimeZone.getTimeZone(zoneId));
+        setBinaryMode(binaryMode);
+    }
 
-  @Parameterized.Parameters(name = "type = {0}, zoneId = {1}, binary = {2}")
-  public static Iterable<Object[]> data() {
-    final List<Object[]> data = new ArrayList<>();
-    for (String type : Arrays.asList("date", "timestamp", "timestamptz")) {
-      Stream<String> tzIds = Stream.of("Africa/Casablanca", "America/New_York", "America/Toronto",
-          "Europe/Berlin", "Europe/Moscow", "Pacific/Apia", "America/Los_Angeles");
-      // some selection of static GMT offsets (not all, as this takes too long):
-      tzIds = Stream.concat(tzIds, IntStream.of(-12, -11, -5, -1, 0, 1, 3, 12, 13)
-          .mapToObj(i -> String.format(Locale.ROOT, "GMT%+02d", i)));
-      for (String tzId : (Iterable<String>) tzIds::iterator) {
-        for (BinaryMode binaryMode : BinaryMode.values()) {
-          data.add(new Object[]{type, tzId, binaryMode});
+    @Parameterized.Parameters(name = "type = {0}, zoneId = {1}, binary = {2}")
+    public static Iterable<Object[]> data() {
+        final List<Object[]> data = new ArrayList<>();
+        for (String type : Arrays.asList("date", "timestamp", "timestamptz")) {
+            Stream<String> tzIds = Stream.of("Africa/Casablanca", "America/New_York", "America/Toronto",
+                    "Europe/Berlin", "Europe/Moscow", "Pacific/Apia", "America/Los_Angeles");
+            // some selection of static GMT offsets (not all, as this takes too long):
+            tzIds = Stream.concat(tzIds, IntStream.of(-12, -11, -5, -1, 0, 1, 3, 12, 13)
+                    .mapToObj(i -> String.format(Locale.ROOT, "GMT%+02d", i)));
+            for (String tzId : (Iterable<String>) tzIds::iterator) {
+                for (BinaryMode binaryMode : BinaryMode.values()) {
+                    data.add(new Object[]{type, tzId, binaryMode});
+                }
+            }
         }
-      }
+        return data;
     }
-    return data;
-  }
 
-  @Before
-  public void setUp() throws Exception {
-    super.setUp();
-    TestUtil.createTable(con, "test", "dt ".concat(type));
-  }
-
-  @After
-  public void tearDown() throws SQLException {
-    TimeZone.setDefault(saveTZ);
-    TestUtil.dropTable(con, "test");
-    super.tearDown();
-  }
-
-  /*
-   * Tests the time methods in ResultSet
-   */
-  @Test
-  public void testGetDate() throws SQLException {
-    assumeTrue("TODO: Test fails on some server versions with local time zones (not GMT based)",
-        false == Objects.equals(type, "timestamptz") || zoneId.startsWith("GMT"));
-    try (Statement stmt = con.createStatement()) {
-      assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("test", "'1950-02-07'")));
-      assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("test", "'1970-06-02'")));
-      assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("test", "'1999-08-11'")));
-      assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("test", "'2001-02-13'")));
-      assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("test", "'1950-04-02'")));
-      assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("test", "'1970-11-30'")));
-      assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("test", "'1988-01-01'")));
-      assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("test", "'2003-07-09'")));
-      assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("test", "'1934-02-28'")));
-      assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("test", "'1969-04-03'")));
-      assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("test", "'1982-08-03'")));
-      assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("test", "'2012-03-15'")));
-      assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("test", "'1912-05-01'")));
-      assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("test", "'1971-12-15'")));
-      assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("test", "'1984-12-03'")));
-      assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("test", "'2000-01-01'")));
-      assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("test", "'3456-01-01'")));
-      assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("test", "'0101-01-01 BC'")));
-      assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("test", "'0001-01-01'")));
-      assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("test", "'0001-01-01 BC'")));
-      assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("test", "'0001-12-31 BC'")));
-
-      /* dateTest() contains all of the tests */
-      dateTest();
-
-      assertEquals(21, stmt.executeUpdate("DELETE FROM test"));
+    @Before
+    public void setUp() throws Exception {
+        super.setUp();
+        TestUtil.createTable(con, "test", "dt ".concat(type));
     }
-  }
 
-  /*
-   * Tests the time methods in PreparedStatement
-   */
-  @Test
-  public void testSetDate() throws SQLException {
-    try (Statement stmt = con.createStatement()) {
-      PreparedStatement ps = con.prepareStatement(TestUtil.insertSQL("test", "?"));
-
-      ps.setDate(1, makeDate(1950, 2, 7));
-      assertEquals(1, ps.executeUpdate());
-
-      ps.setDate(1, makeDate(1970, 6, 2));
-      assertEquals(1, ps.executeUpdate());
-
-      ps.setDate(1, makeDate(1999, 8, 11));
-      assertEquals(1, ps.executeUpdate());
-
-      ps.setDate(1, makeDate(2001, 2, 13));
-      assertEquals(1, ps.executeUpdate());
-
-      ps.setObject(1, java.sql.Timestamp.valueOf("1950-04-02 12:00:00"), java.sql.Types.DATE);
-      assertEquals(1, ps.executeUpdate());
-
-      ps.setObject(1, java.sql.Timestamp.valueOf("1970-11-30 3:00:00"), java.sql.Types.DATE);
-      assertEquals(1, ps.executeUpdate());
-
-      ps.setObject(1, java.sql.Timestamp.valueOf("1988-01-01 13:00:00"), java.sql.Types.DATE);
-      assertEquals(1, ps.executeUpdate());
-
-      ps.setObject(1, java.sql.Timestamp.valueOf("2003-07-09 12:00:00"), java.sql.Types.DATE);
-      assertEquals(1, ps.executeUpdate());
-
-      ps.setObject(1, "1934-02-28", java.sql.Types.DATE);
-      assertEquals(1, ps.executeUpdate());
-
-      ps.setObject(1, "1969-04-03", java.sql.Types.DATE);
-      assertEquals(1, ps.executeUpdate());
-
-      ps.setObject(1, "1982-08-03", java.sql.Types.DATE);
-      assertEquals(1, ps.executeUpdate());
-
-      ps.setObject(1, "2012-03-15", java.sql.Types.DATE);
-      assertEquals(1, ps.executeUpdate());
-
-      ps.setObject(1, java.sql.Date.valueOf("1912-05-01"), java.sql.Types.DATE);
-      assertEquals(1, ps.executeUpdate());
-
-      ps.setObject(1, java.sql.Date.valueOf("1971-12-15"), java.sql.Types.DATE);
-      assertEquals(1, ps.executeUpdate());
-
-      ps.setObject(1, java.sql.Date.valueOf("1984-12-03"), java.sql.Types.DATE);
-      assertEquals(1, ps.executeUpdate());
-
-      ps.setObject(1, java.sql.Date.valueOf("2000-01-01"), java.sql.Types.DATE);
-      assertEquals(1, ps.executeUpdate());
-
-      ps.setObject(1, java.sql.Date.valueOf("3456-01-01"), java.sql.Types.DATE);
-      assertEquals(1, ps.executeUpdate());
-
-      // We can't use valueOf on BC dates.
-      ps.setObject(1, makeDate(-100, 1, 1));
-      assertEquals(1, ps.executeUpdate());
-
-      ps.setObject(1, makeDate(1, 1, 1));
-      assertEquals(1, ps.executeUpdate());
-
-      // Note: Year 0 in Java is year '0001-01-01 BC' in PostgreSQL.
-      ps.setObject(1, makeDate(0, 1, 1));
-      assertEquals(1, ps.executeUpdate());
-
-      ps.setObject(1, makeDate(0, 12, 31));
-      assertEquals(1, ps.executeUpdate());
-
-      ps.close();
-
-      dateTest();
-
-      assertEquals(21, stmt.executeUpdate("DELETE FROM test"));
+    @After
+    public void tearDown() throws SQLException {
+        TimeZone.setDefault(saveTZ);
+        TestUtil.dropTable(con, "test");
+        super.tearDown();
     }
-  }
 
-  /*
-   * Helper for the date tests. It tests what should be in the db
-   */
-  private void dateTest() throws SQLException {
-    Statement st = con.createStatement();
-    ResultSet rs;
-    java.sql.Date d;
+    /*
+     * Tests the date methods in ResultSet.
+     */
+    @Test
+    public void testGetDate() throws SQLException {
+        assumeTrue("TODO: Test fails on some server versions with local time zones (not GMT based)",
+                false == Objects.equals(type, "timestamptz") || zoneId.startsWith("GMT"));
+        try (Statement stmt = con.createStatement()) {
+            assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("test", "'1950-02-07'")));
+            assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("test", "'1970-06-02'")));
+            assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("test", "'1999-08-11'")));
+            assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("test", "'2001-02-13'")));
+            assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("test", "'1950-04-02'")));
+            assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("test", "'1970-11-30'")));
+            assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("test", "'1988-01-01'")));
+            assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("test", "'2003-07-09'")));
+            assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("test", "'1934-02-28'")));
+            assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("test", "'1969-04-03'")));
+            assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("test", "'1982-08-03'")));
+            assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("test", "'2012-03-15'")));
+            assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("test", "'1912-05-01'")));
+            assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("test", "'1971-12-15'")));
+            assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("test", "'1984-12-03'")));
+            assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("test", "'2000-01-01'")));
+            assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("test", "'3456-01-01'")));
+            assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("test", "'0101-01-01 BC'")));
+            assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("test", "'0001-01-01'")));
+            assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("test", "'0001-01-01 BC'")));
+            assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("test", "'0001-12-31 BC'")));
 
-    rs = st.executeQuery(TestUtil.selectSQL("test", "dt"));
-    assertNotNull(rs);
+            /* dateTest() contains all of the tests */
+            dateTest();
 
-    assertTrue(rs.next());
-    d = rs.getDate(1);
-    assertNotNull(d);
-    assertEquals(makeDate(1950, 2, 7), d);
+            assertEquals(21, stmt.executeUpdate("DELETE FROM test"));
+        }
+    }
 
-    assertTrue(rs.next());
-    d = rs.getDate(1);
-    assertNotNull(d);
-    assertEquals(makeDate(1970, 6, 2), d);
+    /*
+     * Tests the date methods in PreparedStatement.
+     */
+    @Test
+    public void testSetDate() throws SQLException {
+        try (Statement stmt = con.createStatement()) {
+            PreparedStatement ps = con.prepareStatement(TestUtil.insertSQL("test", "?"));
 
-    assertTrue(rs.next());
-    d = rs.getDate(1);
-    assertNotNull(d);
-    assertEquals(makeDate(1999, 8, 11), d);
+            ps.setDate(1, makeDate(1950, 2, 7));
+            assertEquals(1, ps.executeUpdate());
 
-    assertTrue(rs.next());
-    d = rs.getDate(1);
-    assertNotNull(d);
-    assertEquals(makeDate(2001, 2, 13), d);
+            ps.setDate(1, makeDate(1970, 6, 2));
+            assertEquals(1, ps.executeUpdate());
 
-    assertTrue(rs.next());
-    d = rs.getDate(1);
-    assertNotNull(d);
-    assertEquals(makeDate(1950, 4, 2), d);
+            ps.setDate(1, makeDate(1999, 8, 11));
+            assertEquals(1, ps.executeUpdate());
 
-    assertTrue(rs.next());
-    d = rs.getDate(1);
-    assertNotNull(d);
-    assertEquals(makeDate(1970, 11, 30), d);
+            ps.setDate(1, makeDate(2001, 2, 13));
+            assertEquals(1, ps.executeUpdate());
 
-    assertTrue(rs.next());
-    d = rs.getDate(1);
-    assertNotNull(d);
-    assertEquals(makeDate(1988, 1, 1), d);
+            ps.setObject(1, java.sql.Timestamp.valueOf("1950-04-02 12:00:00"), java.sql.Types.DATE);
+            assertEquals(1, ps.executeUpdate());
 
-    assertTrue(rs.next());
-    d = rs.getDate(1);
-    assertNotNull(d);
-    assertEquals(makeDate(2003, 7, 9), d);
+            ps.setObject(1, java.sql.Timestamp.valueOf("1970-11-30 3:00:00"), java.sql.Types.DATE);
+            assertEquals(1, ps.executeUpdate());
 
-    assertTrue(rs.next());
-    d = rs.getDate(1);
-    assertNotNull(d);
-    assertEquals(makeDate(1934, 2, 28), d);
+            ps.setObject(1, java.sql.Timestamp.valueOf("1988-01-01 13:00:00"), java.sql.Types.DATE);
+            assertEquals(1, ps.executeUpdate());
 
-    assertTrue(rs.next());
-    d = rs.getDate(1);
-    assertNotNull(d);
-    assertEquals(makeDate(1969, 4, 3), d);
+            ps.setObject(1, java.sql.Timestamp.valueOf("2003-07-09 12:00:00"), java.sql.Types.DATE);
+            assertEquals(1, ps.executeUpdate());
 
-    assertTrue(rs.next());
-    d = rs.getDate(1);
-    assertNotNull(d);
-    assertEquals(makeDate(1982, 8, 3), d);
+            ps.setObject(1, "1934-02-28", java.sql.Types.DATE);
+            assertEquals(1, ps.executeUpdate());
 
-    assertTrue(rs.next());
-    d = rs.getDate(1);
-    assertNotNull(d);
-    assertEquals(makeDate(2012, 3, 15), d);
+            ps.setObject(1, "1969-04-03", java.sql.Types.DATE);
+            assertEquals(1, ps.executeUpdate());
 
-    assertTrue(rs.next());
-    d = rs.getDate(1);
-    assertNotNull(d);
-    assertEquals(makeDate(1912, 5, 1), d);
+            ps.setObject(1, "1982-08-03", java.sql.Types.DATE);
+            assertEquals(1, ps.executeUpdate());
 
-    assertTrue(rs.next());
-    d = rs.getDate(1);
-    assertNotNull(d);
-    assertEquals(makeDate(1971, 12, 15), d);
+            ps.setObject(1, "2012-03-15", java.sql.Types.DATE);
+            assertEquals(1, ps.executeUpdate());
 
-    assertTrue(rs.next());
-    d = rs.getDate(1);
-    assertNotNull(d);
-    assertEquals(makeDate(1984, 12, 3), d);
+            ps.setObject(1, java.sql.Date.valueOf("1912-05-01"), java.sql.Types.DATE);
+            assertEquals(1, ps.executeUpdate());
 
-    assertTrue(rs.next());
-    d = rs.getDate(1);
-    assertNotNull(d);
-    assertEquals(makeDate(2000, 1, 1), d);
+            ps.setObject(1, java.sql.Date.valueOf("1971-12-15"), java.sql.Types.DATE);
+            assertEquals(1, ps.executeUpdate());
 
-    assertTrue(rs.next());
-    d = rs.getDate(1);
-    assertNotNull(d);
-    assertEquals(makeDate(3456, 1, 1), d);
+            ps.setObject(1, java.sql.Date.valueOf("1984-12-03"), java.sql.Types.DATE);
+            assertEquals(1, ps.executeUpdate());
 
-    assertTrue(rs.next());
-    d = rs.getDate(1);
-    assertNotNull(d);
-    assertEquals(makeDate(-100, 1, 1), d);
+            ps.setObject(1, java.sql.Date.valueOf("2000-01-01"), java.sql.Types.DATE);
+            assertEquals(1, ps.executeUpdate());
 
-    assertTrue(rs.next());
-    d = rs.getDate(1);
-    assertNotNull(d);
-    assertEquals(makeDate(1, 1, 1), d);
+            ps.setObject(1, java.sql.Date.valueOf("3456-01-01"), java.sql.Types.DATE);
+            assertEquals(1, ps.executeUpdate());
 
-    assertTrue(rs.next());
-    d = rs.getDate(1);
-    assertNotNull(d);
-    assertEquals(makeDate(0, 1, 1), d);
+            // We can't use valueOf on BC dates.
+            ps.setObject(1, makeDate(-100, 1, 1));
+            assertEquals(1, ps.executeUpdate());
 
-    assertTrue(rs.next());
-    d = rs.getDate(1);
-    assertNotNull(d);
-    assertEquals(makeDate(0, 12, 31), d);
+            ps.setObject(1, makeDate(1, 1, 1));
+            assertEquals(1, ps.executeUpdate());
 
-    assertTrue(!rs.next());
+            // Note: Year 0 in Java is year '0001-01-01 BC' in PostgreSQL.
+            ps.setObject(1, makeDate(0, 1, 1));
+            assertEquals(1, ps.executeUpdate());
 
-    rs.close();
-    st.close();
-  }
+            ps.setObject(1, makeDate(0, 12, 31));
+            assertEquals(1, ps.executeUpdate());
 
-  private java.sql.Date makeDate(int y, int m, int d) {
-    return new java.sql.Date(y - 1900, m - 1, d);
-  }
+            ps.close();
+
+            dateTest();
+
+            assertEquals(21, stmt.executeUpdate("DELETE FROM test"));
+        }
+    }
+
+    /*
+     * Helper for the date tests; verifies the expected row contents in the db.
+     */
+    private void dateTest() throws SQLException {
+        Statement st = con.createStatement();
+        ResultSet rs;
+        java.sql.Date d;
+
+        rs = st.executeQuery(TestUtil.selectSQL("test", "dt"));
+        assertNotNull(rs);
+
+        assertTrue(rs.next());
+        d = rs.getDate(1);
+        assertNotNull(d);
+        assertEquals(makeDate(1950, 2, 7), d);
+
+        assertTrue(rs.next());
+        d = rs.getDate(1);
+        assertNotNull(d);
+        assertEquals(makeDate(1970, 6, 2), d);
+
+        assertTrue(rs.next());
+        d = rs.getDate(1);
+        assertNotNull(d);
+        assertEquals(makeDate(1999, 8, 11), d);
+
+        assertTrue(rs.next());
+        d = rs.getDate(1);
+        assertNotNull(d);
+        assertEquals(makeDate(2001, 2, 13), d);
+
+        assertTrue(rs.next());
+        d = rs.getDate(1);
+        assertNotNull(d);
+        assertEquals(makeDate(1950, 4, 2), d);
+
+        assertTrue(rs.next());
+        d = rs.getDate(1);
+        assertNotNull(d);
+        assertEquals(makeDate(1970, 11, 30), d);
+
+        assertTrue(rs.next());
+        d = rs.getDate(1);
+        assertNotNull(d);
+        assertEquals(makeDate(1988, 1, 1), d);
+
+        assertTrue(rs.next());
+        d = rs.getDate(1);
+        assertNotNull(d);
+        assertEquals(makeDate(2003, 7, 9), d);
+
+        assertTrue(rs.next());
+        d = rs.getDate(1);
+        assertNotNull(d);
+        assertEquals(makeDate(1934, 2, 28), d);
+
+        assertTrue(rs.next());
+        d = rs.getDate(1);
+        assertNotNull(d);
+        assertEquals(makeDate(1969, 4, 3), d);
+
+        assertTrue(rs.next());
+        d = rs.getDate(1);
+        assertNotNull(d);
+        assertEquals(makeDate(1982, 8, 3), d);
+
+        assertTrue(rs.next());
+        d = rs.getDate(1);
+        assertNotNull(d);
+        assertEquals(makeDate(2012, 3, 15), d);
+
+        assertTrue(rs.next());
+        d = rs.getDate(1);
+        assertNotNull(d);
+        assertEquals(makeDate(1912, 5, 1), d);
+
+        assertTrue(rs.next());
+        d = rs.getDate(1);
+        assertNotNull(d);
+        assertEquals(makeDate(1971, 12, 15), d);
+
+        assertTrue(rs.next());
+        d = rs.getDate(1);
+        assertNotNull(d);
+        assertEquals(makeDate(1984, 12, 3), d);
+
+        assertTrue(rs.next());
+        d = rs.getDate(1);
+        assertNotNull(d);
+        assertEquals(makeDate(2000, 1, 1), d);
+
+        assertTrue(rs.next());
+        d = rs.getDate(1);
+        assertNotNull(d);
+        assertEquals(makeDate(3456, 1, 1), d);
+
+        assertTrue(rs.next());
+        d = rs.getDate(1);
+        assertNotNull(d);
+        assertEquals(makeDate(-100, 1, 1), d);
+
+        assertTrue(rs.next());
+        d = rs.getDate(1);
+        assertNotNull(d);
+        assertEquals(makeDate(1, 1, 1), d);
+
+        assertTrue(rs.next());
+        d = rs.getDate(1);
+        assertNotNull(d);
+        assertEquals(makeDate(0, 1, 1), d);
+
+        assertTrue(rs.next());
+        d = rs.getDate(1);
+        assertNotNull(d);
+        assertEquals(makeDate(0, 12, 31), d);
+
+        assertTrue(!rs.next());
+
+        rs.close();
+        st.close();
+    }
+
+    private java.sql.Date makeDate(int y, int m, int d) {
+        return new java.sql.Date(y - 1900, m - 1, d);
+    }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/DriverTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/DriverTest.java
index 74169df..38c704c 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/DriverTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/DriverTest.java
@@ -5,26 +5,6 @@
 
 package org.postgresql.test.jdbc2;
 
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertFalse;
-import static org.junit.jupiter.api.Assertions.assertNotNull;
-import static org.junit.jupiter.api.Assertions.assertNull;
-import static org.junit.jupiter.api.Assertions.assertThrows;
-import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.junit.jupiter.api.Assertions.fail;
-
-import org.postgresql.Driver;
-import org.postgresql.PGEnvironment;
-import org.postgresql.PGProperty;
-import org.postgresql.test.TestUtil;
-import org.postgresql.util.StubEnvironmentAndProperties;
-import org.postgresql.util.URLCoder;
-
-import org.junit.jupiter.api.Test;
-import uk.org.webcompere.systemstubs.environment.EnvironmentVariables;
-import uk.org.webcompere.systemstubs.properties.SystemProperties;
-import uk.org.webcompere.systemstubs.resource.Resources;
-
 import java.io.ByteArrayOutputStream;
 import java.io.PrintStream;
 import java.lang.reflect.Method;
@@ -37,492 +17,509 @@ import java.sql.SQLException;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Properties;
+import org.junit.jupiter.api.Test;
+import org.postgresql.Driver;
+import org.postgresql.PGEnvironment;
+import org.postgresql.PGProperty;
+import org.postgresql.test.TestUtil;
+import org.postgresql.test.util.StubEnvironmentAndProperties;
+import org.postgresql.test.util.systemstubs.EnvironmentVariables;
+import org.postgresql.test.util.systemstubs.properties.SystemProperties;
+import org.postgresql.test.util.systemstubs.resource.Resources;
+import org.postgresql.util.URLCoder;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 
 /*
  * Tests the dynamically created class org.postgresql.Driver
- *
  */
 @StubEnvironmentAndProperties
 class DriverTest {
 
-  @Test
-  void urlIsNotForPostgreSQL() throws SQLException {
-    Driver driver = new Driver();
+    @Test
+    void urlIsNotForPostgreSQL() throws SQLException {
+        Driver driver = new Driver();
 
-    assertNull(driver.connect("jdbc:otherdb:database", new Properties()));
-  }
+        assertNull(driver.connect("jdbc:otherdb:database", new Properties()));
+    }
 
-  /**
-   * According to the javadoc of java.sql.Driver.connect(...), calling abort when the {@code executor} is {@code null}
-   * results in SQLException
-   */
-  @Test
-  void urlIsNull() throws SQLException {
-    Driver driver = new Driver();
+    /**
+     * According to the javadoc of java.sql.Driver.connect(...), calling abort when the {@code executor} is {@code null}
+     * results in SQLException
+     */
+    @Test
+    void urlIsNull() throws SQLException {
+        Driver driver = new Driver();
 
-    assertThrows(SQLException.class, () -> driver.connect(null, new Properties()));
-  }
+        assertThrows(SQLException.class, () -> driver.connect(null, new Properties()));
+    }
 
-  /*
-   * This tests the acceptsURL() method with a couple of well and poorly formed jdbc urls.
-   */
-  @Test
-  void acceptsURL() throws Exception {
-    TestUtil.initDriver(); // Set up log levels, etc.
+    /*
+     * This tests the acceptsURL() method with a couple of well and poorly formed jdbc urls.
+     */
+    @Test
+    void acceptsURL() throws Exception {
+        TestUtil.initDriver(); // Set up log levels, etc.
 
-    // Load the driver (note clients should never do it this way!)
-    Driver drv = new Driver();
-    assertNotNull(drv);
+        // Load the driver (note clients should never do it this way!)
+        Driver drv = new Driver();
+        assertNotNull(drv);
 
-    // These are always correct
-    verifyUrl(drv, "jdbc:postgresql:test", "localhost", "5432", "test");
-    verifyUrl(drv, "jdbc:postgresql://localhost/test", "localhost", "5432", "test");
-    verifyUrl(drv, "jdbc:postgresql://localhost,locahost2/test", "localhost,locahost2", "5432,5432", "test");
-    verifyUrl(drv, "jdbc:postgresql://localhost:5433,locahost2:5434/test", "localhost,locahost2", "5433,5434", "test");
-    verifyUrl(drv, "jdbc:postgresql://[::1]:5433,:5434,[::1]/test", "[::1],localhost,[::1]", "5433,5434,5432", "test");
-    verifyUrl(drv, "jdbc:postgresql://localhost/test?port=8888", "localhost", "8888", "test");
-    verifyUrl(drv, "jdbc:postgresql://localhost:5432/test", "localhost", "5432", "test");
-    verifyUrl(drv, "jdbc:postgresql://localhost:5432/test?dbname=test2", "localhost", "5432", "test2");
-    verifyUrl(drv, "jdbc:postgresql://127.0.0.1/anydbname", "127.0.0.1", "5432", "anydbname");
-    verifyUrl(drv, "jdbc:postgresql://127.0.0.1:5433/hidden", "127.0.0.1", "5433", "hidden");
-    verifyUrl(drv, "jdbc:postgresql://127.0.0.1:5433/hidden?port=7777", "127.0.0.1", "7777", "hidden");
-    verifyUrl(drv, "jdbc:postgresql://[::1]:5740/db", "[::1]", "5740", "db");
-    verifyUrl(drv, "jdbc:postgresql://[::1]:5740/my%20data%23base%251?loggerFile=C%3A%5Cdir%5Cfile.log", "[::1]", "5740", "my data#base%1");
+        // These are always correct
+        verifyUrl(drv, "jdbc:postgresql:test", "localhost", "5432", "test");
+        verifyUrl(drv, "jdbc:postgresql://localhost/test", "localhost", "5432", "test");
+        verifyUrl(drv, "jdbc:postgresql://localhost,locahost2/test", "localhost,locahost2", "5432,5432", "test");
+        verifyUrl(drv, "jdbc:postgresql://localhost:5433,locahost2:5434/test", "localhost,locahost2", "5433,5434", "test");
+        verifyUrl(drv, "jdbc:postgresql://[::1]:5433,:5434,[::1]/test", "[::1],localhost,[::1]", "5433,5434,5432", "test");
+        verifyUrl(drv, "jdbc:postgresql://localhost/test?port=8888", "localhost", "8888", "test");
+        verifyUrl(drv, "jdbc:postgresql://localhost:5432/test", "localhost", "5432", "test");
+        verifyUrl(drv, "jdbc:postgresql://localhost:5432/test?dbname=test2", "localhost", "5432", "test2");
+        verifyUrl(drv, "jdbc:postgresql://127.0.0.1/anydbname", "127.0.0.1", "5432", "anydbname");
+        verifyUrl(drv, "jdbc:postgresql://127.0.0.1:5433/hidden", "127.0.0.1", "5433", "hidden");
+        verifyUrl(drv, "jdbc:postgresql://127.0.0.1:5433/hidden?port=7777", "127.0.0.1", "7777", "hidden");
+        verifyUrl(drv, "jdbc:postgresql://[::1]:5740/db", "[::1]", "5740", "db");
+        verifyUrl(drv, "jdbc:postgresql://[::1]:5740/my%20data%23base%251?loggerFile=C%3A%5Cdir%5Cfile.log", "[::1]", "5740", "my data#base%1");
 
-    // tests for service syntax
-    URL urlFileProps = getClass().getResource("/pg_service/pgservicefileProps.conf");
-    assertNotNull(urlFileProps);
-    Resources.with(
-        new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGSERVICEFILE.getName(), urlFileProps.getFile())
-    ).execute(() -> {
-      // correct cases
-      verifyUrl(drv, "jdbc:postgresql://?service=driverTestService1", "test-host1", "5444", "testdb1");
-      verifyUrl(drv, "jdbc:postgresql://?service=driverTestService1&host=other-host", "other-host", "5444", "testdb1");
-      verifyUrl(drv, "jdbc:postgresql:///?service=driverTestService1", "test-host1", "5444", "testdb1");
-      verifyUrl(drv, "jdbc:postgresql:///?service=driverTestService1&port=3333&dbname=other-db", "test-host1", "3333", "other-db");
-      verifyUrl(drv, "jdbc:postgresql://localhost:5432/test?service=driverTestService1", "localhost", "5432", "test");
-      verifyUrl(drv, "jdbc:postgresql://localhost:5432/test?port=7777&dbname=other-db&service=driverTestService1", "localhost", "7777", "other-db");
-      verifyUrl(drv, "jdbc:postgresql://[::1]:5740/?service=driverTestService1", "[::1]", "5740", "testdb1");
-      verifyUrl(drv, "jdbc:postgresql://:5740/?service=driverTestService1", "localhost", "5740", "testdb1");
-      verifyUrl(drv, "jdbc:postgresql://[::1]/?service=driverTestService1", "[::1]", "5432", "testdb1");
-      verifyUrl(drv, "jdbc:postgresql://localhost/?service=driverTestService2", "localhost", "5432", "testdb1");
-      // fail cases
-      assertFalse(drv.acceptsURL("jdbc:postgresql://?service=driverTestService2"));
-    });
+        // tests for service syntax
+        URL urlFileProps = getClass().getResource("/pg_service/pgservicefileProps.conf");
+        assertNotNull(urlFileProps);
+        Resources.with(
+                new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGSERVICEFILE.getName(), urlFileProps.getFile())
+        ).execute(() -> {
+            // correct cases
+            verifyUrl(drv, "jdbc:postgresql://?service=driverTestService1", "test-host1", "5444", "testdb1");
+            verifyUrl(drv, "jdbc:postgresql://?service=driverTestService1&host=other-host", "other-host", "5444", "testdb1");
+            verifyUrl(drv, "jdbc:postgresql:///?service=driverTestService1", "test-host1", "5444", "testdb1");
+            verifyUrl(drv, "jdbc:postgresql:///?service=driverTestService1&port=3333&dbname=other-db", "test-host1", "3333", "other-db");
+            verifyUrl(drv, "jdbc:postgresql://localhost:5432/test?service=driverTestService1", "localhost", "5432", "test");
+            verifyUrl(drv, "jdbc:postgresql://localhost:5432/test?port=7777&dbname=other-db&service=driverTestService1", "localhost", "7777", "other-db");
+            verifyUrl(drv, "jdbc:postgresql://[::1]:5740/?service=driverTestService1", "[::1]", "5740", "testdb1");
+            verifyUrl(drv, "jdbc:postgresql://:5740/?service=driverTestService1", "localhost", "5740", "testdb1");
+            verifyUrl(drv, "jdbc:postgresql://[::1]/?service=driverTestService1", "[::1]", "5432", "testdb1");
+            verifyUrl(drv, "jdbc:postgresql://localhost/?service=driverTestService2", "localhost", "5432", "testdb1");
+            // fail cases
+            assertFalse(drv.acceptsURL("jdbc:postgresql://?service=driverTestService2"));
+        });
 
-    // Badly formatted url's
-    assertFalse(drv.acceptsURL("jdbc:postgres:test"));
-    assertFalse(drv.acceptsURL("jdbc:postgresql:/test"));
-    assertFalse(drv.acceptsURL("jdbc:postgresql:////"));
-    assertFalse(drv.acceptsURL("jdbc:postgresql:///?service=my data#base%1"));
-    assertFalse(drv.acceptsURL("jdbc:postgresql://[::1]:5740/my data#base%1"));
-    assertFalse(drv.acceptsURL("jdbc:postgresql://localhost/dbname?loggerFile=C%3A%5Cdir%5Cfile.%log"));
-    assertFalse(drv.acceptsURL("postgresql:test"));
-    assertFalse(drv.acceptsURL("db"));
-    assertFalse(drv.acceptsURL("jdbc:postgresql://localhost:5432a/test"));
-    assertFalse(drv.acceptsURL("jdbc:postgresql://localhost:500000/test"));
-    assertFalse(drv.acceptsURL("jdbc:postgresql://localhost:0/test"));
-    assertFalse(drv.acceptsURL("jdbc:postgresql://localhost:-2/test"));
+        // Badly formatted url's
+        assertFalse(drv.acceptsURL("jdbc:postgres:test"));
+        assertFalse(drv.acceptsURL("jdbc:postgresql:/test"));
+        assertFalse(drv.acceptsURL("jdbc:postgresql:////"));
+        assertFalse(drv.acceptsURL("jdbc:postgresql:///?service=my data#base%1"));
+        assertFalse(drv.acceptsURL("jdbc:postgresql://[::1]:5740/my data#base%1"));
+        assertFalse(drv.acceptsURL("jdbc:postgresql://localhost/dbname?loggerFile=C%3A%5Cdir%5Cfile.%log"));
+        assertFalse(drv.acceptsURL("postgresql:test"));
+        assertFalse(drv.acceptsURL("db"));
+        assertFalse(drv.acceptsURL("jdbc:postgresql://localhost:5432a/test"));
+        assertFalse(drv.acceptsURL("jdbc:postgresql://localhost:500000/test"));
+        assertFalse(drv.acceptsURL("jdbc:postgresql://localhost:0/test"));
+        assertFalse(drv.acceptsURL("jdbc:postgresql://localhost:-2/test"));
 
-    // failover urls
-    verifyUrl(drv, "jdbc:postgresql://localhost,127.0.0.1:5432/test", "localhost,127.0.0.1",
-        "5432,5432", "test");
-    verifyUrl(drv, "jdbc:postgresql://localhost:5433,127.0.0.1:5432/test", "localhost,127.0.0.1",
-        "5433,5432", "test");
-    verifyUrl(drv, "jdbc:postgresql://[::1],[::1]:5432/db", "[::1],[::1]", "5432,5432", "db");
-    verifyUrl(drv, "jdbc:postgresql://[::1]:5740,127.0.0.1:5432/db", "[::1],127.0.0.1", "5740,5432",
-        "db");
-  }
+        // failover urls
+        verifyUrl(drv, "jdbc:postgresql://localhost,127.0.0.1:5432/test", "localhost,127.0.0.1",
+                "5432,5432", "test");
+        verifyUrl(drv, "jdbc:postgresql://localhost:5433,127.0.0.1:5432/test", "localhost,127.0.0.1",
+                "5433,5432", "test");
+        verifyUrl(drv, "jdbc:postgresql://[::1],[::1]:5432/db", "[::1],[::1]", "5432,5432", "db");
+        verifyUrl(drv, "jdbc:postgresql://[::1]:5740,127.0.0.1:5432/db", "[::1],127.0.0.1", "5740,5432",
+                "db");
+    }
 
-  private void verifyUrl(Driver drv, String url, String hosts, String ports, String dbName)
-      throws Exception {
-    assertTrue(drv.acceptsURL(url), url);
-    Method parseMethod =
-        drv.getClass().getDeclaredMethod("parseURL", String.class, Properties.class);
-    parseMethod.setAccessible(true);
-    Properties p = (Properties) parseMethod.invoke(drv, url, null);
-    assertEquals(dbName, p.getProperty(PGProperty.PG_DBNAME.getName()), url);
-    assertEquals(hosts, p.getProperty(PGProperty.PG_HOST.getName()), url);
-    assertEquals(ports, p.getProperty(PGProperty.PG_PORT.getName()), url);
-  }
+    private void verifyUrl(Driver drv, String url, String hosts, String ports, String dbName)
+            throws Exception {
+        assertTrue(drv.acceptsURL(url), url);
+        Method parseMethod =
+                drv.getClass().getDeclaredMethod("parseURL", String.class, Properties.class);
+        parseMethod.setAccessible(true);
+        Properties p = (Properties) parseMethod.invoke(drv, url, null);
+        assertEquals(dbName, p.getProperty(PGProperty.PG_DBNAME.getName()), url);
+        assertEquals(hosts, p.getProperty(PGProperty.PG_HOST.getName()), url);
+        assertEquals(ports, p.getProperty(PGProperty.PG_PORT.getName()), url);
+    }
 
-  /**
-   * Tests the connect method by connecting to the test database.
-   */
-  @Test
-  void connect() throws Exception {
-    TestUtil.initDriver(); // Set up log levels, etc.
+    /**
+     * Tests the connect method by connecting to the test database.
+     */
+    @Test
+    void connect() throws Exception {
+        TestUtil.initDriver(); // Set up log levels, etc.
 
-    // Test with the url, username & password
-    Connection con =
-        DriverManager.getConnection(TestUtil.getURL(), TestUtil.getUser(), TestUtil.getPassword());
-    assertNotNull(con);
-    con.close();
-
-    // Test with the username in the url
-    con = DriverManager.getConnection(
-        TestUtil.getURL()
-            + "&user=" + URLCoder.encode(TestUtil.getUser())
-            + "&password=" + URLCoder.encode(TestUtil.getPassword()));
-    assertNotNull(con);
-    con.close();
-
-    // Test with failover url
-  }
-
-  /**
-   * Tests the connect method by connecting to the test database.
-   */
-  @Test
-  void connectService() throws Exception {
-    TestUtil.initDriver(); // Set up log levels, etc.
-    String wrongPort = "65536";
-
-    // Create temporary pg_service.conf file
-    Path tempDirWithPrefix = Files.createTempDirectory("junit");
-    Path tempFile = Files.createTempFile(tempDirWithPrefix, "pg_service", "conf");
-    try {
-      // Write service section
-      String testService1 = "testService1"; // with correct port
-      String testService2 = "testService2"; // with wrong port
-      try (PrintStream ps = new PrintStream(Files.newOutputStream(tempFile))) {
-        ps.printf("[%s]%nhost=%s%nport=%s%ndbname=%s%nuser=%s%npassword=%s%n", testService1, TestUtil.getServer(), TestUtil.getPort(), TestUtil.getDatabase(), TestUtil.getUser(), TestUtil.getPassword());
-        ps.printf("[%s]%nhost=%s%nport=%s%ndbname=%s%nuser=%s%npassword=%s%n", testService2, TestUtil.getServer(), wrongPort, TestUtil.getDatabase(), TestUtil.getUser(), TestUtil.getPassword());
-      }
-      // consume service
-      Resources.with(
-          new EnvironmentVariables(PGEnvironment.PGSERVICEFILE.getName(), tempFile.toString(), PGEnvironment.PGSYSCONFDIR.getName(), ""),
-          new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGSERVICEFILE.getName(), "", "user.home", "/tmp/dir-nonexistent")
-      ).execute(() -> {
-        //
-        // testing that properties overriding priority is correct (POSITIVE cases)
-        //
-        // service=correct port
-        Connection con = DriverManager.getConnection(String.format("jdbc:postgresql://?service=%s", testService1));
-        assertNotNull(con);
-        con.close();
-        // service=wrong port; Properties=correct port
-        Properties info = new Properties();
-        info.setProperty("PGPORT", String.valueOf(TestUtil.getPort()));
-        con = DriverManager.getConnection(String.format("jdbc:postgresql://?service=%s", testService2), info);
-        assertNotNull(con);
-        con.close();
-        // service=wrong port; Properties=wrong port; URL port=correct
-        info.setProperty("PGPORT", wrongPort);
-        con = DriverManager.getConnection(String.format("jdbc:postgresql://:%s/?service=%s", TestUtil.getPort(), testService2), info);
-        assertNotNull(con);
-        con.close();
-        // service=wrong port; Properties=wrong port; URL port=wrong; URL argument=correct port
-        con = DriverManager.getConnection(String.format("jdbc:postgresql://:%s/?service=%s&port=%s", wrongPort, testService2, TestUtil.getPort()), info);
+        // Test with the url, username & password
+        Connection con =
+                DriverManager.getConnection(TestUtil.getURL(), TestUtil.getUser(), TestUtil.getPassword());
         assertNotNull(con);
         con.close();
 
-        //
-        // testing that properties overriding priority is correct (NEGATIVE cases)
-        //
-        // service=wrong port
+        // Test with the username in the url
+        con = DriverManager.getConnection(
+                TestUtil.getURL()
+                        + "&user=" + URLCoder.encode(TestUtil.getUser())
+                        + "&password=" + URLCoder.encode(TestUtil.getPassword()));
+        assertNotNull(con);
+        con.close();
+
+        // Test with failover url
+    }
+
+    /**
+     * Tests the connect method by connecting to the test database.
+     */
+    @Test
+    void connectService() throws Exception {
+        TestUtil.initDriver(); // Set up log levels, etc.
+        String wrongPort = "65536";
+
+        // Create temporary pg_service.conf file
+        Path tempDirWithPrefix = Files.createTempDirectory("junit");
+        Path tempFile = Files.createTempFile(tempDirWithPrefix, "pg_service", "conf");
         try {
-          con = DriverManager.getConnection(String.format("jdbc:postgresql://?service=%s", testService2));
-          fail("Expected an SQLException because port is out of range");
-        } catch (SQLException e) {
-          // Expected exception.
+            // Write service section
+            String testService1 = "testService1"; // with correct port
+            String testService2 = "testService2"; // with wrong port
+            try (PrintStream ps = new PrintStream(Files.newOutputStream(tempFile))) {
+                ps.printf("[%s]%nhost=%s%nport=%s%ndbname=%s%nuser=%s%npassword=%s%n", testService1, TestUtil.getServer(), TestUtil.getPort(), TestUtil.getDatabase(), TestUtil.getUser(), TestUtil.getPassword());
+                ps.printf("[%s]%nhost=%s%nport=%s%ndbname=%s%nuser=%s%npassword=%s%n", testService2, TestUtil.getServer(), wrongPort, TestUtil.getDatabase(), TestUtil.getUser(), TestUtil.getPassword());
+            }
+            // consume service
+            Resources.with(
+                    new EnvironmentVariables(PGEnvironment.PGSERVICEFILE.getName(), tempFile.toString(), PGEnvironment.PGSYSCONFDIR.getName(), ""),
+                    new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGSERVICEFILE.getName(), "", "user.home", "/tmp/dir-nonexistent")
+            ).execute(() -> {
+                //
+                // testing that properties overriding priority is correct (POSITIVE cases)
+                //
+                // service=correct port
+                Connection con = DriverManager.getConnection(String.format("jdbc:postgresql://?service=%s", testService1));
+                assertNotNull(con);
+                con.close();
+                // service=wrong port; Properties=correct port
+                Properties info = new Properties();
+                info.setProperty("PGPORT", String.valueOf(TestUtil.getPort()));
+                con = DriverManager.getConnection(String.format("jdbc:postgresql://?service=%s", testService2), info);
+                assertNotNull(con);
+                con.close();
+                // service=wrong port; Properties=wrong port; URL port=correct
+                info.setProperty("PGPORT", wrongPort);
+                con = DriverManager.getConnection(String.format("jdbc:postgresql://:%s/?service=%s", TestUtil.getPort(), testService2), info);
+                assertNotNull(con);
+                con.close();
+                // service=wrong port; Properties=wrong port; URL port=wrong; URL argument=correct port
+                con = DriverManager.getConnection(String.format("jdbc:postgresql://:%s/?service=%s&port=%s", wrongPort, testService2, TestUtil.getPort()), info);
+                assertNotNull(con);
+                con.close();
+
+                //
+                // testing that properties overriding priority is correct (NEGATIVE cases)
+                //
+                // service=wrong port
+                try {
+                    con = DriverManager.getConnection(String.format("jdbc:postgresql://?service=%s", testService2));
+                    fail("Expected an SQLException because port is out of range");
+                } catch (SQLException e) {
+                    // Expected exception.
+                }
+                // service=correct port; Properties=wrong port
+                info.setProperty("PGPORT", wrongPort);
+                try {
+                    con = DriverManager.getConnection(String.format("jdbc:postgresql://?service=%s", testService1), info);
+                    fail("Expected an SQLException because port is out of range");
+                } catch (SQLException e) {
+                    // Expected exception.
+                }
+                // service=correct port; Properties=correct port; URL port=wrong
+                info.setProperty("PGPORT", String.valueOf(TestUtil.getPort()));
+                try {
+                    con = DriverManager.getConnection(String.format("jdbc:postgresql://:%s/?service=%s", wrongPort, testService1), info);
+                    fail("Expected an SQLException because port is out of range");
+                } catch (SQLException e) {
+                    // Expected exception.
+                }
+                // service=correct port; Properties=correct port; URL port=correct; URL argument=wrong port
+                try {
+                    con = DriverManager.getConnection(String.format("jdbc:postgresql://:%s/?service=%s&port=%s", TestUtil.getPort(), testService1, wrongPort), info);
+                    fail("Expected an SQLException because port is out of range");
+                } catch (SQLException e) {
+                    // Expected exception.
+                }
+            });
+        } finally {
+            // cleanup
+            Files.delete(tempFile);
+            Files.delete(tempDirWithPrefix);
         }
-        // service=correct port; Properties=wrong port
-        info.setProperty("PGPORT", wrongPort);
+    }
+
+    /**
+     * Tests the password by connecting to the test database.
+     * password from .pgpass (correct)
+     */
+    @Test
+    void connectPassword01() throws Exception {
+        TestUtil.initDriver(); // Set up log levels, etc.
+
+        // Create temporary .pgpass file
+        Path tempDirWithPrefix = Files.createTempDirectory("junit");
+        Path tempPgPassFile = Files.createTempFile(tempDirWithPrefix, "pgpass", "conf");
         try {
-          con = DriverManager.getConnection(String.format("jdbc:postgresql://?service=%s", testService1), info);
-          fail("Expected an SQLException because port is out of range");
-        } catch (SQLException e) {
-          // Expected exception.
+            try (PrintStream psPass = new PrintStream(Files.newOutputStream(tempPgPassFile))) {
+                psPass.printf("%s:%s:%s:%s:%s%n", TestUtil.getServer(), TestUtil.getPort(), TestUtil.getDatabase(), TestUtil.getUser(), TestUtil.getPassword());
+            }
+            // ignore pg_service.conf, use .pgpass
+            Resources.with(
+                    new EnvironmentVariables(PGEnvironment.PGSERVICEFILE.getName(), "", PGEnvironment.PGSYSCONFDIR.getName(), ""),
+                    new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGSERVICEFILE.getName(), "", "user.home", "/tmp/dir-nonexistent",
+                            PGEnvironment.ORG_POSTGRESQL_PGPASSFILE.getName(), tempPgPassFile.toString())
+            ).execute(() -> {
+                // password from .pgpass (correct)
+                Connection con = DriverManager.getConnection(String.format("jdbc:postgresql://%s:%s/%s?user=%s", TestUtil.getServer(), TestUtil.getPort(), TestUtil.getDatabase(), TestUtil.getUser()));
+                assertNotNull(con);
+                con.close();
+            });
+        } finally {
+            // cleanup
+            Files.delete(tempPgPassFile);
+            Files.delete(tempDirWithPrefix);
         }
-        // service=correct port; Properties=correct port; URL port=wrong
-        info.setProperty("PGPORT", String.valueOf(TestUtil.getPort()));
+    }
+
+    /**
+     * Tests the password by connecting to the test database.
+     * password from service (correct) and .pgpass (wrong)
+     */
+    @Test
+    void connectPassword02() throws Exception {
+        TestUtil.initDriver(); // Set up log levels, etc.
+        String wrongPassword = "random wrong";
+
+        // Create temporary pg_service.conf and .pgpass file
+        Path tempDirWithPrefix = Files.createTempDirectory("junit");
+        Path tempPgServiceFile = Files.createTempFile(tempDirWithPrefix, "pg_service", "conf");
+        Path tempPgPassFile = Files.createTempFile(tempDirWithPrefix, "pgpass", "conf");
         try {
-          con = DriverManager.getConnection(String.format("jdbc:postgresql://:%s/?service=%s", wrongPort, testService1), info);
-          fail("Expected an SQLException because port is out of range");
-        } catch (SQLException e) {
-          // Expected exception.
+            // Write service section
+            String testService1 = "testService1";
+            try (PrintStream psService = new PrintStream(Files.newOutputStream(tempPgServiceFile));
+                 PrintStream psPass = new PrintStream(Files.newOutputStream(tempPgPassFile))) {
+                psService.printf("[%s]%nhost=%s%nport=%s%ndbname=%s%nuser=%s%npassword=%s%n", testService1, TestUtil.getServer(), TestUtil.getPort(), TestUtil.getDatabase(), TestUtil.getUser(), TestUtil.getPassword());
+                psPass.printf("%s:%s:%s:%s:%s%n", TestUtil.getServer(), TestUtil.getPort(), TestUtil.getDatabase(), TestUtil.getUser(), wrongPassword);
+            }
+            // ignore pg_service.conf, use .pgpass
+            Resources.with(
+                    new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGSERVICEFILE.getName(), tempPgServiceFile.toString(), PGEnvironment.ORG_POSTGRESQL_PGPASSFILE.getName(), tempPgPassFile.toString())
+            ).execute(() -> {
+                // password from service (correct) and .pgpass (wrong)
+                Connection con = DriverManager.getConnection(String.format("jdbc:postgresql://?service=%s", testService1));
+                assertNotNull(con);
+                con.close();
+            });
+        } finally {
+            // cleanup
+            Files.delete(tempPgPassFile);
+            Files.delete(tempPgServiceFile);
+            Files.delete(tempDirWithPrefix);
         }
-        // service=correct port; Properties=correct port; URL port=correct; URL argument=wrong port
+    }
+
+    /**
+     * Tests the password by connecting to the test database.
+     * password from java property (correct) and service (wrong) and .pgpass (wrong)
+     */
+    @Test
+    void connectPassword03() throws Exception {
+        TestUtil.initDriver(); // Set up log levels, etc.
+        String wrongPassword = "random wrong";
+
+        // Create temporary pg_service.conf and .pgpass file
+        Path tempDirWithPrefix = Files.createTempDirectory("junit");
+        Path tempPgServiceFile = Files.createTempFile(tempDirWithPrefix, "pg_service", "conf");
+        Path tempPgPassFile = Files.createTempFile(tempDirWithPrefix, "pgpass", "conf");
         try {
-          con = DriverManager.getConnection(String.format("jdbc:postgresql://:%s/?service=%s&port=%s", TestUtil.getPort(), testService1, wrongPort), info);
-          fail("Expected an SQLException because port is out of range");
-        } catch (SQLException e) {
-          // Expected exception.
+            // Write service section
+            String testService1 = "testService1";
+            try (PrintStream psService = new PrintStream(Files.newOutputStream(tempPgServiceFile));
+                 PrintStream psPass = new PrintStream(Files.newOutputStream(tempPgPassFile))) {
+                psService.printf("[%s]%nhost=%s%nport=%s%ndbname=%s%nuser=%s%npassword=%s%n", testService1, TestUtil.getServer(), TestUtil.getPort(), TestUtil.getDatabase(), TestUtil.getUser(), wrongPassword);
+                psPass.printf("%s:%s:%s:%s:%s%n", TestUtil.getServer(), TestUtil.getPort(), TestUtil.getDatabase(), TestUtil.getUser(), wrongPassword);
+            }
+            // ignore pg_service.conf, use .pgpass
+            Resources.with(
+                    new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGSERVICEFILE.getName(), tempPgServiceFile.toString(), PGEnvironment.ORG_POSTGRESQL_PGPASSFILE.getName(), tempPgPassFile.toString())
+            ).execute(() -> {
+                // password from java property (correct) and service (wrong) and .pgpass (wrong)
+                Properties info = new Properties();
+                PGProperty.PASSWORD.set(info, TestUtil.getPassword());
+                Connection con = DriverManager.getConnection(String.format("jdbc:postgresql://?service=%s", testService1), info);
+                assertNotNull(con);
+                con.close();
+            });
+        } finally {
+            // cleanup
+            Files.delete(tempPgPassFile);
+            Files.delete(tempPgServiceFile);
+            Files.delete(tempDirWithPrefix);
         }
-      });
-    } finally {
-      // cleanup
-      Files.delete(tempFile);
-      Files.delete(tempDirWithPrefix);
     }
-  }
 
-  /**
-   * Tests the password by connecting to the test database.
-   * password from .pgpass (correct)
-   */
-  @Test
-  void connectPassword01() throws Exception {
-    TestUtil.initDriver(); // Set up log levels, etc.
+    /**
+     * Tests the password by connecting to the test database.
+     * password from URL parameter (correct) and java property (wrong) and service (wrong) and .pgpass (wrong)
+     */
+    @Test
+    void connectPassword04() throws Exception {
+        TestUtil.initDriver(); // Set up log levels, etc.
+        String wrongPassword = "random wrong";
 
-    // Create temporary .pgpass file
-    Path tempDirWithPrefix = Files.createTempDirectory("junit");
-    Path tempPgPassFile = Files.createTempFile(tempDirWithPrefix, "pgpass", "conf");
-    try {
-      try (PrintStream psPass = new PrintStream(Files.newOutputStream(tempPgPassFile))) {
-        psPass.printf("%s:%s:%s:%s:%s%n", TestUtil.getServer(), TestUtil.getPort(), TestUtil.getDatabase(), TestUtil.getUser(), TestUtil.getPassword());
-      }
-      // ignore pg_service.conf, use .pgpass
-      Resources.with(
-          new EnvironmentVariables(PGEnvironment.PGSERVICEFILE.getName(), "", PGEnvironment.PGSYSCONFDIR.getName(), ""),
-          new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGSERVICEFILE.getName(), "", "user.home", "/tmp/dir-nonexistent",
-              PGEnvironment.ORG_POSTGRESQL_PGPASSFILE.getName(), tempPgPassFile.toString())
-      ).execute(() -> {
-        // password from .pgpass (correct)
-        Connection con = DriverManager.getConnection(String.format("jdbc:postgresql://%s:%s/%s?user=%s", TestUtil.getServer(), TestUtil.getPort(), TestUtil.getDatabase(), TestUtil.getUser()));
-        assertNotNull(con);
-        con.close();
-      });
-    } finally {
-      // cleanup
-      Files.delete(tempPgPassFile);
-      Files.delete(tempDirWithPrefix);
-    }
-  }
-
-  /**
-   * Tests the password by connecting to the test database.
-   * password from service (correct) and .pgpass (wrong)
-   */
-  @Test
-  void connectPassword02() throws Exception {
-    TestUtil.initDriver(); // Set up log levels, etc.
-    String wrongPassword = "random wrong";
-
-    // Create temporary pg_service.conf and .pgpass file
-    Path tempDirWithPrefix = Files.createTempDirectory("junit");
-    Path tempPgServiceFile = Files.createTempFile(tempDirWithPrefix, "pg_service", "conf");
-    Path tempPgPassFile = Files.createTempFile(tempDirWithPrefix, "pgpass", "conf");
-    try {
-      // Write service section
-      String testService1 = "testService1";
-      try (PrintStream psService = new PrintStream(Files.newOutputStream(tempPgServiceFile));
-           PrintStream psPass = new PrintStream(Files.newOutputStream(tempPgPassFile))) {
-        psService.printf("[%s]%nhost=%s%nport=%s%ndbname=%s%nuser=%s%npassword=%s%n", testService1, TestUtil.getServer(), TestUtil.getPort(), TestUtil.getDatabase(), TestUtil.getUser(), TestUtil.getPassword());
-        psPass.printf("%s:%s:%s:%s:%s%n", TestUtil.getServer(), TestUtil.getPort(), TestUtil.getDatabase(), TestUtil.getUser(), wrongPassword);
-      }
-      // ignore pg_service.conf, use .pgpass
-      Resources.with(
-          new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGSERVICEFILE.getName(), tempPgServiceFile.toString(), PGEnvironment.ORG_POSTGRESQL_PGPASSFILE.getName(), tempPgPassFile.toString())
-      ).execute(() -> {
-        // password from service (correct) and .pgpass (wrong)
-        Connection con = DriverManager.getConnection(String.format("jdbc:postgresql://?service=%s", testService1));
-        assertNotNull(con);
-        con.close();
-      });
-    } finally {
-      // cleanup
-      Files.delete(tempPgPassFile);
-      Files.delete(tempPgServiceFile);
-      Files.delete(tempDirWithPrefix);
-    }
-  }
-
-  /**
-   * Tests the password by connecting to the test database.
-   * password from java property (correct) and service (wrong) and .pgpass (wrong)
-   */
-  @Test
-  void connectPassword03() throws Exception {
-    TestUtil.initDriver(); // Set up log levels, etc.
-    String wrongPassword = "random wrong";
-
-    // Create temporary pg_service.conf and .pgpass file
-    Path tempDirWithPrefix = Files.createTempDirectory("junit");
-    Path tempPgServiceFile = Files.createTempFile(tempDirWithPrefix, "pg_service", "conf");
-    Path tempPgPassFile = Files.createTempFile(tempDirWithPrefix, "pgpass", "conf");
-    try {
-      // Write service section
-      String testService1 = "testService1";
-      try (PrintStream psService = new PrintStream(Files.newOutputStream(tempPgServiceFile));
-           PrintStream psPass = new PrintStream(Files.newOutputStream(tempPgPassFile))) {
-        psService.printf("[%s]%nhost=%s%nport=%s%ndbname=%s%nuser=%s%npassword=%s%n", testService1, TestUtil.getServer(), TestUtil.getPort(), TestUtil.getDatabase(), TestUtil.getUser(), wrongPassword);
-        psPass.printf("%s:%s:%s:%s:%s%n", TestUtil.getServer(), TestUtil.getPort(), TestUtil.getDatabase(), TestUtil.getUser(), wrongPassword);
-      }
-      // ignore pg_service.conf, use .pgpass
-      Resources.with(
-          new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGSERVICEFILE.getName(), tempPgServiceFile.toString(), PGEnvironment.ORG_POSTGRESQL_PGPASSFILE.getName(), tempPgPassFile.toString())
-      ).execute(() -> {
-        // password from java property (correct) and service (wrong) and .pgpass (wrong)
-        Properties info = new Properties();
-        PGProperty.PASSWORD.set(info, TestUtil.getPassword());
-        Connection con = DriverManager.getConnection(String.format("jdbc:postgresql://?service=%s", testService1), info);
-        assertNotNull(con);
-        con.close();
-      });
-    } finally {
-      // cleanup
-      Files.delete(tempPgPassFile);
-      Files.delete(tempPgServiceFile);
-      Files.delete(tempDirWithPrefix);
-    }
-  }
-
-  /**
-   * Tests the password by connecting to the test database.
-   * password from URL parameter (correct) and java property (wrong) and service (wrong) and .pgpass (wrong)
-   */
-  @Test
-  void connectPassword04() throws Exception {
-    TestUtil.initDriver(); // Set up log levels, etc.
-    String wrongPassword = "random wrong";
-
-    // Create temporary pg_service.conf and .pgpass file
-    Path tempDirWithPrefix = Files.createTempDirectory("junit");
-    Path tempPgServiceFile = Files.createTempFile(tempDirWithPrefix, "pg_service", "conf");
-    Path tempPgPassFile = Files.createTempFile(tempDirWithPrefix, "pgpass", "conf");
-    try {
-      // Write service section
-      String testService1 = "testService1";
-      try (PrintStream psService = new PrintStream(Files.newOutputStream(tempPgServiceFile));
-           PrintStream psPass = new PrintStream(Files.newOutputStream(tempPgPassFile))) {
-        psService.printf("[%s]%nhost=%s%nport=%s%ndbname=%s%nuser=%s%npassword=%s%n", testService1, TestUtil.getServer(), TestUtil.getPort(), TestUtil.getDatabase(), TestUtil.getUser(), wrongPassword);
-        psPass.printf("%s:%s:%s:%s:%s%n", TestUtil.getServer(), TestUtil.getPort(), TestUtil.getDatabase(), TestUtil.getUser(), wrongPassword);
-      }
-      // ignore pg_service.conf, use .pgpass
-      Resources.with(
-          new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGSERVICEFILE.getName(), tempPgServiceFile.toString(), PGEnvironment.ORG_POSTGRESQL_PGPASSFILE.getName(), tempPgPassFile.toString())
-      ).execute(() -> {
-        //
-        Properties info = new Properties();
-        PGProperty.PASSWORD.set(info, wrongPassword);
-        Connection con = DriverManager.getConnection(String.format("jdbc:postgresql://?service=%s&password=%s", testService1, TestUtil.getPassword()), info);
-        assertNotNull(con);
-        con.close();
-      });
-    } finally {
-      // cleanup
-      Files.delete(tempPgPassFile);
-      Files.delete(tempPgServiceFile);
-      Files.delete(tempDirWithPrefix);
-    }
-  }
-
-  /**
-   * Tests that pgjdbc performs connection failover if unable to connect to the first host in the
-   * URL.
-   *
-   * @throws Exception if something wrong happens
-   */
-  @Test
-  void connectFailover() throws Exception {
-    String url = "jdbc:postgresql://invalidhost.not.here," + TestUtil.getServer() + ":"
-        + TestUtil.getPort() + "/" + TestUtil.getDatabase() + "?connectTimeout=5";
-    Connection con = DriverManager.getConnection(url, TestUtil.getUser(), TestUtil.getPassword());
-    assertNotNull(con);
-    con.close();
-  }
-
-  /*
-   * Test that the readOnly property works.
-   */
-  @Test
-  void readOnly() throws Exception {
-    TestUtil.initDriver(); // Set up log levels, etc.
-
-    Connection con = DriverManager.getConnection(TestUtil.getURL() + "&readOnly=true",
-        TestUtil.getUser(), TestUtil.getPassword());
-    assertNotNull(con);
-    assertTrue(con.isReadOnly());
-    con.close();
-
-    con = DriverManager.getConnection(TestUtil.getURL() + "&readOnly=false", TestUtil.getUser(),
-        TestUtil.getPassword());
-    assertNotNull(con);
-    assertFalse(con.isReadOnly());
-    con.close();
-
-    con =
-        DriverManager.getConnection(TestUtil.getURL(), TestUtil.getUser(), TestUtil.getPassword());
-    assertNotNull(con);
-    assertFalse(con.isReadOnly());
-    con.close();
-  }
-
-  @Test
-  void registration() throws Exception {
-    TestUtil.initDriver();
-
-    // Driver is initially registered because it is automatically done when class is loaded
-    assertTrue(Driver.isRegistered());
-
-    ArrayList<java.sql.Driver> drivers = Collections.list(DriverManager.getDrivers());
-    searchInstanceOf: {
-
-      for (java.sql.Driver driver : drivers) {
-        if (driver instanceof Driver) {
-          break searchInstanceOf;
+        // Create temporary pg_service.conf and .pgpass file
+        Path tempDirWithPrefix = Files.createTempDirectory("junit");
+        Path tempPgServiceFile = Files.createTempFile(tempDirWithPrefix, "pg_service", "conf");
+        Path tempPgPassFile = Files.createTempFile(tempDirWithPrefix, "pgpass", "conf");
+        try {
+            // Write service section
+            String testService1 = "testService1";
+            try (PrintStream psService = new PrintStream(Files.newOutputStream(tempPgServiceFile));
+                 PrintStream psPass = new PrintStream(Files.newOutputStream(tempPgPassFile))) {
+                psService.printf("[%s]%nhost=%s%nport=%s%ndbname=%s%nuser=%s%npassword=%s%n", testService1, TestUtil.getServer(), TestUtil.getPort(), TestUtil.getDatabase(), TestUtil.getUser(), wrongPassword);
+                psPass.printf("%s:%s:%s:%s:%s%n", TestUtil.getServer(), TestUtil.getPort(), TestUtil.getDatabase(), TestUtil.getUser(), wrongPassword);
+            }
+            // ignore pg_service.conf, use .pgpass
+            Resources.with(
+                    new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGSERVICEFILE.getName(), tempPgServiceFile.toString(), PGEnvironment.ORG_POSTGRESQL_PGPASSFILE.getName(), tempPgPassFile.toString())
+            ).execute(() -> {
+                //
+                Properties info = new Properties();
+                PGProperty.PASSWORD.set(info, wrongPassword);
+                Connection con = DriverManager.getConnection(String.format("jdbc:postgresql://?service=%s&password=%s", testService1, TestUtil.getPassword()), info);
+                assertNotNull(con);
+                con.close();
+            });
+        } finally {
+            // cleanup
+            Files.delete(tempPgPassFile);
+            Files.delete(tempPgServiceFile);
+            Files.delete(tempDirWithPrefix);
         }
-      }
-      fail("Driver has not been found in DriverManager's list but it should be registered");
     }
 
-    // Deregister the driver
-    Driver.deregister();
-    assertFalse(Driver.isRegistered());
-
-    drivers = Collections.list(DriverManager.getDrivers());
-    for (java.sql.Driver driver : drivers) {
-      if (driver instanceof Driver) {
-        fail("Driver should be deregistered but it is still present in DriverManager's list");
-      }
-    }
-
-    // register again the driver
-    Driver.register();
-    assertTrue(Driver.isRegistered());
-
-    drivers = Collections.list(DriverManager.getDrivers());
-    for (java.sql.Driver driver : drivers) {
-      if (driver instanceof Driver) {
-        return;
-      }
-    }
-    fail("Driver has not been found in DriverManager's list but it should be registered");
-  }
-
-  @Test
-  void systemErrIsNotClosedWhenCreatedMultipleConnections() throws Exception {
-    TestUtil.initDriver();
-    PrintStream err = System.err;
-    PrintStream buffer = new PrintStream(new ByteArrayOutputStream());
-    System.setErr(buffer);
-    try {
-      Connection con = DriverManager.getConnection(TestUtil.getURL(), TestUtil.getUser(), TestUtil.getPassword());
-      try {
+    /**
+     * Tests that pgjdbc performs connection failover if unable to connect to the first host in the
+     * URL.
+     *
+     * @throws Exception if something wrong happens
+     */
+    @Test
+    void connectFailover() throws Exception {
+        String url = "jdbc:postgresql://invalidhost.not.here," + TestUtil.getServer() + ":"
+                + TestUtil.getPort() + "/" + TestUtil.getDatabase() + "?connectTimeout=5";
+        Connection con = DriverManager.getConnection(url, TestUtil.getUser(), TestUtil.getPassword());
         assertNotNull(con);
-      } finally {
         con.close();
-      }
-      con = DriverManager.getConnection(TestUtil.getURL(), TestUtil.getUser(), TestUtil.getPassword());
-      try {
-        assertNotNull(con);
-        System.err.println();
-        assertFalse(System.err.checkError(), "The System.err should not be closed.");
-      } finally {
-        con.close();
-      }
-    } finally {
-      System.setErr(err);
     }
-  }
 
-  private void setProperty(String key, String value) {
-    if (value == null) {
-      System.clearProperty(key);
-    } else {
-      System.setProperty(key, value);
+    /*
+     * Test that the readOnly property works.
+     */
+    @Test
+    void readOnly() throws Exception {
+        TestUtil.initDriver(); // Set up log levels, etc.
+
+        Connection con = DriverManager.getConnection(TestUtil.getURL() + "&readOnly=true",
+                TestUtil.getUser(), TestUtil.getPassword());
+        assertNotNull(con);
+        assertTrue(con.isReadOnly());
+        con.close();
+
+        con = DriverManager.getConnection(TestUtil.getURL() + "&readOnly=false", TestUtil.getUser(),
+                TestUtil.getPassword());
+        assertNotNull(con);
+        assertFalse(con.isReadOnly());
+        con.close();
+
+        con =
+                DriverManager.getConnection(TestUtil.getURL(), TestUtil.getUser(), TestUtil.getPassword());
+        assertNotNull(con);
+        assertFalse(con.isReadOnly());
+        con.close();
+    }
+
+    @Test
+    void registration() throws Exception {
+        TestUtil.initDriver();
+
+        // Driver is initially registered because it is automatically done when class is loaded
+        assertTrue(Driver.isRegistered());
+
+        ArrayList<java.sql.Driver> drivers = Collections.list(DriverManager.getDrivers());
+        searchInstanceOf:
+        {
+
+            for (java.sql.Driver driver : drivers) {
+                if (driver instanceof Driver) {
+                    break searchInstanceOf;
+                }
+            }
+            fail("Driver has not been found in DriverManager's list but it should be registered");
+        }
+
+        // Deregister the driver
+        Driver.deregister();
+        assertFalse(Driver.isRegistered());
+
+        drivers = Collections.list(DriverManager.getDrivers());
+        for (java.sql.Driver driver : drivers) {
+            if (driver instanceof Driver) {
+                fail("Driver should be deregistered but it is still present in DriverManager's list");
+            }
+        }
+
+        // register again the driver
+        Driver.register();
+        assertTrue(Driver.isRegistered());
+
+        drivers = Collections.list(DriverManager.getDrivers());
+        for (java.sql.Driver driver : drivers) {
+            if (driver instanceof Driver) {
+                return;
+            }
+        }
+        fail("Driver has not been found in DriverManager's list but it should be registered");
+    }
+
+    @Test
+    void systemErrIsNotClosedWhenCreatedMultipleConnections() throws Exception {
+        TestUtil.initDriver();
+        PrintStream err = System.err;
+        PrintStream buffer = new PrintStream(new ByteArrayOutputStream());
+        System.setErr(buffer);
+        try {
+            Connection con = DriverManager.getConnection(TestUtil.getURL(), TestUtil.getUser(), TestUtil.getPassword());
+            try {
+                assertNotNull(con);
+            } finally {
+                con.close();
+            }
+            con = DriverManager.getConnection(TestUtil.getURL(), TestUtil.getUser(), TestUtil.getPassword());
+            try {
+                assertNotNull(con);
+                System.err.println();
+                assertFalse(System.err.checkError(), "The System.err should not be closed.");
+            } finally {
+                con.close();
+            }
+        } finally {
+            System.setErr(err);
+        }
+    }
+
+    private void setProperty(String key, String value) {
+        if (value == null) {
+            System.clearProperty(key);
+        } else {
+            System.setProperty(key, value);
+        }
     }
-  }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/EncodingTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/EncodingTest.java
index 5ad1ffb..af99486 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/EncodingTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/EncodingTest.java
@@ -5,53 +5,50 @@
 
 package org.postgresql.test.jdbc2;
 
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertTrue;
-
-import org.postgresql.core.Encoding;
-
-import org.junit.jupiter.api.Test;
-
 import java.io.ByteArrayInputStream;
 import java.io.InputStream;
 import java.io.Reader;
 import java.util.Locale;
+import org.junit.jupiter.api.Test;
+import org.postgresql.core.Encoding;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 /**
  * Tests for the Encoding class.
  */
 class EncodingTest {
 
-  @Test
-  void creation() throws Exception {
-    Encoding encoding = Encoding.getDatabaseEncoding("UTF8");
-    assertEquals("UTF", encoding.name().substring(0, 3).toUpperCase(Locale.US));
-    encoding = Encoding.getDatabaseEncoding("SQL_ASCII");
-    assertTrue(encoding.name().toUpperCase(Locale.US).contains("ASCII"));
-    assertEquals(Encoding.defaultEncoding(), Encoding.getDatabaseEncoding("UNKNOWN"), "When encoding is unknown the default encoding should be used");
-  }
+    @Test
+    void creation() throws Exception {
+        Encoding encoding = Encoding.getDatabaseEncoding("UTF8");
+        assertEquals("UTF", encoding.name().substring(0, 3).toUpperCase(Locale.US));
+        encoding = Encoding.getDatabaseEncoding("SQL_ASCII");
+        assertTrue(encoding.name().toUpperCase(Locale.US).contains("ASCII"));
+        assertEquals(Encoding.defaultEncoding(), Encoding.getDatabaseEncoding("UNKNOWN"), "When encoding is unknown the default encoding should be used");
+    }
 
-  @Test
-  void transformations() throws Exception {
-    Encoding encoding = Encoding.getDatabaseEncoding("UTF8");
-    assertEquals("ab", encoding.decode(new byte[]{97, 98}));
+    @Test
+    void transformations() throws Exception {
+        Encoding encoding = Encoding.getDatabaseEncoding("UTF8");
+        assertEquals("ab", encoding.decode(new byte[]{97, 98}));
 
-    assertEquals(2, encoding.encode("ab").length);
-    assertEquals(97, encoding.encode("a")[0]);
-    assertEquals(98, encoding.encode("b")[0]);
+        assertEquals(2, encoding.encode("ab").length);
+        assertEquals(97, encoding.encode("a")[0]);
+        assertEquals(98, encoding.encode("b")[0]);
 
-    encoding = Encoding.defaultEncoding();
-    assertEquals("a".getBytes()[0], encoding.encode("a")[0]);
-    assertEquals(new String(new byte[]{97}), encoding.decode(new byte[]{97}));
-  }
+        encoding = Encoding.defaultEncoding();
+        assertEquals("a".getBytes()[0], encoding.encode("a")[0]);
+        assertEquals(new String(new byte[]{97}), encoding.decode(new byte[]{97}));
+    }
 
-  @Test
-  void reader() throws Exception {
-    Encoding encoding = Encoding.getDatabaseEncoding("SQL_ASCII");
-    InputStream stream = new ByteArrayInputStream(new byte[]{97, 98});
-    Reader reader = encoding.getDecodingReader(stream);
-    assertEquals(97, reader.read());
-    assertEquals(98, reader.read());
-    assertEquals(-1, reader.read());
-  }
+    @Test
+    void reader() throws Exception {
+        Encoding encoding = Encoding.getDatabaseEncoding("SQL_ASCII");
+        InputStream stream = new ByteArrayInputStream(new byte[]{97, 98});
+        Reader reader = encoding.getDecodingReader(stream);
+        assertEquals(97, reader.read());
+        assertEquals(98, reader.read());
+        assertEquals(-1, reader.read());
+    }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/EnumTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/EnumTest.java
index b6bc331..c3c9f22 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/EnumTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/EnumTest.java
@@ -5,13 +5,6 @@
 
 package org.postgresql.test.jdbc2;
 
-import org.postgresql.test.TestUtil;
-
-import org.junit.Assert;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-
 import java.sql.Array;
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
@@ -19,78 +12,83 @@ import java.sql.SQLException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.postgresql.test.TestUtil;
 
 @RunWith(Parameterized.class)
 public class EnumTest extends BaseTest4 {
-  public EnumTest(BinaryMode binaryMode) {
-    setBinaryMode(binaryMode);
-  }
-
-  @Parameterized.Parameters(name = "binary = {0}")
-  public static Iterable<Object[]> data() {
-    Collection<Object[]> ids = new ArrayList<>();
-    for (BinaryMode binaryMode : BinaryMode.values()) {
-      ids.add(new Object[]{binaryMode});
+    public EnumTest(BinaryMode binaryMode) {
+        setBinaryMode(binaryMode);
     }
-    return ids;
-  }
 
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-    TestUtil.createEnumType(con, "flag", "'duplicate','spike','new'");
-  }
+    @Parameterized.Parameters(name = "binary = {0}")
+    public static Iterable<Object[]> data() {
+        Collection<Object[]> ids = new ArrayList<>();
+        for (BinaryMode binaryMode : BinaryMode.values()) {
+            ids.add(new Object[]{binaryMode});
+        }
+        return ids;
+    }
 
-  @Override
-  public void tearDown() throws SQLException {
-    TestUtil.dropType(con, "flag");
-    super.tearDown();
-  }
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        TestUtil.createEnumType(con, "flag", "'duplicate','spike','new'");
+    }
 
-  @Test
-  public void enumArray() throws SQLException {
-    PreparedStatement pstmt = con.prepareStatement("SELECT '{duplicate,new}'::flag[]");
-    ResultSet rs = pstmt.executeQuery();
-    rs.next();
-    Array array = rs.getArray(1);
-    Assert.assertNotNull("{duplicate,new} should come up as a non-null array", array);
-    Object[] objectArray = (Object[]) array.getArray();
-    Assert.assertEquals(
-        "{duplicate,new} should come up as Java array with two entries",
-        "[duplicate, new]",
-        Arrays.deepToString(objectArray)
-    );
+    @Override
+    public void tearDown() throws SQLException {
+        TestUtil.dropType(con, "flag");
+        super.tearDown();
+    }
 
-    Assert.assertEquals(
-        "Enum array entries should come up as strings",
-        "java.lang.String, java.lang.String",
-        objectArray[0].getClass().getName() + ", " + objectArray[1].getClass().getName()
-    );
-    rs.close();
-    pstmt.close();
-  }
+    @Test
+    public void enumArray() throws SQLException {
+        PreparedStatement pstmt = con.prepareStatement("SELECT '{duplicate,new}'::flag[]");
+        ResultSet rs = pstmt.executeQuery();
+        rs.next();
+        Array array = rs.getArray(1);
+        Assert.assertNotNull("{duplicate,new} should come up as a non-null array", array);
+        Object[] objectArray = (Object[]) array.getArray();
+        Assert.assertEquals(
+                "{duplicate,new} should come up as Java array with two entries",
+                "[duplicate, new]",
+                Arrays.deepToString(objectArray)
+        );
 
-  @Test
-  public void enumArrayArray() throws SQLException {
-    String value = "{{duplicate,new},{spike,spike}}";
-    PreparedStatement pstmt = con.prepareStatement("SELECT '" + value + "'::flag[][]");
-    ResultSet rs = pstmt.executeQuery();
-    rs.next();
-    Array array = rs.getArray(1);
-    Assert.assertNotNull(value + " should come up as a non-null array", array);
-    Object[] objectArray = (Object[]) array.getArray();
-    Assert.assertEquals(
-        value + " should come up as Java array with two entries",
-        "[[duplicate, new], [spike, spike]]",
-        Arrays.deepToString(objectArray)
-    );
+        Assert.assertEquals(
+                "Enum array entries should come up as strings",
+                "java.lang.String, java.lang.String",
+                objectArray[0].getClass().getName() + ", " + objectArray[1].getClass().getName()
+        );
+        rs.close();
+        pstmt.close();
+    }
 
-    Assert.assertEquals(
-        "Enum array entries should come up as strings",
-        "[Ljava.lang.String;, [Ljava.lang.String;",
-        objectArray[0].getClass().getName() + ", " + objectArray[1].getClass().getName()
-    );
-    rs.close();
-    pstmt.close();
-  }
+    @Test
+    public void enumArrayArray() throws SQLException {
+        String value = "{{duplicate,new},{spike,spike}}";
+        PreparedStatement pstmt = con.prepareStatement("SELECT '" + value + "'::flag[][]");
+        ResultSet rs = pstmt.executeQuery();
+        rs.next();
+        Array array = rs.getArray(1);
+        Assert.assertNotNull(value + " should come up as a non-null array", array);
+        Object[] objectArray = (Object[]) array.getArray();
+        Assert.assertEquals(
+                value + " should come up as Java array with two entries",
+                "[[duplicate, new], [spike, spike]]",
+                Arrays.deepToString(objectArray)
+        );
+
+        Assert.assertEquals(
+                "Enum array entries should come up as strings",
+                "[Ljava.lang.String;, [Ljava.lang.String;",
+                objectArray[0].getClass().getName() + ", " + objectArray[1].getClass().getName()
+        );
+        rs.close();
+        pstmt.close();
+    }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/GeometricTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/GeometricTest.java
index 38f0794..99f8a4b 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/GeometricTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/GeometricTest.java
@@ -5,10 +5,16 @@
 
 package org.postgresql.test.jdbc2;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
 import org.postgresql.core.ServerVersion;
 import org.postgresql.geometric.PGbox;
 import org.postgresql.geometric.PGcircle;
@@ -20,18 +26,9 @@ import org.postgresql.geometric.PGpolygon;
 import org.postgresql.test.TestUtil;
 import org.postgresql.util.PGobject;
 import org.postgresql.util.PSQLException;
-
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 /*
  * Test case for geometric type I/O
@@ -39,152 +36,152 @@ import java.util.List;
 @RunWith(Parameterized.class)
 public class GeometricTest extends BaseTest4 {
 
-  public GeometricTest(BinaryMode binaryMode) {
-    setBinaryMode(binaryMode);
-  }
-
-  @Parameterized.Parameters(name = "binary = {0}")
-  public static Iterable<Object[]> data() {
-    Collection<Object[]> ids = new ArrayList<>();
-    for (BinaryMode binaryMode : BinaryMode.values()) {
-      ids.add(new Object[]{binaryMode});
+    public GeometricTest(BinaryMode binaryMode) {
+        setBinaryMode(binaryMode);
     }
-    return ids;
-  }
 
-  public void setUp() throws Exception {
-    super.setUp();
-    TestUtil.createTable(con, "testgeometric",
-        "boxval box, circleval circle, lsegval lseg, pathval path, polygonval polygon, pointval point, lineval line");
-  }
-
-  public void tearDown() throws SQLException {
-    TestUtil.dropTable(con, "testgeometric");
-    super.tearDown();
-  }
-
-  private void checkReadWrite(PGobject obj, String column) throws Exception {
-    PreparedStatement insert =
-        con.prepareStatement("INSERT INTO testgeometric(" + column + ") VALUES (?)");
-    insert.setObject(1, obj);
-    insert.executeUpdate();
-    insert.close();
-
-    Statement stmt = con.createStatement();
-    ResultSet rs = stmt.executeQuery("SELECT " + column + " FROM testgeometric");
-    assertTrue(rs.next());
-    assertEquals("PGObject#equals(rs.getObject)", obj, rs.getObject(1));
-    PGobject obj2 = (PGobject) obj.clone();
-    obj2.setValue(rs.getString(1));
-    assertEquals("PGobject.toString vs rs.getString", obj, obj2);
-    rs.close();
-
-    stmt.executeUpdate("DELETE FROM testgeometric");
-    stmt.close();
-  }
-
-  @Test
-  public void testPGbox() throws Exception {
-    checkReadWrite(new PGbox(1.0, 2.0, 3.0, 4.0), "boxval");
-    checkReadWrite(new PGbox(-1.0, 2.0, 3.0, 4.0), "boxval");
-    checkReadWrite(new PGbox(1.0, -2.0, 3.0, 4.0), "boxval");
-    checkReadWrite(new PGbox(1.0, 2.0, -3.0, 4.0), "boxval");
-    checkReadWrite(new PGbox(1.0, 2.0, 3.0, -4.0), "boxval");
-  }
-
-  @Test
-  public void testPGcircle() throws Exception {
-    checkReadWrite(new PGcircle(1.0, 2.0, 3.0), "circleval");
-    checkReadWrite(new PGcircle(-1.0, 2.0, 3.0), "circleval");
-    checkReadWrite(new PGcircle(1.0, -2.0, 3.0), "circleval");
-  }
-
-  @Test
-  public void testPGlseg() throws Exception {
-    checkReadWrite(new PGlseg(1.0, 2.0, 3.0, 4.0), "lsegval");
-    checkReadWrite(new PGlseg(-1.0, 2.0, 3.0, 4.0), "lsegval");
-    checkReadWrite(new PGlseg(1.0, -2.0, 3.0, 4.0), "lsegval");
-    checkReadWrite(new PGlseg(1.0, 2.0, -3.0, 4.0), "lsegval");
-    checkReadWrite(new PGlseg(1.0, 2.0, 3.0, -4.0), "lsegval");
-  }
-
-  @Test
-  public void testPGpath() throws Exception {
-    PGpoint[] points =
-        new PGpoint[]{new PGpoint(0.0, 0.0), new PGpoint(0.0, 5.0), new PGpoint(5.0, 5.0),
-            new PGpoint(5.0, -5.0), new PGpoint(-5.0, -5.0), new PGpoint(-5.0, 5.0),};
-
-    checkReadWrite(new PGpath(points, true), "pathval");
-    checkReadWrite(new PGpath(points, false), "pathval");
-  }
-
-  @Test
-  public void testPGpolygon() throws Exception {
-    PGpoint[] points =
-        new PGpoint[]{new PGpoint(0.0, 0.0), new PGpoint(0.0, 5.0), new PGpoint(5.0, 5.0),
-            new PGpoint(5.0, -5.0), new PGpoint(-5.0, -5.0), new PGpoint(-5.0, 5.0),};
-
-    checkReadWrite(new PGpolygon(points), "polygonval");
-  }
-
-  @Test
-  public void testPGline() throws Exception {
-    final String columnName = "lineval";
-
-    // PostgreSQL versions older than 9.4 support creating columns with the LINE datatype, but
-    // not actually writing to those columns. Only try to write if the version if at least 9.4
-    final boolean roundTripToDatabase = TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_4);
-
-    if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_4)) {
-
-      // Apparently the driver requires public no-args constructor, and postgresql doesn't accept
-      // lines with A and B
-      // coefficients both being zero... so assert a no-arg instantiated instance throws an
-      // exception.
-      if (roundTripToDatabase) {
-        try {
-          checkReadWrite(new PGline(), columnName);
-          fail("Expected a PSQLException to be thrown");
-        } catch (PSQLException e) {
-          assertEquals("22P02", e.getSQLState());
+    @Parameterized.Parameters(name = "binary = {0}")
+    public static Iterable<Object[]> data() {
+        Collection<Object[]> ids = new ArrayList<>();
+        for (BinaryMode binaryMode : BinaryMode.values()) {
+            ids.add(new Object[]{binaryMode});
         }
-      }
-
-      // Generate a dataset for testing.
-      List<PGline> linesToTest = new ArrayList<>();
-      for (double i = 1; i <= 3; i += 0.25) {
-        // Test the 3-arg constructor (coefficients+constant)
-        linesToTest.add(new PGline(i, (0 - i), (1 / i)));
-        linesToTest.add(new PGline("{" + i + "," + (0 - i) + "," + (1 / i) + "}"));
-        // Test the 4-arg constructor (x/y coords of two points on the line)
-        linesToTest.add(new PGline(i, (0 - i), (1 / i), (1 / i / i)));
-        linesToTest.add(new PGline(i, (0 - i), i, (1 / i / i))); // tests vertical line
-        // Test 2-arg constructor (2 PGpoints on the line);
-        linesToTest.add(new PGline(new PGpoint(i, (0 - i)), new PGpoint((1 / i), (1 / i / i))));
-        // tests vertical line
-        linesToTest.add(new PGline(new PGpoint(i, (0 - i)), new PGpoint(i, (1 / i / i))));
-        // Test 1-arg constructor (PGlseg on the line);
-        linesToTest.add(new PGline(new PGlseg(i, (0 - i), (1 / i), (1 / i / i))));
-        linesToTest.add(new PGline(new PGlseg(i, (0 - i), i, (1 / i / i))));
-        linesToTest.add(
-            new PGline(new PGlseg(new PGpoint(i, (0 - i)), new PGpoint((1 / i), (1 / i / i)))));
-        linesToTest
-            .add(new PGline(new PGlseg(new PGpoint(i, (0 - i)), new PGpoint(i, (1 / i / i)))));
-      }
-
-      // Include persistence an querying if the postgresql version supports it.
-      if (roundTripToDatabase) {
-        for (PGline testLine : linesToTest) {
-          checkReadWrite(testLine, columnName);
-        }
-      }
-
+        return ids;
     }
-  }
 
-  @Test
-  public void testPGpoint() throws Exception {
-    checkReadWrite(new PGpoint(1.0, 2.0), "pointval");
-  }
+    public void setUp() throws Exception {
+        super.setUp();
+        TestUtil.createTable(con, "testgeometric",
+                "boxval box, circleval circle, lsegval lseg, pathval path, polygonval polygon, pointval point, lineval line");
+    }
+
+    public void tearDown() throws SQLException {
+        TestUtil.dropTable(con, "testgeometric");
+        super.tearDown();
+    }
+
+    private void checkReadWrite(PGobject obj, String column) throws Exception {
+        PreparedStatement insert =
+                con.prepareStatement("INSERT INTO testgeometric(" + column + ") VALUES (?)");
+        insert.setObject(1, obj);
+        insert.executeUpdate();
+        insert.close();
+
+        Statement stmt = con.createStatement();
+        ResultSet rs = stmt.executeQuery("SELECT " + column + " FROM testgeometric");
+        assertTrue(rs.next());
+        assertEquals("PGObject#equals(rs.getObject)", obj, rs.getObject(1));
+        PGobject obj2 = (PGobject) obj.clone();
+        obj2.setValue(rs.getString(1));
+        assertEquals("PGobject.toString vs rs.getString", obj, obj2);
+        rs.close();
+
+        stmt.executeUpdate("DELETE FROM testgeometric");
+        stmt.close();
+    }
+
+    @Test
+    public void testPGbox() throws Exception {
+        checkReadWrite(new PGbox(1.0, 2.0, 3.0, 4.0), "boxval");
+        checkReadWrite(new PGbox(-1.0, 2.0, 3.0, 4.0), "boxval");
+        checkReadWrite(new PGbox(1.0, -2.0, 3.0, 4.0), "boxval");
+        checkReadWrite(new PGbox(1.0, 2.0, -3.0, 4.0), "boxval");
+        checkReadWrite(new PGbox(1.0, 2.0, 3.0, -4.0), "boxval");
+    }
+
+    @Test
+    public void testPGcircle() throws Exception {
+        checkReadWrite(new PGcircle(1.0, 2.0, 3.0), "circleval");
+        checkReadWrite(new PGcircle(-1.0, 2.0, 3.0), "circleval");
+        checkReadWrite(new PGcircle(1.0, -2.0, 3.0), "circleval");
+    }
+
+    @Test
+    public void testPGlseg() throws Exception {
+        checkReadWrite(new PGlseg(1.0, 2.0, 3.0, 4.0), "lsegval");
+        checkReadWrite(new PGlseg(-1.0, 2.0, 3.0, 4.0), "lsegval");
+        checkReadWrite(new PGlseg(1.0, -2.0, 3.0, 4.0), "lsegval");
+        checkReadWrite(new PGlseg(1.0, 2.0, -3.0, 4.0), "lsegval");
+        checkReadWrite(new PGlseg(1.0, 2.0, 3.0, -4.0), "lsegval");
+    }
+
+    @Test
+    public void testPGpath() throws Exception {
+        PGpoint[] points =
+                new PGpoint[]{new PGpoint(0.0, 0.0), new PGpoint(0.0, 5.0), new PGpoint(5.0, 5.0),
+                        new PGpoint(5.0, -5.0), new PGpoint(-5.0, -5.0), new PGpoint(-5.0, 5.0),};
+
+        checkReadWrite(new PGpath(points, true), "pathval");
+        checkReadWrite(new PGpath(points, false), "pathval");
+    }
+
+    @Test
+    public void testPGpolygon() throws Exception {
+        PGpoint[] points =
+                new PGpoint[]{new PGpoint(0.0, 0.0), new PGpoint(0.0, 5.0), new PGpoint(5.0, 5.0),
+                        new PGpoint(5.0, -5.0), new PGpoint(-5.0, -5.0), new PGpoint(-5.0, 5.0),};
+
+        checkReadWrite(new PGpolygon(points), "polygonval");
+    }
+
+    @Test
+    public void testPGline() throws Exception {
+        final String columnName = "lineval";
+
+        // PostgreSQL versions older than 9.4 support creating columns with the LINE datatype, but
+        // not actually writing to those columns. Only try to write if the version is at least 9.4
+        final boolean roundTripToDatabase = TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_4);
+
+        if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_4)) {
+
+            // Apparently the driver requires public no-args constructor, and postgresql doesn't accept
+            // lines with A and B
+            // coefficients both being zero... so assert a no-arg instantiated instance throws an
+            // exception.
+            if (roundTripToDatabase) {
+                try {
+                    checkReadWrite(new PGline(), columnName);
+                    fail("Expected a PSQLException to be thrown");
+                } catch (PSQLException e) {
+                    assertEquals("22P02", e.getSQLState());
+                }
+            }
+
+            // Generate a dataset for testing.
+            List<PGline> linesToTest = new ArrayList<>();
+            for (double i = 1; i <= 3; i += 0.25) {
+                // Test the 3-arg constructor (coefficients+constant)
+                linesToTest.add(new PGline(i, (0 - i), (1 / i)));
+                linesToTest.add(new PGline("{" + i + "," + (0 - i) + "," + (1 / i) + "}"));
+                // Test the 4-arg constructor (x/y coords of two points on the line)
+                linesToTest.add(new PGline(i, (0 - i), (1 / i), (1 / i / i)));
+                linesToTest.add(new PGline(i, (0 - i), i, (1 / i / i))); // tests vertical line
+                // Test 2-arg constructor (2 PGpoints on the line);
+                linesToTest.add(new PGline(new PGpoint(i, (0 - i)), new PGpoint((1 / i), (1 / i / i))));
+                // tests vertical line
+                linesToTest.add(new PGline(new PGpoint(i, (0 - i)), new PGpoint(i, (1 / i / i))));
+                // Test 1-arg constructor (PGlseg on the line);
+                linesToTest.add(new PGline(new PGlseg(i, (0 - i), (1 / i), (1 / i / i))));
+                linesToTest.add(new PGline(new PGlseg(i, (0 - i), i, (1 / i / i))));
+                linesToTest.add(
+                        new PGline(new PGlseg(new PGpoint(i, (0 - i)), new PGpoint((1 / i), (1 / i / i)))));
+                linesToTest
+                        .add(new PGline(new PGlseg(new PGpoint(i, (0 - i)), new PGpoint(i, (1 / i / i)))));
+            }
+
+            // Include persistence and querying if the postgresql version supports it.
+            if (roundTripToDatabase) {
+                for (PGline testLine : linesToTest) {
+                    checkReadWrite(testLine, columnName);
+                }
+            }
+
+        }
+    }
+
+    @Test
+    public void testPGpoint() throws Exception {
+        checkReadWrite(new PGpoint(1.0, 2.0), "pointval");
+    }
 
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/GetXXXTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/GetXXXTest.java
index 31e8cd7..58d284e 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/GetXXXTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/GetXXXTest.java
@@ -5,17 +5,6 @@
 
 package org.postgresql.test.jdbc2;
 
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertNotNull;
-import static org.junit.jupiter.api.Assertions.assertTrue;
-
-import org.postgresql.test.TestUtil;
-import org.postgresql.util.PGInterval;
-
-import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.Test;
-
 import java.sql.Connection;
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
@@ -24,61 +13,69 @@ import java.sql.Statement;
 import java.sql.Timestamp;
 import java.util.Calendar;
 import java.util.HashMap;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.postgresql.test.TestUtil;
+import org.postgresql.util.PGInterval;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 /*
-* Test for getObject
-*/
+ * Test for getObject
+ */
 class GetXXXTest {
-  private Connection con;
+    private Connection con;
 
-  @BeforeEach
-  void setUp() throws Exception {
-    con = TestUtil.openDB();
-    TestUtil.createTempTable(con, "test_interval",
-        "initial timestamp with time zone, final timestamp with time zone");
-    PreparedStatement pstmt = con.prepareStatement("insert into test_interval values (?,?)");
-    Calendar cal = Calendar.getInstance();
-    cal.add(Calendar.DAY_OF_YEAR, -1);
-
-    pstmt.setTimestamp(1, new Timestamp(cal.getTime().getTime()));
-    pstmt.setTimestamp(2, new Timestamp(System.currentTimeMillis()));
-    assertEquals(1, pstmt.executeUpdate());
-    pstmt.close();
-  }
-
-  @AfterEach
-  void tearDown() throws Exception {
-    TestUtil.dropTable(con, "test_interval");
-    con.close();
-  }
-
-  @Test
-  void getObject() throws SQLException {
-    Statement stmt = con.createStatement();
-    ResultSet rs = stmt.executeQuery("select (final-initial) as diff from test_interval");
-    while (rs.next()) {
-      String str = rs.getString(1);
-
-      assertNotNull(str);
-      Object obj = rs.getObject(1);
-      assertNotNull(obj);
-    }
-  }
-
-  @Test
-  void getUDT() throws SQLException {
-    Statement stmt = con.createStatement();
-    ResultSet rs = stmt.executeQuery("select (final-initial) as diff from test_interval");
-
-    while (rs.next()) {
-      // make this return a PGobject
-      Object obj = rs.getObject(1, new HashMap<>());
-
-      // it should not be an instance of PGInterval
-      assertTrue(obj instanceof PGInterval);
+    @BeforeEach
+    void setUp() throws Exception {
+        con = TestUtil.openDB();
+        TestUtil.createTempTable(con, "test_interval",
+                "initial timestamp with time zone, final timestamp with time zone");
+        PreparedStatement pstmt = con.prepareStatement("insert into test_interval values (?,?)");
+        Calendar cal = Calendar.getInstance();
+        cal.add(Calendar.DAY_OF_YEAR, -1);
 
+        pstmt.setTimestamp(1, new Timestamp(cal.getTime().getTime()));
+        pstmt.setTimestamp(2, new Timestamp(System.currentTimeMillis()));
+        assertEquals(1, pstmt.executeUpdate());
+        pstmt.close();
     }
 
-  }
+    @AfterEach
+    void tearDown() throws Exception {
+        TestUtil.dropTable(con, "test_interval");
+        con.close();
+    }
+
+    @Test
+    void getObject() throws SQLException {
+        Statement stmt = con.createStatement();
+        ResultSet rs = stmt.executeQuery("select (final-initial) as diff from test_interval");
+        while (rs.next()) {
+            String str = rs.getString(1);
+
+            assertNotNull(str);
+            Object obj = rs.getObject(1);
+            assertNotNull(obj);
+        }
+    }
+
+    @Test
+    void getUDT() throws SQLException {
+        Statement stmt = con.createStatement();
+        ResultSet rs = stmt.executeQuery("select (final-initial) as diff from test_interval");
+
+        while (rs.next()) {
+            // make this return a PGobject
+            Object obj = rs.getObject(1, new HashMap<>());
+
+            // it should be an instance of PGInterval
+            assertTrue(obj instanceof PGInterval);
+
+        }
+
+    }
 
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/IntervalTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/IntervalTest.java
index f15e822..ab418b1 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/IntervalTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/IntervalTest.java
@@ -5,18 +5,6 @@
 
 package org.postgresql.test.jdbc2;
 
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertFalse;
-import static org.junit.jupiter.api.Assertions.assertNotNull;
-import static org.junit.jupiter.api.Assertions.assertTrue;
-
-import org.postgresql.test.TestUtil;
-import org.postgresql.util.PGInterval;
-
-import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.Test;
-
 import java.sql.Connection;
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
@@ -26,356 +14,365 @@ import java.util.Calendar;
 import java.util.Date;
 import java.util.GregorianCalendar;
 import java.util.Locale;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.postgresql.test.TestUtil;
+import org.postgresql.util.PGInterval;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 class IntervalTest {
-  private Connection conn;
+    private Connection conn;
 
-  @BeforeEach
-  void setUp() throws Exception {
-    conn = TestUtil.openDB();
-    TestUtil.createTable(conn, "testinterval", "v interval");
-    TestUtil.createTable(conn, "testdate", "v date");
-  }
-
-  @AfterEach
-  void tearDown() throws Exception {
-    TestUtil.dropTable(conn, "testinterval");
-    TestUtil.dropTable(conn, "testdate");
-
-    TestUtil.closeDB(conn);
-  }
-
-  @Test
-  void onlineTests() throws SQLException {
-    PreparedStatement pstmt = conn.prepareStatement("INSERT INTO testinterval VALUES (?)");
-    pstmt.setObject(1, new PGInterval(2004, 13, 28, 0, 0, 43000.9013));
-    pstmt.executeUpdate();
-    pstmt.close();
-
-    Statement stmt = conn.createStatement();
-    ResultSet rs = stmt.executeQuery("SELECT v FROM testinterval");
-    assertTrue(rs.next());
-    PGInterval pgi = (PGInterval) rs.getObject(1);
-    assertEquals(2005, pgi.getYears());
-    assertEquals(1, pgi.getMonths());
-    assertEquals(28, pgi.getDays());
-    assertEquals(11, pgi.getHours());
-    assertEquals(56, pgi.getMinutes());
-    assertEquals(40.9013, pgi.getSeconds(), 0.000001);
-    assertFalse(rs.next());
-    rs.close();
-    stmt.close();
-  }
-
-  @Test
-  void stringToIntervalCoercion() throws SQLException {
-    Statement stmt = conn.createStatement();
-    stmt.executeUpdate(TestUtil.insertSQL("testdate", "'2010-01-01'"));
-    stmt.executeUpdate(TestUtil.insertSQL("testdate", "'2010-01-02'"));
-    stmt.executeUpdate(TestUtil.insertSQL("testdate", "'2010-01-04'"));
-    stmt.executeUpdate(TestUtil.insertSQL("testdate", "'2010-01-05'"));
-    stmt.close();
-
-    PreparedStatement pstmt = conn.prepareStatement(
-        "SELECT v FROM testdate WHERE v < (?::timestamp with time zone + ? * ?::interval) ORDER BY v");
-    pstmt.setObject(1, makeDate(2010, 1, 1));
-    pstmt.setObject(2, 2);
-    pstmt.setObject(3, "1 day");
-    ResultSet rs = pstmt.executeQuery();
-
-    assertNotNull(rs);
-
-    java.sql.Date d;
-
-    assertTrue(rs.next());
-    d = rs.getDate(1);
-    assertNotNull(d);
-    assertEquals(makeDate(2010, 1, 1), d);
-
-    assertTrue(rs.next());
-    d = rs.getDate(1);
-    assertNotNull(d);
-    assertEquals(makeDate(2010, 1, 2), d);
-
-    assertFalse(rs.next());
-
-    rs.close();
-    pstmt.close();
-  }
-
-  @Test
-  void intervalToStringCoercion() throws SQLException {
-    PGInterval interval = new PGInterval("1 year 3 months");
-    String coercedStringValue = interval.toString();
-
-    assertEquals("1 years 3 mons 0 days 0 hours 0 mins 0.0 secs", coercedStringValue);
-  }
-
-  @Test
-  void daysHours() throws SQLException {
-    Statement stmt = conn.createStatement();
-    ResultSet rs = stmt.executeQuery("SELECT '101:12:00'::interval");
-    assertTrue(rs.next());
-    PGInterval i = (PGInterval) rs.getObject(1);
-    // 8.1 servers store hours and days separately.
-    assertEquals(0, i.getDays());
-    assertEquals(101, i.getHours());
-
-    assertEquals(12, i.getMinutes());
-  }
-
-  @Test
-  void addRounding() {
-    PGInterval pgi = new PGInterval(0, 0, 0, 0, 0, 0.6006);
-    Calendar cal = Calendar.getInstance();
-    long origTime = cal.getTime().getTime();
-    pgi.add(cal);
-    long newTime = cal.getTime().getTime();
-    assertEquals(601, newTime - origTime);
-    pgi.setSeconds(-0.6006);
-    pgi.add(cal);
-    assertEquals(origTime, cal.getTime().getTime());
-  }
-
-  @Test
-  void offlineTests() throws Exception {
-    PGInterval pgi = new PGInterval(2004, 4, 20, 15, 57, 12.1);
-
-    assertEquals(2004, pgi.getYears());
-    assertEquals(4, pgi.getMonths());
-    assertEquals(20, pgi.getDays());
-    assertEquals(15, pgi.getHours());
-    assertEquals(57, pgi.getMinutes());
-    assertEquals(12.1, pgi.getSeconds(), 0);
-
-    PGInterval pgi2 = new PGInterval("@ 2004 years 4 mons 20 days 15 hours 57 mins 12.1 secs");
-    assertEquals(pgi, pgi2);
-
-    // Singular units
-    PGInterval pgi3 = new PGInterval("@ 2004 year 4 mon 20 day 15 hour 57 min 12.1 sec");
-    assertEquals(pgi, pgi3);
-
-    PGInterval pgi4 = new PGInterval("2004 years 4 mons 20 days 15:57:12.1");
-    assertEquals(pgi, pgi4);
-
-    // Ago test
-    pgi = new PGInterval("@ 2004 years 4 mons 20 days 15 hours 57 mins 12.1 secs ago");
-    assertEquals(-2004, pgi.getYears());
-    assertEquals(-4, pgi.getMonths());
-    assertEquals(-20, pgi.getDays());
-    assertEquals(-15, pgi.getHours());
-    assertEquals(-57, pgi.getMinutes());
-    assertEquals(-12.1, pgi.getSeconds(), 0);
-
-    // Char test
-    pgi = new PGInterval("@ +2004 years -4 mons +20 days -15 hours +57 mins -12.1 secs");
-    assertEquals(2004, pgi.getYears());
-    assertEquals(-4, pgi.getMonths());
-    assertEquals(20, pgi.getDays());
-    assertEquals(-15, pgi.getHours());
-    assertEquals(57, pgi.getMinutes());
-    assertEquals(-12.1, pgi.getSeconds(), 0);
-
-    // Unjustified interval test
-    pgi = new PGInterval("@ 0 years 0 mons 0 days 900 hours 0 mins 0.00 secs");
-    assertEquals(0, pgi.getYears());
-    assertEquals(0, pgi.getMonths());
-    assertEquals(0, pgi.getDays());
-    assertEquals(900, pgi.getHours());
-    assertEquals(0, pgi.getMinutes());
-    assertEquals(0, pgi.getSeconds(), 0);
-  }
-
-  private Calendar getStartCalendar() {
-    Calendar cal = new GregorianCalendar();
-    cal.set(Calendar.YEAR, 2005);
-    cal.set(Calendar.MONTH, 4);
-    cal.set(Calendar.DAY_OF_MONTH, 29);
-    cal.set(Calendar.HOUR_OF_DAY, 15);
-    cal.set(Calendar.MINUTE, 35);
-    cal.set(Calendar.SECOND, 42);
-    cal.set(Calendar.MILLISECOND, 100);
-
-    return cal;
-  }
-
-  @Test
-  void calendar() throws Exception {
-    Calendar cal = getStartCalendar();
-
-    PGInterval pgi = new PGInterval("@ 1 year 1 mon 1 day 1 hour 1 minute 1 secs");
-    pgi.add(cal);
-
-    assertEquals(2006, cal.get(Calendar.YEAR));
-    assertEquals(5, cal.get(Calendar.MONTH));
-    assertEquals(30, cal.get(Calendar.DAY_OF_MONTH));
-    assertEquals(16, cal.get(Calendar.HOUR_OF_DAY));
-    assertEquals(36, cal.get(Calendar.MINUTE));
-    assertEquals(43, cal.get(Calendar.SECOND));
-    assertEquals(100, cal.get(Calendar.MILLISECOND));
-
-    pgi = new PGInterval("@ 1 year 1 mon 1 day 1 hour 1 minute 1 secs ago");
-    pgi.add(cal);
-
-    assertEquals(2005, cal.get(Calendar.YEAR));
-    assertEquals(4, cal.get(Calendar.MONTH));
-    assertEquals(29, cal.get(Calendar.DAY_OF_MONTH));
-    assertEquals(15, cal.get(Calendar.HOUR_OF_DAY));
-    assertEquals(35, cal.get(Calendar.MINUTE));
-    assertEquals(42, cal.get(Calendar.SECOND));
-    assertEquals(100, cal.get(Calendar.MILLISECOND));
-
-    cal = getStartCalendar();
-
-    pgi = new PGInterval("@ 1 year -23 hours -3 mins -3.30 secs");
-    pgi.add(cal);
-
-    assertEquals(2006, cal.get(Calendar.YEAR));
-    assertEquals(4, cal.get(Calendar.MONTH));
-    assertEquals(28, cal.get(Calendar.DAY_OF_MONTH));
-    assertEquals(16, cal.get(Calendar.HOUR_OF_DAY));
-    assertEquals(32, cal.get(Calendar.MINUTE));
-    assertEquals(38, cal.get(Calendar.SECOND));
-    assertEquals(800, cal.get(Calendar.MILLISECOND));
-
-    pgi = new PGInterval("@ 1 year -23 hours -3 mins -3.30 secs ago");
-    pgi.add(cal);
-
-    assertEquals(2005, cal.get(Calendar.YEAR));
-    assertEquals(4, cal.get(Calendar.MONTH));
-    assertEquals(29, cal.get(Calendar.DAY_OF_MONTH));
-    assertEquals(15, cal.get(Calendar.HOUR_OF_DAY));
-    assertEquals(35, cal.get(Calendar.MINUTE));
-    assertEquals(42, cal.get(Calendar.SECOND));
-    assertEquals(100, cal.get(Calendar.MILLISECOND));
-  }
-
-  @Test
-  void date() throws Exception {
-    Date date = getStartCalendar().getTime();
-    Date date2 = getStartCalendar().getTime();
-
-    PGInterval pgi = new PGInterval("@ +2004 years -4 mons +20 days -15 hours +57 mins -12.1 secs");
-    pgi.add(date);
-
-    PGInterval pgi2 =
-        new PGInterval("@ +2004 years -4 mons +20 days -15 hours +57 mins -12.1 secs ago");
-    pgi2.add(date);
-
-    assertEquals(date2, date);
-  }
-
-  @Test
-  void postgresDate() throws Exception {
-    Date date = getStartCalendar().getTime();
-    Date date2 = getStartCalendar().getTime();
-
-    PGInterval pgi = new PGInterval("+2004 years -4 mons +20 days -15:57:12.1");
-    pgi.add(date);
-
-    PGInterval pgi2 = new PGInterval("-2004 years 4 mons -20 days 15:57:12.1");
-    pgi2.add(date);
-
-    assertEquals(date2, date);
-  }
-
-  @Test
-  void iSO8601() throws Exception {
-    PGInterval pgi = new PGInterval("P1Y2M3DT4H5M6S");
-    assertEquals(1, pgi.getYears());
-    assertEquals(2, pgi.getMonths());
-    assertEquals(3, pgi.getDays());
-    assertEquals(4, pgi.getHours());
-    assertEquals(5, pgi.getMinutes());
-    assertEquals(6, pgi.getSeconds(), .1);
-
-    pgi = new PGInterval("P-1Y2M3DT4H5M6S");
-    assertEquals(-1, pgi.getYears());
-
-    pgi = new PGInterval("P1Y2M");
-    assertEquals(1, pgi.getYears());
-    assertEquals(2, pgi.getMonths());
-    assertEquals(0, pgi.getDays());
-
-    pgi = new PGInterval("P3DT4H5M6S");
-    assertEquals(0, pgi.getYears());
-
-    pgi = new PGInterval("P-1Y-2M3DT-4H-5M-6S");
-    assertEquals(-1, pgi.getYears());
-    assertEquals(-2, pgi.getMonths());
-    assertEquals(-4, pgi.getHours());
-
-    pgi = new PGInterval("PT6.123456S");
-    assertEquals(6.123456, pgi.getSeconds(), .0);
-    assertEquals(6, pgi.getWholeSeconds());
-    assertEquals(123456, pgi.getMicroSeconds());
-
-    pgi = new PGInterval("PT-6.123456S");
-    assertEquals(-6.123456, pgi.getSeconds(), .0);
-    assertEquals(-6, pgi.getWholeSeconds());
-    assertEquals(-123456, pgi.getMicroSeconds());
-  }
-
-  @Test
-  void smallValue() throws SQLException {
-    PreparedStatement pstmt = conn.prepareStatement("INSERT INTO testinterval VALUES (?)");
-    pstmt.setObject(1, new PGInterval("0.0001 seconds"));
-    pstmt.executeUpdate();
-    pstmt.close();
-
-    Statement stmt = conn.createStatement();
-    ResultSet rs = stmt.executeQuery("SELECT v FROM testinterval");
-    assertTrue(rs.next());
-    PGInterval pgi = (PGInterval) rs.getObject(1);
-    assertEquals(0, pgi.getYears());
-    assertEquals(0, pgi.getMonths());
-    assertEquals(0, pgi.getDays());
-    assertEquals(0, pgi.getHours());
-    assertEquals(0, pgi.getMinutes());
-    assertEquals(0, pgi.getWholeSeconds());
-    assertEquals(100, pgi.getMicroSeconds());
-    assertFalse(rs.next());
-    rs.close();
-    stmt.close();
-  }
-
-  @Test
-  void getValueForSmallValue() throws SQLException {
-    PGInterval orig = new PGInterval("0.0001 seconds");
-    PGInterval copy = new PGInterval(orig.getValue());
-
-    assertEquals(orig, copy);
-  }
-
-  @Test
-  void getValueForSmallValueWithCommaAsDecimalSeparatorInDefaultLocale() throws SQLException {
-    Locale originalLocale = Locale.getDefault();
-    Locale.setDefault(Locale.GERMANY);
-    try {
-      PGInterval orig = new PGInterval("0.0001 seconds");
-      PGInterval copy = new PGInterval(orig.getValue());
-
-      assertEquals(orig, copy);
-    } finally {
-      Locale.setDefault(originalLocale);
+    @BeforeEach
+    void setUp() throws Exception {
+        conn = TestUtil.openDB();
+        TestUtil.createTable(conn, "testinterval", "v interval");
+        TestUtil.createTable(conn, "testdate", "v date");
     }
-  }
 
-  @Test
-  void getSecondsForSmallValue() throws SQLException {
-    PGInterval pgi = new PGInterval("0.000001 seconds");
+    @AfterEach
+    void tearDown() throws Exception {
+        TestUtil.dropTable(conn, "testinterval");
+        TestUtil.dropTable(conn, "testdate");
 
-    assertEquals(0.000001, pgi.getSeconds(), 0.000000001);
-  }
+        TestUtil.closeDB(conn);
+    }
 
-  @Test
-  void microSecondsAreRoundedToNearest() throws SQLException {
-    PGInterval pgi = new PGInterval("0.0000007 seconds");
+    @Test
+    void onlineTests() throws SQLException {
+        PreparedStatement pstmt = conn.prepareStatement("INSERT INTO testinterval VALUES (?)");
+        pstmt.setObject(1, new PGInterval(2004, 13, 28, 0, 0, 43000.9013));
+        pstmt.executeUpdate();
+        pstmt.close();
 
-    assertEquals(1, pgi.getMicroSeconds());
-  }
+        Statement stmt = conn.createStatement();
+        ResultSet rs = stmt.executeQuery("SELECT v FROM testinterval");
+        assertTrue(rs.next());
+        PGInterval pgi = (PGInterval) rs.getObject(1);
+        assertEquals(2005, pgi.getYears());
+        assertEquals(1, pgi.getMonths());
+        assertEquals(28, pgi.getDays());
+        assertEquals(11, pgi.getHours());
+        assertEquals(56, pgi.getMinutes());
+        assertEquals(40.9013, pgi.getSeconds(), 0.000001);
+        assertFalse(rs.next());
+        rs.close();
+        stmt.close();
+    }
 
-  private java.sql.Date makeDate(int y, int m, int d) {
-    return new java.sql.Date(y - 1900, m - 1, d);
-  }
+    @Test
+    void stringToIntervalCoercion() throws SQLException {
+        Statement stmt = conn.createStatement();
+        stmt.executeUpdate(TestUtil.insertSQL("testdate", "'2010-01-01'"));
+        stmt.executeUpdate(TestUtil.insertSQL("testdate", "'2010-01-02'"));
+        stmt.executeUpdate(TestUtil.insertSQL("testdate", "'2010-01-04'"));
+        stmt.executeUpdate(TestUtil.insertSQL("testdate", "'2010-01-05'"));
+        stmt.close();
+
+        PreparedStatement pstmt = conn.prepareStatement(
+                "SELECT v FROM testdate WHERE v < (?::timestamp with time zone + ? * ?::interval) ORDER BY v");
+        pstmt.setObject(1, makeDate(2010, 1, 1));
+        pstmt.setObject(2, 2);
+        pstmt.setObject(3, "1 day");
+        ResultSet rs = pstmt.executeQuery();
+
+        assertNotNull(rs);
+
+        java.sql.Date d;
+
+        assertTrue(rs.next());
+        d = rs.getDate(1);
+        assertNotNull(d);
+        assertEquals(makeDate(2010, 1, 1), d);
+
+        assertTrue(rs.next());
+        d = rs.getDate(1);
+        assertNotNull(d);
+        assertEquals(makeDate(2010, 1, 2), d);
+
+        assertFalse(rs.next());
+
+        rs.close();
+        pstmt.close();
+    }
+
+    @Test
+    void intervalToStringCoercion() throws SQLException {
+        PGInterval interval = new PGInterval("1 year 3 months");
+        String coercedStringValue = interval.toString();
+
+        assertEquals("1 years 3 mons 0 days 0 hours 0 mins 0.0 secs", coercedStringValue);
+    }
+
+    @Test
+    void daysHours() throws SQLException {
+        Statement stmt = conn.createStatement();
+        ResultSet rs = stmt.executeQuery("SELECT '101:12:00'::interval");
+        assertTrue(rs.next());
+        PGInterval i = (PGInterval) rs.getObject(1);
+        // 8.1 servers store hours and days separately.
+        assertEquals(0, i.getDays());
+        assertEquals(101, i.getHours());
+
+        assertEquals(12, i.getMinutes());
+    }
+
+    @Test
+    void addRounding() {
+        PGInterval pgi = new PGInterval(0, 0, 0, 0, 0, 0.6006);
+        Calendar cal = Calendar.getInstance();
+        long origTime = cal.getTime().getTime();
+        pgi.add(cal);
+        long newTime = cal.getTime().getTime();
+        assertEquals(601, newTime - origTime);
+        pgi.setSeconds(-0.6006);
+        pgi.add(cal);
+        assertEquals(origTime, cal.getTime().getTime());
+    }
+
+    @Test
+    void offlineTests() throws Exception {
+        PGInterval pgi = new PGInterval(2004, 4, 20, 15, 57, 12.1);
+
+        assertEquals(2004, pgi.getYears());
+        assertEquals(4, pgi.getMonths());
+        assertEquals(20, pgi.getDays());
+        assertEquals(15, pgi.getHours());
+        assertEquals(57, pgi.getMinutes());
+        assertEquals(12.1, pgi.getSeconds(), 0);
+
+        PGInterval pgi2 = new PGInterval("@ 2004 years 4 mons 20 days 15 hours 57 mins 12.1 secs");
+        assertEquals(pgi, pgi2);
+
+        // Singular units
+        PGInterval pgi3 = new PGInterval("@ 2004 year 4 mon 20 day 15 hour 57 min 12.1 sec");
+        assertEquals(pgi, pgi3);
+
+        PGInterval pgi4 = new PGInterval("2004 years 4 mons 20 days 15:57:12.1");
+        assertEquals(pgi, pgi4);
+
+        // Ago test
+        pgi = new PGInterval("@ 2004 years 4 mons 20 days 15 hours 57 mins 12.1 secs ago");
+        assertEquals(-2004, pgi.getYears());
+        assertEquals(-4, pgi.getMonths());
+        assertEquals(-20, pgi.getDays());
+        assertEquals(-15, pgi.getHours());
+        assertEquals(-57, pgi.getMinutes());
+        assertEquals(-12.1, pgi.getSeconds(), 0);
+
+        // Char test
+        pgi = new PGInterval("@ +2004 years -4 mons +20 days -15 hours +57 mins -12.1 secs");
+        assertEquals(2004, pgi.getYears());
+        assertEquals(-4, pgi.getMonths());
+        assertEquals(20, pgi.getDays());
+        assertEquals(-15, pgi.getHours());
+        assertEquals(57, pgi.getMinutes());
+        assertEquals(-12.1, pgi.getSeconds(), 0);
+
+        // Unjustified interval test
+        pgi = new PGInterval("@ 0 years 0 mons 0 days 900 hours 0 mins 0.00 secs");
+        assertEquals(0, pgi.getYears());
+        assertEquals(0, pgi.getMonths());
+        assertEquals(0, pgi.getDays());
+        assertEquals(900, pgi.getHours());
+        assertEquals(0, pgi.getMinutes());
+        assertEquals(0, pgi.getSeconds(), 0);
+    }
+
+    private Calendar getStartCalendar() {
+        Calendar cal = new GregorianCalendar();
+        cal.set(Calendar.YEAR, 2005);
+        cal.set(Calendar.MONTH, 4);
+        cal.set(Calendar.DAY_OF_MONTH, 29);
+        cal.set(Calendar.HOUR_OF_DAY, 15);
+        cal.set(Calendar.MINUTE, 35);
+        cal.set(Calendar.SECOND, 42);
+        cal.set(Calendar.MILLISECOND, 100);
+
+        return cal;
+    }
+
+    @Test
+    void calendar() throws Exception {
+        Calendar cal = getStartCalendar();
+
+        PGInterval pgi = new PGInterval("@ 1 year 1 mon 1 day 1 hour 1 minute 1 secs");
+        pgi.add(cal);
+
+        assertEquals(2006, cal.get(Calendar.YEAR));
+        assertEquals(5, cal.get(Calendar.MONTH));
+        assertEquals(30, cal.get(Calendar.DAY_OF_MONTH));
+        assertEquals(16, cal.get(Calendar.HOUR_OF_DAY));
+        assertEquals(36, cal.get(Calendar.MINUTE));
+        assertEquals(43, cal.get(Calendar.SECOND));
+        assertEquals(100, cal.get(Calendar.MILLISECOND));
+
+        pgi = new PGInterval("@ 1 year 1 mon 1 day 1 hour 1 minute 1 secs ago");
+        pgi.add(cal);
+
+        assertEquals(2005, cal.get(Calendar.YEAR));
+        assertEquals(4, cal.get(Calendar.MONTH));
+        assertEquals(29, cal.get(Calendar.DAY_OF_MONTH));
+        assertEquals(15, cal.get(Calendar.HOUR_OF_DAY));
+        assertEquals(35, cal.get(Calendar.MINUTE));
+        assertEquals(42, cal.get(Calendar.SECOND));
+        assertEquals(100, cal.get(Calendar.MILLISECOND));
+
+        cal = getStartCalendar();
+
+        pgi = new PGInterval("@ 1 year -23 hours -3 mins -3.30 secs");
+        pgi.add(cal);
+
+        assertEquals(2006, cal.get(Calendar.YEAR));
+        assertEquals(4, cal.get(Calendar.MONTH));
+        assertEquals(28, cal.get(Calendar.DAY_OF_MONTH));
+        assertEquals(16, cal.get(Calendar.HOUR_OF_DAY));
+        assertEquals(32, cal.get(Calendar.MINUTE));
+        assertEquals(38, cal.get(Calendar.SECOND));
+        assertEquals(800, cal.get(Calendar.MILLISECOND));
+
+        pgi = new PGInterval("@ 1 year -23 hours -3 mins -3.30 secs ago");
+        pgi.add(cal);
+
+        assertEquals(2005, cal.get(Calendar.YEAR));
+        assertEquals(4, cal.get(Calendar.MONTH));
+        assertEquals(29, cal.get(Calendar.DAY_OF_MONTH));
+        assertEquals(15, cal.get(Calendar.HOUR_OF_DAY));
+        assertEquals(35, cal.get(Calendar.MINUTE));
+        assertEquals(42, cal.get(Calendar.SECOND));
+        assertEquals(100, cal.get(Calendar.MILLISECOND));
+    }
+
+    @Test
+    void date() throws Exception {
+        Date date = getStartCalendar().getTime();
+        Date date2 = getStartCalendar().getTime();
+
+        PGInterval pgi = new PGInterval("@ +2004 years -4 mons +20 days -15 hours +57 mins -12.1 secs");
+        pgi.add(date);
+
+        PGInterval pgi2 =
+                new PGInterval("@ +2004 years -4 mons +20 days -15 hours +57 mins -12.1 secs ago");
+        pgi2.add(date);
+
+        assertEquals(date2, date);
+    }
+
+    @Test
+    void postgresDate() throws Exception {
+        Date date = getStartCalendar().getTime();
+        Date date2 = getStartCalendar().getTime();
+
+        PGInterval pgi = new PGInterval("+2004 years -4 mons +20 days -15:57:12.1");
+        pgi.add(date);
+
+        PGInterval pgi2 = new PGInterval("-2004 years 4 mons -20 days 15:57:12.1");
+        pgi2.add(date);
+
+        assertEquals(date2, date);
+    }
+
+    @Test
+    void iSO8601() throws Exception {
+        PGInterval pgi = new PGInterval("P1Y2M3DT4H5M6S");
+        assertEquals(1, pgi.getYears());
+        assertEquals(2, pgi.getMonths());
+        assertEquals(3, pgi.getDays());
+        assertEquals(4, pgi.getHours());
+        assertEquals(5, pgi.getMinutes());
+        assertEquals(6, pgi.getSeconds(), .1);
+
+        pgi = new PGInterval("P-1Y2M3DT4H5M6S");
+        assertEquals(-1, pgi.getYears());
+
+        pgi = new PGInterval("P1Y2M");
+        assertEquals(1, pgi.getYears());
+        assertEquals(2, pgi.getMonths());
+        assertEquals(0, pgi.getDays());
+
+        pgi = new PGInterval("P3DT4H5M6S");
+        assertEquals(0, pgi.getYears());
+
+        pgi = new PGInterval("P-1Y-2M3DT-4H-5M-6S");
+        assertEquals(-1, pgi.getYears());
+        assertEquals(-2, pgi.getMonths());
+        assertEquals(-4, pgi.getHours());
+
+        pgi = new PGInterval("PT6.123456S");
+        assertEquals(6.123456, pgi.getSeconds(), .0);
+        assertEquals(6, pgi.getWholeSeconds());
+        assertEquals(123456, pgi.getMicroSeconds());
+
+        pgi = new PGInterval("PT-6.123456S");
+        assertEquals(-6.123456, pgi.getSeconds(), .0);
+        assertEquals(-6, pgi.getWholeSeconds());
+        assertEquals(-123456, pgi.getMicroSeconds());
+    }
+
+    @Test
+    void smallValue() throws SQLException {
+        PreparedStatement pstmt = conn.prepareStatement("INSERT INTO testinterval VALUES (?)");
+        pstmt.setObject(1, new PGInterval("0.0001 seconds"));
+        pstmt.executeUpdate();
+        pstmt.close();
+
+        Statement stmt = conn.createStatement();
+        ResultSet rs = stmt.executeQuery("SELECT v FROM testinterval");
+        assertTrue(rs.next());
+        PGInterval pgi = (PGInterval) rs.getObject(1);
+        assertEquals(0, pgi.getYears());
+        assertEquals(0, pgi.getMonths());
+        assertEquals(0, pgi.getDays());
+        assertEquals(0, pgi.getHours());
+        assertEquals(0, pgi.getMinutes());
+        assertEquals(0, pgi.getWholeSeconds());
+        assertEquals(100, pgi.getMicroSeconds());
+        assertFalse(rs.next());
+        rs.close();
+        stmt.close();
+    }
+
+    @Test
+    void getValueForSmallValue() throws SQLException {
+        PGInterval orig = new PGInterval("0.0001 seconds");
+        PGInterval copy = new PGInterval(orig.getValue());
+
+        assertEquals(orig, copy);
+    }
+
+    @Test
+    void getValueForSmallValueWithCommaAsDecimalSeparatorInDefaultLocale() throws SQLException {
+        Locale originalLocale = Locale.getDefault();
+        Locale.setDefault(Locale.GERMANY);
+        try {
+            PGInterval orig = new PGInterval("0.0001 seconds");
+            PGInterval copy = new PGInterval(orig.getValue());
+
+            assertEquals(orig, copy);
+        } finally {
+            Locale.setDefault(originalLocale);
+        }
+    }
+
+    @Test
+    void getSecondsForSmallValue() throws SQLException {
+        PGInterval pgi = new PGInterval("0.000001 seconds");
+
+        assertEquals(0.000001, pgi.getSeconds(), 0.000000001);
+    }
+
+    @Test
+    void microSecondsAreRoundedToNearest() throws SQLException {
+        PGInterval pgi = new PGInterval("0.0000007 seconds");
+
+        assertEquals(1, pgi.getMicroSeconds());
+    }
+
+    private java.sql.Date makeDate(int y, int m, int d) {
+        return new java.sql.Date(y - 1900, m - 1, d);
+    }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/JBuilderTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/JBuilderTest.java
index ad3f2ee..878633c 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/JBuilderTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/JBuilderTest.java
@@ -5,60 +5,57 @@
 
 package org.postgresql.test.jdbc2;
 
-import static org.junit.jupiter.api.Assertions.assertNotNull;
-
-import org.postgresql.test.TestUtil;
-
-import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.Test;
-
 import java.sql.Connection;
 import java.sql.ResultSet;
 import java.sql.Statement;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.postgresql.test.TestUtil;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
 
 /*
-* Some simple tests to check that the required components needed for JBuilder stay working
-*
-*/
+ * Some simple tests to check that the required components needed for JBuilder stay working
+ *
+ */
 class JBuilderTest {
 
-  // Set up the fixture for this testcase: the tables for this test.
-  @BeforeEach
-  void setUp() throws Exception {
-    Connection con = TestUtil.openDB();
+    // Set up the fixture for this testcase: the tables for this test.
+    @BeforeEach
+    void setUp() throws Exception {
+        Connection con = TestUtil.openDB();
 
-    TestUtil.createTable(con, "test_c", "source text,cost money,imageid int4");
+        TestUtil.createTable(con, "test_c", "source text,cost money,imageid int4");
 
-    TestUtil.closeDB(con);
-  }
-
-  // Tear down the fixture for this test case.
-  @AfterEach
-  void tearDown() throws Exception {
-    Connection con = TestUtil.openDB();
-    TestUtil.dropTable(con, "test_c");
-    TestUtil.closeDB(con);
-  }
-
-  /*
-   * This tests that Money types work. JDBCExplorer barfs if this fails.
-   */
-  @Test
-  void money() throws Exception {
-    Connection con = TestUtil.openDB();
-
-    Statement st = con.createStatement();
-    ResultSet rs = st.executeQuery("select cost from test_c");
-    assertNotNull(rs);
-
-    while (rs.next()) {
-      rs.getDouble(1);
+        TestUtil.closeDB(con);
     }
 
-    rs.close();
-    st.close();
+    // Tear down the fixture for this test case.
+    @AfterEach
+    void tearDown() throws Exception {
+        Connection con = TestUtil.openDB();
+        TestUtil.dropTable(con, "test_c");
+        TestUtil.closeDB(con);
+    }
 
-    TestUtil.closeDB(con);
-  }
+    /*
+     * This tests that Money types work. JDBCExplorer barfs if this fails.
+     */
+    @Test
+    void money() throws Exception {
+        Connection con = TestUtil.openDB();
+
+        Statement st = con.createStatement();
+        ResultSet rs = st.executeQuery("select cost from test_c");
+        assertNotNull(rs);
+
+        while (rs.next()) {
+            rs.getDouble(1);
+        }
+
+        rs.close();
+        st.close();
+
+        TestUtil.closeDB(con);
+    }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/LoginTimeoutTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/LoginTimeoutTest.java
index 5878abf..c1367c3 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/LoginTimeoutTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/LoginTimeoutTest.java
@@ -26,143 +26,143 @@ import java.util.Properties;
 
 class LoginTimeoutTest {
 
-  @BeforeEach
-  void setUp() throws Exception {
-    TestUtil.initDriver(); // Set up log levels, etc.
-  }
-
-  @Test
-  void intTimeout() throws Exception {
-    Properties props = new Properties();
-    PGProperty.USER.set(props, TestUtil.getUser());
-    PGProperty.PASSWORD.set(props, TestUtil.getPassword());
-    PGProperty.LOGIN_TIMEOUT.set(props, 10);
-
-    Connection conn = DriverManager.getConnection(TestUtil.getURL(), props);
-    conn.close();
-  }
-
-  @Test
-  void floatTimeout() throws Exception {
-    Properties props = new Properties();
-    props.setProperty("user", TestUtil.getUser());
-    props.setProperty("password", TestUtil.getPassword());
-    props.setProperty("loginTimeout", "10.0");
-
-    Connection conn = DriverManager.getConnection(TestUtil.getURL(), props);
-    conn.close();
-  }
-
-  @Test
-  void zeroTimeout() throws Exception {
-    Properties props = new Properties();
-    props.setProperty("user", TestUtil.getUser());
-    props.setProperty("password", TestUtil.getPassword());
-    props.setProperty("loginTimeout", "0");
-
-    Connection conn = DriverManager.getConnection(TestUtil.getURL(), props);
-    conn.close();
-  }
-
-  @Test
-  void negativeTimeout() throws Exception {
-    Properties props = new Properties();
-    props.setProperty("user", TestUtil.getUser());
-    props.setProperty("password", TestUtil.getPassword());
-    props.setProperty("loginTimeout", "-1");
-
-    Connection conn = DriverManager.getConnection(TestUtil.getURL(), props);
-    conn.close();
-  }
-
-  @Test
-  void badTimeout() throws Exception {
-    Properties props = new Properties();
-    props.setProperty("user", TestUtil.getUser());
-    props.setProperty("password", TestUtil.getPassword());
-    props.setProperty("loginTimeout", "zzzz");
-
-    Connection conn = DriverManager.getConnection(TestUtil.getURL(), props);
-    conn.close();
-  }
-
-  private static class TimeoutHelper implements Runnable {
-    TimeoutHelper() throws IOException {
-      InetAddress localAddr;
-      try {
-        localAddr = InetAddress.getLocalHost();
-      } catch (UnknownHostException ex) {
-        System.err.println("WARNING: Could not resolve local host name, trying 'localhost'. " + ex);
-        localAddr = InetAddress.getByName("localhost");
-      }
-      this.listenSocket = new ServerSocket(0, 1, localAddr);
+    @BeforeEach
+    void setUp() throws Exception {
+        TestUtil.initDriver(); // Set up log levels, etc.
     }
 
-    String getHost() {
-      return listenSocket.getInetAddress().getHostAddress();
+    @Test
+    void intTimeout() throws Exception {
+        Properties props = new Properties();
+        PGProperty.USER.set(props, TestUtil.getUser());
+        PGProperty.PASSWORD.set(props, TestUtil.getPassword());
+        PGProperty.LOGIN_TIMEOUT.set(props, 10);
+
+        Connection conn = DriverManager.getConnection(TestUtil.getURL(), props);
+        conn.close();
     }
 
-    int getPort() {
-      return listenSocket.getLocalPort();
+    @Test
+    void floatTimeout() throws Exception {
+        Properties props = new Properties();
+        props.setProperty("user", TestUtil.getUser());
+        props.setProperty("password", TestUtil.getPassword());
+        props.setProperty("loginTimeout", "10.0");
+
+        Connection conn = DriverManager.getConnection(TestUtil.getURL(), props);
+        conn.close();
     }
 
-    @Override
-    public void run() {
-      try {
-        Socket newSocket = listenSocket.accept();
+    @Test
+    void zeroTimeout() throws Exception {
+        Properties props = new Properties();
+        props.setProperty("user", TestUtil.getUser());
+        props.setProperty("password", TestUtil.getPassword());
+        props.setProperty("loginTimeout", "0");
+
+        Connection conn = DriverManager.getConnection(TestUtil.getURL(), props);
+        conn.close();
+    }
+
+    @Test
+    void negativeTimeout() throws Exception {
+        Properties props = new Properties();
+        props.setProperty("user", TestUtil.getUser());
+        props.setProperty("password", TestUtil.getPassword());
+        props.setProperty("loginTimeout", "-1");
+
+        Connection conn = DriverManager.getConnection(TestUtil.getURL(), props);
+        conn.close();
+    }
+
+    @Test
+    void badTimeout() throws Exception {
+        Properties props = new Properties();
+        props.setProperty("user", TestUtil.getUser());
+        props.setProperty("password", TestUtil.getPassword());
+        props.setProperty("loginTimeout", "zzzz");
+
+        Connection conn = DriverManager.getConnection(TestUtil.getURL(), props);
+        conn.close();
+    }
+
+    @Test
+    void timeoutOccurs() throws Exception {
+        // Spawn a helper thread to accept a connection and do nothing with it;
+        // this should trigger a timeout.
+        TimeoutHelper helper = new TimeoutHelper();
+        new Thread(helper, "timeout listen helper").start();
+
         try {
-          Thread.sleep(30000);
-        } catch (InterruptedException e) {
-          // Ignore it.
+            String url = "jdbc:postgresql://" + helper.getHost() + ":" + helper.getPort() + "/dummy";
+            Properties props = new Properties();
+            props.setProperty("user", "dummy");
+            props.setProperty("loginTimeout", "5");
+
+            // This is a pretty crude check, but should help distinguish
+            // "can't connect" from "timed out".
+            long startTime = System.nanoTime();
+            Connection conn = null;
+            try {
+                conn = DriverManager.getConnection(url, props);
+                fail("connection was unexpectedly successful");
+            } catch (SQLException e) {
+                // Ignored.
+            } finally {
+                if (conn != null) {
+                    conn.close();
+                }
+            }
+
+            long endTime = System.nanoTime();
+            assertTrue(endTime > startTime + (2500L * 1E6), "Connection timed before 2500ms");
+        } finally {
+            helper.kill();
         }
-        newSocket.close();
-      } catch (IOException e) {
-        // Ignore it.
-      }
     }
 
-    void kill() {
-      try {
-        listenSocket.close();
-      } catch (IOException e) {
-      }
-    }
+    private static class TimeoutHelper implements Runnable {
+        private final ServerSocket listenSocket;
 
-    private final ServerSocket listenSocket;
-  }
-
-  @Test
-  void timeoutOccurs() throws Exception {
-    // Spawn a helper thread to accept a connection and do nothing with it;
-    // this should trigger a timeout.
-    TimeoutHelper helper = new TimeoutHelper();
-    new Thread(helper, "timeout listen helper").start();
-
-    try {
-      String url = "jdbc:postgresql://" + helper.getHost() + ":" + helper.getPort() + "/dummy";
-      Properties props = new Properties();
-      props.setProperty("user", "dummy");
-      props.setProperty("loginTimeout", "5");
-
-      // This is a pretty crude check, but should help distinguish
-      // "can't connect" from "timed out".
-      long startTime = System.nanoTime();
-      Connection conn = null;
-      try {
-        conn = DriverManager.getConnection(url, props);
-        fail("connection was unexpectedly successful");
-      } catch (SQLException e) {
-        // Ignored.
-      } finally {
-        if (conn != null) {
-          conn.close();
+        TimeoutHelper() throws IOException {
+            InetAddress localAddr;
+            try {
+                localAddr = InetAddress.getLocalHost();
+            } catch (UnknownHostException ex) {
+                System.err.println("WARNING: Could not resolve local host name, trying 'localhost'. " + ex);
+                localAddr = InetAddress.getByName("localhost");
+            }
+            this.listenSocket = new ServerSocket(0, 1, localAddr);
         }
-      }
 
-      long endTime = System.nanoTime();
-      assertTrue(endTime > startTime + (2500L * 1E6), "Connection timed before 2500ms");
-    } finally {
-      helper.kill();
+        String getHost() {
+            return listenSocket.getInetAddress().getHostAddress();
+        }
+
+        int getPort() {
+            return listenSocket.getLocalPort();
+        }
+
+        @Override
+        public void run() {
+            try {
+                Socket newSocket = listenSocket.accept();
+                try {
+                    Thread.sleep(30000);
+                } catch (InterruptedException e) {
+                    // Ignore it.
+                }
+                newSocket.close();
+            } catch (IOException e) {
+                // Ignore it.
+            }
+        }
+
+        void kill() {
+            try {
+                listenSocket.close();
+            } catch (IOException e) {
+            }
+        }
     }
-  }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/MiscTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/MiscTest.java
index 5cceb39..87041c3 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/MiscTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/MiscTest.java
@@ -5,14 +5,6 @@
 
 package org.postgresql.test.jdbc2;
 
-import static org.junit.jupiter.api.Assertions.assertNotNull;
-import static org.junit.jupiter.api.Assertions.fail;
-
-import org.postgresql.test.TestUtil;
-
-import org.junit.jupiter.api.Disabled;
-import org.junit.jupiter.api.Test;
-
 import java.io.ByteArrayOutputStream;
 import java.io.ObjectOutputStream;
 import java.sql.Connection;
@@ -20,115 +12,120 @@ import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.sql.SQLWarning;
 import java.sql.Statement;
+import org.junit.jupiter.api.Disabled;
+import org.junit.jupiter.api.Test;
+import org.postgresql.test.TestUtil;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.fail;
 
 /*
-* Some simple tests based on problems reported by users. Hopefully these will help prevent previous
-* problems from re-occurring ;-)
-*
-*/
+ * Some simple tests based on problems reported by users. Hopefully these will help prevent previous
+ * problems from re-occurring ;-)
+ *
+ */
 class MiscTest {
 
-  /*
-   * Some versions of the driver would return rs as a null?
-   *
-   * Sasha <ber0806@iperbole.bologna.it> was having this problem.
-   *
-   * Added Feb 13 2001
-   */
-  @Test
-  void databaseSelectNullBug() throws Exception {
-    Connection con = TestUtil.openDB();
+    /*
+     * Some versions of the driver would return rs as a null?
+     *
+     * Sasha <ber0806@iperbole.bologna.it> was having this problem.
+     *
+     * Added Feb 13 2001
+     */
+    @Test
+    void databaseSelectNullBug() throws Exception {
+        Connection con = TestUtil.openDB();
 
-    Statement st = con.createStatement();
-    ResultSet rs = st.executeQuery("select datname from pg_database");
-    assertNotNull(rs);
+        Statement st = con.createStatement();
+        ResultSet rs = st.executeQuery("select datname from pg_database");
+        assertNotNull(rs);
 
-    while (rs.next()) {
-      rs.getString(1);
+        while (rs.next()) {
+            rs.getString(1);
+        }
+
+        rs.close();
+        st.close();
+
+        TestUtil.closeDB(con);
     }
 
-    rs.close();
-    st.close();
-
-    TestUtil.closeDB(con);
-  }
-
-  /**
-   * Ensure the cancel call does not return before it has completed. Previously it did which
-   * cancelled future queries.
-   */
-  @Test
-  void singleThreadCancel() throws Exception {
-    Connection con = TestUtil.openDB();
-    Statement stmt = con.createStatement();
-    for (int i = 0; i < 100; i++) {
-      ResultSet rs = stmt.executeQuery("SELECT 1");
-      rs.close();
-      stmt.cancel();
-    }
-    TestUtil.closeDB(con);
-  }
-
-  @Test
-  void error() throws Exception {
-    Connection con = TestUtil.openDB();
-    try {
-
-      // transaction mode
-      con.setAutoCommit(false);
-      Statement stmt = con.createStatement();
-      stmt.execute("select 1/0");
-      fail("Should not execute this, as a SQLException s/b thrown");
-      con.commit();
-    } catch (SQLException ex) {
-      // Verify that the SQLException is serializable.
-      ByteArrayOutputStream baos = new ByteArrayOutputStream();
-      ObjectOutputStream oos = new ObjectOutputStream(baos);
-      oos.writeObject(ex);
-      oos.close();
+    /**
+     * Ensure the cancel call does not return before it has completed. Previously it did which
+     * cancelled future queries.
+     */
+    @Test
+    void singleThreadCancel() throws Exception {
+        Connection con = TestUtil.openDB();
+        Statement stmt = con.createStatement();
+        for (int i = 0; i < 100; i++) {
+            ResultSet rs = stmt.executeQuery("SELECT 1");
+            rs.close();
+            stmt.cancel();
+        }
+        TestUtil.closeDB(con);
     }
 
-    con.commit();
-    con.close();
-  }
+    @Test
+    void error() throws Exception {
+        Connection con = TestUtil.openDB();
+        try {
 
-  @Test
-  void warning() throws Exception {
-    Connection con = TestUtil.openDB();
-    Statement stmt = con.createStatement();
-    stmt.execute("CREATE TEMP TABLE t(a int primary key)");
-    SQLWarning warning = stmt.getWarnings();
-    // We should get a warning about primary key index creation
-    // it's possible we won't depending on the server's
-    // client_min_messages setting.
-    while (warning != null) {
-      // Verify that the SQLWarning is serializable.
-      ByteArrayOutputStream baos = new ByteArrayOutputStream();
-      ObjectOutputStream oos = new ObjectOutputStream(baos);
-      oos.writeObject(warning);
-      oos.close();
-      warning = warning.getNextWarning();
+            // transaction mode
+            con.setAutoCommit(false);
+            Statement stmt = con.createStatement();
+            stmt.execute("select 1/0");
+            fail("Should not execute this, as a SQLException s/b thrown");
+            con.commit();
+        } catch (SQLException ex) {
+            // Verify that the SQLException is serializable.
+            ByteArrayOutputStream baos = new ByteArrayOutputStream();
+            ObjectOutputStream oos = new ObjectOutputStream(baos);
+            oos.writeObject(ex);
+            oos.close();
+        }
+
+        con.commit();
+        con.close();
     }
 
-    stmt.close();
-    con.close();
-  }
+    @Test
+    void warning() throws Exception {
+        Connection con = TestUtil.openDB();
+        Statement stmt = con.createStatement();
+        stmt.execute("CREATE TEMP TABLE t(a int primary key)");
+        SQLWarning warning = stmt.getWarnings();
+        // We should get a warning about primary key index creation
+        // it's possible we won't depending on the server's
+        // client_min_messages setting.
+        while (warning != null) {
+            // Verify that the SQLWarning is serializable.
+            ByteArrayOutputStream baos = new ByteArrayOutputStream();
+            ObjectOutputStream oos = new ObjectOutputStream(baos);
+            oos.writeObject(warning);
+            oos.close();
+            warning = warning.getNextWarning();
+        }
 
-  @Disabled
-  @Test
-  void xtestLocking() throws Exception {
-    Connection con = TestUtil.openDB();
-    Connection con2 = TestUtil.openDB();
+        stmt.close();
+        con.close();
+    }
 
-    TestUtil.createTable(con, "test_lock", "name text");
-    Statement st = con.createStatement();
-    Statement st2 = con2.createStatement();
-    con.setAutoCommit(false);
-    st.execute("lock table test_lock");
-    st2.executeUpdate("insert into test_lock ( name ) values ('hello')");
-    con.commit();
-    TestUtil.dropTable(con, "test_lock");
-    con.close();
-    con2.close();
-  }
+    @Disabled
+    @Test
+    void xtestLocking() throws Exception {
+        Connection con = TestUtil.openDB();
+        Connection con2 = TestUtil.openDB();
+
+        TestUtil.createTable(con, "test_lock", "name text");
+        Statement st = con.createStatement();
+        Statement st2 = con2.createStatement();
+        con.setAutoCommit(false);
+        st.execute("lock table test_lock");
+        st2.executeUpdate("insert into test_lock ( name ) values ('hello')");
+        con.commit();
+        TestUtil.dropTable(con, "test_lock");
+        con.close();
+        con2.close();
+    }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/NotifyTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/NotifyTest.java
index 4740a5a..c662abe 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/NotifyTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/NotifyTest.java
@@ -5,267 +5,264 @@
 
 package org.postgresql.test.jdbc2;
 
+import java.sql.Connection;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.Arrays;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
+import org.postgresql.PGConnection;
+import org.postgresql.PGNotification;
+import org.postgresql.core.ServerVersion;
+import org.postgresql.test.TestUtil;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertNotNull;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.junit.jupiter.api.Assertions.fail;
 
-import org.postgresql.PGConnection;
-import org.postgresql.PGNotification;
-import org.postgresql.core.ServerVersion;
-import org.postgresql.test.TestUtil;
-
-import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.Test;
-import org.junit.jupiter.api.Timeout;
-
-import java.sql.Connection;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.Arrays;
-
 class NotifyTest {
-  private Connection conn;
+    private Connection conn;
 
-  @BeforeEach
-  void setUp() throws Exception {
-    conn = TestUtil.openDB();
-  }
-
-  @AfterEach
-  void tearDown() throws SQLException {
-    TestUtil.closeDB(conn);
-  }
-
-  @Test
-  @Timeout(60)
-  void testNotify() throws SQLException {
-    Statement stmt = conn.createStatement();
-    stmt.executeUpdate("LISTEN mynotification");
-    stmt.executeUpdate("NOTIFY mynotification");
-
-    PGNotification[] notifications = conn.unwrap(PGConnection.class).getNotifications();
-    assertNotNull(notifications);
-    assertEquals(1, notifications.length);
-    assertEquals("mynotification", notifications[0].getName());
-    assertEquals("", notifications[0].getParameter());
-
-    stmt.close();
-  }
-
-  @Test
-  @Timeout(60)
-  void notifyArgument() throws Exception {
-    if (!TestUtil.haveMinimumServerVersion(conn, ServerVersion.v9_0)) {
-      return;
+    private static void connectAndNotify(String channel) {
+        Connection conn2 = null;
+        try {
+            conn2 = TestUtil.openDB();
+            Statement stmt2 = conn2.createStatement();
+            stmt2.executeUpdate("NOTIFY " + channel);
+            stmt2.close();
+        } catch (Exception e) {
+            throw new RuntimeException("Couldn't notify '" + channel + "'.", e);
+        } finally {
+            try {
+                conn2.close();
+            } catch (SQLException e) {
+            }
+        }
     }
 
-    Statement stmt = conn.createStatement();
-    stmt.executeUpdate("LISTEN mynotification");
-    stmt.executeUpdate("NOTIFY mynotification, 'message'");
-
-    PGNotification[] notifications = conn.unwrap(PGConnection.class).getNotifications();
-    assertNotNull(notifications);
-    assertEquals(1, notifications.length);
-    assertEquals("mynotification", notifications[0].getName());
-    assertEquals("message", notifications[0].getParameter());
-
-    stmt.close();
-  }
-
-  @Test
-  @Timeout(60)
-  void asyncNotify() throws Exception {
-    Statement stmt = conn.createStatement();
-    stmt.executeUpdate("LISTEN mynotification");
-
-    // Notify on a separate connection to get an async notify on the first.
-    connectAndNotify("mynotification");
-
-    // Wait a bit to let the notify come through... Changed this so the test takes ~2 seconds
-    // less to run and is still as effective.
-    PGNotification[] notifications = null;
-    PGConnection connection = conn.unwrap(PGConnection.class);
-    for (int i = 0; i < 3000; i++) {
-      notifications = connection.getNotifications();
-      if (notifications.length > 0) {
-        break;
-      }
-      Thread.sleep(10);
+    @BeforeEach
+    void setUp() throws Exception {
+        conn = TestUtil.openDB();
     }
 
-    assertNotNull(notifications, "Notification is expected to be delivered when subscription was created"
-            + " before sending notification");
-    assertEquals(1, notifications.length);
-    assertEquals("mynotification", notifications[0].getName());
-    assertEquals("", notifications[0].getParameter());
+    @AfterEach
+    void tearDown() throws SQLException {
+        TestUtil.closeDB(conn);
+    }
 
-    stmt.close();
-  }
+    @Test
+    @Timeout(60)
+    void testNotify() throws SQLException {
+        Statement stmt = conn.createStatement();
+        stmt.executeUpdate("LISTEN mynotification");
+        stmt.executeUpdate("NOTIFY mynotification");
 
-  /**
-   * To test timeouts we have to send the notification from another thread, because we
-   * listener is blocking.
-   */
-  @Test
-  @Timeout(60)
-  void asyncNotifyWithTimeout() throws Exception {
-    Statement stmt = conn.createStatement();
-    stmt.executeUpdate("LISTEN mynotification");
+        PGNotification[] notifications = conn.unwrap(PGConnection.class).getNotifications();
+        assertNotNull(notifications);
+        assertEquals(1, notifications.length);
+        assertEquals("mynotification", notifications[0].getName());
+        assertEquals("", notifications[0].getParameter());
 
-    // Here we let the getNotifications() timeout.
-    long startMillis = System.currentTimeMillis();
-    PGNotification[] notifications = conn.unwrap(PGConnection.class).getNotifications(500);
-    long endMillis = System.currentTimeMillis();
-    long runtime = endMillis - startMillis;
-    assertEquals("[]", Arrays.asList(notifications).toString(), "There have been notifications, although none have been expected.");
-    assertTrue(runtime > 450, "We didn't wait long enough! runtime=" + runtime);
+        stmt.close();
+    }
 
-    stmt.close();
-  }
-
-  @Test
-  @Timeout(60)
-  void asyncNotifyWithTimeoutAndMessagesAvailableWhenStartingListening() throws Exception {
-    Statement stmt = conn.createStatement();
-    stmt.executeUpdate("LISTEN mynotification");
-
-    // Now we check the case where notifications are already available while we are starting to
-    // listen for notifications
-    connectAndNotify("mynotification");
-
-    PGNotification[] notifications = conn.unwrap(PGConnection.class).getNotifications(10000);
-    assertNotNull(notifications);
-    assertEquals(1, notifications.length);
-    assertEquals("mynotification", notifications[0].getName());
-    assertEquals("", notifications[0].getParameter());
-
-    stmt.close();
-  }
-
-  @Test
-  @Timeout(60)
-  void asyncNotifyWithEndlessTimeoutAndMessagesAvailableWhenStartingListening() throws Exception {
-    Statement stmt = conn.createStatement();
-    stmt.executeUpdate("LISTEN mynotification");
-
-    // Now we check the case where notifications are already available while we are waiting forever
-    connectAndNotify("mynotification");
-
-    PGNotification[] notifications = conn.unwrap(PGConnection.class).getNotifications(0);
-    assertNotNull(notifications);
-    assertEquals(1, notifications.length);
-    assertEquals("mynotification", notifications[0].getName());
-    assertEquals("", notifications[0].getParameter());
-
-    stmt.close();
-  }
-
-  @Test
-  @Timeout(60)
-  void asyncNotifyWithTimeoutAndMessagesSendAfter() throws Exception {
-    Statement stmt = conn.createStatement();
-    stmt.executeUpdate("LISTEN mynotification");
-
-    // Now we check the case where notifications are send after we have started to listen for
-    // notifications
-    new Thread( new Runnable() {
-      @Override
-      public void run() {
-        try {
-          Thread.sleep(200);
-        } catch (InterruptedException ie) {
+    @Test
+    @Timeout(60)
+    void notifyArgument() throws Exception {
+        if (!TestUtil.haveMinimumServerVersion(conn, ServerVersion.v9_0)) {
+            return;
         }
+
+        Statement stmt = conn.createStatement();
+        stmt.executeUpdate("LISTEN mynotification");
+        stmt.executeUpdate("NOTIFY mynotification, 'message'");
+
+        PGNotification[] notifications = conn.unwrap(PGConnection.class).getNotifications();
+        assertNotNull(notifications);
+        assertEquals(1, notifications.length);
+        assertEquals("mynotification", notifications[0].getName());
+        assertEquals("message", notifications[0].getParameter());
+
+        stmt.close();
+    }
+
+    @Test
+    @Timeout(60)
+    void asyncNotify() throws Exception {
+        Statement stmt = conn.createStatement();
+        stmt.executeUpdate("LISTEN mynotification");
+
+        // Notify on a separate connection to get an async notify on the first.
         connectAndNotify("mynotification");
-      }
-    }).start();
 
-    PGNotification[] notifications = conn.unwrap(PGConnection.class).getNotifications(10000);
-    assertNotNull(notifications);
-    assertEquals(1, notifications.length);
-    assertEquals("mynotification", notifications[0].getName());
-    assertEquals("", notifications[0].getParameter());
-
-    stmt.close();
-  }
-
-  @Test
-  @Timeout(60)
-  void asyncNotifyWithEndlessTimeoutAndMessagesSendAfter() throws Exception {
-    Statement stmt = conn.createStatement();
-    stmt.executeUpdate("LISTEN mynotification");
-
-    // Now we check the case where notifications are send after we have started to listen for
-    // notifications forever
-    new Thread( new Runnable() {
-      @Override
-      public void run() {
-        try {
-          Thread.sleep(200);
-        } catch (InterruptedException ie) {
+        // Wait a bit to let the notify come through... Changed this so the test takes ~2 seconds
+        // less to run and is still as effective.
+        PGNotification[] notifications = null;
+        PGConnection connection = conn.unwrap(PGConnection.class);
+        for (int i = 0; i < 3000; i++) {
+            notifications = connection.getNotifications();
+            if (notifications.length > 0) {
+                break;
+            }
+            Thread.sleep(10);
         }
+
+        assertNotNull(notifications, "Notification is expected to be delivered when subscription was created"
+                + " before sending notification");
+        assertEquals(1, notifications.length);
+        assertEquals("mynotification", notifications[0].getName());
+        assertEquals("", notifications[0].getParameter());
+
+        stmt.close();
+    }
+
+    /**
+     * To test timeouts we have to send the notification from another thread, because the
+     * listener is blocking.
+     */
+    @Test
+    @Timeout(60)
+    void asyncNotifyWithTimeout() throws Exception {
+        Statement stmt = conn.createStatement();
+        stmt.executeUpdate("LISTEN mynotification");
+
+        // Here we let the getNotifications() timeout.
+        long startMillis = System.currentTimeMillis();
+        PGNotification[] notifications = conn.unwrap(PGConnection.class).getNotifications(500);
+        long endMillis = System.currentTimeMillis();
+        long runtime = endMillis - startMillis;
+        assertEquals("[]", Arrays.asList(notifications).toString(), "There have been notifications, although none have been expected.");
+        assertTrue(runtime > 450, "We didn't wait long enough! runtime=" + runtime);
+
+        stmt.close();
+    }
+
+    @Test
+    @Timeout(60)
+    void asyncNotifyWithTimeoutAndMessagesAvailableWhenStartingListening() throws Exception {
+        Statement stmt = conn.createStatement();
+        stmt.executeUpdate("LISTEN mynotification");
+
+        // Now we check the case where notifications are already available while we are starting to
+        // listen for notifications
         connectAndNotify("mynotification");
-      }
-    }).start();
 
-    PGNotification[] notifications = conn.unwrap(PGConnection.class).getNotifications(0);
-    assertNotNull(notifications);
-    assertEquals(1, notifications.length);
-    assertEquals("mynotification", notifications[0].getName());
-    assertEquals("", notifications[0].getParameter());
+        PGNotification[] notifications = conn.unwrap(PGConnection.class).getNotifications(10000);
+        assertNotNull(notifications);
+        assertEquals(1, notifications.length);
+        assertEquals("mynotification", notifications[0].getName());
+        assertEquals("", notifications[0].getParameter());
 
-    stmt.close();
-  }
+        stmt.close();
+    }
 
-  @Test
-  @Timeout(60)
-  void asyncNotifyWithTimeoutAndSocketThatBecomesClosed() throws Exception {
-    Statement stmt = conn.createStatement();
-    stmt.executeUpdate("LISTEN mynotification");
+    @Test
+    @Timeout(60)
+    void asyncNotifyWithEndlessTimeoutAndMessagesAvailableWhenStartingListening() throws Exception {
+        Statement stmt = conn.createStatement();
+        stmt.executeUpdate("LISTEN mynotification");
+
+        // Now we check the case where notifications are already available while we are waiting forever
+        connectAndNotify("mynotification");
+
+        PGNotification[] notifications = conn.unwrap(PGConnection.class).getNotifications(0);
+        assertNotNull(notifications);
+        assertEquals(1, notifications.length);
+        assertEquals("mynotification", notifications[0].getName());
+        assertEquals("", notifications[0].getParameter());
+
+        stmt.close();
+    }
+
+    @Test
+    @Timeout(60)
+    void asyncNotifyWithTimeoutAndMessagesSendAfter() throws Exception {
+        Statement stmt = conn.createStatement();
+        stmt.executeUpdate("LISTEN mynotification");
+
+        // Now we check the case where notifications are sent after we have started to listen for
+        // notifications
+        new Thread(new Runnable() {
+            @Override
+            public void run() {
+                try {
+                    Thread.sleep(200);
+                } catch (InterruptedException ie) {
+                }
+                connectAndNotify("mynotification");
+            }
+        }).start();
+
+        PGNotification[] notifications = conn.unwrap(PGConnection.class).getNotifications(10000);
+        assertNotNull(notifications);
+        assertEquals(1, notifications.length);
+        assertEquals("mynotification", notifications[0].getName());
+        assertEquals("", notifications[0].getParameter());
+
+        stmt.close();
+    }
+
+    @Test
+    @Timeout(60)
+    void asyncNotifyWithEndlessTimeoutAndMessagesSendAfter() throws Exception {
+        Statement stmt = conn.createStatement();
+        stmt.executeUpdate("LISTEN mynotification");
+
+        // Now we check the case where notifications are sent after we have started to listen for
+        // notifications forever
+        new Thread(new Runnable() {
+            @Override
+            public void run() {
+                try {
+                    Thread.sleep(200);
+                } catch (InterruptedException ie) {
+                }
+                connectAndNotify("mynotification");
+            }
+        }).start();
+
+        PGNotification[] notifications = conn.unwrap(PGConnection.class).getNotifications(0);
+        assertNotNull(notifications);
+        assertEquals(1, notifications.length);
+        assertEquals("mynotification", notifications[0].getName());
+        assertEquals("", notifications[0].getParameter());
+
+        stmt.close();
+    }
+
+    @Test
+    @Timeout(60)
+    void asyncNotifyWithTimeoutAndSocketThatBecomesClosed() throws Exception {
+        Statement stmt = conn.createStatement();
+        stmt.executeUpdate("LISTEN mynotification");
+
+        // Here we check what happens when the connection gets closed from another thread. This
+        // should be possible, and this test ensures that no synchronized statements will stop the
+        // connection from becoming closed.
+        new Thread(new Runnable() {
+            @Override
+            public void run() {
+                try {
+                    Thread.sleep(500);
+                } catch (InterruptedException ie) {
+                }
+                try {
+                    conn.close();
+                } catch (SQLException e) {
+                }
+            }
+        }).start();
 
-    // Here we check what happens when the connection gets closed from another thread. This
-    // should be able, and this test ensures that no synchronized statements will stop the
-    // connection from becoming closed.
-    new Thread( new Runnable() {
-      @Override
-      public void run() {
         try {
-          Thread.sleep(500);
-        } catch (InterruptedException ie) {
-        }
-        try {
-          conn.close();
+            conn.unwrap(PGConnection.class).getNotifications(40000);
+            fail("The getNotifications(...) call didn't return when the socket closed.");
         } catch (SQLException e) {
+            // We expected that
         }
-      }
-    }).start();
 
-    try {
-      conn.unwrap(PGConnection.class).getNotifications(40000);
-      fail("The getNotifications(...) call didn't return when the socket closed.");
-    } catch (SQLException e) {
-      // We expected that
+        stmt.close();
     }
 
-    stmt.close();
-  }
-
-  private static void connectAndNotify(String channel) {
-    Connection conn2 = null;
-    try {
-      conn2 = TestUtil.openDB();
-      Statement stmt2 = conn2.createStatement();
-      stmt2.executeUpdate("NOTIFY " + channel);
-      stmt2.close();
-    } catch (Exception e) {
-      throw new RuntimeException("Couldn't notify '" + channel + "'.", e);
-    } finally {
-      try {
-        conn2.close();
-      } catch (SQLException e) {
-      }
-    }
-  }
-
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/NumericTransferTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/NumericTransferTest.java
index d1cea18..4d106fe 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/NumericTransferTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/NumericTransferTest.java
@@ -25,62 +25,62 @@ import java.util.Properties;
 
 @RunWith(Parameterized.class)
 public class NumericTransferTest extends BaseTest4 {
-  public NumericTransferTest(BinaryMode binaryMode) {
-    setBinaryMode(binaryMode);
-  }
-
-  @Override
-  protected void updateProperties(Properties props) {
-    super.updateProperties(props);
-    PGProperty.BINARY_TRANSFER_ENABLE.set(props, Oid.NUMERIC);
-  }
-
-  @Parameterized.Parameters(name = "binary = {0}")
-  public static Iterable<Object[]> data() {
-    Collection<Object[]> ids = new ArrayList<>();
-    for (BinaryMode binaryMode : BinaryMode.values()) {
-      ids.add(new Object[]{binaryMode});
+    public NumericTransferTest(BinaryMode binaryMode) {
+        setBinaryMode(binaryMode);
     }
-    return ids;
-  }
 
-  @Test
-  public void receive100000() throws SQLException {
-    Statement statement = con.createStatement();
-    for (String sign : new String[]{"", "-"}) {
-      for (int i = 0; i < 100; i++) {
-        final String sql = "SELECT " + sign + "1E+" + i + "::numeric";
-        ResultSet rs = statement.executeQuery(sql);
-        rs.next();
-        if (i == 0) {
-          final String expected = sign + "1";
-          assertEquals("getString for " + sql, expected, rs.getString(1));
-          assertEquals("getBigDecimal for " + sql, expected, rs.getBigDecimal(1).toString());
-        } else {
-          final String expected = sign + String.format("1%0" + i + "d", 0);
-          assertEquals("getString for " + sql, expected, rs.getString(1));
-          assertEquals("getBigDecimal for " + sql, expected, rs.getBigDecimal(1).toString());
+    @Parameterized.Parameters(name = "binary = {0}")
+    public static Iterable<Object[]> data() {
+        Collection<Object[]> ids = new ArrayList<>();
+        for (BinaryMode binaryMode : BinaryMode.values()) {
+            ids.add(new Object[]{binaryMode});
         }
-        rs.close();
-      }
+        return ids;
     }
-    statement.close();
-  }
 
-  @Test
-  public void sendReceive100000() throws SQLException {
-    PreparedStatement statement = con.prepareStatement("select ?::numeric");
-    for (String sign : new String[]{"", "-"}) {
-      for (int i = 0; i < 100; i++) {
-        final String expected = sign + (i == 0 ? 1 : String.format("1%0" + i + "d", 0));
-        statement.setBigDecimal(1, new BigDecimal(expected));
-        ResultSet rs = statement.executeQuery();
-        rs.next();
-        assertEquals("getString for " + expected, expected, rs.getString(1));
-        assertEquals("getBigDecimal for " + expected, expected, rs.getBigDecimal(1).toString());
-        rs.close();
-      }
+    @Override
+    protected void updateProperties(Properties props) {
+        super.updateProperties(props);
+        PGProperty.BINARY_TRANSFER_ENABLE.set(props, Oid.NUMERIC);
+    }
+
+    @Test
+    public void receive100000() throws SQLException {
+        Statement statement = con.createStatement();
+        for (String sign : new String[]{"", "-"}) {
+            for (int i = 0; i < 100; i++) {
+                final String sql = "SELECT " + sign + "1E+" + i + "::numeric";
+                ResultSet rs = statement.executeQuery(sql);
+                rs.next();
+                if (i == 0) {
+                    final String expected = sign + "1";
+                    assertEquals("getString for " + sql, expected, rs.getString(1));
+                    assertEquals("getBigDecimal for " + sql, expected, rs.getBigDecimal(1).toString());
+                } else {
+                    final String expected = sign + String.format("1%0" + i + "d", 0);
+                    assertEquals("getString for " + sql, expected, rs.getString(1));
+                    assertEquals("getBigDecimal for " + sql, expected, rs.getBigDecimal(1).toString());
+                }
+                rs.close();
+            }
+        }
+        statement.close();
+    }
+
+    @Test
+    public void sendReceive100000() throws SQLException {
+        PreparedStatement statement = con.prepareStatement("select ?::numeric");
+        for (String sign : new String[]{"", "-"}) {
+            for (int i = 0; i < 100; i++) {
+                final String expected = sign + (i == 0 ? 1 : String.format("1%0" + i + "d", 0));
+                statement.setBigDecimal(1, new BigDecimal(expected));
+                ResultSet rs = statement.executeQuery();
+                rs.next();
+                assertEquals("getString for " + expected, expected, rs.getString(1));
+                assertEquals("getBigDecimal for " + expected, expected, rs.getBigDecimal(1).toString());
+                rs.close();
+            }
+        }
+        statement.close();
     }
-    statement.close();
-  }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/NumericTransferTest2.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/NumericTransferTest2.java
index 7a2962a..07b6aa0 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/NumericTransferTest2.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/NumericTransferTest2.java
@@ -28,87 +28,87 @@ import java.util.Properties;
 @RunWith(Parameterized.class)
 public class NumericTransferTest2 extends BaseTest4 {
 
-  final BigDecimal value;
+    final BigDecimal value;
 
-  public NumericTransferTest2(BinaryMode binaryMode, BigDecimal value) {
-    setBinaryMode(binaryMode);
-    this.value = value;
-  }
-
-  @Override
-  protected void updateProperties(Properties props) {
-    super.updateProperties(props);
-    PGProperty.BINARY_TRANSFER_ENABLE.set(props, Oid.NUMERIC);
-  }
-
-  @Parameterized.Parameters(name = "binary = {0}, value = {1,number,#,###.##################################################}")
-  public static Iterable<Object[]> data() {
-    Collection<Object[]> numbers = new ArrayList<>();
-    for (BinaryMode binaryMode : BinaryMode.values()) {
-      numbers.add(new Object[]{binaryMode, new BigDecimal("1.0")});
-      numbers.add(new Object[]{binaryMode, new BigDecimal("0.000000000000000000000000000000000000000000000000000")});
-      numbers.add(new Object[]{binaryMode, new BigDecimal("0.100000000000000000000000000000000000000000000009900")});
-      numbers.add(new Object[]{binaryMode, new BigDecimal("-1.0")});
-      numbers.add(new Object[]{binaryMode, new BigDecimal("-1")});
-      numbers.add(new Object[]{binaryMode, new BigDecimal("1.2")});
-      numbers.add(new Object[]{binaryMode, new BigDecimal("-2.05")});
-      numbers.add(new Object[]{binaryMode, new BigDecimal("0.000000000000000000000000000990")});
-      numbers.add(new Object[]{binaryMode, new BigDecimal("-0.000000000000000000000000000990")});
-      numbers.add(new Object[]{binaryMode, new BigDecimal("10.0000000000099")});
-      numbers.add(new Object[]{binaryMode, new BigDecimal(".10000000000000")});
-      numbers.add(new Object[]{binaryMode, new BigDecimal("1.10000000000000")});
-      numbers.add(new Object[]{binaryMode, new BigDecimal("99999.2")});
-      numbers.add(new Object[]{binaryMode, new BigDecimal("99999")});
-      numbers.add(new Object[]{binaryMode, new BigDecimal("-99999.2")});
-      numbers.add(new Object[]{binaryMode, new BigDecimal("-99999")});
-      numbers.add(new Object[]{binaryMode, new BigDecimal("2147483647")});
-      numbers.add(new Object[]{binaryMode, new BigDecimal("-2147483648")});
-      numbers.add(new Object[]{binaryMode, new BigDecimal("2147483648")});
-      numbers.add(new Object[]{binaryMode, new BigDecimal("-2147483649")});
-      numbers.add(new Object[]{binaryMode, new BigDecimal("9223372036854775807")});
-      numbers.add(new Object[]{binaryMode, new BigDecimal("-9223372036854775808")});
-      numbers.add(new Object[]{binaryMode, new BigDecimal("9223372036854775808")});
-      numbers.add(new Object[]{binaryMode, new BigDecimal("-9223372036854775809")});
-      numbers.add(new Object[]{binaryMode, new BigDecimal("10223372036850000000")});
-      numbers.add(new Object[]{binaryMode, new BigDecimal("19223372036854775807")});
-      numbers.add(new Object[]{binaryMode, new BigDecimal("19223372036854775807.300")});
-      numbers.add(new Object[]{binaryMode, new BigDecimal("-19223372036854775807.300")});
-      numbers.add(new Object[]{binaryMode, new BigDecimal(BigInteger.valueOf(1234567890987654321L), -1)});
-      numbers.add(new Object[]{binaryMode, new BigDecimal(BigInteger.valueOf(1234567890987654321L), -5)});
-      numbers.add(new Object[]{binaryMode, new BigDecimal(BigInteger.valueOf(-1234567890987654321L), -3)});
-      numbers.add(new Object[]{binaryMode, new BigDecimal(BigInteger.valueOf(6), -8)});
-      numbers.add(new Object[]{binaryMode, new BigDecimal("30000")});
-      numbers.add(new Object[]{binaryMode, new BigDecimal("40000").setScale(15)});
-      numbers.add(new Object[]{binaryMode, new BigDecimal("20000.00000000000000000000")});
-      numbers.add(new Object[]{binaryMode, new BigDecimal("9990000").setScale(10)});
-      numbers.add(new Object[]{binaryMode, new BigDecimal("1000000").setScale(20)});
-      numbers.add(new Object[]{binaryMode, new BigDecimal("10000000000000000000000000000000000000").setScale(20)});
-      numbers.add(new Object[]{binaryMode, new BigDecimal("90000000000000000000000000000000000000")});
+    public NumericTransferTest2(BinaryMode binaryMode, BigDecimal value) {
+        setBinaryMode(binaryMode);
+        this.value = value;
     }
-    return numbers;
-  }
 
-  @Test
-  public void receiveValue() throws SQLException {
-    final String valString = value.toPlainString();
-    try (Statement statement = con.createStatement()) {
-      final String sql = "SELECT " + valString + "::numeric";
-      try (ResultSet rs = statement.executeQuery(sql)) {
-        assertTrue(rs.next());
-        assertEquals("getBigDecimal for " + sql, valString, rs.getBigDecimal(1).toPlainString());
-      }
+    @Parameterized.Parameters(name = "binary = {0}, value = {1,number,#,###.##################################################}")
+    public static Iterable<Object[]> data() {
+        Collection<Object[]> numbers = new ArrayList<>();
+        for (BinaryMode binaryMode : BinaryMode.values()) {
+            numbers.add(new Object[]{binaryMode, new BigDecimal("1.0")});
+            numbers.add(new Object[]{binaryMode, new BigDecimal("0.000000000000000000000000000000000000000000000000000")});
+            numbers.add(new Object[]{binaryMode, new BigDecimal("0.100000000000000000000000000000000000000000000009900")});
+            numbers.add(new Object[]{binaryMode, new BigDecimal("-1.0")});
+            numbers.add(new Object[]{binaryMode, new BigDecimal("-1")});
+            numbers.add(new Object[]{binaryMode, new BigDecimal("1.2")});
+            numbers.add(new Object[]{binaryMode, new BigDecimal("-2.05")});
+            numbers.add(new Object[]{binaryMode, new BigDecimal("0.000000000000000000000000000990")});
+            numbers.add(new Object[]{binaryMode, new BigDecimal("-0.000000000000000000000000000990")});
+            numbers.add(new Object[]{binaryMode, new BigDecimal("10.0000000000099")});
+            numbers.add(new Object[]{binaryMode, new BigDecimal(".10000000000000")});
+            numbers.add(new Object[]{binaryMode, new BigDecimal("1.10000000000000")});
+            numbers.add(new Object[]{binaryMode, new BigDecimal("99999.2")});
+            numbers.add(new Object[]{binaryMode, new BigDecimal("99999")});
+            numbers.add(new Object[]{binaryMode, new BigDecimal("-99999.2")});
+            numbers.add(new Object[]{binaryMode, new BigDecimal("-99999")});
+            numbers.add(new Object[]{binaryMode, new BigDecimal("2147483647")});
+            numbers.add(new Object[]{binaryMode, new BigDecimal("-2147483648")});
+            numbers.add(new Object[]{binaryMode, new BigDecimal("2147483648")});
+            numbers.add(new Object[]{binaryMode, new BigDecimal("-2147483649")});
+            numbers.add(new Object[]{binaryMode, new BigDecimal("9223372036854775807")});
+            numbers.add(new Object[]{binaryMode, new BigDecimal("-9223372036854775808")});
+            numbers.add(new Object[]{binaryMode, new BigDecimal("9223372036854775808")});
+            numbers.add(new Object[]{binaryMode, new BigDecimal("-9223372036854775809")});
+            numbers.add(new Object[]{binaryMode, new BigDecimal("10223372036850000000")});
+            numbers.add(new Object[]{binaryMode, new BigDecimal("19223372036854775807")});
+            numbers.add(new Object[]{binaryMode, new BigDecimal("19223372036854775807.300")});
+            numbers.add(new Object[]{binaryMode, new BigDecimal("-19223372036854775807.300")});
+            numbers.add(new Object[]{binaryMode, new BigDecimal(BigInteger.valueOf(1234567890987654321L), -1)});
+            numbers.add(new Object[]{binaryMode, new BigDecimal(BigInteger.valueOf(1234567890987654321L), -5)});
+            numbers.add(new Object[]{binaryMode, new BigDecimal(BigInteger.valueOf(-1234567890987654321L), -3)});
+            numbers.add(new Object[]{binaryMode, new BigDecimal(BigInteger.valueOf(6), -8)});
+            numbers.add(new Object[]{binaryMode, new BigDecimal("30000")});
+            numbers.add(new Object[]{binaryMode, new BigDecimal("40000").setScale(15)});
+            numbers.add(new Object[]{binaryMode, new BigDecimal("20000.00000000000000000000")});
+            numbers.add(new Object[]{binaryMode, new BigDecimal("9990000").setScale(10)});
+            numbers.add(new Object[]{binaryMode, new BigDecimal("1000000").setScale(20)});
+            numbers.add(new Object[]{binaryMode, new BigDecimal("10000000000000000000000000000000000000").setScale(20)});
+            numbers.add(new Object[]{binaryMode, new BigDecimal("90000000000000000000000000000000000000")});
+        }
+        return numbers;
     }
-  }
 
-  @Test
-  public void sendReceiveValue() throws SQLException {
-    final String valString = value.toPlainString();
-    try (PreparedStatement statement = con.prepareStatement("select ?::numeric")) {
-      statement.setBigDecimal(1, value);
-      try (ResultSet rs = statement.executeQuery()) {
-        rs.next();
-        assertEquals("getBigDecimal for " + valString, valString, rs.getBigDecimal(1).toPlainString());
-      }
+    @Override
+    protected void updateProperties(Properties props) {
+        super.updateProperties(props);
+        PGProperty.BINARY_TRANSFER_ENABLE.set(props, Oid.NUMERIC);
+    }
+
+    @Test
+    public void receiveValue() throws SQLException {
+        final String valString = value.toPlainString();
+        try (Statement statement = con.createStatement()) {
+            final String sql = "SELECT " + valString + "::numeric";
+            try (ResultSet rs = statement.executeQuery(sql)) {
+                assertTrue(rs.next());
+                assertEquals("getBigDecimal for " + sql, valString, rs.getBigDecimal(1).toPlainString());
+            }
+        }
+    }
+
+    @Test
+    public void sendReceiveValue() throws SQLException {
+        final String valString = value.toPlainString();
+        try (PreparedStatement statement = con.prepareStatement("select ?::numeric")) {
+            statement.setBigDecimal(1, value);
+            try (ResultSet rs = statement.executeQuery()) {
+                rs.next();
+                assertEquals("getBigDecimal for " + valString, valString, rs.getBigDecimal(1).toPlainString());
+            }
+        }
     }
-  }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/OuterJoinSyntaxTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/OuterJoinSyntaxTest.java
index 1511545..be824f3 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/OuterJoinSyntaxTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/OuterJoinSyntaxTest.java
@@ -22,95 +22,95 @@ import java.util.List;
  */
 public class OuterJoinSyntaxTest extends BaseTest4 {
 
-  @Test
-  public void testOuterJoinSyntaxWithSingleJoinAndWithoutOj() throws Exception {
-    testOuterJoinSyntax(
-        "select t1.id as t1_id, t1.text as t1_text,"
-        + " t2.id as t2_id, t2.text as t2_text"
-        + " from (values (1, 'one'), (2, 'two')) as t1 (id, text)"
-        + " left outer join (values (1, 'a'), (3, 'b')) as t2 (id, text) on (t1.id = t2.id)",
-        Arrays.asList("1,one,1,a", "2,two,null,null"));
-  }
-
-  @Test
-  public void testOuterJoinSyntaxWithMultipleJoinsAndWithoutOj() throws Exception {
-    testOuterJoinSyntax(
-        "select t1.id as t1_id, t1.text as t1_text,"
-        + " t2.id as t2_id, t2.text as t2_text,"
-        + " t3.id as t3_id, t3.text as t3_text"
-        + " from (values (1, 'one'), (2, 'two')) as t1 (id, text)"
-        + " left outer join (values (1, 'a'), (3, 'b')) as t2 (id, text) on (t1.id = t2.id)"
-        + " left outer join (values (4, '1'), (2, '2')) as t3 (id, text) on (t2.id = t3.id)",
-        Arrays.asList("1,one,1,a,null,null", "2,two,null,null,null,null"));
-  }
-
-  @Test
-  public void testOuterJoinSyntaxWithSingleJoinAndWithOj() throws Exception {
-    testOuterJoinSyntax(
-        "select t1.id as t1_id, t1.text as t1_text,"
-        + " t2.id as t2_id, t2.text as t2_text"
-        + " from {oj (values (1, 'one'), (2, 'two')) as t1 (id, text)"
-        + " left outer join (values (1, 'a'), (3, 'b')) as t2 (id, text) on (t1.id = t2.id) }",
-        Arrays.asList("1,one,1,a", "2,two,null,null"));
-  }
-
-  @Test
-  public void testOuterJoinSyntaxWithMultipleJoinsAndWithOj() throws Exception {
-    testOuterJoinSyntax(
-        "select t1.id as t1_id, t1.text as t1_text,"
-        + " t2.id as t2_id, t2.text as t2_text,"
-        + " t3.id as t3_id, t3.text as t3_text"
-        + " from {oj (values (1, 'one'), (2, 'two')) as t1 (id, text)"
-        + " left outer join (values (1, 'a'), (3, 'b')) as t2 (id, text) on (t1.id = t2.id)"
-        + " left outer join (values (1, '1'), (2, '2')) as t3 (id, text) on (t2.id = t3.id)}",
-        Arrays.asList("1,one,1,a,1,1", "2,two,null,null,null,null"));
-  }
-
-  @Test
-  public void testOuterJoinSyntaxWithMultipleJoinsAndWithOj2() throws Exception {
-    // multiple joins with oj and missing space character after oj
-    testOuterJoinSyntax(
-        "select t1.id as t1_id, t1.text as t1_text,"
-        + " t2.id as t2_id, t2.text as t2_text,"
-        + " t3.id as t3_id, t3.text as t3_text"
-        + " from {oj(values (1, 'one'), (2, 'two')) as t1 (id, text)"
-        + " left outer join (values (1, 'a'), (3, 'b')) as t2 (id, text) on (t1.id = t2.id)"
-        + " left outer join (values (4, '1'), (2, '2')) as t3 (id, text) on (t2.id = t3.id)}",
-        Arrays.asList("1,one,1,a,null,null", "2,two,null,null,null,null"));
-  }
-
-  @Test
-  public void testOuterJoinSyntaxWithMultipleJoinsAndWithOj3() throws Exception {
-    // multiple joins with oj and missing space character after oj and some more parenthesis
-    testOuterJoinSyntax(
-        "select t1.id as t1_id, t1.text as t1_text,"
-        + " t2.id as t2_id, t2.text as t2_text,"
-        + " t3.id as t3_id, t3.text as t3_text"
-        + " from {oj(((values (1, 'one'), (2, 'two')) as t1 (id, text)"
-        + " left outer join (values (1, 'a'), (3, 'b')) as t2 (id, text) on (t1.id = t2.id))"
-        + " left outer join (values (1, '1'), (4, '2')) as t3 (id, text) on (t2.id = t3.id))}",
-        Arrays.asList("1,one,1,a,1,1", "2,two,null,null,null,null"));
-  }
-
-  /**
-   * Executes the statement.
-   *
-   * @param theQuery the query to execute
-   * @param expectedResult the expected columns in result set
-   * @throws Exception on error
-   */
-  private void testOuterJoinSyntax(String theQuery, List<String> expectedResult) throws Exception {
-    final Statement st = con.createStatement();
-    try {
-      final ResultSet rs = st.executeQuery(theQuery);
-      try {
-        Assert.assertEquals("SQL " + theQuery, TestUtil.join(TestUtil.resultSetToLines(rs)), TestUtil.join(expectedResult));
-      } finally {
-        TestUtil.closeQuietly(rs);
-      }
-    } finally {
-      TestUtil.closeQuietly(st);
+    @Test
+    public void testOuterJoinSyntaxWithSingleJoinAndWithoutOj() throws Exception {
+        testOuterJoinSyntax(
+                "select t1.id as t1_id, t1.text as t1_text,"
+                        + " t2.id as t2_id, t2.text as t2_text"
+                        + " from (values (1, 'one'), (2, 'two')) as t1 (id, text)"
+                        + " left outer join (values (1, 'a'), (3, 'b')) as t2 (id, text) on (t1.id = t2.id)",
+                Arrays.asList("1,one,1,a", "2,two,null,null"));
+    }
+
+    @Test
+    public void testOuterJoinSyntaxWithMultipleJoinsAndWithoutOj() throws Exception {
+        testOuterJoinSyntax(
+                "select t1.id as t1_id, t1.text as t1_text,"
+                        + " t2.id as t2_id, t2.text as t2_text,"
+                        + " t3.id as t3_id, t3.text as t3_text"
+                        + " from (values (1, 'one'), (2, 'two')) as t1 (id, text)"
+                        + " left outer join (values (1, 'a'), (3, 'b')) as t2 (id, text) on (t1.id = t2.id)"
+                        + " left outer join (values (4, '1'), (2, '2')) as t3 (id, text) on (t2.id = t3.id)",
+                Arrays.asList("1,one,1,a,null,null", "2,two,null,null,null,null"));
+    }
+
+    @Test
+    public void testOuterJoinSyntaxWithSingleJoinAndWithOj() throws Exception {
+        testOuterJoinSyntax(
+                "select t1.id as t1_id, t1.text as t1_text,"
+                        + " t2.id as t2_id, t2.text as t2_text"
+                        + " from {oj (values (1, 'one'), (2, 'two')) as t1 (id, text)"
+                        + " left outer join (values (1, 'a'), (3, 'b')) as t2 (id, text) on (t1.id = t2.id) }",
+                Arrays.asList("1,one,1,a", "2,two,null,null"));
+    }
+
+    @Test
+    public void testOuterJoinSyntaxWithMultipleJoinsAndWithOj() throws Exception {
+        testOuterJoinSyntax(
+                "select t1.id as t1_id, t1.text as t1_text,"
+                        + " t2.id as t2_id, t2.text as t2_text,"
+                        + " t3.id as t3_id, t3.text as t3_text"
+                        + " from {oj (values (1, 'one'), (2, 'two')) as t1 (id, text)"
+                        + " left outer join (values (1, 'a'), (3, 'b')) as t2 (id, text) on (t1.id = t2.id)"
+                        + " left outer join (values (1, '1'), (2, '2')) as t3 (id, text) on (t2.id = t3.id)}",
+                Arrays.asList("1,one,1,a,1,1", "2,two,null,null,null,null"));
+    }
+
+    @Test
+    public void testOuterJoinSyntaxWithMultipleJoinsAndWithOj2() throws Exception {
+        // multiple joins with oj and missing space character after oj
+        testOuterJoinSyntax(
+                "select t1.id as t1_id, t1.text as t1_text,"
+                        + " t2.id as t2_id, t2.text as t2_text,"
+                        + " t3.id as t3_id, t3.text as t3_text"
+                        + " from {oj(values (1, 'one'), (2, 'two')) as t1 (id, text)"
+                        + " left outer join (values (1, 'a'), (3, 'b')) as t2 (id, text) on (t1.id = t2.id)"
+                        + " left outer join (values (4, '1'), (2, '2')) as t3 (id, text) on (t2.id = t3.id)}",
+                Arrays.asList("1,one,1,a,null,null", "2,two,null,null,null,null"));
+    }
+
+    @Test
+    public void testOuterJoinSyntaxWithMultipleJoinsAndWithOj3() throws Exception {
+        // multiple joins with oj and missing space character after oj and some more parenthesis
+        testOuterJoinSyntax(
+                "select t1.id as t1_id, t1.text as t1_text,"
+                        + " t2.id as t2_id, t2.text as t2_text,"
+                        + " t3.id as t3_id, t3.text as t3_text"
+                        + " from {oj(((values (1, 'one'), (2, 'two')) as t1 (id, text)"
+                        + " left outer join (values (1, 'a'), (3, 'b')) as t2 (id, text) on (t1.id = t2.id))"
+                        + " left outer join (values (1, '1'), (4, '2')) as t3 (id, text) on (t2.id = t3.id))}",
+                Arrays.asList("1,one,1,a,1,1", "2,two,null,null,null,null"));
+    }
+
+    /**
+     * Executes the statement.
+     *
+     * @param theQuery       the query to execute
+     * @param expectedResult the expected columns in result set
+     * @throws Exception on error
+     */
+    private void testOuterJoinSyntax(String theQuery, List<String> expectedResult) throws Exception {
+        final Statement st = con.createStatement();
+        try {
+            final ResultSet rs = st.executeQuery(theQuery);
+            try {
+                Assert.assertEquals("SQL " + theQuery, TestUtil.join(TestUtil.resultSetToLines(rs)), TestUtil.join(expectedResult));
+            } finally {
+                TestUtil.closeQuietly(rs);
+            }
+        } finally {
+            TestUtil.closeQuietly(st);
+        }
     }
-  }
 
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/PGObjectGetTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/PGObjectGetTest.java
index 4245f50..30bc818 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/PGObjectGetTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/PGObjectGetTest.java
@@ -5,9 +5,14 @@
 
 package org.postgresql.test.jdbc2;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
-
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.Collection;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
 import org.postgresql.geometric.PGbox;
 import org.postgresql.geometric.PGcircle;
 import org.postgresql.geometric.PGline;
@@ -18,112 +23,104 @@ import org.postgresql.geometric.PGpolygon;
 import org.postgresql.util.PGInterval;
 import org.postgresql.util.PGmoney;
 import org.postgresql.util.PGobject;
-
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.util.ArrayList;
-import java.util.Collection;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
 
 @RunWith(Parameterized.class)
 public class PGObjectGetTest extends BaseTest4 {
-  private final String sqlExpression;
-  private final Class<? extends PGobject> type;
-  private final String expected;
-  private final String stringValue;
+    private final String sqlExpression;
+    private final Class<? extends PGobject> type;
+    private final String expected;
+    private final String stringValue;
 
-  public PGObjectGetTest(BinaryMode binaryMode, String sqlExpression,
-      Class<? extends PGobject> type, String expected, String stringValue) {
-    setBinaryMode(binaryMode);
-    this.sqlExpression = sqlExpression;
-    this.type = type;
-    this.expected = expected;
-    this.stringValue = stringValue;
-  }
-
-  @Parameterized.Parameters(name = "binary = {0}, sql = {1}, type = {2}")
-  public static Iterable<Object[]> data() {
-    Collection<Object[]> ids = new ArrayList<>();
-    for (BinaryMode binaryMode : BinaryMode.values()) {
-      ids.add(new Object[]{binaryMode, "null::inet", PGobject.class,
-          "PGobject(type=inet, value=null)", null});
-      ids.add(new Object[]{binaryMode, "null::box", PGbox.class,
-          "PGbox(type=box, value=null)", null});
-      ids.add(new Object[]{binaryMode, "null::circle", PGcircle.class,
-          "PGcircle(type=circle, value=null)", null});
-      ids.add(new Object[]{binaryMode, "null::line", PGline.class,
-          "PGline(type=line, value=null)", null});
-      ids.add(new Object[]{binaryMode, "null::lseg", PGlseg.class,
-          "PGlseg(type=lseg, value=null)", null});
-      ids.add(new Object[]{binaryMode, "null::path", PGpath.class,
-          "PGpath(type=path, value=null)", null});
-      ids.add(new Object[]{binaryMode, "null::point", PGpoint.class,
-          "PGpoint(type=point, value=null)", null});
-      ids.add(new Object[]{binaryMode, "null::polygon", PGpolygon.class,
-          "PGpolygon(type=polygon, value=null)", null});
-      ids.add(new Object[]{binaryMode, "null::money", PGmoney.class,
-          "PGmoney(type=money, value=null)", null});
-      ids.add(new Object[]{binaryMode, "null::interval", PGInterval.class,
-          "PGInterval(type=interval, value=null)", null});
+    public PGObjectGetTest(BinaryMode binaryMode, String sqlExpression,
+                           Class<? extends PGobject> type, String expected, String stringValue) {
+        setBinaryMode(binaryMode);
+        this.sqlExpression = sqlExpression;
+        this.type = type;
+        this.expected = expected;
+        this.stringValue = stringValue;
     }
-    return ids;
-  }
 
-  @Test
-  public void getAsPGobject() throws SQLException {
-    testGet(sqlExpression, expected, PGobject.class);
-  }
-
-  @Test
-  public void getAsPGobjectSubtype() throws SQLException {
-    testGet(sqlExpression, expected, type);
-  }
-
-  @Test
-  public void getAsString() throws SQLException {
-    PreparedStatement ps = con.prepareStatement("select " + sqlExpression);
-    ResultSet rs = ps.executeQuery();
-    rs.next();
-    assertEquals(
-        "'" + sqlExpression + "'.getString(1)",
-        stringValue,
-        rs.getString(1)
-    );
-  }
-
-  private void testGet(final String s, String expected, Class<? extends PGobject> type) throws SQLException {
-    PreparedStatement ps = con.prepareStatement("select " + s);
-    ResultSet rs = ps.executeQuery();
-    rs.next();
-    assertEquals(
-        "'" + s + "'.getObject(1, " + type.getSimpleName() + ".class)",
-        expected,
-        printObject(rs.getObject(1, type))
-    );
-    if (expected.contains("value=null)")) {
-      // For some reason we return objects as nulls
-      assertNull(
-          "'select " + s + "'.getObject(1)",
-          rs.getObject(1)
-      );
-    } else {
-      assertEquals(
-          "'select " + s + "'.getObject(1)",
-          expected,
-          printObject(rs.getObject(1))
-      );
+    @Parameterized.Parameters(name = "binary = {0}, sql = {1}, type = {2}")
+    public static Iterable<Object[]> data() {
+        Collection<Object[]> ids = new ArrayList<>();
+        for (BinaryMode binaryMode : BinaryMode.values()) {
+            ids.add(new Object[]{binaryMode, "null::inet", PGobject.class,
+                    "PGobject(type=inet, value=null)", null});
+            ids.add(new Object[]{binaryMode, "null::box", PGbox.class,
+                    "PGbox(type=box, value=null)", null});
+            ids.add(new Object[]{binaryMode, "null::circle", PGcircle.class,
+                    "PGcircle(type=circle, value=null)", null});
+            ids.add(new Object[]{binaryMode, "null::line", PGline.class,
+                    "PGline(type=line, value=null)", null});
+            ids.add(new Object[]{binaryMode, "null::lseg", PGlseg.class,
+                    "PGlseg(type=lseg, value=null)", null});
+            ids.add(new Object[]{binaryMode, "null::path", PGpath.class,
+                    "PGpath(type=path, value=null)", null});
+            ids.add(new Object[]{binaryMode, "null::point", PGpoint.class,
+                    "PGpoint(type=point, value=null)", null});
+            ids.add(new Object[]{binaryMode, "null::polygon", PGpolygon.class,
+                    "PGpolygon(type=polygon, value=null)", null});
+            ids.add(new Object[]{binaryMode, "null::money", PGmoney.class,
+                    "PGmoney(type=money, value=null)", null});
+            ids.add(new Object[]{binaryMode, "null::interval", PGInterval.class,
+                    "PGInterval(type=interval, value=null)", null});
+        }
+        return ids;
     }
-  }
 
-  String printObject(Object object) {
-    if (!(object instanceof PGobject)) {
-      return String.valueOf(object);
+    @Test
+    public void getAsPGobject() throws SQLException {
+        testGet(sqlExpression, expected, PGobject.class);
+    }
+
+    @Test
+    public void getAsPGobjectSubtype() throws SQLException {
+        testGet(sqlExpression, expected, type);
+    }
+
+    @Test
+    public void getAsString() throws SQLException {
+        PreparedStatement ps = con.prepareStatement("select " + sqlExpression);
+        ResultSet rs = ps.executeQuery();
+        rs.next();
+        assertEquals(
+                "'" + sqlExpression + "'.getString(1)",
+                stringValue,
+                rs.getString(1)
+        );
+    }
+
+    private void testGet(final String s, String expected, Class<? extends PGobject> type) throws SQLException {
+        PreparedStatement ps = con.prepareStatement("select " + s);
+        ResultSet rs = ps.executeQuery();
+        rs.next();
+        assertEquals(
+                "'" + s + "'.getObject(1, " + type.getSimpleName() + ".class)",
+                expected,
+                printObject(rs.getObject(1, type))
+        );
+        if (expected.contains("value=null)")) {
+            // For some reason we return objects as nulls
+            assertNull(
+                    "'select " + s + "'.getObject(1)",
+                    rs.getObject(1)
+            );
+        } else {
+            assertEquals(
+                    "'select " + s + "'.getObject(1)",
+                    expected,
+                    printObject(rs.getObject(1))
+            );
+        }
+    }
+
+    String printObject(Object object) {
+        if (!(object instanceof PGobject)) {
+            return String.valueOf(object);
+        }
+        PGobject pg = (PGobject) object;
+        return pg.getClass().getSimpleName() + "(type=" + pg.getType() + ", value=" + pg.getValue() + ")";
     }
-    PGobject pg = (PGobject) object;
-    return pg.getClass().getSimpleName() + "(type=" + pg.getType() + ", value=" + pg.getValue() + ")";
-  }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/PGObjectSetTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/PGObjectSetTest.java
index f3aca34..3b5359c 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/PGObjectSetTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/PGObjectSetTest.java
@@ -33,96 +33,96 @@ import java.util.Collection;
 
 @RunWith(Parameterized.class)
 public class PGObjectSetTest extends BaseTest4 {
-  private final String typeName;
-  private final String expected;
-  private final Class<? extends PGobject> type;
+    private final String typeName;
+    private final String expected;
+    private final Class<? extends PGobject> type;
 
-  public PGObjectSetTest(BinaryMode binaryMode, Class<? extends PGobject> type,
-      String typeName, String expected) {
-    setBinaryMode(binaryMode);
-    this.expected = expected;
-    this.type = type;
-    this.typeName = typeName;
-  }
-
-  @Parameterized.Parameters(name = "binary = {0}, sql = {2}, type = {1}")
-  public static Iterable<Object[]> data() {
-    Collection<Object[]> ids = new ArrayList<>();
-    for (BinaryMode binaryMode : BinaryMode.values()) {
-      ids.add(new Object[]{binaryMode, PGobject.class, "inet",
-          "PGobject(type=inet, value=null)"});
-      ids.add(new Object[]{binaryMode, PGbox.class, "box",
-          "PGbox(type=box, value=null)"});
-      ids.add(new Object[]{binaryMode, PGcircle.class, "circle",
-          "PGcircle(type=circle, value=null)"});
-      ids.add(new Object[]{binaryMode, PGline.class, "line",
-          "PGline(type=line, value=null)"});
-      ids.add(new Object[]{binaryMode, PGlseg.class, "lseg",
-          "PGlseg(type=lseg, value=null)"});
-      ids.add(new Object[]{binaryMode, PGpath.class, "path",
-          "PGpath(type=path, value=null)"});
-      ids.add(new Object[]{binaryMode, PGpoint.class, "point",
-          "PGpoint(type=point, value=null)"});
-      ids.add(new Object[]{binaryMode, PGpolygon.class, "polygon",
-          "PGpolygon(type=polygon, value=null)"});
-      ids.add(new Object[]{binaryMode, PGmoney.class, "money",
-          "PGmoney(type=money, value=null)"});
-      ids.add(new Object[]{binaryMode, PGInterval.class, "interval",
-          "PGInterval(type=interval, value=null)"});
+    public PGObjectSetTest(BinaryMode binaryMode, Class<? extends PGobject> type,
+                           String typeName, String expected) {
+        setBinaryMode(binaryMode);
+        this.expected = expected;
+        this.type = type;
+        this.typeName = typeName;
     }
-    return ids;
-  }
 
-  @Test
-  public void setNullAsPGobject() throws SQLException {
-    PGobject object = new PGobject();
-    object.setType(typeName);
-    object.setValue(null);
-    assertTrue("IsNull should return true", object.isNull());
-    testSet(object, expected, PGobject.class);
-  }
-
-  @Test
-  public void setNullAsPGobjectSubtype() throws SQLException, NoSuchMethodException,
-      IllegalAccessException, InvocationTargetException, InstantiationException {
-    if (type == PGobject.class) {
-      // We can't use PGobject without setType
-      return;
+    @Parameterized.Parameters(name = "binary = {0}, sql = {2}, type = {1}")
+    public static Iterable<Object[]> data() {
+        Collection<Object[]> ids = new ArrayList<>();
+        for (BinaryMode binaryMode : BinaryMode.values()) {
+            ids.add(new Object[]{binaryMode, PGobject.class, "inet",
+                    "PGobject(type=inet, value=null)"});
+            ids.add(new Object[]{binaryMode, PGbox.class, "box",
+                    "PGbox(type=box, value=null)"});
+            ids.add(new Object[]{binaryMode, PGcircle.class, "circle",
+                    "PGcircle(type=circle, value=null)"});
+            ids.add(new Object[]{binaryMode, PGline.class, "line",
+                    "PGline(type=line, value=null)"});
+            ids.add(new Object[]{binaryMode, PGlseg.class, "lseg",
+                    "PGlseg(type=lseg, value=null)"});
+            ids.add(new Object[]{binaryMode, PGpath.class, "path",
+                    "PGpath(type=path, value=null)"});
+            ids.add(new Object[]{binaryMode, PGpoint.class, "point",
+                    "PGpoint(type=point, value=null)"});
+            ids.add(new Object[]{binaryMode, PGpolygon.class, "polygon",
+                    "PGpolygon(type=polygon, value=null)"});
+            ids.add(new Object[]{binaryMode, PGmoney.class, "money",
+                    "PGmoney(type=money, value=null)"});
+            ids.add(new Object[]{binaryMode, PGInterval.class, "interval",
+                    "PGInterval(type=interval, value=null)"});
+        }
+        return ids;
     }
-    PGobject object = type.getConstructor().newInstance();
-    object.setValue(null);
-    testSet(object, expected, type);
-  }
 
-  private void testSet(PGobject value, String expected, Class<? extends PGobject> type) throws SQLException {
-    PreparedStatement ps = con.prepareStatement("select ?::" + value.getType());
-    ps.setObject(1, value);
-    ResultSet rs = ps.executeQuery();
-    rs.next();
-    assertEquals(
-        "'select ?::" + value.getType() + "'.withParam(" + printObject(value) + ").getObject(1, " + type.getSimpleName() + ".class)",
-        expected,
-        printObject(rs.getObject(1, type))
-    );
-    if (expected.contains("value=null)")) {
-      assertNull(
-          "'select ?::" + value.getType() + "'.withParam(" + printObject(value) + ").getObject(1)",
-          rs.getObject(1)
-      );
-    } else {
-      assertEquals(
-          "'select ?::" + value.getType() + "'.withParam(" + printObject(value) + ").getObject(1)",
-          expected,
-          printObject(rs.getObject(1))
-      );
+    @Test
+    public void setNullAsPGobject() throws SQLException {
+        PGobject object = new PGobject();
+        object.setType(typeName);
+        object.setValue(null);
+        assertTrue("IsNull should return true", object.isNull());
+        testSet(object, expected, PGobject.class);
     }
-  }
 
-  String printObject(Object object) {
-    if (!(object instanceof PGobject)) {
-      return String.valueOf(object);
+    @Test
+    public void setNullAsPGobjectSubtype() throws SQLException, NoSuchMethodException,
+            IllegalAccessException, InvocationTargetException, InstantiationException {
+        if (type == PGobject.class) {
+            // We can't use PGobject without setType
+            return;
+        }
+        PGobject object = type.getConstructor().newInstance();
+        object.setValue(null);
+        testSet(object, expected, type);
+    }
+
+    private void testSet(PGobject value, String expected, Class<? extends PGobject> type) throws SQLException {
+        PreparedStatement ps = con.prepareStatement("select ?::" + value.getType());
+        ps.setObject(1, value);
+        ResultSet rs = ps.executeQuery();
+        rs.next();
+        assertEquals(
+                "'select ?::" + value.getType() + "'.withParam(" + printObject(value) + ").getObject(1, " + type.getSimpleName() + ".class)",
+                expected,
+                printObject(rs.getObject(1, type))
+        );
+        if (expected.contains("value=null)")) {
+            assertNull(
+                    "'select ?::" + value.getType() + "'.withParam(" + printObject(value) + ").getObject(1)",
+                    rs.getObject(1)
+            );
+        } else {
+            assertEquals(
+                    "'select ?::" + value.getType() + "'.withParam(" + printObject(value) + ").getObject(1)",
+                    expected,
+                    printObject(rs.getObject(1))
+            );
+        }
+    }
+
+    String printObject(Object object) {
+        if (!(object instanceof PGobject)) {
+            return String.valueOf(object);
+        }
+        PGobject pg = (PGobject) object;
+        return pg.getClass().getSimpleName() + "(type=" + pg.getType() + ", value=" + pg.getValue() + ")";
     }
-    PGobject pg = (PGobject) object;
-    return pg.getClass().getSimpleName() + "(type=" + pg.getType() + ", value=" + pg.getValue() + ")";
-  }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/PGPropertyTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/PGPropertyTest.java
index 1d727fe..9a80249 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/PGPropertyTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/PGPropertyTest.java
@@ -5,24 +5,6 @@
 
 package org.postgresql.test.jdbc2;
 
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertFalse;
-import static org.junit.jupiter.api.Assertions.assertNotNull;
-import static org.junit.jupiter.api.Assertions.assertSame;
-import static org.junit.jupiter.api.Assertions.assertTrue;
-
-import org.postgresql.Driver;
-import org.postgresql.PGProperty;
-import org.postgresql.ds.PGSimpleDataSource;
-import org.postgresql.ds.common.BaseDataSource;
-import org.postgresql.jdbc.AutoSave;
-import org.postgresql.test.TestUtil;
-import org.postgresql.util.URLCoder;
-
-import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.Test;
-
 import java.beans.BeanInfo;
 import java.beans.Introspector;
 import java.beans.PropertyDescriptor;
@@ -32,266 +14,281 @@ import java.util.Locale;
 import java.util.Map;
 import java.util.Properties;
 import java.util.TreeMap;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.postgresql.Driver;
+import org.postgresql.PGProperty;
+import org.postgresql.ds.PGSimpleDataSource;
+import org.postgresql.ds.common.BaseDataSource;
+import org.postgresql.jdbc.AutoSave;
+import org.postgresql.test.TestUtil;
+import org.postgresql.util.URLCoder;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertSame;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 class PGPropertyTest {
 
-  /**
-   * Some tests modify the "ssl" system property. To not disturb other test cases in the suite store
-   * the value of the property and restore it.
-   */
-  private String bootSSLPropertyValue;
+    /**
+     * Some tests modify the "ssl" system property. To not disturb other test cases in the suite store
+     * the value of the property and restore it.
+     */
+    private String bootSSLPropertyValue;
 
-  @BeforeEach
-  void setUp() {
-    bootSSLPropertyValue = System.getProperty("ssl");
-  }
-
-  @AfterEach
-  void tearDown() {
-    if (bootSSLPropertyValue == null) {
-      System.getProperties().remove("ssl");
-    } else {
-      System.setProperty("ssl", bootSSLPropertyValue);
-    }
-  }
-
-  /**
-   * Test that we can get and set all default values and all choices (if any).
-   */
-  @Test
-  void getSetAllProperties() {
-    Properties properties = new Properties();
-    for (PGProperty property : PGProperty.values()) {
-      String value = property.getOrDefault(properties);
-      assertEquals(property.getDefaultValue(), value);
-
-      property.set(properties, value);
-      assertEquals(value, property.getOrDefault(properties));
-
-      if (property.getChoices() != null && property.getChoices().length > 0) {
-        for (String choice : property.getChoices()) {
-          property.set(properties, choice);
-          assertEquals(choice, property.getOrDefault(properties));
-        }
-      }
-    }
-  }
-
-  @Test
-  void sortOrder() {
-    String prevName = null;
-    for (PGProperty property : PGProperty.values()) {
-      String name = property.name();
-      if (prevName != null) {
-        assertTrue(name.compareTo(prevName) > 0, "PGProperty names should be sorted in ascending order: " + name + " < " + prevName);
-      }
-      prevName = name;
-    }
-  }
-
-  /**
-   * Test that the enum constant is common with the underlying property name.
-   */
-  @Test
-  void enumConstantNaming() {
-    for (PGProperty property : PGProperty.values()) {
-      String enumName = property.name().replaceAll("_", "");
-      assertEquals(property.getName().toLowerCase(Locale.ROOT), enumName.toLowerCase(Locale.ROOT), "Naming of the enum constant [" + property.name()
-          + "] should follow the naming of its underlying property [" + property.getName()
-          + "] in PGProperty");
-    }
-  }
-
-  @Test
-  void driverGetPropertyInfo() {
-    Driver driver = new Driver();
-    DriverPropertyInfo[] infos = driver.getPropertyInfo(
-        "jdbc:postgresql://localhost/test?user=fred&password=secret&ssl=true",
-        // this is the example we give in docs
-        new Properties());
-    for (DriverPropertyInfo info : infos) {
-      if ("user".equals(info.name)) {
-        assertEquals("fred", info.value);
-      } else if ("password".equals(info.name)) {
-        assertEquals("secret", info.value);
-      } else if ("ssl".equals(info.name)) {
-        assertEquals("true", info.value);
-      }
-    }
-  }
-
-  /**
-   * Test if the datasource has getter and setter for all properties.
-   */
-  @Test
-  void dataSourceProperties() throws Exception {
-    PGSimpleDataSource dataSource = new PGSimpleDataSource();
-    BeanInfo info = Introspector.getBeanInfo(dataSource.getClass());
-
-    // index PropertyDescriptors by name
-    Map<String, PropertyDescriptor> propertyDescriptors =
-        new TreeMap<>(String.CASE_INSENSITIVE_ORDER);
-    for (PropertyDescriptor propertyDescriptor : info.getPropertyDescriptors()) {
-      propertyDescriptors.put(propertyDescriptor.getName(), propertyDescriptor);
+    @BeforeEach
+    void setUp() {
+        bootSSLPropertyValue = System.getProperty("ssl");
     }
 
-    // test for the existence of all read methods (getXXX/isXXX) and write methods (setXXX) for all
-    // known properties
-    for (PGProperty property : PGProperty.values()) {
-      if (!property.getName().startsWith("PG") && property != PGProperty.SERVICE) {
-        assertTrue(propertyDescriptors.containsKey(property.getName()), "Missing getter/setter for property [" + property.getName() + "] in ["
-            + BaseDataSource.class + "]");
-
-        assertNotNull(propertyDescriptors.get(property.getName()).getReadMethod(),
-            "No getter for property [" + property.getName() + "] in ["
-            + BaseDataSource.class + "]");
-
-        assertNotNull(propertyDescriptors.get(property.getName()).getWriteMethod(),
-            "No setter for property [" + property.getName() + "] in ["
-            + BaseDataSource.class + "]");
-      }
-    }
-
-    // test readability/writability of default value
-    for (PGProperty property : PGProperty.values()) {
-      if (!property.getName().startsWith("PG") && property != PGProperty.SERVICE) {
-        Object propertyValue =
-            propertyDescriptors.get(property.getName()).getReadMethod().invoke(dataSource);
-        propertyDescriptors.get(property.getName()).getWriteMethod().invoke(dataSource,
-            propertyValue);
-      }
-    }
-  }
-
-  /**
-   * Test to make sure that setURL doesn't overwrite autosave
-   * more should be put in but this scratches the current itch
-   */
-  @Test
-  void overWriteDSProperties() throws Exception {
-    PGSimpleDataSource dataSource = new PGSimpleDataSource();
-    dataSource.setAutosave(AutoSave.CONSERVATIVE);
-    dataSource.setURL("jdbc:postgresql://localhost:5432/postgres");
-    assertSame(AutoSave.CONSERVATIVE, dataSource.getAutosave());
-  }
-
-  /**
-   * Test that {@link PGProperty#isPresent(Properties)} returns a correct result in all cases.
-   */
-  @Test
-  void isPresentWithParseURLResult() throws Exception {
-    Properties givenProperties = new Properties();
-    givenProperties.setProperty("user", TestUtil.getUser());
-    givenProperties.setProperty("password", TestUtil.getPassword());
-
-    Properties sysProperties = System.getProperties();
-    sysProperties.remove("ssl");
-    System.setProperties(sysProperties);
-    Properties parsedProperties = Driver.parseURL(TestUtil.getURL(), givenProperties);
-    assertFalse(PGProperty.SSL.isPresent(parsedProperties),
-        "SSL property should not be present");
-
-    System.setProperty("ssl", "true");
-    givenProperties.setProperty("ssl", "true");
-    parsedProperties = Driver.parseURL(TestUtil.getURL(), givenProperties);
-    assertTrue(PGProperty.SSL.isPresent(parsedProperties), "SSL property should be present");
-
-    givenProperties.setProperty("ssl", "anotherValue");
-    parsedProperties = Driver.parseURL(TestUtil.getURL(), givenProperties);
-    assertTrue(PGProperty.SSL.isPresent(parsedProperties), "SSL property should be present");
-
-    parsedProperties = Driver.parseURL(TestUtil.getURL() + "&ssl=true", null);
-    assertTrue(PGProperty.SSL.isPresent(parsedProperties), "SSL property should be present");
-  }
-
-  /**
-   * Check whether the isPresent method really works.
-   */
-  @Test
-  void presenceCheck() {
-    Properties empty = new Properties();
-    Object value = PGProperty.READ_ONLY.getOrDefault(empty);
-    assertNotNull(value);
-    assertFalse(PGProperty.READ_ONLY.isPresent(empty));
-  }
-
-  @Test
-  void encodedUrlValues() {
-    String databaseName = "d&a%ta+base";
-    String userName = "&u%ser";
-    String password = "p%a&s^s#w!o@r*";
-    String url = "jdbc:postgresql://"
-        + "localhost" + ":" + 5432 + "/"
-        + URLCoder.encode(databaseName)
-        + "?user=" + URLCoder.encode(userName)
-        + "&password=" + URLCoder.encode(password);
-    Properties parsed = Driver.parseURL(url, new Properties());
-    assertEquals(databaseName, PGProperty.PG_DBNAME.getOrDefault(parsed), "database");
-    assertEquals(userName, PGProperty.USER.getOrDefault(parsed), "user");
-    assertEquals(password, PGProperty.PASSWORD.getOrDefault(parsed), "password");
-  }
-
-  @Test
-  void lowerCamelCase() {
-    // These are legacy properties excluded for backward compatibility.
-    ArrayList<String> excluded = new ArrayList<>();
-    excluded.add("LOG_LEVEL"); // Remove with PR #722
-    excluded.add("PREPARED_STATEMENT_CACHE_SIZE_MIB"); // preparedStatementCacheSizeMi[B]
-    excluded.add("DATABASE_METADATA_CACHE_FIELDS_MIB"); // databaseMetadataCacheFieldsMi[B]
-    excluded.add("STRING_TYPE"); // string[t]ype
-    excluded.add("SSL_MODE"); // ssl[m]ode
-    excluded.add("SSL_FACTORY"); // ssl[f]actory
-    excluded.add("SSL_FACTORY_ARG"); // ssl[f]actory[a]rg
-    excluded.add("SSL_HOSTNAME_VERIFIER"); // ssl[h]ostname[v]erifier
-    excluded.add("SSL_CERT"); // ssl[c]ert
-    excluded.add("SSL_KEY"); // ssl[k]ey
-    excluded.add("SSL_ROOT_CERT"); // ssl[r]oot[c]ert
-    excluded.add("SSL_PASSWORD"); // ssl[p]assword
-    excluded.add("SSL_PASSWORD_CALLBACK"); // ssl[p]assword[c]allback
-    excluded.add("APPLICATION_NAME"); // [A]pplicationName
-    excluded.add("GSS_LIB"); // gss[l]ib
-    excluded.add("REWRITE_BATCHED_INSERTS"); // re[W]riteBatchedInserts
-
-    for (PGProperty property : PGProperty.values()) {
-      if (!property.name().startsWith("PG")) { // Ignore all properties that start with PG
-        String[] words = property.name().split("_");
-        if (words.length == 1) {
-          assertEquals(words[0].toLowerCase(Locale.ROOT), property.getName());
+    @AfterEach
+    void tearDown() {
+        if (bootSSLPropertyValue == null) {
+            System.getProperties().remove("ssl");
         } else {
-          if (!excluded.contains(property.name())) {
-            String word = "";
-            for (int i = 0; i < words.length; i++) {
-              if (i == 0) {
-                word = words[i].toLowerCase(Locale.ROOT);
-              } else {
-                word += words[i].substring(0, 1).toUpperCase(Locale.ROOT) + words[i].substring(1).toLowerCase(Locale.ROOT);
-              }
-            }
-            assertEquals(word, property.getName());
-          }
+            System.setProperty("ssl", bootSSLPropertyValue);
         }
-      }
     }
-  }
 
-  @Test
-  void encodedUrlValuesFromDataSource() {
-    String databaseName = "d&a%ta+base";
-    String userName = "&u%ser";
-    String password = "p%a&s^s#w!o@r*";
-    String applicationName = "Laurel&Hardy=Best?Yes";
-    PGSimpleDataSource dataSource = new PGSimpleDataSource();
+    /**
+     * Test that we can get and set all default values and all choices (if any).
+     */
+    @Test
+    void getSetAllProperties() {
+        Properties properties = new Properties();
+        for (PGProperty property : PGProperty.values()) {
+            String value = property.getOrDefault(properties);
+            assertEquals(property.getDefaultValue(), value);
 
-    dataSource.setDatabaseName(databaseName);
-    dataSource.setUser(userName);
-    dataSource.setPassword(password);
-    dataSource.setApplicationName(applicationName);
+            property.set(properties, value);
+            assertEquals(value, property.getOrDefault(properties));
 
-    Properties parsed = Driver.parseURL(dataSource.getURL(), new Properties());
-    assertEquals(databaseName, PGProperty.PG_DBNAME.getOrDefault(parsed), "database");
-    // datasources do not pass username and password as URL parameters
-    assertFalse(PGProperty.USER.isPresent(parsed), "user");
-    assertFalse(PGProperty.PASSWORD.isPresent(parsed), "password");
-    assertEquals(applicationName, PGProperty.APPLICATION_NAME.getOrDefault(parsed), "APPLICATION_NAME");
-  }
+            if (property.getChoices() != null && property.getChoices().length > 0) {
+                for (String choice : property.getChoices()) {
+                    property.set(properties, choice);
+                    assertEquals(choice, property.getOrDefault(properties));
+                }
+            }
+        }
+    }
+
+    @Test
+    void sortOrder() {
+        String prevName = null;
+        for (PGProperty property : PGProperty.values()) {
+            String name = property.name();
+            if (prevName != null) {
+                assertTrue(name.compareTo(prevName) > 0, "PGProperty names should be sorted in ascending order: " + name + " < " + prevName);
+            }
+            prevName = name;
+        }
+    }
+
+    /**
+     * Test that the enum constant is common with the underlying property name.
+     */
+    @Test
+    void enumConstantNaming() {
+        for (PGProperty property : PGProperty.values()) {
+            String enumName = property.name().replaceAll("_", "");
+            assertEquals(property.getName().toLowerCase(Locale.ROOT), enumName.toLowerCase(Locale.ROOT), "Naming of the enum constant [" + property.name()
+                    + "] should follow the naming of its underlying property [" + property.getName()
+                    + "] in PGProperty");
+        }
+    }
+
+    @Test
+    void driverGetPropertyInfo() {
+        Driver driver = new Driver();
+        DriverPropertyInfo[] infos = driver.getPropertyInfo(
+                "jdbc:postgresql://localhost/test?user=fred&password=secret&ssl=true",
+                // this is the example we give in docs
+                new Properties());
+        for (DriverPropertyInfo info : infos) {
+            if ("user".equals(info.name)) {
+                assertEquals("fred", info.value);
+            } else if ("password".equals(info.name)) {
+                assertEquals("secret", info.value);
+            } else if ("ssl".equals(info.name)) {
+                assertEquals("true", info.value);
+            }
+        }
+    }
+
+    /**
+     * Test if the datasource has getter and setter for all properties.
+     */
+    @Test
+    void dataSourceProperties() throws Exception {
+        PGSimpleDataSource dataSource = new PGSimpleDataSource();
+        BeanInfo info = Introspector.getBeanInfo(dataSource.getClass());
+
+        // index PropertyDescriptors by name
+        Map<String, PropertyDescriptor> propertyDescriptors =
+                new TreeMap<>(String.CASE_INSENSITIVE_ORDER);
+        for (PropertyDescriptor propertyDescriptor : info.getPropertyDescriptors()) {
+            propertyDescriptors.put(propertyDescriptor.getName(), propertyDescriptor);
+        }
+
+        // test for the existence of all read methods (getXXX/isXXX) and write methods (setXXX) for all
+        // known properties
+        for (PGProperty property : PGProperty.values()) {
+            if (!property.getName().startsWith("PG") && property != PGProperty.SERVICE) {
+                assertTrue(propertyDescriptors.containsKey(property.getName()), "Missing getter/setter for property [" + property.getName() + "] in ["
+                        + BaseDataSource.class + "]");
+
+                assertNotNull(propertyDescriptors.get(property.getName()).getReadMethod(),
+                        "No getter for property [" + property.getName() + "] in ["
+                                + BaseDataSource.class + "]");
+
+                assertNotNull(propertyDescriptors.get(property.getName()).getWriteMethod(),
+                        "No setter for property [" + property.getName() + "] in ["
+                                + BaseDataSource.class + "]");
+            }
+        }
+
+        // test readability/writability of default value
+        for (PGProperty property : PGProperty.values()) {
+            if (!property.getName().startsWith("PG") && property != PGProperty.SERVICE) {
+                Object propertyValue =
+                        propertyDescriptors.get(property.getName()).getReadMethod().invoke(dataSource);
+                propertyDescriptors.get(property.getName()).getWriteMethod().invoke(dataSource,
+                        propertyValue);
+            }
+        }
+    }
+
+    /**
+     * Test to make sure that setURL doesn't overwrite autosave
+     * more should be put in but this scratches the current itch
+     */
+    @Test
+    void overWriteDSProperties() throws Exception {
+        PGSimpleDataSource dataSource = new PGSimpleDataSource();
+        dataSource.setAutosave(AutoSave.CONSERVATIVE);
+        dataSource.setURL("jdbc:postgresql://localhost:5432/postgres");
+        assertSame(AutoSave.CONSERVATIVE, dataSource.getAutosave());
+    }
+
+    /**
+     * Test that {@link PGProperty#isPresent(Properties)} returns a correct result in all cases.
+     */
+    @Test
+    void isPresentWithParseURLResult() throws Exception {
+        Properties givenProperties = new Properties();
+        givenProperties.setProperty("user", TestUtil.getUser());
+        givenProperties.setProperty("password", TestUtil.getPassword());
+
+        Properties sysProperties = System.getProperties();
+        sysProperties.remove("ssl");
+        System.setProperties(sysProperties);
+        Properties parsedProperties = Driver.parseURL(TestUtil.getURL(), givenProperties);
+        assertFalse(PGProperty.SSL.isPresent(parsedProperties),
+                "SSL property should not be present");
+
+        System.setProperty("ssl", "true");
+        givenProperties.setProperty("ssl", "true");
+        parsedProperties = Driver.parseURL(TestUtil.getURL(), givenProperties);
+        assertTrue(PGProperty.SSL.isPresent(parsedProperties), "SSL property should be present");
+
+        givenProperties.setProperty("ssl", "anotherValue");
+        parsedProperties = Driver.parseURL(TestUtil.getURL(), givenProperties);
+        assertTrue(PGProperty.SSL.isPresent(parsedProperties), "SSL property should be present");
+
+        parsedProperties = Driver.parseURL(TestUtil.getURL() + "&ssl=true", null);
+        assertTrue(PGProperty.SSL.isPresent(parsedProperties), "SSL property should be present");
+    }
+
+    /**
+     * Check whether the isPresent method really works.
+     */
+    @Test
+    void presenceCheck() {
+        Properties empty = new Properties();
+        Object value = PGProperty.READ_ONLY.getOrDefault(empty);
+        assertNotNull(value);
+        assertFalse(PGProperty.READ_ONLY.isPresent(empty));
+    }
+
+    @Test
+    void encodedUrlValues() {
+        String databaseName = "d&a%ta+base";
+        String userName = "&u%ser";
+        String password = "p%a&s^s#w!o@r*";
+        String url = "jdbc:postgresql://"
+                + "localhost" + ":" + 5432 + "/"
+                + URLCoder.encode(databaseName)
+                + "?user=" + URLCoder.encode(userName)
+                + "&password=" + URLCoder.encode(password);
+        Properties parsed = Driver.parseURL(url, new Properties());
+        assertEquals(databaseName, PGProperty.PG_DBNAME.getOrDefault(parsed), "database");
+        assertEquals(userName, PGProperty.USER.getOrDefault(parsed), "user");
+        assertEquals(password, PGProperty.PASSWORD.getOrDefault(parsed), "password");
+    }
+
+    @Test
+    void lowerCamelCase() {
+        // These are legacy properties excluded for backward compatibility.
+        ArrayList<String> excluded = new ArrayList<>();
+        excluded.add("LOG_LEVEL"); // Remove with PR #722
+        excluded.add("PREPARED_STATEMENT_CACHE_SIZE_MIB"); // preparedStatementCacheSizeMi[B]
+        excluded.add("DATABASE_METADATA_CACHE_FIELDS_MIB"); // databaseMetadataCacheFieldsMi[B]
+        excluded.add("STRING_TYPE"); // string[t]ype
+        excluded.add("SSL_MODE"); // ssl[m]ode
+        excluded.add("SSL_FACTORY"); // ssl[f]actory
+        excluded.add("SSL_FACTORY_ARG"); // ssl[f]actory[a]rg
+        excluded.add("SSL_HOSTNAME_VERIFIER"); // ssl[h]ostname[v]erifier
+        excluded.add("SSL_CERT"); // ssl[c]ert
+        excluded.add("SSL_KEY"); // ssl[k]ey
+        excluded.add("SSL_ROOT_CERT"); // ssl[r]oot[c]ert
+        excluded.add("SSL_PASSWORD"); // ssl[p]assword
+        excluded.add("SSL_PASSWORD_CALLBACK"); // ssl[p]assword[c]allback
+        excluded.add("APPLICATION_NAME"); // [A]pplicationName
+        excluded.add("GSS_LIB"); // gss[l]ib
+        excluded.add("REWRITE_BATCHED_INSERTS"); // re[W]riteBatchedInserts
+
+        for (PGProperty property : PGProperty.values()) {
+            if (!property.name().startsWith("PG")) { // Ignore all properties that start with PG
+                String[] words = property.name().split("_");
+                if (words.length == 1) {
+                    assertEquals(words[0].toLowerCase(Locale.ROOT), property.getName());
+                } else {
+                    if (!excluded.contains(property.name())) {
+                        String word = "";
+                        for (int i = 0; i < words.length; i++) {
+                            if (i == 0) {
+                                word = words[i].toLowerCase(Locale.ROOT);
+                            } else {
+                                word += words[i].substring(0, 1).toUpperCase(Locale.ROOT) + words[i].substring(1).toLowerCase(Locale.ROOT);
+                            }
+                        }
+                        assertEquals(word, property.getName());
+                    }
+                }
+            }
+        }
+    }
+
+    @Test
+    void encodedUrlValuesFromDataSource() {
+        String databaseName = "d&a%ta+base";
+        String userName = "&u%ser";
+        String password = "p%a&s^s#w!o@r*";
+        String applicationName = "Laurel&Hardy=Best?Yes";
+        PGSimpleDataSource dataSource = new PGSimpleDataSource();
+
+        dataSource.setDatabaseName(databaseName);
+        dataSource.setUser(userName);
+        dataSource.setPassword(password);
+        dataSource.setApplicationName(applicationName);
+
+        Properties parsed = Driver.parseURL(dataSource.getURL(), new Properties());
+        assertEquals(databaseName, PGProperty.PG_DBNAME.getOrDefault(parsed), "database");
+        // datasources do not pass username and password as URL parameters
+        assertFalse(PGProperty.USER.isPresent(parsed), "user");
+        assertFalse(PGProperty.PASSWORD.isPresent(parsed), "password");
+        assertEquals(applicationName, PGProperty.APPLICATION_NAME.getOrDefault(parsed), "APPLICATION_NAME");
+    }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/PGTimeTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/PGTimeTest.java
index 3463f21..33e81f3 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/PGTimeTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/PGTimeTest.java
@@ -5,17 +5,6 @@
 
 package org.postgresql.test.jdbc2;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assume.assumeTrue;
-
-import org.postgresql.test.TestUtil;
-import org.postgresql.util.PGInterval;
-import org.postgresql.util.PGTime;
-
-import org.junit.Test;
-
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.SQLException;
@@ -24,223 +13,231 @@ import java.sql.Time;
 import java.text.SimpleDateFormat;
 import java.util.Calendar;
 import java.util.TimeZone;
+import org.junit.Test;
+import org.postgresql.test.TestUtil;
+import org.postgresql.util.PGInterval;
+import org.postgresql.util.PGTime;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assume.assumeTrue;
 
 /**
  * Tests {@link PGTime} in various scenarios including setTime, setObject for both <code>time with
  * time zone</code> and <code>time without time zone</code> data types.
  */
 public class PGTimeTest extends BaseTest4 {
-  /**
-   * The name of the test table.
-   */
-  private static final String TEST_TABLE = "testtime";
+    /**
+     * The name of the test table.
+     */
+    private static final String TEST_TABLE = "testtime";
 
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-    TestUtil.createTempTable(con, TEST_TABLE, "tm time, tz time with time zone");
-  }
-
-  @Override
-  public void tearDown() throws SQLException {
-    TestUtil.dropTable(con, TEST_TABLE);
-    super.tearDown();
-  }
-
-  /**
-   * Tests that adding a <code>PGInterval</code> object to a <code>PGTime</code> object when
-   * performed as a casted string and object.
-   *
-   * @throws SQLException if a JDBC or database problem occurs.
-   */
-  @Test
-  public void testTimeWithInterval() throws SQLException {
-    assumeTrue(TestUtil.haveIntegerDateTimes(con));
-
-    Calendar cal = Calendar.getInstance();
-    cal.set(1970, Calendar.JANUARY, 1);
-
-    final long now = cal.getTimeInMillis();
-    verifyTimeWithInterval(new PGTime(now), new PGInterval(0, 0, 0, 1, 2, 3.14), true);
-    verifyTimeWithInterval(new PGTime(now), new PGInterval(0, 0, 0, 1, 2, 3.14), false);
-
-    verifyTimeWithInterval(new PGTime(now, Calendar.getInstance(TimeZone.getTimeZone("GMT"))),
-        new PGInterval(0, 0, 0, 1, 2, 3.14), true);
-    verifyTimeWithInterval(new PGTime(now, Calendar.getInstance(TimeZone.getTimeZone("GMT"))),
-        new PGInterval(0, 0, 0, 1, 2, 3.14), false);
-
-    verifyTimeWithInterval(new PGTime(now, Calendar.getInstance(TimeZone.getTimeZone("GMT+01:00"))),
-        new PGInterval(0, 0, 0, 1, 2, 3.456), true);
-    verifyTimeWithInterval(new PGTime(now, Calendar.getInstance(TimeZone.getTimeZone("GMT+01:00"))),
-        new PGInterval(0, 0, 0, 1, 2, 3.456), false);
-  }
-
-  /**
-   * Verifies that adding the given <code>PGInterval</code> object to a <code>PGTime</code> produces
-   * the correct results when performed as a casted string and object.
-   *
-   * @param time the time to test.
-   * @param interval the time interval.
-   * @param useSetObject <code>true</code> if the setObject method should be used instead of
-   *        setTime.
-   * @throws SQLException if a JDBC or database problem occurs.
-   */
-  private void verifyTimeWithInterval(PGTime time, PGInterval interval, boolean useSetObject)
-      throws SQLException {
-    // Construct the SQL query.
-    String sql;
-    if (time.getCalendar() != null) {
-      sql = "SELECT ?::time with time zone + ?";
-    } else {
-      sql = "SELECT ?::time + ?";
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        TestUtil.createTempTable(con, TEST_TABLE, "tm time, tz time with time zone");
     }
 
-    SimpleDateFormat sdf = createSimpleDateFormat(time);
-
-    // Execute a query using a casted time string + PGInterval.
-    PreparedStatement stmt = con.prepareStatement(sql);
-    stmt.setString(1, sdf.format(time));
-    stmt.setObject(2, interval);
-
-    ResultSet rs = stmt.executeQuery();
-    assertTrue(rs.next());
-
-    Time result1 = rs.getTime(1);
-    // System.out.println(stmt + " = " + sdf.format(result1));
-    stmt.close();
-
-    // Execute a query using with PGTime + PGInterval.
-    stmt = con.prepareStatement("SELECT ? + ?");
-    if (useSetObject) {
-      stmt.setObject(1, time);
-    } else {
-      stmt.setTime(1, time);
-    }
-    stmt.setObject(2, interval);
-
-    rs = stmt.executeQuery();
-    assertTrue(rs.next());
-
-    Time result2 = rs.getTime(1);
-    // System.out.println(stmt + " = " + sdf.format(result2));
-    assertEquals(result1, result2);
-    stmt.close();
-  }
-
-  /**
-   * Tests inserting and selecting <code>PGTime</code> objects with <code>time</code> and <code>time
-   * with time zone</code> columns.
-   *
-   * @throws SQLException if a JDBC or database problem occurs.
-   */
-  @Test
-  public void testTimeInsertAndSelect() throws SQLException {
-    Calendar cal = Calendar.getInstance();
-    cal.set(1970, Calendar.JANUARY, 1);
-
-    final long now = cal.getTimeInMillis();
-    verifyInsertAndSelect(new PGTime(now), true);
-    verifyInsertAndSelect(new PGTime(now), false);
-
-    verifyInsertAndSelect(new PGTime(now, Calendar.getInstance(TimeZone.getTimeZone("GMT"))), true);
-    verifyInsertAndSelect(new PGTime(now, Calendar.getInstance(TimeZone.getTimeZone("GMT"))),
-        false);
-
-    verifyInsertAndSelect(new PGTime(now, Calendar.getInstance(TimeZone.getTimeZone("GMT+01:00"))),
-        true);
-    verifyInsertAndSelect(new PGTime(now, Calendar.getInstance(TimeZone.getTimeZone("GMT+01:00"))),
-        false);
-  }
-
-  /**
-   * Verifies that inserting the given <code>PGTime</code> as a time string and an object produces
-   * the same results.
-   *
-   * @param time the time to test.
-   * @param useSetObject <code>true</code> if the setObject method should be used instead of
-   *        setTime.
-   * @throws SQLException if a JDBC or database problem occurs.
-   */
-  private void verifyInsertAndSelect(PGTime time, boolean useSetObject) throws SQLException {
-    // Construct the INSERT statement of a casted time string.
-    String sql;
-    if (time.getCalendar() != null) {
-      sql =
-          "INSERT INTO " + TEST_TABLE + " VALUES (?::time with time zone, ?::time with time zone)";
-    } else {
-      sql = "INSERT INTO " + TEST_TABLE + " VALUES (?::time, ?::time)";
+    @Override
+    public void tearDown() throws SQLException {
+        TestUtil.dropTable(con, TEST_TABLE);
+        super.tearDown();
     }
 
-    SimpleDateFormat sdf = createSimpleDateFormat(time);
+    /**
+     * Tests that adding a <code>PGInterval</code> object to a <code>PGTime</code> object when
+     * performed as a casted string and object.
+     *
+     * @throws SQLException if a JDBC or database problem occurs.
+     */
+    @Test
+    public void testTimeWithInterval() throws SQLException {
+        assumeTrue(TestUtil.haveIntegerDateTimes(con));
 
-    // Insert the times as casted strings.
-    PreparedStatement pstmt1 = con.prepareStatement(sql);
-    pstmt1.setString(1, sdf.format(time));
-    pstmt1.setString(2, sdf.format(time));
-    assertEquals(1, pstmt1.executeUpdate());
+        Calendar cal = Calendar.getInstance();
+        cal.set(1970, Calendar.JANUARY, 1);
 
-    // Insert the times as PGTime objects.
-    PreparedStatement pstmt2 = con.prepareStatement("INSERT INTO " + TEST_TABLE + " VALUES (?, ?)");
+        final long now = cal.getTimeInMillis();
+        verifyTimeWithInterval(new PGTime(now), new PGInterval(0, 0, 0, 1, 2, 3.14), true);
+        verifyTimeWithInterval(new PGTime(now), new PGInterval(0, 0, 0, 1, 2, 3.14), false);
 
-    if (useSetObject) {
-      pstmt2.setObject(1, time);
-      pstmt2.setObject(2, time);
-    } else {
-      pstmt2.setTime(1, time);
-      pstmt2.setTime(2, time);
+        verifyTimeWithInterval(new PGTime(now, Calendar.getInstance(TimeZone.getTimeZone("GMT"))),
+                new PGInterval(0, 0, 0, 1, 2, 3.14), true);
+        verifyTimeWithInterval(new PGTime(now, Calendar.getInstance(TimeZone.getTimeZone("GMT"))),
+                new PGInterval(0, 0, 0, 1, 2, 3.14), false);
+
+        verifyTimeWithInterval(new PGTime(now, Calendar.getInstance(TimeZone.getTimeZone("GMT+01:00"))),
+                new PGInterval(0, 0, 0, 1, 2, 3.456), true);
+        verifyTimeWithInterval(new PGTime(now, Calendar.getInstance(TimeZone.getTimeZone("GMT+01:00"))),
+                new PGInterval(0, 0, 0, 1, 2, 3.456), false);
     }
 
-    assertEquals(1, pstmt2.executeUpdate());
+    /**
+     * Verifies that adding the given <code>PGInterval</code> object to a <code>PGTime</code> produces
+     * the correct results when performed as a casted string and as an object.
+     *
+     * @param time         the time to test.
+     * @param interval     the time interval.
+     * @param useSetObject <code>true</code> if the setObject method should be used instead of
+     *                     setTime.
+     * @throws SQLException if a JDBC or database problem occurs.
+     */
+    private void verifyTimeWithInterval(PGTime time, PGInterval interval, boolean useSetObject)
+            throws SQLException {
+        // Construct the SQL query.
+        String sql;
+        if (time.getCalendar() != null) {
+            sql = "SELECT ?::time with time zone + ?";
+        } else {
+            sql = "SELECT ?::time + ?";
+        }
 
-    // Query the values back out.
-    Statement stmt = con.createStatement();
+        SimpleDateFormat sdf = createSimpleDateFormat(time);
 
-    ResultSet rs = stmt.executeQuery(TestUtil.selectSQL(TEST_TABLE, "tm,tz"));
-    assertNotNull(rs);
+        // Execute a query using a casted time string + PGInterval.
+        PreparedStatement stmt = con.prepareStatement(sql);
+        stmt.setString(1, sdf.format(time));
+        stmt.setObject(2, interval);
 
-    // Read the casted string values.
-    assertTrue(rs.next());
+        ResultSet rs = stmt.executeQuery();
+        assertTrue(rs.next());
 
-    Time tm1 = rs.getTime(1);
-    Time tz1 = rs.getTime(2);
+        Time result1 = rs.getTime(1);
+        // System.out.println(stmt + " = " + sdf.format(result1));
+        stmt.close();
 
-    // System.out.println(pstmt1 + " -> " + tm1 + ", " + sdf.format(tz1));
+        // Execute a query using PGTime + PGInterval.
+        stmt = con.prepareStatement("SELECT ? + ?");
+        if (useSetObject) {
+            stmt.setObject(1, time);
+        } else {
+            stmt.setTime(1, time);
+        }
+        stmt.setObject(2, interval);
 
-    // Read the PGTime values.
-    assertTrue(rs.next());
+        rs = stmt.executeQuery();
+        assertTrue(rs.next());
 
-    Time tm2 = rs.getTime(1);
-    Time tz2 = rs.getTime(2);
-
-    // System.out.println(pstmt2 + " -> " + tm2 + ", " + sdf.format(tz2));
-
-    // Verify that the first and second versions match.
-    assertEquals(tm1, tm2);
-    assertEquals(tz1, tz2);
-
-    // Clean up.
-    assertEquals(2, stmt.executeUpdate("DELETE FROM " + TEST_TABLE));
-    stmt.close();
-    pstmt2.close();
-    pstmt1.close();
-  }
-
-  /**
-   * Creates a {@code SimpleDateFormat} that is appropriate for the given time.
-   *
-   * @param time the time object.
-   * @return the new format instance.
-   */
-  private SimpleDateFormat createSimpleDateFormat(PGTime time) {
-    String pattern = "HH:mm:ss.SSS";
-    if (time.getCalendar() != null) {
-      pattern += " Z";
+        Time result2 = rs.getTime(1);
+        // System.out.println(stmt + " = " + sdf.format(result2));
+        assertEquals(result1, result2);
+        stmt.close();
     }
 
-    SimpleDateFormat sdf = new SimpleDateFormat(pattern);
-    if (time.getCalendar() != null) {
-      sdf.setTimeZone(time.getCalendar().getTimeZone());
+    /**
+     * Tests inserting and selecting <code>PGTime</code> objects with <code>time</code> and <code>time
+     * with time zone</code> columns.
+     *
+     * @throws SQLException if a JDBC or database problem occurs.
+     */
+    @Test
+    public void testTimeInsertAndSelect() throws SQLException {
+        Calendar cal = Calendar.getInstance();
+        cal.set(1970, Calendar.JANUARY, 1);
+
+        final long now = cal.getTimeInMillis();
+        verifyInsertAndSelect(new PGTime(now), true);
+        verifyInsertAndSelect(new PGTime(now), false);
+
+        verifyInsertAndSelect(new PGTime(now, Calendar.getInstance(TimeZone.getTimeZone("GMT"))), true);
+        verifyInsertAndSelect(new PGTime(now, Calendar.getInstance(TimeZone.getTimeZone("GMT"))),
+                false);
+
+        verifyInsertAndSelect(new PGTime(now, Calendar.getInstance(TimeZone.getTimeZone("GMT+01:00"))),
+                true);
+        verifyInsertAndSelect(new PGTime(now, Calendar.getInstance(TimeZone.getTimeZone("GMT+01:00"))),
+                false);
+    }
+
+    /**
+     * Verifies that inserting the given <code>PGTime</code> as a time string and an object produces
+     * the same results.
+     *
+     * @param time         the time to test.
+     * @param useSetObject <code>true</code> if the setObject method should be used instead of
+     *                     setTime.
+     * @throws SQLException if a JDBC or database problem occurs.
+     */
+    private void verifyInsertAndSelect(PGTime time, boolean useSetObject) throws SQLException {
+        // Construct the INSERT statement of a casted time string.
+        String sql;
+        if (time.getCalendar() != null) {
+            sql =
+                    "INSERT INTO " + TEST_TABLE + " VALUES (?::time with time zone, ?::time with time zone)";
+        } else {
+            sql = "INSERT INTO " + TEST_TABLE + " VALUES (?::time, ?::time)";
+        }
+
+        SimpleDateFormat sdf = createSimpleDateFormat(time);
+
+        // Insert the times as casted strings.
+        PreparedStatement pstmt1 = con.prepareStatement(sql);
+        pstmt1.setString(1, sdf.format(time));
+        pstmt1.setString(2, sdf.format(time));
+        assertEquals(1, pstmt1.executeUpdate());
+
+        // Insert the times as PGTime objects.
+        PreparedStatement pstmt2 = con.prepareStatement("INSERT INTO " + TEST_TABLE + " VALUES (?, ?)");
+
+        if (useSetObject) {
+            pstmt2.setObject(1, time);
+            pstmt2.setObject(2, time);
+        } else {
+            pstmt2.setTime(1, time);
+            pstmt2.setTime(2, time);
+        }
+
+        assertEquals(1, pstmt2.executeUpdate());
+
+        // Query the values back out.
+        Statement stmt = con.createStatement();
+
+        ResultSet rs = stmt.executeQuery(TestUtil.selectSQL(TEST_TABLE, "tm,tz"));
+        assertNotNull(rs);
+
+        // Read the casted string values.
+        assertTrue(rs.next());
+
+        Time tm1 = rs.getTime(1);
+        Time tz1 = rs.getTime(2);
+
+        // System.out.println(pstmt1 + " -> " + tm1 + ", " + sdf.format(tz1));
+
+        // Read the PGTime values.
+        assertTrue(rs.next());
+
+        Time tm2 = rs.getTime(1);
+        Time tz2 = rs.getTime(2);
+
+        // System.out.println(pstmt2 + " -> " + tm2 + ", " + sdf.format(tz2));
+
+        // Verify that the first and second versions match.
+        assertEquals(tm1, tm2);
+        assertEquals(tz1, tz2);
+
+        // Clean up.
+        assertEquals(2, stmt.executeUpdate("DELETE FROM " + TEST_TABLE));
+        stmt.close();
+        pstmt2.close();
+        pstmt1.close();
+    }
+
+    /**
+     * Creates a {@code SimpleDateFormat} that is appropriate for the given time.
+     *
+     * @param time the time object.
+     * @return the new format instance.
+     */
+    private SimpleDateFormat createSimpleDateFormat(PGTime time) {
+        String pattern = "HH:mm:ss.SSS";
+        if (time.getCalendar() != null) {
+            pattern += " Z";
+        }
+
+        SimpleDateFormat sdf = new SimpleDateFormat(pattern);
+        if (time.getCalendar() != null) {
+            sdf.setTimeZone(time.getCalendar().getTimeZone());
+        }
+        return sdf;
     }
-    return sdf;
-  }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/PGTimestampTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/PGTimestampTest.java
index 1cf2c95..458ec34 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/PGTimestampTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/PGTimestampTest.java
@@ -33,213 +33,213 @@ import java.util.TimeZone;
  * {@code timestamp with time zone} and {@code timestamp without time zone} data types.
  */
 class PGTimestampTest {
-  /**
-   * The name of the test table.
-   */
-  private static final String TEST_TABLE = "testtimestamp";
+    /**
+     * The name of the test table.
+     */
+    private static final String TEST_TABLE = "testtimestamp";
 
-  private Connection con;
+    private Connection con;
 
-  @BeforeEach
-  void setUp() throws Exception {
-    con = TestUtil.openDB();
-    TestUtil.createTable(con, TEST_TABLE, "ts timestamp, tz timestamp with time zone");
-  }
-
-  @AfterEach
-  void tearDown() throws Exception {
-    TestUtil.dropTable(con, TEST_TABLE);
-    TestUtil.closeDB(con);
-  }
-
-  /**
-   * Tests {@link PGTimestamp} with {@link PGInterval}.
-   *
-   * @throws SQLException if a JDBC or database problem occurs.
-   */
-  @Test
-  void timestampWithInterval() throws SQLException {
-    assumeTrue(TestUtil.haveIntegerDateTimes(con));
-    PGTimestamp timestamp = new PGTimestamp(System.currentTimeMillis());
-    PGInterval interval = new PGInterval(0, 0, 0, 1, 2, 3.14);
-    verifyTimestampWithInterval(timestamp, interval, true);
-    verifyTimestampWithInterval(timestamp, interval, false);
-
-    timestamp = new PGTimestamp(System.currentTimeMillis(),
-        Calendar.getInstance(TimeZone.getTimeZone("GMT")));
-    interval = new PGInterval(0, 0, 0, 1, 2, 3.14);
-    verifyTimestampWithInterval(timestamp, interval, true);
-    verifyTimestampWithInterval(timestamp, interval, false);
-
-    timestamp = new PGTimestamp(System.currentTimeMillis(),
-        Calendar.getInstance(TimeZone.getTimeZone("GMT+01:00")));
-    interval = new PGInterval(-3, -2, -1, 1, 2, 3.14);
-    verifyTimestampWithInterval(timestamp, interval, true);
-    verifyTimestampWithInterval(timestamp, interval, false);
-  }
-
-  /**
-   * Executes a test with the given timestamp and interval.
-   *
-   * @param timestamp the timestamp under test.
-   * @param interval the interval.
-   * @param useSetObject indicates if setObject should be used instead of setTimestamp.
-   * @throws SQLException if a JDBC or database problem occurs.
-   */
-  private void verifyTimestampWithInterval(PGTimestamp timestamp, PGInterval interval,
-      boolean useSetObject) throws SQLException {
-    // Construct the SQL query.
-    String sql;
-    if (timestamp.getCalendar() != null) {
-      sql = "SELECT ?::timestamp with time zone + ?";
-    } else {
-      sql = "SELECT ?::timestamp + ?";
+    @BeforeEach
+    void setUp() throws Exception {
+        con = TestUtil.openDB();
+        TestUtil.createTable(con, TEST_TABLE, "ts timestamp, tz timestamp with time zone");
     }
 
-    // Execute a query using a casted timestamp string + PGInterval.
-    PreparedStatement ps = con.prepareStatement(sql);
-    SimpleDateFormat sdf = createSimpleDateFormat(timestamp);
-    final String timestampString = sdf.format(timestamp);
-    ps.setString(1, timestampString);
-    ps.setObject(2, interval);
-    ResultSet rs = ps.executeQuery();
-    assertNotNull(rs);
-
-    assertTrue(rs.next());
-    Timestamp result1 = rs.getTimestamp(1);
-    assertNotNull(result1);
-    ps.close();
-
-    // Execute a query as PGTimestamp + PGInterval.
-    ps = con.prepareStatement("SELECT ? + ?");
-    if (useSetObject) {
-      ps.setObject(1, timestamp);
-    } else {
-      ps.setTimestamp(1, timestamp);
-    }
-    ps.setObject(2, interval);
-    rs = ps.executeQuery();
-
-    // Verify that the query produces the same results.
-    assertTrue(rs.next());
-    Timestamp result2 = rs.getTimestamp(1);
-    assertEquals(result1, result2);
-    ps.close();
-  }
-
-  /**
-   * Tests inserting and selecting {@code PGTimestamp} objects with {@code timestamp} and
-   * {@code timestamp with time zone} columns.
-   *
-   * @throws SQLException if a JDBC or database problem occurs.
-   */
-  @Test
-  void timeInsertAndSelect() throws SQLException {
-    final long now = System.currentTimeMillis();
-    verifyInsertAndSelect(new PGTimestamp(now), true);
-    verifyInsertAndSelect(new PGTimestamp(now), false);
-
-    verifyInsertAndSelect(new PGTimestamp(now, Calendar.getInstance(TimeZone.getTimeZone("GMT"))),
-        true);
-    verifyInsertAndSelect(new PGTimestamp(now, Calendar.getInstance(TimeZone.getTimeZone("GMT"))),
-        false);
-
-    verifyInsertAndSelect(
-        new PGTimestamp(now, Calendar.getInstance(TimeZone.getTimeZone("GMT+01:00"))), true);
-    verifyInsertAndSelect(
-        new PGTimestamp(now, Calendar.getInstance(TimeZone.getTimeZone("GMT+01:00"))), false);
-  }
-
-  /**
-   * Verifies that inserting the given {@code PGTimestamp} as a timestamp string and an object
-   * produces the same results.
-   *
-   * @param timestamp the timestamp to test.
-   * @param useSetObject {@code true} if the setObject method should be used instead of
-   *        setTimestamp.
-   * @throws SQLException if a JDBC or database problem occurs.
-   */
-  private void verifyInsertAndSelect(PGTimestamp timestamp, boolean useSetObject)
-      throws SQLException {
-    // Construct the INSERT statement of a casted timestamp string.
-    String sql;
-    if (timestamp.getCalendar() != null) {
-      sql = "INSERT INTO " + TEST_TABLE
-          + " VALUES (?::timestamp with time zone, ?::timestamp with time zone)";
-    } else {
-      sql = "INSERT INTO " + TEST_TABLE + " VALUES (?::timestamp, ?::timestamp)";
+    @AfterEach
+    void tearDown() throws Exception {
+        TestUtil.dropTable(con, TEST_TABLE);
+        TestUtil.closeDB(con);
     }
 
-    SimpleDateFormat sdf = createSimpleDateFormat(timestamp);
+    /**
+     * Tests {@link PGTimestamp} with {@link PGInterval}.
+     *
+     * @throws SQLException if a JDBC or database problem occurs.
+     */
+    @Test
+    void timestampWithInterval() throws SQLException {
+        assumeTrue(TestUtil.haveIntegerDateTimes(con));
+        PGTimestamp timestamp = new PGTimestamp(System.currentTimeMillis());
+        PGInterval interval = new PGInterval(0, 0, 0, 1, 2, 3.14);
+        verifyTimestampWithInterval(timestamp, interval, true);
+        verifyTimestampWithInterval(timestamp, interval, false);
 
-    // Insert the timestamps as casted strings.
-    PreparedStatement pstmt1 = con.prepareStatement(sql);
-    pstmt1.setString(1, sdf.format(timestamp));
-    pstmt1.setString(2, sdf.format(timestamp));
-    assertEquals(1, pstmt1.executeUpdate());
+        timestamp = new PGTimestamp(System.currentTimeMillis(),
+                Calendar.getInstance(TimeZone.getTimeZone("GMT")));
+        interval = new PGInterval(0, 0, 0, 1, 2, 3.14);
+        verifyTimestampWithInterval(timestamp, interval, true);
+        verifyTimestampWithInterval(timestamp, interval, false);
 
-    // Insert the timestamps as PGTimestamp objects.
-    PreparedStatement pstmt2 = con.prepareStatement("INSERT INTO " + TEST_TABLE + " VALUES (?, ?)");
-
-    if (useSetObject) {
-      pstmt2.setObject(1, timestamp);
-      pstmt2.setObject(2, timestamp);
-    } else {
-      pstmt2.setTimestamp(1, timestamp);
-      pstmt2.setTimestamp(2, timestamp);
+        timestamp = new PGTimestamp(System.currentTimeMillis(),
+                Calendar.getInstance(TimeZone.getTimeZone("GMT+01:00")));
+        interval = new PGInterval(-3, -2, -1, 1, 2, 3.14);
+        verifyTimestampWithInterval(timestamp, interval, true);
+        verifyTimestampWithInterval(timestamp, interval, false);
     }
 
-    assertEquals(1, pstmt2.executeUpdate());
+    /**
+     * Executes a test with the given timestamp and interval.
+     *
+     * @param timestamp    the timestamp under test.
+     * @param interval     the interval.
+     * @param useSetObject indicates if setObject should be used instead of setTimestamp.
+     * @throws SQLException if a JDBC or database problem occurs.
+     */
+    private void verifyTimestampWithInterval(PGTimestamp timestamp, PGInterval interval,
+                                             boolean useSetObject) throws SQLException {
+        // Construct the SQL query.
+        String sql;
+        if (timestamp.getCalendar() != null) {
+            sql = "SELECT ?::timestamp with time zone + ?";
+        } else {
+            sql = "SELECT ?::timestamp + ?";
+        }
 
-    // Query the values back out.
-    Statement stmt = con.createStatement();
+        // Execute a query using a casted timestamp string + PGInterval.
+        PreparedStatement ps = con.prepareStatement(sql);
+        SimpleDateFormat sdf = createSimpleDateFormat(timestamp);
+        final String timestampString = sdf.format(timestamp);
+        ps.setString(1, timestampString);
+        ps.setObject(2, interval);
+        ResultSet rs = ps.executeQuery();
+        assertNotNull(rs);
 
-    ResultSet rs = stmt.executeQuery(TestUtil.selectSQL(TEST_TABLE, "ts,tz"));
-    assertNotNull(rs);
+        assertTrue(rs.next());
+        Timestamp result1 = rs.getTimestamp(1);
+        assertNotNull(result1);
+        ps.close();
 
-    // Read the casted string values.
-    assertTrue(rs.next());
+        // Execute a query as PGTimestamp + PGInterval.
+        ps = con.prepareStatement("SELECT ? + ?");
+        if (useSetObject) {
+            ps.setObject(1, timestamp);
+        } else {
+            ps.setTimestamp(1, timestamp);
+        }
+        ps.setObject(2, interval);
+        rs = ps.executeQuery();
 
-    Timestamp ts1 = rs.getTimestamp(1);
-    Timestamp tz1 = rs.getTimestamp(2);
-
-    // System.out.println(pstmt1 + " -> " + ts1 + ", " + sdf.format(tz1));
-
-    // Read the PGTimestamp values.
-    assertTrue(rs.next());
-
-    Timestamp ts2 = rs.getTimestamp(1);
-    Timestamp tz2 = rs.getTimestamp(2);
-
-    // System.out.println(pstmt2 + " -> " + ts2 + ", " + sdf.format(tz2));
-
-    // Verify that the first and second versions match.
-    assertEquals(ts1, ts2);
-    assertEquals(tz1, tz2);
-
-    // Clean up.
-    assertEquals(2, stmt.executeUpdate("DELETE FROM " + TEST_TABLE));
-    stmt.close();
-    pstmt2.close();
-    pstmt1.close();
-  }
-
-  /**
-   * Creates a {@code SimpleDateFormat} that is appropriate for the given timestamp.
-   *
-   * @param timestamp the timestamp object.
-   * @return the new format instance.
-   */
-  private SimpleDateFormat createSimpleDateFormat(PGTimestamp timestamp) {
-    String pattern = "yyyy-MM-dd HH:mm:ss.SSS";
-    if (timestamp.getCalendar() != null) {
-      pattern += " Z";
+        // Verify that the query produces the same results.
+        assertTrue(rs.next());
+        Timestamp result2 = rs.getTimestamp(1);
+        assertEquals(result1, result2);
+        ps.close();
     }
 
-    SimpleDateFormat sdf = new SimpleDateFormat(pattern);
-    if (timestamp.getCalendar() != null) {
-      sdf.setTimeZone(timestamp.getCalendar().getTimeZone());
+    /**
+     * Tests inserting and selecting {@code PGTimestamp} objects with {@code timestamp} and
+     * {@code timestamp with time zone} columns.
+     *
+     * @throws SQLException if a JDBC or database problem occurs.
+     */
+    @Test
+    void timeInsertAndSelect() throws SQLException {
+        final long now = System.currentTimeMillis();
+        verifyInsertAndSelect(new PGTimestamp(now), true);
+        verifyInsertAndSelect(new PGTimestamp(now), false);
+
+        verifyInsertAndSelect(new PGTimestamp(now, Calendar.getInstance(TimeZone.getTimeZone("GMT"))),
+                true);
+        verifyInsertAndSelect(new PGTimestamp(now, Calendar.getInstance(TimeZone.getTimeZone("GMT"))),
+                false);
+
+        verifyInsertAndSelect(
+                new PGTimestamp(now, Calendar.getInstance(TimeZone.getTimeZone("GMT+01:00"))), true);
+        verifyInsertAndSelect(
+                new PGTimestamp(now, Calendar.getInstance(TimeZone.getTimeZone("GMT+01:00"))), false);
+    }
+
+    /**
+     * Verifies that inserting the given {@code PGTimestamp} as a timestamp string and an object
+     * produces the same results.
+     *
+     * @param timestamp    the timestamp to test.
+     * @param useSetObject {@code true} if the setObject method should be used instead of
+     *                     setTimestamp.
+     * @throws SQLException if a JDBC or database problem occurs.
+     */
+    private void verifyInsertAndSelect(PGTimestamp timestamp, boolean useSetObject)
+            throws SQLException {
+        // Construct the INSERT statement of a casted timestamp string.
+        String sql;
+        if (timestamp.getCalendar() != null) {
+            sql = "INSERT INTO " + TEST_TABLE
+                    + " VALUES (?::timestamp with time zone, ?::timestamp with time zone)";
+        } else {
+            sql = "INSERT INTO " + TEST_TABLE + " VALUES (?::timestamp, ?::timestamp)";
+        }
+
+        SimpleDateFormat sdf = createSimpleDateFormat(timestamp);
+
+        // Insert the timestamps as casted strings.
+        PreparedStatement pstmt1 = con.prepareStatement(sql);
+        pstmt1.setString(1, sdf.format(timestamp));
+        pstmt1.setString(2, sdf.format(timestamp));
+        assertEquals(1, pstmt1.executeUpdate());
+
+        // Insert the timestamps as PGTimestamp objects.
+        PreparedStatement pstmt2 = con.prepareStatement("INSERT INTO " + TEST_TABLE + " VALUES (?, ?)");
+
+        if (useSetObject) {
+            pstmt2.setObject(1, timestamp);
+            pstmt2.setObject(2, timestamp);
+        } else {
+            pstmt2.setTimestamp(1, timestamp);
+            pstmt2.setTimestamp(2, timestamp);
+        }
+
+        assertEquals(1, pstmt2.executeUpdate());
+
+        // Query the values back out.
+        Statement stmt = con.createStatement();
+
+        ResultSet rs = stmt.executeQuery(TestUtil.selectSQL(TEST_TABLE, "ts,tz"));
+        assertNotNull(rs);
+
+        // Read the casted string values.
+        assertTrue(rs.next());
+
+        Timestamp ts1 = rs.getTimestamp(1);
+        Timestamp tz1 = rs.getTimestamp(2);
+
+        // System.out.println(pstmt1 + " -> " + ts1 + ", " + sdf.format(tz1));
+
+        // Read the PGTimestamp values.
+        assertTrue(rs.next());
+
+        Timestamp ts2 = rs.getTimestamp(1);
+        Timestamp tz2 = rs.getTimestamp(2);
+
+        // System.out.println(pstmt2 + " -> " + ts2 + ", " + sdf.format(tz2));
+
+        // Verify that the first and second versions match.
+        assertEquals(ts1, ts2);
+        assertEquals(tz1, tz2);
+
+        // Clean up.
+        assertEquals(2, stmt.executeUpdate("DELETE FROM " + TEST_TABLE));
+        stmt.close();
+        pstmt2.close();
+        pstmt1.close();
+    }
+
+    /**
+     * Creates a {@code SimpleDateFormat} that is appropriate for the given timestamp.
+     *
+     * @param timestamp the timestamp object.
+     * @return the new format instance.
+     */
+    private SimpleDateFormat createSimpleDateFormat(PGTimestamp timestamp) {
+        String pattern = "yyyy-MM-dd HH:mm:ss.SSS";
+        if (timestamp.getCalendar() != null) {
+            pattern += " Z";
+        }
+
+        SimpleDateFormat sdf = new SimpleDateFormat(pattern);
+        if (timestamp.getCalendar() != null) {
+            sdf.setTimeZone(timestamp.getCalendar().getTimeZone());
+        }
+        return sdf;
     }
-    return sdf;
-  }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ParameterStatusTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ParameterStatusTest.java
index 06902c8..87abebc 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ParameterStatusTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ParameterStatusTest.java
@@ -28,197 +28,197 @@ import java.util.logging.Logger;
  */
 public class ParameterStatusTest extends BaseTest4 {
 
-  private final TimeZone tzPlus0800 = TimeZone.getTimeZone("GMT+8:00");
-  private final Logger logger = Logger.getLogger(ParameterStatusTest.class.getName());
+    private final TimeZone tzPlus0800 = TimeZone.getTimeZone("GMT+8:00");
+    private final Logger logger = Logger.getLogger(ParameterStatusTest.class.getName());
 
-  @Override
-  public void tearDown() {
-    TimeZone.setDefault(null);
-  }
-
-  @Test
-  public void expectedInitialParameters() throws Exception {
-    TimeZone.setDefault(tzPlus0800);
-    con = TestUtil.openDB();
-
-    Map<String,String> params = ((PGConnection) con).getParameterStatuses();
-
-    // PgJDBC forces the following parameters
-    Assert.assertEquals("UTF8", params.get("client_encoding"));
-    Assert.assertNotNull(params.get("DateStyle"));
-    MatcherAssert.assertThat(params.get("DateStyle"), StringStartsWith.startsWith("ISO"));
-
-    // PgJDBC sets TimeZone via Java's TimeZone.getDefault()
-    // Pg reports POSIX timezones which are negated, so:
-    Assert.assertEquals("GMT-08:00", params.get("TimeZone"));
-
-    // Must be reported. All these exist in 8.2 or above, and we don't bother
-    // with test coverage older than that.
-    Assert.assertNotNull(params.get("integer_datetimes"));
-    Assert.assertNotNull(params.get("is_superuser"));
-    Assert.assertNotNull(params.get("server_encoding"));
-    Assert.assertNotNull(params.get("server_version"));
-    Assert.assertNotNull(params.get("session_authorization"));
-    Assert.assertNotNull(params.get("standard_conforming_strings"));
-
-    if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v8_4)) {
-      Assert.assertNotNull(params.get("IntervalStyle"));
-    } else {
-      Assert.assertNull(params.get("IntervalStyle"));
+    @Override
+    public void tearDown() {
+        TimeZone.setDefault(null);
     }
 
-    // TestUtil forces "ApplicationName=Driver Tests"
-    // if application_name is supported (9.0 or newer)
-    if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_0)) {
-      Assert.assertEquals("Driver Tests", params.get("application_name"));
-    } else {
-      Assert.assertNull(params.get("application_name"));
+    @Test
+    public void expectedInitialParameters() throws Exception {
+        TimeZone.setDefault(tzPlus0800);
+        con = TestUtil.openDB();
+
+        Map<String, String> params = ((PGConnection) con).getParameterStatuses();
+
+        // PgJDBC forces the following parameters
+        Assert.assertEquals("UTF8", params.get("client_encoding"));
+        Assert.assertNotNull(params.get("DateStyle"));
+        MatcherAssert.assertThat(params.get("DateStyle"), StringStartsWith.startsWith("ISO"));
+
+        // PgJDBC sets TimeZone via Java's TimeZone.getDefault()
+        // Pg reports POSIX timezones which are negated, so:
+        Assert.assertEquals("GMT-08:00", params.get("TimeZone"));
+
+        // Must be reported. All these exist in 8.2 or above, and we don't bother
+        // with test coverage older than that.
+        Assert.assertNotNull(params.get("integer_datetimes"));
+        Assert.assertNotNull(params.get("is_superuser"));
+        Assert.assertNotNull(params.get("server_encoding"));
+        Assert.assertNotNull(params.get("server_version"));
+        Assert.assertNotNull(params.get("session_authorization"));
+        Assert.assertNotNull(params.get("standard_conforming_strings"));
+
+        if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v8_4)) {
+            Assert.assertNotNull(params.get("IntervalStyle"));
+        } else {
+            Assert.assertNull(params.get("IntervalStyle"));
+        }
+
+        // TestUtil forces "ApplicationName=Driver Tests"
+        // if application_name is supported (9.0 or newer)
+        if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_0)) {
+            Assert.assertEquals("Driver Tests", params.get("application_name"));
+        } else {
+            Assert.assertNull(params.get("application_name"));
+        }
+
+        // Not reported
+        Assert.assertNull(params.get("nonexistent"));
+        Assert.assertNull(params.get("enable_hashjoin"));
+
+        TestUtil.closeDB(con);
     }
 
-    // Not reported
-    Assert.assertNull(params.get("nonexistent"));
-    Assert.assertNull(params.get("enable_hashjoin"));
+    @Test
+    public void reportUpdatedParameters() throws Exception {
+        con = TestUtil.openDB();
 
-    TestUtil.closeDB(con);
-  }
+        if (!TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_0)) {
+            /* This test uses application_name which was added in 9.0 */
+            return;
+        }
 
-  @Test
-  public void reportUpdatedParameters() throws Exception {
-    con = TestUtil.openDB();
+        con.setAutoCommit(false);
+        Statement stmt = con.createStatement();
 
-    if (!TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_0)) {
-      /* This test uses application_name which was added in 9.0 */
-      return;
+        stmt.executeUpdate("SET application_name = 'pgjdbc_ParameterStatusTest2';");
+        stmt.close();
+
+        // Parameter status should be reported before the ReadyForQuery so we will
+        // have already processed it
+        Assert.assertEquals("pgjdbc_ParameterStatusTest2", ((PGConnection) con).getParameterStatus("application_name"));
+
+        TestUtil.closeDB(con);
     }
 
-    con.setAutoCommit(false);
-    Statement stmt = con.createStatement();
+    // Run a txn-level SET then a txn-level SET LOCAL so we can make sure we keep
+    // track of the right GUC value at each point.
+    private void transactionalParametersCommon() throws Exception {
+        Statement stmt = con.createStatement();
 
-    stmt.executeUpdate("SET application_name = 'pgjdbc_ParameterStatusTest2';");
-    stmt.close();
+        // Initial value assigned by TestUtil
+        Assert.assertEquals("Driver Tests", ((PGConnection) con).getParameterStatus("application_name"));
 
-    // Parameter status should be reported before the ReadyForQuery so we will
-    // have already processed it
-    Assert.assertEquals("pgjdbc_ParameterStatusTest2", ((PGConnection) con).getParameterStatus("application_name"));
+        // PgJDBC begins an explicit txn here due to autocommit=off so the effect
+        // should be lost on rollback but retained on commit per the docs.
+        stmt.executeUpdate("SET application_name = 'pgjdbc_ParameterStatusTestTxn';");
+        Assert.assertEquals("pgjdbc_ParameterStatusTestTxn", ((PGConnection) con).getParameterStatus("application_name"));
 
-    TestUtil.closeDB(con);
-  }
+        // SET LOCAL is always txn scoped so the effect here will always be
+        // unwound on txn end.
+        stmt.executeUpdate("SET LOCAL application_name = 'pgjdbc_ParameterStatusTestLocal';");
+        Assert.assertEquals("pgjdbc_ParameterStatusTestLocal", ((PGConnection) con).getParameterStatus("application_name"));
 
-  // Run a txn-level SET then a txn-level SET LOCAL so we can make sure we keep
-  // track of the right GUC value at each point.
-  private void transactionalParametersCommon() throws Exception {
-    Statement stmt = con.createStatement();
-
-    // Initial value assigned by TestUtil
-    Assert.assertEquals("Driver Tests", ((PGConnection) con).getParameterStatus("application_name"));
-
-    // PgJDBC begins an explicit txn here due to autocommit=off so the effect
-    // should be lost on rollback but retained on commit per the docs.
-    stmt.executeUpdate("SET application_name = 'pgjdbc_ParameterStatusTestTxn';");
-    Assert.assertEquals("pgjdbc_ParameterStatusTestTxn", ((PGConnection) con).getParameterStatus("application_name"));
-
-    // SET LOCAL is always txn scoped so the effect here will always be
-    // unwound on txn end.
-    stmt.executeUpdate("SET LOCAL application_name = 'pgjdbc_ParameterStatusTestLocal';");
-    Assert.assertEquals("pgjdbc_ParameterStatusTestLocal", ((PGConnection) con).getParameterStatus("application_name"));
-
-    stmt.close();
-  }
-
-  @Test
-  public void transactionalParametersRollback() throws Exception {
-    con = TestUtil.openDB();
-
-    if (!TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_0)) {
-      /* This test uses application_name which was added in 9.0 */
-      return;
+        stmt.close();
     }
 
-    con.setAutoCommit(false);
+    @Test
+    public void transactionalParametersRollback() throws Exception {
+        con = TestUtil.openDB();
 
-    transactionalParametersCommon();
+        if (!TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_0)) {
+            /* This test uses application_name which was added in 9.0 */
+            return;
+        }
 
-    // SET unwinds on ROLLBACK
-    con.rollback();
+        con.setAutoCommit(false);
 
-    Assert.assertEquals("Driver Tests", ((PGConnection) con).getParameterStatus("application_name"));
+        transactionalParametersCommon();
 
-    TestUtil.closeDB(con);
-  }
+        // SET unwinds on ROLLBACK
+        con.rollback();
 
-  @Test
-  public void transactionalParametersCommit() throws Exception {
-    con = TestUtil.openDB();
+        Assert.assertEquals("Driver Tests", ((PGConnection) con).getParameterStatus("application_name"));
 
-    if (!TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_0)) {
-      /* This test uses application_name which was added in 9.0 */
-      return;
+        TestUtil.closeDB(con);
     }
 
-    con.setAutoCommit(false);
+    @Test
+    public void transactionalParametersCommit() throws Exception {
+        con = TestUtil.openDB();
 
-    transactionalParametersCommon();
+        if (!TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_0)) {
+            /* This test uses application_name which was added in 9.0 */
+            return;
+        }
 
-    // SET is retained on commit but SET LOCAL is unwound
-    con.commit();
+        con.setAutoCommit(false);
 
-    Assert.assertEquals("pgjdbc_ParameterStatusTestTxn", ((PGConnection) con).getParameterStatus("application_name"));
+        transactionalParametersCommon();
 
-    TestUtil.closeDB(con);
-  }
+        // SET is retained on commit but SET LOCAL is unwound
+        con.commit();
 
-  @Test
-  public void transactionalParametersAutocommit() throws Exception {
-    con = TestUtil.openDB();
+        Assert.assertEquals("pgjdbc_ParameterStatusTestTxn", ((PGConnection) con).getParameterStatus("application_name"));
 
-    if (!TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_0)) {
-      /* This test uses application_name which was added in 9.0 */
-      return;
+        TestUtil.closeDB(con);
     }
 
-    con.setAutoCommit(true);
-    Statement stmt = con.createStatement();
+    @Test
+    public void transactionalParametersAutocommit() throws Exception {
+        con = TestUtil.openDB();
 
-    // A SET LOCAL in autocommit should have no visible effect as we report the reset value too
-    Assert.assertEquals("Driver Tests", ((PGConnection) con).getParameterStatus("application_name"));
-    stmt.executeUpdate("SET LOCAL application_name = 'pgjdbc_ParameterStatusTestLocal';");
-    Assert.assertEquals("Driver Tests", ((PGConnection) con).getParameterStatus("application_name"));
+        if (!TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_0)) {
+            /* This test uses application_name which was added in 9.0 */
+            return;
+        }
 
-    stmt.close();
-    TestUtil.closeDB(con);
-  }
+        con.setAutoCommit(true);
+        Statement stmt = con.createStatement();
 
-  @Test(expected = UnsupportedOperationException.class)
-  public void parameterMapReadOnly() throws Exception {
-    try {
-      con = TestUtil.openDB();
-      Map params = ((PGConnection) con).getParameterStatuses();
-      params.put("DateStyle", "invalid");
-      Assert.fail("Attempt to write to exposed parameters map must throw");
-    } finally {
-      TestUtil.closeDB(con);
-    }
-  }
+        // A SET LOCAL in autocommit should have no visible effect as we report the reset value too
+        Assert.assertEquals("Driver Tests", ((PGConnection) con).getParameterStatus("application_name"));
+        stmt.executeUpdate("SET LOCAL application_name = 'pgjdbc_ParameterStatusTestLocal';");
+        Assert.assertEquals("Driver Tests", ((PGConnection) con).getParameterStatus("application_name"));
 
-  @Test
-  public void parameterMapIsView() throws Exception {
-    con = TestUtil.openDB();
-
-    if (!TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_0)) {
-      /* This test uses application_name which was added in 9.0 */
-      return;
+        stmt.close();
+        TestUtil.closeDB(con);
     }
 
-    Map params = ((PGConnection) con).getParameterStatuses();
+    @Test(expected = UnsupportedOperationException.class)
+    public void parameterMapReadOnly() throws Exception {
+        try {
+            con = TestUtil.openDB();
+            Map params = ((PGConnection) con).getParameterStatuses();
+            params.put("DateStyle", "invalid");
+            Assert.fail("Attempt to write to exposed parameters map must throw");
+        } finally {
+            TestUtil.closeDB(con);
+        }
+    }
 
-    Statement stmt = con.createStatement();
+    @Test
+    public void parameterMapIsView() throws Exception {
+        con = TestUtil.openDB();
 
-    Assert.assertEquals("Driver Tests", params.get("application_name"));
-    stmt.executeUpdate("SET application_name = 'pgjdbc_paramstatus_view';");
-    Assert.assertEquals("pgjdbc_paramstatus_view", params.get("application_name"));
+        if (!TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_0)) {
+            /* This test uses application_name which was added in 9.0 */
+            return;
+        }
 
-    stmt.close();
-    TestUtil.closeDB(con);
-  }
+        Map params = ((PGConnection) con).getParameterStatuses();
+
+        Statement stmt = con.createStatement();
+
+        Assert.assertEquals("Driver Tests", params.get("application_name"));
+        stmt.executeUpdate("SET application_name = 'pgjdbc_paramstatus_view';");
+        Assert.assertEquals("pgjdbc_paramstatus_view", params.get("application_name"));
+
+        stmt.close();
+        TestUtil.closeDB(con);
+    }
 
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/PreparedStatementTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/PreparedStatementTest.java
index d43bf05..3ae3e2c 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/PreparedStatementTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/PreparedStatementTest.java
@@ -52,1550 +52,1550 @@ import java.util.logging.Logger;
 @RunWith(Parameterized.class)
 public class PreparedStatementTest extends BaseTest4 {
 
-  private static final int NUMERIC_MAX_PRECISION = 1000;
-  private static final int NUMERIC_MAX_DISPLAY_SCALE = NUMERIC_MAX_PRECISION;
+    private static final int NUMERIC_MAX_PRECISION = 1000;
+    private static final int NUMERIC_MAX_DISPLAY_SCALE = NUMERIC_MAX_PRECISION;
 
-  public PreparedStatementTest(BinaryMode binaryMode) {
-    setBinaryMode(binaryMode);
-  }
-
-  @Parameterized.Parameters(name = "binary = {0}")
-  public static Iterable<Object[]> data() {
-    Collection<Object[]> ids = new ArrayList<>();
-    for (BinaryMode binaryMode : BinaryMode.values()) {
-      ids.add(new Object[]{binaryMode});
-    }
-    return ids;
-  }
-
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-    TestUtil.createTable(con, "streamtable", "bin bytea, str text");
-    TestUtil.createTable(con, "texttable", "ch char(3), te text, vc varchar(3)");
-    TestUtil.createTable(con, "intervaltable", "i interval");
-    TestUtil.createTable(con, "inttable", "a int");
-    TestUtil.createTable(con, "bool_tab", "bool_val boolean, null_val boolean, tf_val boolean, "
-        + "truefalse_val boolean, yn_val boolean, yesno_val boolean, "
-        + "onoff_val boolean, onezero_val boolean");
-  }
-
-  @Override
-  public void tearDown() throws SQLException {
-    TestUtil.dropTable(con, "streamtable");
-    TestUtil.dropTable(con, "texttable");
-    TestUtil.dropTable(con, "intervaltable");
-    TestUtil.dropTable(con, "inttable");
-    TestUtil.dropTable(con, "bool_tab");
-    super.tearDown();
-  }
-
-  private int getNumberOfServerPreparedStatements(String sql)
-      throws SQLException {
-    PreparedStatement pstmt = null;
-    ResultSet rs = null;
-    try {
-      pstmt = con.prepareStatement(
-          "select count(*) from pg_prepared_statements where statement = ?");
-      pstmt.setString(1, sql);
-      rs = pstmt.executeQuery();
-      rs.next();
-      return rs.getInt(1);
-    } finally {
-      TestUtil.closeQuietly(rs);
-      TestUtil.closeQuietly(pstmt);
-    }
-  }
-
-  @Test
-  public void testSetBinaryStream() throws SQLException {
-    assumeByteaSupported();
-    ByteArrayInputStream bais;
-    byte[] buf = new byte[10];
-    for (int i = 0; i < buf.length; i++) {
-      buf[i] = (byte) i;
+    public PreparedStatementTest(BinaryMode binaryMode) {
+        setBinaryMode(binaryMode);
     }
 
-    bais = null;
-    doSetBinaryStream(bais, 0);
-
-    bais = new ByteArrayInputStream(new byte[0]);
-    doSetBinaryStream(bais, 0);
-
-    bais = new ByteArrayInputStream(buf);
-    doSetBinaryStream(bais, 0);
-
-    bais = new ByteArrayInputStream(buf);
-    doSetBinaryStream(bais, 10);
-  }
-
-  @Test
-  public void testSetAsciiStream() throws Exception {
-    ByteArrayOutputStream baos = new ByteArrayOutputStream();
-    PrintWriter pw = new PrintWriter(new OutputStreamWriter(baos, "ASCII"));
-    pw.println("Hello");
-    pw.flush();
-
-    ByteArrayInputStream bais;
-
-    bais = new ByteArrayInputStream(baos.toByteArray());
-    doSetAsciiStream(bais, 0);
-
-    bais = new ByteArrayInputStream(baos.toByteArray());
-    doSetAsciiStream(bais, 6);
-
-    bais = new ByteArrayInputStream(baos.toByteArray());
-    doSetAsciiStream(bais, 100);
-  }
-
-  @Test
-  public void testExecuteStringOnPreparedStatement() throws Exception {
-    PreparedStatement pstmt = con.prepareStatement("SELECT 1");
-
-    try {
-      pstmt.executeQuery("SELECT 2");
-      fail("Expected an exception when executing a new SQL query on a prepared statement");
-    } catch (SQLException e) {
+    @Parameterized.Parameters(name = "binary = {0}")
+    public static Iterable<Object[]> data() {
+        Collection<Object[]> ids = new ArrayList<>();
+        for (BinaryMode binaryMode : BinaryMode.values()) {
+            ids.add(new Object[]{binaryMode});
+        }
+        return ids;
     }
 
-    try {
-      pstmt.executeUpdate("UPDATE streamtable SET bin=bin");
-      fail("Expected an exception when executing a new SQL update on a prepared statement");
-    } catch (SQLException e) {
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        TestUtil.createTable(con, "streamtable", "bin bytea, str text");
+        TestUtil.createTable(con, "texttable", "ch char(3), te text, vc varchar(3)");
+        TestUtil.createTable(con, "intervaltable", "i interval");
+        TestUtil.createTable(con, "inttable", "a int");
+        TestUtil.createTable(con, "bool_tab", "bool_val boolean, null_val boolean, tf_val boolean, "
+                + "truefalse_val boolean, yn_val boolean, yesno_val boolean, "
+                + "onoff_val boolean, onezero_val boolean");
     }
 
-    try {
-      pstmt.execute("UPDATE streamtable SET bin=bin");
-      fail("Expected an exception when executing a new SQL statement on a prepared statement");
-    } catch (SQLException e) {
-    }
-  }
-
-  @Test
-  public void testBinaryStreamErrorsRestartable() throws SQLException {
-    byte[] buf = new byte[10];
-    for (int i = 0; i < buf.length; i++) {
-      buf[i] = (byte) i;
+    @Override
+    public void tearDown() throws SQLException {
+        TestUtil.dropTable(con, "streamtable");
+        TestUtil.dropTable(con, "texttable");
+        TestUtil.dropTable(con, "intervaltable");
+        TestUtil.dropTable(con, "inttable");
+        TestUtil.dropTable(con, "bool_tab");
+        super.tearDown();
     }
 
-    // InputStream is shorter than the length argument implies.
-    InputStream is = new ByteArrayInputStream(buf);
-    runBrokenStream(is, buf.length + 1);
-
-    // InputStream throws an Exception during read.
-    is = new BrokenInputStream(new ByteArrayInputStream(buf), buf.length / 2);
-    runBrokenStream(is, buf.length);
-
-    // Invalid length < 0.
-    is = new ByteArrayInputStream(buf);
-    runBrokenStream(is, -1);
-
-    // Total Bind message length too long.
-    is = new ByteArrayInputStream(buf);
-    runBrokenStream(is, Integer.MAX_VALUE);
-  }
-
-  private void runBrokenStream(InputStream is, int length) throws SQLException {
-    PreparedStatement pstmt = null;
-    try {
-      pstmt = con.prepareStatement("INSERT INTO streamtable (bin,str) VALUES (?,?)");
-      pstmt.setBinaryStream(1, is, length);
-      pstmt.setString(2, "Other");
-      pstmt.executeUpdate();
-      fail("This isn't supposed to work.");
-    } catch (SQLException sqle) {
-      // don't need to rollback because we're in autocommit mode
-      pstmt.close();
-
-      // verify the connection is still valid and the row didn't go in.
-      Statement stmt = con.createStatement();
-      ResultSet rs = stmt.executeQuery("SELECT COUNT(*) FROM streamtable");
-      assertTrue(rs.next());
-      assertEquals(0, rs.getInt(1));
-      rs.close();
-      stmt.close();
-    }
-  }
-
-  private void doSetBinaryStream(ByteArrayInputStream bais, int length) throws SQLException {
-    PreparedStatement pstmt =
-        con.prepareStatement("INSERT INTO streamtable (bin,str) VALUES (?,?)");
-    pstmt.setBinaryStream(1, bais, length);
-    pstmt.setString(2, null);
-    pstmt.executeUpdate();
-    pstmt.close();
-  }
-
-  private void doSetAsciiStream(InputStream is, int length) throws SQLException {
-    PreparedStatement pstmt =
-        con.prepareStatement("INSERT INTO streamtable (bin,str) VALUES (?,?)");
-    pstmt.setBytes(1, null);
-    pstmt.setAsciiStream(2, is, length);
-    pstmt.executeUpdate();
-    pstmt.close();
-  }
-
-  @Test
-  public void testTrailingSpaces() throws SQLException {
-    PreparedStatement pstmt =
-        con.prepareStatement("INSERT INTO texttable (ch, te, vc) VALUES (?, ?, ?) ");
-    String str = "a  ";
-    pstmt.setString(1, str);
-    pstmt.setString(2, str);
-    pstmt.setString(3, str);
-    pstmt.executeUpdate();
-    pstmt.close();
-
-    pstmt = con.prepareStatement("SELECT ch, te, vc FROM texttable WHERE ch=? AND te=? AND vc=?");
-    pstmt.setString(1, str);
-    pstmt.setString(2, str);
-    pstmt.setString(3, str);
-    ResultSet rs = pstmt.executeQuery();
-    assertTrue(rs.next());
-    assertEquals(str, rs.getString(1));
-    assertEquals(str, rs.getString(2));
-    assertEquals(str, rs.getString(3));
-    rs.close();
-    pstmt.close();
-  }
-
-  @Test
-  public void testBinds() throws SQLException {
-    // braces around (42) are required to puzzle the parser
-    String query = "INSERT INTO inttable(a) VALUES (?);SELECT (42)";
-    PreparedStatement ps = con.prepareStatement(query);
-    ps.setInt(1, 100500);
-    ps.execute();
-    ResultSet rs = ps.getResultSet();
-    Assert.assertNull("insert produces no results ==> getResultSet should be null", rs);
-    Assert.assertTrue("There are two statements => getMoreResults should be true", ps.getMoreResults());
-    rs = ps.getResultSet();
-    Assert.assertNotNull("select produces results ==> getResultSet should be not null", rs);
-    Assert.assertTrue("select produces 1 row ==> rs.next should be true", rs.next());
-    Assert.assertEquals("second result of query " + query, 42, rs.getInt(1));
-
-    TestUtil.closeQuietly(rs);
-    TestUtil.closeQuietly(ps);
-  }
-
-  @Test
-  public void testSetNull() throws SQLException {
-    // valid: fully qualified type to setNull()
-    PreparedStatement pstmt = con.prepareStatement("INSERT INTO texttable (te) VALUES (?)");
-    pstmt.setNull(1, Types.VARCHAR);
-    pstmt.executeUpdate();
-
-    // valid: fully qualified type to setObject()
-    pstmt.setObject(1, null, Types.VARCHAR);
-    pstmt.executeUpdate();
-
-    // valid: setObject() with partial type info and a typed "null object instance"
-    org.postgresql.util.PGobject dummy = new org.postgresql.util.PGobject();
-    dummy.setType("text");
-    dummy.setValue(null);
-    pstmt.setObject(1, dummy, Types.OTHER);
-    pstmt.executeUpdate();
-
-    // setObject() with no type info
-    pstmt.setObject(1, null);
-    pstmt.executeUpdate();
-
-    // setObject() with insufficient type info
-    pstmt.setObject(1, null, Types.OTHER);
-    pstmt.executeUpdate();
-
-    // setNull() with insufficient type info
-    pstmt.setNull(1, Types.OTHER);
-    pstmt.executeUpdate();
-
-    pstmt.close();
-
-    assumeMinimumServerVersion(ServerVersion.v8_3);
-    pstmt = con.prepareStatement("select 'ok' where ?=? or (? is null) ");
-    pstmt.setObject(1, UUID.randomUUID(), Types.OTHER);
-    pstmt.setNull(2, Types.OTHER, "uuid");
-    pstmt.setNull(3, Types.OTHER, "uuid");
-    ResultSet rs = pstmt.executeQuery();
-
-    assertTrue(rs.next());
-    assertEquals("ok", rs.getObject(1));
-
-    rs.close();
-    pstmt.close();
-
-  }
-
-  @Test
-  public void testSingleQuotes() throws SQLException {
-    String[] testStrings = new String[]{
-      "bare ? question mark",
-      "quoted \\' single quote",
-      "doubled '' single quote",
-      "octal \\060 constant",
-      "escaped \\? question mark",
-      "double \\\\ backslash",
-      "double \" quote",};
-
-    String[] testStringsStdConf = new String[]{
-      "bare ? question mark",
-      "quoted '' single quote",
-      "doubled '' single quote",
-      "octal 0 constant",
-      "escaped ? question mark",
-      "double \\ backslash",
-      "double \" quote",};
-
-    String[] expected = new String[]{
-      "bare ? question mark",
-      "quoted ' single quote",
-      "doubled ' single quote",
-      "octal 0 constant",
-      "escaped ? question mark",
-      "double \\ backslash",
-      "double \" quote",};
-
-    boolean oldStdStrings = TestUtil.getStandardConformingStrings(con);
-    Statement stmt = con.createStatement();
-
-    // Test with standard_conforming_strings turned off.
-    stmt.execute("SET standard_conforming_strings TO off");
-    for (int i = 0; i < testStrings.length; i++) {
-      PreparedStatement pstmt = con.prepareStatement("SELECT '" + testStrings[i] + "'");
-      ResultSet rs = pstmt.executeQuery();
-      assertTrue(rs.next());
-      assertEquals(expected[i], rs.getString(1));
-      rs.close();
-      pstmt.close();
+    private int getNumberOfServerPreparedStatements(String sql)
+            throws SQLException {
+        PreparedStatement pstmt = null;
+        ResultSet rs = null;
+        try {
+            pstmt = con.prepareStatement(
+                    "select count(*) from pg_prepared_statements where statement = ?");
+            pstmt.setString(1, sql);
+            rs = pstmt.executeQuery();
+            rs.next();
+            return rs.getInt(1);
+        } finally {
+            TestUtil.closeQuietly(rs);
+            TestUtil.closeQuietly(pstmt);
+        }
     }
 
-    // Test with standard_conforming_strings turned off...
-    // ... using the escape string syntax (E'').
-    stmt.execute("SET standard_conforming_strings TO on");
-    for (int i = 0; i < testStrings.length; i++) {
-      PreparedStatement pstmt = con.prepareStatement("SELECT E'" + testStrings[i] + "'");
-      ResultSet rs = pstmt.executeQuery();
-      assertTrue(rs.next());
-      assertEquals(expected[i], rs.getString(1));
-      rs.close();
-      pstmt.close();
-    }
-    // ... using standard conforming input strings.
-    for (int i = 0; i < testStrings.length; i++) {
-      PreparedStatement pstmt = con.prepareStatement("SELECT '" + testStringsStdConf[i] + "'");
-      ResultSet rs = pstmt.executeQuery();
-      assertTrue(rs.next());
-      assertEquals(expected[i], rs.getString(1));
-      rs.close();
-      pstmt.close();
+    @Test
+    public void testSetBinaryStream() throws SQLException {
+        assumeByteaSupported();
+        ByteArrayInputStream bais;
+        byte[] buf = new byte[10];
+        for (int i = 0; i < buf.length; i++) {
+            buf[i] = (byte) i;
+        }
+
+        bais = null;
+        doSetBinaryStream(bais, 0);
+
+        bais = new ByteArrayInputStream(new byte[0]);
+        doSetBinaryStream(bais, 0);
+
+        bais = new ByteArrayInputStream(buf);
+        doSetBinaryStream(bais, 0);
+
+        bais = new ByteArrayInputStream(buf);
+        doSetBinaryStream(bais, 10);
     }
 
-    stmt.execute("SET standard_conforming_strings TO " + (oldStdStrings ? "on" : "off"));
-    stmt.close();
-  }
+    @Test
+    public void testSetAsciiStream() throws Exception {
+        ByteArrayOutputStream baos = new ByteArrayOutputStream();
+        PrintWriter pw = new PrintWriter(new OutputStreamWriter(baos, "ASCII"));
+        pw.println("Hello");
+        pw.flush();
 
-  @Test
-  public void testDoubleQuotes() throws SQLException {
-    String[] testStrings = new String[]{
-        "bare ? question mark",
-        "single ' quote",
-        "doubled '' single quote",
-        "doubled \"\" double quote",
-        "no backslash interpretation here: \\",
-    };
+        ByteArrayInputStream bais;
 
-    for (String testString : testStrings) {
-      PreparedStatement pstmt =
-          con.prepareStatement("CREATE TABLE \"" + testString + "\" (i integer)");
-      pstmt.executeUpdate();
-      pstmt.close();
+        bais = new ByteArrayInputStream(baos.toByteArray());
+        doSetAsciiStream(bais, 0);
 
-      pstmt = con.prepareStatement("DROP TABLE \"" + testString + "\"");
-      pstmt.executeUpdate();
-      pstmt.close();
-    }
-  }
+        bais = new ByteArrayInputStream(baos.toByteArray());
+        doSetAsciiStream(bais, 6);
 
-  @Test
-  public void testDollarQuotes() throws SQLException {
-    // dollar-quotes are supported in the backend since version 8.0
-    PreparedStatement st;
-    ResultSet rs;
-
-    st = con.prepareStatement("SELECT $$;$$ WHERE $x$?$x$=$_0$?$_0$ AND $$?$$=?");
-    st.setString(1, "?");
-    rs = st.executeQuery();
-    assertTrue(rs.next());
-    assertEquals(";", rs.getString(1));
-    assertFalse(rs.next());
-    st.close();
-
-    st = con.prepareStatement(
-        "SELECT $__$;$__$ WHERE ''''=$q_1$'$q_1$ AND ';'=?;"
-            + "SELECT $x$$a$;$x $a$$x$ WHERE $$;$$=? OR ''=$c$c$;$c$;"
-            + "SELECT ?");
-    st.setString(1, ";");
-    st.setString(2, ";");
-    st.setString(3, "$a$ $a$");
-
-    assertTrue(st.execute());
-    rs = st.getResultSet();
-    assertTrue(rs.next());
-    assertEquals(";", rs.getString(1));
-    assertFalse(rs.next());
-
-    assertTrue(st.getMoreResults());
-    rs = st.getResultSet();
-    assertTrue(rs.next());
-    assertEquals("$a$;$x $a$", rs.getString(1));
-    assertFalse(rs.next());
-
-    assertTrue(st.getMoreResults());
-    rs = st.getResultSet();
-    assertTrue(rs.next());
-    assertEquals("$a$ $a$", rs.getString(1));
-    assertFalse(rs.next());
-    st.close();
-  }
-
-  @Test
-  public void testDollarQuotesAndIdentifiers() throws SQLException {
-    // dollar-quotes are supported in the backend since version 8.0
-    PreparedStatement st;
-
-    con.createStatement().execute("CREATE TEMP TABLE a$b$c(a varchar, b varchar)");
-    st = con.prepareStatement("INSERT INTO a$b$c (a, b) VALUES (?, ?)");
-    st.setString(1, "a");
-    st.setString(2, "b");
-    st.executeUpdate();
-    st.close();
-
-    con.createStatement().execute("CREATE TEMP TABLE e$f$g(h varchar, e$f$g varchar) ");
-    st = con.prepareStatement("UPDATE e$f$g SET h = ? || e$f$g");
-    st.setString(1, "a");
-    st.executeUpdate();
-    st.close();
-  }
-
-  @Test
-  public void testComments() throws SQLException {
-    PreparedStatement st;
-    ResultSet rs;
-
-    st = con.prepareStatement("SELECT /*?*/ /*/*/*/**/*/*/*/1;SELECT ?;--SELECT ?");
-    st.setString(1, "a");
-    assertTrue(st.execute());
-    assertTrue(st.getMoreResults());
-    assertFalse(st.getMoreResults());
-    st.close();
-
-    st = con.prepareStatement("SELECT /**/'?'/*/**/*/ WHERE '?'=/*/*/*?*/*/*/--?\n?");
-    st.setString(1, "?");
-    rs = st.executeQuery();
-    assertTrue(rs.next());
-    assertEquals("?", rs.getString(1));
-    assertFalse(rs.next());
-    st.close();
-  }
-
-  @Test
-  public void testDoubleQuestionMark() throws SQLException {
-    PreparedStatement st;
-    ResultSet rs;
-
-    st = con.prepareStatement("select ??- lseg '((-1,0),(1,0))';");
-    rs = st.executeQuery();
-    assertTrue(rs.next());
-    // Bool values in binary mode are first converted to their Java type (Boolean), and then
-    // converted to String, which means that we receive 'true'. Bool values in text mode are
-    // returned as the same text value that was returned by the server, i.e. 't'.
-    assertEquals(binaryMode == BinaryMode.FORCE && preferQueryMode != PreferQueryMode.SIMPLE ? "true" : "t", rs.getString(1));
-    assertFalse(rs.next());
-    st.close();
-
-    st = con.prepareStatement("select lseg '((-1,0),(1,0))' ??# box '((-2,-2),(2,2))';");
-    rs = st.executeQuery();
-    assertTrue(rs.next());
-    assertEquals(binaryMode == BinaryMode.FORCE && preferQueryMode != PreferQueryMode.SIMPLE ? "true" : "t", rs.getString(1));
-    assertFalse(rs.next());
-    st.close();
-  }
-
-  @Test
-  public void testNumeric() throws SQLException {
-    PreparedStatement pstmt = con.prepareStatement(
-        "CREATE TEMP TABLE numeric_tab (max_numeric_positive numeric, min_numeric_positive numeric, max_numeric_negative numeric, min_numeric_negative numeric, null_value numeric)");
-    pstmt.executeUpdate();
-    pstmt.close();
-
-    char[] wholeDigits = new char[NUMERIC_MAX_DISPLAY_SCALE];
-    for (int i = 0; i < NUMERIC_MAX_DISPLAY_SCALE; i++) {
-      wholeDigits[i] = '9';
+        bais = new ByteArrayInputStream(baos.toByteArray());
+        doSetAsciiStream(bais, 100);
     }
 
-    char[] fractionDigits = new char[NUMERIC_MAX_PRECISION];
-    for (int i = 0; i < NUMERIC_MAX_PRECISION; i++) {
-      fractionDigits[i] = '9';
+    @Test
+    public void testExecuteStringOnPreparedStatement() throws Exception {
+        PreparedStatement pstmt = con.prepareStatement("SELECT 1");
+
+        try {
+            pstmt.executeQuery("SELECT 2");
+            fail("Expected an exception when executing a new SQL query on a prepared statement");
+        } catch (SQLException e) {
+        }
+
+        try {
+            pstmt.executeUpdate("UPDATE streamtable SET bin=bin");
+            fail("Expected an exception when executing a new SQL update on a prepared statement");
+        } catch (SQLException e) {
+        }
+
+        try {
+            pstmt.execute("UPDATE streamtable SET bin=bin");
+            fail("Expected an exception when executing a new SQL statement on a prepared statement");
+        } catch (SQLException e) {
+        }
     }
 
-    String maxValueString = new String(wholeDigits);
-    String minValueString = new String(fractionDigits);
-    BigDecimal[] values = new BigDecimal[4];
-    values[0] = new BigDecimal(maxValueString);
-    values[1] = new BigDecimal("-" + maxValueString);
-    values[2] = new BigDecimal(minValueString);
-    values[3] = new BigDecimal("-" + minValueString);
+    @Test
+    public void testBinaryStreamErrorsRestartable() throws SQLException {
+        byte[] buf = new byte[10];
+        for (int i = 0; i < buf.length; i++) {
+            buf[i] = (byte) i;
+        }
 
-    pstmt = con.prepareStatement("insert into numeric_tab values (?,?,?,?,?)");
-    for (int i = 1; i < 5; i++) {
-      pstmt.setBigDecimal(i, values[i - 1]);
+        // InputStream is shorter than the length argument implies.
+        InputStream is = new ByteArrayInputStream(buf);
+        runBrokenStream(is, buf.length + 1);
+
+        // InputStream throws an Exception during read.
+        is = new BrokenInputStream(new ByteArrayInputStream(buf), buf.length / 2);
+        runBrokenStream(is, buf.length);
+
+        // Invalid length < 0.
+        is = new ByteArrayInputStream(buf);
+        runBrokenStream(is, -1);
+
+        // Total Bind message length too long.
+        is = new ByteArrayInputStream(buf);
+        runBrokenStream(is, Integer.MAX_VALUE);
     }
 
-    pstmt.setNull(5, Types.NUMERIC);
-    pstmt.executeUpdate();
-    pstmt.close();
-
-    pstmt = con.prepareStatement("select * from numeric_tab");
-    ResultSet rs = pstmt.executeQuery();
-    assertTrue(rs.next());
-    for (int i = 1; i < 5; i++) {
-      assertTrue(rs.getBigDecimal(i).compareTo(values[i - 1]) == 0);
-    }
-    rs.getDouble(5);
-    assertTrue(rs.wasNull());
-    rs.close();
-    pstmt.close();
-
-  }
-
-  @Test
-  public void testDouble() throws SQLException {
-    PreparedStatement pstmt = con.prepareStatement(
-        "CREATE TEMP TABLE double_tab (max_double float, min_double float, null_value float)");
-    pstmt.executeUpdate();
-    pstmt.close();
-
-    pstmt = con.prepareStatement("insert into double_tab values (?,?,?)");
-    pstmt.setDouble(1, 1.0E125);
-    pstmt.setDouble(2, 1.0E-130);
-    pstmt.setNull(3, Types.DOUBLE);
-    pstmt.executeUpdate();
-    pstmt.close();
-
-    pstmt = con.prepareStatement("select * from double_tab");
-    ResultSet rs = pstmt.executeQuery();
-    assertTrue(rs.next());
-    double d = rs.getDouble(1);
-    assertTrue(rs.getDouble(1) == 1.0E125);
-    assertTrue(rs.getDouble(2) == 1.0E-130);
-    rs.getDouble(3);
-    assertTrue(rs.wasNull());
-    rs.close();
-    pstmt.close();
-
-  }
-
-  @Test
-  public void testFloat() throws SQLException {
-    PreparedStatement pstmt = con.prepareStatement(
-        "CREATE TEMP TABLE float_tab (max_float real, min_float real, null_value real)");
-    pstmt.executeUpdate();
-    pstmt.close();
-
-    pstmt = con.prepareStatement("insert into float_tab values (?,?,?)");
-    pstmt.setFloat(1, (float) 1.0E37);
-    pstmt.setFloat(2, (float) 1.0E-37);
-    pstmt.setNull(3, Types.FLOAT);
-    pstmt.executeUpdate();
-    pstmt.close();
-
-    pstmt = con.prepareStatement("select * from float_tab");
-    ResultSet rs = pstmt.executeQuery();
-    assertTrue(rs.next());
-    float f = rs.getFloat(1);
-    assertTrue("expected 1.0E37,received " + rs.getFloat(1), rs.getFloat(1) == (float) 1.0E37);
-    assertTrue("expected 1.0E-37,received " + rs.getFloat(2), rs.getFloat(2) == (float) 1.0E-37);
-    rs.getDouble(3);
-    assertTrue(rs.wasNull());
-    rs.close();
-    pstmt.close();
-
-  }
-
-  @Test
-  public void testNaNLiteralsSimpleStatement() throws SQLException {
-    Statement stmt = con.createStatement();
-    ResultSet rs = stmt.executeQuery("select 'NaN'::numeric, 'NaN'::real, 'NaN'::double precision");
-    checkNaNLiterals(stmt, rs);
-  }
-
-  @Test
-  public void testNaNLiteralsPreparedStatement() throws SQLException {
-    PreparedStatement stmt = con.prepareStatement("select 'NaN'::numeric, 'NaN'::real, 'NaN'::double precision");
-    checkNaNLiterals(stmt, stmt.executeQuery());
-  }
-
-  private void checkNaNLiterals(Statement stmt, ResultSet rs) throws SQLException {
-    rs.next();
-    assertTrue("Double.isNaN((Double) rs.getObject", Double.isNaN((Double) rs.getObject(3)));
-    assertTrue("Double.isNaN(rs.getDouble", Double.isNaN(rs.getDouble(3)));
-    assertTrue("Float.isNaN((Float) rs.getObject", Float.isNaN((Float) rs.getObject(2)));
-    assertTrue("Float.isNaN(rs.getFloat", Float.isNaN(rs.getFloat(2)));
-    assertTrue("Double.isNaN((Double) rs.getObject", Double.isNaN((Double) rs.getObject(1)));
-    assertTrue("Double.isNaN(rs.getDouble", Double.isNaN(rs.getDouble(1)));
-    rs.close();
-    stmt.close();
-  }
-
-  @Test
-  public void testNaNSetDoubleFloat() throws SQLException {
-    PreparedStatement ps = con.prepareStatement("select ?, ?");
-    ps.setFloat(1, Float.NaN);
-    ps.setDouble(2, Double.NaN);
-
-    checkNaNParams(ps);
-  }
-
-  @Test
-  public void testNaNSetObject() throws SQLException {
-    PreparedStatement ps = con.prepareStatement("select ?, ?");
-    ps.setObject(1, Float.NaN);
-    ps.setObject(2, Double.NaN);
-
-    checkNaNParams(ps);
-  }
-
-  private void checkNaNParams(PreparedStatement ps) throws SQLException {
-    ResultSet rs = ps.executeQuery();
-    rs.next();
-
-    assertTrue("Float.isNaN((Float) rs.getObject", Float.isNaN((Float) rs.getObject(1)));
-    assertTrue("Float.isNaN(rs.getFloat", Float.isNaN(rs.getFloat(1)));
-    assertTrue("Double.isNaN(rs.getDouble", Double.isNaN(rs.getDouble(2)));
-    assertTrue("Double.isNaN(rs.getDouble", Double.isNaN(rs.getDouble(2)));
-
-    TestUtil.closeQuietly(rs);
-    TestUtil.closeQuietly(ps);
-  }
-
-  @Test
-  public void testBoolean() throws SQLException {
-    testBoolean(0);
-    testBoolean(1);
-    testBoolean(5);
-    testBoolean(-1);
-  }
-
-  public void testBoolean(int prepareThreshold) throws SQLException {
-    PreparedStatement pstmt = con.prepareStatement("insert into bool_tab values (?,?,?,?,?,?,?,?)");
-    ((org.postgresql.PGStatement) pstmt).setPrepareThreshold(prepareThreshold);
-
-    // Test TRUE values
-    pstmt.setBoolean(1, true);
-    pstmt.setObject(1, Boolean.TRUE);
-    pstmt.setNull(2, Types.BIT);
-    pstmt.setObject(3, 't', Types.BIT);
-    pstmt.setObject(3, 'T', Types.BIT);
-    pstmt.setObject(3, "t", Types.BIT);
-    pstmt.setObject(4, "true", Types.BIT);
-    pstmt.setObject(5, 'y', Types.BIT);
-    pstmt.setObject(5, 'Y', Types.BIT);
-    pstmt.setObject(5, "Y", Types.BIT);
-    pstmt.setObject(6, "YES", Types.BIT);
-    pstmt.setObject(7, "On", Types.BIT);
-    pstmt.setObject(8, '1', Types.BIT);
-    pstmt.setObject(8, "1", Types.BIT);
-    assertEquals("one row inserted, true values", 1, pstmt.executeUpdate());
-    // Test FALSE values
-    pstmt.setBoolean(1, false);
-    pstmt.setObject(1, Boolean.FALSE);
-    pstmt.setNull(2, Types.BOOLEAN);
-    pstmt.setObject(3, 'f', Types.BOOLEAN);
-    pstmt.setObject(3, 'F', Types.BOOLEAN);
-    pstmt.setObject(3, "F", Types.BOOLEAN);
-    pstmt.setObject(4, "false", Types.BOOLEAN);
-    pstmt.setObject(5, 'n', Types.BOOLEAN);
-    pstmt.setObject(5, 'N', Types.BOOLEAN);
-    pstmt.setObject(5, "N", Types.BOOLEAN);
-    pstmt.setObject(6, "NO", Types.BOOLEAN);
-    pstmt.setObject(7, "Off", Types.BOOLEAN);
-    pstmt.setObject(8, "0", Types.BOOLEAN);
-    pstmt.setObject(8, '0', Types.BOOLEAN);
-    assertEquals("one row inserted, false values", 1, pstmt.executeUpdate());
-    // Test weird values
-    pstmt.setObject(1, (byte) 0, Types.BOOLEAN);
-    pstmt.setObject(2, BigDecimal.ONE, Types.BOOLEAN);
-    pstmt.setObject(3, 0L, Types.BOOLEAN);
-    pstmt.setObject(4, 0x1, Types.BOOLEAN);
-    pstmt.setObject(5, (float) 0, Types.BOOLEAN);
-    pstmt.setObject(5, 1.0d, Types.BOOLEAN);
-    pstmt.setObject(5, 0.0f, Types.BOOLEAN);
-    pstmt.setObject(6, Integer.valueOf("1"), Types.BOOLEAN);
-    pstmt.setObject(7, new java.math.BigInteger("0"), Types.BOOLEAN);
-    pstmt.clearParameters();
-    pstmt.close();
-
-    pstmt = con.prepareStatement("select * from bool_tab");
-    ((org.postgresql.PGStatement) pstmt).setPrepareThreshold(prepareThreshold);
-    ResultSet rs = pstmt.executeQuery();
-
-    assertTrue(rs.next());
-    assertTrue("expected true, received " + rs.getBoolean(1), rs.getBoolean(1));
-    rs.getFloat(2);
-    assertTrue(rs.wasNull());
-    assertTrue("expected true, received " + rs.getBoolean(3), rs.getBoolean(3));
-    assertTrue("expected true, received " + rs.getBoolean(4), rs.getBoolean(4));
-    assertTrue("expected true, received " + rs.getBoolean(5), rs.getBoolean(5));
-    assertTrue("expected true, received " + rs.getBoolean(6), rs.getBoolean(6));
-    assertTrue("expected true, received " + rs.getBoolean(7), rs.getBoolean(7));
-    assertTrue("expected true, received " + rs.getBoolean(8), rs.getBoolean(8));
-
-    assertTrue(rs.next());
-    assertFalse("expected false, received " + rs.getBoolean(1), rs.getBoolean(1));
-    rs.getBoolean(2);
-    assertTrue(rs.wasNull());
-    assertFalse("expected false, received " + rs.getBoolean(3), rs.getBoolean(3));
-    assertFalse("expected false, received " + rs.getBoolean(4), rs.getBoolean(4));
-    assertFalse("expected false, received " + rs.getBoolean(5), rs.getBoolean(5));
-    assertFalse("expected false, received " + rs.getBoolean(6), rs.getBoolean(6));
-    assertFalse("expected false, received " + rs.getBoolean(7), rs.getBoolean(7));
-    assertFalse("expected false, received " + rs.getBoolean(8), rs.getBoolean(8));
-
-    rs.close();
-    pstmt.close();
-
-    pstmt = con.prepareStatement("TRUNCATE TABLE bool_tab");
-    pstmt.executeUpdate();
-    pstmt.close();
-  }
-
-  @Test
-  public void testBadBoolean() throws SQLException {
-    PreparedStatement pstmt = con.prepareStatement("INSERT INTO bad_bool VALUES (?)");
-    try {
-      pstmt.setObject(1, "this is not boolean", Types.BOOLEAN);
-      fail();
-    } catch (SQLException e) {
-      assertEquals(org.postgresql.util.PSQLState.CANNOT_COERCE.getState(), e.getSQLState());
-      assertEquals("Cannot cast to boolean: \"this is not boolean\"", e.getMessage());
-    }
-    try {
-      pstmt.setObject(1, 'X', Types.BOOLEAN);
-      fail();
-    } catch (SQLException e) {
-      assertEquals(org.postgresql.util.PSQLState.CANNOT_COERCE.getState(), e.getSQLState());
-      assertEquals("Cannot cast to boolean: \"X\"", e.getMessage());
-    }
-    try {
-      java.io.File obj = new java.io.File("");
-      pstmt.setObject(1, obj, Types.BOOLEAN);
-      fail();
-    } catch (SQLException e) {
-      assertEquals(org.postgresql.util.PSQLState.CANNOT_COERCE.getState(), e.getSQLState());
-      assertEquals("Cannot cast to boolean", e.getMessage());
-    }
-    try {
-      pstmt.setObject(1, "1.0", Types.BOOLEAN);
-      fail();
-    } catch (SQLException e) {
-      assertEquals(org.postgresql.util.PSQLState.CANNOT_COERCE.getState(), e.getSQLState());
-      assertEquals("Cannot cast to boolean: \"1.0\"", e.getMessage());
-    }
-    try {
-      pstmt.setObject(1, "-1", Types.BOOLEAN);
-      fail();
-    } catch (SQLException e) {
-      assertEquals(org.postgresql.util.PSQLState.CANNOT_COERCE.getState(), e.getSQLState());
-      assertEquals("Cannot cast to boolean: \"-1\"", e.getMessage());
-    }
-    try {
-      pstmt.setObject(1, "ok", Types.BOOLEAN);
-      fail();
-    } catch (SQLException e) {
-      assertEquals(org.postgresql.util.PSQLState.CANNOT_COERCE.getState(), e.getSQLState());
-      assertEquals("Cannot cast to boolean: \"ok\"", e.getMessage());
-    }
-    try {
-      pstmt.setObject(1, 0.99f, Types.BOOLEAN);
-      fail();
-    } catch (SQLException e) {
-      assertEquals(org.postgresql.util.PSQLState.CANNOT_COERCE.getState(), e.getSQLState());
-      assertEquals("Cannot cast to boolean: \"0.99\"", e.getMessage());
-    }
-    try {
-      pstmt.setObject(1, -0.01d, Types.BOOLEAN);
-      fail();
-    } catch (SQLException e) {
-      assertEquals(org.postgresql.util.PSQLState.CANNOT_COERCE.getState(), e.getSQLState());
-      assertEquals("Cannot cast to boolean: \"-0.01\"", e.getMessage());
-    }
-    try {
-      pstmt.setObject(1, new java.sql.Date(0), Types.BOOLEAN);
-      fail();
-    } catch (SQLException e) {
-      assertEquals(org.postgresql.util.PSQLState.CANNOT_COERCE.getState(), e.getSQLState());
-      assertEquals("Cannot cast to boolean", e.getMessage());
-    }
-    try {
-      pstmt.setObject(1, new java.math.BigInteger("1000"), Types.BOOLEAN);
-      fail();
-    } catch (SQLException e) {
-      assertEquals(org.postgresql.util.PSQLState.CANNOT_COERCE.getState(), e.getSQLState());
-      assertEquals("Cannot cast to boolean: \"1000\"", e.getMessage());
-    }
-    try {
-      pstmt.setObject(1, Math.PI, Types.BOOLEAN);
-      fail();
-    } catch (SQLException e) {
-      assertEquals(org.postgresql.util.PSQLState.CANNOT_COERCE.getState(), e.getSQLState());
-      assertEquals("Cannot cast to boolean: \"3.141592653589793\"", e.getMessage());
-    }
-    pstmt.close();
-  }
-
-  @Test
-  public void testSetFloatInteger() throws SQLException {
-    PreparedStatement pstmt = con.prepareStatement(
-        "CREATE temp TABLE float_tab (max_val float8, min_val float, null_val float8)");
-    pstmt.executeUpdate();
-    pstmt.close();
-
-    Integer maxInteger = 2147483647;
-    Integer minInteger = -2147483648;
-
-    Double maxFloat = 2147483647.0;
-    Double minFloat = (double) -2147483648;
-
-    pstmt = con.prepareStatement("insert into float_tab values (?,?,?)");
-    pstmt.setObject(1, maxInteger, Types.FLOAT);
-    pstmt.setObject(2, minInteger, Types.FLOAT);
-    pstmt.setNull(3, Types.FLOAT);
-    pstmt.executeUpdate();
-    pstmt.close();
-
-    pstmt = con.prepareStatement("select * from float_tab");
-    ResultSet rs = pstmt.executeQuery();
-    assertTrue(rs.next());
-
-    assertTrue("expected " + maxFloat + " ,received " + rs.getObject(1),
-        rs.getObject(1).equals(maxFloat));
-    assertTrue("expected " + minFloat + " ,received " + rs.getObject(2),
-        rs.getObject(2).equals(minFloat));
-    rs.getFloat(3);
-    assertTrue(rs.wasNull());
-    rs.close();
-    pstmt.close();
-
-  }
-
-  @Test
-  public void testSetFloatString() throws SQLException {
-    PreparedStatement pstmt = con.prepareStatement(
-        "CREATE temp TABLE float_tab (max_val float8, min_val float8, null_val float8)");
-    pstmt.executeUpdate();
-    pstmt.close();
-
-    String maxStringFloat = "1.0E37";
-    String minStringFloat = "1.0E-37";
-    Double maxFloat = 1.0E37;
-    Double minFloat = 1.0E-37;
-
-    pstmt = con.prepareStatement("insert into float_tab values (?,?,?)");
-    pstmt.setObject(1, maxStringFloat, Types.FLOAT);
-    pstmt.setObject(2, minStringFloat, Types.FLOAT);
-    pstmt.setNull(3, Types.FLOAT);
-    pstmt.executeUpdate();
-    pstmt.setObject(1, "1.0", Types.FLOAT);
-    pstmt.setObject(2, "0.0", Types.FLOAT);
-    pstmt.setNull(3, Types.FLOAT);
-    pstmt.executeUpdate();
-    pstmt.close();
-
-    pstmt = con.prepareStatement("select * from float_tab");
-    ResultSet rs = pstmt.executeQuery();
-    assertTrue(rs.next());
-
-    assertTrue(((Double) rs.getObject(1)).equals(maxFloat));
-    assertTrue(((Double) rs.getObject(2)).equals(minFloat));
-    assertTrue(rs.getDouble(1) == maxFloat);
-    assertTrue(rs.getDouble(2) == minFloat);
-    rs.getFloat(3);
-    assertTrue(rs.wasNull());
-
-    assertTrue(rs.next());
-    assertTrue("expected true, received " + rs.getBoolean(1), rs.getBoolean(1));
-    assertFalse("expected false,received " + rs.getBoolean(2), rs.getBoolean(2));
-
-    rs.close();
-    pstmt.close();
-
-  }
-
-  @Test
-  public void testSetFloatBigDecimal() throws SQLException {
-    PreparedStatement pstmt = con.prepareStatement(
-        "CREATE temp TABLE float_tab (max_val float8, min_val float8, null_val float8)");
-    pstmt.executeUpdate();
-    pstmt.close();
-
-    BigDecimal maxBigDecimalFloat = new BigDecimal("1.0E37");
-    BigDecimal minBigDecimalFloat = new BigDecimal("1.0E-37");
-    Double maxFloat = 1.0E37;
-    Double minFloat = 1.0E-37;
-
-    pstmt = con.prepareStatement("insert into float_tab values (?,?,?)");
-    pstmt.setObject(1, maxBigDecimalFloat, Types.FLOAT);
-    pstmt.setObject(2, minBigDecimalFloat, Types.FLOAT);
-    pstmt.setNull(3, Types.FLOAT);
-    pstmt.executeUpdate();
-    pstmt.close();
-
-    pstmt = con.prepareStatement("select * from float_tab");
-    ResultSet rs = pstmt.executeQuery();
-    assertTrue(rs.next());
-
-    assertTrue("expected " + maxFloat + " ,received " + rs.getObject(1),
-        ((Double) rs.getObject(1)).equals(maxFloat));
-    assertTrue("expected " + minFloat + " ,received " + rs.getObject(2),
-        ((Double) rs.getObject(2)).equals(minFloat));
-    rs.getFloat(3);
-    assertTrue(rs.wasNull());
-    rs.close();
-    pstmt.close();
-
-  }
-
-  @Test
-  public void testSetTinyIntFloat() throws SQLException {
-    PreparedStatement pstmt = con
-        .prepareStatement("CREATE temp TABLE tiny_int (max_val int4, min_val int4, null_val int4)");
-    pstmt.executeUpdate();
-    pstmt.close();
-
-    Integer maxInt = 127;
-    Integer minInt = -127;
-    Float maxIntFloat = 127F;
-    Float minIntFloat = (float) -127;
-
-    pstmt = con.prepareStatement("insert into tiny_int values (?,?,?)");
-    pstmt.setObject(1, maxIntFloat, Types.TINYINT);
-    pstmt.setObject(2, minIntFloat, Types.TINYINT);
-    pstmt.setNull(3, Types.TINYINT);
-    pstmt.executeUpdate();
-    pstmt.close();
-
-    pstmt = con.prepareStatement("select * from tiny_int");
-    ResultSet rs = pstmt.executeQuery();
-    assertTrue(rs.next());
-
-    assertEquals("maxInt as rs.getObject", maxInt, rs.getObject(1));
-    assertEquals("minInt as rs.getObject", minInt, rs.getObject(2));
-    rs.getObject(3);
-    assertTrue("rs.wasNull after rs.getObject", rs.wasNull());
-    assertEquals("maxInt as rs.getInt", maxInt, (Integer) rs.getInt(1));
-    assertEquals("minInt as rs.getInt", minInt, (Integer) rs.getInt(2));
-    rs.getInt(3);
-    assertTrue("rs.wasNull after rs.getInt", rs.wasNull());
-    assertEquals("maxInt as rs.getLong", Long.valueOf(maxInt), (Long) rs.getLong(1));
-    assertEquals("minInt as rs.getLong", Long.valueOf(minInt), (Long) rs.getLong(2));
-    rs.getLong(3);
-    assertTrue("rs.wasNull after rs.getLong", rs.wasNull());
-    assertEquals("maxInt as rs.getBigDecimal", BigDecimal.valueOf(maxInt), rs.getBigDecimal(1));
-    assertEquals("minInt as rs.getBigDecimal", BigDecimal.valueOf(minInt), rs.getBigDecimal(2));
-    assertNull("rs.getBigDecimal", rs.getBigDecimal(3));
-    assertTrue("rs.getBigDecimal after rs.getLong", rs.wasNull());
-    assertEquals("maxInt as rs.getBigDecimal(scale=0)", BigDecimal.valueOf(maxInt),
-        rs.getBigDecimal(1, 0));
-    assertEquals("minInt as rs.getBigDecimal(scale=0)", BigDecimal.valueOf(minInt),
-        rs.getBigDecimal(2, 0));
-    assertNull("rs.getBigDecimal(scale=0)", rs.getBigDecimal(3, 0));
-    assertTrue("rs.getBigDecimal after rs.getLong", rs.wasNull());
-    assertEquals("maxInt as rs.getBigDecimal(scale=1)",
-        BigDecimal.valueOf(maxInt).setScale(1, RoundingMode.HALF_EVEN), rs.getBigDecimal(1, 1));
-    assertEquals("minInt as rs.getBigDecimal(scale=1)",
-        BigDecimal.valueOf(minInt).setScale(1, RoundingMode.HALF_EVEN), rs.getBigDecimal(2, 1));
-    rs.getFloat(3);
-    assertTrue(rs.wasNull());
-    rs.close();
-    pstmt.close();
-
-  }
-
-  @Test
-  public void testSetSmallIntFloat() throws SQLException {
-    PreparedStatement pstmt = con.prepareStatement(
-        "CREATE temp TABLE small_int (max_val int4, min_val int4, null_val int4)");
-    pstmt.executeUpdate();
-    pstmt.close();
-
-    Integer maxInt = 32767;
-    Integer minInt = -32768;
-    Float maxIntFloat = 32767F;
-    Float minIntFloat = (float) -32768;
-
-    pstmt = con.prepareStatement("insert into small_int values (?,?,?)");
-    pstmt.setObject(1, maxIntFloat, Types.SMALLINT);
-    pstmt.setObject(2, minIntFloat, Types.SMALLINT);
-    pstmt.setNull(3, Types.TINYINT);
-    pstmt.executeUpdate();
-    pstmt.close();
-
-    pstmt = con.prepareStatement("select * from small_int");
-    ResultSet rs = pstmt.executeQuery();
-    assertTrue(rs.next());
-
-    assertTrue("expected " + maxInt + " ,received " + rs.getObject(1),
-        rs.getObject(1).equals(maxInt));
-    assertTrue("expected " + minInt + " ,received " + rs.getObject(2),
-        rs.getObject(2).equals(minInt));
-    rs.getFloat(3);
-    assertTrue(rs.wasNull());
-    rs.close();
-    pstmt.close();
-  }
-
-  @Test
-  public void testSetIntFloat() throws SQLException {
-    PreparedStatement pstmt = con
-        .prepareStatement("CREATE temp TABLE int_TAB (max_val int4, min_val int4, null_val int4)");
-    pstmt.executeUpdate();
-    pstmt.close();
-
-    Integer maxInt = 1000;
-    Integer minInt = -1000;
-    Float maxIntFloat = 1000F;
-    Float minIntFloat = (float) -1000;
-
-    pstmt = con.prepareStatement("insert into int_tab values (?,?,?)");
-    pstmt.setObject(1, maxIntFloat, Types.INTEGER);
-    pstmt.setObject(2, minIntFloat, Types.INTEGER);
-    pstmt.setNull(3, Types.INTEGER);
-    pstmt.executeUpdate();
-    pstmt.close();
-
-    pstmt = con.prepareStatement("select * from int_tab");
-    ResultSet rs = pstmt.executeQuery();
-    assertTrue(rs.next());
-
-    assertTrue("expected " + maxInt + " ,received " + rs.getObject(1),
-        ((Integer) rs.getObject(1)).equals(maxInt));
-    assertTrue("expected " + minInt + " ,received " + rs.getObject(2),
-        ((Integer) rs.getObject(2)).equals(minInt));
-    rs.getFloat(3);
-    assertTrue(rs.wasNull());
-    rs.close();
-    pstmt.close();
-
-  }
-
-  @Test
-  public void testSetBooleanDouble() throws SQLException {
-    PreparedStatement pstmt = con.prepareStatement(
-        "CREATE temp TABLE double_tab (max_val float, min_val float, null_val float)");
-    pstmt.executeUpdate();
-    pstmt.close();
-
-    Double dBooleanTrue = 1.0;
-    Double dBooleanFalse = (double) 0;
-
-    pstmt = con.prepareStatement("insert into double_tab values (?,?,?)");
-    pstmt.setObject(1, Boolean.TRUE, Types.DOUBLE);
-    pstmt.setObject(2, Boolean.FALSE, Types.DOUBLE);
-    pstmt.setNull(3, Types.DOUBLE);
-    pstmt.executeUpdate();
-    pstmt.close();
-
-    pstmt = con.prepareStatement("select * from double_tab");
-    ResultSet rs = pstmt.executeQuery();
-    assertTrue(rs.next());
-
-    assertTrue("expected " + dBooleanTrue + " ,received " + rs.getObject(1),
-        rs.getObject(1).equals(dBooleanTrue));
-    assertTrue("expected " + dBooleanFalse + " ,received " + rs.getObject(2),
-        rs.getObject(2).equals(dBooleanFalse));
-    rs.getFloat(3);
-    assertTrue(rs.wasNull());
-    rs.close();
-    pstmt.close();
-
-  }
-
-  @Test
-  public void testSetBooleanNumeric() throws SQLException {
-    PreparedStatement pstmt = con.prepareStatement(
-        "CREATE temp TABLE numeric_tab (max_val numeric(30,15), min_val numeric(30,15), null_val numeric(30,15))");
-    pstmt.executeUpdate();
-    pstmt.close();
-
-    BigDecimal dBooleanTrue = new BigDecimal(1);
-    BigDecimal dBooleanFalse = new BigDecimal(0);
-
-    pstmt = con.prepareStatement("insert into numeric_tab values (?,?,?)");
-    pstmt.setObject(1, Boolean.TRUE, Types.NUMERIC, 2);
-    pstmt.setObject(2, Boolean.FALSE, Types.NUMERIC, 2);
-    pstmt.setNull(3, Types.DOUBLE);
-    pstmt.executeUpdate();
-    pstmt.close();
-
-    pstmt = con.prepareStatement("select * from numeric_tab");
-    ResultSet rs = pstmt.executeQuery();
-    assertTrue(rs.next());
-
-    assertTrue("expected " + dBooleanTrue + " ,received " + rs.getObject(1),
-        ((BigDecimal) rs.getObject(1)).compareTo(dBooleanTrue) == 0);
-    assertTrue("expected " + dBooleanFalse + " ,received " + rs.getObject(2),
-        ((BigDecimal) rs.getObject(2)).compareTo(dBooleanFalse) == 0);
-    rs.getFloat(3);
-    assertTrue(rs.wasNull());
-    rs.close();
-    pstmt.close();
-
-  }
-
-  @Test
-  public void testSetBooleanDecimal() throws SQLException {
-    PreparedStatement pstmt = con.prepareStatement(
-        "CREATE temp TABLE DECIMAL_TAB (max_val numeric(30,15), min_val numeric(30,15), null_val numeric(30,15))");
-    pstmt.executeUpdate();
-    pstmt.close();
-
-    BigDecimal dBooleanTrue = new BigDecimal(1);
-    BigDecimal dBooleanFalse = new BigDecimal(0);
-
-    pstmt = con.prepareStatement("insert into DECIMAL_TAB values (?,?,?)");
-    pstmt.setObject(1, Boolean.TRUE, Types.DECIMAL, 2);
-    pstmt.setObject(2, Boolean.FALSE, Types.DECIMAL, 2);
-    pstmt.setNull(3, Types.DOUBLE);
-    pstmt.executeUpdate();
-    pstmt.close();
-
-    pstmt = con.prepareStatement("select * from DECIMAL_TAB");
-    ResultSet rs = pstmt.executeQuery();
-    assertTrue(rs.next());
-
-    assertTrue("expected " + dBooleanTrue + " ,received " + rs.getObject(1),
-        ((BigDecimal) rs.getObject(1)).compareTo(dBooleanTrue) == 0);
-    assertTrue("expected " + dBooleanFalse + " ,received " + rs.getObject(2),
-        ((BigDecimal) rs.getObject(2)).compareTo(dBooleanFalse) == 0);
-    rs.getFloat(3);
-    assertTrue(rs.wasNull());
-    rs.close();
-    pstmt.close();
-
-  }
-
-  @Test
-  public void testSetObjectBigDecimalUnscaled() throws SQLException {
-    TestUtil.createTempTable(con, "decimal_scale",
-        "n1 numeric, n2 numeric, n3 numeric, n4 numeric");
-    PreparedStatement pstmt = con.prepareStatement("insert into decimal_scale values(?,?,?,?)");
-    BigDecimal v = new BigDecimal("3.141593");
-    pstmt.setObject(1, v, Types.NUMERIC);
-
-    String vs = v.toPlainString();
-    pstmt.setObject(2, vs, Types.NUMERIC);
-
-    Float vf = Float.valueOf(vs);
-    pstmt.setObject(3, vf, Types.NUMERIC);
-
-    Double vd = Double.valueOf(vs);
-    pstmt.setObject(4, vd, Types.NUMERIC);
-
-    pstmt.executeUpdate();
-    pstmt.close();
-
-    pstmt = con.prepareStatement("select n1,n2,n3,n4 from decimal_scale");
-    ResultSet rs = pstmt.executeQuery();
-    assertTrue(rs.next());
-    assertTrue("expected numeric set via BigDecimal " + v + " stored as " + rs.getBigDecimal(1),
-        v.compareTo(rs.getBigDecimal(1)) == 0);
-    assertTrue("expected numeric set via String" + vs + " stored as " + rs.getBigDecimal(2),
-        v.compareTo(rs.getBigDecimal(2)) == 0);
-    // float is really bad...
-    assertTrue("expected numeric set via Float" + vf + " stored as " + rs.getBigDecimal(3),
-        v.compareTo(rs.getBigDecimal(3).setScale(6, RoundingMode.HALF_UP)) == 0);
-    assertTrue("expected numeric set via Double" + vd + " stored as " + rs.getBigDecimal(4),
-        v.compareTo(rs.getBigDecimal(4)) == 0);
-
-    rs.close();
-    pstmt.close();
-  }
-
-  @Test
-  public void testSetObjectBigDecimalWithScale() throws SQLException {
-    TestUtil.createTempTable(con, "decimal_scale",
-        "n1 numeric, n2 numeric, n3 numeric, n4 numeric");
-    PreparedStatement psinsert = con.prepareStatement("insert into decimal_scale values(?,?,?,?)");
-    PreparedStatement psselect = con.prepareStatement("select n1,n2,n3,n4 from decimal_scale");
-    PreparedStatement pstruncate = con.prepareStatement("truncate table decimal_scale");
-
-    BigDecimal v = new BigDecimal("3.141593");
-    String vs = v.toPlainString();
-    Float vf = Float.valueOf(vs);
-    Double vd = Double.valueOf(vs);
-
-    for (int s = 0; s < 6; s++) {
-      psinsert.setObject(1, v, Types.NUMERIC, s);
-      psinsert.setObject(2, vs, Types.NUMERIC, s);
-      psinsert.setObject(3, vf, Types.NUMERIC, s);
-      psinsert.setObject(4, vd, Types.NUMERIC, s);
-
-      psinsert.executeUpdate();
-
-      ResultSet rs = psselect.executeQuery();
-      assertTrue(rs.next());
-      BigDecimal vscaled = v.setScale(s, RoundingMode.HALF_UP);
-      assertTrue(
-          "expected numeric set via BigDecimal " + v + " with scale " + s + " stored as " + vscaled,
-          vscaled.compareTo(rs.getBigDecimal(1)) == 0);
-      assertTrue(
-          "expected numeric set via String" + vs + " with scale " + s + " stored as " + vscaled,
-          vscaled.compareTo(rs.getBigDecimal(2)) == 0);
-      assertTrue(
-          "expected numeric set via Float" + vf + " with scale " + s + " stored as " + vscaled,
-          vscaled.compareTo(rs.getBigDecimal(3)) == 0);
-      assertTrue(
-          "expected numeric set via Double" + vd + " with scale " + s + " stored as " + vscaled,
-          vscaled.compareTo(rs.getBigDecimal(4)) == 0);
-      rs.close();
-      pstruncate.executeUpdate();
+    private void runBrokenStream(InputStream is, int length) throws SQLException {
+        PreparedStatement pstmt = null;
+        try {
+            pstmt = con.prepareStatement("INSERT INTO streamtable (bin,str) VALUES (?,?)");
+            pstmt.setBinaryStream(1, is, length);
+            pstmt.setString(2, "Other");
+            pstmt.executeUpdate();
+            fail("This isn't supposed to work.");
+        } catch (SQLException sqle) {
+            // don't need to rollback because we're in autocommit mode
+            pstmt.close();
+
+            // verify the connection is still valid and the row didn't go in.
+            Statement stmt = con.createStatement();
+            ResultSet rs = stmt.executeQuery("SELECT COUNT(*) FROM streamtable");
+            assertTrue(rs.next());
+            assertEquals(0, rs.getInt(1));
+            rs.close();
+            stmt.close();
+        }
     }
 
-    psinsert.close();
-    psselect.close();
-    pstruncate.close();
-  }
-
-  @Test
-  public void testSetObjectWithBigDecimal() throws SQLException {
-    TestUtil.createTempTable(con, "number_fallback",
-            "n1 numeric");
-    PreparedStatement psinsert = con.prepareStatement("insert into number_fallback values(?)");
-    PreparedStatement psselect = con.prepareStatement("select n1 from number_fallback");
-
-    psinsert.setObject(1, new BigDecimal("733"));
-    psinsert.execute();
-
-    ResultSet rs = psselect.executeQuery();
-    assertTrue(rs.next());
-    assertTrue(
-        "expected 733, but received " + rs.getBigDecimal(1),
-        new BigDecimal("733").compareTo(rs.getBigDecimal(1)) == 0);
-
-    psinsert.close();
-    psselect.close();
-  }
-
-  @Test
-  public void testSetObjectNumberFallbackWithBigInteger() throws SQLException {
-    TestUtil.createTempTable(con, "number_fallback",
-            "n1 numeric");
-    PreparedStatement psinsert = con.prepareStatement("insert into number_fallback values(?)");
-    PreparedStatement psselect = con.prepareStatement("select n1 from number_fallback");
-
-    psinsert.setObject(1, new BigInteger("733"));
-    psinsert.execute();
-
-    ResultSet rs = psselect.executeQuery();
-    assertTrue(rs.next());
-    assertTrue(
-        "expected 733, but received " + rs.getBigDecimal(1),
-        new BigDecimal("733").compareTo(rs.getBigDecimal(1)) == 0);
-
-    psinsert.close();
-    psselect.close();
-  }
-
-  @Test
-  public void testSetObjectNumberFallbackWithAtomicLong() throws SQLException {
-    TestUtil.createTempTable(con, "number_fallback",
-            "n1 numeric");
-    PreparedStatement psinsert = con.prepareStatement("insert into number_fallback values(?)");
-    PreparedStatement psselect = con.prepareStatement("select n1 from number_fallback");
-
-    psinsert.setObject(1, new AtomicLong(733));
-    psinsert.execute();
-
-    ResultSet rs = psselect.executeQuery();
-    assertTrue(rs.next());
-    assertTrue(
-        "expected 733, but received " + rs.getBigDecimal(1),
-        new BigDecimal("733").compareTo(rs.getBigDecimal(1)) == 0);
-
-    psinsert.close();
-    psselect.close();
-  }
-
-  @Test
-  public void testUnknownSetObject() throws SQLException {
-    PreparedStatement pstmt = con.prepareStatement("INSERT INTO intervaltable(i) VALUES (?)");
-
-    pstmt.setString(1, "1 week");
-    try {
-      pstmt.executeUpdate();
-      assertTrue("When using extended protocol, interval vs character varying type mismatch error is expected",
-          preferQueryMode == PreferQueryMode.SIMPLE);
-    } catch (SQLException sqle) {
-      // ERROR: column "i" is of type interval but expression is of type character varying
+    private void doSetBinaryStream(ByteArrayInputStream bais, int length) throws SQLException {
+        PreparedStatement pstmt =
+                con.prepareStatement("INSERT INTO streamtable (bin,str) VALUES (?,?)");
+        pstmt.setBinaryStream(1, bais, length);
+        pstmt.setString(2, null);
+        pstmt.executeUpdate();
+        pstmt.close();
     }
 
-    pstmt.setObject(1, "1 week", Types.OTHER);
-    pstmt.executeUpdate();
-    pstmt.close();
-  }
-
-  /**
-   * With autoboxing this apparently happens more often now.
-   */
-  @Test
-  public void testSetObjectCharacter() throws SQLException {
-    PreparedStatement ps = con.prepareStatement("INSERT INTO texttable(te) VALUES (?)");
-    ps.setObject(1, 'z');
-    ps.executeUpdate();
-    ps.close();
-  }
-
-  /**
-   * When we have parameters of unknown type and it's not using the unnamed statement, we issue a
-   * protocol level statement describe message for the V3 protocol. This test just makes sure that
-   * works.
-   */
-  @Test
-  public void testStatementDescribe() throws SQLException {
-    PreparedStatement pstmt = con.prepareStatement("SELECT ?::int");
-    pstmt.setObject(1, 2, Types.OTHER);
-    for (int i = 0; i < 10; i++) {
-      ResultSet rs = pstmt.executeQuery();
-      assertTrue(rs.next());
-      assertEquals(2, rs.getInt(1));
-      rs.close();
-    }
-    pstmt.close();
-  }
-
-  @Test
-  public void testBatchWithPrepareThreshold5() throws SQLException {
-    assumeBinaryModeRegular();
-    Assume.assumeTrue("simple protocol only does not support prepared statement requests",
-        preferQueryMode != PreferQueryMode.SIMPLE);
-
-    PreparedStatement pstmt = con.prepareStatement("CREATE temp TABLE batch_tab_threshold5 (id bigint, val bigint)");
-    pstmt.executeUpdate();
-    pstmt.close();
-
-    // When using a prepareThreshold of 5, a batch update should use server-side prepare
-    pstmt = con.prepareStatement("INSERT INTO batch_tab_threshold5 (id, val) VALUES (?,?)");
-    ((PgStatement) pstmt).setPrepareThreshold(5);
-    for (int p = 0; p < 5; p++) {
-      for (int i = 0; i <= 5; i++) {
-        pstmt.setLong(1, i);
-        pstmt.setLong(2, i);
-        pstmt.addBatch();
-      }
-      pstmt.executeBatch();
-    }
-    pstmt.close();
-    assertTrue("prepareThreshold=5, so the statement should be server-prepared",
-        ((PGStatement) pstmt).isUseServerPrepare());
-    assertEquals("prepareThreshold=5, so the statement should be server-prepared", 1,
-        getNumberOfServerPreparedStatements("INSERT INTO batch_tab_threshold5 (id, val) VALUES ($1,$2)"));
-  }
-
-  @Test
-  public void testBatchWithPrepareThreshold0() throws SQLException {
-    assumeBinaryModeRegular();
-    Assume.assumeTrue("simple protocol only does not support prepared statement requests",
-        preferQueryMode != PreferQueryMode.SIMPLE);
-
-    PreparedStatement pstmt = con.prepareStatement("CREATE temp TABLE batch_tab_threshold0 (id bigint, val bigint)");
-    pstmt.executeUpdate();
-    pstmt.close();
-
-    // When using a prepareThreshold of 0, a batch update should not use server-side prepare
-    pstmt = con.prepareStatement("INSERT INTO batch_tab_threshold0 (id, val) VALUES (?,?)");
-    ((PgStatement) pstmt).setPrepareThreshold(0);
-    for (int p = 0; p < 5; p++) {
-      for (int i = 0; i <= 5; i++) {
-        pstmt.setLong(1, i);
-        pstmt.setLong(2, i);
-        pstmt.addBatch();
-      }
-      pstmt.executeBatch();
-    }
-    pstmt.close();
-
-    assertFalse("prepareThreshold=0, so the statement should not be server-prepared",
-        ((PGStatement) pstmt).isUseServerPrepare());
-    assertEquals("prepareThreshold=0, so the statement should not be server-prepared", 0,
-        getNumberOfServerPreparedStatements("INSERT INTO batch_tab_threshold0 (id, val) VALUES ($1,$2)"));
-  }
-
-  @Test
-  public void testSelectPrepareThreshold0AutoCommitFalseFetchSizeNonZero() throws SQLException {
-    assumeBinaryModeRegular();
-    Assume.assumeTrue("simple protocol only does not support prepared statement requests",
-        preferQueryMode != PreferQueryMode.SIMPLE);
-
-    con.setAutoCommit(false);
-    PreparedStatement pstmt = null;
-    ResultSet rs = null;
-    try {
-      pstmt = con.prepareStatement("SELECT 42");
-      ((PgStatement) pstmt).setPrepareThreshold(0);
-      pstmt.setFetchSize(1);
-      rs = pstmt.executeQuery();
-      rs.next();
-      assertEquals(42, rs.getInt(1));
-    } finally {
-      TestUtil.closeQuietly(rs);
-      TestUtil.closeQuietly(pstmt);
+    private void doSetAsciiStream(InputStream is, int length) throws SQLException {
+        PreparedStatement pstmt =
+                con.prepareStatement("INSERT INTO streamtable (bin,str) VALUES (?,?)");
+        pstmt.setBytes(1, null);
+        pstmt.setAsciiStream(2, is, length);
+        pstmt.executeUpdate();
+        pstmt.close();
     }
 
-    assertFalse("prepareThreshold=0, so the statement should not be server-prepared",
-        ((PGStatement) pstmt).isUseServerPrepare());
+    /**
+     * Trailing spaces in a bound String must survive a round trip through
+     * char, text and varchar columns, both on INSERT and when the same value
+     * is used in WHERE equality comparisons.
+     */
+    @Test
+    public void testTrailingSpaces() throws SQLException {
+        PreparedStatement pstmt =
+                con.prepareStatement("INSERT INTO texttable (ch, te, vc) VALUES (?, ?, ?) ");
+        String str = "a  ";
+        pstmt.setString(1, str);
+        pstmt.setString(2, str);
+        pstmt.setString(3, str);
+        pstmt.executeUpdate();
+        pstmt.close();
+
+        // Read the row back by matching on the exact space-padded value.
+        pstmt = con.prepareStatement("SELECT ch, te, vc FROM texttable WHERE ch=? AND te=? AND vc=?");
+        pstmt.setString(1, str);
+        pstmt.setString(2, str);
+        pstmt.setString(3, str);
+        ResultSet rs = pstmt.executeQuery();
+        assertTrue(rs.next());
+        assertEquals(str, rs.getString(1));
+        assertEquals(str, rs.getString(2));
+        assertEquals(str, rs.getString(3));
+        rs.close();
+        pstmt.close();
+    }
 
-  @Test
-  public void testInappropriateStatementSharing() throws SQLException {
-    PreparedStatement ps = con.prepareStatement("SELECT ?::timestamp");
-    assertFirstParameterTypeName("after prepare ?::timestamp bind type should be timestamp", "timestamp", ps);
-    try {
-      Timestamp ts = new Timestamp(1474997614836L);
-      // Since PreparedStatement isn't cached immediately, we need to some warm up
-      for (int i = 0; i < 3; i++) {
+    /**
+     * A single prepared statement containing two semicolon-separated statements
+     * (INSERT then SELECT): the bind applies to the first, getResultSet() is null
+     * for the INSERT, and getMoreResults() advances to the SELECT's result set.
+     */
+    @Test
+    public void testBinds() throws SQLException {
+        // braces around (42) are required to puzzle the parser
+        String query = "INSERT INTO inttable(a) VALUES (?);SELECT (42)";
+        PreparedStatement ps = con.prepareStatement(query);
+        ps.setInt(1, 100500);
+        ps.execute();
+        ResultSet rs = ps.getResultSet();
+        Assert.assertNull("insert produces no results ==> getResultSet should be null", rs);
+        Assert.assertTrue("There are two statements => getMoreResults should be true", ps.getMoreResults());
+        rs = ps.getResultSet();
+        Assert.assertNotNull("select produces results ==> getResultSet should be not null", rs);
+        Assert.assertTrue("select produces 1 row ==> rs.next should be true", rs.next());
+        Assert.assertEquals("second result of query " + query, 42, rs.getInt(1));
+
+        TestUtil.closeQuietly(rs);
+        TestUtil.closeQuietly(ps);
+    }
+
+    /**
+     * Exercises the various ways of binding SQL NULL: setNull with a concrete
+     * JDBC type, setObject(null) with full, partial and no type information,
+     * a typed PGobject carrying a null value, and setNull with an explicit
+     * backend type name ("uuid").
+     */
+    @Test
+    public void testSetNull() throws SQLException {
+        // valid: fully qualified type to setNull()
+        PreparedStatement pstmt = con.prepareStatement("INSERT INTO texttable (te) VALUES (?)");
+        pstmt.setNull(1, Types.VARCHAR);
+        pstmt.executeUpdate();
+
+        // valid: fully qualified type to setObject()
+        pstmt.setObject(1, null, Types.VARCHAR);
+        pstmt.executeUpdate();
+
+        // valid: setObject() with partial type info and a typed "null object instance"
+        org.postgresql.util.PGobject dummy = new org.postgresql.util.PGobject();
+        dummy.setType("text");
+        dummy.setValue(null);
+        pstmt.setObject(1, dummy, Types.OTHER);
+        pstmt.executeUpdate();
+
+        // setObject() with no type info
+        pstmt.setObject(1, null);
+        pstmt.executeUpdate();
+
+        // setObject() with insufficient type info
+        pstmt.setObject(1, null, Types.OTHER);
+        pstmt.executeUpdate();
+
+        // setNull() with insufficient type info
+        pstmt.setNull(1, Types.OTHER);
+        pstmt.executeUpdate();
+
+        pstmt.close();
+
+        // setNull with an explicit backend type name; gated on 8.3 (presumably
+        // because of the uuid type's availability — confirm).
+        assumeMinimumServerVersion(ServerVersion.v8_3);
+        pstmt = con.prepareStatement("select 'ok' where ?=? or (? is null) ");
+        pstmt.setObject(1, UUID.randomUUID(), Types.OTHER);
+        pstmt.setNull(2, Types.OTHER, "uuid");
+        pstmt.setNull(3, Types.OTHER, "uuid");
+        ResultSet rs = pstmt.executeQuery();
+
+        assertTrue(rs.next());
+        assertEquals("ok", rs.getObject(1));
+
+        rs.close();
+        pstmt.close();
+
+    }
+
+    /**
+     * Single-quoted literal handling under both settings of
+     * standard_conforming_strings: the driver must parse (and not treat as bind
+     * placeholders) question marks and escapes inside string literals, and the
+     * server must decode each literal to the same expected text.
+     */
+    @Test
+    public void testSingleQuotes() throws SQLException {
+        String[] testStrings = new String[]{
+                "bare ? question mark",
+                "quoted \\' single quote",
+                "doubled '' single quote",
+                "octal \\060 constant",
+                "escaped \\? question mark",
+                "double \\\\ backslash",
+                "double \" quote",};
+
+        String[] testStringsStdConf = new String[]{
+                "bare ? question mark",
+                "quoted '' single quote",
+                "doubled '' single quote",
+                "octal 0 constant",
+                "escaped ? question mark",
+                "double \\ backslash",
+                "double \" quote",};
+
+        String[] expected = new String[]{
+                "bare ? question mark",
+                "quoted ' single quote",
+                "doubled ' single quote",
+                "octal 0 constant",
+                "escaped ? question mark",
+                "double \\ backslash",
+                "double \" quote",};
+
+        boolean oldStdStrings = TestUtil.getStandardConformingStrings(con);
+        Statement stmt = con.createStatement();
+
+        // Test with standard_conforming_strings turned off.
+        stmt.execute("SET standard_conforming_strings TO off");
+        for (int i = 0; i < testStrings.length; i++) {
+            PreparedStatement pstmt = con.prepareStatement("SELECT '" + testStrings[i] + "'");
+            ResultSet rs = pstmt.executeQuery();
+            assertTrue(rs.next());
+            assertEquals(expected[i], rs.getString(1));
+            rs.close();
+            pstmt.close();
+        }
+
+        // Test with standard_conforming_strings turned on...
+        // ... using the escape string syntax (E''), which keeps backslash escapes.
+        stmt.execute("SET standard_conforming_strings TO on");
+        for (int i = 0; i < testStrings.length; i++) {
+            PreparedStatement pstmt = con.prepareStatement("SELECT E'" + testStrings[i] + "'");
+            ResultSet rs = pstmt.executeQuery();
+            assertTrue(rs.next());
+            assertEquals(expected[i], rs.getString(1));
+            rs.close();
+            pstmt.close();
+        }
+        // ... using standard conforming input strings.
+        for (int i = 0; i < testStrings.length; i++) {
+            PreparedStatement pstmt = con.prepareStatement("SELECT '" + testStringsStdConf[i] + "'");
+            ResultSet rs = pstmt.executeQuery();
+            assertTrue(rs.next());
+            assertEquals(expected[i], rs.getString(1));
+            rs.close();
+            pstmt.close();
+        }
+
+        // Restore the connection's original setting for subsequent tests.
+        stmt.execute("SET standard_conforming_strings TO " + (oldStdStrings ? "on" : "off"));
+        stmt.close();
+    }
+
+    /**
+     * Question marks and quotes inside double-quoted identifiers must not be
+     * treated as bind placeholders: each test string is used verbatim as a
+     * table name in CREATE/DROP TABLE.
+     */
+    @Test
+    public void testDoubleQuotes() throws SQLException {
+        String[] testStrings = new String[]{
+                "bare ? question mark",
+                "single ' quote",
+                "doubled '' single quote",
+                "doubled \"\" double quote",
+                "no backslash interpretation here: \\",
+        };
+
+        for (String testString : testStrings) {
+            PreparedStatement pstmt =
+                    con.prepareStatement("CREATE TABLE \"" + testString + "\" (i integer)");
+            pstmt.executeUpdate();
+            pstmt.close();
+
+            pstmt = con.prepareStatement("DROP TABLE \"" + testString + "\"");
+            pstmt.executeUpdate();
+            pstmt.close();
+        }
+    }
+
+    /**
+     * '?' characters inside dollar-quoted strings ($$...$$, $tag$...$tag$) must
+     * not be treated as bind placeholders, including across multi-statement
+     * queries and with tags that are prefixes of one another.
+     */
+    @Test
+    public void testDollarQuotes() throws SQLException {
+        // dollar-quotes are supported in the backend since version 8.0
+        PreparedStatement st;
         ResultSet rs;
 
-        // Flip statement to use Oid.DATE
-        ps.setNull(1, Types.DATE);
-        assertFirstParameterTypeName("set parameter to DATE", "date", ps);
-        rs = ps.executeQuery();
-        assertFirstParameterTypeName("set parameter to DATE (executeQuery should not affect parameterMetadata)",
-            "date", ps);
+        st = con.prepareStatement("SELECT $$;$$ WHERE $x$?$x$=$_0$?$_0$ AND $$?$$=?");
+        st.setString(1, "?");
+        rs = st.executeQuery();
+        assertTrue(rs.next());
+        assertEquals(";", rs.getString(1));
+        assertFalse(rs.next());
+        st.close();
+
+        // Three statements in one string; only the genuine '?' outside
+        // dollar-quotes are bind positions (one per statement).
+        st = con.prepareStatement(
+                "SELECT $__$;$__$ WHERE ''''=$q_1$'$q_1$ AND ';'=?;"
+                        + "SELECT $x$$a$;$x $a$$x$ WHERE $$;$$=? OR ''=$c$c$;$c$;"
+                        + "SELECT ?");
+        st.setString(1, ";");
+        st.setString(2, ";");
+        st.setString(3, "$a$ $a$");
+
+        assertTrue(st.execute());
+        rs = st.getResultSet();
+        assertTrue(rs.next());
+        assertEquals(";", rs.getString(1));
+        assertFalse(rs.next());
+
+        assertTrue(st.getMoreResults());
+        rs = st.getResultSet();
+        assertTrue(rs.next());
+        assertEquals("$a$;$x $a$", rs.getString(1));
+        assertFalse(rs.next());
+
+        assertTrue(st.getMoreResults());
+        rs = st.getResultSet();
+        assertTrue(rs.next());
+        assertEquals("$a$ $a$", rs.getString(1));
+        assertFalse(rs.next());
+        st.close();
+    }
+
+    /**
+     * Identifiers containing '$' (e.g. a$b$c) must not be mistaken for
+     * dollar-quote delimiters by the driver's statement parser, even when a
+     * column name equals the table name (e$f$g).
+     */
+    @Test
+    public void testDollarQuotesAndIdentifiers() throws SQLException {
+        // dollar-quotes are supported in the backend since version 8.0
+        PreparedStatement st;
+
+        // Use a single, explicitly closed Statement for the DDL; the original
+        // created two statements via con.createStatement() and leaked both.
+        Statement setup = con.createStatement();
+        setup.execute("CREATE TEMP TABLE a$b$c(a varchar, b varchar)");
+        st = con.prepareStatement("INSERT INTO a$b$c (a, b) VALUES (?, ?)");
+        st.setString(1, "a");
+        st.setString(2, "b");
+        st.executeUpdate();
+        st.close();
+
+        setup.execute("CREATE TEMP TABLE e$f$g(h varchar, e$f$g varchar) ");
+        st = con.prepareStatement("UPDATE e$f$g SET h = ? || e$f$g");
+        st.setString(1, "a");
+        st.executeUpdate();
+        st.close();
+        setup.close();
+    }
+
+    /**
+     * '?' inside SQL comments — both nested block comments and line comments —
+     * must not count as bind placeholders; a commented-out trailing statement
+     * must not produce a result set.
+     */
+    @Test
+    public void testComments() throws SQLException {
+        PreparedStatement st;
+        ResultSet rs;
+
+        // Two live statements; the third SELECT is commented out by "--".
+        st = con.prepareStatement("SELECT /*?*/ /*/*/*/**/*/*/*/1;SELECT ?;--SELECT ?");
+        st.setString(1, "a");
+        assertTrue(st.execute());
+        assertTrue(st.getMoreResults());
+        assertFalse(st.getMoreResults());
+        st.close();
+
+        // Only the final '?' (after the line comment's newline) is a real parameter.
+        st = con.prepareStatement("SELECT /**/'?'/*/**/*/ WHERE '?'=/*/*/*?*/*/*/--?\n?");
+        st.setString(1, "?");
+        rs = st.executeQuery();
+        assertTrue(rs.next());
+        assertEquals("?", rs.getString(1));
+        assertFalse(rs.next());
+        st.close();
+    }
+
+    /**
+     * "??" is the JDBC escape for a literal '?' — here forming the PostgreSQL
+     * geometric operators ?- and ?# — and must not be parsed as placeholders.
+     */
+    @Test
+    public void testDoubleQuestionMark() throws SQLException {
+        PreparedStatement st;
+        ResultSet rs;
+
+        st = con.prepareStatement("select ??- lseg '((-1,0),(1,0))';");
+        rs = st.executeQuery();
+        assertTrue(rs.next());
+        // Bool values in binary mode are first converted to their Java type (Boolean), and then
+        // converted to String, which means that we receive 'true'. Bool values in text mode are
+        // returned as the same text value that was returned by the server, i.e. 't'.
+        assertEquals(binaryMode == BinaryMode.FORCE && preferQueryMode != PreferQueryMode.SIMPLE ? "true" : "t", rs.getString(1));
+        assertFalse(rs.next());
+        st.close();
+
+        st = con.prepareStatement("select lseg '((-1,0),(1,0))' ??# box '((-2,-2),(2,2))';");
+        rs = st.executeQuery();
+        assertTrue(rs.next());
+        assertEquals(binaryMode == BinaryMode.FORCE && preferQueryMode != PreferQueryMode.SIMPLE ? "true" : "t", rs.getString(1));
+        assertFalse(rs.next());
+        st.close();
+    }
+
+    /**
+     * Round-trips large positive/negative BigDecimal values plus NULL through
+     * numeric columns, comparing with compareTo (scale-insensitive).
+     */
+    @Test
+    public void testNumeric() throws SQLException {
+        PreparedStatement pstmt = con.prepareStatement(
+                "CREATE TEMP TABLE numeric_tab (max_numeric_positive numeric, min_numeric_positive numeric, max_numeric_negative numeric, min_numeric_negative numeric, null_value numeric)");
+        pstmt.executeUpdate();
+        pstmt.close();
+
+        // NOTE(review): wholeDigits is sized by NUMERIC_MAX_DISPLAY_SCALE while
+        // fractionDigits uses NUMERIC_MAX_PRECISION, yet both hold whole-number
+        // digit strings — the names/constants look swapped; confirm intent.
+        char[] wholeDigits = new char[NUMERIC_MAX_DISPLAY_SCALE];
+        for (int i = 0; i < NUMERIC_MAX_DISPLAY_SCALE; i++) {
+            wholeDigits[i] = '9';
+        }
+
+        char[] fractionDigits = new char[NUMERIC_MAX_PRECISION];
+        for (int i = 0; i < NUMERIC_MAX_PRECISION; i++) {
+            fractionDigits[i] = '9';
+        }
+
+        String maxValueString = new String(wholeDigits);
+        String minValueString = new String(fractionDigits);
+        BigDecimal[] values = new BigDecimal[4];
+        values[0] = new BigDecimal(maxValueString);
+        values[1] = new BigDecimal("-" + maxValueString);
+        values[2] = new BigDecimal(minValueString);
+        values[3] = new BigDecimal("-" + minValueString);
+
+        pstmt = con.prepareStatement("insert into numeric_tab values (?,?,?,?,?)");
+        for (int i = 1; i < 5; i++) {
+            pstmt.setBigDecimal(i, values[i - 1]);
+        }
+
+        pstmt.setNull(5, Types.NUMERIC);
+        pstmt.executeUpdate();
+        pstmt.close();
+
+        pstmt = con.prepareStatement("select * from numeric_tab");
+        ResultSet rs = pstmt.executeQuery();
+        assertTrue(rs.next());
+        for (int i = 1; i < 5; i++) {
+            assertTrue(rs.getBigDecimal(i).compareTo(values[i - 1]) == 0);
+        }
+        // getDouble on the NULL column returns 0 and sets wasNull.
+        rs.getDouble(5);
+        assertTrue(rs.wasNull());
+        rs.close();
+        pstmt.close();
+
+    }
+
+    /**
+     * Round-trips extreme float8 values (1.0E125, 1.0E-130) plus NULL through
+     * a temp table and verifies exact recovery via getDouble.
+     */
+    @Test
+    public void testDouble() throws SQLException {
+        PreparedStatement pstmt = con.prepareStatement(
+                "CREATE TEMP TABLE double_tab (max_double float, min_double float, null_value float)");
+        pstmt.executeUpdate();
+        pstmt.close();
+
+        pstmt = con.prepareStatement("insert into double_tab values (?,?,?)");
+        pstmt.setDouble(1, 1.0E125);
+        pstmt.setDouble(2, 1.0E-130);
+        pstmt.setNull(3, Types.DOUBLE);
+        pstmt.executeUpdate();
+        pstmt.close();
+
+        pstmt = con.prepareStatement("select * from double_tab");
+        ResultSet rs = pstmt.executeQuery();
+        assertTrue(rs.next());
+        // (an unused local `double d = rs.getDouble(1)` was removed here)
+        assertTrue(rs.getDouble(1) == 1.0E125);
+        assertTrue(rs.getDouble(2) == 1.0E-130);
+        // getDouble on the NULL column returns 0 and sets wasNull.
+        rs.getDouble(3);
+        assertTrue(rs.wasNull());
+        rs.close();
+        pstmt.close();
+    }
+
+    /**
+     * Round-trips extreme real values (1.0E37, 1.0E-37) plus NULL through a
+     * temp table and verifies exact recovery via getFloat.
+     */
+    @Test
+    public void testFloat() throws SQLException {
+        PreparedStatement pstmt = con.prepareStatement(
+                "CREATE TEMP TABLE float_tab (max_float real, min_float real, null_value real)");
+        pstmt.executeUpdate();
+        pstmt.close();
+
+        pstmt = con.prepareStatement("insert into float_tab values (?,?,?)");
+        pstmt.setFloat(1, (float) 1.0E37);
+        pstmt.setFloat(2, (float) 1.0E-37);
+        pstmt.setNull(3, Types.FLOAT);
+        pstmt.executeUpdate();
+        pstmt.close();
+
+        pstmt = con.prepareStatement("select * from float_tab");
+        ResultSet rs = pstmt.executeQuery();
+        assertTrue(rs.next());
+        // (an unused local `float f = rs.getFloat(1)` was removed here)
+        assertTrue("expected 1.0E37,received " + rs.getFloat(1), rs.getFloat(1) == (float) 1.0E37);
+        assertTrue("expected 1.0E-37,received " + rs.getFloat(2), rs.getFloat(2) == (float) 1.0E-37);
+        // getDouble on the NULL column returns 0 and sets wasNull.
+        rs.getDouble(3);
+        assertTrue(rs.wasNull());
+        rs.close();
+        pstmt.close();
+    }
+
+    /**
+     * 'NaN' literals for numeric, real and double precision must be readable
+     * through a plain (simple) Statement.
+     */
+    @Test
+    public void testNaNLiteralsSimpleStatement() throws SQLException {
+        Statement statement = con.createStatement();
+        String query = "select 'NaN'::numeric, 'NaN'::real, 'NaN'::double precision";
+        checkNaNLiterals(statement, statement.executeQuery(query));
+    }
+
+    /**
+     * Same 'NaN' literals as the simple-statement test, but executed through a
+     * PreparedStatement.
+     */
+    @Test
+    public void testNaNLiteralsPreparedStatement() throws SQLException {
+        PreparedStatement ps =
+                con.prepareStatement("select 'NaN'::numeric, 'NaN'::real, 'NaN'::double precision");
+        ResultSet resultSet = ps.executeQuery();
+        checkNaNLiterals(ps, resultSet);
+    }
+
+    /**
+     * Asserts that all three columns (numeric, real, double precision) of the
+     * given result set hold NaN, via both getObject and the primitive getters,
+     * then closes both the result set and the statement.
+     */
+    private void checkNaNLiterals(Statement stmt, ResultSet rs) throws SQLException {
+        rs.next();
+        assertTrue("Double.isNaN((Double) rs.getObject", Double.isNaN((Double) rs.getObject(3)));
+        assertTrue("Double.isNaN(rs.getDouble", Double.isNaN(rs.getDouble(3)));
+        assertTrue("Float.isNaN((Float) rs.getObject", Float.isNaN((Float) rs.getObject(2)));
+        assertTrue("Float.isNaN(rs.getFloat", Float.isNaN(rs.getFloat(2)));
+        assertTrue("Double.isNaN((Double) rs.getObject", Double.isNaN((Double) rs.getObject(1)));
+        assertTrue("Double.isNaN(rs.getDouble", Double.isNaN(rs.getDouble(1)));
+        rs.close();
+        stmt.close();
+    }
+
+    /**
+     * NaN must survive parameter binding through the typed setters
+     * setFloat/setDouble.
+     */
+    @Test
+    public void testNaNSetDoubleFloat() throws SQLException {
+        PreparedStatement ps = con.prepareStatement("select ?, ?");
+        ps.setFloat(1, Float.NaN);
+        ps.setDouble(2, Double.NaN);
+
+        checkNaNParams(ps);
+    }
+
+    /**
+     * NaN must survive parameter binding through setObject with autoboxed
+     * Float/Double values.
+     */
+    @Test
+    public void testNaNSetObject() throws SQLException {
+        PreparedStatement ps = con.prepareStatement("select ?, ?");
+        ps.setObject(1, Float.NaN);
+        ps.setObject(2, Double.NaN);
+
+        checkNaNParams(ps);
+    }
+
+    /**
+     * Asserts that both columns of the given "select ?, ?" statement come back
+     * as NaN, via both getObject and the primitive getters, then closes the
+     * result set and statement quietly.
+     */
+    private void checkNaNParams(PreparedStatement ps) throws SQLException {
+        ResultSet rs = ps.executeQuery();
+        rs.next();
+
+        assertTrue("Float.isNaN((Float) rs.getObject", Float.isNaN((Float) rs.getObject(1)));
+        assertTrue("Float.isNaN(rs.getFloat", Float.isNaN(rs.getFloat(1)));
+        // The original duplicated the getDouble(2) check and never exercised
+        // getObject(2); mirror the Float checks above.
+        assertTrue("Double.isNaN((Double) rs.getObject", Double.isNaN((Double) rs.getObject(2)));
+        assertTrue("Double.isNaN(rs.getDouble", Double.isNaN(rs.getDouble(2)));
+
+        TestUtil.closeQuietly(rs);
+        TestUtil.closeQuietly(ps);
+    }
+
+    /**
+     * Runs the boolean binding test under several prepareThreshold settings
+     * (0 = never server-prepare, 1/5 = prepare after N uses, -1 = force binary).
+     */
+    @Test
+    public void testBoolean() throws SQLException {
+        testBoolean(0);
+        testBoolean(1);
+        testBoolean(5);
+        testBoolean(-1);
+    }
+
+    /**
+     * Binds boolean values through setBoolean/setObject with many accepted
+     * textual/char/numeric representations (t/T/y/Y/1/true/yes/on and their
+     * false counterparts), reads them back, and truncates the table.
+     * Repeated setObject calls on the same index overwrite the earlier value,
+     * so only the last binding per column is what gets inserted.
+     */
+    public void testBoolean(int prepareThreshold) throws SQLException {
+        PreparedStatement pstmt = con.prepareStatement("insert into bool_tab values (?,?,?,?,?,?,?,?)");
+        ((org.postgresql.PGStatement) pstmt).setPrepareThreshold(prepareThreshold);
+
+        // Test TRUE values
+        pstmt.setBoolean(1, true);
+        pstmt.setObject(1, Boolean.TRUE);
+        pstmt.setNull(2, Types.BIT);
+        pstmt.setObject(3, 't', Types.BIT);
+        pstmt.setObject(3, 'T', Types.BIT);
+        pstmt.setObject(3, "t", Types.BIT);
+        pstmt.setObject(4, "true", Types.BIT);
+        pstmt.setObject(5, 'y', Types.BIT);
+        pstmt.setObject(5, 'Y', Types.BIT);
+        pstmt.setObject(5, "Y", Types.BIT);
+        pstmt.setObject(6, "YES", Types.BIT);
+        pstmt.setObject(7, "On", Types.BIT);
+        pstmt.setObject(8, '1', Types.BIT);
+        pstmt.setObject(8, "1", Types.BIT);
+        assertEquals("one row inserted, true values", 1, pstmt.executeUpdate());
+        // Test FALSE values
+        pstmt.setBoolean(1, false);
+        pstmt.setObject(1, Boolean.FALSE);
+        pstmt.setNull(2, Types.BOOLEAN);
+        pstmt.setObject(3, 'f', Types.BOOLEAN);
+        pstmt.setObject(3, 'F', Types.BOOLEAN);
+        pstmt.setObject(3, "F", Types.BOOLEAN);
+        pstmt.setObject(4, "false", Types.BOOLEAN);
+        pstmt.setObject(5, 'n', Types.BOOLEAN);
+        pstmt.setObject(5, 'N', Types.BOOLEAN);
+        pstmt.setObject(5, "N", Types.BOOLEAN);
+        pstmt.setObject(6, "NO", Types.BOOLEAN);
+        pstmt.setObject(7, "Off", Types.BOOLEAN);
+        pstmt.setObject(8, "0", Types.BOOLEAN);
+        pstmt.setObject(8, '0', Types.BOOLEAN);
+        assertEquals("one row inserted, false values", 1, pstmt.executeUpdate());
+        // Test weird values
+        // These numeric coercions only need to be *accepted* by the setters;
+        // the bindings are discarded via clearParameters and never executed.
+        pstmt.setObject(1, (byte) 0, Types.BOOLEAN);
+        pstmt.setObject(2, BigDecimal.ONE, Types.BOOLEAN);
+        pstmt.setObject(3, 0L, Types.BOOLEAN);
+        pstmt.setObject(4, 0x1, Types.BOOLEAN);
+        pstmt.setObject(5, (float) 0, Types.BOOLEAN);
+        pstmt.setObject(5, 1.0d, Types.BOOLEAN);
+        pstmt.setObject(5, 0.0f, Types.BOOLEAN);
+        pstmt.setObject(6, Integer.valueOf("1"), Types.BOOLEAN);
+        pstmt.setObject(7, new java.math.BigInteger("0"), Types.BOOLEAN);
+        pstmt.clearParameters();
+        pstmt.close();
+
+        pstmt = con.prepareStatement("select * from bool_tab");
+        ((org.postgresql.PGStatement) pstmt).setPrepareThreshold(prepareThreshold);
+        ResultSet rs = pstmt.executeQuery();
+
+        assertTrue(rs.next());
+        assertTrue("expected true, received " + rs.getBoolean(1), rs.getBoolean(1));
+        rs.getFloat(2);
+        assertTrue(rs.wasNull());
+        assertTrue("expected true, received " + rs.getBoolean(3), rs.getBoolean(3));
+        assertTrue("expected true, received " + rs.getBoolean(4), rs.getBoolean(4));
+        assertTrue("expected true, received " + rs.getBoolean(5), rs.getBoolean(5));
+        assertTrue("expected true, received " + rs.getBoolean(6), rs.getBoolean(6));
+        assertTrue("expected true, received " + rs.getBoolean(7), rs.getBoolean(7));
+        assertTrue("expected true, received " + rs.getBoolean(8), rs.getBoolean(8));
+
+        assertTrue(rs.next());
+        assertFalse("expected false, received " + rs.getBoolean(1), rs.getBoolean(1));
+        rs.getBoolean(2);
+        assertTrue(rs.wasNull());
+        assertFalse("expected false, received " + rs.getBoolean(3), rs.getBoolean(3));
+        assertFalse("expected false, received " + rs.getBoolean(4), rs.getBoolean(4));
+        assertFalse("expected false, received " + rs.getBoolean(5), rs.getBoolean(5));
+        assertFalse("expected false, received " + rs.getBoolean(6), rs.getBoolean(6));
+        assertFalse("expected false, received " + rs.getBoolean(7), rs.getBoolean(7));
+        assertFalse("expected false, received " + rs.getBoolean(8), rs.getBoolean(8));
+
+        rs.close();
+        pstmt.close();
+
+        // Reset the table so repeated invocations (different thresholds) start clean.
+        pstmt = con.prepareStatement("TRUNCATE TABLE bool_tab");
+        pstmt.executeUpdate();
+        pstmt.close();
+    }
+
+    /**
+     * Values that cannot be coerced to boolean must make setObject(...,
+     * Types.BOOLEAN) throw SQLException with SQLState CANNOT_COERCE and a
+     * message quoting the offending value (or a generic message for
+     * non-stringifiable objects like File or Date).
+     */
+    @Test
+    public void testBadBoolean() throws SQLException {
+        PreparedStatement pstmt = con.prepareStatement("INSERT INTO bad_bool VALUES (?)");
         try {
-          assertTrue(rs.next());
-          assertNull("NULL DATE converted to TIMESTAMP should return NULL value on getObject",
-              rs.getObject(1));
-        } finally {
-          rs.close();
+            pstmt.setObject(1, "this is not boolean", Types.BOOLEAN);
+            fail();
+        } catch (SQLException e) {
+            assertEquals(org.postgresql.util.PSQLState.CANNOT_COERCE.getState(), e.getSQLState());
+            assertEquals("Cannot cast to boolean: \"this is not boolean\"", e.getMessage());
         }
-
-        // Flop statement to use Oid.UNSPECIFIED
-        ps.setTimestamp(1, ts);
-        assertFirstParameterTypeName("set parameter to Timestamp", "timestamp", ps);
-        rs = ps.executeQuery();
-        assertFirstParameterTypeName("set parameter to Timestamp (executeQuery should not affect parameterMetadata)",
-            "timestamp", ps);
         try {
-          assertTrue(rs.next());
-          assertEquals(
-              "Looks like we got a narrowing of the data (TIMESTAMP -> DATE). It might caused by inappropriate caching of the statement.",
-              ts, rs.getObject(1));
+            pstmt.setObject(1, 'X', Types.BOOLEAN);
+            fail();
+        } catch (SQLException e) {
+            assertEquals(org.postgresql.util.PSQLState.CANNOT_COERCE.getState(), e.getSQLState());
+            assertEquals("Cannot cast to boolean: \"X\"", e.getMessage());
+        }
+        try {
+            // Arbitrary objects get the generic message without the value echoed.
+            java.io.File obj = new java.io.File("");
+            pstmt.setObject(1, obj, Types.BOOLEAN);
+            fail();
+        } catch (SQLException e) {
+            assertEquals(org.postgresql.util.PSQLState.CANNOT_COERCE.getState(), e.getSQLState());
+            assertEquals("Cannot cast to boolean", e.getMessage());
+        }
+        try {
+            pstmt.setObject(1, "1.0", Types.BOOLEAN);
+            fail();
+        } catch (SQLException e) {
+            assertEquals(org.postgresql.util.PSQLState.CANNOT_COERCE.getState(), e.getSQLState());
+            assertEquals("Cannot cast to boolean: \"1.0\"", e.getMessage());
+        }
+        try {
+            pstmt.setObject(1, "-1", Types.BOOLEAN);
+            fail();
+        } catch (SQLException e) {
+            assertEquals(org.postgresql.util.PSQLState.CANNOT_COERCE.getState(), e.getSQLState());
+            assertEquals("Cannot cast to boolean: \"-1\"", e.getMessage());
+        }
+        try {
+            pstmt.setObject(1, "ok", Types.BOOLEAN);
+            fail();
+        } catch (SQLException e) {
+            assertEquals(org.postgresql.util.PSQLState.CANNOT_COERCE.getState(), e.getSQLState());
+            assertEquals("Cannot cast to boolean: \"ok\"", e.getMessage());
+        }
+        try {
+            pstmt.setObject(1, 0.99f, Types.BOOLEAN);
+            fail();
+        } catch (SQLException e) {
+            assertEquals(org.postgresql.util.PSQLState.CANNOT_COERCE.getState(), e.getSQLState());
+            assertEquals("Cannot cast to boolean: \"0.99\"", e.getMessage());
+        }
+        try {
+            pstmt.setObject(1, -0.01d, Types.BOOLEAN);
+            fail();
+        } catch (SQLException e) {
+            assertEquals(org.postgresql.util.PSQLState.CANNOT_COERCE.getState(), e.getSQLState());
+            assertEquals("Cannot cast to boolean: \"-0.01\"", e.getMessage());
+        }
+        try {
+            pstmt.setObject(1, new java.sql.Date(0), Types.BOOLEAN);
+            fail();
+        } catch (SQLException e) {
+            assertEquals(org.postgresql.util.PSQLState.CANNOT_COERCE.getState(), e.getSQLState());
+            assertEquals("Cannot cast to boolean", e.getMessage());
+        }
+        try {
+            pstmt.setObject(1, new java.math.BigInteger("1000"), Types.BOOLEAN);
+            fail();
+        } catch (SQLException e) {
+            assertEquals(org.postgresql.util.PSQLState.CANNOT_COERCE.getState(), e.getSQLState());
+            assertEquals("Cannot cast to boolean: \"1000\"", e.getMessage());
+        }
+        try {
+            pstmt.setObject(1, Math.PI, Types.BOOLEAN);
+            fail();
+        } catch (SQLException e) {
+            assertEquals(org.postgresql.util.PSQLState.CANNOT_COERCE.getState(), e.getSQLState());
+            assertEquals("Cannot cast to boolean: \"3.141592653589793\"", e.getMessage());
+        }
+        pstmt.close();
+    }
+
+    /**
+     * Integer values bound with Types.FLOAT must come back from float8 columns
+     * as the equivalent Double values.
+     */
+    @Test
+    public void testSetFloatInteger() throws SQLException {
+        PreparedStatement pstmt = con.prepareStatement(
+                "CREATE temp TABLE float_tab (max_val float8, min_val float, null_val float8)");
+        pstmt.executeUpdate();
+        pstmt.close();
+
+        Integer maxInteger = 2147483647;
+        Integer minInteger = -2147483648;
+
+        Double maxFloat = 2147483647.0;
+        Double minFloat = (double) -2147483648;
+
+        pstmt = con.prepareStatement("insert into float_tab values (?,?,?)");
+        pstmt.setObject(1, maxInteger, Types.FLOAT);
+        pstmt.setObject(2, minInteger, Types.FLOAT);
+        pstmt.setNull(3, Types.FLOAT);
+        pstmt.executeUpdate();
+        pstmt.close();
+
+        pstmt = con.prepareStatement("select * from float_tab");
+        ResultSet rs = pstmt.executeQuery();
+        assertTrue(rs.next());
+
+        assertTrue("expected " + maxFloat + " ,received " + rs.getObject(1),
+                rs.getObject(1).equals(maxFloat));
+        assertTrue("expected " + minFloat + " ,received " + rs.getObject(2),
+                rs.getObject(2).equals(minFloat));
+        // getFloat on the NULL column returns 0 and sets wasNull.
+        rs.getFloat(3);
+        assertTrue(rs.wasNull());
+        rs.close();
+        pstmt.close();
+
+    }
+
+    /**
+     * String values bound with Types.FLOAT must be converted to float8 on
+     * insert; a second row of "1.0"/"0.0" additionally checks that getBoolean
+     * maps those floats to true/false.
+     */
+    @Test
+    public void testSetFloatString() throws SQLException {
+        PreparedStatement pstmt = con.prepareStatement(
+                "CREATE temp TABLE float_tab (max_val float8, min_val float8, null_val float8)");
+        pstmt.executeUpdate();
+        pstmt.close();
+
+        String maxStringFloat = "1.0E37";
+        String minStringFloat = "1.0E-37";
+        Double maxFloat = 1.0E37;
+        Double minFloat = 1.0E-37;
+
+        pstmt = con.prepareStatement("insert into float_tab values (?,?,?)");
+        pstmt.setObject(1, maxStringFloat, Types.FLOAT);
+        pstmt.setObject(2, minStringFloat, Types.FLOAT);
+        pstmt.setNull(3, Types.FLOAT);
+        pstmt.executeUpdate();
+        // Second row: reuse the same statement with new bindings.
+        pstmt.setObject(1, "1.0", Types.FLOAT);
+        pstmt.setObject(2, "0.0", Types.FLOAT);
+        pstmt.setNull(3, Types.FLOAT);
+        pstmt.executeUpdate();
+        pstmt.close();
+
+        pstmt = con.prepareStatement("select * from float_tab");
+        ResultSet rs = pstmt.executeQuery();
+        assertTrue(rs.next());
+
+        assertTrue(((Double) rs.getObject(1)).equals(maxFloat));
+        assertTrue(((Double) rs.getObject(2)).equals(minFloat));
+        assertTrue(rs.getDouble(1) == maxFloat);
+        assertTrue(rs.getDouble(2) == minFloat);
+        rs.getFloat(3);
+        assertTrue(rs.wasNull());
+
+        assertTrue(rs.next());
+        assertTrue("expected true, received " + rs.getBoolean(1), rs.getBoolean(1));
+        assertFalse("expected false,received " + rs.getBoolean(2), rs.getBoolean(2));
+
+        rs.close();
+        pstmt.close();
+
+    }
+
+    /**
+     * BigDecimal values bound with Types.FLOAT must come back from float8
+     * columns as the equivalent Double values.
+     */
+    @Test
+    public void testSetFloatBigDecimal() throws SQLException {
+        PreparedStatement pstmt = con.prepareStatement(
+                "CREATE temp TABLE float_tab (max_val float8, min_val float8, null_val float8)");
+        pstmt.executeUpdate();
+        pstmt.close();
+
+        BigDecimal maxBigDecimalFloat = new BigDecimal("1.0E37");
+        BigDecimal minBigDecimalFloat = new BigDecimal("1.0E-37");
+        Double maxFloat = 1.0E37;
+        Double minFloat = 1.0E-37;
+
+        pstmt = con.prepareStatement("insert into float_tab values (?,?,?)");
+        pstmt.setObject(1, maxBigDecimalFloat, Types.FLOAT);
+        pstmt.setObject(2, minBigDecimalFloat, Types.FLOAT);
+        pstmt.setNull(3, Types.FLOAT);
+        pstmt.executeUpdate();
+        pstmt.close();
+
+        pstmt = con.prepareStatement("select * from float_tab");
+        ResultSet rs = pstmt.executeQuery();
+        assertTrue(rs.next());
+
+        assertTrue("expected " + maxFloat + " ,received " + rs.getObject(1),
+                ((Double) rs.getObject(1)).equals(maxFloat));
+        assertTrue("expected " + minFloat + " ,received " + rs.getObject(2),
+                ((Double) rs.getObject(2)).equals(minFloat));
+        // getFloat on the NULL column returns 0 and sets wasNull.
+        rs.getFloat(3);
+        assertTrue(rs.wasNull());
+        rs.close();
+        pstmt.close();
+
+    }
+
+    /**
+     * Float values bound with Types.TINYINT must be stored as integers and be
+     * retrievable through every numeric getter (getObject, getInt, getLong,
+     * getBigDecimal with and without scale), with NULL handling via wasNull.
+     */
+    @Test
+    public void testSetTinyIntFloat() throws SQLException {
+        PreparedStatement pstmt = con
+                .prepareStatement("CREATE temp TABLE tiny_int (max_val int4, min_val int4, null_val int4)");
+        pstmt.executeUpdate();
+        pstmt.close();
+
+        Integer maxInt = 127;
+        Integer minInt = -127;
+        Float maxIntFloat = 127F;
+        Float minIntFloat = (float) -127;
+
+        pstmt = con.prepareStatement("insert into tiny_int values (?,?,?)");
+        pstmt.setObject(1, maxIntFloat, Types.TINYINT);
+        pstmt.setObject(2, minIntFloat, Types.TINYINT);
+        pstmt.setNull(3, Types.TINYINT);
+        pstmt.executeUpdate();
+        pstmt.close();
+
+        pstmt = con.prepareStatement("select * from tiny_int");
+        ResultSet rs = pstmt.executeQuery();
+        assertTrue(rs.next());
+
+        assertEquals("maxInt as rs.getObject", maxInt, rs.getObject(1));
+        assertEquals("minInt as rs.getObject", minInt, rs.getObject(2));
+        rs.getObject(3);
+        assertTrue("rs.wasNull after rs.getObject", rs.wasNull());
+        assertEquals("maxInt as rs.getInt", maxInt, (Integer) rs.getInt(1));
+        assertEquals("minInt as rs.getInt", minInt, (Integer) rs.getInt(2));
+        rs.getInt(3);
+        assertTrue("rs.wasNull after rs.getInt", rs.wasNull());
+        assertEquals("maxInt as rs.getLong", Long.valueOf(maxInt), (Long) rs.getLong(1));
+        assertEquals("minInt as rs.getLong", Long.valueOf(minInt), (Long) rs.getLong(2));
+        rs.getLong(3);
+        assertTrue("rs.wasNull after rs.getLong", rs.wasNull());
+        assertEquals("maxInt as rs.getBigDecimal", BigDecimal.valueOf(maxInt), rs.getBigDecimal(1));
+        assertEquals("minInt as rs.getBigDecimal", BigDecimal.valueOf(minInt), rs.getBigDecimal(2));
+        assertNull("rs.getBigDecimal", rs.getBigDecimal(3));
+        assertTrue("rs.getBigDecimal after rs.getLong", rs.wasNull());
+        assertEquals("maxInt as rs.getBigDecimal(scale=0)", BigDecimal.valueOf(maxInt),
+                rs.getBigDecimal(1, 0));
+        assertEquals("minInt as rs.getBigDecimal(scale=0)", BigDecimal.valueOf(minInt),
+                rs.getBigDecimal(2, 0));
+        assertNull("rs.getBigDecimal(scale=0)", rs.getBigDecimal(3, 0));
+        assertTrue("rs.getBigDecimal after rs.getLong", rs.wasNull());
+        assertEquals("maxInt as rs.getBigDecimal(scale=1)",
+                BigDecimal.valueOf(maxInt).setScale(1, RoundingMode.HALF_EVEN), rs.getBigDecimal(1, 1));
+        assertEquals("minInt as rs.getBigDecimal(scale=1)",
+                BigDecimal.valueOf(minInt).setScale(1, RoundingMode.HALF_EVEN), rs.getBigDecimal(2, 1));
+        rs.getFloat(3);
+        assertTrue(rs.wasNull());
+        rs.close();
+        pstmt.close();
+
+    }
+
+    /**
+     * Verifies that Float values bound via setObject(..., Types.SMALLINT) round-trip
+     * through an int4 column as the int2 extremes (32767 / -32768), and that a NULL
+     * third column is reported by wasNull().
+     */
+    @Test
+    public void testSetSmallIntFloat() throws SQLException {
+        PreparedStatement pstmt = con.prepareStatement(
+                "CREATE temp TABLE small_int (max_val int4, min_val int4, null_val int4)");
+        pstmt.executeUpdate();
+        pstmt.close();
+
+        Integer maxInt = 32767;
+        Integer minInt = -32768;
+        Float maxIntFloat = 32767F;
+        Float minIntFloat = (float) -32768;
+
+        pstmt = con.prepareStatement("insert into small_int values (?,?,?)");
+        pstmt.setObject(1, maxIntFloat, Types.SMALLINT);
+        pstmt.setObject(2, minIntFloat, Types.SMALLINT);
+        // Was Types.TINYINT (copy-paste from the tiny-int test); this test exercises
+        // SMALLINT, so use the matching target type for the NULL bind as well.
+        pstmt.setNull(3, Types.SMALLINT);
+        pstmt.executeUpdate();
+        pstmt.close();
+
+        pstmt = con.prepareStatement("select * from small_int");
+        ResultSet rs = pstmt.executeQuery();
+        assertTrue(rs.next());
+
+        assertTrue("expected " + maxInt + " ,received " + rs.getObject(1),
+                rs.getObject(1).equals(maxInt));
+        assertTrue("expected " + minInt + " ,received " + rs.getObject(2),
+                rs.getObject(2).equals(minInt));
+        rs.getFloat(3);
+        assertTrue(rs.wasNull());
+        rs.close();
+        pstmt.close();
+    }
+
+    /**
+     * Verifies that Float values bound via setObject(..., Types.INTEGER) are stored
+     * in an int4 column and read back as the equivalent Integer values, and that the
+     * NULL third column is reported by wasNull().
+     */
+    @Test
+    public void testSetIntFloat() throws SQLException {
+        // Scratch table: two value columns plus one column left NULL.
+        PreparedStatement create = con
+                .prepareStatement("CREATE temp TABLE int_TAB (max_val int4, min_val int4, null_val int4)");
+        create.executeUpdate();
+        create.close();
+
+        Integer maxInt = 1000;
+        Integer minInt = -1000;
+        Float maxIntFloat = 1000F;
+        Float minIntFloat = (float) -1000;
+
+        // Bind Float arguments while declaring the target type as INTEGER.
+        PreparedStatement insert = con.prepareStatement("insert into int_tab values (?,?,?)");
+        insert.setObject(1, maxIntFloat, Types.INTEGER);
+        insert.setObject(2, minIntFloat, Types.INTEGER);
+        insert.setNull(3, Types.INTEGER);
+        insert.executeUpdate();
+        insert.close();
+
+        PreparedStatement select = con.prepareStatement("select * from int_tab");
+        ResultSet rs = select.executeQuery();
+        assertTrue(rs.next());
+
+        assertTrue("expected " + maxInt + " ,received " + rs.getObject(1),
+                ((Integer) rs.getObject(1)).equals(maxInt));
+        assertTrue("expected " + minInt + " ,received " + rs.getObject(2),
+                ((Integer) rs.getObject(2)).equals(minInt));
+        rs.getFloat(3);
+        assertTrue(rs.wasNull());
+        rs.close();
+        select.close();
+    }
+
+    /**
+     * Verifies that Boolean values bound via setObject(..., Types.DOUBLE) are stored
+     * in a float column as 1.0 (TRUE) and 0.0 (FALSE), and that a NULL third column
+     * is reported by wasNull().
+     */
+    @Test
+    public void testSetBooleanDouble() throws SQLException {
+        PreparedStatement pstmt = con.prepareStatement(
+                "CREATE temp TABLE double_tab (max_val float, min_val float, null_val float)");
+        pstmt.executeUpdate();
+        pstmt.close();
+
+        // Expected numeric images of the two boolean values.
+        Double dBooleanTrue = 1.0;
+        Double dBooleanFalse = (double) 0;
+
+        pstmt = con.prepareStatement("insert into double_tab values (?,?,?)");
+        pstmt.setObject(1, Boolean.TRUE, Types.DOUBLE);
+        pstmt.setObject(2, Boolean.FALSE, Types.DOUBLE);
+        pstmt.setNull(3, Types.DOUBLE);
+        pstmt.executeUpdate();
+        pstmt.close();
+
+        pstmt = con.prepareStatement("select * from double_tab");
+        ResultSet rs = pstmt.executeQuery();
+        assertTrue(rs.next());
+
+        assertTrue("expected " + dBooleanTrue + " ,received " + rs.getObject(1),
+                rs.getObject(1).equals(dBooleanTrue));
+        assertTrue("expected " + dBooleanFalse + " ,received " + rs.getObject(2),
+                rs.getObject(2).equals(dBooleanFalse));
+        rs.getFloat(3);
+        assertTrue(rs.wasNull());
+        rs.close();
+        pstmt.close();
+
+    }
+
+    /**
+     * Verifies that Boolean values bound via setObject(..., Types.NUMERIC, 2) are
+     * stored in a numeric column as 1 (TRUE) and 0 (FALSE); values are compared with
+     * compareTo so differing scales still match. The NULL third column is checked
+     * with wasNull().
+     */
+    @Test
+    public void testSetBooleanNumeric() throws SQLException {
+        PreparedStatement pstmt = con.prepareStatement(
+                "CREATE temp TABLE numeric_tab (max_val numeric(30,15), min_val numeric(30,15), null_val numeric(30,15))");
+        pstmt.executeUpdate();
+        pstmt.close();
+
+        BigDecimal dBooleanTrue = new BigDecimal(1);
+        BigDecimal dBooleanFalse = new BigDecimal(0);
+
+        pstmt = con.prepareStatement("insert into numeric_tab values (?,?,?)");
+        pstmt.setObject(1, Boolean.TRUE, Types.NUMERIC, 2);
+        pstmt.setObject(2, Boolean.FALSE, Types.NUMERIC, 2);
+        // Was Types.DOUBLE; this test targets a NUMERIC column, so bind the NULL
+        // with the matching target type for consistency.
+        pstmt.setNull(3, Types.NUMERIC);
+        pstmt.executeUpdate();
+        pstmt.close();
+
+        pstmt = con.prepareStatement("select * from numeric_tab");
+        ResultSet rs = pstmt.executeQuery();
+        assertTrue(rs.next());
+
+        assertTrue("expected " + dBooleanTrue + " ,received " + rs.getObject(1),
+                ((BigDecimal) rs.getObject(1)).compareTo(dBooleanTrue) == 0);
+        assertTrue("expected " + dBooleanFalse + " ,received " + rs.getObject(2),
+                ((BigDecimal) rs.getObject(2)).compareTo(dBooleanFalse) == 0);
+        rs.getFloat(3);
+        assertTrue(rs.wasNull());
+        rs.close();
+        pstmt.close();
+
+    }
+
+    /**
+     * Verifies that Boolean values bound via setObject(..., Types.DECIMAL, 2) are
+     * stored in a numeric column as 1 (TRUE) and 0 (FALSE); values are compared with
+     * compareTo so differing scales still match. The NULL third column is checked
+     * with wasNull().
+     */
+    @Test
+    public void testSetBooleanDecimal() throws SQLException {
+        PreparedStatement pstmt = con.prepareStatement(
+                "CREATE temp TABLE DECIMAL_TAB (max_val numeric(30,15), min_val numeric(30,15), null_val numeric(30,15))");
+        pstmt.executeUpdate();
+        pstmt.close();
+
+        BigDecimal dBooleanTrue = new BigDecimal(1);
+        BigDecimal dBooleanFalse = new BigDecimal(0);
+
+        pstmt = con.prepareStatement("insert into DECIMAL_TAB values (?,?,?)");
+        pstmt.setObject(1, Boolean.TRUE, Types.DECIMAL, 2);
+        pstmt.setObject(2, Boolean.FALSE, Types.DECIMAL, 2);
+        // Was Types.DOUBLE; this test targets a DECIMAL column, so bind the NULL
+        // with the matching target type for consistency.
+        pstmt.setNull(3, Types.DECIMAL);
+        pstmt.executeUpdate();
+        pstmt.close();
+
+        pstmt = con.prepareStatement("select * from DECIMAL_TAB");
+        ResultSet rs = pstmt.executeQuery();
+        assertTrue(rs.next());
+
+        assertTrue("expected " + dBooleanTrue + " ,received " + rs.getObject(1),
+                ((BigDecimal) rs.getObject(1)).compareTo(dBooleanTrue) == 0);
+        assertTrue("expected " + dBooleanFalse + " ,received " + rs.getObject(2),
+                ((BigDecimal) rs.getObject(2)).compareTo(dBooleanFalse) == 0);
+        rs.getFloat(3);
+        assertTrue(rs.wasNull());
+        rs.close();
+        pstmt.close();
+
+    }
+
+    /**
+     * Verifies that the same numeric value bound via setObject(..., Types.NUMERIC)
+     * from four source types (BigDecimal, String, Float, Double) round-trips through
+     * an unconstrained numeric column; comparisons use compareTo to ignore scale.
+     */
+    @Test
+    public void testSetObjectBigDecimalUnscaled() throws SQLException {
+        TestUtil.createTempTable(con, "decimal_scale",
+                "n1 numeric, n2 numeric, n3 numeric, n4 numeric");
+        PreparedStatement pstmt = con.prepareStatement("insert into decimal_scale values(?,?,?,?)");
+        BigDecimal v = new BigDecimal("3.141593");
+        pstmt.setObject(1, v, Types.NUMERIC);
+
+        String vs = v.toPlainString();
+        pstmt.setObject(2, vs, Types.NUMERIC);
+
+        Float vf = Float.valueOf(vs);
+        pstmt.setObject(3, vf, Types.NUMERIC);
+
+        Double vd = Double.valueOf(vs);
+        pstmt.setObject(4, vd, Types.NUMERIC);
+
+        pstmt.executeUpdate();
+        pstmt.close();
+
+        pstmt = con.prepareStatement("select n1,n2,n3,n4 from decimal_scale");
+        ResultSet rs = pstmt.executeQuery();
+        assertTrue(rs.next());
+        assertTrue("expected numeric set via BigDecimal " + v + " stored as " + rs.getBigDecimal(1),
+                v.compareTo(rs.getBigDecimal(1)) == 0);
+        assertTrue("expected numeric set via String" + vs + " stored as " + rs.getBigDecimal(2),
+                v.compareTo(rs.getBigDecimal(2)) == 0);
+        // float is really bad... Float cannot represent 3.141593 exactly, so the
+        // stored value is rounded back to 6 decimal places before comparing.
+        assertTrue("expected numeric set via Float" + vf + " stored as " + rs.getBigDecimal(3),
+                v.compareTo(rs.getBigDecimal(3).setScale(6, RoundingMode.HALF_UP)) == 0);
+        assertTrue("expected numeric set via Double" + vd + " stored as " + rs.getBigDecimal(4),
+                v.compareTo(rs.getBigDecimal(4)) == 0);
+
+        rs.close();
+        pstmt.close();
+    }
+
+    /**
+     * Verifies that setObject(..., Types.NUMERIC, s) applies the requested scale s
+     * (0 through 5) to values supplied as BigDecimal, String, Float and Double; the
+     * expected value is the source rounded with HALF_UP at each scale.
+     */
+    @Test
+    public void testSetObjectBigDecimalWithScale() throws SQLException {
+        TestUtil.createTempTable(con, "decimal_scale",
+                "n1 numeric, n2 numeric, n3 numeric, n4 numeric");
+        PreparedStatement psinsert = con.prepareStatement("insert into decimal_scale values(?,?,?,?)");
+        PreparedStatement psselect = con.prepareStatement("select n1,n2,n3,n4 from decimal_scale");
+        PreparedStatement pstruncate = con.prepareStatement("truncate table decimal_scale");
+
+        BigDecimal v = new BigDecimal("3.141593");
+        String vs = v.toPlainString();
+        Float vf = Float.valueOf(vs);
+        Double vd = Double.valueOf(vs);
+
+        // One iteration per target scale; the table is truncated between rounds so
+        // each select sees exactly the row inserted in that round.
+        for (int s = 0; s < 6; s++) {
+            psinsert.setObject(1, v, Types.NUMERIC, s);
+            psinsert.setObject(2, vs, Types.NUMERIC, s);
+            psinsert.setObject(3, vf, Types.NUMERIC, s);
+            psinsert.setObject(4, vd, Types.NUMERIC, s);
+
+            psinsert.executeUpdate();
+
+            ResultSet rs = psselect.executeQuery();
+            assertTrue(rs.next());
+            BigDecimal vscaled = v.setScale(s, RoundingMode.HALF_UP);
+            assertTrue(
+                    "expected numeric set via BigDecimal " + v + " with scale " + s + " stored as " + vscaled,
+                    vscaled.compareTo(rs.getBigDecimal(1)) == 0);
+            assertTrue(
+                    "expected numeric set via String" + vs + " with scale " + s + " stored as " + vscaled,
+                    vscaled.compareTo(rs.getBigDecimal(2)) == 0);
+            assertTrue(
+                    "expected numeric set via Float" + vf + " with scale " + s + " stored as " + vscaled,
+                    vscaled.compareTo(rs.getBigDecimal(3)) == 0);
+            assertTrue(
+                    "expected numeric set via Double" + vd + " with scale " + s + " stored as " + vscaled,
+                    vscaled.compareTo(rs.getBigDecimal(4)) == 0);
+            rs.close();
+            pstruncate.executeUpdate();
+        }
+
+        psinsert.close();
+        psselect.close();
+        pstruncate.close();
+    }
+
+    /**
+     * Verifies that a BigDecimal bound via the untyped setObject(int, Object)
+     * overload round-trips through a numeric column; compareTo ignores scale.
+     */
+    @Test
+    public void testSetObjectWithBigDecimal() throws SQLException {
+        TestUtil.createTempTable(con, "number_fallback",
+                "n1 numeric");
+        PreparedStatement psinsert = con.prepareStatement("insert into number_fallback values(?)");
+        PreparedStatement psselect = con.prepareStatement("select n1 from number_fallback");
+
+        psinsert.setObject(1, new BigDecimal("733"));
+        psinsert.execute();
+
+        ResultSet rs = psselect.executeQuery();
+        assertTrue(rs.next());
+        assertTrue(
+                "expected 733, but received " + rs.getBigDecimal(1),
+                new BigDecimal("733").compareTo(rs.getBigDecimal(1)) == 0);
+
+        // Close the ResultSet as well; the original leaked it.
+        rs.close();
+        psinsert.close();
+        psselect.close();
+    }
+
+    /**
+     * Verifies the Number fallback path of the untyped setObject: a BigInteger is
+     * converted and stored in a numeric column, reading back as 733.
+     */
+    @Test
+    public void testSetObjectNumberFallbackWithBigInteger() throws SQLException {
+        TestUtil.createTempTable(con, "number_fallback",
+                "n1 numeric");
+        PreparedStatement psinsert = con.prepareStatement("insert into number_fallback values(?)");
+        PreparedStatement psselect = con.prepareStatement("select n1 from number_fallback");
+
+        psinsert.setObject(1, new BigInteger("733"));
+        psinsert.execute();
+
+        ResultSet rs = psselect.executeQuery();
+        assertTrue(rs.next());
+        assertTrue(
+                "expected 733, but received " + rs.getBigDecimal(1),
+                new BigDecimal("733").compareTo(rs.getBigDecimal(1)) == 0);
+
+        // Close the ResultSet as well; the original leaked it.
+        rs.close();
+        psinsert.close();
+        psselect.close();
+    }
+
+    /**
+     * Verifies the Number fallback path of the untyped setObject with a
+     * non-standard Number subclass (AtomicLong), reading back as 733.
+     */
+    @Test
+    public void testSetObjectNumberFallbackWithAtomicLong() throws SQLException {
+        TestUtil.createTempTable(con, "number_fallback",
+                "n1 numeric");
+        PreparedStatement psinsert = con.prepareStatement("insert into number_fallback values(?)");
+        PreparedStatement psselect = con.prepareStatement("select n1 from number_fallback");
+
+        psinsert.setObject(1, new AtomicLong(733));
+        psinsert.execute();
+
+        ResultSet rs = psselect.executeQuery();
+        assertTrue(rs.next());
+        assertTrue(
+                "expected 733, but received " + rs.getBigDecimal(1),
+                new BigDecimal("733").compareTo(rs.getBigDecimal(1)) == 0);
+
+        // Close the ResultSet as well; the original leaked it.
+        rs.close();
+        psinsert.close();
+        psselect.close();
+    }
+
+    /**
+     * Verifies binding an interval column: a plain setString is expected to fail
+     * with a type mismatch in extended protocol (but succeed in simple mode), while
+     * setObject(..., Types.OTHER) lets the server infer the type and succeeds.
+     */
+    @Test
+    public void testUnknownSetObject() throws SQLException {
+        PreparedStatement pstmt = con.prepareStatement("INSERT INTO intervaltable(i) VALUES (?)");
+
+        pstmt.setString(1, "1 week");
+        try {
+            pstmt.executeUpdate();
+            assertTrue("When using extended protocol, interval vs character varying type mismatch error is expected",
+                    preferQueryMode == PreferQueryMode.SIMPLE);
+        } catch (SQLException sqle) {
+            // Expected in extended protocol:
+            // ERROR: column "i" is of type interval but expression is of type character varying
+        }
+
+        pstmt.setObject(1, "1 week", Types.OTHER);
+        pstmt.executeUpdate();
+        pstmt.close();
+    }
+
+    /**
+     * Verifies that an autoboxed {@code char} ({@code Character}) can be bound with
+     * the untyped setObject and inserted into a text column without error.
+     * With autoboxing this apparently happens more often now.
+     */
+    @Test
+    public void testSetObjectCharacter() throws SQLException {
+        PreparedStatement ps = con.prepareStatement("INSERT INTO texttable(te) VALUES (?)");
+        ps.setObject(1, 'z');
+        ps.executeUpdate();
+        ps.close();
+    }
+
+    /**
+     * When we have parameters of unknown type and it's not using the unnamed statement, we issue a
+     * protocol level statement describe message for the V3 protocol. This test just makes sure that
+     * works.
+     */
+    @Test
+    public void testStatementDescribe() throws SQLException {
+        PreparedStatement pstmt = con.prepareStatement("SELECT ?::int");
+        // Types.OTHER leaves the parameter type unspecified, forcing a describe.
+        pstmt.setObject(1, 2, Types.OTHER);
+        // Execute repeatedly to exercise re-execution of the described statement.
+        for (int i = 0; i < 10; i++) {
+            ResultSet rs = pstmt.executeQuery();
+            assertTrue(rs.next());
+            assertEquals(2, rs.getInt(1));
+            rs.close();
+        }
+        pstmt.close();
+    }
+
+    /**
+     * Verifies that with prepareThreshold=5 a repeatedly executed batch insert ends
+     * up server-prepared: isUseServerPrepare() is true and exactly one matching
+     * entry exists in the server's prepared-statement catalog.
+     */
+    @Test
+    public void testBatchWithPrepareThreshold5() throws SQLException {
+        assumeBinaryModeRegular();
+        Assume.assumeTrue("simple protocol only does not support prepared statement requests",
+                preferQueryMode != PreferQueryMode.SIMPLE);
+
+        PreparedStatement pstmt = con.prepareStatement("CREATE temp TABLE batch_tab_threshold5 (id bigint, val bigint)");
+        pstmt.executeUpdate();
+        pstmt.close();
+
+        // When using a prepareThreshold of 5, a batch update should use server-side prepare
+        pstmt = con.prepareStatement("INSERT INTO batch_tab_threshold5 (id, val) VALUES (?,?)");
+        ((PgStatement) pstmt).setPrepareThreshold(5);
+        for (int p = 0; p < 5; p++) {
+            for (int i = 0; i <= 5; i++) {
+                pstmt.setLong(1, i);
+                pstmt.setLong(2, i);
+                pstmt.addBatch();
+            }
+            pstmt.executeBatch();
+        }
+        pstmt.close();
+        // NOTE(review): the assertions below run after close(); isUseServerPrepare()
+        // is apparently still meaningful on a closed statement - confirm intended.
+        assertTrue("prepareThreshold=5, so the statement should be server-prepared",
+                ((PGStatement) pstmt).isUseServerPrepare());
+        assertEquals("prepareThreshold=5, so the statement should be server-prepared", 1,
+                getNumberOfServerPreparedStatements("INSERT INTO batch_tab_threshold5 (id, val) VALUES ($1,$2)"));
+    }
+
+    /**
+     * Verifies that with prepareThreshold=0 a repeatedly executed batch insert is
+     * never server-prepared: isUseServerPrepare() is false and no matching entry
+     * exists in the server's prepared-statement catalog.
+     */
+    @Test
+    public void testBatchWithPrepareThreshold0() throws SQLException {
+        assumeBinaryModeRegular();
+        Assume.assumeTrue("simple protocol only does not support prepared statement requests",
+                preferQueryMode != PreferQueryMode.SIMPLE);
+
+        PreparedStatement pstmt = con.prepareStatement("CREATE temp TABLE batch_tab_threshold0 (id bigint, val bigint)");
+        pstmt.executeUpdate();
+        pstmt.close();
+
+        // When using a prepareThreshold of 0, a batch update should not use server-side prepare
+        pstmt = con.prepareStatement("INSERT INTO batch_tab_threshold0 (id, val) VALUES (?,?)");
+        ((PgStatement) pstmt).setPrepareThreshold(0);
+        for (int p = 0; p < 5; p++) {
+            for (int i = 0; i <= 5; i++) {
+                pstmt.setLong(1, i);
+                pstmt.setLong(2, i);
+                pstmt.addBatch();
+            }
+            pstmt.executeBatch();
+        }
+        pstmt.close();
+
+        // NOTE(review): the assertions below run after close(); isUseServerPrepare()
+        // is apparently still meaningful on a closed statement - confirm intended.
+        assertFalse("prepareThreshold=0, so the statement should not be server-prepared",
+                ((PGStatement) pstmt).isUseServerPrepare());
+        assertEquals("prepareThreshold=0, so the statement should not be server-prepared", 0,
+                getNumberOfServerPreparedStatements("INSERT INTO batch_tab_threshold0 (id, val) VALUES ($1,$2)"));
+    }
+
+    @Test
+    public void testSelectPrepareThreshold0AutoCommitFalseFetchSizeNonZero() throws SQLException {
+        assumeBinaryModeRegular();
+        Assume.assumeTrue("simple protocol only does not support prepared statement requests",
+                preferQueryMode != PreferQueryMode.SIMPLE);
+
+        con.setAutoCommit(false);
+        PreparedStatement pstmt = null;
+        ResultSet rs = null;
+        try {
+            pstmt = con.prepareStatement("SELECT 42");
+            ((PgStatement) pstmt).setPrepareThreshold(0);
+            pstmt.setFetchSize(1);
+            rs = pstmt.executeQuery();
+            rs.next();
+            assertEquals(42, rs.getInt(1));
         } finally {
-          rs.close();
+            TestUtil.closeQuietly(rs);
+            TestUtil.closeQuietly(pstmt);
         }
-      }
-    } finally {
-      ps.close();
-    }
-  }
 
-  private void assertFirstParameterTypeName(String msg, String expected, PreparedStatement ps) throws SQLException {
-    if (preferQueryMode == PreferQueryMode.SIMPLE) {
-      return;
-    }
-    ParameterMetaData pmd = ps.getParameterMetaData();
-    assertEquals("getParameterMetaData().getParameterTypeName(1) " + msg,
-        expected, pmd.getParameterTypeName(1));
-  }
+        assertFalse("prepareThreshold=0, so the statement should not be server-prepared",
+                ((PGStatement) pstmt).isUseServerPrepare());
 
-  @Test
-  public void testAlternatingBindType() throws SQLException {
-    assumeBinaryModeForce();
-    PreparedStatement ps = con.prepareStatement("SELECT /*testAlternatingBindType*/ ?");
-    ResultSet rs;
-    Logger log = Logger.getLogger("org.postgresql.core.v3.SimpleQuery");
-    Level prevLevel = log.getLevel();
-    if (prevLevel == null || prevLevel.intValue() > Level.FINER.intValue()) {
-      log.setLevel(Level.FINER);
+        assertEquals("prepareThreshold=0, so the statement should not be server-prepared", 0,
+                getNumberOfServerPreparedStatements("SELECT 42"));
     }
-    final AtomicInteger numOfReParses = new AtomicInteger();
-    Handler handler = new Handler() {
-      @Override
-      public void publish(LogRecord record) {
-        if (record.getMessage().contains("un-prepare it and parse")) {
-          numOfReParses.incrementAndGet();
+
+    @Test
+    public void testInappropriateStatementSharing() throws SQLException {
+        PreparedStatement ps = con.prepareStatement("SELECT ?::timestamp");
+        assertFirstParameterTypeName("after prepare ?::timestamp bind type should be timestamp", "timestamp", ps);
+        try {
+            Timestamp ts = new Timestamp(1474997614836L);
+            // Since PreparedStatement isn't cached immediately, we need to some warm up
+            for (int i = 0; i < 3; i++) {
+                ResultSet rs;
+
+                // Flip statement to use Oid.DATE
+                ps.setNull(1, Types.DATE);
+                assertFirstParameterTypeName("set parameter to DATE", "date", ps);
+                rs = ps.executeQuery();
+                assertFirstParameterTypeName("set parameter to DATE (executeQuery should not affect parameterMetadata)",
+                        "date", ps);
+                try {
+                    assertTrue(rs.next());
+                    assertNull("NULL DATE converted to TIMESTAMP should return NULL value on getObject",
+                            rs.getObject(1));
+                } finally {
+                    rs.close();
+                }
+
+                // Flop statement to use Oid.UNSPECIFIED
+                ps.setTimestamp(1, ts);
+                assertFirstParameterTypeName("set parameter to Timestamp", "timestamp", ps);
+                rs = ps.executeQuery();
+                assertFirstParameterTypeName("set parameter to Timestamp (executeQuery should not affect parameterMetadata)",
+                        "timestamp", ps);
+                try {
+                    assertTrue(rs.next());
+                    assertEquals(
+                            "Looks like we got a narrowing of the data (TIMESTAMP -> DATE). It might caused by inappropriate caching of the statement.",
+                            ts, rs.getObject(1));
+                } finally {
+                    rs.close();
+                }
+            }
+        } finally {
+            ps.close();
         }
-      }
-
-      @Override
-      public void flush() {
-      }
-
-      @Override
-      public void close() throws SecurityException {
-      }
-    };
-    log.addHandler(handler);
-    try {
-      ps.setString(1, "42");
-      rs = ps.executeQuery();
-      rs.next();
-      Assert.assertEquals("setString(1, \"42\") -> \"42\" expected", "42", rs.getObject(1));
-      rs.close();
-
-      // The bind type is flipped from VARCHAR to INTEGER, and it causes the driver to prepare statement again
-      ps.setNull(1, Types.INTEGER);
-      rs = ps.executeQuery();
-      rs.next();
-      Assert.assertNull("setNull(1, Types.INTEGER) -> null expected", rs.getObject(1));
-      Assert.assertEquals("A re-parse was expected, so the number of parses should be 1",
-          1, numOfReParses.get());
-      rs.close();
-
-      // The bind type is flipped from INTEGER to VARCHAR, and it causes the driver to prepare statement again
-      ps.setString(1, "42");
-      rs = ps.executeQuery();
-      rs.next();
-      Assert.assertEquals("setString(1, \"42\") -> \"42\" expected", "42", rs.getObject(1));
-      Assert.assertEquals("One more re-parse is expected, so the number of parses should be 2",
-          2, numOfReParses.get());
-      rs.close();
-
-      // Types.OTHER null is sent as UNSPECIFIED, and pgjdbc does not re-parse on UNSPECIFIED nulls
-      // Note: do not rely on absence of re-parse on using Types.OTHER. Try using consistent data types
-      ps.setNull(1, Types.OTHER);
-      rs = ps.executeQuery();
-      rs.next();
-      Assert.assertNull("setNull(1, Types.OTHER) -> null expected", rs.getObject(1));
-      Assert.assertEquals("setNull(, Types.OTHER) should not cause re-parse",
-          2, numOfReParses.get());
-
-      // Types.INTEGER null is sent as int4 null, and it leads to re-parse
-      ps.setNull(1, Types.INTEGER);
-      rs = ps.executeQuery();
-      rs.next();
-      Assert.assertNull("setNull(1, Types.INTEGER) -> null expected", rs.getObject(1));
-      Assert.assertEquals("setNull(, Types.INTEGER) causes re-parse",
-          3, numOfReParses.get());
-      rs.close();
-    } finally {
-      TestUtil.closeQuietly(ps);
-      log.removeHandler(handler);
-      log.setLevel(prevLevel);
     }
-  }
 
-  @Test
-  public void testNoParametersNPE() throws SQLException {
-    try {
-      PreparedStatement ps = con.prepareStatement("select 1");
-      ps.setString(1, "null");
-    } catch ( NullPointerException ex ) {
-      fail("Should throw a SQLException");
-    } catch (SQLException ex) {
-      // ignore
+    /**
+     * Asserts the driver-reported type name of parameter 1 via getParameterMetaData.
+     * No-op when preferQueryMode is SIMPLE (parameter metadata is not checked there).
+     *
+     * @param msg      context appended to the assertion message
+     * @param expected expected value of getParameterTypeName(1)
+     * @param ps       statement whose parameter metadata is inspected
+     */
+    private void assertFirstParameterTypeName(String msg, String expected, PreparedStatement ps) throws SQLException {
+        if (preferQueryMode == PreferQueryMode.SIMPLE) {
+            return;
+        }
+        ParameterMetaData pmd = ps.getParameterMetaData();
+        assertEquals("getParameterMetaData().getParameterTypeName(1) " + msg,
+                expected, pmd.getParameterTypeName(1));
+    }
+
+    /**
+     * Verifies how alternating bind types affect server-side re-parsing. A JUL
+     * handler attached to the org.postgresql.core.v3.SimpleQuery logger counts
+     * "un-prepare it and parse" messages: flipping between VARCHAR and INTEGER
+     * binds triggers a re-parse each time, while a Types.OTHER null (sent as
+     * UNSPECIFIED) does not. The logger level is temporarily raised to FINER and
+     * restored in the finally block.
+     */
+    @Test
+    public void testAlternatingBindType() throws SQLException {
+        assumeBinaryModeForce();
+        PreparedStatement ps = con.prepareStatement("SELECT /*testAlternatingBindType*/ ?");
+        ResultSet rs;
+        Logger log = Logger.getLogger("org.postgresql.core.v3.SimpleQuery");
+        Level prevLevel = log.getLevel();
+        // Ensure FINER records are emitted, otherwise the handler never fires.
+        if (prevLevel == null || prevLevel.intValue() > Level.FINER.intValue()) {
+            log.setLevel(Level.FINER);
+        }
+        final AtomicInteger numOfReParses = new AtomicInteger();
+        // Counts re-parse log messages produced by the driver.
+        Handler handler = new Handler() {
+            @Override
+            public void publish(LogRecord record) {
+                if (record.getMessage().contains("un-prepare it and parse")) {
+                    numOfReParses.incrementAndGet();
+                }
+            }
+
+            @Override
+            public void flush() {
+            }
+
+            @Override
+            public void close() throws SecurityException {
+            }
+        };
+        log.addHandler(handler);
+        try {
+            ps.setString(1, "42");
+            rs = ps.executeQuery();
+            rs.next();
+            Assert.assertEquals("setString(1, \"42\") -> \"42\" expected", "42", rs.getObject(1));
+            rs.close();
+
+            // The bind type is flipped from VARCHAR to INTEGER, and it causes the driver to prepare statement again
+            ps.setNull(1, Types.INTEGER);
+            rs = ps.executeQuery();
+            rs.next();
+            Assert.assertNull("setNull(1, Types.INTEGER) -> null expected", rs.getObject(1));
+            Assert.assertEquals("A re-parse was expected, so the number of parses should be 1",
+                    1, numOfReParses.get());
+            rs.close();
+
+            // The bind type is flipped from INTEGER to VARCHAR, and it causes the driver to prepare statement again
+            ps.setString(1, "42");
+            rs = ps.executeQuery();
+            rs.next();
+            Assert.assertEquals("setString(1, \"42\") -> \"42\" expected", "42", rs.getObject(1));
+            Assert.assertEquals("One more re-parse is expected, so the number of parses should be 2",
+                    2, numOfReParses.get());
+            rs.close();
+
+            // Types.OTHER null is sent as UNSPECIFIED, and pgjdbc does not re-parse on UNSPECIFIED nulls
+            // Note: do not rely on absence of re-parse on using Types.OTHER. Try using consistent data types
+            ps.setNull(1, Types.OTHER);
+            rs = ps.executeQuery();
+            rs.next();
+            Assert.assertNull("setNull(1, Types.OTHER) -> null expected", rs.getObject(1));
+            Assert.assertEquals("setNull(, Types.OTHER) should not cause re-parse",
+                    2, numOfReParses.get());
+
+            // Types.INTEGER null is sent as int4 null, and it leads to re-parse
+            ps.setNull(1, Types.INTEGER);
+            rs = ps.executeQuery();
+            rs.next();
+            Assert.assertNull("setNull(1, Types.INTEGER) -> null expected", rs.getObject(1));
+            Assert.assertEquals("setNull(, Types.INTEGER) causes re-parse",
+                    3, numOfReParses.get());
+            rs.close();
+        } finally {
+            // Restore the logger state even if an assertion fails.
+            TestUtil.closeQuietly(ps);
+            log.removeHandler(handler);
+            log.setLevel(prevLevel);
+        }
+    }
+
+    /**
+     * Verifies that setString on a statement with no parameter placeholders throws
+     * SQLException rather than NullPointerException.
+     * NOTE(review): if no exception is thrown at all the test passes vacuously -
+     * confirm whether a fail() after setString is desired.
+     */
+    @Test
+    public void testNoParametersNPE() throws SQLException {
+        try {
+            PreparedStatement ps = con.prepareStatement("select 1");
+            ps.setString(1, "null");
+        } catch (NullPointerException ex) {
+            fail("Should throw a SQLException");
+        } catch (SQLException ex) {
+            // ignore: SQLException is the expected outcome
+        }
     }
-  }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/QuotationTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/QuotationTest.java
index 84637af..f6d7fd4 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/QuotationTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/QuotationTest.java
@@ -5,143 +5,139 @@
 
 package org.postgresql.test.jdbc2;
 
-import org.postgresql.test.SlowTests;
-import org.postgresql.test.TestUtil;
-
-import org.junit.Assert;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.util.ArrayList;
 import java.util.Collection;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.postgresql.test.SlowTests;
+import org.postgresql.test.TestUtil;
 
 @RunWith(Parameterized.class)
 public class QuotationTest extends BaseTest4 {
-  private enum QuoteStyle {
-    SIMPLE("'"), DOLLAR_NOTAG("$$"), DOLLAR_A("$a$"), DOLLAR_DEF("$DEF$"),
-    SMILING_FACE("$o‿o$")
-    ;
-
-    private final String quote;
-
-    QuoteStyle(String quote) {
-      this.quote = quote;
+    private final String expr;
+    private final String expected;
+    public QuotationTest(QuoteStyle quoteStyle, String expected, String expr) {
+        this.expected = expected;
+        this.expr = expr;
     }
 
-    @Override
-    public String toString() {
-      return quote;
-    }
-  }
+    @Parameterized.Parameters(name = "{index}: quotes(style={0}, src={1}, quoted={2})")
+    public static Iterable<Object[]> data() {
+        Collection<String> prefix = new ArrayList<>();
+        // Too many prefixes make test run long
+        prefix.add("");
+        prefix.add("/*\n$\n*//* ? *//*{fn *//* now} */");
+        prefix.add("-- $\n");
+        prefix.add("--\n/* $ */");
 
-  private final String expr;
-  private final String expected;
-
-  public QuotationTest(QuoteStyle quoteStyle, String expected, String expr) {
-    this.expected = expected;
-    this.expr = expr;
-  }
-
-  @Parameterized.Parameters(name = "{index}: quotes(style={0}, src={1}, quoted={2})")
-  public static Iterable<Object[]> data() {
-    Collection<String> prefix = new ArrayList<>();
-    // Too many prefixes make test run long
-    prefix.add("");
-    prefix.add("/*\n$\n*//* ? *//*{fn *//* now} */");
-    prefix.add("-- $\n");
-    prefix.add("--\n/* $ */");
-
-    Collection<Object[]> ids = new ArrayList<>();
-    Collection<String> garbageValues = new ArrayList<>();
-    garbageValues.add("{fn now}");
-    garbageValues.add("{extract}");
-    garbageValues.add("{select}");
-    garbageValues.add("?select");
-    garbageValues.add("select?");
-    garbageValues.add("??select");
-    garbageValues.add("}{");
-    garbageValues.add("{");
-    garbageValues.add("}");
-    garbageValues.add("--");
-    garbageValues.add("/*");
-    garbageValues.add("*/");
-    for (QuoteStyle quoteStyle : QuoteStyle.values()) {
-      garbageValues.add(quoteStyle.toString());
-    }
-    for (char ch = 'a'; ch <= 'z'; ch++) {
-      garbageValues.add(Character.toString(ch));
-    }
-
-    for (QuoteStyle quoteStyle : QuoteStyle.values()) {
-      for (String garbage : garbageValues) {
-        String unquoted = garbage;
-        for (int i = 0; i < 3; i++) {
-          String quoted = unquoted;
-          if (quoteStyle == QuoteStyle.SIMPLE) {
-            quoted = quoted.replaceAll("'", "''");
-          }
-          quoted = quoteStyle.toString() + quoted + quoteStyle.toString();
-          if (quoted.endsWith("$$$") && quoteStyle == QuoteStyle.DOLLAR_NOTAG) {
-            // $$$a$$$ is parsed like $$ $a $$ $ -> thus we skip this test
-            continue;
-          }
-          if (quoteStyle != QuoteStyle.SIMPLE && garbage.equals(quoteStyle.toString())) {
-            // $a$$a$$a$ is not valid
-            continue;
-          }
-          String expected = unquoted;
-          for (String p : prefix) {
-            ids.add(new Object[]{quoteStyle, expected, p + quoted});
-          }
-          if (unquoted.length() == 1) {
-            char ch = unquoted.charAt(0);
-            if (ch >= 'a' && ch <= 'z') {
-              // Will assume if 'a' works, then 'aa', 'aaa' will also work
-              break;
-            }
-          }
-          unquoted += garbage;
+        Collection<Object[]> ids = new ArrayList<>();
+        Collection<String> garbageValues = new ArrayList<>();
+        garbageValues.add("{fn now}");
+        garbageValues.add("{extract}");
+        garbageValues.add("{select}");
+        garbageValues.add("?select");
+        garbageValues.add("select?");
+        garbageValues.add("??select");
+        garbageValues.add("}{");
+        garbageValues.add("{");
+        garbageValues.add("}");
+        garbageValues.add("--");
+        garbageValues.add("/*");
+        garbageValues.add("*/");
+        for (QuoteStyle quoteStyle : QuoteStyle.values()) {
+            garbageValues.add(quoteStyle.toString());
         }
-      }
+        for (char ch = 'a'; ch <= 'z'; ch++) {
+            garbageValues.add(Character.toString(ch));
+        }
+
+        for (QuoteStyle quoteStyle : QuoteStyle.values()) {
+            for (String garbage : garbageValues) {
+                String unquoted = garbage;
+                for (int i = 0; i < 3; i++) {
+                    String quoted = unquoted;
+                    if (quoteStyle == QuoteStyle.SIMPLE) {
+                        quoted = quoted.replaceAll("'", "''");
+                    }
+                    quoted = quoteStyle.toString() + quoted + quoteStyle.toString();
+                    if (quoted.endsWith("$$$") && quoteStyle == QuoteStyle.DOLLAR_NOTAG) {
+                        // $$$a$$$ is parsed like $$ $a $$ $ -> thus we skip this test
+                        continue;
+                    }
+                    if (quoteStyle != QuoteStyle.SIMPLE && garbage.equals(quoteStyle.toString())) {
+                        // $a$$a$$a$ is not valid
+                        continue;
+                    }
+                    String expected = unquoted;
+                    for (String p : prefix) {
+                        ids.add(new Object[]{quoteStyle, expected, p + quoted});
+                    }
+                    if (unquoted.length() == 1) {
+                        char ch = unquoted.charAt(0);
+                        if (ch >= 'a' && ch <= 'z') {
+                            // Will assume if 'a' works, then 'aa', 'aaa' will also work
+                            break;
+                        }
+                    }
+                    unquoted += garbage;
+                }
+            }
+        }
+
+        return ids;
     }
 
-    return ids;
-  }
-
-  @Test
-  @Category(SlowTests.class)
-  public void quotedString() throws SQLException {
-    PreparedStatement ps = con.prepareStatement("select " + expr);
-    try {
-      ResultSet rs = ps.executeQuery();
-      rs.next();
-      String val = rs.getString(1);
-      Assert.assertEquals(expected, val);
-    } catch (SQLException e) {
-      TestUtil.closeQuietly(ps);
+    @Test
+    @Category(SlowTests.class)
+    public void quotedString() throws SQLException {
+        PreparedStatement ps = con.prepareStatement("select " + expr);
+        try {
+            ResultSet rs = ps.executeQuery();
+            rs.next();
+            String val = rs.getString(1);
+            Assert.assertEquals(expected, val);
+        } catch (SQLException e) {
+            TestUtil.closeQuietly(ps);
+        }
     }
-  }
 
-  @Test
-  @Category(SlowTests.class)
-  public void bindInTheMiddle() throws SQLException {
-    PreparedStatement ps = con.prepareStatement("select " + expr + ", ?, " + expr);
-    try {
-      ps.setInt(1, 42);
-      ResultSet rs = ps.executeQuery();
-      rs.next();
-      String val1 = rs.getString(1);
-      String val3 = rs.getString(3);
-      Assert.assertEquals(expected, val1);
-      Assert.assertEquals(expected, val3);
-    } catch (SQLException e) {
-      TestUtil.closeQuietly(ps);
+    @Test
+    @Category(SlowTests.class)
+    public void bindInTheMiddle() throws SQLException {
+        PreparedStatement ps = con.prepareStatement("select " + expr + ", ?, " + expr);
+        try {
+            ps.setInt(1, 42);
+            ResultSet rs = ps.executeQuery();
+            rs.next();
+            String val1 = rs.getString(1);
+            String val3 = rs.getString(3);
+            Assert.assertEquals(expected, val1);
+            Assert.assertEquals(expected, val3);
+        } catch (SQLException e) {
+            TestUtil.closeQuietly(ps);
+        }
+    }
+
+    private enum QuoteStyle {
+        SIMPLE("'"), DOLLAR_NOTAG("$$"), DOLLAR_A("$a$"), DOLLAR_DEF("$DEF$"),
+        SMILING_FACE("$o‿o$");
+
+        private final String quote;
+
+        QuoteStyle(String quote) {
+            this.quote = quote;
+        }
+
+        @Override
+        public String toString() {
+            return quote;
+        }
     }
-  }
 
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/RefCursorFetchTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/RefCursorFetchTest.java
index a56a310..b9bb270 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/RefCursorFetchTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/RefCursorFetchTest.java
@@ -28,139 +28,139 @@ import java.util.Properties;
 
 @RunWith(Parameterized.class)
 public class RefCursorFetchTest extends BaseTest4 {
-  private final int numRows;
-  private final Integer defaultFetchSize;
-  private final Integer statementFetchSize;
-  private final Integer resultSetFetchSize;
-  private final AutoCommit autoCommit;
-  private final boolean commitAfterExecute;
+    private final int numRows;
+    private final Integer defaultFetchSize;
+    private final Integer statementFetchSize;
+    private final Integer resultSetFetchSize;
+    private final AutoCommit autoCommit;
+    private final boolean commitAfterExecute;
 
-  public RefCursorFetchTest(BinaryMode binaryMode, int numRows,
-      Integer defaultFetchSize,
-      Integer statementFetchSize,
-      Integer resultSetFetchSize,
-      AutoCommit autoCommit, boolean commitAfterExecute) {
-    this.numRows = numRows;
-    this.defaultFetchSize = defaultFetchSize;
-    this.statementFetchSize = statementFetchSize;
-    this.resultSetFetchSize = resultSetFetchSize;
-    this.autoCommit = autoCommit;
-    this.commitAfterExecute = commitAfterExecute;
-    setBinaryMode(binaryMode);
-  }
+    public RefCursorFetchTest(BinaryMode binaryMode, int numRows,
+                              Integer defaultFetchSize,
+                              Integer statementFetchSize,
+                              Integer resultSetFetchSize,
+                              AutoCommit autoCommit, boolean commitAfterExecute) {
+        this.numRows = numRows;
+        this.defaultFetchSize = defaultFetchSize;
+        this.statementFetchSize = statementFetchSize;
+        this.resultSetFetchSize = resultSetFetchSize;
+        this.autoCommit = autoCommit;
+        this.commitAfterExecute = commitAfterExecute;
+        setBinaryMode(binaryMode);
+    }
 
-  @Parameterized.Parameters(name = "binary = {0}, numRows = {1}, defaultFetchSize = {2}, statementFetchSize = {3}, resultSetFetchSize = {4}, autoCommit = {5}, commitAfterExecute = {6}")
-  public static Iterable<Object[]> data() {
-    Collection<Object[]> ids = new ArrayList<>();
-    for (BinaryMode binaryMode : BinaryMode.values()) {
-      for (int numRows : new int[]{0, 10, 101}) {
-        for (Integer defaultFetchSize : new Integer[]{null, 0, 9, 50}) {
-          for (AutoCommit autoCommit : AutoCommit.values()) {
-            for (boolean commitAfterExecute : new boolean[]{true, false}) {
-              for (Integer resultSetFetchSize : new Integer[]{null, 0, 9, 50}) {
-                for (Integer statementFetchSize : new Integer[]{null, 0, 9, 50}) {
-                  ids.add(new Object[]{binaryMode, numRows, defaultFetchSize, statementFetchSize, resultSetFetchSize, autoCommit, commitAfterExecute});
+    @Parameterized.Parameters(name = "binary = {0}, numRows = {1}, defaultFetchSize = {2}, statementFetchSize = {3}, resultSetFetchSize = {4}, autoCommit = {5}, commitAfterExecute = {6}")
+    public static Iterable<Object[]> data() {
+        Collection<Object[]> ids = new ArrayList<>();
+        for (BinaryMode binaryMode : BinaryMode.values()) {
+            for (int numRows : new int[]{0, 10, 101}) {
+                for (Integer defaultFetchSize : new Integer[]{null, 0, 9, 50}) {
+                    for (AutoCommit autoCommit : AutoCommit.values()) {
+                        for (boolean commitAfterExecute : new boolean[]{true, false}) {
+                            for (Integer resultSetFetchSize : new Integer[]{null, 0, 9, 50}) {
+                                for (Integer statementFetchSize : new Integer[]{null, 0, 9, 50}) {
+                                    ids.add(new Object[]{binaryMode, numRows, defaultFetchSize, statementFetchSize, resultSetFetchSize, autoCommit, commitAfterExecute});
+                                }
+                            }
+                        }
+                    }
                 }
-              }
             }
-          }
         }
-      }
+        return ids;
     }
-    return ids;
-  }
 
-  @Override
-  protected void updateProperties(Properties props) {
-    super.updateProperties(props);
-    if (defaultFetchSize != null) {
-      PGProperty.DEFAULT_ROW_FETCH_SIZE.set(props, defaultFetchSize);
-    }
-  }
+    @BeforeClass
+    public static void beforeClass() throws Exception {
+        TestUtil.assumeHaveMinimumServerVersion(ServerVersion.v9_0);
+        try (Connection con = TestUtil.openDB()) {
+            assumeCallableStatementsSupported(con);
+            TestUtil.createTable(con, "test_blob", "content bytea");
+            TestUtil.execute(con, "");
+            TestUtil.execute(con, "--create function to read data\n"
+                    + "CREATE OR REPLACE FUNCTION test_blob(p_cur OUT REFCURSOR, p_limit int4) AS $body$\n"
+                    + "BEGIN\n"
+                    + "OPEN p_cur FOR SELECT content FROM test_blob LIMIT p_limit;\n"
+                    + "END;\n"
+                    + "$body$ LANGUAGE plpgsql STABLE");
 
-  @BeforeClass
-  public static void beforeClass() throws Exception {
-    TestUtil.assumeHaveMinimumServerVersion(ServerVersion.v9_0);
-    try (Connection con = TestUtil.openDB()) {
-      assumeCallableStatementsSupported(con);
-      TestUtil.createTable(con, "test_blob", "content bytea");
-      TestUtil.execute(con, "");
-      TestUtil.execute(con, "--create function to read data\n"
-          + "CREATE OR REPLACE FUNCTION test_blob(p_cur OUT REFCURSOR, p_limit int4) AS $body$\n"
-          + "BEGIN\n"
-          + "OPEN p_cur FOR SELECT content FROM test_blob LIMIT p_limit;\n"
-          + "END;\n"
-          + "$body$ LANGUAGE plpgsql STABLE");
-
-      TestUtil.execute(con, "--generate 101 rows with 4096 bytes:\n"
-          + "insert into test_blob\n"
-          + "select(select decode(string_agg(lpad(to_hex(width_bucket(random(), 0, 1, 256) - 1), 2, '0'), ''), 'hex')"
-          + " FROM generate_series(1, 4096))\n"
-          + "from generate_series (1, 200)");
-    }
-  }
-
-  @AfterClass
-  public static void afterClass() throws Exception {
-    try (Connection con = TestUtil.openDB()) {
-      TestUtil.dropTable(con, "test_blob");
-      TestUtil.dropFunction(con, "test_blob", "REFCURSOR, int4");
-    }
-  }
-
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-    con.setAutoCommit(autoCommit == AutoCommit.YES);
-  }
-
-  @Test
-  public void fetchAllRows() throws SQLException {
-    int cnt = 0;
-    try (CallableStatement call = con.prepareCall("{? = call test_blob(?)}")) {
-      con.setAutoCommit(false); // ref cursors only work if auto commit is off
-      if (statementFetchSize != null) {
-        call.setFetchSize(statementFetchSize);
-      }
-      call.registerOutParameter(1, Types.REF_CURSOR);
-      call.setInt(2, numRows);
-      call.execute();
-      if (commitAfterExecute) {
-        if (autoCommit == AutoCommit.NO) {
-          con.commit();
-        } else {
-          con.setAutoCommit(false);
-          con.setAutoCommit(true);
+            TestUtil.execute(con, "--generate 101 rows with 4096 bytes:\n"
+                    + "insert into test_blob\n"
+                    + "select(select decode(string_agg(lpad(to_hex(width_bucket(random(), 0, 1, 256) - 1), 2, '0'), ''), 'hex')"
+                    + " FROM generate_series(1, 4096))\n"
+                    + "from generate_series (1, 200)");
+        }
+    }
+
+    @AfterClass
+    public static void afterClass() throws Exception {
+        try (Connection con = TestUtil.openDB()) {
+            TestUtil.dropTable(con, "test_blob");
+            TestUtil.dropFunction(con, "test_blob", "REFCURSOR, int4");
+        }
+    }
+
+    @Override
+    protected void updateProperties(Properties props) {
+        super.updateProperties(props);
+        if (defaultFetchSize != null) {
+            PGProperty.DEFAULT_ROW_FETCH_SIZE.set(props, defaultFetchSize);
+        }
+    }
+
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        con.setAutoCommit(autoCommit == AutoCommit.YES);
+    }
+
+    @Test
+    public void fetchAllRows() throws SQLException {
+        int cnt = 0;
+        try (CallableStatement call = con.prepareCall("{? = call test_blob(?)}")) {
+            con.setAutoCommit(false); // ref cursors only work if auto commit is off
+            if (statementFetchSize != null) {
+                call.setFetchSize(statementFetchSize);
+            }
+            call.registerOutParameter(1, Types.REF_CURSOR);
+            call.setInt(2, numRows);
+            call.execute();
+            if (commitAfterExecute) {
+                if (autoCommit == AutoCommit.NO) {
+                    con.commit();
+                } else {
+                    con.setAutoCommit(false);
+                    con.setAutoCommit(true);
+                }
+            }
+            try (ResultSet rs = (ResultSet) call.getObject(1)) {
+                if (resultSetFetchSize != null) {
+                    rs.setFetchSize(resultSetFetchSize);
+                }
+                while (rs.next()) {
+                    cnt++;
+                }
+                assertEquals("number of rows from test_blob(...) call", numRows, cnt);
+            } catch (SQLException e) {
+                if (commitAfterExecute && "34000".equals(e.getSQLState())) {
+                    // Transaction commit closes refcursor, so the fetch call is expected to fail
+                    // File: postgres.c, Routine: exec_execute_message, Line: 2070
+                    //   Server SQLState: 34000
+                    // TODO: add statementFetchSize, resultSetFetchSize when implemented
+                    Integer fetchSize = defaultFetchSize;
+                    int expectedRows =
+                            fetchSize != null && fetchSize != 0 ? Math.min(fetchSize, numRows) : numRows;
+                    assertEquals(
+                            "The transaction was committed before processing the results,"
+                                    + " so expecting ResultSet to buffer fetchSize=" + fetchSize + " rows out of "
+                                    + numRows,
+                            expectedRows,
+                            cnt
+                    );
+                    return;
+                }
+                throw e;
+            }
         }
-      }
-      try (ResultSet rs = (ResultSet) call.getObject(1)) {
-        if (resultSetFetchSize != null) {
-          rs.setFetchSize(resultSetFetchSize);
-        }
-        while (rs.next()) {
-          cnt++;
-        }
-        assertEquals("number of rows from test_blob(...) call", numRows, cnt);
-      } catch (SQLException e) {
-        if (commitAfterExecute && "34000".equals(e.getSQLState())) {
-          // Transaction commit closes refcursor, so the fetch call is expected to fail
-          // File: postgres.c, Routine: exec_execute_message, Line: 2070
-          //   Server SQLState: 34000
-          // TODO: add statementFetchSize, resultSetFetchSize when implemented
-          Integer fetchSize = defaultFetchSize;
-          int expectedRows =
-              fetchSize != null && fetchSize != 0 ? Math.min(fetchSize, numRows) : numRows;
-          assertEquals(
-              "The transaction was committed before processing the results,"
-                  + " so expecting ResultSet to buffer fetchSize=" + fetchSize + " rows out of "
-                  + numRows,
-              expectedRows,
-              cnt
-          );
-          return;
-        }
-        throw e;
-      }
     }
-  }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/RefCursorTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/RefCursorTest.java
index 2358c57..267348d 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/RefCursorTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/RefCursorTest.java
@@ -5,18 +5,6 @@
 
 package org.postgresql.test.jdbc2;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-
-import org.postgresql.test.TestUtil;
-
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-
 import java.sql.CallableStatement;
 import java.sql.Connection;
 import java.sql.ResultSet;
@@ -25,6 +13,15 @@ import java.sql.SQLException;
 import java.sql.Statement;
 import java.sql.Types;
 import java.util.Arrays;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.postgresql.test.TestUtil;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
 
 /**
  * RefCursor ResultSet tests. This test case is basically the same as the ResultSet test case.
@@ -37,139 +34,139 @@ import java.util.Arrays;
 @RunWith(Parameterized.class)
 public class RefCursorTest extends BaseTest4 {
 
-  private final int cursorType;
+    private final int cursorType;
 
-  public RefCursorTest(String typeName, int cursorType) {
-    this.cursorType = cursorType;
-  }
-
-  @Parameterized.Parameters(name = "typeName = {0}, cursorType = {1}")
-  public static Iterable<Object[]> data() {
-    return Arrays.asList(new Object[][]{
-        {"OTHER", Types.OTHER},
-        {"REF_CURSOR", Types.REF_CURSOR},
-    });
-  }
-
-  @BeforeClass
-  public static void beforeClass() throws Exception {
-    try (Connection con = TestUtil.openDB()) {
-      assumeCallableStatementsSupported(con);
+    public RefCursorTest(String typeName, int cursorType) {
+        this.cursorType = cursorType;
     }
-  }
 
-  @Override
-  public void setUp() throws Exception {
-    // this is the same as the ResultSet setup.
-    super.setUp();
-    Statement stmt = con.createStatement();
+    @Parameterized.Parameters(name = "typeName = {0}, cursorType = {1}")
+    public static Iterable<Object[]> data() {
+        return Arrays.asList(new Object[][]{
+                {"OTHER", Types.OTHER},
+                {"REF_CURSOR", Types.REF_CURSOR},
+        });
+    }
 
-    TestUtil.createTable(con, "testrs", "id integer primary key");
+    @BeforeClass
+    public static void beforeClass() throws Exception {
+        try (Connection con = TestUtil.openDB()) {
+            assumeCallableStatementsSupported(con);
+        }
+    }
 
-    stmt.executeUpdate("INSERT INTO testrs VALUES (1)");
-    stmt.executeUpdate("INSERT INTO testrs VALUES (2)");
-    stmt.executeUpdate("INSERT INTO testrs VALUES (3)");
-    stmt.executeUpdate("INSERT INTO testrs VALUES (4)");
-    stmt.executeUpdate("INSERT INTO testrs VALUES (6)");
-    stmt.executeUpdate("INSERT INTO testrs VALUES (9)");
+    @Override
+    public void setUp() throws Exception {
+        // this is the same as the ResultSet setup.
+        super.setUp();
+        Statement stmt = con.createStatement();
 
-    // Create the functions.
-    stmt.execute("CREATE OR REPLACE FUNCTION testspg__getRefcursor () RETURNS refcursor AS '"
-        + "declare v_resset refcursor; begin open v_resset for select id from testrs order by id; "
-        + "return v_resset; end;' LANGUAGE plpgsql;");
-    stmt.execute("CREATE OR REPLACE FUNCTION testspg__getEmptyRefcursor () RETURNS refcursor AS '"
-        + "declare v_resset refcursor; begin open v_resset for select id from testrs where id < 1 order by id; "
-        + "return v_resset; end;' LANGUAGE plpgsql;");
-    stmt.close();
-    con.setAutoCommit(false);
-  }
+        TestUtil.createTable(con, "testrs", "id integer primary key");
 
-  @Override
-  public void tearDown() throws SQLException {
-    con.setAutoCommit(true);
-    Statement stmt = con.createStatement();
-    stmt.execute("drop FUNCTION testspg__getRefcursor ();");
-    stmt.execute("drop FUNCTION testspg__getEmptyRefcursor ();");
-    TestUtil.dropTable(con, "testrs");
-    super.tearDown();
-  }
+        stmt.executeUpdate("INSERT INTO testrs VALUES (1)");
+        stmt.executeUpdate("INSERT INTO testrs VALUES (2)");
+        stmt.executeUpdate("INSERT INTO testrs VALUES (3)");
+        stmt.executeUpdate("INSERT INTO testrs VALUES (4)");
+        stmt.executeUpdate("INSERT INTO testrs VALUES (6)");
+        stmt.executeUpdate("INSERT INTO testrs VALUES (9)");
 
-  @Test
-  public void testResult() throws SQLException {
-    CallableStatement call = con.prepareCall("{ ? = call testspg__getRefcursor () }");
-    call.registerOutParameter(1, cursorType);
-    call.execute();
-    ResultSet rs = (ResultSet) call.getObject(1);
+        // Create the functions.
+        stmt.execute("CREATE OR REPLACE FUNCTION testspg__getRefcursor () RETURNS refcursor AS '"
+                + "declare v_resset refcursor; begin open v_resset for select id from testrs order by id; "
+                + "return v_resset; end;' LANGUAGE plpgsql;");
+        stmt.execute("CREATE OR REPLACE FUNCTION testspg__getEmptyRefcursor () RETURNS refcursor AS '"
+                + "declare v_resset refcursor; begin open v_resset for select id from testrs where id < 1 order by id; "
+                + "return v_resset; end;' LANGUAGE plpgsql;");
+        stmt.close();
+        con.setAutoCommit(false);
+    }
 
-    assertTrue(rs.next());
-    assertEquals(1, rs.getInt(1));
+    @Override
+    public void tearDown() throws SQLException {
+        con.setAutoCommit(true);
+        Statement stmt = con.createStatement();
+        stmt.execute("drop FUNCTION testspg__getRefcursor ();");
+        stmt.execute("drop FUNCTION testspg__getEmptyRefcursor ();");
+        TestUtil.dropTable(con, "testrs");
+        super.tearDown();
+    }
 
-    assertTrue(rs.next());
-    assertEquals(2, rs.getInt(1));
+    @Test
+    public void testResult() throws SQLException {
+        CallableStatement call = con.prepareCall("{ ? = call testspg__getRefcursor () }");
+        call.registerOutParameter(1, cursorType);
+        call.execute();
+        ResultSet rs = (ResultSet) call.getObject(1);
 
-    assertTrue(rs.next());
-    assertEquals(3, rs.getInt(1));
+        assertTrue(rs.next());
+        assertEquals(1, rs.getInt(1));
 
-    assertTrue(rs.next());
-    assertEquals(4, rs.getInt(1));
+        assertTrue(rs.next());
+        assertEquals(2, rs.getInt(1));
 
-    assertTrue(rs.next());
-    assertEquals(6, rs.getInt(1));
+        assertTrue(rs.next());
+        assertEquals(3, rs.getInt(1));
 
-    assertTrue(rs.next());
-    assertEquals(9, rs.getInt(1));
+        assertTrue(rs.next());
+        assertEquals(4, rs.getInt(1));
 
-    assertFalse(rs.next());
-    rs.close();
+        assertTrue(rs.next());
+        assertEquals(6, rs.getInt(1));
 
-    call.close();
-  }
+        assertTrue(rs.next());
+        assertEquals(9, rs.getInt(1));
 
-  @Test
-  public void testEmptyResult() throws SQLException {
-    CallableStatement call = con.prepareCall("{ ? = call testspg__getEmptyRefcursor () }");
-    call.registerOutParameter(1, cursorType);
-    call.execute();
+        assertFalse(rs.next());
+        rs.close();
 
-    ResultSet rs = (ResultSet) call.getObject(1);
-    assertTrue(!rs.next());
-    rs.close();
+        call.close();
+    }
 
-    call.close();
-  }
+    @Test
+    public void testEmptyResult() throws SQLException {
+        CallableStatement call = con.prepareCall("{ ? = call testspg__getEmptyRefcursor () }");
+        call.registerOutParameter(1, cursorType);
+        call.execute();
 
-  @Test
-  public void testMetaData() throws SQLException {
-    CallableStatement call = con.prepareCall("{ ? = call testspg__getRefcursor () }");
-    call.registerOutParameter(1, cursorType);
-    call.execute();
+        ResultSet rs = (ResultSet) call.getObject(1);
+        assertTrue(!rs.next());
+        rs.close();
 
-    ResultSet rs = (ResultSet) call.getObject(1);
-    ResultSetMetaData rsmd = rs.getMetaData();
-    assertNotNull(rsmd);
-    assertEquals(1, rsmd.getColumnCount());
-    assertEquals(Types.INTEGER, rsmd.getColumnType(1));
-    assertEquals("int4", rsmd.getColumnTypeName(1));
-    rs.close();
+        call.close();
+    }
 
-    call.close();
-  }
+    @Test
+    public void testMetaData() throws SQLException {
+        CallableStatement call = con.prepareCall("{ ? = call testspg__getRefcursor () }");
+        call.registerOutParameter(1, cursorType);
+        call.execute();
 
-  @Test
-  public void testResultType() throws SQLException {
-    CallableStatement call = con.prepareCall("{ ? = call testspg__getRefcursor () }",
-        ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY);
-    call.registerOutParameter(1, cursorType);
-    call.execute();
-    ResultSet rs = (ResultSet) call.getObject(1);
+        ResultSet rs = (ResultSet) call.getObject(1);
+        ResultSetMetaData rsmd = rs.getMetaData();
+        assertNotNull(rsmd);
+        assertEquals(1, rsmd.getColumnCount());
+        assertEquals(Types.INTEGER, rsmd.getColumnType(1));
+        assertEquals("int4", rsmd.getColumnTypeName(1));
+        rs.close();
 
-    assertEquals(rs.getType(), ResultSet.TYPE_SCROLL_INSENSITIVE);
-    assertEquals(rs.getConcurrency(), ResultSet.CONCUR_READ_ONLY);
+        call.close();
+    }
 
-    assertTrue(rs.last());
-    assertEquals(6, rs.getRow());
-    rs.close();
-    call.close();
-  }
+    @Test
+    public void testResultType() throws SQLException {
+        CallableStatement call = con.prepareCall("{ ? = call testspg__getRefcursor () }",
+                ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY);
+        call.registerOutParameter(1, cursorType);
+        call.execute();
+        ResultSet rs = (ResultSet) call.getObject(1);
+
+        assertEquals(rs.getType(), ResultSet.TYPE_SCROLL_INSENSITIVE);
+        assertEquals(rs.getConcurrency(), ResultSet.CONCUR_READ_ONLY);
+
+        assertTrue(rs.last());
+        assertEquals(6, rs.getRow());
+        rs.close();
+        call.close();
+    }
 
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ReplaceProcessingTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ReplaceProcessingTest.java
index 506f0a5..d540978 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ReplaceProcessingTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ReplaceProcessingTest.java
@@ -16,31 +16,31 @@ import java.util.Arrays;
 @RunWith(Parameterized.class)
 public class ReplaceProcessingTest extends BaseTest4 {
 
-  @Parameterized.Parameter(0)
-  public String input;
-  @Parameterized.Parameter(1)
-  public String expected;
+    @Parameterized.Parameter(0)
+    public String input;
+    @Parameterized.Parameter(1)
+    public String expected;
 
-  @Parameterized.Parameters(name = "input={0}, expected={1}")
-  public static Iterable<Object[]> data() {
-    return Arrays.asList(new Object[][]{
-        {"{fn timestampadd(SQL_TSI_YEAR, ?, {fn now()})}", "(CAST( $1||' year' as interval)+ now())"},
-        {"{fn timestampadd(SQL_TSI_MONTH, ?, {fn now()})}", "(CAST( $1||' month' as interval)+ now())"},
-        {"{fn timestampadd(SQL_TSI_DAY, ?, {fn now()})}", "(CAST( $1||' day' as interval)+ now())"},
-        {"{fn timestampadd(SQL_TSI_WEEK, ?, {fn now()})}", "(CAST( $1||' week' as interval)+ now())"},
-        {"{fn timestampadd(SQL_TSI_MINUTE, ?, {fn now()})}", "(CAST( $1||' minute' as interval)+ now())"},
-        {"{fn timestampadd(SQL_TSI_SECOND, ?, {fn now()})}", "(CAST( $1||' second' as interval)+ now())"},
-        {"{fn user()}", "user"},
-        {"{fn ifnull(?,?)}", "coalesce($1,$2)"},
-        {"{fn database()}", "current_database()"},
-        // Not yet supported
-        // {"{fn timestampadd(SQL_TSI_QUARTER, ?, {fn now()})}", "(CAST( $1||' quarter' as interval)+ now())"},
-        // {"{fn timestampadd(SQL_TSI_FRAC_SECOND, ?, {fn now()})}", "(CAST( $1||' second' as interval)+ now())"},
-    });
-  }
+    @Parameterized.Parameters(name = "input={0}, expected={1}")
+    public static Iterable<Object[]> data() {
+        return Arrays.asList(new Object[][]{
+                {"{fn timestampadd(SQL_TSI_YEAR, ?, {fn now()})}", "(CAST( $1||' year' as interval)+ now())"},
+                {"{fn timestampadd(SQL_TSI_MONTH, ?, {fn now()})}", "(CAST( $1||' month' as interval)+ now())"},
+                {"{fn timestampadd(SQL_TSI_DAY, ?, {fn now()})}", "(CAST( $1||' day' as interval)+ now())"},
+                {"{fn timestampadd(SQL_TSI_WEEK, ?, {fn now()})}", "(CAST( $1||' week' as interval)+ now())"},
+                {"{fn timestampadd(SQL_TSI_MINUTE, ?, {fn now()})}", "(CAST( $1||' minute' as interval)+ now())"},
+                {"{fn timestampadd(SQL_TSI_SECOND, ?, {fn now()})}", "(CAST( $1||' second' as interval)+ now())"},
+                {"{fn user()}", "user"},
+                {"{fn ifnull(?,?)}", "coalesce($1,$2)"},
+                {"{fn database()}", "current_database()"},
+                // Not yet supported
+                // {"{fn timestampadd(SQL_TSI_QUARTER, ?, {fn now()})}", "(CAST( $1||' quarter' as interval)+ now())"},
+                // {"{fn timestampadd(SQL_TSI_FRAC_SECOND, ?, {fn now()})}", "(CAST( $1||' second' as interval)+ now())"},
+        });
+    }
 
-  @Test
-  public void run() throws SQLException {
-    Assert.assertEquals(input, expected, con.nativeSQL(input));
-  }
+    @Test
+    public void run() throws SQLException {
+        Assert.assertEquals(input, expected, con.nativeSQL(input));
+    }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ResultSetMetaDataTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ResultSetMetaDataTest.java
index d8aa0a2..84316e1 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ResultSetMetaDataTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ResultSetMetaDataTest.java
@@ -35,355 +35,355 @@ import java.util.Properties;
 
 @RunWith(Parameterized.class)
 public class ResultSetMetaDataTest extends BaseTest4 {
-  Connection conn;
-  private final Integer databaseMetadataCacheFields;
-  private final Integer databaseMetadataCacheFieldsMib;
+    private final Integer databaseMetadataCacheFields;
+    private final Integer databaseMetadataCacheFieldsMib;
+    Connection conn;
 
-  public ResultSetMetaDataTest(Integer databaseMetadataCacheFields, Integer databaseMetadataCacheFieldsMib) {
-    this.databaseMetadataCacheFields = databaseMetadataCacheFields;
-    this.databaseMetadataCacheFieldsMib = databaseMetadataCacheFieldsMib;
-  }
-
-  @Parameterized.Parameters(name = "databaseMetadataCacheFields = {0}, databaseMetadataCacheFieldsMib = {1}")
-  public static Iterable<Object[]> data() {
-    Collection<Object[]> ids = new ArrayList<>();
-    for (Integer fields : new Integer[]{null, 0}) {
-      for (Integer fieldsMib : new Integer[]{null, 0}) {
-        ids.add(new Object[]{fields, fieldsMib});
-      }
-    }
-    return ids;
-  }
-
-  @Override
-  protected void updateProperties(Properties props) {
-    super.updateProperties(props);
-    if (databaseMetadataCacheFields != null) {
-      PGProperty.DATABASE_METADATA_CACHE_FIELDS.set(props, databaseMetadataCacheFields);
-    }
-    if (databaseMetadataCacheFieldsMib != null) {
-      PGProperty.DATABASE_METADATA_CACHE_FIELDS_MIB.set(props, databaseMetadataCacheFieldsMib);
-    }
-  }
-
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-    conn = con;
-    TestUtil.createTable(conn, "rsmd1", "a int primary key, b text, c decimal(10,2)");
-    TestUtil.createTable(conn, "rsmd_cache", "a int primary key");
-    TestUtil.createTable(conn, "timetest",
-        "tm time(3), tmtz timetz, ts timestamp without time zone, tstz timestamp(6) with time zone");
-
-    TestUtil.dropSequence(conn, "serialtest_a_seq");
-    TestUtil.dropSequence(conn, "serialtest_b_seq");
-
-    if (TestUtil.haveMinimumServerVersion(conn, ServerVersion.v10)) {
-      TestUtil.createTable(conn, "identitytest", "id int GENERATED BY DEFAULT AS IDENTITY PRIMARY KEY");
+    public ResultSetMetaDataTest(Integer databaseMetadataCacheFields, Integer databaseMetadataCacheFieldsMib) {
+        this.databaseMetadataCacheFields = databaseMetadataCacheFields;
+        this.databaseMetadataCacheFieldsMib = databaseMetadataCacheFieldsMib;
     }
 
-    TestUtil.createTable(conn, "serialtest", "a serial, b bigserial, c int");
-    TestUtil.createTable(conn, "alltypes",
-        "bool boolean, i2 int2, i4 int4, i8 int8, num numeric(10,2), re real, fl float, ch char(3), vc varchar(3), tx text, d date, t time without time zone, tz time with time zone, ts timestamp without time zone, tsz timestamp with time zone, bt bytea");
-    TestUtil.createTable(conn, "sizetest",
-        "fixedchar char(5), fixedvarchar varchar(5), unfixedvarchar varchar, txt text, bytearr bytea, num64 numeric(6,4), num60 numeric(6,0), num numeric, ip inet");
-    TestUtil.createTable(conn, "compositetest", "col rsmd1");
-  }
-
-  @Override
-  public void tearDown() throws SQLException {
-    TestUtil.dropTable(conn, "compositetest");
-    TestUtil.dropTable(conn, "rsmd1");
-    TestUtil.dropTable(conn, "rsmd_cache");
-    TestUtil.dropTable(conn, "timetest");
-    TestUtil.dropTable(conn, "serialtest");
-    if (TestUtil.haveMinimumServerVersion(conn, ServerVersion.v10)) {
-      TestUtil.dropTable(conn, "identitytest");
-    }
-    TestUtil.dropTable(conn, "alltypes");
-    TestUtil.dropTable(conn, "sizetest");
-    TestUtil.dropSequence(conn, "serialtest_a_seq");
-    TestUtil.dropSequence(conn, "serialtest_b_seq");
-    super.tearDown();
-  }
-
-  @Test
-  public void testStandardResultSet() throws SQLException {
-    Statement stmt = conn.createStatement();
-    ResultSet rs = stmt.executeQuery("SELECT a,b,c,a+c as total, b as d FROM rsmd1");
-    runStandardTests(rs.getMetaData());
-    rs.close();
-    stmt.close();
-  }
-
-  @Test
-  public void testPreparedResultSet() throws SQLException {
-    assumePreparedStatementMetadataSupported();
-
-    PreparedStatement pstmt =
-        conn.prepareStatement("SELECT a,b,c,a+c as total, b as d FROM rsmd1 WHERE b = ?");
-    runStandardTests(pstmt.getMetaData());
-    pstmt.close();
-  }
-
-  private void runStandardTests(ResultSetMetaData rsmd) throws SQLException {
-    PGResultSetMetaData pgrsmd = (PGResultSetMetaData) rsmd;
-
-    assertEquals(5, rsmd.getColumnCount());
-
-    assertEquals("a", rsmd.getColumnLabel(1));
-    assertEquals("total", rsmd.getColumnLabel(4));
-
-    assertEquals("a", rsmd.getColumnName(1));
-    assertEquals("", pgrsmd.getBaseColumnName(4));
-    assertEquals("b", pgrsmd.getBaseColumnName(5));
-
-    assertEquals(Types.INTEGER, rsmd.getColumnType(1));
-    assertEquals(Types.VARCHAR, rsmd.getColumnType(2));
-
-    assertEquals("int4", rsmd.getColumnTypeName(1));
-    assertEquals("text", rsmd.getColumnTypeName(2));
-
-    assertEquals(10, rsmd.getPrecision(3));
-
-    assertEquals(2, rsmd.getScale(3));
-
-    assertEquals("", rsmd.getSchemaName(1));
-    assertEquals("", rsmd.getSchemaName(4));
-    assertEquals("public", pgrsmd.getBaseSchemaName(1));
-    assertEquals("", pgrsmd.getBaseSchemaName(4));
-
-    assertEquals("rsmd1", rsmd.getTableName(1));
-    assertEquals("", rsmd.getTableName(4));
-    assertEquals("rsmd1", pgrsmd.getBaseTableName(1));
-    assertEquals("", pgrsmd.getBaseTableName(4));
-
-    assertEquals(ResultSetMetaData.columnNoNulls, rsmd.isNullable(1));
-    assertEquals(ResultSetMetaData.columnNullable, rsmd.isNullable(2));
-    assertEquals(ResultSetMetaData.columnNullableUnknown, rsmd.isNullable(4));
-  }
-
-  // verify that a prepared update statement returns no metadata and doesn't execute.
-  @Test
-  public void testPreparedUpdate() throws SQLException {
-    assumePreparedStatementMetadataSupported();
-    PreparedStatement pstmt = conn.prepareStatement("INSERT INTO rsmd1(a,b) VALUES(?,?)");
-    pstmt.setInt(1, 1);
-    pstmt.setString(2, "hello");
-    ResultSetMetaData rsmd = pstmt.getMetaData();
-    assertNull(rsmd);
-    pstmt.close();
-
-    Statement stmt = conn.createStatement();
-    ResultSet rs = stmt.executeQuery("SELECT COUNT(*) FROM rsmd1");
-    assertTrue(rs.next());
-    assertEquals(0, rs.getInt(1));
-    rs.close();
-    stmt.close();
-  }
-
-  @Test
-  public void testDatabaseMetaDataNames() throws SQLException {
-    DatabaseMetaData databaseMetaData = conn.getMetaData();
-    ResultSet resultSet = databaseMetaData.getTableTypes();
-    ResultSetMetaData resultSetMetaData = resultSet.getMetaData();
-    assertEquals(1, resultSetMetaData.getColumnCount());
-    assertEquals("TABLE_TYPE", resultSetMetaData.getColumnName(1));
-    resultSet.close();
-  }
-
-  @Test
-  public void testTimestampInfo() throws SQLException {
-    Statement stmt = conn.createStatement();
-    ResultSet rs = stmt.executeQuery("SELECT tm, tmtz, ts, tstz FROM timetest");
-    ResultSetMetaData rsmd = rs.getMetaData();
-
-    // For reference:
-    // TestUtil.createTable(con, "timetest", "tm time(3), tmtz timetz, ts timestamp without time
-    // zone, tstz timestamp(6) with time zone");
-
-    assertEquals(3, rsmd.getScale(1));
-    assertEquals(6, rsmd.getScale(2));
-    assertEquals(6, rsmd.getScale(3));
-    assertEquals(6, rsmd.getScale(4));
-
-    assertEquals(12, rsmd.getColumnDisplaySize(1));
-    assertEquals(21, rsmd.getColumnDisplaySize(2));
-    assertEquals(29, rsmd.getColumnDisplaySize(3));
-    assertEquals(35, rsmd.getColumnDisplaySize(4));
-
-    rs.close();
-    stmt.close();
-  }
-
-  @Test
-  public void testColumnDisplaySize() throws SQLException {
-    Statement stmt = conn.createStatement();
-    ResultSet rs = stmt.executeQuery(
-        "SELECT fixedchar, fixedvarchar, unfixedvarchar, txt, bytearr, num64, num60, num, ip FROM sizetest");
-    ResultSetMetaData rsmd = rs.getMetaData();
-
-    assertEquals(5, rsmd.getColumnDisplaySize(1));
-    assertEquals(5, rsmd.getColumnDisplaySize(2));
-    assertEquals(Integer.MAX_VALUE, rsmd.getColumnDisplaySize(3));
-    assertEquals(Integer.MAX_VALUE, rsmd.getColumnDisplaySize(4));
-    assertEquals(Integer.MAX_VALUE, rsmd.getColumnDisplaySize(5));
-    assertEquals(8, rsmd.getColumnDisplaySize(6));
-    assertEquals(7, rsmd.getColumnDisplaySize(7));
-    assertEquals(131089, rsmd.getColumnDisplaySize(8));
-    assertEquals(Integer.MAX_VALUE, rsmd.getColumnDisplaySize(9));
-  }
-
-  @Test
-  public void testIsAutoIncrement() throws SQLException {
-    Statement stmt = conn.createStatement();
-    ResultSet rs = stmt.executeQuery("SELECT c,b,a FROM serialtest");
-    ResultSetMetaData rsmd = rs.getMetaData();
-
-    assertTrue(!rsmd.isAutoIncrement(1));
-    assertTrue(rsmd.isAutoIncrement(2));
-    assertTrue(rsmd.isAutoIncrement(3));
-    assertEquals("bigserial", rsmd.getColumnTypeName(2));
-    assertEquals("serial", rsmd.getColumnTypeName(3));
-
-    rs.close();
-    stmt.close();
-  }
-
-  @Test
-  public void testClassesMatch() throws SQLException {
-    Statement stmt = conn.createStatement();
-    stmt.executeUpdate(
-        "INSERT INTO alltypes (bool, i2, i4, i8, num, re, fl, ch, vc, tx, d, t, tz, ts, tsz, bt) VALUES ('t', 2, 4, 8, 3.1, 3.14, 3.141, 'c', 'vc', 'tx', '2004-04-09', '09:01:00', '11:11:00-01','2004-04-09 09:01:00','1999-09-19 14:23:12-09', '\\\\123')");
-    ResultSet rs = stmt.executeQuery("SELECT * FROM alltypes");
-    ResultSetMetaData rsmd = rs.getMetaData();
-    assertTrue(rs.next());
-    for (int i = 0; i < rsmd.getColumnCount(); i++) {
-      assertEquals(rs.getObject(i + 1).getClass().getName(), rsmd.getColumnClassName(i + 1));
-    }
-  }
-
-  @Test
-  public void testComposite() throws Exception {
-    Statement stmt = conn.createStatement();
-    ResultSet rs = stmt.executeQuery("SELECT col FROM compositetest");
-    ResultSetMetaData rsmd = rs.getMetaData();
-    assertEquals(Types.STRUCT, rsmd.getColumnType(1));
-    assertEquals("rsmd1", rsmd.getColumnTypeName(1));
-  }
-
-  @Test
-  public void testUnexecutedStatement() throws Exception {
-    assumePreparedStatementMetadataSupported();
-    PreparedStatement pstmt = conn.prepareStatement("SELECT col FROM compositetest");
-    // we have not executed the statement but we can still get the metadata
-    ResultSetMetaData rsmd = pstmt.getMetaData();
-    assertEquals(Types.STRUCT, rsmd.getColumnType(1));
-    assertEquals("rsmd1", rsmd.getColumnTypeName(1));
-  }
-
-  @Test
-  public void testClosedResultSet() throws Exception {
-    assumePreparedStatementMetadataSupported();
-    PreparedStatement pstmt = conn.prepareStatement("SELECT col FROM compositetest");
-    ResultSet rs = pstmt.executeQuery();
-    rs.close();
-    // close the statement and make sure we can still get the metadata
-    ResultSetMetaData rsmd = pstmt.getMetaData();
-    assertEquals(Types.STRUCT, rsmd.getColumnType(1));
-    assertEquals("rsmd1", rsmd.getColumnTypeName(1));
-  }
-
-  @Test
-  public void testIdentityColumn() throws Exception {
-    assumeMinimumServerVersion(ServerVersion.v10);
-    assumePreparedStatementMetadataSupported();
-    PreparedStatement pstmt = conn.prepareStatement("SELECT id FROM identitytest");
-    ResultSet rs = pstmt.executeQuery();
-    ResultSetMetaData rsmd = pstmt.getMetaData();
-    Assert.assertTrue(rsmd.isAutoIncrement(1));
-  }
-
-  // Verifies that the field metadatacache will cache when enabled and also functions properly
-  // when disabled.
-  @Test
-  public void testCache() throws Exception {
-    boolean isCacheDisabled = Integer.valueOf(0).equals(databaseMetadataCacheFields)
-                           || Integer.valueOf(0).equals(databaseMetadataCacheFieldsMib);
-
-    {
-      PreparedStatement pstmt = conn.prepareStatement("SELECT a FROM rsmd_cache");
-      ResultSet rs = pstmt.executeQuery();
-      PGResultSetMetaData pgrsmd = (PGResultSetMetaData) rs.getMetaData();
-      assertEquals("a", pgrsmd.getBaseColumnName(1));
-      TestUtil.closeQuietly(rs);
-      TestUtil.closeQuietly(pstmt);
+    @Parameterized.Parameters(name = "databaseMetadataCacheFields = {0}, databaseMetadataCacheFieldsMib = {1}")
+    public static Iterable<Object[]> data() {
+        Collection<Object[]> ids = new ArrayList<>();
+        for (Integer fields : new Integer[]{null, 0}) {
+            for (Integer fieldsMib : new Integer[]{null, 0}) {
+                ids.add(new Object[]{fields, fieldsMib});
+            }
+        }
+        return ids;
     }
 
-    Statement stmt = conn.createStatement();
-    stmt.execute("ALTER TABLE rsmd_cache RENAME COLUMN a TO b");
-    TestUtil.closeQuietly(stmt);
-
-    {
-      PreparedStatement pstmt = conn.prepareStatement("SELECT b FROM rsmd_cache");
-      ResultSet rs = pstmt.executeQuery();
-      PGResultSetMetaData pgrsmd = (PGResultSetMetaData) rs.getMetaData();
-      // Unless the cache is disabled, we expect to see stale results.
-      assertEquals(isCacheDisabled ? "b" : "a", pgrsmd.getBaseColumnName(1));
-      TestUtil.closeQuietly(rs);
-      TestUtil.closeQuietly(pstmt);
+    @Override
+    protected void updateProperties(Properties props) {
+        super.updateProperties(props);
+        if (databaseMetadataCacheFields != null) {
+            PGProperty.DATABASE_METADATA_CACHE_FIELDS.set(props, databaseMetadataCacheFields);
+        }
+        if (databaseMetadataCacheFieldsMib != null) {
+            PGProperty.DATABASE_METADATA_CACHE_FIELDS_MIB.set(props, databaseMetadataCacheFieldsMib);
+        }
     }
-  }
 
-  private void assumePreparedStatementMetadataSupported() {
-    Assume.assumeTrue("prepared statement metadata is not supported for simple protocol",
-        preferQueryMode.compareTo(PreferQueryMode.EXTENDED_FOR_PREPARED) >= 0);
-  }
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        conn = con;
+        TestUtil.createTable(conn, "rsmd1", "a int primary key, b text, c decimal(10,2)");
+        TestUtil.createTable(conn, "rsmd_cache", "a int primary key");
+        TestUtil.createTable(conn, "timetest",
+                "tm time(3), tmtz timetz, ts timestamp without time zone, tstz timestamp(6) with time zone");
 
-  @Test
-  public void testSmallSerialColumns() throws SQLException {
-    org.junit.Assume.assumeTrue(TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_2));
-    TestUtil.createTable(con, "smallserial_test", "a smallserial");
+        TestUtil.dropSequence(conn, "serialtest_a_seq");
+        TestUtil.dropSequence(conn, "serialtest_b_seq");
 
-    Statement stmt = conn.createStatement();
-    ResultSet rs = stmt.executeQuery("SELECT a FROM smallserial_test");
-    ResultSetMetaData rsmd = rs.getMetaData();
-    assertTrue(rsmd.isAutoIncrement(1));
-    assertEquals("smallserial_test", rsmd.getTableName(1));
-    assertEquals("a", rsmd.getColumnName(1));
-    assertEquals(Types.SMALLINT, rsmd.getColumnType(1));
-    assertEquals("smallserial", rsmd.getColumnTypeName(1));
-    rs.close();
+        if (TestUtil.haveMinimumServerVersion(conn, ServerVersion.v10)) {
+            TestUtil.createTable(conn, "identitytest", "id int GENERATED BY DEFAULT AS IDENTITY PRIMARY KEY");
+        }
 
-    TestUtil.dropTable(con, "smallserial_test");
-  }
-
-  @Test
-  public void testSmallSerialSequenceLikeColumns() throws SQLException {
-    Statement stmt = con.createStatement();
-    // This is the equivalent of the smallserial, not the actual smallserial
-    stmt.execute("CREATE SEQUENCE smallserial_test_a_seq;\n"
-        + "CREATE TABLE smallserial_test (\n"
-        + "    a smallint NOT NULL DEFAULT nextval('smallserial_test_a_seq')\n"
-        + ");\n"
-        + "ALTER SEQUENCE smallserial_test_a_seq OWNED BY smallserial_test.a;");
-
-    ResultSet rs = stmt.executeQuery("SELECT a FROM smallserial_test");
-    ResultSetMetaData rsmd = rs.getMetaData();
-    assertTrue(rsmd.isAutoIncrement(1));
-    assertEquals("smallserial_test", rsmd.getTableName(1));
-    assertEquals("a", rsmd.getColumnName(1));
-    assertEquals(Types.SMALLINT, rsmd.getColumnType(1));
-    if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_2)) {
-      // in Pg 9.2+ it behaves like smallserial
-      assertEquals("smallserial", rsmd.getColumnTypeName(1));
-    } else {
-      assertEquals("int2", rsmd.getColumnTypeName(1));
+        TestUtil.createTable(conn, "serialtest", "a serial, b bigserial, c int");
+        TestUtil.createTable(conn, "alltypes",
+                "bool boolean, i2 int2, i4 int4, i8 int8, num numeric(10,2), re real, fl float, ch char(3), vc varchar(3), tx text, d date, t time without time zone, tz time with time zone, ts timestamp without time zone, tsz timestamp with time zone, bt bytea");
+        TestUtil.createTable(conn, "sizetest",
+                "fixedchar char(5), fixedvarchar varchar(5), unfixedvarchar varchar, txt text, bytearr bytea, num64 numeric(6,4), num60 numeric(6,0), num numeric, ip inet");
+        TestUtil.createTable(conn, "compositetest", "col rsmd1");
     }
-    rs.close();
 
-    stmt.execute("DROP TABLE smallserial_test");
-    stmt.close();
-  }
+    @Override
+    public void tearDown() throws SQLException {
+        TestUtil.dropTable(conn, "compositetest");
+        TestUtil.dropTable(conn, "rsmd1");
+        TestUtil.dropTable(conn, "rsmd_cache");
+        TestUtil.dropTable(conn, "timetest");
+        TestUtil.dropTable(conn, "serialtest");
+        if (TestUtil.haveMinimumServerVersion(conn, ServerVersion.v10)) {
+            TestUtil.dropTable(conn, "identitytest");
+        }
+        TestUtil.dropTable(conn, "alltypes");
+        TestUtil.dropTable(conn, "sizetest");
+        TestUtil.dropSequence(conn, "serialtest_a_seq");
+        TestUtil.dropSequence(conn, "serialtest_b_seq");
+        super.tearDown();
+    }
+
+    @Test
+    public void testStandardResultSet() throws SQLException {
+        Statement stmt = conn.createStatement();
+        ResultSet rs = stmt.executeQuery("SELECT a,b,c,a+c as total, b as d FROM rsmd1");
+        runStandardTests(rs.getMetaData());
+        rs.close();
+        stmt.close();
+    }
+
+    @Test
+    public void testPreparedResultSet() throws SQLException {
+        assumePreparedStatementMetadataSupported();
+
+        PreparedStatement pstmt =
+                conn.prepareStatement("SELECT a,b,c,a+c as total, b as d FROM rsmd1 WHERE b = ?");
+        runStandardTests(pstmt.getMetaData());
+        pstmt.close();
+    }
+
+    private void runStandardTests(ResultSetMetaData rsmd) throws SQLException {
+        PGResultSetMetaData pgrsmd = (PGResultSetMetaData) rsmd;
+
+        assertEquals(5, rsmd.getColumnCount());
+
+        assertEquals("a", rsmd.getColumnLabel(1));
+        assertEquals("total", rsmd.getColumnLabel(4));
+
+        assertEquals("a", rsmd.getColumnName(1));
+        assertEquals("", pgrsmd.getBaseColumnName(4));
+        assertEquals("b", pgrsmd.getBaseColumnName(5));
+
+        assertEquals(Types.INTEGER, rsmd.getColumnType(1));
+        assertEquals(Types.VARCHAR, rsmd.getColumnType(2));
+
+        assertEquals("int4", rsmd.getColumnTypeName(1));
+        assertEquals("text", rsmd.getColumnTypeName(2));
+
+        assertEquals(10, rsmd.getPrecision(3));
+
+        assertEquals(2, rsmd.getScale(3));
+
+        assertEquals("", rsmd.getSchemaName(1));
+        assertEquals("", rsmd.getSchemaName(4));
+        assertEquals("public", pgrsmd.getBaseSchemaName(1));
+        assertEquals("", pgrsmd.getBaseSchemaName(4));
+
+        assertEquals("rsmd1", rsmd.getTableName(1));
+        assertEquals("", rsmd.getTableName(4));
+        assertEquals("rsmd1", pgrsmd.getBaseTableName(1));
+        assertEquals("", pgrsmd.getBaseTableName(4));
+
+        assertEquals(ResultSetMetaData.columnNoNulls, rsmd.isNullable(1));
+        assertEquals(ResultSetMetaData.columnNullable, rsmd.isNullable(2));
+        assertEquals(ResultSetMetaData.columnNullableUnknown, rsmd.isNullable(4));
+    }
+
+    // verify that a prepared update statement returns no metadata and doesn't execute.
+    @Test
+    public void testPreparedUpdate() throws SQLException {
+        assumePreparedStatementMetadataSupported();
+        PreparedStatement pstmt = conn.prepareStatement("INSERT INTO rsmd1(a,b) VALUES(?,?)");
+        pstmt.setInt(1, 1);
+        pstmt.setString(2, "hello");
+        ResultSetMetaData rsmd = pstmt.getMetaData();
+        assertNull(rsmd);
+        pstmt.close();
+
+        Statement stmt = conn.createStatement();
+        ResultSet rs = stmt.executeQuery("SELECT COUNT(*) FROM rsmd1");
+        assertTrue(rs.next());
+        assertEquals(0, rs.getInt(1));
+        rs.close();
+        stmt.close();
+    }
+
+    @Test
+    public void testDatabaseMetaDataNames() throws SQLException {
+        DatabaseMetaData databaseMetaData = conn.getMetaData();
+        ResultSet resultSet = databaseMetaData.getTableTypes();
+        ResultSetMetaData resultSetMetaData = resultSet.getMetaData();
+        assertEquals(1, resultSetMetaData.getColumnCount());
+        assertEquals("TABLE_TYPE", resultSetMetaData.getColumnName(1));
+        resultSet.close();
+    }
+
+    @Test
+    public void testTimestampInfo() throws SQLException {
+        Statement stmt = conn.createStatement();
+        ResultSet rs = stmt.executeQuery("SELECT tm, tmtz, ts, tstz FROM timetest");
+        ResultSetMetaData rsmd = rs.getMetaData();
+
+        // For reference:
+        // TestUtil.createTable(con, "timetest", "tm time(3), tmtz timetz, ts timestamp without time
+        // zone, tstz timestamp(6) with time zone");
+
+        assertEquals(3, rsmd.getScale(1));
+        assertEquals(6, rsmd.getScale(2));
+        assertEquals(6, rsmd.getScale(3));
+        assertEquals(6, rsmd.getScale(4));
+
+        assertEquals(12, rsmd.getColumnDisplaySize(1));
+        assertEquals(21, rsmd.getColumnDisplaySize(2));
+        assertEquals(29, rsmd.getColumnDisplaySize(3));
+        assertEquals(35, rsmd.getColumnDisplaySize(4));
+
+        rs.close();
+        stmt.close();
+    }
+
+    @Test
+    public void testColumnDisplaySize() throws SQLException {
+        Statement stmt = conn.createStatement();
+        ResultSet rs = stmt.executeQuery(
+                "SELECT fixedchar, fixedvarchar, unfixedvarchar, txt, bytearr, num64, num60, num, ip FROM sizetest");
+        ResultSetMetaData rsmd = rs.getMetaData();
+
+        assertEquals(5, rsmd.getColumnDisplaySize(1));
+        assertEquals(5, rsmd.getColumnDisplaySize(2));
+        assertEquals(Integer.MAX_VALUE, rsmd.getColumnDisplaySize(3));
+        assertEquals(Integer.MAX_VALUE, rsmd.getColumnDisplaySize(4));
+        assertEquals(Integer.MAX_VALUE, rsmd.getColumnDisplaySize(5));
+        assertEquals(8, rsmd.getColumnDisplaySize(6));
+        assertEquals(7, rsmd.getColumnDisplaySize(7));
+        assertEquals(131089, rsmd.getColumnDisplaySize(8));
+        assertEquals(Integer.MAX_VALUE, rsmd.getColumnDisplaySize(9));
+    }
+
+    @Test
+    public void testIsAutoIncrement() throws SQLException {
+        Statement stmt = conn.createStatement();
+        ResultSet rs = stmt.executeQuery("SELECT c,b,a FROM serialtest");
+        ResultSetMetaData rsmd = rs.getMetaData();
+
+        assertTrue(!rsmd.isAutoIncrement(1));
+        assertTrue(rsmd.isAutoIncrement(2));
+        assertTrue(rsmd.isAutoIncrement(3));
+        assertEquals("bigserial", rsmd.getColumnTypeName(2));
+        assertEquals("serial", rsmd.getColumnTypeName(3));
+
+        rs.close();
+        stmt.close();
+    }
+
+    @Test
+    public void testClassesMatch() throws SQLException {
+        Statement stmt = conn.createStatement();
+        stmt.executeUpdate(
+                "INSERT INTO alltypes (bool, i2, i4, i8, num, re, fl, ch, vc, tx, d, t, tz, ts, tsz, bt) VALUES ('t', 2, 4, 8, 3.1, 3.14, 3.141, 'c', 'vc', 'tx', '2004-04-09', '09:01:00', '11:11:00-01','2004-04-09 09:01:00','1999-09-19 14:23:12-09', '\\\\123')");
+        ResultSet rs = stmt.executeQuery("SELECT * FROM alltypes");
+        ResultSetMetaData rsmd = rs.getMetaData();
+        assertTrue(rs.next());
+        for (int i = 0; i < rsmd.getColumnCount(); i++) {
+            assertEquals(rs.getObject(i + 1).getClass().getName(), rsmd.getColumnClassName(i + 1));
+        }
+    }
+
+    @Test
+    public void testComposite() throws Exception {
+        Statement stmt = conn.createStatement();
+        ResultSet rs = stmt.executeQuery("SELECT col FROM compositetest");
+        ResultSetMetaData rsmd = rs.getMetaData();
+        assertEquals(Types.STRUCT, rsmd.getColumnType(1));
+        assertEquals("rsmd1", rsmd.getColumnTypeName(1));
+    }
+
+    @Test
+    public void testUnexecutedStatement() throws Exception {
+        assumePreparedStatementMetadataSupported();
+        PreparedStatement pstmt = conn.prepareStatement("SELECT col FROM compositetest");
+        // we have not executed the statement but we can still get the metadata
+        ResultSetMetaData rsmd = pstmt.getMetaData();
+        assertEquals(Types.STRUCT, rsmd.getColumnType(1));
+        assertEquals("rsmd1", rsmd.getColumnTypeName(1));
+    }
+
+    @Test
+    public void testClosedResultSet() throws Exception {
+        assumePreparedStatementMetadataSupported();
+        PreparedStatement pstmt = conn.prepareStatement("SELECT col FROM compositetest");
+        ResultSet rs = pstmt.executeQuery();
+        rs.close();
+        // close the statement and make sure we can still get the metadata
+        ResultSetMetaData rsmd = pstmt.getMetaData();
+        assertEquals(Types.STRUCT, rsmd.getColumnType(1));
+        assertEquals("rsmd1", rsmd.getColumnTypeName(1));
+    }
+
+    @Test
+    public void testIdentityColumn() throws Exception {
+        assumeMinimumServerVersion(ServerVersion.v10);
+        assumePreparedStatementMetadataSupported();
+        PreparedStatement pstmt = conn.prepareStatement("SELECT id FROM identitytest");
+        ResultSet rs = pstmt.executeQuery();
+        ResultSetMetaData rsmd = pstmt.getMetaData();
+        Assert.assertTrue(rsmd.isAutoIncrement(1));
+    }
+
+    // Verifies that the field metadata cache will cache when enabled and also functions properly
+    // when disabled.
+    @Test
+    public void testCache() throws Exception {
+        boolean isCacheDisabled = Integer.valueOf(0).equals(databaseMetadataCacheFields)
+                || Integer.valueOf(0).equals(databaseMetadataCacheFieldsMib);
+
+        {
+            PreparedStatement pstmt = conn.prepareStatement("SELECT a FROM rsmd_cache");
+            ResultSet rs = pstmt.executeQuery();
+            PGResultSetMetaData pgrsmd = (PGResultSetMetaData) rs.getMetaData();
+            assertEquals("a", pgrsmd.getBaseColumnName(1));
+            TestUtil.closeQuietly(rs);
+            TestUtil.closeQuietly(pstmt);
+        }
+
+        Statement stmt = conn.createStatement();
+        stmt.execute("ALTER TABLE rsmd_cache RENAME COLUMN a TO b");
+        TestUtil.closeQuietly(stmt);
+
+        {
+            PreparedStatement pstmt = conn.prepareStatement("SELECT b FROM rsmd_cache");
+            ResultSet rs = pstmt.executeQuery();
+            PGResultSetMetaData pgrsmd = (PGResultSetMetaData) rs.getMetaData();
+            // Unless the cache is disabled, we expect to see stale results.
+            assertEquals(isCacheDisabled ? "b" : "a", pgrsmd.getBaseColumnName(1));
+            TestUtil.closeQuietly(rs);
+            TestUtil.closeQuietly(pstmt);
+        }
+    }
+
+    private void assumePreparedStatementMetadataSupported() {
+        Assume.assumeTrue("prepared statement metadata is not supported for simple protocol",
+                preferQueryMode.compareTo(PreferQueryMode.EXTENDED_FOR_PREPARED) >= 0);
+    }
+
+    @Test
+    public void testSmallSerialColumns() throws SQLException {
+        org.junit.Assume.assumeTrue(TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_2));
+        TestUtil.createTable(con, "smallserial_test", "a smallserial");
+
+        Statement stmt = conn.createStatement();
+        ResultSet rs = stmt.executeQuery("SELECT a FROM smallserial_test");
+        ResultSetMetaData rsmd = rs.getMetaData();
+        assertTrue(rsmd.isAutoIncrement(1));
+        assertEquals("smallserial_test", rsmd.getTableName(1));
+        assertEquals("a", rsmd.getColumnName(1));
+        assertEquals(Types.SMALLINT, rsmd.getColumnType(1));
+        assertEquals("smallserial", rsmd.getColumnTypeName(1));
+        rs.close();
+
+        TestUtil.dropTable(con, "smallserial_test");
+    }
+
+    @Test
+    public void testSmallSerialSequenceLikeColumns() throws SQLException {
+        Statement stmt = con.createStatement();
+        // This is the equivalent of the smallserial, not the actual smallserial
+        stmt.execute("CREATE SEQUENCE smallserial_test_a_seq;\n"
+                + "CREATE TABLE smallserial_test (\n"
+                + "    a smallint NOT NULL DEFAULT nextval('smallserial_test_a_seq')\n"
+                + ");\n"
+                + "ALTER SEQUENCE smallserial_test_a_seq OWNED BY smallserial_test.a;");
+
+        ResultSet rs = stmt.executeQuery("SELECT a FROM smallserial_test");
+        ResultSetMetaData rsmd = rs.getMetaData();
+        assertTrue(rsmd.isAutoIncrement(1));
+        assertEquals("smallserial_test", rsmd.getTableName(1));
+        assertEquals("a", rsmd.getColumnName(1));
+        assertEquals(Types.SMALLINT, rsmd.getColumnType(1));
+        if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_2)) {
+            // in Pg 9.2+ it behaves like smallserial
+            assertEquals("smallserial", rsmd.getColumnTypeName(1));
+        } else {
+            assertEquals("int2", rsmd.getColumnTypeName(1));
+        }
+        rs.close();
+
+        stmt.execute("DROP TABLE smallserial_test");
+        stmt.close();
+    }
 
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ResultSetRefreshTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ResultSetRefreshTest.java
index 023f271..c9e810c 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ResultSetRefreshTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ResultSetRefreshTest.java
@@ -16,39 +16,39 @@ import java.sql.SQLException;
 import java.sql.Statement;
 
 public class ResultSetRefreshTest extends BaseTest4 {
-  @Test
-  public void testWithDataColumnThatRequiresEscaping() throws Exception {
-    TestUtil.dropTable(con, "refresh_row_bad_ident");
-    TestUtil.execute(con, "CREATE TABLE refresh_row_bad_ident (id int PRIMARY KEY, \"1 FROM refresh_row_bad_ident; SELECT 2; SELECT *\" int)");
-    TestUtil.execute(con, "INSERT INTO refresh_row_bad_ident (id) VALUES (1), (2), (3)");
+    @Test
+    public void testWithDataColumnThatRequiresEscaping() throws Exception {
+        TestUtil.dropTable(con, "refresh_row_bad_ident");
+        TestUtil.execute(con, "CREATE TABLE refresh_row_bad_ident (id int PRIMARY KEY, \"1 FROM refresh_row_bad_ident; SELECT 2; SELECT *\" int)");
+        TestUtil.execute(con, "INSERT INTO refresh_row_bad_ident (id) VALUES (1), (2), (3)");
 
-    Statement stmt = con.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_UPDATABLE);
-    ResultSet rs = stmt.executeQuery("SELECT * FROM refresh_row_bad_ident");
-    assertTrue(rs.next());
-    try {
-      rs.refreshRow();
-    } catch (SQLException ex) {
-      throw new RuntimeException("ResultSet.refreshRow() did not handle escaping data column identifiers", ex);
+        Statement stmt = con.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_UPDATABLE);
+        ResultSet rs = stmt.executeQuery("SELECT * FROM refresh_row_bad_ident");
+        assertTrue(rs.next());
+        try {
+            rs.refreshRow();
+        } catch (SQLException ex) {
+            throw new RuntimeException("ResultSet.refreshRow() did not handle escaping data column identifiers", ex);
+        }
+        rs.close();
+        stmt.close();
     }
-    rs.close();
-    stmt.close();
-  }
 
-  @Test
-  public void testWithKeyColumnThatRequiresEscaping() throws Exception {
-    TestUtil.dropTable(con, "refresh_row_bad_ident");
-    TestUtil.execute(con, "CREATE TABLE refresh_row_bad_ident (\"my key\" int PRIMARY KEY)");
-    TestUtil.execute(con, "INSERT INTO refresh_row_bad_ident VALUES (1), (2), (3)");
+    @Test
+    public void testWithKeyColumnThatRequiresEscaping() throws Exception {
+        TestUtil.dropTable(con, "refresh_row_bad_ident");
+        TestUtil.execute(con, "CREATE TABLE refresh_row_bad_ident (\"my key\" int PRIMARY KEY)");
+        TestUtil.execute(con, "INSERT INTO refresh_row_bad_ident VALUES (1), (2), (3)");
 
-    Statement stmt = con.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_UPDATABLE);
-    ResultSet rs = stmt.executeQuery("SELECT * FROM refresh_row_bad_ident");
-    assertTrue(rs.next());
-    try {
-      rs.refreshRow();
-    } catch (SQLException ex) {
-      throw new RuntimeException("ResultSet.refreshRow() did not handle escaping key column identifiers", ex);
+        Statement stmt = con.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_UPDATABLE);
+        ResultSet rs = stmt.executeQuery("SELECT * FROM refresh_row_bad_ident");
+        assertTrue(rs.next());
+        try {
+            rs.refreshRow();
+        } catch (SQLException ex) {
+            throw new RuntimeException("ResultSet.refreshRow() did not handle escaping key column identifiers", ex);
+        }
+        rs.close();
+        stmt.close();
     }
-    rs.close();
-    stmt.close();
-  }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ResultSetTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ResultSetTest.java
index 0968e61..1f005a5 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ResultSetTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ResultSetTest.java
@@ -5,24 +5,6 @@
 
 package org.postgresql.test.jdbc2;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertSame;
-import static org.junit.Assert.assertThrows;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import static org.junit.Assume.assumeTrue;
-
-import org.postgresql.core.ServerVersion;
-import org.postgresql.jdbc.PreferQueryMode;
-import org.postgresql.test.TestUtil;
-import org.postgresql.util.PGobject;
-import org.postgresql.util.PSQLException;
-
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-
 import java.lang.reflect.Field;
 import java.math.BigDecimal;
 import java.sql.Connection;
@@ -42,6 +24,21 @@ import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.postgresql.core.ServerVersion;
+import org.postgresql.jdbc.PreferQueryMode;
+import org.postgresql.test.TestUtil;
+import org.postgresql.util.PGobject;
+import org.postgresql.util.PSQLException;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertThrows;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.junit.Assume.assumeTrue;
 
 /*
  * ResultSet tests.
@@ -49,1397 +46,1397 @@ import java.util.concurrent.TimeoutException;
 @RunWith(Parameterized.class)
 public class ResultSetTest extends BaseTest4 {
 
-  public ResultSetTest(BinaryMode binaryMode) {
-    setBinaryMode(binaryMode);
-  }
-
-  @Parameterized.Parameters(name = "binary = {0}")
-  public static Iterable<Object[]> data() {
-    Collection<Object[]> ids = new ArrayList<>();
-    for (BinaryMode binaryMode : BinaryMode.values()) {
-      ids.add(new Object[]{binaryMode});
+    public ResultSetTest(BinaryMode binaryMode) {
+        setBinaryMode(binaryMode);
     }
-    return ids;
-  }
 
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-    Statement stmt = con.createStatement();
-
-    TestUtil.createTable(con, "testrs", "id integer");
-
-    stmt.executeUpdate("INSERT INTO testrs VALUES (1)");
-    stmt.executeUpdate("INSERT INTO testrs VALUES (2)");
-    stmt.executeUpdate("INSERT INTO testrs VALUES (3)");
-    stmt.executeUpdate("INSERT INTO testrs VALUES (4)");
-    stmt.executeUpdate("INSERT INTO testrs VALUES (6)");
-    stmt.executeUpdate("INSERT INTO testrs VALUES (9)");
-
-    TestUtil.createTable(con, "teststring", "a text");
-    stmt.executeUpdate("INSERT INTO teststring VALUES ('12345')");
-
-    TestUtil.createTable(con, "testint", "a int");
-    stmt.executeUpdate("INSERT INTO testint VALUES (12345)");
-
-    // Boolean Tests
-    TestUtil.createTable(con, "testbool", "a boolean, b int");
-    stmt.executeUpdate("INSERT INTO testbool VALUES(true, 1)");
-    stmt.executeUpdate("INSERT INTO testbool VALUES(false, 0)");
-
-    TestUtil.createTable(con, "testboolstring", "a varchar(30), b boolean");
-    stmt.executeUpdate("INSERT INTO testboolstring VALUES('1 ', true)");
-    stmt.executeUpdate("INSERT INTO testboolstring VALUES('0', false)");
-    stmt.executeUpdate("INSERT INTO testboolstring VALUES(' t', true)");
-    stmt.executeUpdate("INSERT INTO testboolstring VALUES('f', false)");
-    stmt.executeUpdate("INSERT INTO testboolstring VALUES('True', true)");
-    stmt.executeUpdate("INSERT INTO testboolstring VALUES('      False   ', false)");
-    stmt.executeUpdate("INSERT INTO testboolstring VALUES('yes', true)");
-    stmt.executeUpdate("INSERT INTO testboolstring VALUES('  no  ', false)");
-    stmt.executeUpdate("INSERT INTO testboolstring VALUES('y', true)");
-    stmt.executeUpdate("INSERT INTO testboolstring VALUES('n', false)");
-    stmt.executeUpdate("INSERT INTO testboolstring VALUES('oN', true)");
-    stmt.executeUpdate("INSERT INTO testboolstring VALUES('oFf', false)");
-    stmt.executeUpdate("INSERT INTO testboolstring VALUES('OK', null)");
-    stmt.executeUpdate("INSERT INTO testboolstring VALUES('NOT', null)");
-    stmt.executeUpdate("INSERT INTO testboolstring VALUES('not a boolean', null)");
-    stmt.executeUpdate("INSERT INTO testboolstring VALUES('1.0', null)");
-    stmt.executeUpdate("INSERT INTO testboolstring VALUES('0.0', null)");
-
-    TestUtil.createTable(con, "testboolfloat", "i int, a float4, b boolean");
-    stmt.executeUpdate("INSERT INTO testboolfloat VALUES(1, '1.0'::real, true)");
-    stmt.executeUpdate("INSERT INTO testboolfloat VALUES(2, '0.0'::real, false)");
-    stmt.executeUpdate("INSERT INTO testboolfloat VALUES(3, 1.000::real, true)");
-    stmt.executeUpdate("INSERT INTO testboolfloat VALUES(4, 0.000::real, false)");
-    stmt.executeUpdate("INSERT INTO testboolfloat VALUES(5, '1.001'::real, null)");
-    stmt.executeUpdate("INSERT INTO testboolfloat VALUES(6, '-1.001'::real, null)");
-    stmt.executeUpdate("INSERT INTO testboolfloat VALUES(7, 123.4::real, null)");
-    stmt.executeUpdate("INSERT INTO testboolfloat VALUES(8, 1.234e2::real, null)");
-    stmt.executeUpdate("INSERT INTO testboolfloat VALUES(9, 100.00e-2::real, true)");
-    stmt.executeUpdate("INSERT INTO testboolfloat VALUES(10, '9223371487098961921', null)");
-    stmt.executeUpdate("INSERT INTO testboolfloat VALUES(11, '10223372036850000000', null)");
-    String floatVal = Float.toString(StrictMath.nextDown(Long.MAX_VALUE - 1));
-    stmt.executeUpdate("INSERT INTO testboolfloat VALUES(12, " + floatVal + ", null)");
-    floatVal = Float.toString(StrictMath.nextDown(Long.MAX_VALUE + 1));
-    stmt.executeUpdate("INSERT INTO testboolfloat VALUES(13, " + floatVal + ", null)");
-    floatVal = Float.toString(StrictMath.nextUp(Long.MIN_VALUE - 1));
-    stmt.executeUpdate("INSERT INTO testboolfloat VALUES(14, " + floatVal + ", null)");
-    floatVal = Float.toString(StrictMath.nextUp(Long.MIN_VALUE + 1));
-    stmt.executeUpdate("INSERT INTO testboolfloat VALUES(15, " + floatVal + ", null)");
-
-    TestUtil.createTable(con, "testboolint", "a bigint, b boolean");
-    stmt.executeUpdate("INSERT INTO testboolint VALUES(1, true)");
-    stmt.executeUpdate("INSERT INTO testboolint VALUES(0, false)");
-    stmt.executeUpdate("INSERT INTO testboolint VALUES(-1, null)");
-    stmt.executeUpdate("INSERT INTO testboolint VALUES(9223372036854775807, null)");
-    stmt.executeUpdate("INSERT INTO testboolint VALUES(-9223372036854775808, null)");
-
-    // End Boolean Tests
-
-    // TestUtil.createTable(con, "testbit", "a bit");
-
-    TestUtil.createTable(con, "testnumeric", "t text, a numeric");
-    stmt.executeUpdate("INSERT INTO testnumeric VALUES('1.0', '1.0')");
-    stmt.executeUpdate("INSERT INTO testnumeric VALUES('0.0', '0.0')");
-    stmt.executeUpdate("INSERT INTO testnumeric VALUES('-1.0', '-1.0')");
-    stmt.executeUpdate("INSERT INTO testnumeric VALUES('1.2', '1.2')");
-    stmt.executeUpdate("INSERT INTO testnumeric VALUES('-2.5', '-2.5')");
-    stmt.executeUpdate("INSERT INTO testnumeric VALUES('0.000000000000000000000000000990', '0.000000000000000000000000000990')");
-    stmt.executeUpdate("INSERT INTO testnumeric VALUES('10.0000000000099', '10.0000000000099')");
-    stmt.executeUpdate("INSERT INTO testnumeric VALUES('.10000000000000', '.10000000000000')");
-    stmt.executeUpdate("INSERT INTO testnumeric VALUES('.10', '.10')");
-    stmt.executeUpdate("INSERT INTO testnumeric VALUES('1.10000000000000', '1.10000000000000')");
-    stmt.executeUpdate("INSERT INTO testnumeric VALUES('99999.2', '99999.2')");
-    stmt.executeUpdate("INSERT INTO testnumeric VALUES('99999', '99999')");
-    stmt.executeUpdate("INSERT INTO testnumeric VALUES('-99999.2', '-99999.2')");
-    stmt.executeUpdate("INSERT INTO testnumeric VALUES('-99999', '-99999')");
-
-    // Integer.MaxValue
-    stmt.execute("INSERT INTO testnumeric VALUES('2147483647', '2147483647')");
-
-    // Integer.MinValue
-    stmt.execute("INSERT INTO testnumeric VALUES( '-2147483648', '-2147483648')");
-
-    stmt.executeUpdate("INSERT INTO testnumeric VALUES('2147483648', '2147483648')");
-    stmt.executeUpdate("INSERT INTO testnumeric VALUES('-2147483649', '-2147483649')");
-
-    // Long.MaxValue
-    stmt.executeUpdate("INSERT INTO testnumeric VALUES('9223372036854775807','9223372036854775807')");
-    stmt.executeUpdate("INSERT INTO testnumeric VALUES('9223372036854775807.9', '9223372036854775807.9')");
-
-    // Long.MinValue
-    stmt.executeUpdate("INSERT INTO testnumeric VALUES('-9223372036854775808', '-9223372036854775808')");
-
-    // Long.MaxValue +1
-    stmt.executeUpdate("INSERT INTO testnumeric VALUES('9223372036854775808', '9223372036854775808')");
-
-    // Long.Minvalue -1
-    stmt.executeUpdate("INSERT INTO testnumeric VALUES('-9223372036854775809', '-9223372036854775809')");
-
-    stmt.executeUpdate("INSERT INTO testnumeric VALUES('10223372036850000000', '10223372036850000000')");
-
-    TestUtil.createTable(con, "testpgobject", "id integer NOT NULL, d date, PRIMARY KEY (id)");
-    stmt.execute("INSERT INTO testpgobject VALUES(1, '2010-11-3')");
-
-    stmt.close();
-  }
-
-  @Override
-  public void tearDown() throws SQLException {
-    TestUtil.dropTable(con, "testrs");
-    TestUtil.dropTable(con, "teststring");
-    TestUtil.dropTable(con, "testint");
-    // TestUtil.dropTable(con, "testbit");
-    TestUtil.dropTable(con, "testboolstring");
-    TestUtil.dropTable(con, "testboolfloat");
-    TestUtil.dropTable(con, "testboolint");
-    TestUtil.dropTable(con, "testnumeric");
-    TestUtil.dropTable(con, "testpgobject");
-    super.tearDown();
-  }
-
-  @Test
-  public void testBackward() throws SQLException {
-    Statement stmt =
-        con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY);
-    ResultSet rs = stmt.executeQuery("SELECT * FROM testrs");
-    rs.afterLast();
-    assertTrue(rs.previous());
-    rs.close();
-    stmt.close();
-  }
-
-  @Test
-  public void testAbsolute() throws SQLException {
-    Statement stmt =
-        con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY);
-    ResultSet rs = stmt.executeQuery("SELECT * FROM testrs");
-
-    assertTrue(!rs.absolute(0));
-    assertEquals(0, rs.getRow());
-
-    assertTrue(rs.absolute(-1));
-    assertEquals(6, rs.getRow());
-
-    assertTrue(rs.absolute(1));
-    assertEquals(1, rs.getRow());
-
-    assertTrue(!rs.absolute(-10));
-    assertEquals(0, rs.getRow());
-    assertTrue(rs.next());
-    assertEquals(1, rs.getRow());
-
-    assertTrue(!rs.absolute(10));
-    assertEquals(0, rs.getRow());
-    assertTrue(rs.previous());
-    assertEquals(6, rs.getRow());
-
-    stmt.close();
-  }
-
-  @Test
-  public void testRelative() throws SQLException {
-    Statement stmt =
-        con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY);
-    ResultSet rs = stmt.executeQuery("SELECT * FROM testrs");
-
-    assertTrue(!rs.relative(0));
-    assertEquals(0, rs.getRow());
-    assertTrue(rs.isBeforeFirst());
-
-    assertTrue(rs.relative(2));
-    assertEquals(2, rs.getRow());
-
-    assertTrue(rs.relative(1));
-    assertEquals(3, rs.getRow());
-
-    assertTrue(rs.relative(0));
-    assertEquals(3, rs.getRow());
-
-    assertTrue(!rs.relative(-3));
-    assertEquals(0, rs.getRow());
-    assertTrue(rs.isBeforeFirst());
-
-    assertTrue(rs.relative(4));
-    assertEquals(4, rs.getRow());
-
-    assertTrue(rs.relative(-1));
-    assertEquals(3, rs.getRow());
-
-    assertTrue(!rs.relative(6));
-    assertEquals(0, rs.getRow());
-    assertTrue(rs.isAfterLast());
-
-    assertTrue(rs.relative(-4));
-    assertEquals(3, rs.getRow());
-
-    assertTrue(!rs.relative(-6));
-    assertEquals(0, rs.getRow());
-    assertTrue(rs.isBeforeFirst());
-
-    stmt.close();
-  }
-
-  @Test
-  public void testEmptyResult() throws SQLException {
-    Statement stmt =
-        con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY);
-    ResultSet rs = stmt.executeQuery("SELECT * FROM testrs where id=100");
-    rs.beforeFirst();
-    rs.afterLast();
-    assertTrue(!rs.first());
-    assertTrue(!rs.last());
-    assertTrue(!rs.next());
-  }
-
-  @Test
-  public void testMaxFieldSize() throws SQLException {
-    Statement stmt = con.createStatement();
-    stmt.setMaxFieldSize(2);
-
-    ResultSet rs = stmt.executeQuery("select * from testint");
-
-    // max should not apply to the following since per the spec
-    // it should apply only to binary and char/varchar columns
-    rs.next();
-    assertEquals("12345", rs.getString(1));
-    // getBytes returns 5 bytes for txt transfer, 4 for bin transfer
-    assertTrue(rs.getBytes(1).length >= 4);
-
-    // max should apply to the following since the column is
-    // a varchar column
-    rs = stmt.executeQuery("select * from teststring");
-    rs.next();
-    assertEquals("12", rs.getString(1));
-    assertEquals("12", new String(rs.getBytes(1)));
-  }
-
-  @Test
-  public void testBooleanBool() throws SQLException {
-    testBoolean("testbool", 0);
-    testBoolean("testbool", 1);
-    testBoolean("testbool", 5);
-    testBoolean("testbool", -1);
-  }
-
-  @Test
-  public void testBooleanString() throws SQLException {
-    testBoolean("testboolstring", 0);
-    testBoolean("testboolstring", 1);
-    testBoolean("testboolstring", 5);
-    testBoolean("testboolstring", -1);
-  }
-
-  @Test
-  public void testBooleanFloat() throws SQLException {
-    testBoolean("testboolfloat", 0);
-    testBoolean("testboolfloat", 1);
-    testBoolean("testboolfloat", 5);
-    testBoolean("testboolfloat", -1);
-  }
-
-  @Test
-  public void testBooleanInt() throws SQLException {
-    testBoolean("testboolint", 0);
-    testBoolean("testboolint", 1);
-    testBoolean("testboolint", 5);
-    testBoolean("testboolint", -1);
-  }
-
-  public void testBoolean(String table, int prepareThreshold) throws SQLException {
-    PreparedStatement pstmt = con.prepareStatement("select a, b from " + table);
-    ((org.postgresql.PGStatement) pstmt).setPrepareThreshold(prepareThreshold);
-    ResultSet rs = pstmt.executeQuery();
-    while (rs.next()) {
-      rs.getBoolean(2);
-      Boolean expected = rs.wasNull() ? null : rs.getBoolean(2); // Hack to get SQL NULL
-      if (expected != null) {
-        assertEquals(expected, rs.getBoolean(1));
-      } else {
-        // expected value with null are bad values
-        try {
-          rs.getBoolean(1);
-          fail();
-        } catch (SQLException e) {
-          assertEquals(org.postgresql.util.PSQLState.CANNOT_COERCE.getState(), e.getSQLState());
+    @Parameterized.Parameters(name = "binary = {0}")
+    public static Iterable<Object[]> data() {
+        Collection<Object[]> ids = new ArrayList<>();
+        for (BinaryMode binaryMode : BinaryMode.values()) {
+            ids.add(new Object[]{binaryMode});
         }
-      }
-    }
-    rs.close();
-    pstmt.close();
-  }
-
-  @Test
-  public void testGetBooleanJDBCCompliance() throws SQLException {
-    // The JDBC specification in Table B-6 "Use of ResultSet getter Methods to Retrieve JDBC Data Types"
-    // the getBoolean have this Supported JDBC Type: TINYINT, SMALLINT, INTEGER, BIGINT, REAL, FLOAT,
-    // DOUBLE, DECIMAL, NUMERIC, BIT, BOOLEAN, CHAR, VARCHAR, LONGVARCHAR
-
-    // There is no TINYINT in PostgreSQL
-    testgetBoolean("int2"); // SMALLINT
-    testgetBoolean("int4"); // INTEGER
-    testgetBoolean("int8"); // BIGINT
-    testgetBoolean("float4"); // REAL
-    testgetBoolean("float8"); // FLOAT, DOUBLE
-    testgetBoolean("numeric"); // DECIMAL, NUMERIC
-    testgetBoolean("bpchar"); // CHAR
-    testgetBoolean("varchar"); // VARCHAR
-    testgetBoolean("text"); // LONGVARCHAR?
-  }
-
-  public void testgetBoolean(String dataType) throws SQLException {
-    Statement stmt = con.createStatement();
-    ResultSet rs = stmt.executeQuery("select 1::" + dataType + ", 0::" + dataType + ", 2::" + dataType);
-    assertTrue(rs.next());
-    assertEquals(true, rs.getBoolean(1));
-    assertEquals(false, rs.getBoolean(2));
-
-    try {
-      // The JDBC ResultSet JavaDoc states that only 1 and 0 are valid values, so 2 should return error.
-      rs.getBoolean(3);
-      fail();
-    } catch (SQLException e) {
-      assertEquals(org.postgresql.util.PSQLState.CANNOT_COERCE.getState(), e.getSQLState());
-      // message can be 2 or 2.0 depending on whether binary or text
-      final String message = e.getMessage();
-      if (!"Cannot cast to boolean: \"2.0\"".equals(message)) {
-        assertEquals("Cannot cast to boolean: \"2\"", message);
-      }
-    }
-    rs.close();
-    stmt.close();
-  }
-
-  @Test
-  public void testgetBadBoolean() throws SQLException {
-    testBadBoolean("'2017-03-13 14:25:48.130861'::timestamp", "2017-03-13 14:25:48.130861");
-    testBadBoolean("'2017-03-13'::date", "2017-03-13");
-    testBadBoolean("'2017-03-13 14:25:48.130861'::time", "14:25:48.130861");
-    testBadBoolean("ARRAY[[1,0],[0,1]]", "{{1,0},{0,1}}");
-    testBadBoolean("29::bit(4)", "1101");
-  }
-
-  @Test
-  public void testGetBadUuidBoolean() throws SQLException {
-    assumeTrue(TestUtil.haveMinimumServerVersion(con, ServerVersion.v8_3));
-    testBadBoolean("'a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11'::uuid", "a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11");
-  }
-
-  public void testBadBoolean(String select, String value) throws SQLException {
-    Statement stmt = con.createStatement();
-    ResultSet rs = stmt.executeQuery("select " + select);
-    assertTrue(rs.next());
-    try {
-      rs.getBoolean(1);
-      fail();
-    } catch (SQLException e) {
-      //binary transfer gets different error code and message
-      if (org.postgresql.util.PSQLState.DATA_TYPE_MISMATCH.getState().equals(e.getSQLState())) {
-        final String message = e.getMessage();
-        if (!message.startsWith("Cannot convert the column of type ")) {
-          fail(message);
-        }
-        if (!message.endsWith(" to requested type boolean.")) {
-          fail(message);
-        }
-      } else {
-        assertEquals(org.postgresql.util.PSQLState.CANNOT_COERCE.getState(), e.getSQLState());
-        assertEquals("Cannot cast to boolean: \"" + value + "\"", e.getMessage());
-      }
-    }
-    rs.close();
-    stmt.close();
-  }
-
-  @Test
-  public void testgetByte() throws SQLException {
-    ResultSet rs = con.createStatement().executeQuery("select a from testnumeric");
-
-    assertTrue(rs.next());
-    assertEquals(1, rs.getByte(1));
-
-    assertTrue(rs.next());
-    assertEquals(0, rs.getByte(1));
-
-    assertTrue(rs.next());
-    assertEquals(-1, rs.getByte(1));
-
-    assertTrue(rs.next());
-    assertEquals(1, rs.getByte(1));
-
-    assertTrue(rs.next());
-    assertEquals(-2, rs.getByte(1));
-
-    assertTrue(rs.next());
-    assertEquals(0, rs.getByte(1));
-
-    assertTrue(rs.next());
-    assertEquals(10, rs.getByte(1));
-
-    assertTrue(rs.next());
-    assertEquals(0, rs.getByte(1));
-
-    assertTrue(rs.next());
-    assertEquals(0, rs.getByte(1));
-
-    assertTrue(rs.next());
-    assertEquals(1, rs.getByte(1));
-
-    while (rs.next()) {
-      try {
-        rs.getByte(1);
-        fail("Exception expected.");
-      } catch (SQLException e) {
-        assertEquals(e.getSQLState(), "22003");
-      }
-    }
-    rs.close();
-  }
-
-  @Test
-  public void testgetShort() throws SQLException {
-    ResultSet rs = con.createStatement().executeQuery("select a from testnumeric");
-
-    assertTrue(rs.next());
-    assertEquals(1, rs.getShort(1));
-
-    assertTrue(rs.next());
-    assertEquals(0, rs.getShort(1));
-
-    assertTrue(rs.next());
-    assertEquals(-1, rs.getShort(1));
-
-    assertTrue(rs.next());
-    assertEquals(1, rs.getShort(1));
-
-    assertTrue(rs.next());
-    assertEquals(-2, rs.getShort(1));
-
-    assertTrue(rs.next());
-    assertEquals(0, rs.getShort(1));
-
-    assertTrue(rs.next());
-    assertEquals(10, rs.getShort(1));
-
-    assertTrue(rs.next());
-    assertEquals(0, rs.getShort(1));
-
-    assertTrue(rs.next());
-    assertEquals(0, rs.getShort(1));
-
-    assertTrue(rs.next());
-    assertEquals(1, rs.getShort(1));
-
-    while (rs.next()) {
-      try {
-        rs.getShort(1);
-        fail("Exception expected.");
-      } catch (SQLException e) {
-      }
-    }
-    rs.close();
-  }
-
-  @Test
-  public void testgetInt() throws SQLException {
-    ResultSet rs = con.createStatement().executeQuery("select a from testnumeric");
-
-    assertTrue(rs.next());
-    assertEquals(1, rs.getInt(1));
-
-    assertTrue(rs.next());
-    assertEquals(0, rs.getInt(1));
-
-    assertTrue(rs.next());
-    assertEquals(-1, rs.getInt(1));
-
-    assertTrue(rs.next());
-    assertEquals(1, rs.getInt(1));
-
-    assertTrue(rs.next());
-    assertEquals(-2, rs.getInt(1));
-
-    assertTrue(rs.next());
-    assertEquals(0, rs.getInt(1));
-
-    assertTrue(rs.next());
-    assertEquals(10, rs.getInt(1));
-
-    assertTrue(rs.next());
-    assertEquals(0, rs.getInt(1));
-
-    assertTrue(rs.next());
-    assertEquals(0, rs.getInt(1));
-
-    assertTrue(rs.next());
-    assertEquals(1, rs.getInt(1));
-
-    assertTrue(rs.next());
-    assertEquals(99999, rs.getInt(1));
-
-    assertTrue(rs.next());
-    assertEquals(99999, rs.getInt(1));
-
-    assertTrue(rs.next());
-    assertEquals(-99999, rs.getInt(1));
-
-    assertTrue(rs.next());
-    assertEquals(-99999, rs.getInt(1));
-
-    assertTrue(rs.next());
-    assertEquals(Integer.MAX_VALUE, rs.getInt(1));
-
-    assertTrue(rs.next());
-    assertEquals(Integer.MIN_VALUE, rs.getInt(1));
-
-    while (rs.next()) {
-      try {
-        rs.getInt(1);
-        fail("Exception expected." + rs.getString(1));
-      } catch (SQLException e) {
-      }
-    }
-    rs.close();
-    // test for Issue #2748
-    rs = con.createStatement().executeQuery("select 2.0 :: double precision");
-    assertTrue(rs.next());
-    assertEquals(2, rs.getInt(1));
-    rs.close();
-
-  }
-
-  @Test
-  public void testgetLong() throws SQLException {
-    ResultSet rs = null;
-
-    rs = con.createStatement().executeQuery("select a from testnumeric where t = '1.0'");
-    assertTrue(rs.next());
-    assertEquals(1, rs.getLong(1));
-    rs.close();
-
-    rs = con.createStatement().executeQuery("select a from testnumeric where t = '0.0'");
-    assertTrue(rs.next());
-    assertEquals(0, rs.getLong(1));
-    rs.close();
-
-    rs = con.createStatement().executeQuery("select a from testnumeric where t = '-1.0'");
-    assertTrue(rs.next());
-    assertEquals(-1, rs.getLong(1));
-    rs.close();
-
-    rs = con.createStatement().executeQuery("select a from testnumeric where t = '1.2'");
-    assertTrue(rs.next());
-    assertEquals(1, rs.getLong(1));
-    rs.close();
-
-    rs = con.createStatement().executeQuery("select a from testnumeric where t = '-2.5'");
-    assertTrue(rs.next());
-    assertEquals(-2, rs.getLong(1));
-    rs.close();
-
-    rs = con.createStatement().executeQuery("select a from testnumeric where t = '0.000000000000000000000000000990'");
-    assertTrue(rs.next());
-    assertEquals(0, rs.getLong(1));
-    rs.close();
-
-    rs = con.createStatement().executeQuery("select a from testnumeric where t = '10.0000000000099'");
-    assertTrue(rs.next());
-    assertEquals(10, rs.getLong(1));
-    rs.close();
-
-    rs = con.createStatement().executeQuery("select a from testnumeric where t = '.10000000000000'");
-    assertTrue(rs.next());
-    assertEquals(0, rs.getLong(1));
-    rs.close();
-
-    rs = con.createStatement().executeQuery("select a from testnumeric where t = '.10'");
-    assertTrue(rs.next());
-    assertEquals(0, rs.getLong(1));
-    rs.close();
-
-    rs = con.createStatement().executeQuery("select a from testnumeric where t = '1.10000000000000'");
-    assertTrue(rs.next());
-    assertEquals(1, rs.getLong(1));
-    rs.close();
-
-    rs = con.createStatement().executeQuery("select a from testnumeric where t = '99999.2'");
-    assertTrue(rs.next());
-    assertEquals(99999, rs.getLong(1));
-    rs.close();
-
-    rs = con.createStatement().executeQuery("select a from testnumeric where t = '99999'");
-    assertTrue(rs.next());
-    assertEquals(99999, rs.getLong(1));
-    rs.close();
-
-    rs = con.createStatement().executeQuery("select a from testnumeric where t = '-99999.2'");
-    assertTrue(rs.next());
-    assertEquals(-99999, rs.getLong(1));
-    rs.close();
-
-    rs = con.createStatement().executeQuery("select a from testnumeric where t = '-99999'");
-    assertTrue(rs.next());
-    assertEquals(-99999, rs.getLong(1));
-    rs.close();
-
-    rs = con.createStatement().executeQuery("select a from testnumeric where t = '2147483647'");
-    assertTrue(rs.next());
-    assertEquals((Integer.MAX_VALUE), rs.getLong(1));
-    rs.close();
-
-    rs = con.createStatement().executeQuery("select a from testnumeric where t = '-2147483648'");
-    assertTrue(rs.next());
-    assertEquals((Integer.MIN_VALUE), rs.getLong(1));
-    rs.close();
-
-    rs = con.createStatement().executeQuery("select a from testnumeric where t = '2147483648'");
-    assertTrue(rs.next());
-    assertEquals(((long) Integer.MAX_VALUE) + 1, rs.getLong(1));
-    rs.close();
-
-    rs = con.createStatement().executeQuery("select a from testnumeric where t = '-2147483649'");
-    assertTrue(rs.next());
-    assertEquals(((long) Integer.MIN_VALUE) - 1, rs.getLong(1));
-    rs.close();
-
-    rs = con.createStatement().executeQuery("select a from testnumeric where t = '9223372036854775807'");
-    assertTrue(rs.next());
-    assertEquals(Long.MAX_VALUE, rs.getLong(1));
-    rs.close();
-
-    rs = con.createStatement().executeQuery("select a from testnumeric where t = '9223372036854775807.9'");
-    assertTrue(rs.next());
-    assertEquals(Long.MAX_VALUE, rs.getLong(1));
-    rs.close();
-
-    rs = con.createStatement().executeQuery("select a from testnumeric where t = '-9223372036854775808'");
-    assertTrue(rs.next());
-    assertEquals(Long.MIN_VALUE, rs.getLong(1));
-    rs.close();
-
-    rs = con.createStatement().executeQuery("select a from testnumeric where t = '9223372036854775808'");
-    assertTrue(rs.next());
-    try {
-      rs.getLong(1);
-      fail("Exception expected. " + rs.getString(1));
-    } catch (SQLException e) {
-    }
-    rs.close();
-
-    rs = con.createStatement().executeQuery("select a from testnumeric where t = '-9223372036854775809'");
-    assertTrue(rs.next());
-    try {
-      rs.getLong(1);
-      fail("Exception expected. " + rs.getString(1));
-    } catch (SQLException e) {
-    }
-    rs.close();
-
-    rs = con.createStatement().executeQuery("select a from testnumeric where t = '10223372036850000000'");
-    assertTrue(rs.next());
-    try {
-      rs.getLong(1);
-      fail("Exception expected. " + rs.getString(1));
-    } catch (SQLException e) {
-    }
-    rs.close();
-
-    rs = con.createStatement().executeQuery("select i, a from testboolfloat order by i");
-
-    assertTrue(rs.next());
-    assertEquals(1, rs.getLong(2));
-
-    assertTrue(rs.next());
-    assertEquals(0, rs.getLong(2));
-
-    assertTrue(rs.next());
-    assertEquals(1, rs.getLong(2));
-
-    assertTrue(rs.next());
-    assertEquals(0, rs.getLong(2));
-
-    assertTrue(rs.next());
-    assertEquals(1, rs.getLong(2));
-
-    assertTrue(rs.next());
-    assertEquals(-1, rs.getLong(2));
-
-    assertTrue(rs.next());
-    assertEquals(123, rs.getLong(2));
-
-    assertTrue(rs.next());
-    assertEquals(123, rs.getLong(2));
-
-    assertTrue(rs.next());
-    assertEquals(1, rs.getLong(2));
-
-    assertTrue(rs.next());
-    // the string value from database trims the significant digits, leading to larger variance than binary
-    // the liberica jdk gets similar variance, even in forced binary mode
-    assertEquals(9223371487098961921.0, rs.getLong(2), 1.0e11);
-
-    assertTrue(rs.next());
-    do {
-      try {
-        int row = rs.getInt(1);
-        long l = rs.getLong(2);
-        if ( row == 12 ) {
-          assertEquals(9223371487098961920.0, l, 1.0e11);
-        } else if ( row == 15 ) {
-          assertEquals(-9223371487098961920.0, l, 1.0e11);
-        } else {
-          fail("Exception expected." + rs.getString(2));
-        }
-      } catch (SQLException e) {
-      }
-    } while (rs.next());
-
-    rs.close();
-  }
-
-  @Test
-  public void testgetBigDecimal() throws SQLException {
-    ResultSet rs = null;
-
-    rs = con.createStatement().executeQuery("select a from testnumeric where t = '1.0'");
-    assertTrue(rs.next());
-    assertEquals(BigDecimal.valueOf(1.0), rs.getBigDecimal(1));
-    rs.close();
-
-    rs = con.createStatement().executeQuery("select a from testnumeric where t = '0.0'");
-    assertTrue(rs.next());
-    assertEquals(BigDecimal.valueOf(0.0), rs.getBigDecimal(1));
-    rs.close();
-
-    rs = con.createStatement().executeQuery("select a from testnumeric where t = '-1.0'");
-    assertTrue(rs.next());
-    assertEquals(BigDecimal.valueOf(-1.0), rs.getBigDecimal(1));
-    rs.close();
-
-    rs = con.createStatement().executeQuery("select a from testnumeric where t = '1.2'");
-    assertTrue(rs.next());
-    assertEquals(BigDecimal.valueOf(1.2), rs.getBigDecimal(1));
-    rs.close();
-
-    rs = con.createStatement().executeQuery("select a from testnumeric where t = '-2.5'");
-    assertTrue(rs.next());
-    assertEquals(BigDecimal.valueOf(-2.5), rs.getBigDecimal(1));
-    rs.close();
-
-    rs = con.createStatement().executeQuery("select a from testnumeric where t = '0.000000000000000000000000000990'");
-    assertTrue(rs.next());
-    assertEquals(new BigDecimal("0.000000000000000000000000000990"), rs.getBigDecimal(1));
-    rs.close();
-
-    rs = con.createStatement().executeQuery("select a from testnumeric where t = '10.0000000000099'");
-    assertTrue(rs.next());
-    assertEquals(new BigDecimal("10.0000000000099"), rs.getBigDecimal(1));
-    rs.close();
-
-    rs = con.createStatement().executeQuery("select a from testnumeric where t = '.10000000000000'");
-    assertTrue(rs.next());
-    assertEquals(new BigDecimal("0.10000000000000"), rs.getBigDecimal(1));
-    rs.close();
-
-    rs = con.createStatement().executeQuery("select a from testnumeric where t = '.10'");
-    assertTrue(rs.next());
-    assertEquals(new BigDecimal("0.10"), rs.getBigDecimal(1));
-    rs.close();
-
-    rs = con.createStatement().executeQuery("select a from testnumeric where t = '1.10000000000000'");
-    assertTrue(rs.next());
-    assertEquals(new BigDecimal("1.10000000000000"), rs.getBigDecimal(1));
-    rs.close();
-
-    rs = con.createStatement().executeQuery("select a from testnumeric where t = '99999.2'");
-    assertTrue(rs.next());
-    assertEquals(BigDecimal.valueOf(99999.2), rs.getBigDecimal(1));
-    rs.close();
-
-    rs = con.createStatement().executeQuery("select a from testnumeric where t = '99999'");
-    assertTrue(rs.next());
-    assertEquals(BigDecimal.valueOf(99999), rs.getBigDecimal(1));
-    rs.close();
-
-    rs = con.createStatement().executeQuery("select a from testnumeric where t = '-99999.2'");
-    assertTrue(rs.next());
-    assertEquals(BigDecimal.valueOf(-99999.2), rs.getBigDecimal(1));
-    rs.close();
-
-    rs = con.createStatement().executeQuery("select a from testnumeric where t = '-99999'");
-    assertTrue(rs.next());
-    assertEquals(BigDecimal.valueOf(-99999), rs.getBigDecimal(1));
-    rs.close();
-
-    rs = con.createStatement().executeQuery("select a from testnumeric where t = '2147483647'");
-    assertTrue(rs.next());
-    assertEquals(BigDecimal.valueOf(2147483647), rs.getBigDecimal(1));
-    rs.close();
-
-    rs = con.createStatement().executeQuery("select a from testnumeric where t = '-2147483648'");
-    assertTrue(rs.next());
-    assertEquals(BigDecimal.valueOf(-2147483648), rs.getBigDecimal(1));
-    rs.close();
-
-    rs = con.createStatement().executeQuery("select a from testnumeric where t = '2147483648'");
-    assertTrue(rs.next());
-    assertEquals(new BigDecimal("2147483648"), rs.getBigDecimal(1));
-    rs.close();
-
-    rs = con.createStatement().executeQuery("select a from testnumeric where t = '-2147483649'");
-    assertTrue(rs.next());
-    assertEquals(new BigDecimal("-2147483649"), rs.getBigDecimal(1));
-    rs.close();
-
-    rs = con.createStatement().executeQuery("select a from testnumeric where t = '9223372036854775807'");
-    assertTrue(rs.next());
-    assertEquals(new BigDecimal("9223372036854775807"), rs.getBigDecimal(1));
-    rs.close();
-
-    rs = con.createStatement().executeQuery("select a from testnumeric where t = '9223372036854775807.9'");
-    assertTrue(rs.next());
-    assertEquals(new BigDecimal("9223372036854775807.9"), rs.getBigDecimal(1));
-    rs.close();
-
-    rs = con.createStatement().executeQuery("select a from testnumeric where t = '-9223372036854775808'");
-    assertTrue(rs.next());
-    assertEquals(new BigDecimal("-9223372036854775808"), rs.getBigDecimal(1));
-    rs.close();
-
-    rs = con.createStatement().executeQuery("select a from testnumeric where t = '9223372036854775808'");
-    assertTrue(rs.next());
-    assertEquals(new BigDecimal("9223372036854775808"), rs.getBigDecimal(1));
-    rs.close();
-
-    rs = con.createStatement().executeQuery("select a from testnumeric where t = '-9223372036854775809'");
-    assertTrue(rs.next());
-    assertEquals(new BigDecimal("-9223372036854775809"), rs.getBigDecimal(1));
-    rs.close();
-
-    rs = con.createStatement().executeQuery("select a from testnumeric where t = '10223372036850000000'");
-    assertTrue(rs.next());
-    assertEquals(new BigDecimal("10223372036850000000"), rs.getBigDecimal(1));
-    rs.close();
-  }
-
-  @Test
-  public void testParameters() throws SQLException {
-    Statement stmt =
-        con.createStatement(ResultSet.TYPE_SCROLL_SENSITIVE, ResultSet.CONCUR_UPDATABLE);
-    stmt.setFetchSize(100);
-    stmt.setFetchDirection(ResultSet.FETCH_UNKNOWN);
-
-    ResultSet rs = stmt.executeQuery("SELECT * FROM testrs");
-
-    assertEquals(ResultSet.CONCUR_UPDATABLE, stmt.getResultSetConcurrency());
-    assertEquals(ResultSet.TYPE_SCROLL_SENSITIVE, stmt.getResultSetType());
-    assertEquals(100, stmt.getFetchSize());
-    assertEquals(ResultSet.FETCH_UNKNOWN, stmt.getFetchDirection());
-
-    assertEquals(ResultSet.CONCUR_UPDATABLE, rs.getConcurrency());
-    assertEquals(ResultSet.TYPE_SCROLL_SENSITIVE, rs.getType());
-    assertEquals(100, rs.getFetchSize());
-    assertEquals(ResultSet.FETCH_UNKNOWN, rs.getFetchDirection());
-
-    rs.close();
-    stmt.close();
-  }
-
-  @Test
-  public void testCreateStatementWithInvalidResultSetParams() throws SQLException {
-    assertThrows(PSQLException.class, () -> con.createStatement(-1, -1,-1));
-  }
-
-  @Test
-  public void testCreateStatementWithInvalidResultSetConcurrency() throws SQLException {
-    assertThrows(PSQLException.class, () -> con.createStatement( ResultSet.TYPE_SCROLL_INSENSITIVE, -1) );
-  }
-
-  @Test
-  public void testCreateStatementWithInvalidResultSetHoldability() throws SQLException {
-    assertThrows(PSQLException.class, () -> con.createStatement( ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE, -1) );
-  }
-
-  @Test
-  public void testPrepareStatementWithInvalidResultSetParams() throws SQLException {
-    assertThrows(PSQLException.class, () -> con.prepareStatement("SELECT id FROM testrs", -1, -1,-1));
-  }
-
-  @Test
-  public void testPrepareStatementWithInvalidResultSetConcurrency() throws SQLException {
-    assertThrows(PSQLException.class, () -> con.prepareStatement("SELECT id FROM testrs", ResultSet.TYPE_SCROLL_INSENSITIVE, -1) );
-  }
-
-  @Test
-  public void testPrepareStatementWithInvalidResultSetHoldability() throws SQLException {
-    assertThrows(PSQLException.class, () -> con.prepareStatement("SELECT id FROM testrs", ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE, -1) );
-  }
-
-  @Test
-  public void testPrepareCallWithInvalidResultSetParams() throws SQLException {
-    assertThrows(PSQLException.class, () -> con.prepareCall("SELECT id FROM testrs", -1, -1,-1));
-  }
-
-  @Test
-  public void testPrepareCallWithInvalidResultSetConcurrency() throws SQLException {
-    assertThrows(PSQLException.class, () -> con.prepareCall("SELECT id FROM testrs", ResultSet.TYPE_SCROLL_INSENSITIVE, -1) );
-  }
-
-  @Test
-  public void testPrepareCallWithInvalidResultSetHoldability() throws SQLException {
-    assertThrows(PSQLException.class, () -> con.prepareCall("SELECT id FROM testrs", ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE, -1) );
-  }
-
-  @Test
-  public void testZeroRowResultPositioning() throws SQLException {
-    Statement stmt =
-        con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE);
-    ResultSet rs =
-        stmt.executeQuery("SELECT * FROM pg_database WHERE datname='nonexistentdatabase'");
-    assertTrue(!rs.previous());
-    assertTrue(!rs.previous());
-    assertTrue(!rs.next());
-    assertTrue(!rs.next());
-    assertTrue(!rs.next());
-    assertTrue(!rs.next());
-    assertTrue(!rs.next());
-    assertTrue(!rs.previous());
-    assertTrue(!rs.first());
-    assertTrue(!rs.last());
-    assertEquals(0, rs.getRow());
-    assertTrue(!rs.absolute(1));
-    assertTrue(!rs.relative(1));
-    assertTrue(!rs.isBeforeFirst());
-    assertTrue(!rs.isAfterLast());
-    assertTrue(!rs.isFirst());
-    assertTrue(!rs.isLast());
-    rs.close();
-    stmt.close();
-  }
-
-  @Test
-  public void testRowResultPositioning() throws SQLException {
-    Statement stmt =
-        con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE);
-    // Create a one row result set.
-    ResultSet rs = stmt.executeQuery("SELECT * FROM pg_database WHERE datname='template1'");
-
-    assertTrue(rs.isBeforeFirst());
-    assertTrue(!rs.isAfterLast());
-    assertTrue(!rs.isFirst());
-    assertTrue(!rs.isLast());
-
-    assertTrue(rs.next());
-
-    assertTrue(!rs.isBeforeFirst());
-    assertTrue(!rs.isAfterLast());
-    assertTrue(rs.isFirst());
-    assertTrue(rs.isLast());
-
-    assertTrue(!rs.next());
-
-    assertTrue(!rs.isBeforeFirst());
-    assertTrue(rs.isAfterLast());
-    assertTrue(!rs.isFirst());
-    assertTrue(!rs.isLast());
-
-    assertTrue(rs.previous());
-
-    assertTrue(!rs.isBeforeFirst());
-    assertTrue(!rs.isAfterLast());
-    assertTrue(rs.isFirst());
-    assertTrue(rs.isLast());
-
-    assertTrue(rs.absolute(1));
-
-    assertTrue(!rs.isBeforeFirst());
-    assertTrue(!rs.isAfterLast());
-    assertTrue(rs.isFirst());
-    assertTrue(rs.isLast());
-
-    assertTrue(!rs.absolute(0));
-
-    assertTrue(rs.isBeforeFirst());
-    assertTrue(!rs.isAfterLast());
-    assertTrue(!rs.isFirst());
-    assertTrue(!rs.isLast());
-
-    assertTrue(!rs.absolute(2));
-
-    assertTrue(!rs.isBeforeFirst());
-    assertTrue(rs.isAfterLast());
-    assertTrue(!rs.isFirst());
-    assertTrue(!rs.isLast());
-
-    rs.close();
-    stmt.close();
-  }
-
-  @Test
-  public void testForwardOnlyExceptions() throws SQLException {
-    // Test that illegal operations on a TYPE_FORWARD_ONLY resultset
-    // correctly result in throwing an exception.
-    Statement stmt = con.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
-    ResultSet rs = stmt.executeQuery("SELECT * FROM testnumeric");
-
-    try {
-      rs.absolute(1);
-      fail("absolute() on a TYPE_FORWARD_ONLY resultset did not throw an exception");
-    } catch (SQLException e) {
-    }
-    try {
-      rs.afterLast();
-      fail(
-          "afterLast() on a TYPE_FORWARD_ONLY resultset did not throw an exception on a TYPE_FORWARD_ONLY resultset");
-    } catch (SQLException e) {
-    }
-    try {
-      rs.beforeFirst();
-      fail("beforeFirst() on a TYPE_FORWARD_ONLY resultset did not throw an exception");
-    } catch (SQLException e) {
-    }
-    try {
-      rs.first();
-      fail("first() on a TYPE_FORWARD_ONLY resultset did not throw an exception");
-    } catch (SQLException e) {
-    }
-    try {
-      rs.last();
-      fail("last() on a TYPE_FORWARD_ONLY resultset did not throw an exception");
-    } catch (SQLException e) {
-    }
-    try {
-      rs.previous();
-      fail("previous() on a TYPE_FORWARD_ONLY resultset did not throw an exception");
-    } catch (SQLException e) {
-    }
-    try {
-      rs.relative(1);
-      fail("relative() on a TYPE_FORWARD_ONLY resultset did not throw an exception");
-    } catch (SQLException e) {
-    }
-
-    try {
-      rs.setFetchDirection(ResultSet.FETCH_REVERSE);
-      fail(
-          "setFetchDirection(FETCH_REVERSE) on a TYPE_FORWARD_ONLY resultset did not throw an exception");
-    } catch (SQLException e) {
-    }
-
-    try {
-      rs.setFetchDirection(ResultSet.FETCH_UNKNOWN);
-      fail(
-          "setFetchDirection(FETCH_UNKNOWN) on a TYPE_FORWARD_ONLY resultset did not throw an exception");
-    } catch (SQLException e) {
-    }
-
-    rs.close();
-    stmt.close();
-  }
-
-  @Test
-  public void testCaseInsensitiveFindColumn() throws SQLException {
-    Statement stmt = con.createStatement();
-    ResultSet rs = stmt.executeQuery("SELECT id, id AS \"ID2\" FROM testrs");
-    assertEquals(1, rs.findColumn("id"));
-    assertEquals(1, rs.findColumn("ID"));
-    assertEquals(1, rs.findColumn("Id"));
-    assertEquals(2, rs.findColumn("id2"));
-    assertEquals(2, rs.findColumn("ID2"));
-    assertEquals(2, rs.findColumn("Id2"));
-    try {
-      rs.findColumn("id3");
-      fail("There isn't an id3 column in the ResultSet.");
-    } catch (SQLException sqle) {
-    }
-  }
-
-  @Test
-  public void testGetOutOfBounds() throws SQLException {
-    Statement stmt = con.createStatement();
-    ResultSet rs = stmt.executeQuery("SELECT id FROM testrs");
-    assertTrue(rs.next());
-
-    try {
-      rs.getInt(-9);
-    } catch (SQLException sqle) {
-    }
-
-    try {
-      rs.getInt(1000);
-    } catch (SQLException sqle) {
-    }
-  }
-
-  @Test
-  public void testClosedResult() throws SQLException {
-    Statement stmt =
-        con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE);
-    ResultSet rs = stmt.executeQuery("SELECT id FROM testrs");
-    rs.close();
-
-    rs.close(); // Closing twice is allowed.
-    try {
-      rs.getInt(1);
-      fail("Expected SQLException");
-    } catch (SQLException e) {
-    }
-    try {
-      rs.getInt("id");
-      fail("Expected SQLException");
-    } catch (SQLException e) {
-    }
-    try {
-      rs.getType();
-      fail("Expected SQLException");
-    } catch (SQLException e) {
-    }
-    try {
-      rs.wasNull();
-      fail("Expected SQLException");
-    } catch (SQLException e) {
-    }
-    try {
-      rs.absolute(3);
-      fail("Expected SQLException");
-    } catch (SQLException e) {
-    }
-    try {
-      rs.isBeforeFirst();
-      fail("Expected SQLException");
-    } catch (SQLException e) {
-    }
-    try {
-      rs.setFetchSize(10);
-      fail("Expected SQLException");
-    } catch (SQLException e) {
-    }
-    try {
-      rs.getMetaData();
-      fail("Expected SQLException");
-    } catch (SQLException e) {
-    }
-    try {
-      rs.rowUpdated();
-      fail("Expected SQLException");
-    } catch (SQLException e) {
-    }
-    try {
-      rs.updateInt(1, 1);
-      fail("Expected SQLException");
-    } catch (SQLException e) {
-    }
-    try {
-      rs.moveToInsertRow();
-      fail("Expected SQLException");
-    } catch (SQLException e) {
-    }
-    try {
-      rs.clearWarnings();
-      fail("Expected SQLException");
-    } catch (SQLException e) {
-    }
-  }
-
-  /*
-   * The JDBC spec says when you have duplicate column names, the first one should be returned.
-   */
-  @Test
-  public void testDuplicateColumnNameOrder() throws SQLException {
-    Statement stmt = con.createStatement();
-    ResultSet rs = stmt.executeQuery("SELECT 1 AS a, 2 AS a");
-    assertTrue(rs.next());
-    assertEquals(1, rs.getInt("a"));
-  }
-
-  @Test
-  public void testTurkishLocale() throws SQLException {
-    Locale current = Locale.getDefault();
-    try {
-      Locale.setDefault(new Locale("tr", "TR"));
-      Statement stmt = con.createStatement();
-      ResultSet rs = stmt.executeQuery("SELECT id FROM testrs");
-      int sum = 0;
-      while (rs.next()) {
-        sum += rs.getInt("ID");
-      }
-      rs.close();
-      assertEquals(25, sum);
-    } finally {
-      Locale.setDefault(current);
-    }
-  }
-
-  @Test
-  public void testUpdateWithPGobject() throws SQLException {
-    Statement stmt =
-        con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE);
-
-    ResultSet rs = stmt.executeQuery("select * from testpgobject where id = 1");
-    assertTrue(rs.next());
-    assertEquals("2010-11-03", rs.getDate("d").toString());
-
-    PGobject pgobj = new PGobject();
-    pgobj.setType("date");
-    pgobj.setValue("2014-12-23");
-    rs.updateObject("d", pgobj);
-    rs.updateRow();
-    rs.close();
-
-    ResultSet rs1 = stmt.executeQuery("select * from testpgobject where id = 1");
-    assertTrue(rs1.next());
-    assertEquals("2014-12-23", rs1.getDate("d").toString());
-    rs1.close();
-
-    stmt.close();
-  }
-
-  /**
-   * Test the behavior of the result set column mapping cache for simple statements.
-   */
-  @Test
-  public void testStatementResultSetColumnMappingCache() throws SQLException {
-    Statement stmt = con.createStatement();
-    ResultSet rs = stmt.executeQuery("select * from testrs");
-    Map<String, Integer> columnNameIndexMap;
-    columnNameIndexMap = getResultSetColumnNameIndexMap(rs);
-    assertEquals(null, columnNameIndexMap);
-    assertTrue(rs.next());
-    columnNameIndexMap = getResultSetColumnNameIndexMap(rs);
-    assertEquals(null, columnNameIndexMap);
-    rs.getInt("ID");
-    columnNameIndexMap = getResultSetColumnNameIndexMap(rs);
-    assertNotNull(columnNameIndexMap);
-    rs.getInt("id");
-    assertSame(columnNameIndexMap, getResultSetColumnNameIndexMap(rs));
-    rs.close();
-    rs = stmt.executeQuery("select * from testrs");
-    columnNameIndexMap = getResultSetColumnNameIndexMap(rs);
-    assertEquals(null, columnNameIndexMap);
-    assertTrue(rs.next());
-    rs.getInt("Id");
-    columnNameIndexMap = getResultSetColumnNameIndexMap(rs);
-    assertNotNull(columnNameIndexMap);
-    rs.close();
-    stmt.close();
-  }
-
-  /**
-   * Test the behavior of the result set column mapping cache for prepared statements.
-   */
-  @Test
-  public void testPreparedStatementResultSetColumnMappingCache() throws SQLException {
-    PreparedStatement pstmt = con.prepareStatement("SELECT id FROM testrs");
-    ResultSet rs = pstmt.executeQuery();
-    Map<String, Integer> columnNameIndexMap;
-    columnNameIndexMap = getResultSetColumnNameIndexMap(rs);
-    assertEquals(null, columnNameIndexMap);
-    assertTrue(rs.next());
-    columnNameIndexMap = getResultSetColumnNameIndexMap(rs);
-    assertEquals(null, columnNameIndexMap);
-    rs.getInt("id");
-    columnNameIndexMap = getResultSetColumnNameIndexMap(rs);
-    assertNotNull(columnNameIndexMap);
-    rs.close();
-    rs = pstmt.executeQuery();
-    assertTrue(rs.next());
-    columnNameIndexMap = getResultSetColumnNameIndexMap(rs);
-    assertEquals(null, columnNameIndexMap);
-    rs.getInt("id");
-    columnNameIndexMap = getResultSetColumnNameIndexMap(rs);
-    assertNotNull(columnNameIndexMap);
-    rs.close();
-    pstmt.close();
-  }
-
-  /**
-   * Test the behavior of the result set column mapping cache for prepared statements once the
-   * statement is named.
-   */
-  @Test
-  public void testNamedPreparedStatementResultSetColumnMappingCache() throws SQLException {
-    assumeTrue("Simple protocol only mode does not support server-prepared statements",
-        preferQueryMode != PreferQueryMode.SIMPLE);
-    PreparedStatement pstmt = con.prepareStatement("SELECT id FROM testrs");
-    ResultSet rs;
-    // Make sure the prepared statement is named.
-    // This ensures column mapping cache is reused across different result sets.
-    for (int i = 0; i < 5; i++) {
-      rs = pstmt.executeQuery();
-      rs.close();
-    }
-    rs = pstmt.executeQuery();
-    assertTrue(rs.next());
-    rs.getInt("id");
-    Map<String, Integer> columnNameIndexMap;
-    columnNameIndexMap = getResultSetColumnNameIndexMap(rs);
-    assertNotNull(columnNameIndexMap);
-    rs.close();
-    rs = pstmt.executeQuery();
-    assertTrue(rs.next());
-    rs.getInt("id");
-    assertSame(
-        "Cached mapping should be same between different result sets of same named prepared statement",
-        columnNameIndexMap, getResultSetColumnNameIndexMap(rs));
-    rs.close();
-    pstmt.close();
-  }
-
-  @SuppressWarnings("unchecked")
-  private Map<String, Integer> getResultSetColumnNameIndexMap(ResultSet stmt) {
-    try {
-      Field columnNameIndexMapField = stmt.getClass().getDeclaredField("columnNameIndexMap");
-      columnNameIndexMapField.setAccessible(true);
-      return (Map<String, Integer>) columnNameIndexMapField.get(stmt);
-    } catch (Exception e) {
-    }
-    return null;
-  }
-
-  private static class SelectTimestampManyTimes implements Callable<Integer> {
-
-    private final Connection connection;
-    private final int expectedYear;
-
-    protected SelectTimestampManyTimes(Connection connection, int expectedYear) {
-      this.connection = connection;
-      this.expectedYear = expectedYear;
+        return ids;
     }
 
     @Override
-    public Integer call() throws SQLException {
-      int year = expectedYear;
-      try (Statement statement = connection.createStatement()) {
-        for (int i = 0; i < 10; i++) {
-          try (ResultSet resultSet = statement.executeQuery(
-              String.format("SELECT unnest(array_fill('8/10/%d'::timestamp, ARRAY[%d]))",
-                  expectedYear, 500))) {
-            while (resultSet.next()) {
-              Timestamp d = resultSet.getTimestamp(1);
-              year = 1900 + d.getYear();
-              if (year != expectedYear) {
-                return year;
-              }
-            }
-          }
-        }
-      }
-      return year;
+    public void setUp() throws Exception {
+        super.setUp();
+        Statement stmt = con.createStatement();
+
+        TestUtil.createTable(con, "testrs", "id integer");
+
+        stmt.executeUpdate("INSERT INTO testrs VALUES (1)");
+        stmt.executeUpdate("INSERT INTO testrs VALUES (2)");
+        stmt.executeUpdate("INSERT INTO testrs VALUES (3)");
+        stmt.executeUpdate("INSERT INTO testrs VALUES (4)");
+        stmt.executeUpdate("INSERT INTO testrs VALUES (6)");
+        stmt.executeUpdate("INSERT INTO testrs VALUES (9)");
+
+        TestUtil.createTable(con, "teststring", "a text");
+        stmt.executeUpdate("INSERT INTO teststring VALUES ('12345')");
+
+        TestUtil.createTable(con, "testint", "a int");
+        stmt.executeUpdate("INSERT INTO testint VALUES (12345)");
+
+        // Boolean Tests
+        TestUtil.createTable(con, "testbool", "a boolean, b int");
+        stmt.executeUpdate("INSERT INTO testbool VALUES(true, 1)");
+        stmt.executeUpdate("INSERT INTO testbool VALUES(false, 0)");
+
+        TestUtil.createTable(con, "testboolstring", "a varchar(30), b boolean");
+        stmt.executeUpdate("INSERT INTO testboolstring VALUES('1 ', true)");
+        stmt.executeUpdate("INSERT INTO testboolstring VALUES('0', false)");
+        stmt.executeUpdate("INSERT INTO testboolstring VALUES(' t', true)");
+        stmt.executeUpdate("INSERT INTO testboolstring VALUES('f', false)");
+        stmt.executeUpdate("INSERT INTO testboolstring VALUES('True', true)");
+        stmt.executeUpdate("INSERT INTO testboolstring VALUES('      False   ', false)");
+        stmt.executeUpdate("INSERT INTO testboolstring VALUES('yes', true)");
+        stmt.executeUpdate("INSERT INTO testboolstring VALUES('  no  ', false)");
+        stmt.executeUpdate("INSERT INTO testboolstring VALUES('y', true)");
+        stmt.executeUpdate("INSERT INTO testboolstring VALUES('n', false)");
+        stmt.executeUpdate("INSERT INTO testboolstring VALUES('oN', true)");
+        stmt.executeUpdate("INSERT INTO testboolstring VALUES('oFf', false)");
+        stmt.executeUpdate("INSERT INTO testboolstring VALUES('OK', null)");
+        stmt.executeUpdate("INSERT INTO testboolstring VALUES('NOT', null)");
+        stmt.executeUpdate("INSERT INTO testboolstring VALUES('not a boolean', null)");
+        stmt.executeUpdate("INSERT INTO testboolstring VALUES('1.0', null)");
+        stmt.executeUpdate("INSERT INTO testboolstring VALUES('0.0', null)");
+
+        TestUtil.createTable(con, "testboolfloat", "i int, a float4, b boolean");
+        stmt.executeUpdate("INSERT INTO testboolfloat VALUES(1, '1.0'::real, true)");
+        stmt.executeUpdate("INSERT INTO testboolfloat VALUES(2, '0.0'::real, false)");
+        stmt.executeUpdate("INSERT INTO testboolfloat VALUES(3, 1.000::real, true)");
+        stmt.executeUpdate("INSERT INTO testboolfloat VALUES(4, 0.000::real, false)");
+        stmt.executeUpdate("INSERT INTO testboolfloat VALUES(5, '1.001'::real, null)");
+        stmt.executeUpdate("INSERT INTO testboolfloat VALUES(6, '-1.001'::real, null)");
+        stmt.executeUpdate("INSERT INTO testboolfloat VALUES(7, 123.4::real, null)");
+        stmt.executeUpdate("INSERT INTO testboolfloat VALUES(8, 1.234e2::real, null)");
+        stmt.executeUpdate("INSERT INTO testboolfloat VALUES(9, 100.00e-2::real, true)");
+        stmt.executeUpdate("INSERT INTO testboolfloat VALUES(10, '9223371487098961921', null)");
+        stmt.executeUpdate("INSERT INTO testboolfloat VALUES(11, '10223372036850000000', null)");
+        String floatVal = Float.toString(StrictMath.nextDown(Long.MAX_VALUE - 1));
+        stmt.executeUpdate("INSERT INTO testboolfloat VALUES(12, " + floatVal + ", null)");
+        floatVal = Float.toString(StrictMath.nextDown(Long.MAX_VALUE + 1));
+        stmt.executeUpdate("INSERT INTO testboolfloat VALUES(13, " + floatVal + ", null)");
+        floatVal = Float.toString(StrictMath.nextUp(Long.MIN_VALUE - 1));
+        stmt.executeUpdate("INSERT INTO testboolfloat VALUES(14, " + floatVal + ", null)");
+        floatVal = Float.toString(StrictMath.nextUp(Long.MIN_VALUE + 1));
+        stmt.executeUpdate("INSERT INTO testboolfloat VALUES(15, " + floatVal + ", null)");
+
+        TestUtil.createTable(con, "testboolint", "a bigint, b boolean");
+        stmt.executeUpdate("INSERT INTO testboolint VALUES(1, true)");
+        stmt.executeUpdate("INSERT INTO testboolint VALUES(0, false)");
+        stmt.executeUpdate("INSERT INTO testboolint VALUES(-1, null)");
+        stmt.executeUpdate("INSERT INTO testboolint VALUES(9223372036854775807, null)");
+        stmt.executeUpdate("INSERT INTO testboolint VALUES(-9223372036854775808, null)");
+
+        // End Boolean Tests
+
+        // TestUtil.createTable(con, "testbit", "a bit");
+
+        TestUtil.createTable(con, "testnumeric", "t text, a numeric");
+        stmt.executeUpdate("INSERT INTO testnumeric VALUES('1.0', '1.0')");
+        stmt.executeUpdate("INSERT INTO testnumeric VALUES('0.0', '0.0')");
+        stmt.executeUpdate("INSERT INTO testnumeric VALUES('-1.0', '-1.0')");
+        stmt.executeUpdate("INSERT INTO testnumeric VALUES('1.2', '1.2')");
+        stmt.executeUpdate("INSERT INTO testnumeric VALUES('-2.5', '-2.5')");
+        stmt.executeUpdate("INSERT INTO testnumeric VALUES('0.000000000000000000000000000990', '0.000000000000000000000000000990')");
+        stmt.executeUpdate("INSERT INTO testnumeric VALUES('10.0000000000099', '10.0000000000099')");
+        stmt.executeUpdate("INSERT INTO testnumeric VALUES('.10000000000000', '.10000000000000')");
+        stmt.executeUpdate("INSERT INTO testnumeric VALUES('.10', '.10')");
+        stmt.executeUpdate("INSERT INTO testnumeric VALUES('1.10000000000000', '1.10000000000000')");
+        stmt.executeUpdate("INSERT INTO testnumeric VALUES('99999.2', '99999.2')");
+        stmt.executeUpdate("INSERT INTO testnumeric VALUES('99999', '99999')");
+        stmt.executeUpdate("INSERT INTO testnumeric VALUES('-99999.2', '-99999.2')");
+        stmt.executeUpdate("INSERT INTO testnumeric VALUES('-99999', '-99999')");
+
+        // Integer.MaxValue
+        stmt.execute("INSERT INTO testnumeric VALUES('2147483647', '2147483647')");
+
+        // Integer.MinValue
+        stmt.execute("INSERT INTO testnumeric VALUES( '-2147483648', '-2147483648')");
+
+        stmt.executeUpdate("INSERT INTO testnumeric VALUES('2147483648', '2147483648')");
+        stmt.executeUpdate("INSERT INTO testnumeric VALUES('-2147483649', '-2147483649')");
+
+        // Long.MaxValue
+        stmt.executeUpdate("INSERT INTO testnumeric VALUES('9223372036854775807','9223372036854775807')");
+        stmt.executeUpdate("INSERT INTO testnumeric VALUES('9223372036854775807.9', '9223372036854775807.9')");
+
+        // Long.MinValue
+        stmt.executeUpdate("INSERT INTO testnumeric VALUES('-9223372036854775808', '-9223372036854775808')");
+
+        // Long.MaxValue +1
+        stmt.executeUpdate("INSERT INTO testnumeric VALUES('9223372036854775808', '9223372036854775808')");
+
+        // Long.MinValue - 1
+        stmt.executeUpdate("INSERT INTO testnumeric VALUES('-9223372036854775809', '-9223372036854775809')");
+
+        stmt.executeUpdate("INSERT INTO testnumeric VALUES('10223372036850000000', '10223372036850000000')");
+
+        TestUtil.createTable(con, "testpgobject", "id integer NOT NULL, d date, PRIMARY KEY (id)");
+        stmt.execute("INSERT INTO testpgobject VALUES(1, '2010-11-3')");
+
+        stmt.close();
     }
 
-  }
+    @Override
+    public void tearDown() throws SQLException {
+        TestUtil.dropTable(con, "testrs");
+        TestUtil.dropTable(con, "teststring");
+        TestUtil.dropTable(con, "testint");
+        // TestUtil.dropTable(con, "testbit");
+        TestUtil.dropTable(con, "testboolstring");
+        TestUtil.dropTable(con, "testboolfloat");
+        TestUtil.dropTable(con, "testboolint");
+        TestUtil.dropTable(con, "testnumeric");
+        TestUtil.dropTable(con, "testpgobject");
+        super.tearDown();
+    }
 
-  @Test
-  public void testTimestamp() throws InterruptedException, ExecutionException, TimeoutException {
-    ExecutorService e = Executors.newFixedThreadPool(2);
-    Integer year1 = 7777;
-    Future<Integer> future1 = e.submit(new SelectTimestampManyTimes(con, year1));
-    Integer year2 = 2017;
-    Future<Integer> future2 = e.submit(new SelectTimestampManyTimes(con, year2));
-    assertEquals("Year was changed in another thread", year1, future1.get(1, TimeUnit.MINUTES));
-    assertEquals("Year was changed in another thread", year2, future2.get(1, TimeUnit.MINUTES));
-    e.shutdown();
-    e.awaitTermination(1, TimeUnit.MINUTES);
-  }
+    @Test
+    public void testBackward() throws SQLException {
+        Statement stmt =
+                con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY);
+        ResultSet rs = stmt.executeQuery("SELECT * FROM testrs");
+        rs.afterLast();
+        assertTrue(rs.previous());
+        rs.close();
+        stmt.close();
+    }
+
+    @Test
+    public void testAbsolute() throws SQLException {
+        Statement stmt =
+                con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY);
+        ResultSet rs = stmt.executeQuery("SELECT * FROM testrs");
+
+        assertTrue(!rs.absolute(0));
+        assertEquals(0, rs.getRow());
+
+        assertTrue(rs.absolute(-1));
+        assertEquals(6, rs.getRow());
+
+        assertTrue(rs.absolute(1));
+        assertEquals(1, rs.getRow());
+
+        assertTrue(!rs.absolute(-10));
+        assertEquals(0, rs.getRow());
+        assertTrue(rs.next());
+        assertEquals(1, rs.getRow());
+
+        assertTrue(!rs.absolute(10));
+        assertEquals(0, rs.getRow());
+        assertTrue(rs.previous());
+        assertEquals(6, rs.getRow());
+
+        stmt.close();
+    }
+
+    @Test
+    public void testRelative() throws SQLException {
+        Statement stmt =
+                con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY);
+        ResultSet rs = stmt.executeQuery("SELECT * FROM testrs");
+
+        assertTrue(!rs.relative(0));
+        assertEquals(0, rs.getRow());
+        assertTrue(rs.isBeforeFirst());
+
+        assertTrue(rs.relative(2));
+        assertEquals(2, rs.getRow());
+
+        assertTrue(rs.relative(1));
+        assertEquals(3, rs.getRow());
+
+        assertTrue(rs.relative(0));
+        assertEquals(3, rs.getRow());
+
+        assertTrue(!rs.relative(-3));
+        assertEquals(0, rs.getRow());
+        assertTrue(rs.isBeforeFirst());
+
+        assertTrue(rs.relative(4));
+        assertEquals(4, rs.getRow());
+
+        assertTrue(rs.relative(-1));
+        assertEquals(3, rs.getRow());
+
+        assertTrue(!rs.relative(6));
+        assertEquals(0, rs.getRow());
+        assertTrue(rs.isAfterLast());
+
+        assertTrue(rs.relative(-4));
+        assertEquals(3, rs.getRow());
+
+        assertTrue(!rs.relative(-6));
+        assertEquals(0, rs.getRow());
+        assertTrue(rs.isBeforeFirst());
+
+        stmt.close();
+    }
+
+    @Test
+    public void testEmptyResult() throws SQLException {
+        Statement stmt =
+                con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY);
+        ResultSet rs = stmt.executeQuery("SELECT * FROM testrs where id=100");
+        rs.beforeFirst();
+        rs.afterLast();
+        assertTrue(!rs.first());
+        assertTrue(!rs.last());
+        assertTrue(!rs.next());
+    }
+
+    @Test
+    public void testMaxFieldSize() throws SQLException {
+        Statement stmt = con.createStatement();
+        stmt.setMaxFieldSize(2);
+
+        ResultSet rs = stmt.executeQuery("select * from testint");
+
+        // max should not apply to the following since per the spec
+        // it should apply only to binary and char/varchar columns
+        rs.next();
+        assertEquals("12345", rs.getString(1));
+        // getBytes returns 5 bytes for txt transfer, 4 for bin transfer
+        assertTrue(rs.getBytes(1).length >= 4);
+
+        // max should apply to the following since the column is
+        // a varchar column
+        rs = stmt.executeQuery("select * from teststring");
+        rs.next();
+        assertEquals("12", rs.getString(1));
+        assertEquals("12", new String(rs.getBytes(1)));
+    }
+
+    @Test
+    public void testBooleanBool() throws SQLException {
+        testBoolean("testbool", 0);
+        testBoolean("testbool", 1);
+        testBoolean("testbool", 5);
+        testBoolean("testbool", -1);
+    }
+
+    @Test
+    public void testBooleanString() throws SQLException {
+        testBoolean("testboolstring", 0);
+        testBoolean("testboolstring", 1);
+        testBoolean("testboolstring", 5);
+        testBoolean("testboolstring", -1);
+    }
+
+    @Test
+    public void testBooleanFloat() throws SQLException {
+        testBoolean("testboolfloat", 0);
+        testBoolean("testboolfloat", 1);
+        testBoolean("testboolfloat", 5);
+        testBoolean("testboolfloat", -1);
+    }
+
+    @Test
+    public void testBooleanInt() throws SQLException {
+        testBoolean("testboolint", 0);
+        testBoolean("testboolint", 1);
+        testBoolean("testboolint", 5);
+        testBoolean("testboolint", -1);
+    }
+
+    public void testBoolean(String table, int prepareThreshold) throws SQLException {
+        PreparedStatement pstmt = con.prepareStatement("select a, b from " + table);
+        ((org.postgresql.PGStatement) pstmt).setPrepareThreshold(prepareThreshold);
+        ResultSet rs = pstmt.executeQuery();
+        while (rs.next()) {
+            rs.getBoolean(2);
+            Boolean expected = rs.wasNull() ? null : rs.getBoolean(2); // Hack to get SQL NULL
+            if (expected != null) {
+                assertEquals(expected, rs.getBoolean(1));
+            } else {
+                // expected value with null are bad values
+                try {
+                    rs.getBoolean(1);
+                    fail();
+                } catch (SQLException e) {
+                    assertEquals(org.postgresql.util.PSQLState.CANNOT_COERCE.getState(), e.getSQLState());
+                }
+            }
+        }
+        rs.close();
+        pstmt.close();
+    }
+
+    @Test
+    public void testGetBooleanJDBCCompliance() throws SQLException {
+        // The JDBC specification in Table B-6 "Use of ResultSet getter Methods to Retrieve JDBC Data Types"
+        // the getBoolean have this Supported JDBC Type: TINYINT, SMALLINT, INTEGER, BIGINT, REAL, FLOAT,
+        // DOUBLE, DECIMAL, NUMERIC, BIT, BOOLEAN, CHAR, VARCHAR, LONGVARCHAR
+
+        // There is no TINYINT in PostgreSQL
+        testgetBoolean("int2"); // SMALLINT
+        testgetBoolean("int4"); // INTEGER
+        testgetBoolean("int8"); // BIGINT
+        testgetBoolean("float4"); // REAL
+        testgetBoolean("float8"); // FLOAT, DOUBLE
+        testgetBoolean("numeric"); // DECIMAL, NUMERIC
+        testgetBoolean("bpchar"); // CHAR
+        testgetBoolean("varchar"); // VARCHAR
+        testgetBoolean("text"); // LONGVARCHAR?
+    }
+
+    public void testgetBoolean(String dataType) throws SQLException {
+        Statement stmt = con.createStatement();
+        ResultSet rs = stmt.executeQuery("select 1::" + dataType + ", 0::" + dataType + ", 2::" + dataType);
+        assertTrue(rs.next());
+        assertEquals(true, rs.getBoolean(1));
+        assertEquals(false, rs.getBoolean(2));
+
+        try {
+            // The JDBC ResultSet JavaDoc states that only 1 and 0 are valid values, so 2 should return error.
+            rs.getBoolean(3);
+            fail();
+        } catch (SQLException e) {
+            assertEquals(org.postgresql.util.PSQLState.CANNOT_COERCE.getState(), e.getSQLState());
+            // message can be 2 or 2.0 depending on whether binary or text
+            final String message = e.getMessage();
+            if (!"Cannot cast to boolean: \"2.0\"".equals(message)) {
+                assertEquals("Cannot cast to boolean: \"2\"", message);
+            }
+        }
+        rs.close();
+        stmt.close();
+    }
+
+    @Test
+    public void testgetBadBoolean() throws SQLException {
+        testBadBoolean("'2017-03-13 14:25:48.130861'::timestamp", "2017-03-13 14:25:48.130861");
+        testBadBoolean("'2017-03-13'::date", "2017-03-13");
+        testBadBoolean("'2017-03-13 14:25:48.130861'::time", "14:25:48.130861");
+        testBadBoolean("ARRAY[[1,0],[0,1]]", "{{1,0},{0,1}}");
+        testBadBoolean("29::bit(4)", "1101");
+    }
+
+    @Test
+    public void testGetBadUuidBoolean() throws SQLException {
+        assumeTrue(TestUtil.haveMinimumServerVersion(con, ServerVersion.v8_3));
+        testBadBoolean("'a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11'::uuid", "a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11");
+    }
+
+    public void testBadBoolean(String select, String value) throws SQLException {
+        Statement stmt = con.createStatement();
+        ResultSet rs = stmt.executeQuery("select " + select);
+        assertTrue(rs.next());
+        try {
+            rs.getBoolean(1);
+            fail();
+        } catch (SQLException e) {
+            //binary transfer gets different error code and message
+            if (org.postgresql.util.PSQLState.DATA_TYPE_MISMATCH.getState().equals(e.getSQLState())) {
+                final String message = e.getMessage();
+                if (!message.startsWith("Cannot convert the column of type ")) {
+                    fail(message);
+                }
+                if (!message.endsWith(" to requested type boolean.")) {
+                    fail(message);
+                }
+            } else {
+                assertEquals(org.postgresql.util.PSQLState.CANNOT_COERCE.getState(), e.getSQLState());
+                assertEquals("Cannot cast to boolean: \"" + value + "\"", e.getMessage());
+            }
+        }
+        rs.close();
+        stmt.close();
+    }
+
+    @Test
+    public void testgetByte() throws SQLException {
+        ResultSet rs = con.createStatement().executeQuery("select a from testnumeric");
+
+        assertTrue(rs.next());
+        assertEquals(1, rs.getByte(1));
+
+        assertTrue(rs.next());
+        assertEquals(0, rs.getByte(1));
+
+        assertTrue(rs.next());
+        assertEquals(-1, rs.getByte(1));
+
+        assertTrue(rs.next());
+        assertEquals(1, rs.getByte(1));
+
+        assertTrue(rs.next());
+        assertEquals(-2, rs.getByte(1));
+
+        assertTrue(rs.next());
+        assertEquals(0, rs.getByte(1));
+
+        assertTrue(rs.next());
+        assertEquals(10, rs.getByte(1));
+
+        assertTrue(rs.next());
+        assertEquals(0, rs.getByte(1));
+
+        assertTrue(rs.next());
+        assertEquals(0, rs.getByte(1));
+
+        assertTrue(rs.next());
+        assertEquals(1, rs.getByte(1));
+
+        while (rs.next()) {
+            try {
+                rs.getByte(1);
+                fail("Exception expected.");
+            } catch (SQLException e) {
+                assertEquals(e.getSQLState(), "22003");
+            }
+        }
+        rs.close();
+    }
+
+    @Test
+    public void testgetShort() throws SQLException {
+        ResultSet rs = con.createStatement().executeQuery("select a from testnumeric");
+
+        assertTrue(rs.next());
+        assertEquals(1, rs.getShort(1));
+
+        assertTrue(rs.next());
+        assertEquals(0, rs.getShort(1));
+
+        assertTrue(rs.next());
+        assertEquals(-1, rs.getShort(1));
+
+        assertTrue(rs.next());
+        assertEquals(1, rs.getShort(1));
+
+        assertTrue(rs.next());
+        assertEquals(-2, rs.getShort(1));
+
+        assertTrue(rs.next());
+        assertEquals(0, rs.getShort(1));
+
+        assertTrue(rs.next());
+        assertEquals(10, rs.getShort(1));
+
+        assertTrue(rs.next());
+        assertEquals(0, rs.getShort(1));
+
+        assertTrue(rs.next());
+        assertEquals(0, rs.getShort(1));
+
+        assertTrue(rs.next());
+        assertEquals(1, rs.getShort(1));
+
+        while (rs.next()) {
+            try {
+                rs.getShort(1);
+                fail("Exception expected.");
+            } catch (SQLException e) {
+            }
+        }
+        rs.close();
+    }
+
+    @Test
+    public void testgetInt() throws SQLException {
+        ResultSet rs = con.createStatement().executeQuery("select a from testnumeric");
+
+        assertTrue(rs.next());
+        assertEquals(1, rs.getInt(1));
+
+        assertTrue(rs.next());
+        assertEquals(0, rs.getInt(1));
+
+        assertTrue(rs.next());
+        assertEquals(-1, rs.getInt(1));
+
+        assertTrue(rs.next());
+        assertEquals(1, rs.getInt(1));
+
+        assertTrue(rs.next());
+        assertEquals(-2, rs.getInt(1));
+
+        assertTrue(rs.next());
+        assertEquals(0, rs.getInt(1));
+
+        assertTrue(rs.next());
+        assertEquals(10, rs.getInt(1));
+
+        assertTrue(rs.next());
+        assertEquals(0, rs.getInt(1));
+
+        assertTrue(rs.next());
+        assertEquals(0, rs.getInt(1));
+
+        assertTrue(rs.next());
+        assertEquals(1, rs.getInt(1));
+
+        assertTrue(rs.next());
+        assertEquals(99999, rs.getInt(1));
+
+        assertTrue(rs.next());
+        assertEquals(99999, rs.getInt(1));
+
+        assertTrue(rs.next());
+        assertEquals(-99999, rs.getInt(1));
+
+        assertTrue(rs.next());
+        assertEquals(-99999, rs.getInt(1));
+
+        assertTrue(rs.next());
+        assertEquals(Integer.MAX_VALUE, rs.getInt(1));
+
+        assertTrue(rs.next());
+        assertEquals(Integer.MIN_VALUE, rs.getInt(1));
+
+        while (rs.next()) {
+            try {
+                rs.getInt(1);
+                fail("Exception expected." + rs.getString(1));
+            } catch (SQLException e) {
+            }
+        }
+        rs.close();
+        // test for Issue #2748
+        rs = con.createStatement().executeQuery("select 2.0 :: double precision");
+        assertTrue(rs.next());
+        assertEquals(2, rs.getInt(1));
+        rs.close();
+
+    }
+
+    @Test
+    public void testgetLong() throws SQLException {
+        ResultSet rs = null;
+
+        rs = con.createStatement().executeQuery("select a from testnumeric where t = '1.0'");
+        assertTrue(rs.next());
+        assertEquals(1, rs.getLong(1));
+        rs.close();
+
+        rs = con.createStatement().executeQuery("select a from testnumeric where t = '0.0'");
+        assertTrue(rs.next());
+        assertEquals(0, rs.getLong(1));
+        rs.close();
+
+        rs = con.createStatement().executeQuery("select a from testnumeric where t = '-1.0'");
+        assertTrue(rs.next());
+        assertEquals(-1, rs.getLong(1));
+        rs.close();
+
+        rs = con.createStatement().executeQuery("select a from testnumeric where t = '1.2'");
+        assertTrue(rs.next());
+        assertEquals(1, rs.getLong(1));
+        rs.close();
+
+        rs = con.createStatement().executeQuery("select a from testnumeric where t = '-2.5'");
+        assertTrue(rs.next());
+        assertEquals(-2, rs.getLong(1));
+        rs.close();
+
+        rs = con.createStatement().executeQuery("select a from testnumeric where t = '0.000000000000000000000000000990'");
+        assertTrue(rs.next());
+        assertEquals(0, rs.getLong(1));
+        rs.close();
+
+        rs = con.createStatement().executeQuery("select a from testnumeric where t = '10.0000000000099'");
+        assertTrue(rs.next());
+        assertEquals(10, rs.getLong(1));
+        rs.close();
+
+        rs = con.createStatement().executeQuery("select a from testnumeric where t = '.10000000000000'");
+        assertTrue(rs.next());
+        assertEquals(0, rs.getLong(1));
+        rs.close();
+
+        rs = con.createStatement().executeQuery("select a from testnumeric where t = '.10'");
+        assertTrue(rs.next());
+        assertEquals(0, rs.getLong(1));
+        rs.close();
+
+        rs = con.createStatement().executeQuery("select a from testnumeric where t = '1.10000000000000'");
+        assertTrue(rs.next());
+        assertEquals(1, rs.getLong(1));
+        rs.close();
+
+        rs = con.createStatement().executeQuery("select a from testnumeric where t = '99999.2'");
+        assertTrue(rs.next());
+        assertEquals(99999, rs.getLong(1));
+        rs.close();
+
+        rs = con.createStatement().executeQuery("select a from testnumeric where t = '99999'");
+        assertTrue(rs.next());
+        assertEquals(99999, rs.getLong(1));
+        rs.close();
+
+        rs = con.createStatement().executeQuery("select a from testnumeric where t = '-99999.2'");
+        assertTrue(rs.next());
+        assertEquals(-99999, rs.getLong(1));
+        rs.close();
+
+        rs = con.createStatement().executeQuery("select a from testnumeric where t = '-99999'");
+        assertTrue(rs.next());
+        assertEquals(-99999, rs.getLong(1));
+        rs.close();
+
+        rs = con.createStatement().executeQuery("select a from testnumeric where t = '2147483647'");
+        assertTrue(rs.next());
+        assertEquals((Integer.MAX_VALUE), rs.getLong(1));
+        rs.close();
+
+        rs = con.createStatement().executeQuery("select a from testnumeric where t = '-2147483648'");
+        assertTrue(rs.next());
+        assertEquals((Integer.MIN_VALUE), rs.getLong(1));
+        rs.close();
+
+        rs = con.createStatement().executeQuery("select a from testnumeric where t = '2147483648'");
+        assertTrue(rs.next());
+        assertEquals(((long) Integer.MAX_VALUE) + 1, rs.getLong(1));
+        rs.close();
+
+        rs = con.createStatement().executeQuery("select a from testnumeric where t = '-2147483649'");
+        assertTrue(rs.next());
+        assertEquals(((long) Integer.MIN_VALUE) - 1, rs.getLong(1));
+        rs.close();
+
+        rs = con.createStatement().executeQuery("select a from testnumeric where t = '9223372036854775807'");
+        assertTrue(rs.next());
+        assertEquals(Long.MAX_VALUE, rs.getLong(1));
+        rs.close();
+
+        rs = con.createStatement().executeQuery("select a from testnumeric where t = '9223372036854775807.9'");
+        assertTrue(rs.next());
+        assertEquals(Long.MAX_VALUE, rs.getLong(1));
+        rs.close();
+
+        rs = con.createStatement().executeQuery("select a from testnumeric where t = '-9223372036854775808'");
+        assertTrue(rs.next());
+        assertEquals(Long.MIN_VALUE, rs.getLong(1));
+        rs.close();
+
+        rs = con.createStatement().executeQuery("select a from testnumeric where t = '9223372036854775808'");
+        assertTrue(rs.next());
+        try {
+            rs.getLong(1);
+            fail("Exception expected. " + rs.getString(1));
+        } catch (SQLException e) {
+        }
+        rs.close();
+
+        rs = con.createStatement().executeQuery("select a from testnumeric where t = '-9223372036854775809'");
+        assertTrue(rs.next());
+        try {
+            rs.getLong(1);
+            fail("Exception expected. " + rs.getString(1));
+        } catch (SQLException e) {
+        }
+        rs.close();
+
+        rs = con.createStatement().executeQuery("select a from testnumeric where t = '10223372036850000000'");
+        assertTrue(rs.next());
+        try {
+            rs.getLong(1);
+            fail("Exception expected. " + rs.getString(1));
+        } catch (SQLException e) {
+        }
+        rs.close();
+
+        rs = con.createStatement().executeQuery("select i, a from testboolfloat order by i");
+
+        assertTrue(rs.next());
+        assertEquals(1, rs.getLong(2));
+
+        assertTrue(rs.next());
+        assertEquals(0, rs.getLong(2));
+
+        assertTrue(rs.next());
+        assertEquals(1, rs.getLong(2));
+
+        assertTrue(rs.next());
+        assertEquals(0, rs.getLong(2));
+
+        assertTrue(rs.next());
+        assertEquals(1, rs.getLong(2));
+
+        assertTrue(rs.next());
+        assertEquals(-1, rs.getLong(2));
+
+        assertTrue(rs.next());
+        assertEquals(123, rs.getLong(2));
+
+        assertTrue(rs.next());
+        assertEquals(123, rs.getLong(2));
+
+        assertTrue(rs.next());
+        assertEquals(1, rs.getLong(2));
+
+        assertTrue(rs.next());
+        // the string value from database trims the significant digits, leading to larger variance than binary
+        // the liberica jdk gets similar variance, even in forced binary mode
+        assertEquals(9223371487098961921.0, rs.getLong(2), 1.0e11);
+
+        assertTrue(rs.next());
+        do {
+            try {
+                int row = rs.getInt(1);
+                long l = rs.getLong(2);
+                if (row == 12) {
+                    assertEquals(9223371487098961920.0, l, 1.0e11);
+                } else if (row == 15) {
+                    assertEquals(-9223371487098961920.0, l, 1.0e11);
+                } else {
+                    fail("Exception expected." + rs.getString(2));
+                }
+            } catch (SQLException e) {
+            }
+        } while (rs.next());
+
+        rs.close();
+    }
+
+    @Test
+    public void testgetBigDecimal() throws SQLException {
+        ResultSet rs = null;
+
+        rs = con.createStatement().executeQuery("select a from testnumeric where t = '1.0'");
+        assertTrue(rs.next());
+        assertEquals(BigDecimal.valueOf(1.0), rs.getBigDecimal(1));
+        rs.close();
+
+        rs = con.createStatement().executeQuery("select a from testnumeric where t = '0.0'");
+        assertTrue(rs.next());
+        assertEquals(BigDecimal.valueOf(0.0), rs.getBigDecimal(1));
+        rs.close();
+
+        rs = con.createStatement().executeQuery("select a from testnumeric where t = '-1.0'");
+        assertTrue(rs.next());
+        assertEquals(BigDecimal.valueOf(-1.0), rs.getBigDecimal(1));
+        rs.close();
+
+        rs = con.createStatement().executeQuery("select a from testnumeric where t = '1.2'");
+        assertTrue(rs.next());
+        assertEquals(BigDecimal.valueOf(1.2), rs.getBigDecimal(1));
+        rs.close();
+
+        rs = con.createStatement().executeQuery("select a from testnumeric where t = '-2.5'");
+        assertTrue(rs.next());
+        assertEquals(BigDecimal.valueOf(-2.5), rs.getBigDecimal(1));
+        rs.close();
+
+        rs = con.createStatement().executeQuery("select a from testnumeric where t = '0.000000000000000000000000000990'");
+        assertTrue(rs.next());
+        assertEquals(new BigDecimal("0.000000000000000000000000000990"), rs.getBigDecimal(1));
+        rs.close();
+
+        rs = con.createStatement().executeQuery("select a from testnumeric where t = '10.0000000000099'");
+        assertTrue(rs.next());
+        assertEquals(new BigDecimal("10.0000000000099"), rs.getBigDecimal(1));
+        rs.close();
+
+        rs = con.createStatement().executeQuery("select a from testnumeric where t = '.10000000000000'");
+        assertTrue(rs.next());
+        assertEquals(new BigDecimal("0.10000000000000"), rs.getBigDecimal(1));
+        rs.close();
+
+        rs = con.createStatement().executeQuery("select a from testnumeric where t = '.10'");
+        assertTrue(rs.next());
+        assertEquals(new BigDecimal("0.10"), rs.getBigDecimal(1));
+        rs.close();
+
+        rs = con.createStatement().executeQuery("select a from testnumeric where t = '1.10000000000000'");
+        assertTrue(rs.next());
+        assertEquals(new BigDecimal("1.10000000000000"), rs.getBigDecimal(1));
+        rs.close();
+
+        rs = con.createStatement().executeQuery("select a from testnumeric where t = '99999.2'");
+        assertTrue(rs.next());
+        assertEquals(BigDecimal.valueOf(99999.2), rs.getBigDecimal(1));
+        rs.close();
+
+        rs = con.createStatement().executeQuery("select a from testnumeric where t = '99999'");
+        assertTrue(rs.next());
+        assertEquals(BigDecimal.valueOf(99999), rs.getBigDecimal(1));
+        rs.close();
+
+        rs = con.createStatement().executeQuery("select a from testnumeric where t = '-99999.2'");
+        assertTrue(rs.next());
+        assertEquals(BigDecimal.valueOf(-99999.2), rs.getBigDecimal(1));
+        rs.close();
+
+        rs = con.createStatement().executeQuery("select a from testnumeric where t = '-99999'");
+        assertTrue(rs.next());
+        assertEquals(BigDecimal.valueOf(-99999), rs.getBigDecimal(1));
+        rs.close();
+
+        rs = con.createStatement().executeQuery("select a from testnumeric where t = '2147483647'");
+        assertTrue(rs.next());
+        assertEquals(BigDecimal.valueOf(2147483647), rs.getBigDecimal(1));
+        rs.close();
+
+        rs = con.createStatement().executeQuery("select a from testnumeric where t = '-2147483648'");
+        assertTrue(rs.next());
+        assertEquals(BigDecimal.valueOf(-2147483648), rs.getBigDecimal(1));
+        rs.close();
+
+        rs = con.createStatement().executeQuery("select a from testnumeric where t = '2147483648'");
+        assertTrue(rs.next());
+        assertEquals(new BigDecimal("2147483648"), rs.getBigDecimal(1));
+        rs.close();
+
+        rs = con.createStatement().executeQuery("select a from testnumeric where t = '-2147483649'");
+        assertTrue(rs.next());
+        assertEquals(new BigDecimal("-2147483649"), rs.getBigDecimal(1));
+        rs.close();
+
+        rs = con.createStatement().executeQuery("select a from testnumeric where t = '9223372036854775807'");
+        assertTrue(rs.next());
+        assertEquals(new BigDecimal("9223372036854775807"), rs.getBigDecimal(1));
+        rs.close();
+
+        rs = con.createStatement().executeQuery("select a from testnumeric where t = '9223372036854775807.9'");
+        assertTrue(rs.next());
+        assertEquals(new BigDecimal("9223372036854775807.9"), rs.getBigDecimal(1));
+        rs.close();
+
+        rs = con.createStatement().executeQuery("select a from testnumeric where t = '-9223372036854775808'");
+        assertTrue(rs.next());
+        assertEquals(new BigDecimal("-9223372036854775808"), rs.getBigDecimal(1));
+        rs.close();
+
+        rs = con.createStatement().executeQuery("select a from testnumeric where t = '9223372036854775808'");
+        assertTrue(rs.next());
+        assertEquals(new BigDecimal("9223372036854775808"), rs.getBigDecimal(1));
+        rs.close();
+
+        rs = con.createStatement().executeQuery("select a from testnumeric where t = '-9223372036854775809'");
+        assertTrue(rs.next());
+        assertEquals(new BigDecimal("-9223372036854775809"), rs.getBigDecimal(1));
+        rs.close();
+
+        rs = con.createStatement().executeQuery("select a from testnumeric where t = '10223372036850000000'");
+        assertTrue(rs.next());
+        assertEquals(new BigDecimal("10223372036850000000"), rs.getBigDecimal(1));
+        rs.close();
+    }
+
+    @Test
+    public void testParameters() throws SQLException {
+        Statement stmt =
+                con.createStatement(ResultSet.TYPE_SCROLL_SENSITIVE, ResultSet.CONCUR_UPDATABLE);
+        stmt.setFetchSize(100);
+        stmt.setFetchDirection(ResultSet.FETCH_UNKNOWN);
+
+        ResultSet rs = stmt.executeQuery("SELECT * FROM testrs");
+
+        assertEquals(ResultSet.CONCUR_UPDATABLE, stmt.getResultSetConcurrency());
+        assertEquals(ResultSet.TYPE_SCROLL_SENSITIVE, stmt.getResultSetType());
+        assertEquals(100, stmt.getFetchSize());
+        assertEquals(ResultSet.FETCH_UNKNOWN, stmt.getFetchDirection());
+
+        assertEquals(ResultSet.CONCUR_UPDATABLE, rs.getConcurrency());
+        assertEquals(ResultSet.TYPE_SCROLL_SENSITIVE, rs.getType());
+        assertEquals(100, rs.getFetchSize());
+        assertEquals(ResultSet.FETCH_UNKNOWN, rs.getFetchDirection());
+
+        rs.close();
+        stmt.close();
+    }
+
+    @Test
+    public void testCreateStatementWithInvalidResultSetParams() throws SQLException {
+        assertThrows(PSQLException.class, () -> con.createStatement(-1, -1, -1));
+    }
+
+    @Test
+    public void testCreateStatementWithInvalidResultSetConcurrency() throws SQLException {
+        assertThrows(PSQLException.class, () -> con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, -1));
+    }
+
+    @Test
+    public void testCreateStatementWithInvalidResultSetHoldability() throws SQLException {
+        assertThrows(PSQLException.class, () -> con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE, -1));
+    }
+
+    @Test
+    public void testPrepareStatementWithInvalidResultSetParams() throws SQLException {
+        assertThrows(PSQLException.class, () -> con.prepareStatement("SELECT id FROM testrs", -1, -1, -1));
+    }
+
+    @Test
+    public void testPrepareStatementWithInvalidResultSetConcurrency() throws SQLException {
+        assertThrows(PSQLException.class, () -> con.prepareStatement("SELECT id FROM testrs", ResultSet.TYPE_SCROLL_INSENSITIVE, -1));
+    }
+
+    @Test
+    public void testPrepareStatementWithInvalidResultSetHoldability() throws SQLException {
+        assertThrows(PSQLException.class, () -> con.prepareStatement("SELECT id FROM testrs", ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE, -1));
+    }
+
+    @Test
+    public void testPrepareCallWithInvalidResultSetParams() throws SQLException {
+        assertThrows(PSQLException.class, () -> con.prepareCall("SELECT id FROM testrs", -1, -1, -1));
+    }
+
+    @Test
+    public void testPrepareCallWithInvalidResultSetConcurrency() throws SQLException {
+        assertThrows(PSQLException.class, () -> con.prepareCall("SELECT id FROM testrs", ResultSet.TYPE_SCROLL_INSENSITIVE, -1));
+    }
+
+    @Test
+    public void testPrepareCallWithInvalidResultSetHoldability() throws SQLException {
+        assertThrows(PSQLException.class, () -> con.prepareCall("SELECT id FROM testrs", ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE, -1));
+    }
+
+    @Test
+    public void testZeroRowResultPositioning() throws SQLException {
+        Statement stmt =
+                con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE);
+        ResultSet rs =
+                stmt.executeQuery("SELECT * FROM pg_database WHERE datname='nonexistentdatabase'");
+        assertTrue(!rs.previous());
+        assertTrue(!rs.previous());
+        assertTrue(!rs.next());
+        assertTrue(!rs.next());
+        assertTrue(!rs.next());
+        assertTrue(!rs.next());
+        assertTrue(!rs.next());
+        assertTrue(!rs.previous());
+        assertTrue(!rs.first());
+        assertTrue(!rs.last());
+        assertEquals(0, rs.getRow());
+        assertTrue(!rs.absolute(1));
+        assertTrue(!rs.relative(1));
+        assertTrue(!rs.isBeforeFirst());
+        assertTrue(!rs.isAfterLast());
+        assertTrue(!rs.isFirst());
+        assertTrue(!rs.isLast());
+        rs.close();
+        stmt.close();
+    }
+
+    @Test
+    public void testRowResultPositioning() throws SQLException {
+        Statement stmt =
+                con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE);
+        // Create a one row result set.
+        ResultSet rs = stmt.executeQuery("SELECT * FROM pg_database WHERE datname='template1'");
+
+        assertTrue(rs.isBeforeFirst());
+        assertTrue(!rs.isAfterLast());
+        assertTrue(!rs.isFirst());
+        assertTrue(!rs.isLast());
+
+        assertTrue(rs.next());
+
+        assertTrue(!rs.isBeforeFirst());
+        assertTrue(!rs.isAfterLast());
+        assertTrue(rs.isFirst());
+        assertTrue(rs.isLast());
+
+        assertTrue(!rs.next());
+
+        assertTrue(!rs.isBeforeFirst());
+        assertTrue(rs.isAfterLast());
+        assertTrue(!rs.isFirst());
+        assertTrue(!rs.isLast());
+
+        assertTrue(rs.previous());
+
+        assertTrue(!rs.isBeforeFirst());
+        assertTrue(!rs.isAfterLast());
+        assertTrue(rs.isFirst());
+        assertTrue(rs.isLast());
+
+        assertTrue(rs.absolute(1));
+
+        assertTrue(!rs.isBeforeFirst());
+        assertTrue(!rs.isAfterLast());
+        assertTrue(rs.isFirst());
+        assertTrue(rs.isLast());
+
+        assertTrue(!rs.absolute(0));
+
+        assertTrue(rs.isBeforeFirst());
+        assertTrue(!rs.isAfterLast());
+        assertTrue(!rs.isFirst());
+        assertTrue(!rs.isLast());
+
+        assertTrue(!rs.absolute(2));
+
+        assertTrue(!rs.isBeforeFirst());
+        assertTrue(rs.isAfterLast());
+        assertTrue(!rs.isFirst());
+        assertTrue(!rs.isLast());
+
+        rs.close();
+        stmt.close();
+    }
+
+    @Test
+    public void testForwardOnlyExceptions() throws SQLException {
+        // Test that illegal operations on a TYPE_FORWARD_ONLY resultset
+        // correctly result in throwing an exception.
+        Statement stmt = con.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
+        ResultSet rs = stmt.executeQuery("SELECT * FROM testnumeric");
+
+        try {
+            rs.absolute(1);
+            fail("absolute() on a TYPE_FORWARD_ONLY resultset did not throw an exception");
+        } catch (SQLException e) {
+        }
+        try {
+            rs.afterLast();
+            fail(
+                    "afterLast() on a TYPE_FORWARD_ONLY resultset did not throw an exception on a TYPE_FORWARD_ONLY resultset");
+        } catch (SQLException e) {
+        }
+        try {
+            rs.beforeFirst();
+            fail("beforeFirst() on a TYPE_FORWARD_ONLY resultset did not throw an exception");
+        } catch (SQLException e) {
+        }
+        try {
+            rs.first();
+            fail("first() on a TYPE_FORWARD_ONLY resultset did not throw an exception");
+        } catch (SQLException e) {
+        }
+        try {
+            rs.last();
+            fail("last() on a TYPE_FORWARD_ONLY resultset did not throw an exception");
+        } catch (SQLException e) {
+        }
+        try {
+            rs.previous();
+            fail("previous() on a TYPE_FORWARD_ONLY resultset did not throw an exception");
+        } catch (SQLException e) {
+        }
+        try {
+            rs.relative(1);
+            fail("relative() on a TYPE_FORWARD_ONLY resultset did not throw an exception");
+        } catch (SQLException e) {
+        }
+
+        try {
+            rs.setFetchDirection(ResultSet.FETCH_REVERSE);
+            fail(
+                    "setFetchDirection(FETCH_REVERSE) on a TYPE_FORWARD_ONLY resultset did not throw an exception");
+        } catch (SQLException e) {
+        }
+
+        try {
+            rs.setFetchDirection(ResultSet.FETCH_UNKNOWN);
+            fail(
+                    "setFetchDirection(FETCH_UNKNOWN) on a TYPE_FORWARD_ONLY resultset did not throw an exception");
+        } catch (SQLException e) {
+        }
+
+        rs.close();
+        stmt.close();
+    }
+
+    @Test
+    public void testCaseInsensitiveFindColumn() throws SQLException {
+        Statement stmt = con.createStatement();
+        ResultSet rs = stmt.executeQuery("SELECT id, id AS \"ID2\" FROM testrs");
+        assertEquals(1, rs.findColumn("id"));
+        assertEquals(1, rs.findColumn("ID"));
+        assertEquals(1, rs.findColumn("Id"));
+        assertEquals(2, rs.findColumn("id2"));
+        assertEquals(2, rs.findColumn("ID2"));
+        assertEquals(2, rs.findColumn("Id2"));
+        try {
+            rs.findColumn("id3");
+            fail("There isn't an id3 column in the ResultSet.");
+        } catch (SQLException sqle) {
+        }
+    }
+
+    @Test
+    public void testGetOutOfBounds() throws SQLException {
+        Statement stmt = con.createStatement();
+        ResultSet rs = stmt.executeQuery("SELECT id FROM testrs");
+        assertTrue(rs.next());
+
+        try {
+            rs.getInt(-9);
+        } catch (SQLException sqle) {
+        }
+
+        try {
+            rs.getInt(1000);
+        } catch (SQLException sqle) {
+        }
+    }
+
+    @Test
+    public void testClosedResult() throws SQLException {
+        Statement stmt =
+                con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE);
+        ResultSet rs = stmt.executeQuery("SELECT id FROM testrs");
+        rs.close();
+
+        rs.close(); // Closing twice is allowed.
+        try {
+            rs.getInt(1);
+            fail("Expected SQLException");
+        } catch (SQLException e) {
+        }
+        try {
+            rs.getInt("id");
+            fail("Expected SQLException");
+        } catch (SQLException e) {
+        }
+        try {
+            rs.getType();
+            fail("Expected SQLException");
+        } catch (SQLException e) {
+        }
+        try {
+            rs.wasNull();
+            fail("Expected SQLException");
+        } catch (SQLException e) {
+        }
+        try {
+            rs.absolute(3);
+            fail("Expected SQLException");
+        } catch (SQLException e) {
+        }
+        try {
+            rs.isBeforeFirst();
+            fail("Expected SQLException");
+        } catch (SQLException e) {
+        }
+        try {
+            rs.setFetchSize(10);
+            fail("Expected SQLException");
+        } catch (SQLException e) {
+        }
+        try {
+            rs.getMetaData();
+            fail("Expected SQLException");
+        } catch (SQLException e) {
+        }
+        try {
+            rs.rowUpdated();
+            fail("Expected SQLException");
+        } catch (SQLException e) {
+        }
+        try {
+            rs.updateInt(1, 1);
+            fail("Expected SQLException");
+        } catch (SQLException e) {
+        }
+        try {
+            rs.moveToInsertRow();
+            fail("Expected SQLException");
+        } catch (SQLException e) {
+        }
+        try {
+            rs.clearWarnings();
+            fail("Expected SQLException");
+        } catch (SQLException e) {
+        }
+    }
+
+    /*
+     * The JDBC spec says when you have duplicate column names, the first one should be returned.
+     */
+    @Test
+    public void testDuplicateColumnNameOrder() throws SQLException {
+        Statement stmt = con.createStatement();
+        ResultSet rs = stmt.executeQuery("SELECT 1 AS a, 2 AS a");
+        assertTrue(rs.next());
+        assertEquals(1, rs.getInt("a"));
+    }
+
+    @Test
+    public void testTurkishLocale() throws SQLException {
+        Locale current = Locale.getDefault();
+        try {
+            Locale.setDefault(new Locale("tr", "TR"));
+            Statement stmt = con.createStatement();
+            ResultSet rs = stmt.executeQuery("SELECT id FROM testrs");
+            int sum = 0;
+            while (rs.next()) {
+                sum += rs.getInt("ID");
+            }
+            rs.close();
+            assertEquals(25, sum);
+        } finally {
+            Locale.setDefault(current);
+        }
+    }
+
+    @Test
+    public void testUpdateWithPGobject() throws SQLException {
+        Statement stmt =
+                con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE);
+
+        ResultSet rs = stmt.executeQuery("select * from testpgobject where id = 1");
+        assertTrue(rs.next());
+        assertEquals("2010-11-03", rs.getDate("d").toString());
+
+        PGobject pgobj = new PGobject();
+        pgobj.setType("date");
+        pgobj.setValue("2014-12-23");
+        rs.updateObject("d", pgobj);
+        rs.updateRow();
+        rs.close();
+
+        ResultSet rs1 = stmt.executeQuery("select * from testpgobject where id = 1");
+        assertTrue(rs1.next());
+        assertEquals("2014-12-23", rs1.getDate("d").toString());
+        rs1.close();
+
+        stmt.close();
+    }
+
+    /**
+     * Test the behavior of the result set column mapping cache for simple statements.
+     */
+    @Test
+    public void testStatementResultSetColumnMappingCache() throws SQLException {
+        Statement stmt = con.createStatement();
+        ResultSet rs = stmt.executeQuery("select * from testrs");
+        Map<String, Integer> columnNameIndexMap;
+        columnNameIndexMap = getResultSetColumnNameIndexMap(rs);
+        assertEquals(null, columnNameIndexMap);
+        assertTrue(rs.next());
+        columnNameIndexMap = getResultSetColumnNameIndexMap(rs);
+        assertEquals(null, columnNameIndexMap);
+        rs.getInt("ID");
+        columnNameIndexMap = getResultSetColumnNameIndexMap(rs);
+        assertNotNull(columnNameIndexMap);
+        rs.getInt("id");
+        assertSame(columnNameIndexMap, getResultSetColumnNameIndexMap(rs));
+        rs.close();
+        rs = stmt.executeQuery("select * from testrs");
+        columnNameIndexMap = getResultSetColumnNameIndexMap(rs);
+        assertEquals(null, columnNameIndexMap);
+        assertTrue(rs.next());
+        rs.getInt("Id");
+        columnNameIndexMap = getResultSetColumnNameIndexMap(rs);
+        assertNotNull(columnNameIndexMap);
+        rs.close();
+        stmt.close();
+    }
+
+    /**
+     * Test the behavior of the result set column mapping cache for prepared statements.
+     */
+    @Test
+    public void testPreparedStatementResultSetColumnMappingCache() throws SQLException {
+        PreparedStatement pstmt = con.prepareStatement("SELECT id FROM testrs");
+        ResultSet rs = pstmt.executeQuery();
+        Map<String, Integer> columnNameIndexMap;
+        columnNameIndexMap = getResultSetColumnNameIndexMap(rs);
+        assertEquals(null, columnNameIndexMap);
+        assertTrue(rs.next());
+        columnNameIndexMap = getResultSetColumnNameIndexMap(rs);
+        assertEquals(null, columnNameIndexMap);
+        rs.getInt("id");
+        columnNameIndexMap = getResultSetColumnNameIndexMap(rs);
+        assertNotNull(columnNameIndexMap);
+        rs.close();
+        rs = pstmt.executeQuery();
+        assertTrue(rs.next());
+        columnNameIndexMap = getResultSetColumnNameIndexMap(rs);
+        assertEquals(null, columnNameIndexMap);
+        rs.getInt("id");
+        columnNameIndexMap = getResultSetColumnNameIndexMap(rs);
+        assertNotNull(columnNameIndexMap);
+        rs.close();
+        pstmt.close();
+    }
+
+    /**
+     * Test the behavior of the result set column mapping cache for prepared statements once the
+     * statement is named.
+     */
+    @Test
+    public void testNamedPreparedStatementResultSetColumnMappingCache() throws SQLException {
+        assumeTrue("Simple protocol only mode does not support server-prepared statements",
+                preferQueryMode != PreferQueryMode.SIMPLE);
+        PreparedStatement pstmt = con.prepareStatement("SELECT id FROM testrs");
+        ResultSet rs;
+        // Make sure the prepared statement is named.
+        // This ensures column mapping cache is reused across different result sets.
+        for (int i = 0; i < 5; i++) {
+            rs = pstmt.executeQuery();
+            rs.close();
+        }
+        rs = pstmt.executeQuery();
+        assertTrue(rs.next());
+        rs.getInt("id");
+        Map<String, Integer> columnNameIndexMap;
+        columnNameIndexMap = getResultSetColumnNameIndexMap(rs);
+        assertNotNull(columnNameIndexMap);
+        rs.close();
+        rs = pstmt.executeQuery();
+        assertTrue(rs.next());
+        rs.getInt("id");
+        assertSame(
+                "Cached mapping should be same between different result sets of same named prepared statement",
+                columnNameIndexMap, getResultSetColumnNameIndexMap(rs));
+        rs.close();
+        pstmt.close();
+    }
+
+    @SuppressWarnings("unchecked")
+    private Map<String, Integer> getResultSetColumnNameIndexMap(ResultSet stmt) {
+        try {
+            Field columnNameIndexMapField = stmt.getClass().getDeclaredField("columnNameIndexMap");
+            columnNameIndexMapField.setAccessible(true);
+            return (Map<String, Integer>) columnNameIndexMapField.get(stmt);
+        } catch (Exception e) {
+        }
+        return null;
+    }
+
+    @Test
+    public void testTimestamp() throws InterruptedException, ExecutionException, TimeoutException {
+        ExecutorService e = Executors.newFixedThreadPool(2);
+        Integer year1 = 7777;
+        Future<Integer> future1 = e.submit(new SelectTimestampManyTimes(con, year1));
+        Integer year2 = 2017;
+        Future<Integer> future2 = e.submit(new SelectTimestampManyTimes(con, year2));
+        assertEquals("Year was changed in another thread", year1, future1.get(1, TimeUnit.MINUTES));
+        assertEquals("Year was changed in another thread", year2, future2.get(1, TimeUnit.MINUTES));
+        e.shutdown();
+        e.awaitTermination(1, TimeUnit.MINUTES);
+    }
+
+    private static class SelectTimestampManyTimes implements Callable<Integer> {
+
+        private final Connection connection;
+        private final int expectedYear;
+
+        protected SelectTimestampManyTimes(Connection connection, int expectedYear) {
+            this.connection = connection;
+            this.expectedYear = expectedYear;
+        }
+
+        @Override
+        public Integer call() throws SQLException {
+            int year = expectedYear;
+            try (Statement statement = connection.createStatement()) {
+                for (int i = 0; i < 10; i++) {
+                    try (ResultSet resultSet = statement.executeQuery(
+                            String.format("SELECT unnest(array_fill('8/10/%d'::timestamp, ARRAY[%d]))",
+                                    expectedYear, 500))) {
+                        while (resultSet.next()) {
+                            Timestamp d = resultSet.getTimestamp(1);
+                            year = 1900 + d.getYear();
+                            if (year != expectedYear) {
+                                return year;
+                            }
+                        }
+                    }
+                }
+            }
+            return year;
+        }
+
+    }
 
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/SearchPathLookupTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/SearchPathLookupTest.java
index e87b803..28ec594 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/SearchPathLookupTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/SearchPathLookupTest.java
@@ -20,109 +20,109 @@ import java.sql.ResultSet;
 import java.sql.Statement;
 
 /*
-* TestCase to test the internal functionality of org.postgresql.jdbc2.DatabaseMetaData
-*
-*/
+ * TestCase to test the internal functionality of org.postgresql.jdbc2.DatabaseMetaData
+ *
+ */
 class SearchPathLookupTest {
-  private BaseConnection con;
+    private BaseConnection con;
 
-  @BeforeEach
-  void setUp() throws Exception {
-    con = (BaseConnection) TestUtil.openDB();
-  }
-
-  // TODO: make @getMetaData() consider search_path as well
-
-  /**
-   * This usecase is most common, here the object we are searching for is in the current_schema (the
-   * first schema in the search_path).
-   */
-  @Test
-  void searchPathNormalLookup() throws Exception {
-    Statement stmt = con.createStatement();
-    try {
-      TestUtil.createSchema(con, "first_schema");
-      TestUtil.createTable(con, "first_schema.x", "first_schema_field_n int4");
-      TestUtil.createSchema(con, "second_schema");
-      TestUtil.createTable(con, "second_schema.x", "second_schema_field_n text");
-      TestUtil.createSchema(con, "third_schema");
-      TestUtil.createTable(con, "third_schema.x", "third_schema_field_n float");
-      TestUtil.createSchema(con, "last_schema");
-      TestUtil.createTable(con, "last_schema.x", "last_schema_field_n text");
-      stmt.execute("SET search_path TO third_schema;");
-      TypeInfo typeInfo = con.getTypeInfo();
-      int oid = typeInfo.getPGType("x");
-      ResultSet rs = stmt.executeQuery("SELECT 'third_schema.x'::regtype::oid");
-      assertTrue(rs.next());
-      assertEquals(oid, rs.getInt(1));
-      assertFalse(rs.next());
-      TestUtil.dropSchema(con, "first_schema");
-      TestUtil.dropSchema(con, "second_schema");
-      TestUtil.dropSchema(con, "third_schema");
-      TestUtil.dropSchema(con, "last_schema");
-    } finally {
-      if (stmt != null) {
-        stmt.close();
-      }
-      TestUtil.closeDB(con);
+    @BeforeEach
+    void setUp() throws Exception {
+        con = (BaseConnection) TestUtil.openDB();
     }
-  }
 
-  /**
-   * This usecase is for the situations, when an object is located in a schema, that is in the
-   * search_path, but not in the current_schema, for example a public schema or some kind of schema,
-   * that is used for keeping utility objects.
-   */
-  @Test
-  void searchPathHiddenLookup() throws Exception {
-    Statement stmt = con.createStatement();
-    try {
-      TestUtil.createSchema(con, "first_schema");
-      TestUtil.createTable(con, "first_schema.x", "first_schema_field_n int4");
-      TestUtil.createSchema(con, "second_schema");
-      TestUtil.createTable(con, "second_schema.y", "second_schema_field_n text");
-      TestUtil.createSchema(con, "third_schema");
-      TestUtil.createTable(con, "third_schema.x", "third_schema_field_n float");
-      TestUtil.createSchema(con, "last_schema");
-      TestUtil.createTable(con, "last_schema.y", "last_schema_field_n text");
-      stmt.execute("SET search_path TO first_schema, second_schema, last_schema, public;");
-      TypeInfo typeInfo = con.getTypeInfo();
-      int oid = typeInfo.getPGType("y");
-      ResultSet rs = stmt.executeQuery("SELECT 'second_schema.y'::regtype::oid");
-      assertTrue(rs.next());
-      assertEquals(oid, rs.getInt(1));
-      assertFalse(rs.next());
-      TestUtil.dropSchema(con, "first_schema");
-      TestUtil.dropSchema(con, "second_schema");
-      TestUtil.dropSchema(con, "third_schema");
-      TestUtil.dropSchema(con, "last_schema");
-    } finally {
-      if (stmt != null) {
-        stmt.close();
-      }
-      TestUtil.closeDB(con);
-    }
-  }
+    // TODO: make @getMetaData() consider search_path as well
 
-  @Test
-  void searchPathBackwardsCompatibleLookup() throws Exception {
-    Statement stmt = con.createStatement();
-    try {
-      TestUtil.createSchema(con, "first_schema");
-      TestUtil.createTable(con, "first_schema.x", "first_schema_field int4");
-      TestUtil.createSchema(con, "second_schema");
-      TestUtil.createTable(con, "second_schema.x", "second_schema_field text");
-      TypeInfo typeInfo = con.getTypeInfo();
-      int oid = typeInfo.getPGType("x");
-      ResultSet rs = stmt
-          .executeQuery("SELECT oid FROM pg_type WHERE typname = 'x' ORDER BY oid DESC LIMIT 1");
-      assertTrue(rs.next());
-      assertEquals(oid, rs.getInt(1));
-      assertFalse(rs.next());
-      TestUtil.dropSchema(con, "first_schema");
-      TestUtil.dropSchema(con, "second_schema");
-    } finally {
-      TestUtil.closeDB(con);
+    /**
+     * This use case is the most common: the object we are searching for is in the current_schema
+     * (the first schema in the search_path).
+     */
+    @Test
+    void searchPathNormalLookup() throws Exception {
+        Statement stmt = con.createStatement();
+        try {
+            TestUtil.createSchema(con, "first_schema");
+            TestUtil.createTable(con, "first_schema.x", "first_schema_field_n int4");
+            TestUtil.createSchema(con, "second_schema");
+            TestUtil.createTable(con, "second_schema.x", "second_schema_field_n text");
+            TestUtil.createSchema(con, "third_schema");
+            TestUtil.createTable(con, "third_schema.x", "third_schema_field_n float");
+            TestUtil.createSchema(con, "last_schema");
+            TestUtil.createTable(con, "last_schema.x", "last_schema_field_n text");
+            stmt.execute("SET search_path TO third_schema;");
+            TypeInfo typeInfo = con.getTypeInfo();
+            int oid = typeInfo.getPGType("x");
+            ResultSet rs = stmt.executeQuery("SELECT 'third_schema.x'::regtype::oid");
+            assertTrue(rs.next());
+            assertEquals(oid, rs.getInt(1));
+            assertFalse(rs.next());
+            TestUtil.dropSchema(con, "first_schema");
+            TestUtil.dropSchema(con, "second_schema");
+            TestUtil.dropSchema(con, "third_schema");
+            TestUtil.dropSchema(con, "last_schema");
+        } finally {
+            if (stmt != null) {
+                stmt.close();
+            }
+            TestUtil.closeDB(con);
+        }
+    }
+
+    /**
+     * This use case covers situations where an object is located in a schema that is in the
+     * search_path but not in the current_schema, for example the public schema or some kind of
+     * schema that is used for keeping utility objects.
+     */
+    @Test
+    void searchPathHiddenLookup() throws Exception {
+        Statement stmt = con.createStatement();
+        try {
+            TestUtil.createSchema(con, "first_schema");
+            TestUtil.createTable(con, "first_schema.x", "first_schema_field_n int4");
+            TestUtil.createSchema(con, "second_schema");
+            TestUtil.createTable(con, "second_schema.y", "second_schema_field_n text");
+            TestUtil.createSchema(con, "third_schema");
+            TestUtil.createTable(con, "third_schema.x", "third_schema_field_n float");
+            TestUtil.createSchema(con, "last_schema");
+            TestUtil.createTable(con, "last_schema.y", "last_schema_field_n text");
+            stmt.execute("SET search_path TO first_schema, second_schema, last_schema, public;");
+            TypeInfo typeInfo = con.getTypeInfo();
+            int oid = typeInfo.getPGType("y");
+            ResultSet rs = stmt.executeQuery("SELECT 'second_schema.y'::regtype::oid");
+            assertTrue(rs.next());
+            assertEquals(oid, rs.getInt(1));
+            assertFalse(rs.next());
+            TestUtil.dropSchema(con, "first_schema");
+            TestUtil.dropSchema(con, "second_schema");
+            TestUtil.dropSchema(con, "third_schema");
+            TestUtil.dropSchema(con, "last_schema");
+        } finally {
+            if (stmt != null) {
+                stmt.close();
+            }
+            TestUtil.closeDB(con);
+        }
+    }
+
+    @Test
+    void searchPathBackwardsCompatibleLookup() throws Exception {
+        Statement stmt = con.createStatement();
+        try {
+            TestUtil.createSchema(con, "first_schema");
+            TestUtil.createTable(con, "first_schema.x", "first_schema_field int4");
+            TestUtil.createSchema(con, "second_schema");
+            TestUtil.createTable(con, "second_schema.x", "second_schema_field text");
+            TypeInfo typeInfo = con.getTypeInfo();
+            int oid = typeInfo.getPGType("x");
+            ResultSet rs = stmt
+                    .executeQuery("SELECT oid FROM pg_type WHERE typname = 'x' ORDER BY oid DESC LIMIT 1");
+            assertTrue(rs.next());
+            assertEquals(oid, rs.getInt(1));
+            assertFalse(rs.next());
+            TestUtil.dropSchema(con, "first_schema");
+            TestUtil.dropSchema(con, "second_schema");
+        } finally {
+            TestUtil.closeDB(con);
+        }
     }
-  }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ServerCursorTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ServerCursorTest.java
index 1c24f17..99f2173 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ServerCursorTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ServerCursorTest.java
@@ -20,79 +20,79 @@ import java.sql.SQLException;
  */
 public class ServerCursorTest extends BaseTest4 {
 
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-    TestUtil.createTable(con, "test_fetch", "value integer,data bytea");
-    con.setAutoCommit(false);
-  }
+    //CHECKSTYLE: OFF
+    // This string contains a variety of different data:
+    // three Japanese characters representing "japanese" in Japanese
+    // the four characters "\000"
+    // a null character
+    // the seven ascii characters "english"
+    private static final String DATA_STRING = "\u65E5\u672C\u8A9E\\000\u0000english";
 
-  @Override
-  public void tearDown() throws SQLException {
-    con.rollback();
-    con.setAutoCommit(true);
-    TestUtil.dropTable(con, "test_fetch");
-    super.tearDown();
-  }
-
-  protected void createRows(int count) throws Exception {
-    PreparedStatement stmt = con.prepareStatement("insert into test_fetch(value,data) values(?,?)");
-    for (int i = 0; i < count; i++) {
-      stmt.setInt(1, i + 1);
-      stmt.setBytes(2, DATA_STRING.getBytes("UTF8"));
-      stmt.executeUpdate();
-    }
-    con.commit();
-  }
-
-  // Test regular cursor fetching
-  @Test
-  public void testBasicFetch() throws Exception {
-    assumeByteaSupported();
-    createRows(1);
-
-    PreparedStatement stmt =
-        con.prepareStatement("declare test_cursor cursor for select * from test_fetch");
-    stmt.execute();
-
-    stmt = con.prepareStatement("fetch forward from test_cursor");
-    ResultSet rs = stmt.executeQuery();
-    while (rs.next()) {
-      // there should only be one row returned
-      assertEquals("query value error", 1, rs.getInt(1));
-      byte[] dataBytes = rs.getBytes(2);
-      assertEquals("binary data got munged", DATA_STRING, new String(dataBytes, "UTF8"));
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        TestUtil.createTable(con, "test_fetch", "value integer,data bytea");
+        con.setAutoCommit(false);
     }
 
-  }
-
-  // Test binary cursor fetching
-  @Test
-  public void testBinaryFetch() throws Exception {
-    assumeByteaSupported();
-    createRows(1);
-
-    PreparedStatement stmt =
-        con.prepareStatement("declare test_cursor binary cursor for select * from test_fetch");
-    stmt.execute();
-
-    stmt = con.prepareStatement("fetch forward from test_cursor");
-    ResultSet rs = stmt.executeQuery();
-    while (rs.next()) {
-      // there should only be one row returned
-      byte[] dataBytes = rs.getBytes(2);
-      assertEquals("binary data got munged", DATA_STRING, new String(dataBytes, "UTF8"));
+    @Override
+    public void tearDown() throws SQLException {
+        con.rollback();
+        con.setAutoCommit(true);
+        TestUtil.dropTable(con, "test_fetch");
+        super.tearDown();
     }
 
-  }
+    protected void createRows(int count) throws Exception {
+        PreparedStatement stmt = con.prepareStatement("insert into test_fetch(value,data) values(?,?)");
+        for (int i = 0; i < count; i++) {
+            stmt.setInt(1, i + 1);
+            stmt.setBytes(2, DATA_STRING.getBytes("UTF8"));
+            stmt.executeUpdate();
+        }
+        con.commit();
+    }
 
-  //CHECKSTYLE: OFF
-  // This string contains a variety different data:
-  // three japanese characters representing "japanese" in japanese
-  // the four characters "\000"
-  // a null character
-  // the seven ascii characters "english"
-  private static final String DATA_STRING = "\u65E5\u672C\u8A9E\\000\u0000english";
-  //CHECKSTYLE: ON
+    // Test regular cursor fetching
+    @Test
+    public void testBasicFetch() throws Exception {
+        assumeByteaSupported();
+        createRows(1);
+
+        PreparedStatement stmt =
+                con.prepareStatement("declare test_cursor cursor for select * from test_fetch");
+        stmt.execute();
+
+        stmt = con.prepareStatement("fetch forward from test_cursor");
+        ResultSet rs = stmt.executeQuery();
+        while (rs.next()) {
+            // there should only be one row returned
+            assertEquals("query value error", 1, rs.getInt(1));
+            byte[] dataBytes = rs.getBytes(2);
+            assertEquals("binary data got munged", DATA_STRING, new String(dataBytes, "UTF8"));
+        }
+
+    }
+
+    // Test binary cursor fetching
+    @Test
+    public void testBinaryFetch() throws Exception {
+        assumeByteaSupported();
+        createRows(1);
+
+        PreparedStatement stmt =
+                con.prepareStatement("declare test_cursor binary cursor for select * from test_fetch");
+        stmt.execute();
+
+        stmt = con.prepareStatement("fetch forward from test_cursor");
+        ResultSet rs = stmt.executeQuery();
+        while (rs.next()) {
+            // there should only be one row returned
+            byte[] dataBytes = rs.getBytes(2);
+            assertEquals("binary data got munged", DATA_STRING, new String(dataBytes, "UTF8"));
+        }
+
+    }
+    //CHECKSTYLE: ON
 
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ServerErrorTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ServerErrorTest.java
index 9f06d46..658ea82 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ServerErrorTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ServerErrorTest.java
@@ -25,154 +25,154 @@ import java.sql.Statement;
  */
 public class ServerErrorTest extends BaseTest4 {
 
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-    assumeMinimumServerVersion(ServerVersion.v9_3);
-    Statement stmt = con.createStatement();
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        assumeMinimumServerVersion(ServerVersion.v9_3);
+        Statement stmt = con.createStatement();
 
-    stmt.execute("CREATE DOMAIN testdom AS int4 CHECK (value < 10)");
-    TestUtil.createTable(con, "testerr", "id int not null, val testdom not null");
-    stmt.execute("ALTER TABLE testerr ADD CONSTRAINT testerr_pk PRIMARY KEY (id)");
-    stmt.close();
-  }
-
-  @Override
-  public void tearDown() throws SQLException {
-    TestUtil.dropTable(con, "testerr");
-    Statement stmt = con.createStatement();
-    stmt.execute("DROP DOMAIN IF EXISTS testdom");
-    stmt.close();
-    super.tearDown();
-  }
-
-  @Test
-  public void testPrimaryKey() throws Exception {
-    Statement stmt = con.createStatement();
-    stmt.executeUpdate("INSERT INTO testerr (id, val) VALUES (1, 1)");
-    try {
-      stmt.executeUpdate("INSERT INTO testerr (id, val) VALUES (1, 1)");
-      fail("Should have thrown a duplicate key exception.");
-    } catch (SQLException sqle) {
-      ServerErrorMessage err = ((PSQLException) sqle).getServerErrorMessage();
-      assertEquals("public", err.getSchema());
-      assertEquals("testerr", err.getTable());
-      assertEquals("testerr_pk", err.getConstraint());
-      assertEquals(PSQLState.UNIQUE_VIOLATION.getState(), err.getSQLState());
-      assertNull(err.getDatatype());
-      assertNull(err.getColumn());
+        stmt.execute("CREATE DOMAIN testdom AS int4 CHECK (value < 10)");
+        TestUtil.createTable(con, "testerr", "id int not null, val testdom not null");
+        stmt.execute("ALTER TABLE testerr ADD CONSTRAINT testerr_pk PRIMARY KEY (id)");
+        stmt.close();
     }
-    stmt.close();
-  }
 
-  @Test
-  public void testColumn() throws Exception {
-    Statement stmt = con.createStatement();
-    try {
-      stmt.executeUpdate("INSERT INTO testerr (id, val) VALUES (1, NULL)");
-      fail("Should have thrown a not null constraint violation.");
-    } catch (SQLException sqle) {
-      ServerErrorMessage err = ((PSQLException) sqle).getServerErrorMessage();
-      assertEquals("public", err.getSchema());
-      assertEquals("testerr", err.getTable());
-      assertEquals("val", err.getColumn());
-      assertNull(err.getDatatype());
-      assertNull(err.getConstraint());
+    @Override
+    public void tearDown() throws SQLException {
+        TestUtil.dropTable(con, "testerr");
+        Statement stmt = con.createStatement();
+        stmt.execute("DROP DOMAIN IF EXISTS testdom");
+        stmt.close();
+        super.tearDown();
     }
-    stmt.close();
-  }
 
-  @Test
-  public void testDatatype() throws Exception {
-    Statement stmt = con.createStatement();
-    try {
-      stmt.executeUpdate("INSERT INTO testerr (id, val) VALUES (1, 20)");
-      fail("Should have thrown a constraint violation.");
-    } catch (SQLException sqle) {
-      ServerErrorMessage err = ((PSQLException) sqle).getServerErrorMessage();
-      assertEquals("public", err.getSchema());
-      assertEquals("testdom", err.getDatatype());
-      assertEquals("testdom_check", err.getConstraint());
+    @Test
+    public void testPrimaryKey() throws Exception {
+        Statement stmt = con.createStatement();
+        stmt.executeUpdate("INSERT INTO testerr (id, val) VALUES (1, 1)");
+        try {
+            stmt.executeUpdate("INSERT INTO testerr (id, val) VALUES (1, 1)");
+            fail("Should have thrown a duplicate key exception.");
+        } catch (SQLException sqle) {
+            ServerErrorMessage err = ((PSQLException) sqle).getServerErrorMessage();
+            assertEquals("public", err.getSchema());
+            assertEquals("testerr", err.getTable());
+            assertEquals("testerr_pk", err.getConstraint());
+            assertEquals(PSQLState.UNIQUE_VIOLATION.getState(), err.getSQLState());
+            assertNull(err.getDatatype());
+            assertNull(err.getColumn());
+        }
+        stmt.close();
     }
-    stmt.close();
-  }
 
-  @Test
-  public void testNotNullConstraint() throws Exception {
-    Statement stmt = con.createStatement();
-    try {
-      stmt.executeUpdate("INSERT INTO testerr (val) VALUES (1)");
-      fail("Should have thrown a not-null exception.");
-    } catch (SQLException sqle) {
-      ServerErrorMessage err = ((PSQLException) sqle).getServerErrorMessage();
-      assertEquals("public", err.getSchema());
-      assertEquals("testerr", err.getTable());
-      assertEquals("id", err.getColumn());
-      assertEquals(PSQLState.NOT_NULL_VIOLATION.getState(), err.getSQLState());
-      assertNull(err.getDatatype());
+    @Test
+    public void testColumn() throws Exception {
+        Statement stmt = con.createStatement();
+        try {
+            stmt.executeUpdate("INSERT INTO testerr (id, val) VALUES (1, NULL)");
+            fail("Should have thrown a not null constraint violation.");
+        } catch (SQLException sqle) {
+            ServerErrorMessage err = ((PSQLException) sqle).getServerErrorMessage();
+            assertEquals("public", err.getSchema());
+            assertEquals("testerr", err.getTable());
+            assertEquals("val", err.getColumn());
+            assertNull(err.getDatatype());
+            assertNull(err.getConstraint());
+        }
+        stmt.close();
     }
-    stmt.close();
-  }
 
-  @Test
-  public void testForeignKeyConstraint() throws Exception {
-    TestUtil.createTable(con, "testerr_foreign", "id int not null, testerr_id int,"
-        + "CONSTRAINT testerr FOREIGN KEY (testerr_id) references testerr(id)");
-    Statement stmt = con.createStatement();
-    stmt.executeUpdate("INSERT INTO testerr (id, val) VALUES (1, 1)");
-    try {
-      stmt.executeUpdate("INSERT INTO testerr_foreign (id, testerr_id) VALUES (1, 2)");
-      fail("Should have thrown a foreign key exception.");
-    } catch (SQLException sqle) {
-      ServerErrorMessage err = ((PSQLException) sqle).getServerErrorMessage();
-      assertEquals("public", err.getSchema());
-      assertEquals("testerr_foreign", err.getTable());
-      assertEquals(PSQLState.FOREIGN_KEY_VIOLATION.getState(), err.getSQLState());
-      assertNull(err.getDatatype());
-      assertNull(err.getColumn());
+    @Test
+    public void testDatatype() throws Exception {
+        Statement stmt = con.createStatement();
+        try {
+            stmt.executeUpdate("INSERT INTO testerr (id, val) VALUES (1, 20)");
+            fail("Should have thrown a constraint violation.");
+        } catch (SQLException sqle) {
+            ServerErrorMessage err = ((PSQLException) sqle).getServerErrorMessage();
+            assertEquals("public", err.getSchema());
+            assertEquals("testdom", err.getDatatype());
+            assertEquals("testdom_check", err.getConstraint());
+        }
+        stmt.close();
     }
-    TestUtil.dropTable(con, "testerr_foreign");
-    stmt.close();
-  }
 
-  @Test
-  public void testCheckConstraint() throws Exception {
-    TestUtil.createTable(con, "testerr_check", "id int not null, max10 int CHECK (max10 < 11)");
-    Statement stmt = con.createStatement();
-    stmt.executeUpdate("INSERT INTO testerr_check (id, max10) VALUES (1, 5)");
-    try {
-      stmt.executeUpdate("INSERT INTO testerr_check (id, max10) VALUES (2, 11)");
-      fail("Should have thrown a check exception.");
-    } catch (SQLException sqle) {
-      ServerErrorMessage err = ((PSQLException) sqle).getServerErrorMessage();
-      assertEquals("public", err.getSchema());
-      assertEquals("testerr_check", err.getTable());
-      assertEquals(PSQLState.CHECK_VIOLATION.getState(), err.getSQLState());
-      assertNull(err.getDatatype());
-      assertNull(err.getColumn());
+    @Test
+    public void testNotNullConstraint() throws Exception {
+        Statement stmt = con.createStatement();
+        try {
+            stmt.executeUpdate("INSERT INTO testerr (val) VALUES (1)");
+            fail("Should have thrown a not-null exception.");
+        } catch (SQLException sqle) {
+            ServerErrorMessage err = ((PSQLException) sqle).getServerErrorMessage();
+            assertEquals("public", err.getSchema());
+            assertEquals("testerr", err.getTable());
+            assertEquals("id", err.getColumn());
+            assertEquals(PSQLState.NOT_NULL_VIOLATION.getState(), err.getSQLState());
+            assertNull(err.getDatatype());
+        }
+        stmt.close();
     }
-    TestUtil.dropTable(con, "testerr_check");
-    stmt.close();
-  }
 
-  @Test
-  public void testExclusionConstraint() throws Exception {
-    TestUtil.createTable(con, "testerr_exclude", "id int, EXCLUDE (id WITH =)");
-    Statement stmt = con.createStatement();
-    stmt.executeUpdate("INSERT INTO testerr_exclude (id) VALUES (1108)");
-    try {
-      stmt.executeUpdate("INSERT INTO testerr_exclude (id) VALUES (1108)");
-      fail("Should have thrown an exclusion exception.");
-    } catch (SQLException sqle) {
-      ServerErrorMessage err = ((PSQLException) sqle).getServerErrorMessage();
-      assertEquals("public", err.getSchema());
-      assertEquals("testerr_exclude", err.getTable());
-      assertEquals(PSQLState.EXCLUSION_VIOLATION.getState(), err.getSQLState());
-      assertNull(err.getDatatype());
-      assertNull(err.getColumn());
+    @Test
+    public void testForeignKeyConstraint() throws Exception {
+        TestUtil.createTable(con, "testerr_foreign", "id int not null, testerr_id int,"
+                + "CONSTRAINT testerr FOREIGN KEY (testerr_id) references testerr(id)");
+        Statement stmt = con.createStatement();
+        stmt.executeUpdate("INSERT INTO testerr (id, val) VALUES (1, 1)");
+        try {
+            stmt.executeUpdate("INSERT INTO testerr_foreign (id, testerr_id) VALUES (1, 2)");
+            fail("Should have thrown a foreign key exception.");
+        } catch (SQLException sqle) {
+            ServerErrorMessage err = ((PSQLException) sqle).getServerErrorMessage();
+            assertEquals("public", err.getSchema());
+            assertEquals("testerr_foreign", err.getTable());
+            assertEquals(PSQLState.FOREIGN_KEY_VIOLATION.getState(), err.getSQLState());
+            assertNull(err.getDatatype());
+            assertNull(err.getColumn());
+        }
+        TestUtil.dropTable(con, "testerr_foreign");
+        stmt.close();
+    }
+
+    @Test
+    public void testCheckConstraint() throws Exception {
+        TestUtil.createTable(con, "testerr_check", "id int not null, max10 int CHECK (max10 < 11)");
+        Statement stmt = con.createStatement();
+        stmt.executeUpdate("INSERT INTO testerr_check (id, max10) VALUES (1, 5)");
+        try {
+            stmt.executeUpdate("INSERT INTO testerr_check (id, max10) VALUES (2, 11)");
+            fail("Should have thrown a check exception.");
+        } catch (SQLException sqle) {
+            ServerErrorMessage err = ((PSQLException) sqle).getServerErrorMessage();
+            assertEquals("public", err.getSchema());
+            assertEquals("testerr_check", err.getTable());
+            assertEquals(PSQLState.CHECK_VIOLATION.getState(), err.getSQLState());
+            assertNull(err.getDatatype());
+            assertNull(err.getColumn());
+        }
+        TestUtil.dropTable(con, "testerr_check");
+        stmt.close();
+    }
+
+    @Test
+    public void testExclusionConstraint() throws Exception {
+        TestUtil.createTable(con, "testerr_exclude", "id int, EXCLUDE (id WITH =)");
+        Statement stmt = con.createStatement();
+        stmt.executeUpdate("INSERT INTO testerr_exclude (id) VALUES (1108)");
+        try {
+            stmt.executeUpdate("INSERT INTO testerr_exclude (id) VALUES (1108)");
+            fail("Should have thrown an exclusion exception.");
+        } catch (SQLException sqle) {
+            ServerErrorMessage err = ((PSQLException) sqle).getServerErrorMessage();
+            assertEquals("public", err.getSchema());
+            assertEquals("testerr_exclude", err.getTable());
+            assertEquals(PSQLState.EXCLUSION_VIOLATION.getState(), err.getSQLState());
+            assertNull(err.getDatatype());
+            assertNull(err.getColumn());
+        }
+        TestUtil.dropTable(con, "testerr_exclude");
+        stmt.close();
     }
-    TestUtil.dropTable(con, "testerr_exclude");
-    stmt.close();
-  }
 
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ServerPreparedStmtTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ServerPreparedStmtTest.java
index 372ac66..13f4b97 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ServerPreparedStmtTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/ServerPreparedStmtTest.java
@@ -26,271 +26,271 @@ import java.sql.Statement;
  */
 public class ServerPreparedStmtTest extends BaseTest4 {
 
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-
-    Assume.assumeTrue("Server-prepared statements are not supported in simple protocol, thus ignoring the tests",
-        preferQueryMode != PreferQueryMode.SIMPLE);
-
-    Statement stmt = con.createStatement();
-
-    TestUtil.createTable(con, "testsps", "id integer, value boolean");
-
-    stmt.executeUpdate("INSERT INTO testsps VALUES (1,'t')");
-    stmt.executeUpdate("INSERT INTO testsps VALUES (2,'t')");
-    stmt.executeUpdate("INSERT INTO testsps VALUES (3,'t')");
-    stmt.executeUpdate("INSERT INTO testsps VALUES (4,'t')");
-    stmt.executeUpdate("INSERT INTO testsps VALUES (6,'t')");
-    stmt.executeUpdate("INSERT INTO testsps VALUES (9,'f')");
-
-    stmt.close();
-  }
-
-  @Override
-  public void tearDown() throws SQLException {
-    TestUtil.dropTable(con, "testsps");
-    super.tearDown();
-  }
-
-  @SuppressWarnings("deprecation")
-  private static void setUseServerPrepare(PreparedStatement pstmt, boolean flag) throws SQLException {
-    pstmt.unwrap(PGStatement.class).setUseServerPrepare(flag);
-  }
-
-  @Test
-  public void testEmptyResults() throws Exception {
-    PreparedStatement pstmt = con.prepareStatement("SELECT * FROM testsps WHERE id = ?");
-    setUseServerPrepare(pstmt, true);
-    for (int i = 0; i < 10; i++) {
-      pstmt.setInt(1, -1);
-      ResultSet rs = pstmt.executeQuery();
-      assertFalse(rs.next());
-      rs.close();
-    }
-    pstmt.close();
-  }
-
-  @Test
-  public void testPreparedExecuteCount() throws Exception {
-    PreparedStatement pstmt = con.prepareStatement("UPDATE testsps SET id = id + 44");
-    setUseServerPrepare(pstmt, true);
-    int count = pstmt.executeUpdate();
-    assertEquals(6, count);
-    pstmt.close();
-  }
-
-  @Test
-  public void testPreparedStatementsNoBinds() throws Exception {
-    PreparedStatement pstmt = con.prepareStatement("SELECT * FROM testsps WHERE id = 2");
-    setUseServerPrepare(pstmt, true);
-    assertTrue(pstmt.unwrap(PGStatement.class).isUseServerPrepare());
-
-    // Test that basic functionality works
-    ResultSet rs = pstmt.executeQuery();
-    assertTrue(rs.next());
-    assertEquals(2, rs.getInt(1));
-    rs.close();
-
-    // Verify that subsequent calls still work
-    rs = pstmt.executeQuery();
-    assertTrue(rs.next());
-    assertEquals(2, rs.getInt(1));
-    rs.close();
-
-    // Verify that using the statement still works after turning off prepares
-    if (Boolean.getBoolean("org.postgresql.forceBinary")) {
-      return;
-    }
-    setUseServerPrepare(pstmt, false);
-    assertTrue(!pstmt.unwrap(PGStatement.class).isUseServerPrepare());
-
-    rs = pstmt.executeQuery();
-    assertTrue(rs.next());
-    assertEquals(2, rs.getInt(1));
-    rs.close();
-
-    pstmt.close();
-  }
-
-  @Test
-  public void testPreparedStatementsWithOneBind() throws Exception {
-    PreparedStatement pstmt = con.prepareStatement("SELECT * FROM testsps WHERE id = ?");
-    setUseServerPrepare(pstmt, true);
-    assertTrue(pstmt.unwrap(PGStatement.class).isUseServerPrepare());
-
-    // Test that basic functionality works
-    pstmt.setInt(1, 2);
-    ResultSet rs = pstmt.executeQuery();
-    assertTrue(rs.next());
-    assertEquals(2, rs.getInt(1));
-    rs.close();
-
-    // Verify that subsequent calls still work
-    rs = pstmt.executeQuery();
-    assertTrue(rs.next());
-    assertEquals(2, rs.getInt(1));
-    rs.close();
-
-    // Verify that using the statement still works after turning off prepares
-    if (Boolean.getBoolean("org.postgresql.forceBinary")) {
-      return;
+    @SuppressWarnings("deprecation")
+    private static void setUseServerPrepare(PreparedStatement pstmt, boolean flag) throws SQLException {
+        pstmt.unwrap(PGStatement.class).setUseServerPrepare(flag);
     }
 
-    setUseServerPrepare(pstmt, false);
-    assertTrue(!pstmt.unwrap(PGStatement.class).isUseServerPrepare());
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
 
-    pstmt.setInt(1, 9);
-    rs = pstmt.executeQuery();
-    assertTrue(rs.next());
-    assertEquals(9, rs.getInt(1));
-    rs.close();
+        Assume.assumeTrue("Server-prepared statements are not supported in simple protocol, thus ignoring the tests",
+                preferQueryMode != PreferQueryMode.SIMPLE);
 
-    pstmt.close();
-  }
+        Statement stmt = con.createStatement();
 
-  // Verify we can bind booleans-as-objects ok.
-  @Test
-  public void testBooleanObjectBind() throws Exception {
-    PreparedStatement pstmt = con.prepareStatement("SELECT * FROM testsps WHERE value = ?");
-    setUseServerPrepare(pstmt, true);
-    assertTrue(pstmt.unwrap(PGStatement.class).isUseServerPrepare());
+        TestUtil.createTable(con, "testsps", "id integer, value boolean");
 
-    pstmt.setObject(1, Boolean.FALSE, java.sql.Types.BIT);
-    ResultSet rs = pstmt.executeQuery();
-    assertTrue(rs.next());
-    assertEquals(9, rs.getInt(1));
-    rs.close();
-  }
+        stmt.executeUpdate("INSERT INTO testsps VALUES (1,'t')");
+        stmt.executeUpdate("INSERT INTO testsps VALUES (2,'t')");
+        stmt.executeUpdate("INSERT INTO testsps VALUES (3,'t')");
+        stmt.executeUpdate("INSERT INTO testsps VALUES (4,'t')");
+        stmt.executeUpdate("INSERT INTO testsps VALUES (6,'t')");
+        stmt.executeUpdate("INSERT INTO testsps VALUES (9,'f')");
 
-  // Verify we can bind booleans-as-integers ok.
-  @Test
-  public void testBooleanIntegerBind() throws Exception {
-    PreparedStatement pstmt = con.prepareStatement("SELECT * FROM testsps WHERE id = ?");
-    setUseServerPrepare(pstmt, true);
-    assertTrue(pstmt.unwrap(PGStatement.class).isUseServerPrepare());
-
-    pstmt.setObject(1, Boolean.TRUE, java.sql.Types.INTEGER);
-    ResultSet rs = pstmt.executeQuery();
-    assertTrue(rs.next());
-    assertEquals(1, rs.getInt(1));
-    rs.close();
-  }
-
-  // Verify we can bind booleans-as-native-types ok.
-  @Test
-  public void testBooleanBind() throws Exception {
-    PreparedStatement pstmt = con.prepareStatement("SELECT * FROM testsps WHERE value = ?");
-    setUseServerPrepare(pstmt, true);
-    assertTrue(pstmt.unwrap(PGStatement.class).isUseServerPrepare());
-
-    pstmt.setBoolean(1, false);
-    ResultSet rs = pstmt.executeQuery();
-    assertTrue(rs.next());
-    assertEquals(9, rs.getInt(1));
-    rs.close();
-  }
-
-  @Test
-  public void testPreparedStatementsWithBinds() throws Exception {
-    PreparedStatement pstmt = con.prepareStatement("SELECT * FROM testsps WHERE id = ? or id = ?");
-    setUseServerPrepare(pstmt, true);
-    assertTrue(pstmt.unwrap(PGStatement.class).isUseServerPrepare());
-
-    // Test that basic functionality works
-    // bind different datatypes
-    pstmt.setInt(1, 2);
-    pstmt.setLong(2, 2);
-    ResultSet rs = pstmt.executeQuery();
-    assertTrue(rs.next());
-    assertEquals(2, rs.getInt(1));
-    rs.close();
-
-    // Verify that subsequent calls still work
-    rs = pstmt.executeQuery();
-    assertTrue(rs.next());
-    assertEquals(2, rs.getInt(1));
-    rs.close();
-
-    pstmt.close();
-  }
-
-  @Test
-  public void testSPSToggle() throws Exception {
-    // Verify we can toggle UseServerPrepare safely before a query is executed.
-    PreparedStatement pstmt = con.prepareStatement("SELECT * FROM testsps WHERE id = 2");
-    setUseServerPrepare(pstmt, true);
-    setUseServerPrepare(pstmt, false);
-  }
-
-  @Test
-  public void testBytea() throws Exception {
-    // Verify we can use setBytes() with a server-prepared update.
-    try {
-      TestUtil.createTable(con, "testsps_bytea", "data bytea");
-
-      PreparedStatement pstmt = con.prepareStatement("INSERT INTO testsps_bytea(data) VALUES (?)");
-      setUseServerPrepare(pstmt, true);
-      pstmt.setBytes(1, new byte[100]);
-      pstmt.executeUpdate();
-    } finally {
-      TestUtil.dropTable(con, "testsps_bytea");
+        stmt.close();
     }
-  }
 
-  // Check statements are not transformed when they shouldn't be.
-  @Test
-  public void testCreateTable() throws Exception {
-    // CREATE TABLE isn't supported by PREPARE; the driver should realize this and
-    // still complete without error.
-    PreparedStatement pstmt = con.prepareStatement("CREATE TABLE testsps_bad(data int)");
-    setUseServerPrepare(pstmt, true);
-    pstmt.executeUpdate();
-    TestUtil.dropTable(con, "testsps_bad");
-  }
-
-  @Test
-  public void testMultistatement() throws Exception {
-    // Shouldn't try to PREPARE this one, if we do we get:
-    // PREPARE x(int,int) AS INSERT .... $1 ; INSERT ... $2 -- syntax error
-    try {
-      TestUtil.createTable(con, "testsps_multiple", "data int");
-      PreparedStatement pstmt = con.prepareStatement(
-          "INSERT INTO testsps_multiple(data) VALUES (?); INSERT INTO testsps_multiple(data) VALUES (?)");
-      setUseServerPrepare(pstmt, true);
-      pstmt.setInt(1, 1);
-      pstmt.setInt(2, 2);
-      pstmt.executeUpdate(); // Two inserts.
-
-      pstmt.setInt(1, 3);
-      pstmt.setInt(2, 4);
-      pstmt.executeUpdate(); // Two more inserts.
-
-      ResultSet check = con.createStatement().executeQuery("SELECT COUNT(*) FROM testsps_multiple");
-      assertTrue(check.next());
-      assertEquals(4, check.getInt(1));
-    } finally {
-      TestUtil.dropTable(con, "testsps_multiple");
+    @Override
+    public void tearDown() throws SQLException {
+        TestUtil.dropTable(con, "testsps");
+        super.tearDown();
     }
-  }
 
-  @Test
-  public void testTypeChange() throws Exception {
-    PreparedStatement pstmt = con.prepareStatement("SELECT CAST (? AS TEXT)");
-    setUseServerPrepare(pstmt, true);
+    @Test
+    public void testEmptyResults() throws Exception {
+        PreparedStatement pstmt = con.prepareStatement("SELECT * FROM testsps WHERE id = ?");
+        setUseServerPrepare(pstmt, true);
+        for (int i = 0; i < 10; i++) {
+            pstmt.setInt(1, -1);
+            ResultSet rs = pstmt.executeQuery();
+            assertFalse(rs.next());
+            rs.close();
+        }
+        pstmt.close();
+    }
 
-    // Prepare with int parameter.
-    pstmt.setInt(1, 1);
-    ResultSet rs = pstmt.executeQuery();
-    assertTrue(rs.next());
-    assertEquals(1, rs.getInt(1));
-    assertTrue(!rs.next());
+    @Test
+    public void testPreparedExecuteCount() throws Exception {
+        PreparedStatement pstmt = con.prepareStatement("UPDATE testsps SET id = id + 44");
+        setUseServerPrepare(pstmt, true);
+        int count = pstmt.executeUpdate();
+        assertEquals(6, count);
+        pstmt.close();
+    }
 
-    // Change to text parameter, check it still works.
-    pstmt.setString(1, "test string");
-    rs = pstmt.executeQuery();
-    assertTrue(rs.next());
-    assertEquals("test string", rs.getString(1));
-    assertTrue(!rs.next());
-  }
+    @Test
+    public void testPreparedStatementsNoBinds() throws Exception {
+        PreparedStatement pstmt = con.prepareStatement("SELECT * FROM testsps WHERE id = 2");
+        setUseServerPrepare(pstmt, true);
+        assertTrue(pstmt.unwrap(PGStatement.class).isUseServerPrepare());
+
+        // Test that basic functionality works
+        ResultSet rs = pstmt.executeQuery();
+        assertTrue(rs.next());
+        assertEquals(2, rs.getInt(1));
+        rs.close();
+
+        // Verify that subsequent calls still work
+        rs = pstmt.executeQuery();
+        assertTrue(rs.next());
+        assertEquals(2, rs.getInt(1));
+        rs.close();
+
+        // Verify that using the statement still works after turning off prepares
+        if (Boolean.getBoolean("org.postgresql.forceBinary")) {
+            return;
+        }
+        setUseServerPrepare(pstmt, false);
+        assertTrue(!pstmt.unwrap(PGStatement.class).isUseServerPrepare());
+
+        rs = pstmt.executeQuery();
+        assertTrue(rs.next());
+        assertEquals(2, rs.getInt(1));
+        rs.close();
+
+        pstmt.close();
+    }
+
+    @Test
+    public void testPreparedStatementsWithOneBind() throws Exception {
+        PreparedStatement pstmt = con.prepareStatement("SELECT * FROM testsps WHERE id = ?");
+        setUseServerPrepare(pstmt, true);
+        assertTrue(pstmt.unwrap(PGStatement.class).isUseServerPrepare());
+
+        // Test that basic functionality works
+        pstmt.setInt(1, 2);
+        ResultSet rs = pstmt.executeQuery();
+        assertTrue(rs.next());
+        assertEquals(2, rs.getInt(1));
+        rs.close();
+
+        // Verify that subsequent calls still work
+        rs = pstmt.executeQuery();
+        assertTrue(rs.next());
+        assertEquals(2, rs.getInt(1));
+        rs.close();
+
+        // Verify that using the statement still works after turning off prepares
+        if (Boolean.getBoolean("org.postgresql.forceBinary")) {
+            return;
+        }
+
+        setUseServerPrepare(pstmt, false);
+        assertTrue(!pstmt.unwrap(PGStatement.class).isUseServerPrepare());
+
+        pstmt.setInt(1, 9);
+        rs = pstmt.executeQuery();
+        assertTrue(rs.next());
+        assertEquals(9, rs.getInt(1));
+        rs.close();
+
+        pstmt.close();
+    }
+
+    // Verify we can bind booleans-as-objects ok.
+    @Test
+    public void testBooleanObjectBind() throws Exception {
+        PreparedStatement pstmt = con.prepareStatement("SELECT * FROM testsps WHERE value = ?");
+        setUseServerPrepare(pstmt, true);
+        assertTrue(pstmt.unwrap(PGStatement.class).isUseServerPrepare());
+
+        pstmt.setObject(1, Boolean.FALSE, java.sql.Types.BIT);
+        ResultSet rs = pstmt.executeQuery();
+        assertTrue(rs.next());
+        assertEquals(9, rs.getInt(1));
+        rs.close();
+    }
+
+    // Verify we can bind booleans-as-integers ok.
+    @Test
+    public void testBooleanIntegerBind() throws Exception {
+        PreparedStatement pstmt = con.prepareStatement("SELECT * FROM testsps WHERE id = ?");
+        setUseServerPrepare(pstmt, true);
+        assertTrue(pstmt.unwrap(PGStatement.class).isUseServerPrepare());
+
+        pstmt.setObject(1, Boolean.TRUE, java.sql.Types.INTEGER);
+        ResultSet rs = pstmt.executeQuery();
+        assertTrue(rs.next());
+        assertEquals(1, rs.getInt(1));
+        rs.close();
+    }
+
+    // Verify we can bind booleans-as-native-types ok.
+    @Test
+    public void testBooleanBind() throws Exception {
+        PreparedStatement pstmt = con.prepareStatement("SELECT * FROM testsps WHERE value = ?");
+        setUseServerPrepare(pstmt, true);
+        assertTrue(pstmt.unwrap(PGStatement.class).isUseServerPrepare());
+
+        pstmt.setBoolean(1, false);
+        ResultSet rs = pstmt.executeQuery();
+        assertTrue(rs.next());
+        assertEquals(9, rs.getInt(1));
+        rs.close();
+    }
+
+    @Test
+    public void testPreparedStatementsWithBinds() throws Exception {
+        PreparedStatement pstmt = con.prepareStatement("SELECT * FROM testsps WHERE id = ? or id = ?");
+        setUseServerPrepare(pstmt, true);
+        assertTrue(pstmt.unwrap(PGStatement.class).isUseServerPrepare());
+
+        // Test that basic functionality works
+        // bind different datatypes
+        pstmt.setInt(1, 2);
+        pstmt.setLong(2, 2);
+        ResultSet rs = pstmt.executeQuery();
+        assertTrue(rs.next());
+        assertEquals(2, rs.getInt(1));
+        rs.close();
+
+        // Verify that subsequent calls still work
+        rs = pstmt.executeQuery();
+        assertTrue(rs.next());
+        assertEquals(2, rs.getInt(1));
+        rs.close();
+
+        pstmt.close();
+    }
+
+    @Test
+    public void testSPSToggle() throws Exception {
+        // Verify we can toggle UseServerPrepare safely before a query is executed.
+        PreparedStatement pstmt = con.prepareStatement("SELECT * FROM testsps WHERE id = 2");
+        setUseServerPrepare(pstmt, true);
+        setUseServerPrepare(pstmt, false);
+    }
+
+    @Test
+    public void testBytea() throws Exception {
+        // Verify we can use setBytes() with a server-prepared update.
+        try {
+            TestUtil.createTable(con, "testsps_bytea", "data bytea");
+
+            PreparedStatement pstmt = con.prepareStatement("INSERT INTO testsps_bytea(data) VALUES (?)");
+            setUseServerPrepare(pstmt, true);
+            pstmt.setBytes(1, new byte[100]);
+            pstmt.executeUpdate();
+        } finally {
+            TestUtil.dropTable(con, "testsps_bytea");
+        }
+    }
+
+    // Check statements are not transformed when they shouldn't be.
+    @Test
+    public void testCreateTable() throws Exception {
+        // CREATE TABLE isn't supported by PREPARE; the driver should realize this and
+        // still complete without error.
+        PreparedStatement pstmt = con.prepareStatement("CREATE TABLE testsps_bad(data int)");
+        setUseServerPrepare(pstmt, true);
+        pstmt.executeUpdate();
+        TestUtil.dropTable(con, "testsps_bad");
+    }
+
+    @Test
+    public void testMultistatement() throws Exception {
+        // Shouldn't try to PREPARE this one, if we do we get:
+        // PREPARE x(int,int) AS INSERT .... $1 ; INSERT ... $2 -- syntax error
+        try {
+            TestUtil.createTable(con, "testsps_multiple", "data int");
+            PreparedStatement pstmt = con.prepareStatement(
+                    "INSERT INTO testsps_multiple(data) VALUES (?); INSERT INTO testsps_multiple(data) VALUES (?)");
+            setUseServerPrepare(pstmt, true);
+            pstmt.setInt(1, 1);
+            pstmt.setInt(2, 2);
+            pstmt.executeUpdate(); // Two inserts.
+
+            pstmt.setInt(1, 3);
+            pstmt.setInt(2, 4);
+            pstmt.executeUpdate(); // Two more inserts.
+
+            ResultSet check = con.createStatement().executeQuery("SELECT COUNT(*) FROM testsps_multiple");
+            assertTrue(check.next());
+            assertEquals(4, check.getInt(1));
+        } finally {
+            TestUtil.dropTable(con, "testsps_multiple");
+        }
+    }
+
+    @Test
+    public void testTypeChange() throws Exception {
+        PreparedStatement pstmt = con.prepareStatement("SELECT CAST (? AS TEXT)");
+        setUseServerPrepare(pstmt, true);
+
+        // Prepare with int parameter.
+        pstmt.setInt(1, 1);
+        ResultSet rs = pstmt.executeQuery();
+        assertTrue(rs.next());
+        assertEquals(1, rs.getInt(1));
+        assertTrue(!rs.next());
+
+        // Change to text parameter, check it still works.
+        pstmt.setString(1, "test string");
+        rs = pstmt.executeQuery();
+        assertTrue(rs.next());
+        assertEquals("test string", rs.getString(1));
+        assertTrue(!rs.next());
+    }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/SocketTimeoutTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/SocketTimeoutTest.java
index 253aae7..67413ec 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/SocketTimeoutTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/SocketTimeoutTest.java
@@ -20,22 +20,22 @@ import java.util.Properties;
 
 class SocketTimeoutTest {
 
-  @Test
-  void socketTimeoutEnforcement() throws Exception {
-    Properties properties = new Properties();
-    PGProperty.SOCKET_TIMEOUT.set(properties, 1);
+    @Test
+    void socketTimeoutEnforcement() throws Exception {
+        Properties properties = new Properties();
+        PGProperty.SOCKET_TIMEOUT.set(properties, 1);
 
-    Connection conn = TestUtil.openDB(properties);
-    Statement stmt = null;
-    try {
-      stmt = conn.createStatement();
-      stmt.execute("SELECT pg_sleep(2)");
-      fail("Connection with socketTimeout did not throw expected exception");
-    } catch (SQLException e) {
-      assertTrue(conn.isClosed());
-    } finally {
-      TestUtil.closeQuietly(stmt);
-      TestUtil.closeDB(conn);
+        Connection conn = TestUtil.openDB(properties);
+        Statement stmt = null;
+        try {
+            stmt = conn.createStatement();
+            stmt.execute("SELECT pg_sleep(2)");
+            fail("Connection with socketTimeout did not throw expected exception");
+        } catch (SQLException e) {
+            assertTrue(conn.isClosed());
+        } finally {
+            TestUtil.closeQuietly(stmt);
+            TestUtil.closeDB(conn);
+        }
     }
-  }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/StatementTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/StatementTest.java
index 63c8fef..3d6c0c6 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/StatementTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/StatementTest.java
@@ -5,30 +5,6 @@
 
 package org.postgresql.test.jdbc2;
 
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertFalse;
-import static org.junit.jupiter.api.Assertions.assertNotEquals;
-import static org.junit.jupiter.api.Assertions.assertNotNull;
-import static org.junit.jupiter.api.Assertions.assertNull;
-import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.junit.jupiter.api.Assertions.fail;
-
-import org.postgresql.Driver;
-import org.postgresql.PGProperty;
-import org.postgresql.core.ServerVersion;
-import org.postgresql.jdbc.PgStatement;
-import org.postgresql.test.TestUtil;
-import org.postgresql.test.util.StrangeProxyServer;
-import org.postgresql.util.LazyCleaner;
-import org.postgresql.util.PSQLState;
-import org.postgresql.util.SharedTimer;
-
-import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.Assumptions;
-import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.Test;
-import org.junit.jupiter.api.Timeout;
-
 import java.io.IOException;
 import java.sql.Connection;
 import java.sql.PreparedStatement;
@@ -53,1115 +29,1137 @@ import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicReference;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assumptions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
+import org.postgresql.Driver;
+import org.postgresql.PGProperty;
+import org.postgresql.core.ServerVersion;
+import org.postgresql.jdbc.PgStatement;
+import org.postgresql.test.TestUtil;
+import org.postgresql.test.util.StrangeProxyServer;
+import org.postgresql.util.LazyCleaner;
+import org.postgresql.util.PSQLState;
+import org.postgresql.util.SharedTimer;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 
 /*
-* Test for getObject
-*/
+ * Test for getObject
+ */
 class StatementTest {
-  private Connection con;
+    private Connection con;
 
-  @BeforeEach
-  void setUp() throws Exception {
-    con = TestUtil.openDB();
-    TestUtil.createTempTable(con, "test_statement", "i int");
-    TestUtil.createTempTable(con, "escapetest",
-        "ts timestamp, d date, t time, \")\" varchar(5), \"\"\"){a}'\" text ");
-    TestUtil.createTempTable(con, "comparisontest", "str1 varchar(5), str2 varchar(15)");
-    TestUtil.createTable(con, "test_lock", "name text");
-    Statement stmt = con.createStatement();
-    stmt.executeUpdate(TestUtil.insertSQL("comparisontest", "str1,str2", "'_abcd','_found'"));
-    stmt.executeUpdate(TestUtil.insertSQL("comparisontest", "str1,str2", "'%abcd','%found'"));
-    stmt.close();
-  }
-
-  @AfterEach
-  void tearDown() throws Exception {
-    TestUtil.dropTable(con, "test_statement");
-    TestUtil.dropTable(con, "escapetest");
-    TestUtil.dropTable(con, "comparisontest");
-    TestUtil.dropTable(con, "test_lock");
-    TestUtil.execute(con, "DROP FUNCTION IF EXISTS notify_loop()");
-    TestUtil.execute(con, "DROP FUNCTION IF EXISTS notify_then_sleep()");
-    con.close();
-  }
-
-  private void assumeLongTest() {
-    // Run the test:
-    //   Travis: in PG_VERSION=HEAD
-    //   Other: always
-    if ("true".equals(System.getenv("TRAVIS"))) {
-      Assumptions.assumeTrue("HEAD".equals(System.getenv("PG_VERSION")));
+    @BeforeEach
+    void setUp() throws Exception {
+        con = TestUtil.openDB();
+        TestUtil.createTempTable(con, "test_statement", "i int");
+        TestUtil.createTempTable(con, "escapetest",
+                "ts timestamp, d date, t time, \")\" varchar(5), \"\"\"){a}'\" text ");
+        TestUtil.createTempTable(con, "comparisontest", "str1 varchar(5), str2 varchar(15)");
+        TestUtil.createTable(con, "test_lock", "name text");
+        Statement stmt = con.createStatement();
+        stmt.executeUpdate(TestUtil.insertSQL("comparisontest", "str1,str2", "'_abcd','_found'"));
+        stmt.executeUpdate(TestUtil.insertSQL("comparisontest", "str1,str2", "'%abcd','%found'"));
+        stmt.close();
     }
-  }
 
-  @Test
-  void close() throws SQLException {
-    Statement stmt = con.createStatement();
-    stmt.close();
-
-    try {
-      stmt.getResultSet();
-      fail("statements should not be re-used after close");
-    } catch (SQLException ex) {
+    @AfterEach
+    void tearDown() throws Exception {
+        TestUtil.dropTable(con, "test_statement");
+        TestUtil.dropTable(con, "escapetest");
+        TestUtil.dropTable(con, "comparisontest");
+        TestUtil.dropTable(con, "test_lock");
+        TestUtil.execute(con, "DROP FUNCTION IF EXISTS notify_loop()");
+        TestUtil.execute(con, "DROP FUNCTION IF EXISTS notify_then_sleep()");
+        con.close();
     }
-  }
 
-  @Test
-  void resultSetClosed() throws SQLException {
-    Statement stmt = con.createStatement();
-    ResultSet rs = stmt.executeQuery("select 1");
-    stmt.close();
-    assertTrue(rs.isClosed());
-  }
-
-  /**
-   * Closing a Statement twice is not an error.
-   */
-  @Test
-  void doubleClose() throws SQLException {
-    Statement stmt = con.createStatement();
-    stmt.close();
-    stmt.close();
-  }
-
-  @Test
-  void multiExecute() throws SQLException {
-    Statement stmt = con.createStatement();
-    assertTrue(stmt.execute("SELECT 1 as a; UPDATE test_statement SET i=1; SELECT 2 as b, 3 as c"));
-
-    ResultSet rs = stmt.getResultSet();
-    assertTrue(rs.next());
-    assertEquals(1, rs.getInt(1));
-    rs.close();
-
-    assertFalse(stmt.getMoreResults());
-    assertEquals(0, stmt.getUpdateCount());
-
-    assertTrue(stmt.getMoreResults());
-    rs = stmt.getResultSet();
-    assertTrue(rs.next());
-    assertEquals(2, rs.getInt(1));
-    rs.close();
-
-    assertFalse(stmt.getMoreResults());
-    assertEquals(-1, stmt.getUpdateCount());
-    stmt.close();
-  }
-
-  @Test
-  void emptyQuery() throws SQLException {
-    Statement stmt = con.createStatement();
-    stmt.execute("");
-    assertNull(stmt.getResultSet());
-    assertFalse(stmt.getMoreResults());
-  }
-
-  @Test
-  void updateCount() throws SQLException {
-    Statement stmt = con.createStatement();
-    int count;
-
-    count = stmt.executeUpdate("INSERT INTO test_statement VALUES (3)");
-    assertEquals(1, count);
-    count = stmt.executeUpdate("INSERT INTO test_statement VALUES (3)");
-    assertEquals(1, count);
-
-    count = stmt.executeUpdate("UPDATE test_statement SET i=4");
-    assertEquals(2, count);
-
-    count = stmt.executeUpdate("CREATE TEMP TABLE another_table (a int)");
-    assertEquals(0, count);
-
-    if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_0)) {
-      count = stmt.executeUpdate("CREATE TEMP TABLE yet_another_table AS SELECT x FROM generate_series(1,10) x");
-      assertEquals(10, count);
-    }
-  }
-
-  @Test
-  void escapeProcessing() throws SQLException {
-    Statement stmt = con.createStatement();
-    int count;
-
-    count = stmt.executeUpdate("insert into escapetest (ts) values ({ts '1900-01-01 00:00:00'})");
-    assertEquals(1, count);
-
-    count = stmt.executeUpdate("insert into escapetest (d) values ({d '1900-01-01'})");
-    assertEquals(1, count);
-
-    count = stmt.executeUpdate("insert into escapetest (t) values ({t '00:00:00'})");
-    assertEquals(1, count);
-
-    ResultSet rs = stmt.executeQuery("select {fn version()} as version");
-    assertTrue(rs.next());
-
-    // check nested and multiple escaped functions
-    rs = stmt.executeQuery("select {fn version()} as version, {fn log({fn log(3.0)})} as log");
-    assertTrue(rs.next());
-    assertEquals(Math.log(Math.log(3)), rs.getDouble(2), 0.00001);
-
-    stmt.executeUpdate("UPDATE escapetest SET \")\" = 'a', \"\"\"){a}'\" = 'b'");
-
-    // check "difficult" values
-    rs = stmt.executeQuery("select {fn concat(')',escapetest.\")\")} as concat"
-        + ", {fn concat('{','}')} "
-        + ", {fn concat('''','\"')} "
-        + ", {fn concat(\"\"\"){a}'\", '''}''')} "
-        + " FROM escapetest");
-    assertTrue(rs.next());
-    assertEquals(")a", rs.getString(1));
-    assertEquals("{}", rs.getString(2));
-    assertEquals("'\"", rs.getString(3));
-    assertEquals("b'}'", rs.getString(4));
-
-    count = stmt.executeUpdate("create temp table b (i int)");
-    assertEquals(0, count);
-
-    rs = stmt.executeQuery("select * from {oj test_statement a left outer join b on (a.i=b.i)} ");
-    assertFalse(rs.next());
-    // test escape character
-    rs = stmt
-        .executeQuery("select str2 from comparisontest where str1 like '|_abcd' {escape '|'} ");
-    assertTrue(rs.next());
-    assertEquals("_found", rs.getString(1));
-    rs = stmt
-        .executeQuery("select str2 from comparisontest where str1 like '|%abcd' {escape '|'} ");
-    assertTrue(rs.next());
-    assertEquals("%found", rs.getString(1));
-  }
-
-  @Test
-  void preparedFunction() throws SQLException {
-    PreparedStatement pstmt = con.prepareStatement("SELECT {fn concat('a', ?)}");
-    pstmt.setInt(1, 5);
-    ResultSet rs = pstmt.executeQuery();
-    assertTrue(rs.next());
-    assertEquals("a5", rs.getString(1));
-  }
-
-  @Test
-  void dollarInComment() throws SQLException {
-    PreparedStatement pstmt = con.prepareStatement("SELECT /* $ */ {fn curdate()}");
-    ResultSet rs = pstmt.executeQuery();
-    assertTrue(rs.next());
-    assertNotNull(rs.getString(1), "{fn curdate()} should be not null");
-  }
-
-  @Test
-  void dollarInCommentTwoComments() throws SQLException {
-    PreparedStatement pstmt = con.prepareStatement("SELECT /* $ *//* $ */ {fn curdate()}");
-    ResultSet rs = pstmt.executeQuery();
-    assertTrue(rs.next());
-    assertNotNull(rs.getString(1), "{fn curdate()} should be not null");
-  }
-
-  @Test
-  void numericFunctions() throws SQLException {
-    Statement stmt = con.createStatement();
-
-    ResultSet rs = stmt.executeQuery("select {fn abs(-2.3)} as abs ");
-    assertTrue(rs.next());
-    assertEquals(2.3f, rs.getFloat(1), 0.00001);
-
-    rs = stmt.executeQuery("select {fn acos(-0.6)} as acos ");
-    assertTrue(rs.next());
-    assertEquals(Math.acos(-0.6), rs.getDouble(1), 0.00001);
-
-    rs = stmt.executeQuery("select {fn asin(-0.6)} as asin ");
-    assertTrue(rs.next());
-    assertEquals(Math.asin(-0.6), rs.getDouble(1), 0.00001);
-
-    rs = stmt.executeQuery("select {fn atan(-0.6)} as atan ");
-    assertTrue(rs.next());
-    assertEquals(Math.atan(-0.6), rs.getDouble(1), 0.00001);
-
-    rs = stmt.executeQuery("select {fn atan2(-2.3,7)} as atan2 ");
-    assertTrue(rs.next());
-    assertEquals(Math.atan2(-2.3, 7), rs.getDouble(1), 0.00001);
-
-    rs = stmt.executeQuery("select {fn ceiling(-2.3)} as ceiling ");
-    assertTrue(rs.next());
-    assertEquals(-2, rs.getDouble(1), 0.00001);
-
-    rs = stmt.executeQuery("select {fn cos(-2.3)} as cos, {fn cot(-2.3)} as cot ");
-    assertTrue(rs.next());
-    assertEquals(Math.cos(-2.3), rs.getDouble(1), 0.00001);
-    assertEquals(1 / Math.tan(-2.3), rs.getDouble(2), 0.00001);
-
-    rs = stmt.executeQuery("select {fn degrees({fn pi()})} as degrees ");
-    assertTrue(rs.next());
-    assertEquals(180, rs.getDouble(1), 0.00001);
-
-    rs = stmt.executeQuery("select {fn exp(-2.3)}, {fn floor(-2.3)},"
-        + " {fn log(2.3)},{fn log10(2.3)},{fn mod(3,2)}");
-    assertTrue(rs.next());
-    assertEquals(Math.exp(-2.3), rs.getDouble(1), 0.00001);
-    assertEquals(-3, rs.getDouble(2), 0.00001);
-    assertEquals(Math.log(2.3), rs.getDouble(3), 0.00001);
-    assertEquals(Math.log(2.3) / Math.log(10), rs.getDouble(4), 0.00001);
-    assertEquals(1, rs.getDouble(5), 0.00001);
-
-    rs = stmt.executeQuery("select {fn pi()}, {fn power(7,-2.3)},"
-        + " {fn radians(-180)},{fn round(3.1294,2)}");
-    assertTrue(rs.next());
-    assertEquals(Math.PI, rs.getDouble(1), 0.00001);
-    assertEquals(Math.pow(7, -2.3), rs.getDouble(2), 0.00001);
-    assertEquals(-Math.PI, rs.getDouble(3), 0.00001);
-    assertEquals(3.13, rs.getDouble(4), 0.00001);
-
-    rs = stmt.executeQuery("select {fn sign(-2.3)}, {fn sin(-2.3)},"
-        + " {fn sqrt(2.3)},{fn tan(-2.3)},{fn truncate(3.1294,2)}");
-    assertTrue(rs.next());
-    assertEquals(-1, rs.getInt(1));
-    assertEquals(Math.sin(-2.3), rs.getDouble(2), 0.00001);
-    assertEquals(Math.sqrt(2.3), rs.getDouble(3), 0.00001);
-    assertEquals(Math.tan(-2.3), rs.getDouble(4), 0.00001);
-    assertEquals(3.12, rs.getDouble(5), 0.00001);
-  }
-
-  @Test
-  void stringFunctions() throws SQLException {
-    Statement stmt = con.createStatement();
-    ResultSet rs = stmt.executeQuery(
-        "select {fn ascii(' test')},{fn char(32)}"
-        + ",{fn concat('ab','cd')}"
-        + ",{fn lcase('aBcD')},{fn left('1234',2)},{fn length('123 ')}"
-        + ",{fn locate('bc','abc')},{fn locate('bc','abc',3)}");
-    assertTrue(rs.next());
-    assertEquals(32, rs.getInt(1));
-    assertEquals(" ", rs.getString(2));
-    assertEquals("abcd", rs.getString(3));
-    assertEquals("abcd", rs.getString(4));
-    assertEquals("12", rs.getString(5));
-    assertEquals(3, rs.getInt(6));
-    assertEquals(2, rs.getInt(7));
-    assertEquals(0, rs.getInt(8));
-
-    rs = stmt.executeQuery(
-        "SELECT {fn insert('abcdef',3,2,'xxxx')}"
-        + ",{fn replace('abcdbc','bc','x')}");
-    assertTrue(rs.next());
-    assertEquals("abxxxxef", rs.getString(1));
-    assertEquals("axdx", rs.getString(2));
-
-    rs = stmt.executeQuery(
-        "select {fn ltrim(' ab')},{fn repeat('ab',2)}"
-        + ",{fn right('abcde',2)},{fn rtrim('ab ')}"
-        + ",{fn space(3)},{fn substring('abcd',2,2)}"
-        + ",{fn ucase('aBcD')}");
-    assertTrue(rs.next());
-    assertEquals("ab", rs.getString(1));
-    assertEquals("abab", rs.getString(2));
-    assertEquals("de", rs.getString(3));
-    assertEquals("ab", rs.getString(4));
-    assertEquals("   ", rs.getString(5));
-    assertEquals("bc", rs.getString(6));
-    assertEquals("ABCD", rs.getString(7));
-  }
-
-  @Test
-  void dateFuncWithParam() throws SQLException {
-    // Prior to 8.0 there is not an interval + timestamp operator,
-    // so timestampadd does not work.
-    //
-
-    PreparedStatement ps = con.prepareStatement(
-        "SELECT {fn timestampadd(SQL_TSI_QUARTER, ? ,{fn now()})}, {fn timestampadd(SQL_TSI_MONTH, ?, {fn now()})} ");
-    ps.setInt(1, 4);
-    ps.setInt(2, 12);
-    ResultSet rs = ps.executeQuery();
-    assertTrue(rs.next());
-    assertEquals(rs.getTimestamp(1), rs.getTimestamp(2));
-  }
-
-  @Test
-  void dateFunctions() throws SQLException {
-    Statement stmt = con.createStatement();
-    ResultSet rs = stmt.executeQuery("select {fn curdate()},{fn curtime()}"
-        + ",{fn dayname({fn now()})}, {fn dayofmonth({fn now()})}"
-        + ",{fn dayofweek({ts '2005-01-17 12:00:00'})},{fn dayofyear({fn now()})}"
-        + ",{fn hour({fn now()})},{fn minute({fn now()})}"
-        + ",{fn month({fn now()})}"
-        + ",{fn monthname({fn now()})},{fn quarter({fn now()})}"
-        + ",{fn second({fn now()})},{fn week({fn now()})}"
-        + ",{fn year({fn now()})} ");
-    assertTrue(rs.next());
-    // ensure sunday =>1 and monday =>2
-    assertEquals(2, rs.getInt(5));
-
-    // Prior to 8.0 there is not an interval + timestamp operator,
-    // so timestampadd does not work.
-    //
-
-    // second
-    rs = stmt.executeQuery(
-        "select {fn timestampdiff(SQL_TSI_SECOND,{fn now()},{fn timestampadd(SQL_TSI_SECOND,3,{fn now()})})} ");
-    assertTrue(rs.next());
-    assertEquals(3, rs.getInt(1));
-    // MINUTE
-    rs = stmt.executeQuery(
-        "select {fn timestampdiff(SQL_TSI_MINUTE,{fn now()},{fn timestampadd(SQL_TSI_MINUTE,3,{fn now()})})} ");
-    assertTrue(rs.next());
-    assertEquals(3, rs.getInt(1));
-    // HOUR
-    rs = stmt.executeQuery(
-        "select {fn timestampdiff(SQL_tsi_HOUR,{fn now()},{fn timestampadd(SQL_TSI_HOUR,3,{fn now()})})} ");
-    assertTrue(rs.next());
-    assertEquals(3, rs.getInt(1));
-    // day
-    rs = stmt.executeQuery(
-        "select {fn timestampdiff(SQL_TSI_DAY,{fn now()},{fn timestampadd(SQL_TSI_DAY,-3,{fn now()})})} ");
-    assertTrue(rs.next());
-    int res = rs.getInt(1);
-    if (res != -3 && res != -2) {
-      // set TimeZone='America/New_York';
-      // select CAST(-3 || ' day' as interval);
-      // interval
-      //----------
-      // -3 days
-      //
-      // select CAST(-3 || ' day' as interval)+now();
-      //           ?column?
-      //-------------------------------
-      // 2018-03-08 07:59:13.586895-05
-      //
-      // select CAST(-3 || ' day' as interval)+now()-now();
-      //     ?column?
-      //-------------------
-      // -2 days -23:00:00
-      fail("CAST(-3 || ' day' as interval)+now()-now() is expected to return -3 or -2. Actual value is " + res);
-    }
-    // WEEK => extract week from interval is not supported by backend
-    // rs = stmt.executeQuery("select {fn timestampdiff(SQL_TSI_WEEK,{fn now()},{fn
-    // timestampadd(SQL_TSI_WEEK,3,{fn now()})})} ");
-    // assertTrue(rs.next());
-    // assertEquals(3,rs.getInt(1));
-    // MONTH => backend assume there are 0 month in an interval of 92 days...
-    // rs = stmt.executeQuery("select {fn timestampdiff(SQL_TSI_MONTH,{fn now()},{fn
-    // timestampadd(SQL_TSI_MONTH,3,{fn now()})})} ");
-    // assertTrue(rs.next());
-    // assertEquals(3,rs.getInt(1));
-    // QUARTER => backend assume there are 1 quarter even in 270 days...
-    // rs = stmt.executeQuery("select {fn timestampdiff(SQL_TSI_QUARTER,{fn now()},{fn
-    // timestampadd(SQL_TSI_QUARTER,3,{fn now()})})} ");
-    // assertTrue(rs.next());
-    // assertEquals(3,rs.getInt(1));
-    // YEAR
-    // rs = stmt.executeQuery("select {fn timestampdiff(SQL_TSI_YEAR,{fn now()},{fn
-    // timestampadd(SQL_TSI_YEAR,3,{fn now()})})} ");
-    // assertTrue(rs.next());
-    // assertEquals(3,rs.getInt(1));
-  }
-
-  @Test
-  void systemFunctions() throws SQLException {
-    Statement stmt = con.createStatement();
-    ResultSet rs = stmt.executeQuery(
-        "select {fn ifnull(null,'2')}"
-        + ",{fn user()} ");
-    assertTrue(rs.next());
-    assertEquals("2", rs.getString(1));
-    assertEquals(TestUtil.getUser(), rs.getString(2));
-
-    rs = stmt.executeQuery("select {fn database()} ");
-    assertTrue(rs.next());
-    assertEquals(TestUtil.getDatabase(), rs.getString(1));
-  }
-
-  @Test
-  void warningsAreCleared() throws SQLException {
-    Statement stmt = con.createStatement();
-    // Will generate a NOTICE: for primary key index creation
-    stmt.execute("CREATE TEMP TABLE unused (a int primary key)");
-    stmt.executeQuery("SELECT 1");
-    // Executing another query should clear the warning from the first one.
-    assertNull(stmt.getWarnings());
-    stmt.close();
-  }
-
-  @Test
-  void warningsAreAvailableAsap()
-      throws Exception {
-    try (Connection outerLockCon = TestUtil.openDB()) {
-      outerLockCon.setAutoCommit(false);
-      //Acquire an exclusive lock so we can block the notice generating statement
-      outerLockCon.createStatement().execute("LOCK TABLE test_lock IN ACCESS EXCLUSIVE MODE;");
-      con.createStatement()
-              .execute("CREATE OR REPLACE FUNCTION notify_then_sleep() RETURNS VOID AS "
-                  + "$BODY$ "
-                  + "BEGIN "
-                  + "RAISE NOTICE 'Test 1'; "
-                  + "RAISE NOTICE 'Test 2'; "
-                  + "LOCK TABLE test_lock IN ACCESS EXCLUSIVE MODE; "
-                  + "END "
-                  + "$BODY$ "
-                  + "LANGUAGE plpgsql;");
-      con.createStatement().execute("SET SESSION client_min_messages = 'NOTICE'");
-      //If we never receive the two warnings the statement will just hang, so set a low timeout
-      con.createStatement().execute("SET SESSION statement_timeout = 1000");
-      final PreparedStatement preparedStatement = con.prepareStatement("SELECT notify_then_sleep()");
-      final Callable<Void> warningReader = new Callable<Void>() {
-        @Override
-        public Void call() throws SQLException, InterruptedException {
-          while (true) {
-            SQLWarning warning = preparedStatement.getWarnings();
-            if (warning != null) {
-              assertEquals("Test 1", warning.getMessage(), "First warning received not first notice raised");
-              SQLWarning next = warning.getNextWarning();
-              if (next != null) {
-                assertEquals("Test 2", next.getMessage(), "Second warning received not second notice raised");
-                //Release the lock so that the notice generating statement can end.
-                outerLockCon.commit();
-                return null;
-              }
-            }
-            //Break the loop on InterruptedException
-            Thread.sleep(0);
-          }
+    private void assumeLongTest() {
+        // Run the test:
+        //   Travis: in PG_VERSION=HEAD
+        //   Other: always
+        if ("true".equals(System.getenv("TRAVIS"))) {
+            Assumptions.assumeTrue("HEAD".equals(System.getenv("PG_VERSION")));
         }
-      };
-      ExecutorService executorService = Executors.newSingleThreadExecutor();
-      try {
-        Future<Void> future = executorService.submit(warningReader);
-        //Statement should only finish executing once we have
-        //received the two notices and released the outer lock.
-        preparedStatement.execute();
-
-        //If test takes longer than 2 seconds its a failure.
-        future.get(2, TimeUnit.SECONDS);
-      } finally {
-        executorService.shutdownNow();
-      }
     }
-  }
 
-  /**
-   * <p>Demonstrates a safe approach to concurrently reading the latest
-   * warnings while periodically clearing them.</p>
-   *
-   * <p>One drawback of this approach is that it requires the reader to make it to the end of the
-   * warning chain before clearing it, so long as your warning processing step is not very slow,
-   * this should happen more or less instantaneously even if you receive a lot of warnings.</p>
-   */
-  @Test
-  void concurrentWarningReadAndClear()
-      throws SQLException, InterruptedException, ExecutionException, TimeoutException {
-    final int iterations = 1000;
-    con.createStatement()
-        .execute("CREATE OR REPLACE FUNCTION notify_loop() RETURNS VOID AS "
-            + "$BODY$ "
-            + "BEGIN "
-            + "FOR i IN 1.. " + iterations + " LOOP "
-            + "  RAISE NOTICE 'Warning %', i; "
-            + "END LOOP; "
-            + "END "
-            + "$BODY$ "
-            + "LANGUAGE plpgsql;");
-    con.createStatement().execute("SET SESSION client_min_messages = 'NOTICE'");
-    final PreparedStatement statement = con.prepareStatement("SELECT notify_loop()");
-    final Callable<Void> warningReader = new Callable<Void>() {
-      @Override
-      public Void call() throws SQLException, InterruptedException {
-        SQLWarning lastProcessed = null;
-        int warnings = 0;
-        //For production code replace this with some condition that
-        //ends after the statement finishes execution
-        while (warnings < iterations) {
-          SQLWarning warn = statement.getWarnings();
-          //if next linked warning has value use that, otherwise keep using latest head
-          if (lastProcessed != null && lastProcessed.getNextWarning() != null) {
-            warn = lastProcessed.getNextWarning();
-          }
-          if (warn != null) {
-            warnings++;
-            //System.out.println("Processing " + warn.getMessage());
-            assertEquals("Warning " + warnings, warn.getMessage(), "Received warning out of expected order");
-            lastProcessed = warn;
-            //If the processed warning was the head of the chain clear
-            if (warn == statement.getWarnings()) {
-              //System.out.println("Clearing warnings");
-              statement.clearWarnings();
-            }
-          } else {
-            //Not required for this test, but a good idea adding some delay for production code
-            //to avoid high cpu usage while the query is running and no warnings are coming in.
-            //Alternatively use JDK9's Thread.onSpinWait()
-            Thread.sleep(10);
-          }
+    @Test
+    void close() throws SQLException {
+        Statement stmt = con.createStatement();
+        stmt.close();
+
+        try {
+            stmt.getResultSet();
+            fail("statements should not be re-used after close");
+        } catch (SQLException ex) {
         }
-        assertEquals("Warning " + iterations, lastProcessed.getMessage(), "Didn't receive expected last warning");
-        return null;
-      }
-    };
-
-    final ExecutorService executor = Executors.newSingleThreadExecutor();
-    try {
-      final Future warningReaderThread = executor.submit(warningReader);
-      statement.execute();
-      //If the reader doesn't return after 2 seconds, it failed.
-      warningReaderThread.get(2, TimeUnit.SECONDS);
-    } finally {
-      executor.shutdownNow();
     }
-  }
 
-  /**
-   * The parser tries to break multiple statements into individual queries as required by the V3
-   * extended query protocol. It can be a little overzealous sometimes and this test ensures we keep
-   * multiple rule actions together in one statement.
-   */
-  @Test
-  void parsingSemiColons() throws SQLException {
-    Statement stmt = con.createStatement();
-    stmt.execute(
-        "CREATE RULE r1 AS ON INSERT TO escapetest DO (DELETE FROM test_statement ; INSERT INTO test_statement VALUES (1); INSERT INTO test_statement VALUES (2); );");
-    stmt.executeUpdate("INSERT INTO escapetest(ts) VALUES (NULL)");
-    ResultSet rs = stmt.executeQuery("SELECT i from test_statement ORDER BY i");
-    assertTrue(rs.next());
-    assertEquals(1, rs.getInt(1));
-    assertTrue(rs.next());
-    assertEquals(2, rs.getInt(1));
-    assertFalse(rs.next());
-  }
-
-  @Test
-  void parsingDollarQuotes() throws SQLException {
-    // dollar-quotes are supported in the backend since version 8.0
-    Statement st = con.createStatement();
-    ResultSet rs;
-
-    rs = st.executeQuery("SELECT '$a$ ; $a$'");
-    assertTrue(rs.next());
-    assertEquals("$a$ ; $a$", rs.getObject(1));
-    rs.close();
-
-    rs = st.executeQuery("SELECT $$;$$");
-    assertTrue(rs.next());
-    assertEquals(";", rs.getObject(1));
-    rs.close();
-
-    rs = st.executeQuery("SELECT $OR$$a$'$b$a$$OR$ WHERE '$a$''$b$a$'=$OR$$a$'$b$a$$OR$OR ';'=''");
-    assertTrue(rs.next());
-    assertEquals("$a$'$b$a$", rs.getObject(1));
-    assertFalse(rs.next());
-    rs.close();
-
-    rs = st.executeQuery("SELECT $B$;$b$B$");
-    assertTrue(rs.next());
-    assertEquals(";$b", rs.getObject(1));
-    rs.close();
-
-    rs = st.executeQuery("SELECT $c$c$;$c$");
-    assertTrue(rs.next());
-    assertEquals("c$;", rs.getObject(1));
-    rs.close();
-
-    rs = st.executeQuery("SELECT $A0$;$A0$ WHERE ''=$t$t$t$ OR ';$t$'=';$t$'");
-    assertTrue(rs.next());
-    assertEquals(";", rs.getObject(1));
-    assertFalse(rs.next());
-    rs.close();
-
-    st.executeQuery("SELECT /* */$$;$$/**//*;*/").close();
-    st.executeQuery("SELECT /* */--;\n$$a$$/**/--\n--;\n").close();
-
-    st.close();
-  }
-
-  @Test
-  void unbalancedParensParseError() throws SQLException {
-    Statement stmt = con.createStatement();
-    try {
-      stmt.executeQuery("SELECT i FROM test_statement WHERE (1 > 0)) ORDER BY i");
-      fail("Should have thrown a parse error.");
-    } catch (SQLException sqle) {
+    @Test
+    void resultSetClosed() throws SQLException {
+        Statement stmt = con.createStatement();
+        ResultSet rs = stmt.executeQuery("select 1");
+        stmt.close();
+        assertTrue(rs.isClosed());
     }
-  }
 
-  @Test
-  void executeUpdateFailsOnSelect() throws SQLException {
-    Statement stmt = con.createStatement();
-    try {
-      stmt.executeUpdate("SELECT 1");
-      fail("Should have thrown an error.");
-    } catch (SQLException sqle) {
+    /**
+     * Closing a Statement twice is not an error.
+     */
+    @Test
+    void doubleClose() throws SQLException {
+        Statement stmt = con.createStatement();
+        stmt.close();
+        stmt.close();
     }
-  }
 
-  @Test
-  void executeUpdateFailsOnMultiStatementSelect() throws SQLException {
-    Statement stmt = con.createStatement();
-    try {
-      stmt.executeUpdate("/* */; SELECT 1");
-      fail("Should have thrown an error.");
-    } catch (SQLException sqle) {
+    @Test
+    void multiExecute() throws SQLException {
+        Statement stmt = con.createStatement();
+        assertTrue(stmt.execute("SELECT 1 as a; UPDATE test_statement SET i=1; SELECT 2 as b, 3 as c"));
+
+        ResultSet rs = stmt.getResultSet();
+        assertTrue(rs.next());
+        assertEquals(1, rs.getInt(1));
+        rs.close();
+
+        assertFalse(stmt.getMoreResults());
+        assertEquals(0, stmt.getUpdateCount());
+
+        assertTrue(stmt.getMoreResults());
+        rs = stmt.getResultSet();
+        assertTrue(rs.next());
+        assertEquals(2, rs.getInt(1));
+        rs.close();
+
+        assertFalse(stmt.getMoreResults());
+        assertEquals(-1, stmt.getUpdateCount());
+        stmt.close();
     }
-  }
 
-  @Test
-  void setQueryTimeout() throws SQLException {
-    Statement stmt = con.createStatement();
-    long start = 0;
-    boolean cancelReceived = false;
-    try {
-      stmt.setQueryTimeout(1);
-      start = System.nanoTime();
-      stmt.execute("select pg_sleep(10)");
-    } catch (SQLException sqle) {
-      // state for cancel
-      if ("57014".equals(sqle.getSQLState())) {
-        cancelReceived = true;
-      }
+    @Test
+    void emptyQuery() throws SQLException {
+        Statement stmt = con.createStatement();
+        stmt.execute("");
+        assertNull(stmt.getResultSet());
+        assertFalse(stmt.getMoreResults());
     }
-    long duration = System.nanoTime() - start;
-    if (!cancelReceived || duration > TimeUnit.SECONDS.toNanos(5)) {
-      fail("Query should have been cancelled since the timeout was set to 1 sec."
-          + " Cancel state: " + cancelReceived + ", duration: " + duration);
+
+    @Test
+    void updateCount() throws SQLException {
+        Statement stmt = con.createStatement();
+        int count;
+
+        count = stmt.executeUpdate("INSERT INTO test_statement VALUES (3)");
+        assertEquals(1, count);
+        count = stmt.executeUpdate("INSERT INTO test_statement VALUES (3)");
+        assertEquals(1, count);
+
+        count = stmt.executeUpdate("UPDATE test_statement SET i=4");
+        assertEquals(2, count);
+
+        count = stmt.executeUpdate("CREATE TEMP TABLE another_table (a int)");
+        assertEquals(0, count);
+
+        if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_0)) {
+            count = stmt.executeUpdate("CREATE TEMP TABLE yet_another_table AS SELECT x FROM generate_series(1,10) x");
+            assertEquals(10, count);
+        }
     }
-  }
 
-  @Test
-  void longQueryTimeout() throws SQLException {
-    Statement stmt = con.createStatement();
-    stmt.setQueryTimeout(Integer.MAX_VALUE);
-    assertEquals(Integer.MAX_VALUE,
-        stmt.getQueryTimeout(),
-        "setQueryTimeout(Integer.MAX_VALUE)");
-    stmt.setQueryTimeout(Integer.MAX_VALUE - 1);
-    assertEquals(Integer.MAX_VALUE - 1,
-        stmt.getQueryTimeout(),
-        "setQueryTimeout(Integer.MAX_VALUE-1)");
-  }
+    @Test
+    void escapeProcessing() throws SQLException {
+        Statement stmt = con.createStatement();
+        int count;
 
-  /**
-   * Test executes two queries one after another. The first one has timeout of 1ms, and the second
-   * one does not. The timeout of the first query should not impact the second one.
-   */
-  @Test
-  void shortQueryTimeout() throws SQLException {
-    assumeLongTest();
+        count = stmt.executeUpdate("insert into escapetest (ts) values ({ts '1900-01-01 00:00:00'})");
+        assertEquals(1, count);
 
-    long deadLine = System.nanoTime() + TimeUnit.SECONDS.toNanos(10);
-    Statement stmt = con.createStatement();
-    ((PgStatement) stmt).setQueryTimeoutMs(1);
-    Statement stmt2 = con.createStatement();
-    while (System.nanoTime() < deadLine) {
-      try {
-        // This usually won't time out but scheduler jitter, server load
-        // etc can cause a timeout.
-        stmt.executeQuery("select 1;");
-      } catch (SQLException e) {
-        // Expect "57014 query_canceled" (en-msg is "canceling statement due to statement timeout")
-        // but anything else is fatal. We can't differentiate other causes of statement cancel like
-        // "canceling statement due to user request" without error message matching though, and we
-        // don't want to do that.
-        assertEquals(
-            PSQLState.QUERY_CANCELED.getState(),
-            e.getSQLState(),
-            "Query is expected to be cancelled via st.close(), got " + e.getMessage());
-      }
-      // Must never time out.
-      stmt2.executeQuery("select 1;");
+        count = stmt.executeUpdate("insert into escapetest (d) values ({d '1900-01-01'})");
+        assertEquals(1, count);
+
+        count = stmt.executeUpdate("insert into escapetest (t) values ({t '00:00:00'})");
+        assertEquals(1, count);
+
+        ResultSet rs = stmt.executeQuery("select {fn version()} as version");
+        assertTrue(rs.next());
+
+        // check nested and multiple escaped functions
+        rs = stmt.executeQuery("select {fn version()} as version, {fn log({fn log(3.0)})} as log");
+        assertTrue(rs.next());
+        assertEquals(Math.log(Math.log(3)), rs.getDouble(2), 0.00001);
+
+        stmt.executeUpdate("UPDATE escapetest SET \")\" = 'a', \"\"\"){a}'\" = 'b'");
+
+        // check "difficult" values
+        rs = stmt.executeQuery("select {fn concat(')',escapetest.\")\")} as concat"
+                + ", {fn concat('{','}')} "
+                + ", {fn concat('''','\"')} "
+                + ", {fn concat(\"\"\"){a}'\", '''}''')} "
+                + " FROM escapetest");
+        assertTrue(rs.next());
+        assertEquals(")a", rs.getString(1));
+        assertEquals("{}", rs.getString(2));
+        assertEquals("'\"", rs.getString(3));
+        assertEquals("b'}'", rs.getString(4));
+
+        count = stmt.executeUpdate("create temp table b (i int)");
+        assertEquals(0, count);
+
+        rs = stmt.executeQuery("select * from {oj test_statement a left outer join b on (a.i=b.i)} ");
+        assertFalse(rs.next());
+        // test escape character
+        rs = stmt
+                .executeQuery("select str2 from comparisontest where str1 like '|_abcd' {escape '|'} ");
+        assertTrue(rs.next());
+        assertEquals("_found", rs.getString(1));
+        rs = stmt
+                .executeQuery("select str2 from comparisontest where str1 like '|%abcd' {escape '|'} ");
+        assertTrue(rs.next());
+        assertEquals("%found", rs.getString(1));
     }
-  }
 
-  @Test
-  void setQueryTimeoutWithSleep() throws SQLException, InterruptedException {
-    // check that the timeout starts ticking at execute, not at the
-    // setQueryTimeout call.
-    Statement stmt = con.createStatement();
-    try {
-      stmt.setQueryTimeout(1);
-      Thread.sleep(3000);
-      stmt.execute("select pg_sleep(5)");
-      fail("statement should have been canceled by query timeout");
-    } catch (SQLException sqle) {
-      // state for cancel
-      if (sqle.getSQLState().compareTo("57014") != 0) {
-        throw sqle;
-      }
-    }
-  }
-
-  @Test
-  void setQueryTimeoutOnPrepared() throws SQLException, InterruptedException {
-    // check that a timeout set on a prepared statement works on every
-    // execution.
-    PreparedStatement pstmt = con.prepareStatement("select pg_sleep(5)");
-    pstmt.setQueryTimeout(1);
-    for (int i = 1; i <= 3; i++) {
-      try {
+    @Test
+    void preparedFunction() throws SQLException {
+        PreparedStatement pstmt = con.prepareStatement("SELECT {fn concat('a', ?)}");
+        pstmt.setInt(1, 5);
         ResultSet rs = pstmt.executeQuery();
-        fail("statement should have been canceled by query timeout (execution #" + i + ")");
-      } catch (SQLException sqle) {
-        // state for cancel
-        if (sqle.getSQLState().compareTo("57014") != 0) {
-          throw sqle;
+        assertTrue(rs.next());
+        assertEquals("a5", rs.getString(1));
+    }
+
+    @Test
+    void dollarInComment() throws SQLException {
+        PreparedStatement pstmt = con.prepareStatement("SELECT /* $ */ {fn curdate()}");
+        ResultSet rs = pstmt.executeQuery();
+        assertTrue(rs.next());
+        assertNotNull(rs.getString(1), "{fn curdate()} should be not null");
+    }
+
+    @Test
+    void dollarInCommentTwoComments() throws SQLException {
+        PreparedStatement pstmt = con.prepareStatement("SELECT /* $ *//* $ */ {fn curdate()}");
+        ResultSet rs = pstmt.executeQuery();
+        assertTrue(rs.next());
+        assertNotNull(rs.getString(1), "{fn curdate()} should be not null");
+    }
+
+    @Test
+    void numericFunctions() throws SQLException {
+        Statement stmt = con.createStatement();
+
+        ResultSet rs = stmt.executeQuery("select {fn abs(-2.3)} as abs ");
+        assertTrue(rs.next());
+        assertEquals(2.3f, rs.getFloat(1), 0.00001);
+
+        rs = stmt.executeQuery("select {fn acos(-0.6)} as acos ");
+        assertTrue(rs.next());
+        assertEquals(Math.acos(-0.6), rs.getDouble(1), 0.00001);
+
+        rs = stmt.executeQuery("select {fn asin(-0.6)} as asin ");
+        assertTrue(rs.next());
+        assertEquals(Math.asin(-0.6), rs.getDouble(1), 0.00001);
+
+        rs = stmt.executeQuery("select {fn atan(-0.6)} as atan ");
+        assertTrue(rs.next());
+        assertEquals(Math.atan(-0.6), rs.getDouble(1), 0.00001);
+
+        rs = stmt.executeQuery("select {fn atan2(-2.3,7)} as atan2 ");
+        assertTrue(rs.next());
+        assertEquals(Math.atan2(-2.3, 7), rs.getDouble(1), 0.00001);
+
+        rs = stmt.executeQuery("select {fn ceiling(-2.3)} as ceiling ");
+        assertTrue(rs.next());
+        assertEquals(-2, rs.getDouble(1), 0.00001);
+
+        rs = stmt.executeQuery("select {fn cos(-2.3)} as cos, {fn cot(-2.3)} as cot ");
+        assertTrue(rs.next());
+        assertEquals(Math.cos(-2.3), rs.getDouble(1), 0.00001);
+        assertEquals(1 / Math.tan(-2.3), rs.getDouble(2), 0.00001);
+
+        rs = stmt.executeQuery("select {fn degrees({fn pi()})} as degrees ");
+        assertTrue(rs.next());
+        assertEquals(180, rs.getDouble(1), 0.00001);
+
+        rs = stmt.executeQuery("select {fn exp(-2.3)}, {fn floor(-2.3)},"
+                + " {fn log(2.3)},{fn log10(2.3)},{fn mod(3,2)}");
+        assertTrue(rs.next());
+        assertEquals(Math.exp(-2.3), rs.getDouble(1), 0.00001);
+        assertEquals(-3, rs.getDouble(2), 0.00001);
+        assertEquals(Math.log(2.3), rs.getDouble(3), 0.00001);
+        assertEquals(Math.log(2.3) / Math.log(10), rs.getDouble(4), 0.00001);
+        assertEquals(1, rs.getDouble(5), 0.00001);
+
+        rs = stmt.executeQuery("select {fn pi()}, {fn power(7,-2.3)},"
+                + " {fn radians(-180)},{fn round(3.1294,2)}");
+        assertTrue(rs.next());
+        assertEquals(Math.PI, rs.getDouble(1), 0.00001);
+        assertEquals(Math.pow(7, -2.3), rs.getDouble(2), 0.00001);
+        assertEquals(-Math.PI, rs.getDouble(3), 0.00001);
+        assertEquals(3.13, rs.getDouble(4), 0.00001);
+
+        rs = stmt.executeQuery("select {fn sign(-2.3)}, {fn sin(-2.3)},"
+                + " {fn sqrt(2.3)},{fn tan(-2.3)},{fn truncate(3.1294,2)}");
+        assertTrue(rs.next());
+        assertEquals(-1, rs.getInt(1));
+        assertEquals(Math.sin(-2.3), rs.getDouble(2), 0.00001);
+        assertEquals(Math.sqrt(2.3), rs.getDouble(3), 0.00001);
+        assertEquals(Math.tan(-2.3), rs.getDouble(4), 0.00001);
+        assertEquals(3.12, rs.getDouble(5), 0.00001);
+    }
+
+    @Test
+    void stringFunctions() throws SQLException {
+        Statement stmt = con.createStatement();
+        ResultSet rs = stmt.executeQuery(
+                "select {fn ascii(' test')},{fn char(32)}"
+                        + ",{fn concat('ab','cd')}"
+                        + ",{fn lcase('aBcD')},{fn left('1234',2)},{fn length('123 ')}"
+                        + ",{fn locate('bc','abc')},{fn locate('bc','abc',3)}");
+        assertTrue(rs.next());
+        assertEquals(32, rs.getInt(1));
+        assertEquals(" ", rs.getString(2));
+        assertEquals("abcd", rs.getString(3));
+        assertEquals("abcd", rs.getString(4));
+        assertEquals("12", rs.getString(5));
+        assertEquals(3, rs.getInt(6));
+        assertEquals(2, rs.getInt(7));
+        assertEquals(0, rs.getInt(8));
+
+        rs = stmt.executeQuery(
+                "SELECT {fn insert('abcdef',3,2,'xxxx')}"
+                        + ",{fn replace('abcdbc','bc','x')}");
+        assertTrue(rs.next());
+        assertEquals("abxxxxef", rs.getString(1));
+        assertEquals("axdx", rs.getString(2));
+
+        rs = stmt.executeQuery(
+                "select {fn ltrim(' ab')},{fn repeat('ab',2)}"
+                        + ",{fn right('abcde',2)},{fn rtrim('ab ')}"
+                        + ",{fn space(3)},{fn substring('abcd',2,2)}"
+                        + ",{fn ucase('aBcD')}");
+        assertTrue(rs.next());
+        assertEquals("ab", rs.getString(1));
+        assertEquals("abab", rs.getString(2));
+        assertEquals("de", rs.getString(3));
+        assertEquals("ab", rs.getString(4));
+        assertEquals("   ", rs.getString(5));
+        assertEquals("bc", rs.getString(6));
+        assertEquals("ABCD", rs.getString(7));
+    }
+
+    @Test
+    void dateFuncWithParam() throws SQLException {
+        // Prior to 8.0 there is no interval + timestamp operator,
+        // so timestampadd does not work.
+        //
+
+        PreparedStatement ps = con.prepareStatement(
+                "SELECT {fn timestampadd(SQL_TSI_QUARTER, ? ,{fn now()})}, {fn timestampadd(SQL_TSI_MONTH, ?, {fn now()})} ");
+        ps.setInt(1, 4);
+        ps.setInt(2, 12);
+        ResultSet rs = ps.executeQuery();
+        assertTrue(rs.next());
+        assertEquals(rs.getTimestamp(1), rs.getTimestamp(2));
+    }
+
+    @Test
+    void dateFunctions() throws SQLException {
+        Statement stmt = con.createStatement();
+        ResultSet rs = stmt.executeQuery("select {fn curdate()},{fn curtime()}"
+                + ",{fn dayname({fn now()})}, {fn dayofmonth({fn now()})}"
+                + ",{fn dayofweek({ts '2005-01-17 12:00:00'})},{fn dayofyear({fn now()})}"
+                + ",{fn hour({fn now()})},{fn minute({fn now()})}"
+                + ",{fn month({fn now()})}"
+                + ",{fn monthname({fn now()})},{fn quarter({fn now()})}"
+                + ",{fn second({fn now()})},{fn week({fn now()})}"
+                + ",{fn year({fn now()})} ");
+        assertTrue(rs.next());
+        // ensure Sunday => 1 and Monday => 2
+        assertEquals(2, rs.getInt(5));
+
+        // Prior to 8.0 there is no interval + timestamp operator,
+        // so timestampadd does not work.
+        //
+
+        // second
+        rs = stmt.executeQuery(
+                "select {fn timestampdiff(SQL_TSI_SECOND,{fn now()},{fn timestampadd(SQL_TSI_SECOND,3,{fn now()})})} ");
+        assertTrue(rs.next());
+        assertEquals(3, rs.getInt(1));
+        // MINUTE
+        rs = stmt.executeQuery(
+                "select {fn timestampdiff(SQL_TSI_MINUTE,{fn now()},{fn timestampadd(SQL_TSI_MINUTE,3,{fn now()})})} ");
+        assertTrue(rs.next());
+        assertEquals(3, rs.getInt(1));
+        // HOUR
+        rs = stmt.executeQuery(
+                "select {fn timestampdiff(SQL_tsi_HOUR,{fn now()},{fn timestampadd(SQL_TSI_HOUR,3,{fn now()})})} ");
+        assertTrue(rs.next());
+        assertEquals(3, rs.getInt(1));
+        // day
+        rs = stmt.executeQuery(
+                "select {fn timestampdiff(SQL_TSI_DAY,{fn now()},{fn timestampadd(SQL_TSI_DAY,-3,{fn now()})})} ");
+        assertTrue(rs.next());
+        int res = rs.getInt(1);
+        if (res != -3 && res != -2) {
+            // set TimeZone='America/New_York';
+            // select CAST(-3 || ' day' as interval);
+            // interval
+            //----------
+            // -3 days
+            //
+            // select CAST(-3 || ' day' as interval)+now();
+            //           ?column?
+            //-------------------------------
+            // 2018-03-08 07:59:13.586895-05
+            //
+            // select CAST(-3 || ' day' as interval)+now()-now();
+            //     ?column?
+            //-------------------
+            // -2 days -23:00:00
+            fail("CAST(-3 || ' day' as interval)+now()-now() is expected to return -3 or -2. Actual value is " + res);
         }
-      }
-    }
-  }
-
-  @Test
-  void setQueryTimeoutWithoutExecute() throws SQLException, InterruptedException {
-    // check that a timeout set on one statement doesn't affect another
-    Statement stmt1 = con.createStatement();
-    stmt1.setQueryTimeout(1);
-
-    Statement stmt2 = con.createStatement();
-    ResultSet rs = stmt2.executeQuery("SELECT pg_sleep(2)");
-  }
-
-  @Test
-  void resultSetTwice() throws SQLException {
-    Statement stmt = con.createStatement();
-
-    ResultSet rs = stmt.executeQuery("select {fn abs(-2.3)} as abs ");
-    assertNotNull(rs);
-
-    ResultSet rsOther = stmt.getResultSet();
-    assertNotNull(rsOther);
-  }
-
-  @Test
-  void multipleCancels() throws Exception {
-    SharedTimer sharedTimer = Driver.getSharedTimer();
-
-    Connection connA = null;
-    Connection connB = null;
-    Statement stmtA = null;
-    Statement stmtB = null;
-    ResultSet rsA = null;
-    ResultSet rsB = null;
-    try {
-      assertEquals(0, sharedTimer.getRefCount());
-      connA = TestUtil.openDB();
-      connB = TestUtil.openDB();
-      stmtA = connA.createStatement();
-      stmtB = connB.createStatement();
-      stmtA.setQueryTimeout(1);
-      stmtB.setQueryTimeout(1);
-      try {
-        rsA = stmtA.executeQuery("SELECT pg_sleep(2)");
-      } catch (SQLException e) {
-        // ignore the expected timeout
-      }
-      assertEquals(1, sharedTimer.getRefCount());
-      try {
-        rsB = stmtB.executeQuery("SELECT pg_sleep(2)");
-      } catch (SQLException e) {
-        // ignore the expected timeout
-      }
-    } finally {
-      TestUtil.closeQuietly(rsA);
-      TestUtil.closeQuietly(rsB);
-      TestUtil.closeQuietly(stmtA);
-      TestUtil.closeQuietly(stmtB);
-      TestUtil.closeQuietly(connA);
-      TestUtil.closeQuietly(connB);
-    }
-    assertEquals(0, sharedTimer.getRefCount());
-  }
-
-  @Test
-  @Timeout(30)
-  void cancelQueryWithBrokenNetwork() throws SQLException, IOException, InterruptedException {
-    // check that stmt.cancel() doesn't hang forever if the network is broken
-
-    ExecutorService executor = Executors.newCachedThreadPool();
-
-    try (StrangeProxyServer proxyServer = new StrangeProxyServer(TestUtil.getServer(), TestUtil.getPort())) {
-      Properties props = new Properties();
-      props.setProperty(TestUtil.SERVER_HOST_PORT_PROP, String.format("%s:%s", "localhost", proxyServer.getServerPort()));
-      PGProperty.CANCEL_SIGNAL_TIMEOUT.set(props, 1);
-
-      try (Connection conn = TestUtil.openDB(props); Statement stmt = conn.createStatement()) {
-        executor.submit(() -> stmt.execute("select pg_sleep(60)"));
-
-        Thread.sleep(1000);
-        proxyServer.stopForwardingAllClients();
-
-        stmt.cancel();
-        // Note: network is still inaccessible, so the statement execution is still in progress.
-        // So we abort the connection to allow implicit conn.close()
-        conn.abort(executor);
-      }
+        // WEEK => extract week from interval is not supported by backend
+        // rs = stmt.executeQuery("select {fn timestampdiff(SQL_TSI_WEEK,{fn now()},{fn
+        // timestampadd(SQL_TSI_WEEK,3,{fn now()})})} ");
+        // assertTrue(rs.next());
+        // assertEquals(3,rs.getInt(1));
+        // MONTH => backend assume there are 0 month in an interval of 92 days...
+        // rs = stmt.executeQuery("select {fn timestampdiff(SQL_TSI_MONTH,{fn now()},{fn
+        // timestampadd(SQL_TSI_MONTH,3,{fn now()})})} ");
+        // assertTrue(rs.next());
+        // assertEquals(3,rs.getInt(1));
+        // QUARTER => backend assume there are 1 quarter even in 270 days...
+        // rs = stmt.executeQuery("select {fn timestampdiff(SQL_TSI_QUARTER,{fn now()},{fn
+        // timestampadd(SQL_TSI_QUARTER,3,{fn now()})})} ");
+        // assertTrue(rs.next());
+        // assertEquals(3,rs.getInt(1));
+        // YEAR
+        // rs = stmt.executeQuery("select {fn timestampdiff(SQL_TSI_YEAR,{fn now()},{fn
+        // timestampadd(SQL_TSI_YEAR,3,{fn now()})})} ");
+        // assertTrue(rs.next());
+        // assertEquals(3,rs.getInt(1));
     }
 
-    executor.shutdownNow();
-  }
+    @Test
+    void systemFunctions() throws SQLException {
+        Statement stmt = con.createStatement();
+        ResultSet rs = stmt.executeQuery(
+                "select {fn ifnull(null,'2')}"
+                        + ",{fn user()} ");
+        assertTrue(rs.next());
+        assertEquals("2", rs.getString(1));
+        assertEquals(TestUtil.getUser(), rs.getString(2));
 
-  @Test
-  @Timeout(10)
-  void closeInProgressStatement() throws Exception {
-    ExecutorService executor = Executors.newSingleThreadExecutor();
-    final Connection outerLockCon = TestUtil.openDB();
-    outerLockCon.setAutoCommit(false);
-    //Acquire an exclusive lock so we can block the notice generating statement
-    outerLockCon.createStatement().execute("LOCK TABLE test_lock IN ACCESS EXCLUSIVE MODE;");
-
-    try {
-      con.createStatement().execute("SET SESSION client_min_messages = 'NOTICE'");
-      con.createStatement()
-          .execute("CREATE OR REPLACE FUNCTION notify_then_sleep() RETURNS VOID AS "
-              + "$BODY$ "
-              + "BEGIN "
-              + "RAISE NOTICE 'start';"
-              + "LOCK TABLE test_lock IN ACCESS EXCLUSIVE MODE;"
-              + "END "
-              + "$BODY$ "
-              + "LANGUAGE plpgsql;");
-      int cancels = 0;
-      for (int i = 0; i < 100; i++) {
-        final Statement st = con.createStatement();
-        executor.submit(new Callable<Void>() {
-          @Override
-          public Void call() throws Exception {
-            long start = System.nanoTime();
-            while (st.getWarnings() == null) {
-              long dt = System.nanoTime() - start;
-              if (dt > TimeUnit.SECONDS.toNanos(10)) {
-                throw new IllegalStateException("Expected to receive a notice within 10 seconds");
-              }
-            }
-            st.close();
-            return null;
-          }
-        });
-        st.setQueryTimeout(120);
-        try {
-          st.execute("select notify_then_sleep()");
-        } catch (SQLException e) {
-          assertEquals(
-              PSQLState.QUERY_CANCELED.getState(),
-              e.getSQLState(),
-              "Query is expected to be cancelled via st.close(), got " + e.getMessage()
-          );
-          cancels++;
-          break;
-        } finally {
-          TestUtil.closeQuietly(st);
-        }
-      }
-      assertNotEquals(0, cancels, "At least one QUERY_CANCELED state is expected");
-    } finally {
-      executor.shutdown();
-      TestUtil.closeQuietly(outerLockCon);
+        rs = stmt.executeQuery("select {fn database()} ");
+        assertTrue(rs.next());
+        assertEquals(TestUtil.getDatabase(), rs.getString(1));
     }
-  }
 
-  @Test
-  @Timeout(10)
-  void concurrentIsValid() throws Throwable {
-    ExecutorService executor = Executors.newCachedThreadPool();
-    try {
-      List<Future<?>> results = new ArrayList<>();
-      Random rnd = new Random();
-      for (int i = 0; i < 10; i++) {
-        Future<?> future = executor.submit(() -> {
-          try {
-            for (int j = 0; j < 50; j++) {
-              con.isValid(2);
-              try (PreparedStatement ps =
-                       con.prepareStatement("select * from generate_series(1,?) as x(id)")) {
-                int limit = rnd.nextInt(10);
-                ps.setInt(1, limit);
-                try (ResultSet r = ps.executeQuery()) {
-                  int cnt = 0;
-                  String callName = "generate_series(1, " + limit + ") in thread "
-                      + Thread.currentThread().getName();
-                  while (r.next()) {
-                    cnt++;
-                    assertEquals(cnt, r.getInt(1), callName + ", row " + cnt);
-                  }
-                  assertEquals(limit, cnt, callName + " number of rows");
+    @Test
+    void warningsAreCleared() throws SQLException {
+        Statement stmt = con.createStatement();
+        // Will generate a NOTICE: for primary key index creation
+        stmt.execute("CREATE TEMP TABLE unused (a int primary key)");
+        stmt.executeQuery("SELECT 1");
+        // Executing another query should clear the warning from the first one.
+        assertNull(stmt.getWarnings());
+        stmt.close();
+    }
+
+    @Test
+    void warningsAreAvailableAsap()
+            throws Exception {
+        try (Connection outerLockCon = TestUtil.openDB()) {
+            outerLockCon.setAutoCommit(false);
+            //Acquire an exclusive lock so we can block the notice generating statement
+            outerLockCon.createStatement().execute("LOCK TABLE test_lock IN ACCESS EXCLUSIVE MODE;");
+            con.createStatement()
+                    .execute("CREATE OR REPLACE FUNCTION notify_then_sleep() RETURNS VOID AS "
+                            + "$BODY$ "
+                            + "BEGIN "
+                            + "RAISE NOTICE 'Test 1'; "
+                            + "RAISE NOTICE 'Test 2'; "
+                            + "LOCK TABLE test_lock IN ACCESS EXCLUSIVE MODE; "
+                            + "END "
+                            + "$BODY$ "
+                            + "LANGUAGE plpgsql;");
+            con.createStatement().execute("SET SESSION client_min_messages = 'NOTICE'");
+            //If we never receive the two warnings the statement will just hang, so set a low timeout
+            con.createStatement().execute("SET SESSION statement_timeout = 1000");
+            final PreparedStatement preparedStatement = con.prepareStatement("SELECT notify_then_sleep()");
+            final Callable<Void> warningReader = new Callable<Void>() {
+                @Override
+                public Void call() throws SQLException, InterruptedException {
+                    while (true) {
+                        SQLWarning warning = preparedStatement.getWarnings();
+                        if (warning != null) {
+                            assertEquals("Test 1", warning.getMessage(), "First warning received not first notice raised");
+                            SQLWarning next = warning.getNextWarning();
+                            if (next != null) {
+                                assertEquals("Test 2", next.getMessage(), "Second warning received not second notice raised");
+                                //Release the lock so that the notice generating statement can end.
+                                outerLockCon.commit();
+                                return null;
+                            }
+                        }
+                        //Break the loop on InterruptedException
+                        Thread.sleep(0);
+                    }
                 }
-              }
-            }
-          } catch (SQLException e) {
-            throw new RuntimeException(e);
-          }
-        });
-        results.add(future);
-      }
-      for (Future<?> result : results) {
-        // Propagate exception if any
-        result.get();
-      }
-    } catch (ExecutionException e) {
-      throw e.getCause();
-    } finally {
-      executor.shutdown();
-      executor.awaitTermination(10, TimeUnit.SECONDS);
-    }
-  }
-
-  @Test
-  @Timeout(20)
-  void fastCloses() throws SQLException {
-    ExecutorService executor = Executors.newSingleThreadExecutor();
-    con.createStatement().execute("SET SESSION client_min_messages = 'NOTICE'");
-    con.createStatement()
-        .execute("CREATE OR REPLACE FUNCTION notify_then_sleep() RETURNS VOID AS "
-            + "$BODY$ "
-            + "BEGIN "
-            + "RAISE NOTICE 'start';"
-            + "EXECUTE pg_sleep(1);" // Note: timeout value does not matter here, we just test if test crashes or locks somehow
-            + "END "
-            + "$BODY$ "
-            + "LANGUAGE plpgsql;");
-    Map<String, Integer> cnt = new HashMap<>();
-    final Random rnd = new Random();
-    for (int i = 0; i < 1000; i++) {
-      final Statement st = con.createStatement();
-      executor.submit(new Callable<Void>() {
-        @Override
-        public Void call() throws Exception {
-          int s = rnd.nextInt(10);
-          if (s > 8) {
+            };
+            ExecutorService executorService = Executors.newSingleThreadExecutor();
             try {
-              Thread.sleep(s - 9);
-            } catch (InterruptedException ex) {
-              // don't execute the close here as this thread was cancelled below in shutdownNow
-              return null;
+                Future<Void> future = executorService.submit(warningReader);
+                //Statement should only finish executing once we have
+                //received the two notices and released the outer lock.
+                preparedStatement.execute();
+
+                //If the test takes longer than 2 seconds, it's a failure.
+                future.get(2, TimeUnit.SECONDS);
+            } finally {
+                executorService.shutdownNow();
             }
-          }
-          st.close();
-          return null;
         }
-      });
-      ResultSet rs = null;
-      String sqlState = "0";
-      try {
-        rs = st.executeQuery("select 1");
-        // Acceptable
-      } catch (SQLException e) {
-        sqlState = e.getSQLState();
-        if (!PSQLState.OBJECT_NOT_IN_STATE.getState().equals(sqlState)
-            && !PSQLState.QUERY_CANCELED.getState().equals(sqlState)) {
-          assertEquals(
-              PSQLState.QUERY_CANCELED.getState(),
-              e.getSQLState(),
-              "Query is expected to be cancelled via st.close(), got " + e.getMessage()
-          );
-        }
-      } finally {
-        TestUtil.closeQuietly(rs);
-        TestUtil.closeQuietly(st);
-      }
-      Integer val = cnt.get(sqlState);
-      val = (val == null ? 0 : val) + 1;
-      cnt.put(sqlState, val);
-    }
-    System.out.println("[testFastCloses] total counts for each sql state: " + cnt);
-    executor.shutdown();
-  }
-
-  /**
-   * Tests that calling {@code java.sql.Statement#close()} from a concurrent thread does not result
-   * in {@link java.util.ConcurrentModificationException}.
-   */
-  @Test
-  void sideStatementFinalizers() throws SQLException {
-    long deadline = System.nanoTime() + TimeUnit.SECONDS.toNanos(2);
-
-    final AtomicInteger leaks = new AtomicInteger();
-    final AtomicReference<Throwable> cleanupFailure = new AtomicReference<>();
-    // Create several cleaners, so they can clean leaks concurrently
-    List<LazyCleaner> cleaners = new ArrayList<>();
-    for (int i = 0; i < 16; i++) {
-      cleaners.add(new LazyCleaner(Duration.ofSeconds(2), "pgjdbc-test-cleaner-" + i));
     }
 
-    for (int q = 0; System.nanoTime() < deadline || leaks.get() < 10000; q++) {
-      for (int i = 0; i < 100; i++) {
-        PreparedStatement ps = con.prepareStatement("select " + (i + q));
-        ps.close();
-      }
-      final int nextId = q;
-      int cleanerId = ThreadLocalRandom.current().nextInt(cleaners.size());
-      PreparedStatement ps = con.prepareStatement("select /*leak*/ " + nextId);
-      cleaners.get(cleanerId).register(new Object(), leak -> {
+    /**
+     * <p>Demonstrates a safe approach to concurrently reading the latest
+     * warnings while periodically clearing them.</p>
+     *
+     * <p>One drawback of this approach is that it requires the reader to make it to the end of the
+     * warning chain before clearing it; so long as your warning processing step is not very slow,
+     * this should happen more or less instantaneously even if you receive a lot of warnings.</p>
+     */
+    @Test
+    void concurrentWarningReadAndClear()
+            throws SQLException, InterruptedException, ExecutionException, TimeoutException {
+        final int iterations = 1000;
+        con.createStatement()
+                .execute("CREATE OR REPLACE FUNCTION notify_loop() RETURNS VOID AS "
+                        + "$BODY$ "
+                        + "BEGIN "
+                        + "FOR i IN 1.. " + iterations + " LOOP "
+                        + "  RAISE NOTICE 'Warning %', i; "
+                        + "END LOOP; "
+                        + "END "
+                        + "$BODY$ "
+                        + "LANGUAGE plpgsql;");
+        con.createStatement().execute("SET SESSION client_min_messages = 'NOTICE'");
+        final PreparedStatement statement = con.prepareStatement("SELECT notify_loop()");
+        final Callable<Void> warningReader = new Callable<Void>() {
+            @Override
+            public Void call() throws SQLException, InterruptedException {
+                SQLWarning lastProcessed = null;
+                int warnings = 0;
+                //For production code replace this with some condition that
+                //ends after the statement finishes execution
+                while (warnings < iterations) {
+                    SQLWarning warn = statement.getWarnings();
+                    //if next linked warning has value use that, otherwise keep using latest head
+                    if (lastProcessed != null && lastProcessed.getNextWarning() != null) {
+                        warn = lastProcessed.getNextWarning();
+                    }
+                    if (warn != null) {
+                        warnings++;
+                        //System.out.println("Processing " + warn.getMessage());
+                        assertEquals("Warning " + warnings, warn.getMessage(), "Received warning out of expected order");
+                        lastProcessed = warn;
+                        //If the processed warning was the head of the chain clear
+                        if (warn == statement.getWarnings()) {
+                            //System.out.println("Clearing warnings");
+                            statement.clearWarnings();
+                        }
+                    } else {
+                        //Not required for this test, but a good idea adding some delay for production code
+                        //to avoid high cpu usage while the query is running and no warnings are coming in.
+                        //Alternatively use JDK9's Thread.onSpinWait()
+                        Thread.sleep(10);
+                    }
+                }
+                assertEquals("Warning " + iterations, lastProcessed.getMessage(), "Didn't receive expected last warning");
+                return null;
+            }
+        };
+
+        final ExecutorService executor = Executors.newSingleThreadExecutor();
         try {
-          ps.close();
-        } catch (Throwable t) {
-          cleanupFailure.compareAndSet(null, t);
+            final Future warningReaderThread = executor.submit(warningReader);
+            statement.execute();
+            //If the reader doesn't return after 2 seconds, it failed.
+            warningReaderThread.get(2, TimeUnit.SECONDS);
+        } finally {
+            executor.shutdownNow();
         }
-        leaks.incrementAndGet();
-      });
     }
-    if (cleanupFailure.get() != null) {
-      throw new IllegalStateException("Detected failure in cleanup thread", cleanupFailure.get());
+
+    /**
+     * The parser tries to break multiple statements into individual queries as required by the V3
+     * extended query protocol. It can be a little overzealous sometimes and this test ensures we keep
+     * multiple rule actions together in one statement.
+     */
+    @Test
+    void parsingSemiColons() throws SQLException {
+        Statement stmt = con.createStatement();
+        stmt.execute(
+                "CREATE RULE r1 AS ON INSERT TO escapetest DO (DELETE FROM test_statement ; INSERT INTO test_statement VALUES (1); INSERT INTO test_statement VALUES (2); );");
+        stmt.executeUpdate("INSERT INTO escapetest(ts) VALUES (NULL)");
+        ResultSet rs = stmt.executeQuery("SELECT i from test_statement ORDER BY i");
+        assertTrue(rs.next());
+        assertEquals(1, rs.getInt(1));
+        assertTrue(rs.next());
+        assertEquals(2, rs.getInt(1));
+        assertFalse(rs.next());
     }
-  }
 
-  /**
-   * Test that $JAVASCRIPT$ protects curly braces from JDBC {fn now()} kind of syntax.
-   * @throws SQLException if something goes wrong
-   */
-  @Test
-  void javaScriptFunction() throws SQLException {
-    String str = "  var _modules = {};\n"
-        + "  var _current_stack = [];\n"
-        + "\n"
-        + "  // modules start\n"
-        + "  _modules[\"/root/aidbox/fhirbase/src/core\"] = {\n"
-        + "  init:  function(){\n"
-        + "    var exports = {};\n"
-        + "    _current_stack.push({file: \"core\", dir: \"/root/aidbox/fhirbase/src\"})\n"
-        + "    var module = {exports: exports};";
+    @Test
+    void parsingDollarQuotes() throws SQLException {
+        // dollar-quotes are supported in the backend since version 8.0
+        Statement st = con.createStatement();
+        ResultSet rs;
 
-    PreparedStatement ps = null;
-    try {
-      ps = con.prepareStatement("select $JAVASCRIPT$" + str + "$JAVASCRIPT$");
-      ResultSet rs = ps.executeQuery();
-      rs.next();
-      assertEquals(str, rs.getString(1), "JavaScript code has been protected with $JAVASCRIPT$");
-    } finally {
-      TestUtil.closeQuietly(ps);
+        rs = st.executeQuery("SELECT '$a$ ; $a$'");
+        assertTrue(rs.next());
+        assertEquals("$a$ ; $a$", rs.getObject(1));
+        rs.close();
+
+        rs = st.executeQuery("SELECT $$;$$");
+        assertTrue(rs.next());
+        assertEquals(";", rs.getObject(1));
+        rs.close();
+
+        rs = st.executeQuery("SELECT $OR$$a$'$b$a$$OR$ WHERE '$a$''$b$a$'=$OR$$a$'$b$a$$OR$OR ';'=''");
+        assertTrue(rs.next());
+        assertEquals("$a$'$b$a$", rs.getObject(1));
+        assertFalse(rs.next());
+        rs.close();
+
+        rs = st.executeQuery("SELECT $B$;$b$B$");
+        assertTrue(rs.next());
+        assertEquals(";$b", rs.getObject(1));
+        rs.close();
+
+        rs = st.executeQuery("SELECT $c$c$;$c$");
+        assertTrue(rs.next());
+        assertEquals("c$;", rs.getObject(1));
+        rs.close();
+
+        rs = st.executeQuery("SELECT $A0$;$A0$ WHERE ''=$t$t$t$ OR ';$t$'=';$t$'");
+        assertTrue(rs.next());
+        assertEquals(";", rs.getObject(1));
+        assertFalse(rs.next());
+        rs.close();
+
+        st.executeQuery("SELECT /* */$$;$$/**//*;*/").close();
+        st.executeQuery("SELECT /* */--;\n$$a$$/**/--\n--;\n").close();
+
+        st.close();
     }
-  }
 
-  @Test
-  void unterminatedDollarQuotes() throws SQLException {
-    ensureSyntaxException("dollar quotes", "CREATE OR REPLACE FUNCTION update_on_change() RETURNS TRIGGER AS $$\n"
-        + "BEGIN");
-  }
-
-  @Test
-  void unterminatedNamedDollarQuotes() throws SQLException {
-    ensureSyntaxException("dollar quotes", "CREATE OR REPLACE FUNCTION update_on_change() RETURNS TRIGGER AS $ABC$\n"
-        + "BEGIN");
-  }
-
-  @Test
-  void unterminatedComment() throws SQLException {
-    ensureSyntaxException("block comment", "CREATE OR REPLACE FUNCTION update_on_change() RETURNS TRIGGER AS /* $$\n"
-        + "BEGIN $$");
-  }
-
-  @Test
-  void unterminatedLiteral() throws SQLException {
-    ensureSyntaxException("string literal", "CREATE OR REPLACE FUNCTION update_on_change() 'RETURNS TRIGGER AS $$\n"
-        + "BEGIN $$");
-  }
-
-  @Test
-  void unterminatedIdentifier() throws SQLException {
-    ensureSyntaxException("string literal", "CREATE OR REPLACE FUNCTION \"update_on_change() RETURNS TRIGGER AS $$\n"
-        + "BEGIN $$");
-  }
-
-  private void ensureSyntaxException(String errorType, String sql) throws SQLException {
-    PreparedStatement ps = null;
-    try {
-      ps = con.prepareStatement(sql);
-      ps.executeUpdate();
-      fail("Query with unterminated " + errorType + " should fail");
-    } catch (SQLException e) {
-      assertEquals(PSQLState.SYNTAX_ERROR.getState(), e.getSQLState(), "Query should fail with unterminated " + errorType);
-    } finally {
-      TestUtil.closeQuietly(ps);
+    @Test
+    void unbalancedParensParseError() throws SQLException {
+        Statement stmt = con.createStatement();
+        try {
+            stmt.executeQuery("SELECT i FROM test_statement WHERE (1 > 0)) ORDER BY i");
+            fail("Should have thrown a parse error.");
+        } catch (SQLException sqle) {
+        }
+    }
+
    /**
     * Per JDBC, {@code executeUpdate} must reject a statement that returns a result set.
     */
    @Test
    void executeUpdateFailsOnSelect() throws SQLException {
        Statement stmt = con.createStatement();
        try {
            stmt.executeUpdate("SELECT 1");
            fail("Should have thrown an error.");
        } catch (SQLException sqle) {
            // Expected: executeUpdate on a row-returning statement must fail.
        }
    }
+
    /**
     * {@code executeUpdate} must also reject a multi-statement string whose trailing
     * statement returns a result set.
     */
    @Test
    void executeUpdateFailsOnMultiStatementSelect() throws SQLException {
        Statement stmt = con.createStatement();
        try {
            stmt.executeUpdate("/* */; SELECT 1");
            fail("Should have thrown an error.");
        } catch (SQLException sqle) {
            // Expected: the embedded SELECT makes the batch invalid for executeUpdate.
        }
    }
+
+    @Test
+    void setQueryTimeout() throws SQLException {
+        Statement stmt = con.createStatement();
+        long start = 0;
+        boolean cancelReceived = false;
+        try {
+            stmt.setQueryTimeout(1);
+            start = System.nanoTime();
+            stmt.execute("select pg_sleep(10)");
+        } catch (SQLException sqle) {
+            // state for cancel
+            if ("57014".equals(sqle.getSQLState())) {
+                cancelReceived = true;
+            }
+        }
+        long duration = System.nanoTime() - start;
+        if (!cancelReceived || duration > TimeUnit.SECONDS.toNanos(5)) {
+            fail("Query should have been cancelled since the timeout was set to 1 sec."
+                    + " Cancel state: " + cancelReceived + ", duration: " + duration);
+        }
+    }
+
+    @Test
+    void longQueryTimeout() throws SQLException {
+        Statement stmt = con.createStatement();
+        stmt.setQueryTimeout(Integer.MAX_VALUE);
+        assertEquals(Integer.MAX_VALUE,
+                stmt.getQueryTimeout(),
+                "setQueryTimeout(Integer.MAX_VALUE)");
+        stmt.setQueryTimeout(Integer.MAX_VALUE - 1);
+        assertEquals(Integer.MAX_VALUE - 1,
+                stmt.getQueryTimeout(),
+                "setQueryTimeout(Integer.MAX_VALUE-1)");
+    }
+
    /**
     * Test executes two queries one after another. The first one has timeout of 1ms, and the second
     * one does not. The timeout of the first query should not impact the second one.
     */
    @Test
    void shortQueryTimeout() throws SQLException {
        assumeLongTest();

        // Hammer both statements repeatedly for up to 10 seconds of wall-clock time.
        long deadLine = System.nanoTime() + TimeUnit.SECONDS.toNanos(10);
        Statement stmt = con.createStatement();
        // Millisecond granularity is a pgjdbc extension; JDBC's setQueryTimeout takes seconds.
        ((PgStatement) stmt).setQueryTimeoutMs(1);
        Statement stmt2 = con.createStatement();
        while (System.nanoTime() < deadLine) {
            try {
                // This usually won't time out but scheduler jitter, server load
                // etc can cause a timeout.
                stmt.executeQuery("select 1;");
            } catch (SQLException e) {
                // Expect "57014 query_canceled" (en-msg is "canceling statement due to statement timeout")
                // but anything else is fatal. We can't differentiate other causes of statement cancel like
                // "canceling statement due to user request" without error message matching though, and we
                // don't want to do that.
                assertEquals(
                        PSQLState.QUERY_CANCELED.getState(),
                        e.getSQLState(),
                        "Query is expected to be cancelled via st.close(), got " + e.getMessage());
            }
            // Must never time out.
            stmt2.executeQuery("select 1;");
        }
    }
+
+    @Test
+    void setQueryTimeoutWithSleep() throws SQLException, InterruptedException {
+        // check that the timeout starts ticking at execute, not at the
+        // setQueryTimeout call.
+        Statement stmt = con.createStatement();
+        try {
+            stmt.setQueryTimeout(1);
+            Thread.sleep(3000);
+            stmt.execute("select pg_sleep(5)");
+            fail("statement should have been canceled by query timeout");
+        } catch (SQLException sqle) {
+            // state for cancel
+            if (sqle.getSQLState().compareTo("57014") != 0) {
+                throw sqle;
+            }
+        }
+    }
+
+    @Test
+    void setQueryTimeoutOnPrepared() throws SQLException, InterruptedException {
+        // check that a timeout set on a prepared statement works on every
+        // execution.
+        PreparedStatement pstmt = con.prepareStatement("select pg_sleep(5)");
+        pstmt.setQueryTimeout(1);
+        for (int i = 1; i <= 3; i++) {
+            try {
+                ResultSet rs = pstmt.executeQuery();
+                fail("statement should have been canceled by query timeout (execution #" + i + ")");
+            } catch (SQLException sqle) {
+                // state for cancel
+                if (sqle.getSQLState().compareTo("57014") != 0) {
+                    throw sqle;
+                }
+            }
+        }
+    }
+
    @Test
    void setQueryTimeoutWithoutExecute() throws SQLException, InterruptedException {
        // check that a timeout set on one statement doesn't affect another
        Statement stmt1 = con.createStatement();
        stmt1.setQueryTimeout(1);

        // pg_sleep(2) outlives stmt1's 1-second timeout; completing without a
        // "query canceled" SQLException is the actual assertion of this test.
        Statement stmt2 = con.createStatement();
        ResultSet rs = stmt2.executeQuery("SELECT pg_sleep(2)");
    }
+
+    @Test
+    void resultSetTwice() throws SQLException {
+        Statement stmt = con.createStatement();
+
+        ResultSet rs = stmt.executeQuery("select {fn abs(-2.3)} as abs ");
+        assertNotNull(rs);
+
+        ResultSet rsOther = stmt.getResultSet();
+        assertNotNull(rsOther);
+    }
+
    /**
     * Two statements on two connections both time out; the driver's shared cancel
     * timer must be reference-counted back down to zero once everything is closed.
     */
    @Test
    void multipleCancels() throws Exception {
        SharedTimer sharedTimer = Driver.getSharedTimer();

        Connection connA = null;
        Connection connB = null;
        Statement stmtA = null;
        Statement stmtB = null;
        ResultSet rsA = null;
        ResultSet rsB = null;
        try {
            // No cancellation timers should be live before the test starts.
            assertEquals(0, sharedTimer.getRefCount());
            connA = TestUtil.openDB();
            connB = TestUtil.openDB();
            stmtA = connA.createStatement();
            stmtB = connB.createStatement();
            stmtA.setQueryTimeout(1);
            stmtB.setQueryTimeout(1);
            try {
                rsA = stmtA.executeQuery("SELECT pg_sleep(2)");
            } catch (SQLException e) {
                // ignore the expected timeout
            }
            // NOTE(review): expects exactly one timer reference between the two timed-out
            // queries — confirm against SharedTimer's acquire/release semantics.
            assertEquals(1, sharedTimer.getRefCount());
            try {
                rsB = stmtB.executeQuery("SELECT pg_sleep(2)");
            } catch (SQLException e) {
                // ignore the expected timeout
            }
        } finally {
            TestUtil.closeQuietly(rsA);
            TestUtil.closeQuietly(rsB);
            TestUtil.closeQuietly(stmtA);
            TestUtil.closeQuietly(stmtB);
            TestUtil.closeQuietly(connA);
            TestUtil.closeQuietly(connB);
        }
        // Closing every statement and connection must release all timer references.
        assertEquals(0, sharedTimer.getRefCount());
    }
+
    @Test
    @Timeout(30)
    void cancelQueryWithBrokenNetwork() throws SQLException, IOException, InterruptedException {
        // check that stmt.cancel() doesn't hang forever if the network is broken

        ExecutorService executor = Executors.newCachedThreadPool();

        try (StrangeProxyServer proxyServer = new StrangeProxyServer(TestUtil.getServer(), TestUtil.getPort())) {
            Properties props = new Properties();
            props.setProperty(TestUtil.SERVER_HOST_PORT_PROP, String.format("%s:%s", "localhost", proxyServer.getServerPort()));
            // Cap the cancel handshake at 1 second so a dead network cannot stall cancel() for long.
            PGProperty.CANCEL_SIGNAL_TIMEOUT.set(props, 1);

            try (Connection conn = TestUtil.openDB(props); Statement stmt = conn.createStatement()) {
                // Long-running query submitted on a background thread, routed through the proxy.
                executor.submit(() -> stmt.execute("select pg_sleep(60)"));

                // Give the query a moment to start, then sever all proxied traffic.
                Thread.sleep(1000);
                proxyServer.stopForwardingAllClients();

                stmt.cancel();
                // Note: network is still inaccessible, so the statement execution is still in progress.
                // So we abort the connection to allow implicit conn.close()
                conn.abort(executor);
            }
        }

        executor.shutdownNow();
    }
+
    /**
     * Closes a statement from a second thread while its query is blocked server-side,
     * expecting at least one QUERY_CANCELED outcome. The RAISE NOTICE serves as the
     * signal that the query has started; the table lock keeps it in flight.
     */
    @Test
    @Timeout(10)
    void closeInProgressStatement() throws Exception {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        final Connection outerLockCon = TestUtil.openDB();
        outerLockCon.setAutoCommit(false);
        //Acquire an exclusive lock so we can block the notice generating statement
        outerLockCon.createStatement().execute("LOCK TABLE test_lock IN ACCESS EXCLUSIVE MODE;");

        try {
            con.createStatement().execute("SET SESSION client_min_messages = 'NOTICE'");
            // The function raises a NOTICE first, then blocks on the lock held above, so
            // the closer thread can observe the warning while the query is still running.
            con.createStatement()
                    .execute("CREATE OR REPLACE FUNCTION notify_then_sleep() RETURNS VOID AS "
                            + "$BODY$ "
                            + "BEGIN "
                            + "RAISE NOTICE 'start';"
                            + "LOCK TABLE test_lock IN ACCESS EXCLUSIVE MODE;"
                            + "END "
                            + "$BODY$ "
                            + "LANGUAGE plpgsql;");
            int cancels = 0;
            for (int i = 0; i < 100; i++) {
                final Statement st = con.createStatement();
                // Background task: busy-wait for the NOTICE to surface as a warning, then
                // close the statement while its query is in progress.
                executor.submit(new Callable<Void>() {
                    @Override
                    public Void call() throws Exception {
                        long start = System.nanoTime();
                        while (st.getWarnings() == null) {
                            long dt = System.nanoTime() - start;
                            if (dt > TimeUnit.SECONDS.toNanos(10)) {
                                throw new IllegalStateException("Expected to receive a notice within 10 seconds");
                            }
                        }
                        st.close();
                        return null;
                    }
                });
                st.setQueryTimeout(120);
                try {
                    st.execute("select notify_then_sleep()");
                } catch (SQLException e) {
                    assertEquals(
                            PSQLState.QUERY_CANCELED.getState(),
                            e.getSQLState(),
                            "Query is expected to be cancelled via st.close(), got " + e.getMessage()
                    );
                    cancels++;
                    break;
                } finally {
                    TestUtil.closeQuietly(st);
                }
            }
            assertNotEquals(0, cancels, "At least one QUERY_CANCELED state is expected");
        } finally {
            executor.shutdown();
            TestUtil.closeQuietly(outerLockCon);
        }
    }
+
    /**
     * Ensures {@code Connection.isValid} can be called concurrently with regular query
     * execution on the same connection without corrupting either's results.
     */
    @Test
    @Timeout(10)
    void concurrentIsValid() throws Throwable {
        ExecutorService executor = Executors.newCachedThreadPool();
        try {
            List<Future<?>> results = new ArrayList<>();
            Random rnd = new Random();
            for (int i = 0; i < 10; i++) {
                Future<?> future = executor.submit(() -> {
                    try {
                        for (int j = 0; j < 50; j++) {
                            con.isValid(2);
                            try (PreparedStatement ps =
                                         con.prepareStatement("select * from generate_series(1,?) as x(id)")) {
                                int limit = rnd.nextInt(10);
                                ps.setInt(1, limit);
                                try (ResultSet r = ps.executeQuery()) {
                                    int cnt = 0;
                                    String callName = "generate_series(1, " + limit + ") in thread "
                                            + Thread.currentThread().getName();
                                    // Every row must arrive intact and in order despite the
                                    // interleaved isValid() round-trips.
                                    while (r.next()) {
                                        cnt++;
                                        assertEquals(cnt, r.getInt(1), callName + ", row " + cnt);
                                    }
                                    assertEquals(limit, cnt, callName + " number of rows");
                                }
                            }
                        }
                    } catch (SQLException e) {
                        throw new RuntimeException(e);
                    }
                });
                results.add(future);
            }
            for (Future<?> result : results) {
                // Propagate exception if any
                result.get();
            }
        } catch (ExecutionException e) {
            throw e.getCause();
        } finally {
            executor.shutdown();
            executor.awaitTermination(10, TimeUnit.SECONDS);
        }
    }
+
+    @Test
+    @Timeout(20)
+    void fastCloses() throws SQLException {
+        ExecutorService executor = Executors.newSingleThreadExecutor();
+        con.createStatement().execute("SET SESSION client_min_messages = 'NOTICE'");
+        con.createStatement()
+                .execute("CREATE OR REPLACE FUNCTION notify_then_sleep() RETURNS VOID AS "
+                        + "$BODY$ "
+                        + "BEGIN "
+                        + "RAISE NOTICE 'start';"
+                        + "EXECUTE pg_sleep(1);" // Note: timeout value does not matter here, we just test if test crashes or locks somehow
+                        + "END "
+                        + "$BODY$ "
+                        + "LANGUAGE plpgsql;");
+        Map<String, Integer> cnt = new HashMap<>();
+        final Random rnd = new Random();
+        for (int i = 0; i < 1000; i++) {
+            final Statement st = con.createStatement();
+            executor.submit(new Callable<Void>() {
+                @Override
+                public Void call() throws Exception {
+                    int s = rnd.nextInt(10);
+                    if (s > 8) {
+                        try {
+                            Thread.sleep(s - 9);
+                        } catch (InterruptedException ex) {
+                            // don't execute the close here as this thread was cancelled below in shutdownNow
+                            return null;
+                        }
+                    }
+                    st.close();
+                    return null;
+                }
+            });
+            ResultSet rs = null;
+            String sqlState = "0";
+            try {
+                rs = st.executeQuery("select 1");
+                // Acceptable
+            } catch (SQLException e) {
+                sqlState = e.getSQLState();
+                if (!PSQLState.OBJECT_NOT_IN_STATE.getState().equals(sqlState)
+                        && !PSQLState.QUERY_CANCELED.getState().equals(sqlState)) {
+                    assertEquals(
+                            PSQLState.QUERY_CANCELED.getState(),
+                            e.getSQLState(),
+                            "Query is expected to be cancelled via st.close(), got " + e.getMessage()
+                    );
+                }
+            } finally {
+                TestUtil.closeQuietly(rs);
+                TestUtil.closeQuietly(st);
+            }
+            Integer val = cnt.get(sqlState);
+            val = (val == null ? 0 : val) + 1;
+            cnt.put(sqlState, val);
+        }
+        System.out.println("[testFastCloses] total counts for each sql state: " + cnt);
+        executor.shutdown();
+    }
+
    /**
     * Tests that calling {@code java.sql.Statement#close()} from a concurrent thread does not result
     * in {@link java.util.ConcurrentModificationException}.
     */
    @Test
    void sideStatementFinalizers() throws SQLException {
        long deadline = System.nanoTime() + TimeUnit.SECONDS.toNanos(2);

        final AtomicInteger leaks = new AtomicInteger();
        final AtomicReference<Throwable> cleanupFailure = new AtomicReference<>();
        // Create several cleaners, so they can clean leaks concurrently
        List<LazyCleaner> cleaners = new ArrayList<>();
        for (int i = 0; i < 16; i++) {
            cleaners.add(new LazyCleaner(Duration.ofSeconds(2), "pgjdbc-test-cleaner-" + i));
        }

        // Run until the deadline passes AND at least 10000 cleaner-driven closes happened.
        for (int q = 0; System.nanoTime() < deadline || leaks.get() < 10000; q++) {
            // Churn short-lived statements to keep the connection's statement bookkeeping busy.
            for (int i = 0; i < 100; i++) {
                PreparedStatement ps = con.prepareStatement("select " + (i + q));
                ps.close();
            }
            final int nextId = q;
            // Register a "leaked" statement with a random cleaner; its close() races the
            // statement churn above on the main thread.
            int cleanerId = ThreadLocalRandom.current().nextInt(cleaners.size());
            PreparedStatement ps = con.prepareStatement("select /*leak*/ " + nextId);
            cleaners.get(cleanerId).register(new Object(), leak -> {
                try {
                    ps.close();
                } catch (Throwable t) {
                    // Remember only the first failure; it is rethrown once the loop ends.
                    cleanupFailure.compareAndSet(null, t);
                }
                leaks.incrementAndGet();
            });
        }
        if (cleanupFailure.get() != null) {
            throw new IllegalStateException("Detected failure in cleanup thread", cleanupFailure.get());
        }
    }
+
+    /**
+     * Test that $JAVASCRIPT$ protects curly braces from JDBC {fn now()} kind of syntax.
+     *
+     * @throws SQLException if something goes wrong
+     */
+    @Test
+    void javaScriptFunction() throws SQLException {
+        String str = "  var _modules = {};\n"
+                + "  var _current_stack = [];\n"
+                + "\n"
+                + "  // modules start\n"
+                + "  _modules[\"/root/aidbox/fhirbase/src/core\"] = {\n"
+                + "  init:  function(){\n"
+                + "    var exports = {};\n"
+                + "    _current_stack.push({file: \"core\", dir: \"/root/aidbox/fhirbase/src\"})\n"
+                + "    var module = {exports: exports};";
+
+        PreparedStatement ps = null;
+        try {
+            ps = con.prepareStatement("select $JAVASCRIPT$" + str + "$JAVASCRIPT$");
+            ResultSet rs = ps.executeQuery();
+            rs.next();
+            assertEquals(str, rs.getString(1), "JavaScript code has been protected with $JAVASCRIPT$");
+        } finally {
+            TestUtil.closeQuietly(ps);
+        }
+    }
+
    /** An unterminated anonymous ($$) dollar quote must produce a syntax error. */
    @Test
    void unterminatedDollarQuotes() throws SQLException {
        ensureSyntaxException("dollar quotes", "CREATE OR REPLACE FUNCTION update_on_change() RETURNS TRIGGER AS $$\n"
                + "BEGIN");
    }
+
    /** An unterminated named ($ABC$) dollar quote must produce a syntax error. */
    @Test
    void unterminatedNamedDollarQuotes() throws SQLException {
        ensureSyntaxException("dollar quotes", "CREATE OR REPLACE FUNCTION update_on_change() RETURNS TRIGGER AS $ABC$\n"
                + "BEGIN");
    }
+
    /** An unterminated block comment must produce a syntax error. */
    @Test
    void unterminatedComment() throws SQLException {
        ensureSyntaxException("block comment", "CREATE OR REPLACE FUNCTION update_on_change() RETURNS TRIGGER AS /* $$\n"
                + "BEGIN $$");
    }
+
    /** An unterminated single-quoted string literal must produce a syntax error. */
    @Test
    void unterminatedLiteral() throws SQLException {
        ensureSyntaxException("string literal", "CREATE OR REPLACE FUNCTION update_on_change() 'RETURNS TRIGGER AS $$\n"
                + "BEGIN $$");
    }
+
    /** An unterminated double-quoted identifier must produce a syntax error. */
    @Test
    void unterminatedIdentifier() throws SQLException {
        ensureSyntaxException("string literal", "CREATE OR REPLACE FUNCTION \"update_on_change() RETURNS TRIGGER AS $$\n"
                + "BEGIN $$");
    }
+
+    private void ensureSyntaxException(String errorType, String sql) throws SQLException {
+        PreparedStatement ps = null;
+        try {
+            ps = con.prepareStatement(sql);
+            ps.executeUpdate();
+            fail("Query with unterminated " + errorType + " should fail");
+        } catch (SQLException e) {
+            assertEquals(PSQLState.SYNTAX_ERROR.getState(), e.getSQLState(), "Query should fail with unterminated " + errorType);
+        } finally {
+            TestUtil.closeQuietly(ps);
+        }
     }
-  }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/StringTypeUnspecifiedArrayTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/StringTypeUnspecifiedArrayTest.java
index 18942e6..caf3a48 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/StringTypeUnspecifiedArrayTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/StringTypeUnspecifiedArrayTest.java
@@ -20,29 +20,29 @@ import java.util.Properties;
 
 @RunWith(Parameterized.class)
public class StringTypeUnspecifiedArrayTest extends BaseTest4 {
    // Each test runs once per binary transfer mode (see the parameters below).
    public StringTypeUnspecifiedArrayTest(BinaryMode binaryMode) {
        setBinaryMode(binaryMode);
    }

    @Parameterized.Parameters(name = "binary = {0}")
    public static Iterable<Object[]> data() {
        Collection<Object[]> ids = new ArrayList<>();
        for (BinaryMode binaryMode : BinaryMode.values()) {
            ids.add(new Object[]{binaryMode});
        }
        return ids;
    }

    // Force stringtype=unspecified so string parameters are sent without an explicit type.
    @Override
    protected void updateProperties(Properties props) {
        PGProperty.STRING_TYPE.set(props, "unspecified");
        super.updateProperties(props);
    }

    @Test
    public void testCreateArrayWithNonCachedType() throws Exception {
        PGbox[] in = new PGbox[0];
        Array a = con.createArrayOf("box", in);
        // 1111 is java.sql.Types.OTHER — the expected base type for the box type.
        Assert.assertEquals(1111, a.getBaseType());
    }
}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/TestACL.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/TestACL.java
index ebf9cf3..6e15937 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/TestACL.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/TestACL.java
@@ -5,28 +5,27 @@
 
 package org.postgresql.test.jdbc2;
 
+import org.junit.jupiter.api.Test;
 import org.postgresql.jdbc.PgConnection;
 import org.postgresql.jdbc.PgDatabaseMetaData;
 
-import org.junit.jupiter.api.Test;
-
class TestACL {

    // Exercises PgDatabaseMetaData.parseACL against a variety of PostgreSQL ACL strings
    // (plain grants, grant-option '*' markers, empty grantees, grantors, and quoted
    // "group ..." entries). Passing is simply not throwing.
    @Test
    void parseACL() {
        // NOTE(review): a null connection is passed, so parseACL presumably never
        // dereferences the connection — verify before relying on this elsewhere.
        PgConnection pgConnection = null;
        PgDatabaseMetaData a = new PgDatabaseMetaData(pgConnection) {
        };
        a.parseACL("{jurka=arwdRxt/jurka,permuser=rw*/jurka}", "jurka");
        a.parseACL("{jurka=a*r*w*d*R*x*t*/jurka,permuser=rw*/jurka}", "jurka");
        a.parseACL("{=,jurka=arwdRxt,permuser=rw}", "jurka");
        a.parseACL("{jurka=arwdRxt/jurka,permuser=rw*/jurka,grantuser=w/permuser}", "jurka");
        a.parseACL("{jurka=a*r*w*d*R*x*t*/jurka,permuser=rw*/jurka,grantuser=w/permuser}", "jurka");
        a.parseACL(
                "{jurka=arwdRxt/jurka,permuser=rw*/jurka,grantuser=w/permuser,\"group permgroup=a/jurka\"}",
                "jurka");
        a.parseACL(
                "{jurka=a*r*w*d*R*x*t*/jurka,permuser=rw*/jurka,grantuser=w/permuser,\"group permgroup=a/jurka\"}",
                "jurka");
    }
}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/TimeTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/TimeTest.java
index 04b900f..43335e4 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/TimeTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/TimeTest.java
@@ -5,17 +5,6 @@
 
 package org.postgresql.test.jdbc2;
 
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertFalse;
-import static org.junit.jupiter.api.Assertions.assertNotNull;
-import static org.junit.jupiter.api.Assertions.assertTrue;
-
-import org.postgresql.test.TestUtil;
-
-import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.Test;
-
 import java.sql.Connection;
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
@@ -26,252 +15,260 @@ import java.sql.Timestamp;
 import java.sql.Types;
 import java.util.Calendar;
 import java.util.TimeZone;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.postgresql.test.TestUtil;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 /*
-* Some simple tests based on problems reported by users. Hopefully these will help prevent previous
-* problems from re-occurring ;-)
-*
-*/
+ * Some simple tests based on problems reported by users. Hopefully these will help prevent previous
+ * problems from re-occurring ;-)
+ *
+ */
 class TimeTest {
-  private Connection con;
-  private boolean testSetTime;
+    private Connection con;
+    private boolean testSetTime;
 
-  @BeforeEach
-  void setUp() throws Exception {
-    con = TestUtil.openDB();
-    TestUtil.createTempTable(con, "testtime", "tm time, tz time with time zone");
-  }
-
-  @AfterEach
-  void tearDown() throws Exception {
-    TestUtil.dropTable(con, "testtime");
-    TestUtil.closeDB(con);
-  }
-
-  private long extractMillis(long time) {
-    return time >= 0 ? (time % 1000) : (time % 1000 + 1000);
-  }
-
-  /*
-   *
-   * Test use of calendar
-   */
-  @Test
-  void getTimeZone() throws Exception {
-    final Time midnight = new Time(0, 0, 0);
-    Statement stmt = con.createStatement();
-    Calendar cal = Calendar.getInstance();
-
-    cal.setTimeZone(TimeZone.getTimeZone("GMT"));
-
-    int localOffset = Calendar.getInstance().getTimeZone().getOffset(midnight.getTime());
-
-    // set the time to midnight to make this easy
-    assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("testtime", "'00:00:00','00:00:00'")));
-    assertEquals(1,
-        stmt.executeUpdate(TestUtil.insertSQL("testtime", "'00:00:00.1','00:00:00.01'")));
-    assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("testtime",
-        "CAST(CAST(now() AS timestamp without time zone) AS time),now()")));
-    ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("testtime", "tm,tz"));
-    assertNotNull(rs);
-    assertTrue(rs.next());
-
-    Time time = rs.getTime(1);
-    Timestamp timestamp = rs.getTimestamp(1);
-    assertNotNull(timestamp);
-
-    Timestamp timestamptz = rs.getTimestamp(2);
-    assertNotNull(timestamptz);
-
-    assertEquals(midnight, time);
-
-    time = rs.getTime(1, cal);
-    assertEquals(midnight.getTime(), time.getTime() - localOffset);
-
-    assertTrue(rs.next());
-
-    time = rs.getTime(1);
-    assertNotNull(time);
-    assertEquals(100, extractMillis(time.getTime()));
-    timestamp = rs.getTimestamp(1);
-    assertNotNull(timestamp);
-
-    assertEquals(100, extractMillis(timestamp.getTime()));
-
-    assertEquals(100000000, timestamp.getNanos());
-
-    Time timetz = rs.getTime(2);
-    assertNotNull(timetz);
-    assertEquals(10, extractMillis(timetz.getTime()));
-    timestamptz = rs.getTimestamp(2);
-    assertNotNull(timestamptz);
-    assertEquals(10, extractMillis(timestamptz.getTime()));
-
-    assertEquals(10000000, timestamptz.getNanos());
-
-    assertTrue(rs.next());
-
-    time = rs.getTime(1);
-    assertNotNull(time);
-    timestamp = rs.getTimestamp(1);
-    assertNotNull(timestamp);
-
-    timetz = rs.getTime(2);
-    assertNotNull(timetz);
-    timestamptz = rs.getTimestamp(2);
-    assertNotNull(timestamptz);
-  }
-
-  /*
-   * Tests the time methods in ResultSet
-   */
-  @Test
-  void getTime() throws SQLException {
-    Statement stmt = con.createStatement();
-
-    assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("testtime", "'01:02:03'")));
-    assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("testtime", "'23:59:59'")));
-    assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("testtime", "'12:00:00'")));
-    assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("testtime", "'05:15:21'")));
-    assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("testtime", "'16:21:51'")));
-    assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("testtime", "'12:15:12'")));
-    assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("testtime", "'22:12:01'")));
-    assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("testtime", "'08:46:44'")));
-
-    // Fall through helper
-    timeTest();
-
-    assertEquals(8, stmt.executeUpdate("DELETE FROM testtime"));
-    stmt.close();
-  }
-
-  /*
-   * Tests the time methods in PreparedStatement
-   */
-  @Test
-  void setTime() throws SQLException {
-    PreparedStatement ps = con.prepareStatement(TestUtil.insertSQL("testtime", "?"));
-    Statement stmt = con.createStatement();
-
-    ps.setTime(1, makeTime(1, 2, 3));
-    assertEquals(1, ps.executeUpdate());
-
-    ps.setTime(1, makeTime(23, 59, 59));
-    assertEquals(1, ps.executeUpdate());
-
-    ps.setObject(1, Time.valueOf("12:00:00"), Types.TIME);
-    assertEquals(1, ps.executeUpdate());
-
-    ps.setObject(1, Time.valueOf("05:15:21"), Types.TIME);
-    assertEquals(1, ps.executeUpdate());
-
-    ps.setObject(1, Time.valueOf("16:21:51"), Types.TIME);
-    assertEquals(1, ps.executeUpdate());
-
-    ps.setObject(1, Time.valueOf("12:15:12"), Types.TIME);
-    assertEquals(1, ps.executeUpdate());
-
-    ps.setObject(1, "22:12:1", Types.TIME);
-    assertEquals(1, ps.executeUpdate());
-
-    ps.setObject(1, "8:46:44", Types.TIME);
-    assertEquals(1, ps.executeUpdate());
-
-    ps.setObject(1, "5:1:2-03", Types.TIME);
-    assertEquals(1, ps.executeUpdate());
-
-    ps.setObject(1, "23:59:59+11", Types.TIME);
-    assertEquals(1, ps.executeUpdate());
-
-    // Need to let the test know this one has extra test cases.
-    testSetTime = true;
-    // Fall through helper
-    timeTest();
-    testSetTime = false;
-
-    assertEquals(10, stmt.executeUpdate("DELETE FROM testtime"));
-    stmt.close();
-    ps.close();
-  }
-
-  /*
-   * Helper for the TimeTests. It tests what should be in the db
-   */
-  private void timeTest() throws SQLException {
-    Statement st = con.createStatement();
-    ResultSet rs;
-    Time t;
-
-    rs = st.executeQuery(TestUtil.selectSQL("testtime", "tm"));
-    assertNotNull(rs);
-
-    assertTrue(rs.next());
-    t = rs.getTime(1);
-    assertNotNull(t);
-    assertEquals(makeTime(1, 2, 3), t);
-
-    assertTrue(rs.next());
-    t = rs.getTime(1);
-    assertNotNull(t);
-    assertEquals(makeTime(23, 59, 59), t);
-
-    assertTrue(rs.next());
-    t = rs.getTime(1);
-    assertNotNull(t);
-    assertEquals(makeTime(12, 0, 0), t);
-
-    assertTrue(rs.next());
-    t = rs.getTime(1);
-    assertNotNull(t);
-    assertEquals(makeTime(5, 15, 21), t);
-
-    assertTrue(rs.next());
-    t = rs.getTime(1);
-    assertNotNull(t);
-    assertEquals(makeTime(16, 21, 51), t);
-
-    assertTrue(rs.next());
-    t = rs.getTime(1);
-    assertNotNull(t);
-    assertEquals(makeTime(12, 15, 12), t);
-
-    assertTrue(rs.next());
-    t = rs.getTime(1);
-    assertNotNull(t);
-    assertEquals(makeTime(22, 12, 1), t);
-
-    assertTrue(rs.next());
-    t = rs.getTime(1);
-    assertNotNull(t);
-    assertEquals(makeTime(8, 46, 44), t);
-
-    // If we're checking for timezones.
-    if (testSetTime) {
-      assertTrue(rs.next());
-      t = rs.getTime(1);
-      assertNotNull(t);
-      Time tmpTime = Time.valueOf("5:1:2");
-      int localOffset = Calendar.getInstance().getTimeZone().getOffset(tmpTime.getTime());
-      int timeOffset = 3 * 60 * 60 * 1000;
-      tmpTime.setTime(tmpTime.getTime() + timeOffset + localOffset);
-      assertEquals(makeTime(tmpTime.getHours(), tmpTime.getMinutes(), tmpTime.getSeconds()), t);
-
-      assertTrue(rs.next());
-      t = rs.getTime(1);
-      assertNotNull(t);
-      tmpTime = Time.valueOf("23:59:59");
-      localOffset = Calendar.getInstance().getTimeZone().getOffset(tmpTime.getTime());
-      timeOffset = -11 * 60 * 60 * 1000;
-      tmpTime.setTime(tmpTime.getTime() + timeOffset + localOffset);
-      assertEquals(makeTime(tmpTime.getHours(), tmpTime.getMinutes(), tmpTime.getSeconds()), t);
+    @BeforeEach
+    void setUp() throws Exception {
+        con = TestUtil.openDB();
+        TestUtil.createTempTable(con, "testtime", "tm time, tz time with time zone");
     }
 
-    assertFalse(rs.next());
+    @AfterEach
+    void tearDown() throws Exception {
+        TestUtil.dropTable(con, "testtime");
+        TestUtil.closeDB(con);
+    }
 
-    rs.close();
-  }
+    private long extractMillis(long time) {
+        return time >= 0 ? (time % 1000) : (time % 1000 + 1000);
+    }
 
-  private Time makeTime(int h, int m, int s) {
-    return Time.valueOf(TestUtil.fix(h, 2) + ":" + TestUtil.fix(m, 2) + ":" + TestUtil.fix(s, 2));
-  }
+    /*
+     *
+     * Test use of calendar
+     */
+    @Test
+    void getTimeZone() throws Exception {
+        final Time midnight = new Time(0, 0, 0);
+        Statement stmt = con.createStatement();
+        Calendar cal = Calendar.getInstance();
+
+        cal.setTimeZone(TimeZone.getTimeZone("GMT"));
+
+        int localOffset = Calendar.getInstance().getTimeZone().getOffset(midnight.getTime());
+
+        // set the time to midnight to make this easy
+        assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("testtime", "'00:00:00','00:00:00'")));
+        assertEquals(1,
+                stmt.executeUpdate(TestUtil.insertSQL("testtime", "'00:00:00.1','00:00:00.01'")));
+        assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("testtime",
+                "CAST(CAST(now() AS timestamp without time zone) AS time),now()")));
+        ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("testtime", "tm,tz"));
+        assertNotNull(rs);
+        assertTrue(rs.next());
+
+        Time time = rs.getTime(1);
+        Timestamp timestamp = rs.getTimestamp(1);
+        assertNotNull(timestamp);
+
+        Timestamp timestamptz = rs.getTimestamp(2);
+        assertNotNull(timestamptz);
+
+        assertEquals(midnight, time);
+
+        time = rs.getTime(1, cal);
+        assertEquals(midnight.getTime(), time.getTime() - localOffset);
+
+        assertTrue(rs.next());
+
+        time = rs.getTime(1);
+        assertNotNull(time);
+        assertEquals(100, extractMillis(time.getTime()));
+        timestamp = rs.getTimestamp(1);
+        assertNotNull(timestamp);
+
+        assertEquals(100, extractMillis(timestamp.getTime()));
+
+        assertEquals(100000000, timestamp.getNanos());
+
+        Time timetz = rs.getTime(2);
+        assertNotNull(timetz);
+        assertEquals(10, extractMillis(timetz.getTime()));
+        timestamptz = rs.getTimestamp(2);
+        assertNotNull(timestamptz);
+        assertEquals(10, extractMillis(timestamptz.getTime()));
+
+        assertEquals(10000000, timestamptz.getNanos());
+
+        assertTrue(rs.next());
+
+        time = rs.getTime(1);
+        assertNotNull(time);
+        timestamp = rs.getTimestamp(1);
+        assertNotNull(timestamp);
+
+        timetz = rs.getTime(2);
+        assertNotNull(timetz);
+        timestamptz = rs.getTimestamp(2);
+        assertNotNull(timestamptz);
+    }
+
+    /*
+     * Tests the time methods in ResultSet
+     */
+    @Test
+    void getTime() throws SQLException {
+        Statement stmt = con.createStatement();
+
+        assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("testtime", "'01:02:03'")));
+        assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("testtime", "'23:59:59'")));
+        assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("testtime", "'12:00:00'")));
+        assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("testtime", "'05:15:21'")));
+        assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("testtime", "'16:21:51'")));
+        assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("testtime", "'12:15:12'")));
+        assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("testtime", "'22:12:01'")));
+        assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL("testtime", "'08:46:44'")));
+
+        // Fall through helper
+        timeTest();
+
+        assertEquals(8, stmt.executeUpdate("DELETE FROM testtime"));
+        stmt.close();
+    }
+
+    /*
+     * Tests the time methods in PreparedStatement
+     */
+    @Test
+    void setTime() throws SQLException {
+        PreparedStatement ps = con.prepareStatement(TestUtil.insertSQL("testtime", "?"));
+        Statement stmt = con.createStatement();
+
+        ps.setTime(1, makeTime(1, 2, 3));
+        assertEquals(1, ps.executeUpdate());
+
+        ps.setTime(1, makeTime(23, 59, 59));
+        assertEquals(1, ps.executeUpdate());
+
+        ps.setObject(1, Time.valueOf("12:00:00"), Types.TIME);
+        assertEquals(1, ps.executeUpdate());
+
+        ps.setObject(1, Time.valueOf("05:15:21"), Types.TIME);
+        assertEquals(1, ps.executeUpdate());
+
+        ps.setObject(1, Time.valueOf("16:21:51"), Types.TIME);
+        assertEquals(1, ps.executeUpdate());
+
+        ps.setObject(1, Time.valueOf("12:15:12"), Types.TIME);
+        assertEquals(1, ps.executeUpdate());
+
+        ps.setObject(1, "22:12:1", Types.TIME);
+        assertEquals(1, ps.executeUpdate());
+
+        ps.setObject(1, "8:46:44", Types.TIME);
+        assertEquals(1, ps.executeUpdate());
+
+        ps.setObject(1, "5:1:2-03", Types.TIME);
+        assertEquals(1, ps.executeUpdate());
+
+        ps.setObject(1, "23:59:59+11", Types.TIME);
+        assertEquals(1, ps.executeUpdate());
+
+        // Need to let the test know this one has extra test cases.
+        testSetTime = true;
+        // Fall through helper
+        timeTest();
+        testSetTime = false;
+
+        assertEquals(10, stmt.executeUpdate("DELETE FROM testtime"));
+        stmt.close();
+        ps.close();
+    }
+
+    /*
+     * Helper for the TimeTests. It tests what should be in the db
+     */
+    private void timeTest() throws SQLException {
+        Statement st = con.createStatement();
+        ResultSet rs;
+        Time t;
+
+        rs = st.executeQuery(TestUtil.selectSQL("testtime", "tm"));
+        assertNotNull(rs);
+
+        assertTrue(rs.next());
+        t = rs.getTime(1);
+        assertNotNull(t);
+        assertEquals(makeTime(1, 2, 3), t);
+
+        assertTrue(rs.next());
+        t = rs.getTime(1);
+        assertNotNull(t);
+        assertEquals(makeTime(23, 59, 59), t);
+
+        assertTrue(rs.next());
+        t = rs.getTime(1);
+        assertNotNull(t);
+        assertEquals(makeTime(12, 0, 0), t);
+
+        assertTrue(rs.next());
+        t = rs.getTime(1);
+        assertNotNull(t);
+        assertEquals(makeTime(5, 15, 21), t);
+
+        assertTrue(rs.next());
+        t = rs.getTime(1);
+        assertNotNull(t);
+        assertEquals(makeTime(16, 21, 51), t);
+
+        assertTrue(rs.next());
+        t = rs.getTime(1);
+        assertNotNull(t);
+        assertEquals(makeTime(12, 15, 12), t);
+
+        assertTrue(rs.next());
+        t = rs.getTime(1);
+        assertNotNull(t);
+        assertEquals(makeTime(22, 12, 1), t);
+
+        assertTrue(rs.next());
+        t = rs.getTime(1);
+        assertNotNull(t);
+        assertEquals(makeTime(8, 46, 44), t);
+
+        // If we're checking for timezones.
+        if (testSetTime) {
+            assertTrue(rs.next());
+            t = rs.getTime(1);
+            assertNotNull(t);
+            Time tmpTime = Time.valueOf("5:1:2");
+            int localOffset = Calendar.getInstance().getTimeZone().getOffset(tmpTime.getTime());
+            int timeOffset = 3 * 60 * 60 * 1000;
+            tmpTime.setTime(tmpTime.getTime() + timeOffset + localOffset);
+            assertEquals(makeTime(tmpTime.getHours(), tmpTime.getMinutes(), tmpTime.getSeconds()), t);
+
+            assertTrue(rs.next());
+            t = rs.getTime(1);
+            assertNotNull(t);
+            tmpTime = Time.valueOf("23:59:59");
+            localOffset = Calendar.getInstance().getTimeZone().getOffset(tmpTime.getTime());
+            timeOffset = -11 * 60 * 60 * 1000;
+            tmpTime.setTime(tmpTime.getTime() + timeOffset + localOffset);
+            assertEquals(makeTime(tmpTime.getHours(), tmpTime.getMinutes(), tmpTime.getSeconds()), t);
+        }
+
+        assertFalse(rs.next());
+
+        rs.close();
+    }
+
+    private Time makeTime(int h, int m, int s) {
+        return Time.valueOf(TestUtil.fix(h, 2) + ":" + TestUtil.fix(m, 2) + ":" + TestUtil.fix(s, 2));
+    }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/TimestampTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/TimestampTest.java
index 19f97be..c813018 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/TimestampTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/TimestampTest.java
@@ -5,22 +5,6 @@
 
 package org.postgresql.test.jdbc2;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import static org.junit.Assume.assumeTrue;
-
-import org.postgresql.PGStatement;
-import org.postgresql.core.BaseConnection;
-import org.postgresql.core.ServerVersion;
-import org.postgresql.jdbc.TimestampUtils;
-import org.postgresql.test.TestUtil;
-
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-
 import java.sql.Date;
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
@@ -35,6 +19,19 @@ import java.util.Calendar;
 import java.util.Collection;
 import java.util.GregorianCalendar;
 import java.util.TimeZone;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.postgresql.PGStatement;
+import org.postgresql.core.BaseConnection;
+import org.postgresql.core.ServerVersion;
+import org.postgresql.jdbc.TimestampUtils;
+import org.postgresql.test.TestUtil;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.junit.Assume.assumeTrue;
 
 /*
  * Test get/setTimestamp for both timestamp with time zone and timestamp without time zone datatypes
@@ -44,694 +41,673 @@ import java.util.TimeZone;
 @RunWith(Parameterized.class)
 public class TimestampTest extends BaseTest4 {
 
-  public TimestampTest(BinaryMode binaryMode) {
-    setBinaryMode(binaryMode);
-  }
-
-  private TimeZone currentTZ;
-
-  @Parameterized.Parameters(name = "binary = {0}")
-  public static Iterable<Object[]> data() {
-    Collection<Object[]> ids = new ArrayList<>();
-    for (BinaryMode binaryMode : BinaryMode.values()) {
-      ids.add(new Object[]{binaryMode});
-    }
-    return ids;
-  }
-
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-    TestUtil.createTable(con, TSWTZ_TABLE, "ts timestamp with time zone");
-    TestUtil.createTable(con, TSWOTZ_TABLE, "ts timestamp without time zone");
-    TestUtil.createTable(con, DATE_TABLE, "ts date");
-    currentTZ = TimeZone.getDefault();
-  }
-
-  @Override
-  public void tearDown() throws SQLException {
-    TestUtil.dropTable(con, TSWTZ_TABLE);
-    TestUtil.dropTable(con, TSWOTZ_TABLE);
-    TestUtil.dropTable(con, DATE_TABLE);
-    TimeZone.setDefault(currentTZ);
-    super.tearDown();
-  }
-
-  /**
-   * Ensure the driver doesn't modify a Calendar that is passed in.
-   */
-  @Test
-  public void testCalendarModification() throws SQLException {
-    Calendar cal = Calendar.getInstance();
-    Calendar origCal = (Calendar) cal.clone();
-    PreparedStatement ps = con.prepareStatement("INSERT INTO " + TSWOTZ_TABLE + " VALUES (?)");
-
-    ps.setDate(1, new Date(0), cal);
-    ps.executeUpdate();
-    assertEquals(origCal, cal);
-
-    ps.setTimestamp(1, new Timestamp(0), cal);
-    ps.executeUpdate();
-    assertEquals(origCal, cal);
-
-    ps.setTime(1, new Time(0), cal);
-    // Can't actually execute this one because of type mismatch,
-    // but all we're really concerned about is the set call.
-    // ps.executeUpdate();
-    assertEquals(origCal, cal);
-
-    ps.close();
-    Statement stmt = con.createStatement();
-    ResultSet rs = stmt.executeQuery("SELECT ts FROM " + TSWOTZ_TABLE);
-    assertTrue(rs.next());
-
-    rs.getDate(1, cal);
-    assertEquals(origCal, cal);
-
-    rs.getTimestamp(1, cal);
-    assertEquals(origCal, cal);
-
-    rs.getTime(1, cal);
-    assertEquals(origCal, cal);
-
-    rs.close();
-    stmt.close();
-  }
-
-  @Test
-  public void testInfinity() throws SQLException {
-    runInfinityTests(TSWTZ_TABLE, PGStatement.DATE_POSITIVE_INFINITY);
-    runInfinityTests(TSWTZ_TABLE, PGStatement.DATE_NEGATIVE_INFINITY);
-    runInfinityTests(TSWOTZ_TABLE, PGStatement.DATE_POSITIVE_INFINITY);
-    runInfinityTests(TSWOTZ_TABLE, PGStatement.DATE_NEGATIVE_INFINITY);
-    if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v8_4)) {
-      runInfinityTests(DATE_TABLE, PGStatement.DATE_POSITIVE_INFINITY);
-      runInfinityTests(DATE_TABLE, PGStatement.DATE_NEGATIVE_INFINITY);
-    }
-  }
-
-  private void runInfinityTests(String table, long value) throws SQLException {
-    GregorianCalendar cal = new GregorianCalendar();
-    // Pick some random timezone that is hopefully different than ours
-    // and exists in this JVM.
-    cal.setTimeZone(TimeZone.getTimeZone("Europe/Warsaw"));
-
-    String strValue;
-    if (value == PGStatement.DATE_POSITIVE_INFINITY) {
-      strValue = "infinity";
-    } else {
-      strValue = "-infinity";
+    private static final Timestamp TS1WTZ =
+            getTimestamp(1950, 2, 7, 15, 0, 0, 100000000, "PST");
+    private static final String TS1WTZ_PGFORMAT = "1950-02-07 15:00:00.1-08";
+    private static final Timestamp TS2WTZ =
+            getTimestamp(2000, 2, 7, 15, 0, 0, 120000000, "GMT");
+    private static final String TS2WTZ_PGFORMAT = "2000-02-07 15:00:00.12+00";
+    private static final Timestamp TS3WTZ =
+            getTimestamp(2000, 7, 7, 15, 0, 0, 123000000, "GMT");
+    private static final String TS3WTZ_PGFORMAT = "2000-07-07 15:00:00.123+00";
+    private static final Timestamp TS4WTZ =
+            getTimestamp(2000, 7, 7, 15, 0, 0, 123456000, "GMT");
+    private static final String TS4WTZ_PGFORMAT = "2000-07-07 15:00:00.123456+00";
+    private static final Timestamp TS1WOTZ =
+            getTimestamp(1950, 2, 7, 15, 0, 0, 100000000, null);
+    private static final String TS1WOTZ_PGFORMAT = "1950-02-07 15:00:00.1";
+    private static final Timestamp TS2WOTZ =
+            getTimestamp(2000, 2, 7, 15, 0, 0, 120000000, null);
+    private static final String TS2WOTZ_PGFORMAT = "2000-02-07 15:00:00.12";
+    private static final Timestamp TS3WOTZ =
+            getTimestamp(2000, 7, 7, 15, 0, 0, 123000000, null);
+    private static final String TS3WOTZ_PGFORMAT = "2000-07-07 15:00:00.123";
+    private static final Timestamp TS4WOTZ =
+            getTimestamp(2000, 7, 7, 15, 0, 0, 123456000, null);
+    private static final String TS4WOTZ_PGFORMAT = "2000-07-07 15:00:00.123456";
+    private static final Timestamp TS5WOTZ =
+            new Timestamp(PGStatement.DATE_NEGATIVE_INFINITY);
+    private static final String TS5WOTZ_PGFORMAT = "-infinity";
+    private static final Timestamp TS6WOTZ =
+            new Timestamp(PGStatement.DATE_POSITIVE_INFINITY);
+    private static final String TS6WOTZ_PGFORMAT = "infinity";
+    private static final Timestamp TS7WOTZ =
+            getTimestamp(2000, 7, 7, 15, 0, 0, 0, null);
+    private static final String TS7WOTZ_PGFORMAT = "2000-07-07 15:00:00";
+    private static final Timestamp TS8WOTZ =
+            getTimestamp(2000, 7, 7, 15, 0, 0, 20400000, null);
+    private static final String TS8WOTZ_PGFORMAT = "2000-07-07 15:00:00.0204";
+    private static final Timestamp TS9WOTZ =
+            getTimestamp(2000, 2, 7, 15, 0, 0, 789, null);
+    private static final String TS9WOTZ_PGFORMAT = "2000-02-07 15:00:00.000000789";
+    private static final Timestamp TS9WOTZ_ROUNDED =
+            getTimestamp(2000, 2, 7, 15, 0, 0, 1000, null);
+    private static final String TS9WOTZ_ROUNDED_PGFORMAT = "2000-02-07 15:00:00.000001";
+    private static final Timestamp TS10WOTZ =
+            getTimestamp(2018, 12, 31, 23, 59, 59, 999999500, null);
+    private static final String TS10WOTZ_PGFORMAT = "2018-12-31 23:59:59.999999500";
+    private static final Timestamp TS10WOTZ_ROUNDED =
+            getTimestamp(2019, 1, 1, 0, 0, 0, 0, null);
+    private static final String TS10WOTZ_ROUNDED_PGFORMAT = "2019-01-01 00:00:00";
+    private static final Timestamp[] TS__WOTZ = {
+            TS1WOTZ, TS2WOTZ, TS3WOTZ, TS4WOTZ, TS5WOTZ,
+            TS6WOTZ, TS7WOTZ, TS8WOTZ, TS9WOTZ, TS10WOTZ,
+    };
+    private static final String[] TS__WOTZ_PGFORMAT = {
+            TS1WOTZ_PGFORMAT, TS2WOTZ_PGFORMAT, TS3WOTZ_PGFORMAT, TS4WOTZ_PGFORMAT, TS5WOTZ_PGFORMAT,
+            TS6WOTZ_PGFORMAT, TS7WOTZ_PGFORMAT, TS8WOTZ_PGFORMAT, TS9WOTZ_PGFORMAT, TS10WOTZ_PGFORMAT,
+    };
+    private static final String TSWTZ_TABLE = "testtimestampwtz";
+    private static final String TSWOTZ_TABLE = "testtimestampwotz";
+    private static final String DATE_TABLE = "testtimestampdate";
+    private static final java.sql.Date tmpDate1 = new java.sql.Date(TS1WTZ.getTime());
+    private static final java.sql.Time tmpTime1 = new java.sql.Time(TS1WTZ.getTime());
+    private static final java.sql.Date tmpDate2 = new java.sql.Date(TS2WTZ.getTime());
+    private static final java.sql.Time tmpTime2 = new java.sql.Time(TS2WTZ.getTime());
+    private static final java.sql.Date tmpDate3 = new java.sql.Date(TS3WTZ.getTime());
+    private static final java.sql.Time tmpTime3 = new java.sql.Time(TS3WTZ.getTime());
+    private static final java.sql.Date tmpDate4 = new java.sql.Date(TS4WTZ.getTime());
+    private static final java.sql.Time tmpTime4 = new java.sql.Time(TS4WTZ.getTime());
+    private static final java.sql.Date tmpDate1WOTZ = new java.sql.Date(TS1WOTZ.getTime());
+    private static final java.sql.Time tmpTime1WOTZ = new java.sql.Time(TS1WOTZ.getTime());
+    private static final java.sql.Date tmpDate2WOTZ = new java.sql.Date(TS2WOTZ.getTime());
+    private static final java.sql.Time tmpTime2WOTZ = new java.sql.Time(TS2WOTZ.getTime());
+    private static final java.sql.Date tmpDate3WOTZ = new java.sql.Date(TS3WOTZ.getTime());
+    private static final java.sql.Time tmpTime3WOTZ = new java.sql.Time(TS3WOTZ.getTime());
+    private static final java.sql.Date tmpDate4WOTZ = new java.sql.Date(TS4WOTZ.getTime());
+    private static final java.sql.Time tmpTime4WOTZ = new java.sql.Time(TS4WOTZ.getTime());
+    private static final java.sql.Date tmpDate5WOTZ = new java.sql.Date(TS5WOTZ.getTime());
+    private static final java.sql.Date tmpTime5WOTZ = new java.sql.Date(TS5WOTZ.getTime());
+    private static final java.sql.Date tmpDate6WOTZ = new java.sql.Date(TS6WOTZ.getTime());
+    private static final java.sql.Date tmpTime6WOTZ = new java.sql.Date(TS6WOTZ.getTime());
+    private static final java.sql.Date tmpDate7WOTZ = new java.sql.Date(TS7WOTZ.getTime());
+    private static final java.sql.Time tmpTime7WOTZ = new java.sql.Time(TS7WOTZ.getTime());
+    private static final java.sql.Date tmpDate8WOTZ = new java.sql.Date(TS8WOTZ.getTime());
+    private static final java.sql.Time tmpTime8WOTZ = new java.sql.Time(TS8WOTZ.getTime());
+    private static final java.sql.Date tmpDate9WOTZ = new java.sql.Date(TS9WOTZ.getTime());
+    private static final java.sql.Time tmpTime9WOTZ = new java.sql.Time(TS9WOTZ.getTime());
+    private static final java.sql.Date tmpDate10WOTZ = new java.sql.Date(TS10WOTZ.getTime());
+    private static final java.sql.Time tmpTime10WOTZ = new java.sql.Time(TS10WOTZ.getTime());
+    private static final java.util.Date[] TEST_DATE_TIMES = {
+            tmpDate1WOTZ, tmpDate2WOTZ, tmpDate3WOTZ, tmpDate4WOTZ, tmpDate5WOTZ,
+            tmpDate6WOTZ, tmpDate7WOTZ, tmpDate8WOTZ, tmpDate9WOTZ, tmpDate10WOTZ,
+            tmpTime1WOTZ, tmpTime2WOTZ, tmpTime3WOTZ, tmpTime4WOTZ, tmpTime5WOTZ,
+            tmpTime6WOTZ, tmpTime7WOTZ, tmpTime8WOTZ, tmpTime9WOTZ, tmpTime10WOTZ,
+    };
+    private TimeZone currentTZ;
+    public TimestampTest(BinaryMode binaryMode) {
+        setBinaryMode(binaryMode);
     }
 
-    Statement stmt = con.createStatement();
-    stmt.executeUpdate(TestUtil.insertSQL(table, "'" + strValue + "'"));
-    stmt.close();
-
-    PreparedStatement ps = con.prepareStatement(TestUtil.insertSQL(table, "?"));
-    ps.setTimestamp(1, new Timestamp(value));
-    ps.executeUpdate();
-    ps.setTimestamp(1, new Timestamp(value), cal);
-    ps.executeUpdate();
-    ps.close();
-
-    stmt = con.createStatement();
-    ResultSet rs = stmt.executeQuery("select ts from " + table);
-    while (rs.next()) {
-      assertEquals(strValue, rs.getString(1));
-
-      Timestamp ts = rs.getTimestamp(1);
-      assertEquals(value, ts.getTime());
-
-      Date d = rs.getDate(1);
-      assertEquals(value, d.getTime());
-
-      Timestamp tscal = rs.getTimestamp(1, cal);
-      assertEquals(value, tscal.getTime());
-    }
-    rs.close();
-
-    assertEquals(3, stmt.executeUpdate("DELETE FROM " + table));
-    stmt.close();
-  }
-
-  /*
-   * Tests the timestamp methods in ResultSet on timestamp with time zone we insert a known string
-   * value (don't use setTimestamp) then see that we get back the same value from getTimestamp
-   */
-  @Test
-  public void testGetTimestampWTZ() throws SQLException {
-    assumeTrue(TestUtil.haveIntegerDateTimes(con));
-
-    Statement stmt = con.createStatement();
-    TimestampUtils tsu = ((BaseConnection) con).getTimestampUtils();
-
-    // Insert the three timestamp values in raw pg format
-    for (int i = 0; i < 3; i++) {
-      assertEquals(1,
-          stmt.executeUpdate(TestUtil.insertSQL(TSWTZ_TABLE, "'" + TS1WTZ_PGFORMAT + "'")));
-      assertEquals(1,
-          stmt.executeUpdate(TestUtil.insertSQL(TSWTZ_TABLE, "'" + TS2WTZ_PGFORMAT + "'")));
-      assertEquals(1,
-          stmt.executeUpdate(TestUtil.insertSQL(TSWTZ_TABLE, "'" + TS3WTZ_PGFORMAT + "'")));
-      assertEquals(1,
-          stmt.executeUpdate(TestUtil.insertSQL(TSWTZ_TABLE, "'" + TS4WTZ_PGFORMAT + "'")));
-    }
-    assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL(TSWTZ_TABLE,
-        "'" + tsu.toString(null, new Timestamp(tmpDate1.getTime())) + "'")));
-    assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL(TSWTZ_TABLE,
-        "'" + tsu.toString(null, new Timestamp(tmpDate2.getTime())) + "'")));
-    assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL(TSWTZ_TABLE,
-        "'" + tsu.toString(null, new Timestamp(tmpDate3.getTime())) + "'")));
-    assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL(TSWTZ_TABLE,
-        "'" + tsu.toString(null, new Timestamp(tmpDate4.getTime())) + "'")));
-    assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL(TSWTZ_TABLE,
-        "'" + tsu.toString(null, new Timestamp(tmpTime1.getTime())) + "'")));
-    assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL(TSWTZ_TABLE,
-        "'" + tsu.toString(null, new Timestamp(tmpTime2.getTime())) + "'")));
-    assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL(TSWTZ_TABLE,
-        "'" + tsu.toString(null, new Timestamp(tmpTime3.getTime())) + "'")));
-    assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL(TSWTZ_TABLE,
-        "'" + tsu.toString(null, new Timestamp(tmpTime4.getTime())) + "'")));
-
-    // Fall through helper
-    timestampTestWTZ();
-
-    assertEquals(20, stmt.executeUpdate("DELETE FROM " + TSWTZ_TABLE));
-
-    stmt.close();
-  }
-
-  /*
-   * Tests the timestamp methods in PreparedStatement on timestamp with time zone we insert a value
-   * using setTimestamp then see that we get back the same value from getTimestamp (which we know
-   * works as it was tested independently of setTimestamp
-   */
-  @Test
-  public void testSetTimestampWTZ() throws SQLException {
-    assumeTrue(TestUtil.haveIntegerDateTimes(con));
-
-    Statement stmt = con.createStatement();
-    PreparedStatement pstmt = con.prepareStatement(TestUtil.insertSQL(TSWTZ_TABLE, "?"));
-
-    pstmt.setTimestamp(1, TS1WTZ);
-    assertEquals(1, pstmt.executeUpdate());
-
-    pstmt.setTimestamp(1, TS2WTZ);
-    assertEquals(1, pstmt.executeUpdate());
-
-    pstmt.setTimestamp(1, TS3WTZ);
-    assertEquals(1, pstmt.executeUpdate());
-
-    pstmt.setTimestamp(1, TS4WTZ);
-    assertEquals(1, pstmt.executeUpdate());
-
-    // With java.sql.Timestamp
-    pstmt.setObject(1, TS1WTZ, Types.TIMESTAMP);
-    assertEquals(1, pstmt.executeUpdate());
-    pstmt.setObject(1, TS2WTZ, Types.TIMESTAMP);
-    assertEquals(1, pstmt.executeUpdate());
-    pstmt.setObject(1, TS3WTZ, Types.TIMESTAMP);
-    assertEquals(1, pstmt.executeUpdate());
-    pstmt.setObject(1, TS4WTZ, Types.TIMESTAMP);
-    assertEquals(1, pstmt.executeUpdate());
-
-    // With Strings
-    pstmt.setObject(1, TS1WTZ_PGFORMAT, Types.TIMESTAMP);
-    assertEquals(1, pstmt.executeUpdate());
-    pstmt.setObject(1, TS2WTZ_PGFORMAT, Types.TIMESTAMP);
-    assertEquals(1, pstmt.executeUpdate());
-    pstmt.setObject(1, TS3WTZ_PGFORMAT, Types.TIMESTAMP);
-    assertEquals(1, pstmt.executeUpdate());
-    pstmt.setObject(1, TS4WTZ_PGFORMAT, Types.TIMESTAMP);
-    assertEquals(1, pstmt.executeUpdate());
-
-    // With java.sql.Date
-    pstmt.setObject(1, tmpDate1, Types.TIMESTAMP);
-    assertEquals(1, pstmt.executeUpdate());
-    pstmt.setObject(1, tmpDate2, Types.TIMESTAMP);
-    assertEquals(1, pstmt.executeUpdate());
-    pstmt.setObject(1, tmpDate3, Types.TIMESTAMP);
-    assertEquals(1, pstmt.executeUpdate());
-    pstmt.setObject(1, tmpDate4, Types.TIMESTAMP);
-    assertEquals(1, pstmt.executeUpdate());
-
-    // With java.sql.Time
-    pstmt.setObject(1, tmpTime1, Types.TIMESTAMP);
-    assertEquals(1, pstmt.executeUpdate());
-    pstmt.setObject(1, tmpTime2, Types.TIMESTAMP);
-    assertEquals(1, pstmt.executeUpdate());
-    pstmt.setObject(1, tmpTime3, Types.TIMESTAMP);
-    assertEquals(1, pstmt.executeUpdate());
-    pstmt.setObject(1, tmpTime4, Types.TIMESTAMP);
-    assertEquals(1, pstmt.executeUpdate());
-    // Fall through helper
-    timestampTestWTZ();
-
-    assertEquals(20, stmt.executeUpdate("DELETE FROM " + TSWTZ_TABLE));
-
-    pstmt.close();
-    stmt.close();
-  }
-
-  /*
-   * Tests the timestamp methods in ResultSet on timestamp without time zone we insert a known
-   * string value (don't use setTimestamp) then see that we get back the same value from
-   * getTimestamp
-   */
-  @Test
-  public void testGetTimestampWOTZ() throws SQLException {
-    assumeTrue(TestUtil.haveIntegerDateTimes(con));
-    //Refer to #896
-    assumeMinimumServerVersion(ServerVersion.v8_4);
-
-    Statement stmt = con.createStatement();
-    TimestampUtils tsu = ((BaseConnection) con).getTimestampUtils();
-
-    // Insert the three timestamp values in raw pg format
-    for (int i = 0; i < 3; i++) {
-      for (String value : TS__WOTZ_PGFORMAT) {
-        assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL(TSWOTZ_TABLE, "'" + value + "'")));
-      }
+    @Parameterized.Parameters(name = "binary = {0}")
+    public static Iterable<Object[]> data() {
+        Collection<Object[]> ids = new ArrayList<>();
+        for (BinaryMode binaryMode : BinaryMode.values()) {
+            ids.add(new Object[]{binaryMode});
+        }
+        return ids;
     }
 
-    for (java.util.Date date : TEST_DATE_TIMES) {
-      String stringValue = "'" + tsu.toString(null, new Timestamp(date.getTime())) + "'";
-      assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL(TSWOTZ_TABLE, stringValue)));
+    private static Timestamp getTimestamp(int y, int m, int d, int h, int mn, int se, int f,
+                                          String tz) {
+        Timestamp result = null;
+        java.text.DateFormat dateFormat;
+        try {
+            String ts;
+            ts = TestUtil.fix(y, 4) + "-"
+                    + TestUtil.fix(m, 2) + "-"
+                    + TestUtil.fix(d, 2) + " "
+                    + TestUtil.fix(h, 2) + ":"
+                    + TestUtil.fix(mn, 2) + ":"
+                    + TestUtil.fix(se, 2) + " ";
+
+            if (tz == null) {
+                dateFormat = new SimpleDateFormat("y-M-d H:m:s");
+            } else {
+                ts = ts + tz;
+                dateFormat = new SimpleDateFormat("y-M-d H:m:s z");
+            }
+            java.util.Date date = dateFormat.parse(ts);
+            result = new Timestamp(date.getTime());
+            result.setNanos(f);
+        } catch (Exception ex) {
+            fail(ex.getMessage());
+        }
+        return result;
     }
 
-    // Fall through helper
-    timestampTestWOTZ();
-
-    assertEquals(50, stmt.executeUpdate("DELETE FROM " + TSWOTZ_TABLE));
-
-    stmt.close();
-  }
-
-  /*
-   * Tests the timestamp methods in PreparedStatement on timestamp without time zone we insert a
-   * value using setTimestamp then see that we get back the same value from getTimestamp (which we
-   * know works as it was tested independently of setTimestamp
-   */
-  @Test
-  public void testSetTimestampWOTZ() throws SQLException {
-    assumeTrue(TestUtil.haveIntegerDateTimes(con));
-    //Refer to #896
-    assumeMinimumServerVersion(ServerVersion.v8_4);
-
-    Statement stmt = con.createStatement();
-    PreparedStatement pstmt = con.prepareStatement(TestUtil.insertSQL(TSWOTZ_TABLE, "?"));
-
-    for (Timestamp timestamp : TS__WOTZ) {
-      pstmt.setTimestamp(1, timestamp);
-      assertEquals(1, pstmt.executeUpdate());
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        TestUtil.createTable(con, TSWTZ_TABLE, "ts timestamp with time zone");
+        TestUtil.createTable(con, TSWOTZ_TABLE, "ts timestamp without time zone");
+        TestUtil.createTable(con, DATE_TABLE, "ts date");
+        currentTZ = TimeZone.getDefault();
     }
 
-    // With java.sql.Timestamp
-    for (Timestamp timestamp : TS__WOTZ) {
-      pstmt.setObject(1, timestamp, Types.TIMESTAMP);
-      assertEquals(1, pstmt.executeUpdate());
+    @Override
+    public void tearDown() throws SQLException {
+        TestUtil.dropTable(con, TSWTZ_TABLE);
+        TestUtil.dropTable(con, TSWOTZ_TABLE);
+        TestUtil.dropTable(con, DATE_TABLE);
+        TimeZone.setDefault(currentTZ);
+        super.tearDown();
     }
 
-    // With Strings
-    for (String value : TS__WOTZ_PGFORMAT) {
-      pstmt.setObject(1, value, Types.TIMESTAMP);
-      assertEquals(1, pstmt.executeUpdate());
+    /**
+     * Ensure the driver doesn't modify a Calendar that is passed in.
+     */
+    @Test
+    public void testCalendarModification() throws SQLException {
+        Calendar cal = Calendar.getInstance();
+        Calendar origCal = (Calendar) cal.clone();
+        PreparedStatement ps = con.prepareStatement("INSERT INTO " + TSWOTZ_TABLE + " VALUES (?)");
+
+        ps.setDate(1, new Date(0), cal);
+        ps.executeUpdate();
+        assertEquals(origCal, cal);
+
+        ps.setTimestamp(1, new Timestamp(0), cal);
+        ps.executeUpdate();
+        assertEquals(origCal, cal);
+
+        ps.setTime(1, new Time(0), cal);
+        // Can't actually execute this one because of type mismatch,
+        // but all we're really concerned about is the set call.
+        // ps.executeUpdate();
+        assertEquals(origCal, cal);
+
+        ps.close();
+        Statement stmt = con.createStatement();
+        ResultSet rs = stmt.executeQuery("SELECT ts FROM " + TSWOTZ_TABLE);
+        assertTrue(rs.next());
+
+        rs.getDate(1, cal);
+        assertEquals(origCal, cal);
+
+        rs.getTimestamp(1, cal);
+        assertEquals(origCal, cal);
+
+        rs.getTime(1, cal);
+        assertEquals(origCal, cal);
+
+        rs.close();
+        stmt.close();
     }
 
-    // With java.sql.Date, java.sql.Time
-    for (java.util.Date date : TEST_DATE_TIMES) {
-      pstmt.setObject(1, date, Types.TIMESTAMP);
-      assertEquals("insert into TSWOTZ_TABLE via setObject(1, " + date
-          + ", Types.TIMESTAMP) -> expecting one row inserted", 1, pstmt.executeUpdate());
+    @Test
+    public void testInfinity() throws SQLException {
+        runInfinityTests(TSWTZ_TABLE, PGStatement.DATE_POSITIVE_INFINITY);
+        runInfinityTests(TSWTZ_TABLE, PGStatement.DATE_NEGATIVE_INFINITY);
+        runInfinityTests(TSWOTZ_TABLE, PGStatement.DATE_POSITIVE_INFINITY);
+        runInfinityTests(TSWOTZ_TABLE, PGStatement.DATE_NEGATIVE_INFINITY);
+        if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v8_4)) {
+            runInfinityTests(DATE_TABLE, PGStatement.DATE_POSITIVE_INFINITY);
+            runInfinityTests(DATE_TABLE, PGStatement.DATE_NEGATIVE_INFINITY);
+        }
     }
 
-    // Fall through helper
-    timestampTestWOTZ();
+    private void runInfinityTests(String table, long value) throws SQLException {
+        GregorianCalendar cal = new GregorianCalendar();
+        // Pick some random timezone that is hopefully different than ours
+        // and exists in this JVM.
+        cal.setTimeZone(TimeZone.getTimeZone("Europe/Warsaw"));
 
-    assertEquals(50, stmt.executeUpdate("DELETE FROM " + TSWOTZ_TABLE));
+        String strValue;
+        if (value == PGStatement.DATE_POSITIVE_INFINITY) {
+            strValue = "infinity";
+        } else {
+            strValue = "-infinity";
+        }
 
-    pstmt.close();
-    stmt.close();
-  }
+        Statement stmt = con.createStatement();
+        stmt.executeUpdate(TestUtil.insertSQL(table, "'" + strValue + "'"));
+        stmt.close();
 
-  /*
-   * Helper for the TimestampTests. It tests what should be in the db
-   */
-  private void timestampTestWTZ() throws SQLException {
-    Statement stmt = con.createStatement();
-    ResultSet rs;
-    Timestamp t;
+        PreparedStatement ps = con.prepareStatement(TestUtil.insertSQL(table, "?"));
+        ps.setTimestamp(1, new Timestamp(value));
+        ps.executeUpdate();
+        ps.setTimestamp(1, new Timestamp(value), cal);
+        ps.executeUpdate();
+        ps.close();
 
-    rs = stmt.executeQuery("select ts from " + TSWTZ_TABLE); // removed the order by ts
-    assertNotNull(rs);
+        stmt = con.createStatement();
+        ResultSet rs = stmt.executeQuery("select ts from " + table);
+        while (rs.next()) {
+            assertEquals(strValue, rs.getString(1));
 
-    for (int i = 0; i < 3; i++) {
-      assertTrue(rs.next());
-      t = rs.getTimestamp(1);
-      assertNotNull(t);
-      assertEquals(TS1WTZ, t);
+            Timestamp ts = rs.getTimestamp(1);
+            assertEquals(value, ts.getTime());
 
-      assertTrue(rs.next());
-      t = rs.getTimestamp(1);
-      assertNotNull(t);
-      assertEquals(TS2WTZ, t);
+            Date d = rs.getDate(1);
+            assertEquals(value, d.getTime());
 
-      assertTrue(rs.next());
-      t = rs.getTimestamp(1);
-      assertNotNull(t);
-      assertEquals(TS3WTZ, t);
+            Timestamp tscal = rs.getTimestamp(1, cal);
+            assertEquals(value, tscal.getTime());
+        }
+        rs.close();
 
-      assertTrue(rs.next());
-      t = rs.getTimestamp(1);
-      assertNotNull(t);
-      assertEquals(TS4WTZ, t);
+        assertEquals(3, stmt.executeUpdate("DELETE FROM " + table));
+        stmt.close();
     }
 
-    // Testing for Date
-    assertTrue(rs.next());
-    t = rs.getTimestamp(1);
-    assertNotNull(t);
-    assertEquals(tmpDate1.getTime(), t.getTime());
+    /*
+     * Tests the timestamp methods in ResultSet on timestamp with time zone we insert a known string
+     * value (don't use setTimestamp) then see that we get back the same value from getTimestamp
+     */
+    @Test
+    public void testGetTimestampWTZ() throws SQLException {
+        assumeTrue(TestUtil.haveIntegerDateTimes(con));
 
-    assertTrue(rs.next());
-    t = rs.getTimestamp(1);
-    assertNotNull(t);
-    assertEquals(tmpDate2.getTime(), t.getTime());
+        Statement stmt = con.createStatement();
+        TimestampUtils tsu = ((BaseConnection) con).getTimestampUtils();
 
-    assertTrue(rs.next());
-    t = rs.getTimestamp(1);
-    assertNotNull(t);
-    assertEquals(tmpDate3.getTime(), t.getTime());
+        // Insert the three timestamp values in raw pg format
+        for (int i = 0; i < 3; i++) {
+            assertEquals(1,
+                    stmt.executeUpdate(TestUtil.insertSQL(TSWTZ_TABLE, "'" + TS1WTZ_PGFORMAT + "'")));
+            assertEquals(1,
+                    stmt.executeUpdate(TestUtil.insertSQL(TSWTZ_TABLE, "'" + TS2WTZ_PGFORMAT + "'")));
+            assertEquals(1,
+                    stmt.executeUpdate(TestUtil.insertSQL(TSWTZ_TABLE, "'" + TS3WTZ_PGFORMAT + "'")));
+            assertEquals(1,
+                    stmt.executeUpdate(TestUtil.insertSQL(TSWTZ_TABLE, "'" + TS4WTZ_PGFORMAT + "'")));
+        }
+        assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL(TSWTZ_TABLE,
+                "'" + tsu.toString(null, new Timestamp(tmpDate1.getTime())) + "'")));
+        assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL(TSWTZ_TABLE,
+                "'" + tsu.toString(null, new Timestamp(tmpDate2.getTime())) + "'")));
+        assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL(TSWTZ_TABLE,
+                "'" + tsu.toString(null, new Timestamp(tmpDate3.getTime())) + "'")));
+        assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL(TSWTZ_TABLE,
+                "'" + tsu.toString(null, new Timestamp(tmpDate4.getTime())) + "'")));
+        assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL(TSWTZ_TABLE,
+                "'" + tsu.toString(null, new Timestamp(tmpTime1.getTime())) + "'")));
+        assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL(TSWTZ_TABLE,
+                "'" + tsu.toString(null, new Timestamp(tmpTime2.getTime())) + "'")));
+        assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL(TSWTZ_TABLE,
+                "'" + tsu.toString(null, new Timestamp(tmpTime3.getTime())) + "'")));
+        assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL(TSWTZ_TABLE,
+                "'" + tsu.toString(null, new Timestamp(tmpTime4.getTime())) + "'")));
 
-    assertTrue(rs.next());
-    t = rs.getTimestamp(1);
-    assertNotNull(t);
-    assertEquals(tmpDate4.getTime(), t.getTime());
+        // Fall through helper
+        timestampTestWTZ();
 
-    // Testing for Time
-    assertTrue(rs.next());
-    t = rs.getTimestamp(1);
-    assertNotNull(t);
-    assertEquals(tmpTime1.getTime(), t.getTime());
+        assertEquals(20, stmt.executeUpdate("DELETE FROM " + TSWTZ_TABLE));
 
-    assertTrue(rs.next());
-    t = rs.getTimestamp(1);
-    assertNotNull(t);
-    assertEquals(tmpTime2.getTime(), t.getTime());
-
-    assertTrue(rs.next());
-    t = rs.getTimestamp(1);
-    assertNotNull(t);
-    assertEquals(tmpTime3.getTime(), t.getTime());
-
-    assertTrue(rs.next());
-    t = rs.getTimestamp(1);
-    assertNotNull(t);
-    assertEquals(tmpTime4.getTime(), t.getTime());
-
-    assertTrue(!rs.next()); // end of table. Fail if more entries exist.
-
-    rs.close();
-    stmt.close();
-  }
-
-  /*
-   * Helper for the TimestampTests. It tests what should be in the db
-   */
-  private void timestampTestWOTZ() throws SQLException {
-    Statement stmt = con.createStatement();
-    Timestamp t;
-    String tString;
-
-    ResultSet rs = stmt.executeQuery("select ts from " + TSWOTZ_TABLE); // removed the order by ts
-    assertNotNull(rs);
-
-    for (int i = 0; i < 3; i++) {
-      assertTrue(rs.next());
-      t = rs.getTimestamp(1);
-      assertNotNull(t);
-      assertEquals(TS1WOTZ, t);
-
-      tString = rs.getString(1);
-      assertNotNull(tString);
-      assertEquals(TS1WOTZ_PGFORMAT, tString);
-
-      assertTrue(rs.next());
-      t = rs.getTimestamp(1);
-      assertNotNull(t);
-      assertEquals(TS2WOTZ, t);
-
-      tString = rs.getString(1);
-      assertNotNull(tString);
-      assertEquals(TS2WOTZ_PGFORMAT, tString);
-
-      assertTrue(rs.next());
-      t = rs.getTimestamp(1);
-      assertNotNull(t);
-      assertEquals(TS3WOTZ, t);
-
-      tString = rs.getString(1);
-      assertNotNull(tString);
-      assertEquals(TS3WOTZ_PGFORMAT, tString);
-
-      assertTrue(rs.next());
-      t = rs.getTimestamp(1);
-      assertNotNull(t);
-      assertEquals(TS4WOTZ, t);
-
-      tString = rs.getString(1);
-      assertNotNull(tString);
-      assertEquals(TS4WOTZ_PGFORMAT, tString);
-
-      assertTrue(rs.next());
-      t = rs.getTimestamp(1);
-      assertNotNull(t);
-      assertEquals(TS5WOTZ, t);
-
-      tString = rs.getString(1);
-      assertNotNull(tString);
-      assertEquals(TS5WOTZ_PGFORMAT, tString);
-
-      assertTrue(rs.next());
-      t = rs.getTimestamp(1);
-      assertNotNull(t);
-      assertEquals(TS6WOTZ, t);
-
-      tString = rs.getString(1);
-      assertNotNull(tString);
-      assertEquals(TS6WOTZ_PGFORMAT, tString);
-
-      assertTrue(rs.next());
-      t = rs.getTimestamp(1);
-      assertNotNull(t);
-      assertEquals(TS7WOTZ, t);
-
-      tString = rs.getString(1);
-      assertNotNull(tString);
-      assertEquals(TS7WOTZ_PGFORMAT, tString);
-
-      assertTrue(rs.next());
-      t = rs.getTimestamp(1);
-      assertNotNull(t);
-      assertEquals(TS8WOTZ, t);
-
-      tString = rs.getString(1);
-      assertNotNull(tString);
-      assertEquals(TS8WOTZ_PGFORMAT, tString);
-
-      assertTrue(rs.next());
-      t = rs.getTimestamp(1);
-      assertNotNull(t);
-      assertEquals(TS9WOTZ_ROUNDED, t);
-
-      tString = rs.getString(1);
-      assertNotNull(tString);
-      assertEquals(TS9WOTZ_ROUNDED_PGFORMAT, tString);
-
-      assertTrue(rs.next());
-      t = rs.getTimestamp(1);
-      assertNotNull(t);
-      assertEquals(TS10WOTZ_ROUNDED, t);
-
-      tString = rs.getString(1);
-      assertNotNull(tString);
-      assertEquals(TS10WOTZ_ROUNDED_PGFORMAT, tString);
+        stmt.close();
     }
 
-    // Testing for Date
-    for (java.util.Date expected : TEST_DATE_TIMES) {
-      assertTrue(rs.next());
-      t = rs.getTimestamp(1);
-      assertNotNull(t);
-      assertEquals("rs.getTimestamp(1).getTime()", expected.getTime(), t.getTime());
+    /*
+     * Tests the timestamp methods in PreparedStatement on timestamp with time zone we insert a value
+     * using setTimestamp then see that we get back the same value from getTimestamp (which we know
+     * works as it was tested independently of setTimestamp
+     */
+    @Test
+    public void testSetTimestampWTZ() throws SQLException {
+        assumeTrue(TestUtil.haveIntegerDateTimes(con));
+
+        Statement stmt = con.createStatement();
+        PreparedStatement pstmt = con.prepareStatement(TestUtil.insertSQL(TSWTZ_TABLE, "?"));
+
+        pstmt.setTimestamp(1, TS1WTZ);
+        assertEquals(1, pstmt.executeUpdate());
+
+        pstmt.setTimestamp(1, TS2WTZ);
+        assertEquals(1, pstmt.executeUpdate());
+
+        pstmt.setTimestamp(1, TS3WTZ);
+        assertEquals(1, pstmt.executeUpdate());
+
+        pstmt.setTimestamp(1, TS4WTZ);
+        assertEquals(1, pstmt.executeUpdate());
+
+        // With java.sql.Timestamp
+        pstmt.setObject(1, TS1WTZ, Types.TIMESTAMP);
+        assertEquals(1, pstmt.executeUpdate());
+        pstmt.setObject(1, TS2WTZ, Types.TIMESTAMP);
+        assertEquals(1, pstmt.executeUpdate());
+        pstmt.setObject(1, TS3WTZ, Types.TIMESTAMP);
+        assertEquals(1, pstmt.executeUpdate());
+        pstmt.setObject(1, TS4WTZ, Types.TIMESTAMP);
+        assertEquals(1, pstmt.executeUpdate());
+
+        // With Strings
+        pstmt.setObject(1, TS1WTZ_PGFORMAT, Types.TIMESTAMP);
+        assertEquals(1, pstmt.executeUpdate());
+        pstmt.setObject(1, TS2WTZ_PGFORMAT, Types.TIMESTAMP);
+        assertEquals(1, pstmt.executeUpdate());
+        pstmt.setObject(1, TS3WTZ_PGFORMAT, Types.TIMESTAMP);
+        assertEquals(1, pstmt.executeUpdate());
+        pstmt.setObject(1, TS4WTZ_PGFORMAT, Types.TIMESTAMP);
+        assertEquals(1, pstmt.executeUpdate());
+
+        // With java.sql.Date
+        pstmt.setObject(1, tmpDate1, Types.TIMESTAMP);
+        assertEquals(1, pstmt.executeUpdate());
+        pstmt.setObject(1, tmpDate2, Types.TIMESTAMP);
+        assertEquals(1, pstmt.executeUpdate());
+        pstmt.setObject(1, tmpDate3, Types.TIMESTAMP);
+        assertEquals(1, pstmt.executeUpdate());
+        pstmt.setObject(1, tmpDate4, Types.TIMESTAMP);
+        assertEquals(1, pstmt.executeUpdate());
+
+        // With java.sql.Time
+        pstmt.setObject(1, tmpTime1, Types.TIMESTAMP);
+        assertEquals(1, pstmt.executeUpdate());
+        pstmt.setObject(1, tmpTime2, Types.TIMESTAMP);
+        assertEquals(1, pstmt.executeUpdate());
+        pstmt.setObject(1, tmpTime3, Types.TIMESTAMP);
+        assertEquals(1, pstmt.executeUpdate());
+        pstmt.setObject(1, tmpTime4, Types.TIMESTAMP);
+        assertEquals(1, pstmt.executeUpdate());
+        // Fall through helper
+        timestampTestWTZ();
+
+        assertEquals(20, stmt.executeUpdate("DELETE FROM " + TSWTZ_TABLE));
+
+        pstmt.close();
+        stmt.close();
     }
 
-    assertTrue(!rs.next()); // end of table. Fail if more entries exist.
+    /*
+     * Tests the timestamp methods in ResultSet on timestamp without time zone we insert a known
+     * string value (don't use setTimestamp) then see that we get back the same value from
+     * getTimestamp
+     */
+    @Test
+    public void testGetTimestampWOTZ() throws SQLException {
+        assumeTrue(TestUtil.haveIntegerDateTimes(con));
+        //Refer to #896
+        assumeMinimumServerVersion(ServerVersion.v8_4);
 
-    rs.close();
-    stmt.close();
-  }
+        Statement stmt = con.createStatement();
+        TimestampUtils tsu = ((BaseConnection) con).getTimestampUtils();
 
-  @Test
-  public void testJavaTimestampFromSQLTime() throws SQLException {
-    Statement st = con.createStatement();
-    ResultSet rs = st.executeQuery("SELECT '00:00:05.123456'::time as t, '1970-01-01 00:00:05.123456'::timestamp as ts, "
-        + "'00:00:05.123456 +0300'::time with time zone as tz, '1970-01-01 00:00:05.123456 +0300'::timestamp with time zone as tstz ");
-    rs.next();
-    Timestamp t = rs.getTimestamp("t");
-    Timestamp ts = rs.getTimestamp("ts");
-    Timestamp tz = rs.getTimestamp("tz");
+        // Insert the three timestamp values in raw pg format
+        for (int i = 0; i < 3; i++) {
+            for (String value : TS__WOTZ_PGFORMAT) {
+                assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL(TSWOTZ_TABLE, "'" + value + "'")));
+            }
+        }
 
-    Timestamp tstz = rs.getTimestamp("tstz");
+        for (java.util.Date date : TEST_DATE_TIMES) {
+            String stringValue = "'" + tsu.toString(null, new Timestamp(date.getTime())) + "'";
+            assertEquals(1, stmt.executeUpdate(TestUtil.insertSQL(TSWOTZ_TABLE, stringValue)));
+        }
 
-    Integer desiredNanos = 123456000;
-    Integer tNanos = t.getNanos();
-    Integer tzNanos = tz.getNanos();
+        // Fall through helper
+        timestampTestWOTZ();
 
-    assertEquals("Time should be microsecond-accurate", desiredNanos, tNanos);
-    assertEquals("Time with time zone should be microsecond-accurate", desiredNanos, tzNanos);
-    assertEquals("Unix epoch timestamp and Time should match", ts, t);
-    assertEquals("Unix epoch timestamp with time zone and time with time zone should match", tstz, tz);
-  }
+        assertEquals(50, stmt.executeUpdate("DELETE FROM " + TSWOTZ_TABLE));
 
-  private static Timestamp getTimestamp(int y, int m, int d, int h, int mn, int se, int f,
-      String tz) {
-    Timestamp result = null;
-    java.text.DateFormat dateFormat;
-    try {
-      String ts;
-      ts = TestUtil.fix(y, 4) + "-"
-          + TestUtil.fix(m, 2) + "-"
-          + TestUtil.fix(d, 2) + " "
-          + TestUtil.fix(h, 2) + ":"
-          + TestUtil.fix(mn, 2) + ":"
-          + TestUtil.fix(se, 2) + " ";
-
-      if (tz == null) {
-        dateFormat = new SimpleDateFormat("y-M-d H:m:s");
-      } else {
-        ts = ts + tz;
-        dateFormat = new SimpleDateFormat("y-M-d H:m:s z");
-      }
-      java.util.Date date = dateFormat.parse(ts);
-      result = new Timestamp(date.getTime());
-      result.setNanos(f);
-    } catch (Exception ex) {
-      fail(ex.getMessage());
+        stmt.close();
     }
-    return result;
-  }
 
-  private static final Timestamp TS1WTZ =
-      getTimestamp(1950, 2, 7, 15, 0, 0, 100000000, "PST");
-  private static final String TS1WTZ_PGFORMAT = "1950-02-07 15:00:00.1-08";
+    /*
+     * Tests the timestamp methods in PreparedStatement on timestamp without time zone we insert a
+     * value using setTimestamp then see that we get back the same value from getTimestamp (which we
+     * know works as it was tested independently of setTimestamp
+     */
+    @Test
+    public void testSetTimestampWOTZ() throws SQLException {
+        assumeTrue(TestUtil.haveIntegerDateTimes(con));
+        //Refer to #896
+        assumeMinimumServerVersion(ServerVersion.v8_4);
 
-  private static final Timestamp TS2WTZ =
-      getTimestamp(2000, 2, 7, 15, 0, 0, 120000000, "GMT");
-  private static final String TS2WTZ_PGFORMAT = "2000-02-07 15:00:00.12+00";
+        Statement stmt = con.createStatement();
+        PreparedStatement pstmt = con.prepareStatement(TestUtil.insertSQL(TSWOTZ_TABLE, "?"));
 
-  private static final Timestamp TS3WTZ =
-      getTimestamp(2000, 7, 7, 15, 0, 0, 123000000, "GMT");
-  private static final String TS3WTZ_PGFORMAT = "2000-07-07 15:00:00.123+00";
+        for (Timestamp timestamp : TS__WOTZ) {
+            pstmt.setTimestamp(1, timestamp);
+            assertEquals(1, pstmt.executeUpdate());
+        }
 
-  private static final Timestamp TS4WTZ =
-      getTimestamp(2000, 7, 7, 15, 0, 0, 123456000, "GMT");
-  private static final String TS4WTZ_PGFORMAT = "2000-07-07 15:00:00.123456+00";
+        // With java.sql.Timestamp
+        for (Timestamp timestamp : TS__WOTZ) {
+            pstmt.setObject(1, timestamp, Types.TIMESTAMP);
+            assertEquals(1, pstmt.executeUpdate());
+        }
 
-  private static final Timestamp TS1WOTZ =
-      getTimestamp(1950, 2, 7, 15, 0, 0, 100000000, null);
-  private static final String TS1WOTZ_PGFORMAT = "1950-02-07 15:00:00.1";
+        // With Strings
+        for (String value : TS__WOTZ_PGFORMAT) {
+            pstmt.setObject(1, value, Types.TIMESTAMP);
+            assertEquals(1, pstmt.executeUpdate());
+        }
 
-  private static final Timestamp TS2WOTZ =
-      getTimestamp(2000, 2, 7, 15, 0, 0, 120000000, null);
-  private static final String TS2WOTZ_PGFORMAT = "2000-02-07 15:00:00.12";
+        // With java.sql.Date, java.sql.Time
+        for (java.util.Date date : TEST_DATE_TIMES) {
+            pstmt.setObject(1, date, Types.TIMESTAMP);
+            assertEquals("insert into TSWOTZ_TABLE via setObject(1, " + date
+                    + ", Types.TIMESTAMP) -> expecting one row inserted", 1, pstmt.executeUpdate());
+        }
 
-  private static final Timestamp TS3WOTZ =
-      getTimestamp(2000, 7, 7, 15, 0, 0, 123000000, null);
-  private static final String TS3WOTZ_PGFORMAT = "2000-07-07 15:00:00.123";
+        // Fall through helper
+        timestampTestWOTZ();
 
-  private static final Timestamp TS4WOTZ =
-      getTimestamp(2000, 7, 7, 15, 0, 0, 123456000, null);
-  private static final String TS4WOTZ_PGFORMAT = "2000-07-07 15:00:00.123456";
+        assertEquals(50, stmt.executeUpdate("DELETE FROM " + TSWOTZ_TABLE));
 
-  private static final Timestamp TS5WOTZ =
-      new Timestamp(PGStatement.DATE_NEGATIVE_INFINITY);
-  private static final String TS5WOTZ_PGFORMAT = "-infinity";
+        pstmt.close();
+        stmt.close();
+    }
 
-  private static final Timestamp TS6WOTZ =
-      new Timestamp(PGStatement.DATE_POSITIVE_INFINITY);
-  private static final String TS6WOTZ_PGFORMAT = "infinity";
+    /*
+     * Helper for the TimestampTests. It tests what should be in the db
+     */
+    private void timestampTestWTZ() throws SQLException {
+        Statement stmt = con.createStatement();
+        ResultSet rs;
+        Timestamp t;
 
-  private static final Timestamp TS7WOTZ =
-      getTimestamp(2000, 7, 7, 15, 0, 0, 0, null);
-  private static final String TS7WOTZ_PGFORMAT = "2000-07-07 15:00:00";
+        rs = stmt.executeQuery("select ts from " + TSWTZ_TABLE); // removed the order by ts
+        assertNotNull(rs);
 
-  private static final Timestamp TS8WOTZ =
-      getTimestamp(2000, 7, 7, 15, 0, 0, 20400000, null);
-  private static final String TS8WOTZ_PGFORMAT = "2000-07-07 15:00:00.0204";
+        for (int i = 0; i < 3; i++) {
+            assertTrue(rs.next());
+            t = rs.getTimestamp(1);
+            assertNotNull(t);
+            assertEquals(TS1WTZ, t);
 
-  private static final Timestamp TS9WOTZ =
-      getTimestamp(2000, 2, 7, 15, 0, 0, 789, null);
-  private static final String TS9WOTZ_PGFORMAT = "2000-02-07 15:00:00.000000789";
-  private static final Timestamp TS9WOTZ_ROUNDED =
-      getTimestamp(2000, 2, 7, 15, 0, 0, 1000, null);
-  private static final String TS9WOTZ_ROUNDED_PGFORMAT = "2000-02-07 15:00:00.000001";
+            assertTrue(rs.next());
+            t = rs.getTimestamp(1);
+            assertNotNull(t);
+            assertEquals(TS2WTZ, t);
 
-  private static final Timestamp TS10WOTZ =
-      getTimestamp(2018, 12, 31, 23, 59, 59, 999999500, null);
-  private static final String TS10WOTZ_PGFORMAT = "2018-12-31 23:59:59.999999500";
-  private static final Timestamp TS10WOTZ_ROUNDED =
-      getTimestamp(2019, 1, 1, 0, 0, 0, 0, null);
-  private static final String TS10WOTZ_ROUNDED_PGFORMAT = "2019-01-01 00:00:00";
+            assertTrue(rs.next());
+            t = rs.getTimestamp(1);
+            assertNotNull(t);
+            assertEquals(TS3WTZ, t);
 
-  private static final Timestamp[] TS__WOTZ = {
-    TS1WOTZ, TS2WOTZ, TS3WOTZ, TS4WOTZ, TS5WOTZ,
-    TS6WOTZ, TS7WOTZ, TS8WOTZ, TS9WOTZ, TS10WOTZ,
-  };
+            assertTrue(rs.next());
+            t = rs.getTimestamp(1);
+            assertNotNull(t);
+            assertEquals(TS4WTZ, t);
+        }
 
-  private static final String[] TS__WOTZ_PGFORMAT = {
-    TS1WOTZ_PGFORMAT, TS2WOTZ_PGFORMAT, TS3WOTZ_PGFORMAT, TS4WOTZ_PGFORMAT, TS5WOTZ_PGFORMAT,
-    TS6WOTZ_PGFORMAT, TS7WOTZ_PGFORMAT, TS8WOTZ_PGFORMAT, TS9WOTZ_PGFORMAT, TS10WOTZ_PGFORMAT,
-  };
+        // Testing for Date
+        assertTrue(rs.next());
+        t = rs.getTimestamp(1);
+        assertNotNull(t);
+        assertEquals(tmpDate1.getTime(), t.getTime());
 
-  private static final String TSWTZ_TABLE = "testtimestampwtz";
-  private static final String TSWOTZ_TABLE = "testtimestampwotz";
-  private static final String DATE_TABLE = "testtimestampdate";
+        assertTrue(rs.next());
+        t = rs.getTimestamp(1);
+        assertNotNull(t);
+        assertEquals(tmpDate2.getTime(), t.getTime());
 
-  private static final java.sql.Date tmpDate1 = new java.sql.Date(TS1WTZ.getTime());
-  private static final java.sql.Time tmpTime1 = new java.sql.Time(TS1WTZ.getTime());
-  private static final java.sql.Date tmpDate2 = new java.sql.Date(TS2WTZ.getTime());
-  private static final java.sql.Time tmpTime2 = new java.sql.Time(TS2WTZ.getTime());
-  private static final java.sql.Date tmpDate3 = new java.sql.Date(TS3WTZ.getTime());
-  private static final java.sql.Time tmpTime3 = new java.sql.Time(TS3WTZ.getTime());
-  private static final java.sql.Date tmpDate4 = new java.sql.Date(TS4WTZ.getTime());
-  private static final java.sql.Time tmpTime4 = new java.sql.Time(TS4WTZ.getTime());
+        assertTrue(rs.next());
+        t = rs.getTimestamp(1);
+        assertNotNull(t);
+        assertEquals(tmpDate3.getTime(), t.getTime());
 
-  private static final java.sql.Date tmpDate1WOTZ = new java.sql.Date(TS1WOTZ.getTime());
-  private static final java.sql.Time tmpTime1WOTZ = new java.sql.Time(TS1WOTZ.getTime());
-  private static final java.sql.Date tmpDate2WOTZ = new java.sql.Date(TS2WOTZ.getTime());
-  private static final java.sql.Time tmpTime2WOTZ = new java.sql.Time(TS2WOTZ.getTime());
-  private static final java.sql.Date tmpDate3WOTZ = new java.sql.Date(TS3WOTZ.getTime());
-  private static final java.sql.Time tmpTime3WOTZ = new java.sql.Time(TS3WOTZ.getTime());
-  private static final java.sql.Date tmpDate4WOTZ = new java.sql.Date(TS4WOTZ.getTime());
-  private static final java.sql.Time tmpTime4WOTZ = new java.sql.Time(TS4WOTZ.getTime());
-  private static final java.sql.Date tmpDate5WOTZ = new java.sql.Date(TS5WOTZ.getTime());
-  private static final java.sql.Date tmpTime5WOTZ = new java.sql.Date(TS5WOTZ.getTime());
-  private static final java.sql.Date tmpDate6WOTZ = new java.sql.Date(TS6WOTZ.getTime());
-  private static final java.sql.Date tmpTime6WOTZ = new java.sql.Date(TS6WOTZ.getTime());
-  private static final java.sql.Date tmpDate7WOTZ = new java.sql.Date(TS7WOTZ.getTime());
-  private static final java.sql.Time tmpTime7WOTZ = new java.sql.Time(TS7WOTZ.getTime());
-  private static final java.sql.Date tmpDate8WOTZ = new java.sql.Date(TS8WOTZ.getTime());
-  private static final java.sql.Time tmpTime8WOTZ = new java.sql.Time(TS8WOTZ.getTime());
-  private static final java.sql.Date tmpDate9WOTZ = new java.sql.Date(TS9WOTZ.getTime());
-  private static final java.sql.Time tmpTime9WOTZ = new java.sql.Time(TS9WOTZ.getTime());
-  private static final java.sql.Date tmpDate10WOTZ = new java.sql.Date(TS10WOTZ.getTime());
-  private static final java.sql.Time tmpTime10WOTZ = new java.sql.Time(TS10WOTZ.getTime());
+        assertTrue(rs.next());
+        t = rs.getTimestamp(1);
+        assertNotNull(t);
+        assertEquals(tmpDate4.getTime(), t.getTime());
 
-  private static final java.util.Date[] TEST_DATE_TIMES = {
-      tmpDate1WOTZ, tmpDate2WOTZ, tmpDate3WOTZ, tmpDate4WOTZ, tmpDate5WOTZ,
-      tmpDate6WOTZ, tmpDate7WOTZ, tmpDate8WOTZ, tmpDate9WOTZ, tmpDate10WOTZ,
-      tmpTime1WOTZ, tmpTime2WOTZ, tmpTime3WOTZ, tmpTime4WOTZ, tmpTime5WOTZ,
-      tmpTime6WOTZ, tmpTime7WOTZ, tmpTime8WOTZ, tmpTime9WOTZ, tmpTime10WOTZ,
-  };
+        // Testing for Time
+        assertTrue(rs.next());
+        t = rs.getTimestamp(1);
+        assertNotNull(t);
+        assertEquals(tmpTime1.getTime(), t.getTime());
+
+        assertTrue(rs.next());
+        t = rs.getTimestamp(1);
+        assertNotNull(t);
+        assertEquals(tmpTime2.getTime(), t.getTime());
+
+        assertTrue(rs.next());
+        t = rs.getTimestamp(1);
+        assertNotNull(t);
+        assertEquals(tmpTime3.getTime(), t.getTime());
+
+        assertTrue(rs.next());
+        t = rs.getTimestamp(1);
+        assertNotNull(t);
+        assertEquals(tmpTime4.getTime(), t.getTime());
+
+        assertTrue(!rs.next()); // end of table. Fail if more entries exist.
+
+        rs.close();
+        stmt.close();
+    }
+
+    /*
+     * Helper for the timestamp tests: verifies the expected "timestamp without time zone" rows in the db.
+     */
+    private void timestampTestWOTZ() throws SQLException {
+        Statement stmt = con.createStatement();
+        Timestamp t;
+        String tString;
+
+        ResultSet rs = stmt.executeQuery("select ts from " + TSWOTZ_TABLE); // no ORDER BY ts: rows are read back in insertion order
+        assertNotNull(rs);
+
+        for (int i = 0; i < 3; i++) {
+            assertTrue(rs.next());
+            t = rs.getTimestamp(1);
+            assertNotNull(t);
+            assertEquals(TS1WOTZ, t);
+
+            tString = rs.getString(1);
+            assertNotNull(tString);
+            assertEquals(TS1WOTZ_PGFORMAT, tString);
+
+            assertTrue(rs.next());
+            t = rs.getTimestamp(1);
+            assertNotNull(t);
+            assertEquals(TS2WOTZ, t);
+
+            tString = rs.getString(1);
+            assertNotNull(tString);
+            assertEquals(TS2WOTZ_PGFORMAT, tString);
+
+            assertTrue(rs.next());
+            t = rs.getTimestamp(1);
+            assertNotNull(t);
+            assertEquals(TS3WOTZ, t);
+
+            tString = rs.getString(1);
+            assertNotNull(tString);
+            assertEquals(TS3WOTZ_PGFORMAT, tString);
+
+            assertTrue(rs.next());
+            t = rs.getTimestamp(1);
+            assertNotNull(t);
+            assertEquals(TS4WOTZ, t);
+
+            tString = rs.getString(1);
+            assertNotNull(tString);
+            assertEquals(TS4WOTZ_PGFORMAT, tString);
+
+            assertTrue(rs.next());
+            t = rs.getTimestamp(1);
+            assertNotNull(t);
+            assertEquals(TS5WOTZ, t);
+
+            tString = rs.getString(1);
+            assertNotNull(tString);
+            assertEquals(TS5WOTZ_PGFORMAT, tString);
+
+            assertTrue(rs.next());
+            t = rs.getTimestamp(1);
+            assertNotNull(t);
+            assertEquals(TS6WOTZ, t);
+
+            tString = rs.getString(1);
+            assertNotNull(tString);
+            assertEquals(TS6WOTZ_PGFORMAT, tString);
+
+            assertTrue(rs.next());
+            t = rs.getTimestamp(1);
+            assertNotNull(t);
+            assertEquals(TS7WOTZ, t);
+
+            tString = rs.getString(1);
+            assertNotNull(tString);
+            assertEquals(TS7WOTZ_PGFORMAT, tString);
+
+            assertTrue(rs.next());
+            t = rs.getTimestamp(1);
+            assertNotNull(t);
+            assertEquals(TS8WOTZ, t);
+
+            tString = rs.getString(1);
+            assertNotNull(tString);
+            assertEquals(TS8WOTZ_PGFORMAT, tString);
+
+            assertTrue(rs.next());
+            t = rs.getTimestamp(1);
+            assertNotNull(t);
+            assertEquals(TS9WOTZ_ROUNDED, t);
+
+            tString = rs.getString(1);
+            assertNotNull(tString);
+            assertEquals(TS9WOTZ_ROUNDED_PGFORMAT, tString);
+
+            assertTrue(rs.next());
+            t = rs.getTimestamp(1);
+            assertNotNull(t);
+            assertEquals(TS10WOTZ_ROUNDED, t);
+
+            tString = rs.getString(1);
+            assertNotNull(tString);
+            assertEquals(TS10WOTZ_ROUNDED_PGFORMAT, tString);
+        }
+
+        // Testing the Date and Time values (TEST_DATE_TIMES contains both)
+        for (java.util.Date expected : TEST_DATE_TIMES) {
+            assertTrue(rs.next());
+            t = rs.getTimestamp(1);
+            assertNotNull(t);
+            assertEquals("rs.getTimestamp(1).getTime()", expected.getTime(), t.getTime());
+        }
+
+        assertTrue(!rs.next()); // end of table. Fail if more entries exist.
+
+        rs.close();
+        stmt.close();
+    }
+
+    @Test
+    public void testJavaTimestampFromSQLTime() throws SQLException {
+        Statement st = con.createStatement();
+        ResultSet rs = st.executeQuery("SELECT '00:00:05.123456'::time as t, '1970-01-01 00:00:05.123456'::timestamp as ts, "
+                + "'00:00:05.123456 +0300'::time with time zone as tz, '1970-01-01 00:00:05.123456 +0300'::timestamp with time zone as tstz ");
+        rs.next();
+        Timestamp t = rs.getTimestamp("t");
+        Timestamp ts = rs.getTimestamp("ts");
+        Timestamp tz = rs.getTimestamp("tz");
+
+        Timestamp tstz = rs.getTimestamp("tstz");
+
+        Integer desiredNanos = 123456000;
+        Integer tNanos = t.getNanos();
+        Integer tzNanos = tz.getNanos();
+
+        assertEquals("Time should be microsecond-accurate", desiredNanos, tNanos);
+        assertEquals("Time with time zone should be microsecond-accurate", desiredNanos, tzNanos);
+        assertEquals("Unix epoch timestamp and Time should match", ts, t);
+        assertEquals("Unix epoch timestamp with time zone and time with time zone should match", tstz, tz);
+    }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/TimezoneCachingTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/TimezoneCachingTest.java
index 97430e5..ec74813 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/TimezoneCachingTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/TimezoneCachingTest.java
@@ -29,368 +29,368 @@ import java.util.TimeZone;
 
 public class TimezoneCachingTest extends BaseTest4 {
 
-  /**
-   * Test to check the internal cached timezone of a prepared statement is set/cleared as expected.
-   */
-  @Test
-  public void testPreparedStatementCachedTimezoneInstance() throws SQLException {
-    Timestamp ts = new Timestamp(2016 - 1900, 0, 31, 0, 0, 0, 0);
-    Date date = new Date(2016 - 1900, 0, 31);
-    Time time = new Time(System.currentTimeMillis());
-    TimeZone tz = TimeZone.getDefault();
-    PreparedStatement pstmt = null;
-    try {
-      pstmt = con.prepareStatement("INSERT INTO testtz VALUES (?,?)");
-      assertEquals(
-          "Cache never initialized: must be null",
-          null, getTimeZoneCache(pstmt));
-      pstmt.setInt(1, 1);
-      assertEquals(
-          "Cache never initialized: must be null",
-          null, getTimeZoneCache(pstmt));
-      pstmt.setTimestamp(2, ts);
-      assertEquals(
-          "Cache initialized by setTimestamp(xx): must not be null",
-          tz, getTimeZoneCache(pstmt));
-      pstmt.addBatch();
-      assertEquals(
-          "Cache was initialized, addBatch does not change that: must not be null",
-          tz, getTimeZoneCache(pstmt));
-      pstmt.setInt(1, 2);
-      pstmt.setNull(2, java.sql.Types.DATE);
-      assertEquals(
-          "Cache was initialized, setNull does not change that: must not be null",
-          tz, getTimeZoneCache(pstmt));
-      pstmt.addBatch();
-      assertEquals(
-          "Cache was initialized, addBatch does not change that: must not be null",
-          tz, getTimeZoneCache(pstmt));
-      pstmt.executeBatch();
-      assertEquals(
-          "Cache reset by executeBatch(): must be null",
-          null, getTimeZoneCache(pstmt));
-      pstmt.setInt(1, 3);
-      assertEquals(
-          "Cache not initialized: must be null",
-          null, getTimeZoneCache(pstmt));
-      pstmt.setInt(1, 4);
-      pstmt.setNull(2, java.sql.Types.DATE);
-      assertEquals(
-          "Cache was not initialized, setNull does not change that: must be null",
-          null, getTimeZoneCache(pstmt));
-      pstmt.setTimestamp(2, ts);
-      assertEquals(
-          "Cache initialized by setTimestamp(xx): must not be null",
-          tz, getTimeZoneCache(pstmt));
-      pstmt.clearParameters();
-      assertEquals(
-          "Cache was initialized, clearParameters does not change that: must not be null",
-          tz, getTimeZoneCache(pstmt));
-      pstmt.setInt(1, 5);
-      pstmt.setTimestamp(2, ts);
-      pstmt.addBatch();
-      pstmt.executeBatch();
-      pstmt.close();
-      pstmt = con.prepareStatement("UPDATE testtz SET col2 = ? WHERE col1 = 1");
-      assertEquals(
-          "Cache not initialized: must be null",
-          null, getTimeZoneCache(pstmt));
-      pstmt.setDate(1, date);
-      assertEquals(
-          "Cache initialized by setDate(xx): must not be null",
-          tz, getTimeZoneCache(pstmt));
-      pstmt.execute();
-      assertEquals(
-          "Cache reset by execute(): must be null",
-          null, getTimeZoneCache(pstmt));
-      pstmt.setDate(1, date);
-      assertEquals(
-          "Cache initialized by setDate(xx): must not be null",
-          tz, getTimeZoneCache(pstmt));
-      pstmt.executeUpdate();
-      assertEquals(
-          "Cache reset by executeUpdate(): must be null",
-          null, getTimeZoneCache(pstmt));
-      pstmt.setTime(1, time);
-      assertEquals(
-          "Cache initialized by setTime(xx): must not be null",
-          tz, getTimeZoneCache(pstmt));
-      pstmt.close();
-      pstmt = con.prepareStatement("SELECT * FROM testtz WHERE col2 = ?");
-      pstmt.setDate(1, date);
-      assertEquals(
-          "Cache initialized by setDate(xx): must not be null",
-          tz, getTimeZoneCache(pstmt));
-      pstmt.executeQuery();
-      assertEquals(
-          "Cache reset by executeQuery(): must be null",
-          null, getTimeZoneCache(pstmt));
-    } finally {
-      TestUtil.closeQuietly(pstmt);
+    /**
+     * Test to check the internal cached timezone of a prepared statement is set/cleared as expected.
+     */
+    @Test
+    public void testPreparedStatementCachedTimezoneInstance() throws SQLException {
+        Timestamp ts = new Timestamp(2016 - 1900, 0, 31, 0, 0, 0, 0);
+        Date date = new Date(2016 - 1900, 0, 31);
+        Time time = new Time(System.currentTimeMillis());
+        TimeZone tz = TimeZone.getDefault();
+        PreparedStatement pstmt = null;
+        try {
+            pstmt = con.prepareStatement("INSERT INTO testtz VALUES (?,?)");
+            assertEquals(
+                    "Cache never initialized: must be null",
+                    null, getTimeZoneCache(pstmt));
+            pstmt.setInt(1, 1);
+            assertEquals(
+                    "Cache never initialized: must be null",
+                    null, getTimeZoneCache(pstmt));
+            pstmt.setTimestamp(2, ts);
+            assertEquals(
+                    "Cache initialized by setTimestamp(xx): must not be null",
+                    tz, getTimeZoneCache(pstmt));
+            pstmt.addBatch();
+            assertEquals(
+                    "Cache was initialized, addBatch does not change that: must not be null",
+                    tz, getTimeZoneCache(pstmt));
+            pstmt.setInt(1, 2);
+            pstmt.setNull(2, java.sql.Types.DATE);
+            assertEquals(
+                    "Cache was initialized, setNull does not change that: must not be null",
+                    tz, getTimeZoneCache(pstmt));
+            pstmt.addBatch();
+            assertEquals(
+                    "Cache was initialized, addBatch does not change that: must not be null",
+                    tz, getTimeZoneCache(pstmt));
+            pstmt.executeBatch();
+            assertEquals(
+                    "Cache reset by executeBatch(): must be null",
+                    null, getTimeZoneCache(pstmt));
+            pstmt.setInt(1, 3);
+            assertEquals(
+                    "Cache not initialized: must be null",
+                    null, getTimeZoneCache(pstmt));
+            pstmt.setInt(1, 4);
+            pstmt.setNull(2, java.sql.Types.DATE);
+            assertEquals(
+                    "Cache was not initialized, setNull does not change that: must be null",
+                    null, getTimeZoneCache(pstmt));
+            pstmt.setTimestamp(2, ts);
+            assertEquals(
+                    "Cache initialized by setTimestamp(xx): must not be null",
+                    tz, getTimeZoneCache(pstmt));
+            pstmt.clearParameters();
+            assertEquals(
+                    "Cache was initialized, clearParameters does not change that: must not be null",
+                    tz, getTimeZoneCache(pstmt));
+            pstmt.setInt(1, 5);
+            pstmt.setTimestamp(2, ts);
+            pstmt.addBatch();
+            pstmt.executeBatch();
+            pstmt.close();
+            pstmt = con.prepareStatement("UPDATE testtz SET col2 = ? WHERE col1 = 1");
+            assertEquals(
+                    "Cache not initialized: must be null",
+                    null, getTimeZoneCache(pstmt));
+            pstmt.setDate(1, date);
+            assertEquals(
+                    "Cache initialized by setDate(xx): must not be null",
+                    tz, getTimeZoneCache(pstmt));
+            pstmt.execute();
+            assertEquals(
+                    "Cache reset by execute(): must be null",
+                    null, getTimeZoneCache(pstmt));
+            pstmt.setDate(1, date);
+            assertEquals(
+                    "Cache initialized by setDate(xx): must not be null",
+                    tz, getTimeZoneCache(pstmt));
+            pstmt.executeUpdate();
+            assertEquals(
+                    "Cache reset by executeUpdate(): must be null",
+                    null, getTimeZoneCache(pstmt));
+            pstmt.setTime(1, time);
+            assertEquals(
+                    "Cache initialized by setTime(xx): must not be null",
+                    tz, getTimeZoneCache(pstmt));
+            pstmt.close();
+            pstmt = con.prepareStatement("SELECT * FROM testtz WHERE col2 = ?");
+            pstmt.setDate(1, date);
+            assertEquals(
+                    "Cache initialized by setDate(xx): must not be null",
+                    tz, getTimeZoneCache(pstmt));
+            pstmt.executeQuery();
+            assertEquals(
+                    "Cache reset by executeQuery(): must be null",
+                    null, getTimeZoneCache(pstmt));
+        } finally {
+            TestUtil.closeQuietly(pstmt);
+        }
     }
-  }
 
-  /**
-   * Test to check the internal cached timezone of a prepared statement is used as expected.
-   */
-  @Test
-  public void testPreparedStatementCachedTimezoneUsage() throws SQLException {
-    Timestamp ts = new Timestamp(2016 - 1900, 0, 31, 0, 0, 0, 0);
-    Statement stmt = null;
-    PreparedStatement pstmt = null;
-    TimeZone tz1 = TimeZone.getTimeZone("GMT+8:00");
-    TimeZone tz2 = TimeZone.getTimeZone("GMT-2:00");
-    TimeZone tz3 = TimeZone.getTimeZone("UTC+2");
-    TimeZone tz4 = TimeZone.getTimeZone("UTC+3");
-    Calendar c3 = new GregorianCalendar(tz3);
-    Calendar c4 = new GregorianCalendar(tz4);
-    try {
-      stmt = con.createStatement();
-      TimeZone.setDefault(tz1);
-      pstmt = con.prepareStatement("INSERT INTO testtz VALUES(1, ?)");
-      pstmt.setTimestamp(1, ts);
-      pstmt.executeUpdate();
-      checkTimestamp("Default is tz2, was saved as tz1, expecting tz1", stmt, ts, tz1);
-      pstmt.close();
-      pstmt = con.prepareStatement("UPDATE testtz SET col2 = ? WHERE col1 = ?");
-      pstmt.setTimestamp(1, ts);
-      TimeZone.setDefault(tz2);
-      pstmt.setInt(2, 1);
-      pstmt.addBatch();
-      pstmt.executeBatch();
-      checkTimestamp("Default is tz2, but was saved as tz1, expecting tz1", stmt, ts, tz1);
-      pstmt.setTimestamp(1, ts);
-      pstmt.setInt(2, 1);
-      pstmt.addBatch();
-      pstmt.executeBatch();
-      checkTimestamp("Default is tz2, was saved as tz2, expecting tz2", stmt, ts, tz2);
-      pstmt.setTimestamp(1, ts);
-      pstmt.setInt(2, 1);
-      pstmt.clearParameters();
-      TimeZone.setDefault(tz1);
-      pstmt.setTimestamp(1, ts);
-      pstmt.setInt(2, 1);
-      pstmt.addBatch();
-      pstmt.executeBatch();
-      checkTimestamp(
-          "Default is tz1, but was first saved as tz2, next save used tz2 cache, expecting tz2",
-          stmt, ts, tz2);
-      pstmt.setTimestamp(1, ts, c3);
-      pstmt.setInt(2, 1);
-      pstmt.addBatch();
-      pstmt.executeBatch();
-      checkTimestamp("Explicit use of tz3, expecting tz3", stmt, ts, tz3);
-      pstmt.setTimestamp(1, ts, c3);
-      pstmt.setInt(2, 1);
-      pstmt.addBatch();
-      pstmt.setTimestamp(1, ts, c4);
-      pstmt.setInt(2, 1);
-      pstmt.addBatch();
-      pstmt.executeBatch();
-      checkTimestamp("Last set explicitly used tz4, expecting tz4", stmt, ts, tz4);
-      pstmt.setTimestamp(1, ts, c3);
-      pstmt.setInt(2, 1);
-      pstmt.addBatch();
-      pstmt.setTimestamp(1, ts);
-      pstmt.setInt(2, 1);
-      pstmt.addBatch();
-      pstmt.setTimestamp(1, ts, c4);
-      pstmt.setInt(2, 1);
-      pstmt.addBatch();
-      pstmt.executeBatch();
-      checkTimestamp("Last set explicitly used tz4, expecting tz4", stmt, ts, tz4);
-      pstmt.setTimestamp(1, ts, c3);
-      pstmt.setInt(2, 1);
-      pstmt.addBatch();
-      pstmt.setTimestamp(1, ts);
-      pstmt.setInt(2, 1);
-      pstmt.addBatch();
-      pstmt.executeBatch();
-      checkTimestamp(
-          "Default is tz1, was first saved as tz1, last save used tz1 cache, expecting tz1", stmt,
-          ts, tz1);
-      pstmt.setTimestamp(1, ts);
-      pstmt.setInt(2, 1);
-      pstmt.addBatch();
-      pstmt.setTimestamp(1, ts, c4);
-      pstmt.setInt(2, 1);
-      pstmt.addBatch();
-      pstmt.setTimestamp(1, ts);
-      pstmt.setInt(2, 1);
-      pstmt.addBatch();
-      pstmt.executeBatch();
-      checkTimestamp(
-          "Default is tz1, was first saved as tz1, last save used tz1 cache, expecting tz1", stmt,
-          ts, tz1);
-    } catch (BatchUpdateException ex) {
-      SQLException nextException = ex.getNextException();
-      nextException.printStackTrace();
-    } finally {
-      TimeZone.setDefault(null);
-      TestUtil.closeQuietly(pstmt);
-      TestUtil.closeQuietly(stmt);
+    /**
+     * Test to check the internal cached timezone of a prepared statement is used as expected.
+     */
+    @Test
+    public void testPreparedStatementCachedTimezoneUsage() throws SQLException {
+        Timestamp ts = new Timestamp(2016 - 1900, 0, 31, 0, 0, 0, 0);
+        Statement stmt = null;
+        PreparedStatement pstmt = null;
+        TimeZone tz1 = TimeZone.getTimeZone("GMT+8:00");
+        TimeZone tz2 = TimeZone.getTimeZone("GMT-2:00");
+        TimeZone tz3 = TimeZone.getTimeZone("UTC+2");
+        TimeZone tz4 = TimeZone.getTimeZone("UTC+3");
+        Calendar c3 = new GregorianCalendar(tz3);
+        Calendar c4 = new GregorianCalendar(tz4);
+        try {
+            stmt = con.createStatement();
+            TimeZone.setDefault(tz1);
+            pstmt = con.prepareStatement("INSERT INTO testtz VALUES(1, ?)");
+            pstmt.setTimestamp(1, ts);
+            pstmt.executeUpdate();
+            checkTimestamp("Default is tz2, was saved as tz1, expecting tz1", stmt, ts, tz1);
+            pstmt.close();
+            pstmt = con.prepareStatement("UPDATE testtz SET col2 = ? WHERE col1 = ?");
+            pstmt.setTimestamp(1, ts);
+            TimeZone.setDefault(tz2);
+            pstmt.setInt(2, 1);
+            pstmt.addBatch();
+            pstmt.executeBatch();
+            checkTimestamp("Default is tz2, but was saved as tz1, expecting tz1", stmt, ts, tz1);
+            pstmt.setTimestamp(1, ts);
+            pstmt.setInt(2, 1);
+            pstmt.addBatch();
+            pstmt.executeBatch();
+            checkTimestamp("Default is tz2, was saved as tz2, expecting tz2", stmt, ts, tz2);
+            pstmt.setTimestamp(1, ts);
+            pstmt.setInt(2, 1);
+            pstmt.clearParameters();
+            TimeZone.setDefault(tz1);
+            pstmt.setTimestamp(1, ts);
+            pstmt.setInt(2, 1);
+            pstmt.addBatch();
+            pstmt.executeBatch();
+            checkTimestamp(
+                    "Default is tz1, but was first saved as tz2, next save used tz2 cache, expecting tz2",
+                    stmt, ts, tz2);
+            pstmt.setTimestamp(1, ts, c3);
+            pstmt.setInt(2, 1);
+            pstmt.addBatch();
+            pstmt.executeBatch();
+            checkTimestamp("Explicit use of tz3, expecting tz3", stmt, ts, tz3);
+            pstmt.setTimestamp(1, ts, c3);
+            pstmt.setInt(2, 1);
+            pstmt.addBatch();
+            pstmt.setTimestamp(1, ts, c4);
+            pstmt.setInt(2, 1);
+            pstmt.addBatch();
+            pstmt.executeBatch();
+            checkTimestamp("Last set explicitly used tz4, expecting tz4", stmt, ts, tz4);
+            pstmt.setTimestamp(1, ts, c3);
+            pstmt.setInt(2, 1);
+            pstmt.addBatch();
+            pstmt.setTimestamp(1, ts);
+            pstmt.setInt(2, 1);
+            pstmt.addBatch();
+            pstmt.setTimestamp(1, ts, c4);
+            pstmt.setInt(2, 1);
+            pstmt.addBatch();
+            pstmt.executeBatch();
+            checkTimestamp("Last set explicitly used tz4, expecting tz4", stmt, ts, tz4);
+            pstmt.setTimestamp(1, ts, c3);
+            pstmt.setInt(2, 1);
+            pstmt.addBatch();
+            pstmt.setTimestamp(1, ts);
+            pstmt.setInt(2, 1);
+            pstmt.addBatch();
+            pstmt.executeBatch();
+            checkTimestamp(
+                    "Default is tz1, was first saved as tz1, last save used tz1 cache, expecting tz1", stmt,
+                    ts, tz1);
+            pstmt.setTimestamp(1, ts);
+            pstmt.setInt(2, 1);
+            pstmt.addBatch();
+            pstmt.setTimestamp(1, ts, c4);
+            pstmt.setInt(2, 1);
+            pstmt.addBatch();
+            pstmt.setTimestamp(1, ts);
+            pstmt.setInt(2, 1);
+            pstmt.addBatch();
+            pstmt.executeBatch();
+            checkTimestamp(
+                    "Default is tz1, was first saved as tz1, last save used tz1 cache, expecting tz1", stmt,
+                    ts, tz1);
+        } catch (BatchUpdateException ex) {
+            SQLException nextException = ex.getNextException();
+            nextException.printStackTrace();
+        } finally {
+            TimeZone.setDefault(null);
+            TestUtil.closeQuietly(pstmt);
+            TestUtil.closeQuietly(stmt);
+        }
     }
-  }
 
-  /**
-   * Test to check the internal cached timezone of a result set is set/cleared as expected.
-   */
-  @Test
-  public void testResultSetCachedTimezoneInstance() throws SQLException {
-    Timestamp ts = new Timestamp(2016 - 1900, 0, 31, 0, 0, 0, 0);
-    TimeZone tz = TimeZone.getDefault();
-    Statement stmt = null;
-    PreparedStatement pstmt = null;
-    ResultSet rs = null;
-    try {
-      pstmt = con.prepareStatement("INSERT INTO testtz VALUES (?,?)");
-      pstmt.setInt(1, 1);
-      pstmt.setTimestamp(2, ts);
-      pstmt.addBatch();
-      pstmt.executeBatch();
-      stmt = con.createStatement();
-      rs = stmt.executeQuery("SELECT col1, col2 FROM testtz");
-      rs.next();
-      assertEquals("Cache never initialized: must be null", null, getTimeZoneCache(rs));
-      rs.getInt(1);
-      assertEquals("Cache never initialized: must be null", null, getTimeZoneCache(rs));
-      rs.getTimestamp(2);
-      assertEquals("Cache initialized by getTimestamp(x): must not be null",
-          tz, getTimeZoneCache(rs));
-      rs.close();
-      rs = stmt.executeQuery("SELECT col1, col2 FROM testtz");
-      rs.next();
-      rs.getInt(1);
-      assertEquals("Cache never initialized: must be null", null, getTimeZoneCache(rs));
-      rs.getObject(2);
-      assertEquals("Cache initialized by getObject(x) on a DATE column: must not be null",
-          tz, getTimeZoneCache(rs));
-      rs.close();
-      rs = stmt.executeQuery("SELECT col1, col2 FROM testtz");
-      rs.next();
-      assertEquals("Cache should NOT be set", null, getTimeZoneCache(rs));
-      rs.getInt(1);
-      assertEquals("Cache never initialized: must be null", null, getTimeZoneCache(rs));
-      rs.getDate(2);
-      assertEquals("Cache initialized by getDate(x): must not be null", tz, getTimeZoneCache(rs));
-      rs.close();
-    } finally {
-      TestUtil.closeQuietly(rs);
-      TestUtil.closeQuietly(pstmt);
-      TestUtil.closeQuietly(stmt);
+    /**
+     * Test to check the internal cached timezone of a result set is set/cleared as expected.
+     */
+    @Test
+    public void testResultSetCachedTimezoneInstance() throws SQLException {
+        Timestamp ts = new Timestamp(2016 - 1900, 0, 31, 0, 0, 0, 0);
+        TimeZone tz = TimeZone.getDefault();
+        Statement stmt = null;
+        PreparedStatement pstmt = null;
+        ResultSet rs = null;
+        try {
+            pstmt = con.prepareStatement("INSERT INTO testtz VALUES (?,?)");
+            pstmt.setInt(1, 1);
+            pstmt.setTimestamp(2, ts);
+            pstmt.addBatch();
+            pstmt.executeBatch();
+            stmt = con.createStatement();
+            rs = stmt.executeQuery("SELECT col1, col2 FROM testtz");
+            rs.next();
+            assertEquals("Cache never initialized: must be null", null, getTimeZoneCache(rs));
+            rs.getInt(1);
+            assertEquals("Cache never initialized: must be null", null, getTimeZoneCache(rs));
+            rs.getTimestamp(2);
+            assertEquals("Cache initialized by getTimestamp(x): must not be null",
+                    tz, getTimeZoneCache(rs));
+            rs.close();
+            rs = stmt.executeQuery("SELECT col1, col2 FROM testtz");
+            rs.next();
+            rs.getInt(1);
+            assertEquals("Cache never initialized: must be null", null, getTimeZoneCache(rs));
+            rs.getObject(2);
+            assertEquals("Cache initialized by getObject(x) on a DATE column: must not be null",
+                    tz, getTimeZoneCache(rs));
+            rs.close();
+            rs = stmt.executeQuery("SELECT col1, col2 FROM testtz");
+            rs.next();
+            assertEquals("Cache should NOT be set", null, getTimeZoneCache(rs));
+            rs.getInt(1);
+            assertEquals("Cache never initialized: must be null", null, getTimeZoneCache(rs));
+            rs.getDate(2);
+            assertEquals("Cache initialized by getDate(x): must not be null", tz, getTimeZoneCache(rs));
+            rs.close();
+        } finally {
+            TestUtil.closeQuietly(rs);
+            TestUtil.closeQuietly(pstmt);
+            TestUtil.closeQuietly(stmt);
+        }
     }
-  }
 
-  /**
-   * Test to check the internal cached timezone of a result set is used as expected.
-   */
-  @Test
-  public void testResultSetCachedTimezoneUsage() throws SQLException {
-    Statement stmt = null;
-    PreparedStatement pstmt = null;
-    ResultSet rs = null;
-    TimeZone tz1 = TimeZone.getTimeZone("GMT+8:00");
-    TimeZone tz2 = TimeZone.getTimeZone("GMT-2:00"); // 10 hour difference
-    Timestamp ts1 = new Timestamp(2016 - 1900, 0, 31, 3, 0, 0, 0);
-    Timestamp ts2 = new Timestamp(2016 - 1900, 0, 31, 13, 0, 0, 0); // 10 hour difference
-    Calendar c1 = new GregorianCalendar(tz1);
-    Calendar c2 = new GregorianCalendar(tz2);
-    try {
-      TimeZone.setDefault(tz1);
-      pstmt = con.prepareStatement("INSERT INTO testtz VALUES (?,?)");
-      pstmt.setInt(1, 1);
-      // We are in tz1, so timestamp added as tz1.
-      pstmt.setTimestamp(2, ts1);
-      pstmt.addBatch();
-      pstmt.executeBatch();
-      stmt = con.createStatement();
-      rs = stmt.executeQuery("SELECT col1, col2 FROM testtz");
-      rs.next();
-      rs.getInt(1);
-      assertEquals(
-          "Current TZ is tz1, empty cache to be initialized to tz1 => retrieve in tz1, timestamps must be equal",
-          ts1, rs.getTimestamp(2));
-      rs.close();
-      rs = stmt.executeQuery("SELECT col1, col2 FROM testtz");
-      rs.next();
-      rs.getInt(1);
-      TimeZone.setDefault(tz2);
-      assertEquals(
-          "Current TZ is tz2, empty cache to be initialized to tz2 => retrieve in tz2, timestamps cannot be equal",
-          ts2, rs.getTimestamp(2));
-      assertEquals(
-          "Explicit tz1 calendar, so timestamps must be equal",
-          ts1, rs.getTimestamp(2, c1));
-      assertEquals(
-          "Cache was initialized to tz2, so timestamps cannot be equal",
-          ts2, rs.getTimestamp(2));
-      TimeZone.setDefault(tz1);
-      assertEquals(
-          "Cache was initialized to tz2, so timestamps cannot be equal",
-          ts2, rs.getTimestamp(2));
-      rs.close();
-      rs = stmt.executeQuery("SELECT col1, col2 FROM testtz");
-      rs.next();
-      rs.getInt(1);
-      assertEquals(
-          "Explicit tz2 calendar, so timestamps cannot be equal",
-          ts2, rs.getTimestamp(2, c2));
-      assertEquals(
-          "Current TZ is tz1, empty cache to be initialized to tz1 => retrieve in tz1, timestamps must be equal",
-          ts1, rs.getTimestamp(2));
-      assertEquals(
-          "Explicit tz2 calendar, so timestamps cannot be equal",
-          ts2, rs.getTimestamp(2, c2));
-      assertEquals(
-          "Explicit tz2 calendar, so timestamps must be equal",
-          ts1, rs.getTimestamp(2, c1));
-      rs.close();
-    } finally {
-      TimeZone.setDefault(null);
-      TestUtil.closeQuietly(rs);
-      TestUtil.closeQuietly(pstmt);
-      TestUtil.closeQuietly(stmt);
+    /**
+     * Test to check the internal cached timezone of a result set is used as expected.
+     */
+    @Test
+    public void testResultSetCachedTimezoneUsage() throws SQLException {
+        Statement stmt = null;
+        PreparedStatement pstmt = null;
+        ResultSet rs = null;
+        TimeZone tz1 = TimeZone.getTimeZone("GMT+8:00");
+        TimeZone tz2 = TimeZone.getTimeZone("GMT-2:00"); // 10 hour difference
+        Timestamp ts1 = new Timestamp(2016 - 1900, 0, 31, 3, 0, 0, 0);
+        Timestamp ts2 = new Timestamp(2016 - 1900, 0, 31, 13, 0, 0, 0); // 10 hour difference
+        Calendar c1 = new GregorianCalendar(tz1);
+        Calendar c2 = new GregorianCalendar(tz2);
+        try {
+            TimeZone.setDefault(tz1);
+            pstmt = con.prepareStatement("INSERT INTO testtz VALUES (?,?)");
+            pstmt.setInt(1, 1);
+            // We are in tz1, so timestamp added as tz1.
+            pstmt.setTimestamp(2, ts1);
+            pstmt.addBatch();
+            pstmt.executeBatch();
+            stmt = con.createStatement();
+            rs = stmt.executeQuery("SELECT col1, col2 FROM testtz");
+            rs.next();
+            rs.getInt(1);
+            assertEquals(
+                    "Current TZ is tz1, empty cache to be initialized to tz1 => retrieve in tz1, timestamps must be equal",
+                    ts1, rs.getTimestamp(2));
+            rs.close();
+            rs = stmt.executeQuery("SELECT col1, col2 FROM testtz");
+            rs.next();
+            rs.getInt(1);
+            TimeZone.setDefault(tz2);
+            assertEquals(
+                    "Current TZ is tz2, empty cache to be initialized to tz2 => retrieve in tz2, timestamps cannot be equal",
+                    ts2, rs.getTimestamp(2));
+            assertEquals(
+                    "Explicit tz1 calendar, so timestamps must be equal",
+                    ts1, rs.getTimestamp(2, c1));
+            assertEquals(
+                    "Cache was initialized to tz2, so timestamps cannot be equal",
+                    ts2, rs.getTimestamp(2));
+            TimeZone.setDefault(tz1);
+            assertEquals(
+                    "Cache was initialized to tz2, so timestamps cannot be equal",
+                    ts2, rs.getTimestamp(2));
+            rs.close();
+            rs = stmt.executeQuery("SELECT col1, col2 FROM testtz");
+            rs.next();
+            rs.getInt(1);
+            assertEquals(
+                    "Explicit tz2 calendar, so timestamps cannot be equal",
+                    ts2, rs.getTimestamp(2, c2));
+            assertEquals(
+                    "Current TZ is tz1, empty cache to be initialized to tz1 => retrieve in tz1, timestamps must be equal",
+                    ts1, rs.getTimestamp(2));
+            assertEquals(
+                    "Explicit tz2 calendar, so timestamps cannot be equal",
+                    ts2, rs.getTimestamp(2, c2));
+            assertEquals(
+                    "Explicit tz2 calendar, so timestamps must be equal",
+                    ts1, rs.getTimestamp(2, c1));
+            rs.close();
+        } finally {
+            TimeZone.setDefault(null);
+            TestUtil.closeQuietly(rs);
+            TestUtil.closeQuietly(pstmt);
+            TestUtil.closeQuietly(stmt);
+        }
     }
-  }
 
-  private void checkTimestamp(String checkText, Statement stmt, Timestamp ts, TimeZone tz)
-      throws SQLException {
-    TimeZone prevTz = TimeZone.getDefault();
-    TimeZone.setDefault(tz);
-    ResultSet rs = stmt.executeQuery("SELECT col2 FROM testtz");
-    rs.next();
-    Timestamp dbTs = rs.getTimestamp(1);
-    rs.close();
-    TimeZone.setDefault(prevTz);
-    assertEquals(checkText, ts, dbTs);
-  }
-
-  private TimeZone getTimeZoneCache(Object stmt) {
-    try {
-      Field defaultTimeZoneField = stmt.getClass().getDeclaredField("defaultTimeZone");
-      defaultTimeZoneField.setAccessible(true);
-      return (TimeZone) defaultTimeZoneField.get(stmt);
-    } catch (Exception e) {
+    private void checkTimestamp(String checkText, Statement stmt, Timestamp ts, TimeZone tz)
+            throws SQLException {
+        TimeZone prevTz = TimeZone.getDefault();
+        TimeZone.setDefault(tz);
+        ResultSet rs = stmt.executeQuery("SELECT col2 FROM testtz");
+        rs.next();
+        Timestamp dbTs = rs.getTimestamp(1);
+        rs.close();
+        TimeZone.setDefault(prevTz);
+        assertEquals(checkText, ts, dbTs);
     }
-    return null;
-  }
 
-  /* Set up the fixture for this test case: a connection to a database with
-  a table for this test. */
-  public void setUp() throws Exception {
-    super.setUp();
-    TimestampUtils timestampUtils = ((BaseConnection) con).getTimestampUtils();
-    Assume.assumeFalse("If connection has fast access to TimeZone.getDefault,"
-        + " then no cache is needed", timestampUtils.hasFastDefaultTimeZone());
+    private TimeZone getTimeZoneCache(Object stmt) {
+        try {
+            Field defaultTimeZoneField = stmt.getClass().getDeclaredField("defaultTimeZone");
+            defaultTimeZoneField.setAccessible(true);
+            return (TimeZone) defaultTimeZoneField.get(stmt);
+        } catch (Exception e) {
+        }
+        return null;
+    }
+
+    /* Set up the fixture for this test case: a connection to a database with
+    a table for this test. */
+    public void setUp() throws Exception {
+        super.setUp();
+        TimestampUtils timestampUtils = ((BaseConnection) con).getTimestampUtils();
+        Assume.assumeFalse("If connection has fast access to TimeZone.getDefault,"
+                + " then no cache is needed", timestampUtils.hasFastDefaultTimeZone());
     /* Drop the test table if it already exists for some reason. It is
     not an error if it doesn't exist. */
-    TestUtil.createTable(con, "testtz", "col1 INTEGER, col2 TIMESTAMP");
-  }
+        TestUtil.createTable(con, "testtz", "col1 INTEGER, col2 TIMESTAMP");
+    }
 
-  // Tear down the fixture for this test case.
-  public void tearDown() throws SQLException {
-    TestUtil.dropTable(con, "testtz");
-    super.tearDown();
-  }
+    // Tear down the fixture for this test case.
+    public void tearDown() throws SQLException {
+        TestUtil.dropTable(con, "testtz");
+        super.tearDown();
+    }
 
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/TimezoneTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/TimezoneTest.java
index 42ca9f7..76697c8 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/TimezoneTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/TimezoneTest.java
@@ -5,17 +5,6 @@
 
 package org.postgresql.test.jdbc2;
 
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertFalse;
-import static org.junit.jupiter.api.Assertions.assertTrue;
-
-import org.postgresql.PGProperty;
-import org.postgresql.test.TestUtil;
-
-import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.Test;
-
 import java.sql.Connection;
 import java.sql.Date;
 import java.sql.PreparedStatement;
@@ -30,6 +19,14 @@ import java.util.Calendar;
 import java.util.List;
 import java.util.Properties;
 import java.util.TimeZone;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.postgresql.PGProperty;
+import org.postgresql.test.TestUtil;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 /**
  * <p>Tests for time and date types with calendars involved. TimestampTest was melting my brain, so I
@@ -52,923 +49,921 @@ import java.util.TimeZone;
  * <p>(this matches what we must support per JDBC 3.0, tables B-5 and B-6)</p>
  */
 public class TimezoneTest {
-  private static final int DAY = 24 * 3600 * 1000;
-  private static final TimeZone saveTZ = TimeZone.getDefault();
-  private static final int PREPARE_THRESHOLD = 2;
-
-  private Connection con;
-
-  //
-  // We set up everything in different timezones to try to exercise many cases:
-  //
-  // default JVM timezone: GMT+0100
-  // server timezone: GMT+0300
-  // test timezones: GMT+0000 GMT+0100 GMT+0300 GMT+1300 GMT-0500
-
-  private final Calendar cUTC;
-  private final Calendar cGMT03;
-  private final Calendar cGMT05;
-  private final Calendar cGMT13;
-
-  public TimezoneTest() {
-    TimeZone tzUTC = TimeZone.getTimeZone("UTC"); // +0000 always
-    TimeZone tzGMT03 = TimeZone.getTimeZone("GMT+03"); // +0300 always
-    TimeZone tzGMT05 = TimeZone.getTimeZone("GMT-05"); // -0500 always
-    TimeZone tzGMT13 = TimeZone.getTimeZone("GMT+13"); // +1000 always
-
-    cUTC = Calendar.getInstance(tzUTC);
-    cGMT03 = Calendar.getInstance(tzGMT03);
-    cGMT05 = Calendar.getInstance(tzGMT05);
-    cGMT13 = Calendar.getInstance(tzGMT13);
-  }
-
-  @BeforeEach
-  void setUp() throws Exception {
-    // We must change the default TZ before establishing the connection.
-    // Arbitrary timezone that doesn't match our test timezones
-    TimeZone.setDefault(TimeZone.getTimeZone("GMT+01"));
-
-    connect();
-    TestUtil.createTable(con, "testtimezone",
-        "seq int4, tstz timestamp with time zone, ts timestamp without time zone, t time without time zone, tz time with time zone, d date");
-
-    // This is not obvious, but the "gmt-3" timezone is actually 3 hours *ahead* of GMT
-    // so will produce +03 timestamptz output
-    con.createStatement().executeUpdate("set timezone = 'gmt-3'");
-
-    // System.err.println("++++++ TESTS START (" + getName() + ") ++++++");
-  }
-
-  private void connect() throws Exception {
-    Properties p = new Properties();
-    PGProperty.PREPARE_THRESHOLD.set(p, 1);
-    con = TestUtil.openDB(p);
-  }
-
-  @AfterEach
-  void tearDown() throws Exception {
-    // System.err.println("++++++ TESTS END (" + getName() + ") ++++++");
-    TimeZone.setDefault(saveTZ);
-
-    TestUtil.dropTable(con, "testtimezone");
-    TestUtil.closeDB(con);
-  }
-
-  @Test
-  void getTimestamp() throws Exception {
-    con.createStatement().executeUpdate(
-        "INSERT INTO testtimezone(tstz,ts,t,tz,d) VALUES('2005-01-01 15:00:00 +0300', '2005-01-01 15:00:00', '15:00:00', '15:00:00 +0300', '2005-01-01')");
-
-    for (int i = 0; i < PREPARE_THRESHOLD; i++) {
-      String format = i == 0 ? ", text" : ", binary";
-      PreparedStatement ps = con.prepareStatement("SELECT tstz,ts,t,tz,d from testtimezone");
-      ResultSet rs = ps.executeQuery();
-
-      assertTrue(rs.next());
-      checkDatabaseContents("SELECT tstz::text,ts::text,t::text,tz::text,d::text from testtimezone",
-          new String[]{"2005-01-01 12:00:00+00", "2005-01-01 15:00:00", "15:00:00", "15:00:00+03",
-              "2005-01-01"});
-
-      Timestamp ts;
-      String str;
-
-      // timestamptz: 2005-01-01 15:00:00+03
-      ts = rs.getTimestamp(1); // Represents an instant in time, timezone is irrelevant.
-      assertEquals(1104580800000L, ts.getTime()); // 2005-01-01 12:00:00 UTC
-      ts = rs.getTimestamp(1, cUTC); // Represents an instant in time, timezone is irrelevant.
-      assertEquals(1104580800000L, ts.getTime()); // 2005-01-01 12:00:00 UTC
-      ts = rs.getTimestamp(1, cGMT03); // Represents an instant in time, timezone is irrelevant.
-      assertEquals(1104580800000L, ts.getTime()); // 2005-01-01 12:00:00 UTC
-      ts = rs.getTimestamp(1, cGMT05); // Represents an instant in time, timezone is irrelevant.
-      assertEquals(1104580800000L, ts.getTime()); // 2005-01-01 12:00:00 UTC
-      ts = rs.getTimestamp(1, cGMT13); // Represents an instant in time, timezone is irrelevant.
-      assertEquals(1104580800000L, ts.getTime()); // 2005-01-01 12:00:00 UTC
-      str = rs.getString(1);
-      assertEquals("2005-01-01 15:00:00+03", str, "tstz -> getString" + format);
-
-      // timestamp: 2005-01-01 15:00:00
-      ts = rs.getTimestamp(2); // Convert timestamp to +0100
-      assertEquals(1104588000000L, ts.getTime()); // 2005-01-01 15:00:00 +0100
-      ts = rs.getTimestamp(2, cUTC); // Convert timestamp to UTC
-      assertEquals(1104591600000L, ts.getTime()); // 2005-01-01 15:00:00 +0000
-      ts = rs.getTimestamp(2, cGMT03); // Convert timestamp to +0300
-      assertEquals(1104580800000L, ts.getTime()); // 2005-01-01 15:00:00 +0300
-      ts = rs.getTimestamp(2, cGMT05); // Convert timestamp to -0500
-      assertEquals(1104609600000L, ts.getTime()); // 2005-01-01 15:00:00 -0500
-      ts = rs.getTimestamp(2, cGMT13); // Convert timestamp to +1300
-      assertEquals(1104544800000L, ts.getTime()); // 2005-01-01 15:00:00 +1300
-      str = rs.getString(2);
-      assertEquals("2005-01-01 15:00:00", str, "ts -> getString" + format);
-
-      // time: 15:00:00
-      ts = rs.getTimestamp(3);
-      assertEquals(50400000L, ts.getTime()); // 1970-01-01 15:00:00 +0100
-      ts = rs.getTimestamp(3, cUTC);
-      assertEquals(54000000L, ts.getTime()); // 1970-01-01 15:00:00 +0000
-      ts = rs.getTimestamp(3, cGMT03);
-      assertEquals(43200000L, ts.getTime()); // 1970-01-01 15:00:00 +0300
-      ts = rs.getTimestamp(3, cGMT05);
-      assertEquals(72000000L, ts.getTime()); // 1970-01-01 15:00:00 -0500
-      ts = rs.getTimestamp(3, cGMT13);
-      assertEquals(7200000L, ts.getTime()); // 1970-01-01 15:00:00 +1300
-      str = rs.getString(3);
-      assertEquals("15:00:00", str, "time -> getString" + format);
-
-      // timetz: 15:00:00+03
-      ts = rs.getTimestamp(4);
-      // 1970-01-01 15:00:00 +0300 -> 1970-01-01 13:00:00 +0100
-      assertEquals(43200000L, ts.getTime());
-      ts = rs.getTimestamp(4, cUTC);
-      // 1970-01-01 15:00:00 +0300 -> 1970-01-01 12:00:00 +0000
-      assertEquals(43200000L, ts.getTime());
-      ts = rs.getTimestamp(4, cGMT03);
-      // 1970-01-01 15:00:00 +0300 -> 1970-01-01 15:00:00 +0300
-      assertEquals(43200000L, ts.getTime());
-      ts = rs.getTimestamp(4, cGMT05);
-      // 1970-01-01 15:00:00 +0300 -> 1970-01-01 07:00:00 -0500
-      assertEquals(43200000L, ts.getTime());
-      ts = rs.getTimestamp(4, cGMT13);
-      // 1970-01-01 15:00:00 +0300 -> 1970-01-02 01:00:00 +1300
-      assertEquals(43200000L, ts.getTime());
-      str = rs.getString(4);
-      assertEquals("15:00:00+03", str, "timetz -> getString" + format);
-
-      // date: 2005-01-01
-      ts = rs.getTimestamp(5);
-      assertEquals(1104534000000L, ts.getTime()); // 2005-01-01 00:00:00 +0100
-      ts = rs.getTimestamp(5, cUTC);
-      assertEquals(1104537600000L, ts.getTime()); // 2005-01-01 00:00:00 +0000
-      ts = rs.getTimestamp(5, cGMT03);
-      assertEquals(1104526800000L, ts.getTime()); // 2005-01-01 00:00:00 +0300
-      ts = rs.getTimestamp(5, cGMT05);
-      assertEquals(1104555600000L, ts.getTime()); // 2005-01-01 00:00:00 -0500
-      ts = rs.getTimestamp(5, cGMT13);
-      assertEquals(1104490800000L, ts.getTime()); // 2005-01-01 00:00:00 +1300
-      str = rs.getString(5);
-      assertEquals("2005-01-01", str, "date -> getString" + format);
-
-      assertFalse(rs.next());
-      ps.close();
-    }
-  }
-
-  @Test
-  void getDate() throws Exception {
-    con.createStatement().executeUpdate(
-        "INSERT INTO testtimezone(tstz,ts,d) VALUES('2005-01-01 15:00:00 +0300', '2005-01-01 15:00:00', '2005-01-01')");
-
-    PreparedStatement ps = con.prepareStatement("SELECT tstz,ts,d from testtimezone");
-    for (int i = 0; i < PREPARE_THRESHOLD; i++) {
-      ResultSet rs = ps.executeQuery();
-
-      assertTrue(rs.next());
-      checkDatabaseContents("SELECT tstz::text,ts::text,d::text from testtimezone",
-          new String[]{"2005-01-01 12:00:00+00", "2005-01-01 15:00:00", "2005-01-01"});
-
-      Date d;
-
-      // timestamptz: 2005-01-01 15:00:00+03
-      d = rs.getDate(1); // 2005-01-01 13:00:00 +0100 -> 2005-01-01 00:00:00 +0100
-      assertEquals(1104534000000L, d.getTime());
-      d = rs.getDate(1, cUTC); // 2005-01-01 12:00:00 +0000 -> 2005-01-01 00:00:00 +0000
-      assertEquals(1104537600000L, d.getTime());
-      d = rs.getDate(1, cGMT03); // 2005-01-01 15:00:00 +0300 -> 2005-01-01 00:00:00 +0300
-      assertEquals(1104526800000L, d.getTime());
-      d = rs.getDate(1, cGMT05); // 2005-01-01 07:00:00 -0500 -> 2005-01-01 00:00:00 -0500
-      assertEquals(1104555600000L, d.getTime());
-      d = rs.getDate(1, cGMT13); // 2005-01-02 01:00:00 +1300 -> 2005-01-02 00:00:00 +1300
-      assertEquals(1104577200000L, d.getTime());
-
-      // timestamp: 2005-01-01 15:00:00
-      d = rs.getDate(2); // 2005-01-01 00:00:00 +0100
-      assertEquals(1104534000000L, d.getTime());
-      d = rs.getDate(2, cUTC); // 2005-01-01 00:00:00 +0000
-      assertEquals(1104537600000L, d.getTime());
-      d = rs.getDate(2, cGMT03); // 2005-01-01 00:00:00 +0300
-      assertEquals(1104526800000L, d.getTime());
-      d = rs.getDate(2, cGMT05); // 2005-01-01 00:00:00 -0500
-      assertEquals(1104555600000L, d.getTime());
-      d = rs.getDate(2, cGMT13); // 2005-01-01 00:00:00 +1300
-      assertEquals(1104490800000L, d.getTime());
-
-      // date: 2005-01-01
-      d = rs.getDate(3); // 2005-01-01 00:00:00 +0100
-      assertEquals(1104534000000L, d.getTime());
-      d = rs.getDate(3, cUTC); // 2005-01-01 00:00:00 +0000
-      assertEquals(1104537600000L, d.getTime());
-      d = rs.getDate(3, cGMT03); // 2005-01-01 00:00:00 +0300
-      assertEquals(1104526800000L, d.getTime());
-      d = rs.getDate(3, cGMT05); // 2005-01-01 00:00:00 -0500
-      assertEquals(1104555600000L, d.getTime());
-      d = rs.getDate(3, cGMT13); // 2005-01-01 00:00:00 +1300
-      assertEquals(1104490800000L, d.getTime());
-
-      assertFalse(rs.next());
-      rs.close();
-    }
-  }
-
-  @Test
-  void getTime() throws Exception {
-    con.createStatement().executeUpdate(
-        "INSERT INTO testtimezone(tstz,ts,t,tz) VALUES('2005-01-01 15:00:00 +0300', '2005-01-01 15:00:00', '15:00:00', '15:00:00 +0300')");
-
-    PreparedStatement ps = con.prepareStatement("SELECT tstz,ts,t,tz from testtimezone");
-    for (int i = 0; i < PREPARE_THRESHOLD; i++) {
-      ResultSet rs = ps.executeQuery();
-
-      assertTrue(rs.next());
-      checkDatabaseContents("SELECT tstz::text,ts::text,t::text,tz::text,d::text from testtimezone",
-          new String[]{"2005-01-01 12:00:00+00", "2005-01-01 15:00:00", "15:00:00", "15:00:00+03"});
-
-      Time t;
-
-      // timestamptz: 2005-01-01 15:00:00+03
-      t = rs.getTime(1);
-      // 2005-01-01 13:00:00 +0100 -> 1970-01-01 13:00:00 +0100
-      assertEquals(43200000L, t.getTime());
-      t = rs.getTime(1, cUTC);
-      // 2005-01-01 12:00:00 +0000 -> 1970-01-01 12:00:00 +0000
-      assertEquals(43200000L, t.getTime());
-      t = rs.getTime(1, cGMT03);
-      // 2005-01-01 15:00:00 +0300 -> 1970-01-01 15:00:00 +0300
-      assertEquals(43200000L, t.getTime());
-      t = rs.getTime(1, cGMT05);
-      // 2005-01-01 07:00:00 -0500 -> 1970-01-01 07:00:00 -0500
-      assertEquals(43200000L, t.getTime());
-      t = rs.getTime(1, cGMT13);
-      // 2005-01-02 01:00:00 +1300 -> 1970-01-01 01:00:00 +1300
-      assertEquals(43200000L, t.getTime());
-
-      // timestamp: 2005-01-01 15:00:00
-      t = rs.getTime(2);
-      assertEquals(50400000L, t.getTime()); // 1970-01-01 15:00:00 +0100
-      t = rs.getTime(2, cUTC);
-      assertEquals(54000000L, t.getTime()); // 1970-01-01 15:00:00 +0000
-      t = rs.getTime(2, cGMT03);
-      assertEquals(43200000L, t.getTime()); // 1970-01-01 15:00:00 +0300
-      t = rs.getTime(2, cGMT05);
-      assertEquals(72000000L, t.getTime()); // 1970-01-01 15:00:00 -0500
-      t = rs.getTime(2, cGMT13);
-      assertEquals(7200000L, t.getTime()); // 1970-01-01 15:00:00 +1300
-
-      // time: 15:00:00
-      t = rs.getTime(3);
-      assertEquals(50400000L, t.getTime()); // 1970-01-01 15:00:00 +0100
-      t = rs.getTime(3, cUTC);
-      assertEquals(54000000L, t.getTime()); // 1970-01-01 15:00:00 +0000
-      t = rs.getTime(3, cGMT03);
-      assertEquals(43200000L, t.getTime()); // 1970-01-01 15:00:00 +0300
-      t = rs.getTime(3, cGMT05);
-      assertEquals(72000000L, t.getTime()); // 1970-01-01 15:00:00 -0500
-      t = rs.getTime(3, cGMT13);
-      assertEquals(7200000L, t.getTime()); // 1970-01-01 15:00:00 +1300
-
-      // timetz: 15:00:00+03
-      t = rs.getTime(4);
-      assertEquals(43200000L, t.getTime()); // 1970-01-01 13:00:00 +0100
-      t = rs.getTime(4, cUTC);
-      assertEquals(43200000L, t.getTime()); // 1970-01-01 12:00:00 +0000
-      t = rs.getTime(4, cGMT03);
-      assertEquals(43200000L, t.getTime()); // 1970-01-01 15:00:00 +0300
-      t = rs.getTime(4, cGMT05);
-      assertEquals(43200000L, t.getTime()); // 1970-01-01 07:00:00 -0500
-      t = rs.getTime(4, cGMT13);
-      assertEquals(43200000L, t.getTime()); // 1970-01-01 01:00:00 +1300
-      rs.close();
-    }
-  }
-
-  /**
-   * This test is broken off from testSetTimestamp because it does not work for pre-7.4 servers and
-   * putting tons of conditionals in that test makes it largely unreadable. The time data type does
-   * not accept timestamp with time zone style input on these servers.
-   */
-  @Test
-  void setTimestampOnTime() throws Exception {
-    // Pre-7.4 servers cannot convert timestamps with timezones to times.
-    for (int i = 0; i < PREPARE_THRESHOLD; i++) {
-      con.createStatement().execute("delete from testtimezone");
-      PreparedStatement insertTimestamp =
-          con.prepareStatement("INSERT INTO testtimezone(seq,t) VALUES (?,?)");
-      int seq = 1;
-
-      Timestamp instant = new Timestamp(1104580800000L); // 2005-01-01 12:00:00 UTC
-      Timestamp instantTime = new Timestamp(instant.getTime() % DAY);
-
-      // +0100 (JVM default)
-      insertTimestamp.setInt(1, seq++);
-      insertTimestamp.setTimestamp(2, instant); // 13:00:00
-      insertTimestamp.executeUpdate();
-
-      // UTC
-      insertTimestamp.setInt(1, seq++);
-      insertTimestamp.setTimestamp(2, instant, cUTC); // 12:00:00
-      insertTimestamp.executeUpdate();
-
-      // +0300
-      insertTimestamp.setInt(1, seq++);
-      insertTimestamp.setTimestamp(2, instant, cGMT03); // 15:00:00
-      insertTimestamp.executeUpdate();
-
-      // -0500
-      insertTimestamp.setInt(1, seq++);
-      insertTimestamp.setTimestamp(2, instant, cGMT05); // 07:00:00
-      insertTimestamp.executeUpdate();
-
-      // +1300
-      insertTimestamp.setInt(1, seq++);
-      insertTimestamp.setTimestamp(2, instant, cGMT13); // 01:00:00
-      insertTimestamp.executeUpdate();
-
-      insertTimestamp.close();
-
-      checkDatabaseContents("SELECT seq::text,t::text from testtimezone ORDER BY seq",
-          new String[][]{new String[]{"1", "13:00:00"}, new String[]{"2", "12:00:00"},
-              new String[]{"3", "15:00:00"}, new String[]{"4", "07:00:00"},
-              new String[]{"5", "01:00:00"}});
-
-      seq = 1;
-      PreparedStatement ps = con.prepareStatement("SELECT seq,t FROM testtimezone ORDER BY seq");
-      ResultSet rs = ps.executeQuery();
-
-      assertTrue(rs.next());
-      assertEquals(seq++, rs.getInt(1));
-      assertEquals(instantTime, rs.getTimestamp(2));
-
-      assertTrue(rs.next());
-      assertEquals(seq++, rs.getInt(1));
-      assertEquals(instantTime, rs.getTimestamp(2, cUTC));
-
-      assertTrue(rs.next());
-      assertEquals(seq++, rs.getInt(1));
-      assertEquals(instantTime, rs.getTimestamp(2, cGMT03));
-
-      assertTrue(rs.next());
-      assertEquals(seq++, rs.getInt(1));
-      assertEquals(instantTime, rs.getTimestamp(2, cGMT05));
-
-      assertTrue(rs.next());
-      assertEquals(seq++, rs.getInt(1));
-      assertEquals(normalizeTimeOfDayPart(instantTime, cGMT13), rs.getTimestamp(2, cGMT13));
-
-      assertFalse(rs.next());
-      ps.close();
-    }
-  }
-
-  @Test
-  void setTimestamp() throws Exception {
-    for (int i = 0; i < PREPARE_THRESHOLD; i++) {
-      con.createStatement().execute("delete from testtimezone");
-      PreparedStatement insertTimestamp =
-          con.prepareStatement("INSERT INTO testtimezone(seq,tstz,ts,tz,d) VALUES (?,?,?,?,?)");
-      int seq = 1;
-
-      Timestamp instant = new Timestamp(1104580800000L); // 2005-01-01 12:00:00 UTC
-      Timestamp instantTime = new Timestamp(instant.getTime() % DAY);
-      Timestamp instantDateJVM = new Timestamp(
-          instant.getTime() - (instant.getTime() % DAY) - TimeZone.getDefault().getRawOffset());
-      Timestamp instantDateUTC = new Timestamp(
-          instant.getTime() - (instant.getTime() % DAY) - cUTC.getTimeZone().getRawOffset());
-      Timestamp instantDateGMT03 = new Timestamp(
-          instant.getTime() - (instant.getTime() % DAY) - cGMT03.getTimeZone().getRawOffset());
-      Timestamp instantDateGMT05 = new Timestamp(
-          instant.getTime() - (instant.getTime() % DAY) - cGMT05.getTimeZone().getRawOffset());
-      Timestamp instantDateGMT13 = new Timestamp(instant.getTime() - (instant.getTime() % DAY)
-          - cGMT13.getTimeZone().getRawOffset() + DAY);
-
-      // +0100 (JVM default)
-      insertTimestamp.setInt(1, seq++);
-      insertTimestamp.setTimestamp(2, instant); // 2005-01-01 13:00:00 +0100
-      insertTimestamp.setTimestamp(3, instant); // 2005-01-01 13:00:00
-      insertTimestamp.setTimestamp(4, instant); // 13:00:00 +0100
-      insertTimestamp.setTimestamp(5, instant); // 2005-01-01
-      insertTimestamp.executeUpdate();
-
-      // UTC
-      insertTimestamp.setInt(1, seq++);
-      insertTimestamp.setTimestamp(2, instant, cUTC); // 2005-01-01 12:00:00 +0000
-      insertTimestamp.setTimestamp(3, instant, cUTC); // 2005-01-01 12:00:00
-      insertTimestamp.setTimestamp(4, instant, cUTC); // 12:00:00 +0000
-      insertTimestamp.setTimestamp(5, instant, cUTC); // 2005-01-01
-      insertTimestamp.executeUpdate();
-
-      // +0300
-      insertTimestamp.setInt(1, seq++);
-      insertTimestamp.setTimestamp(2, instant, cGMT03); // 2005-01-01 15:00:00 +0300
-      insertTimestamp.setTimestamp(3, instant, cGMT03); // 2005-01-01 15:00:00
-      insertTimestamp.setTimestamp(4, instant, cGMT03); // 15:00:00 +0300
-      insertTimestamp.setTimestamp(5, instant, cGMT03); // 2005-01-01
-      insertTimestamp.executeUpdate();
-
-      // -0500
-      insertTimestamp.setInt(1, seq++);
-      insertTimestamp.setTimestamp(2, instant, cGMT05); // 2005-01-01 07:00:00 -0500
-      insertTimestamp.setTimestamp(3, instant, cGMT05); // 2005-01-01 07:00:00
-      insertTimestamp.setTimestamp(4, instant, cGMT05); // 07:00:00 -0500
-      insertTimestamp.setTimestamp(5, instant, cGMT05); // 2005-01-01
-      insertTimestamp.executeUpdate();
-
-      // +1300
-      insertTimestamp.setInt(1, seq++);
-      insertTimestamp.setTimestamp(2, instant, cGMT13); // 2005-01-02 01:00:00 +1300
-      insertTimestamp.setTimestamp(3, instant, cGMT13); // 2005-01-02 01:00:00
-      insertTimestamp.setTimestamp(4, instant, cGMT13); // 01:00:00 +1300
-      insertTimestamp.setTimestamp(5, instant, cGMT13); // 2005-01-02
-      insertTimestamp.executeUpdate();
-
-      insertTimestamp.close();
-
-      // check that insert went correctly by parsing the raw contents in UTC
-      checkDatabaseContents(
-          "SELECT seq::text,tstz::text,ts::text,tz::text,d::text from testtimezone ORDER BY seq",
-          new String[][]{
-              new String[]{"1", "2005-01-01 12:00:00+00", "2005-01-01 13:00:00", "13:00:00+01",
-                  "2005-01-01"},
-              new String[]{"2", "2005-01-01 12:00:00+00", "2005-01-01 12:00:00", "12:00:00+00",
-                  "2005-01-01"},
-              new String[]{"3", "2005-01-01 12:00:00+00", "2005-01-01 15:00:00", "15:00:00+03",
-                  "2005-01-01"},
-              new String[]{"4", "2005-01-01 12:00:00+00", "2005-01-01 07:00:00", "07:00:00-05",
-                  "2005-01-01"},
-              new String[]{"5", "2005-01-01 12:00:00+00", "2005-01-02 01:00:00", "01:00:00+13",
-                  "2005-01-02"}});
-
-      //
-      // check results
-      //
-
-      seq = 1;
-      PreparedStatement ps =
-          con.prepareStatement("SELECT seq,tstz,ts,tz,d FROM testtimezone ORDER BY seq");
-      ResultSet rs = ps.executeQuery();
-
-      assertTrue(rs.next());
-      assertEquals(seq++, rs.getInt(1));
-      assertEquals(instant, rs.getTimestamp(2));
-      assertEquals(instant, rs.getTimestamp(3));
-      assertEquals(instantTime, rs.getTimestamp(4));
-      assertEquals(instantDateJVM, rs.getTimestamp(5));
-
-      assertTrue(rs.next());
-      assertEquals(seq++, rs.getInt(1));
-      assertEquals(instant, rs.getTimestamp(2, cUTC));
-      assertEquals(instant, rs.getTimestamp(3, cUTC));
-      assertEquals(instantTime, rs.getTimestamp(4, cUTC));
-      assertEquals(instantDateUTC, rs.getTimestamp(5, cUTC));
-
-      assertTrue(rs.next());
-      assertEquals(seq++, rs.getInt(1));
-      assertEquals(instant, rs.getTimestamp(2, cGMT03));
-      assertEquals(instant, rs.getTimestamp(3, cGMT03));
-      assertEquals(instantTime, rs.getTimestamp(4, cGMT03));
-      assertEquals(instantDateGMT03, rs.getTimestamp(5, cGMT03));
-
-      assertTrue(rs.next());
-      assertEquals(seq++, rs.getInt(1));
-      assertEquals(instant, rs.getTimestamp(2, cGMT05));
-      assertEquals(instant, rs.getTimestamp(3, cGMT05));
-      assertEquals(instantTime, rs.getTimestamp(4, cGMT05));
-      assertEquals(instantDateGMT05, rs.getTimestamp(5, cGMT05));
-
-      assertTrue(rs.next());
-      assertEquals(seq++, rs.getInt(1));
-      assertEquals(instant, rs.getTimestamp(2, cGMT13));
-      assertEquals(instant, rs.getTimestamp(3, cGMT13));
-      assertEquals(normalizeTimeOfDayPart(instantTime, cGMT13), rs.getTimestamp(4, cGMT13));
-      assertEquals(instantDateGMT13, rs.getTimestamp(5, cGMT13));
-
-      assertFalse(rs.next());
-      ps.close();
-    }
-  }
-
-  @Test
-  void setDate() throws Exception {
-    for (int i = 0; i < PREPARE_THRESHOLD; i++) {
-      con.createStatement().execute("delete from testtimezone");
-      PreparedStatement insertTimestamp =
-          con.prepareStatement("INSERT INTO testtimezone(seq,tstz,ts,d) VALUES (?,?,?,?)");
-
-      int seq = 1;
-
-      Date dJVM;
-      Date dUTC;
-      Date dGMT03;
-      Date dGMT05;
-      Date dGMT13 = null;
-
-      // +0100 (JVM default)
-      dJVM = new Date(1104534000000L); // 2005-01-01 00:00:00 +0100
-      insertTimestamp.setInt(1, seq++);
-      insertTimestamp.setDate(2, dJVM); // 2005-01-01 00:00:00 +0100
-      insertTimestamp.setDate(3, dJVM); // 2005-01-01 00:00:00
-      insertTimestamp.setDate(4, dJVM); // 2005-01-01
-      insertTimestamp.executeUpdate();
-
-      // UTC
-      dUTC = new Date(1104537600000L); // 2005-01-01 00:00:00 +0000
-      insertTimestamp.setInt(1, seq++);
-      insertTimestamp.setDate(2, dUTC, cUTC); // 2005-01-01 00:00:00 +0000
-      insertTimestamp.setDate(3, dUTC, cUTC); // 2005-01-01 00:00:00
-      insertTimestamp.setDate(4, dUTC, cUTC); // 2005-01-01
-      insertTimestamp.executeUpdate();
-
-      // +0300
-      dGMT03 = new Date(1104526800000L); // 2005-01-01 00:00:00 +0300
-      insertTimestamp.setInt(1, seq++);
-      insertTimestamp.setDate(2, dGMT03, cGMT03); // 2005-01-01 00:00:00 +0300
-      insertTimestamp.setDate(3, dGMT03, cGMT03); // 2005-01-01 00:00:00
-      insertTimestamp.setDate(4, dGMT03, cGMT03); // 2005-01-01
-      insertTimestamp.executeUpdate();
-
-      // -0500
-      dGMT05 = new Date(1104555600000L); // 2005-01-01 00:00:00 -0500
-      insertTimestamp.setInt(1, seq++);
-      insertTimestamp.setDate(2, dGMT05, cGMT05); // 2005-01-01 00:00:00 -0500
-      insertTimestamp.setDate(3, dGMT05, cGMT05); // 2005-01-01 00:00:00
-      insertTimestamp.setDate(4, dGMT05, cGMT05); // 2005-01-01
-      insertTimestamp.executeUpdate();
-
-      // +1300
-      dGMT13 = new Date(1104490800000L); // 2005-01-01 00:00:00 +1300
-      insertTimestamp.setInt(1, seq++);
-      insertTimestamp.setDate(2, dGMT13, cGMT13); // 2005-01-01 00:00:00 +1300
-      insertTimestamp.setDate(3, dGMT13, cGMT13); // 2005-01-01 00:00:00
-      insertTimestamp.setDate(4, dGMT13, cGMT13); // 2005-01-01
-      insertTimestamp.executeUpdate();
-
-      insertTimestamp.close();
-
-      // check that insert went correctly by parsing the raw contents in UTC
-      checkDatabaseContents(
-          "SELECT seq::text,tstz::text,ts::text,d::text from testtimezone ORDER BY seq",
-          new String[][]{
-              new String[]{"1", "2004-12-31 23:00:00+00", "2005-01-01 00:00:00", "2005-01-01"},
-              new String[]{"2", "2005-01-01 00:00:00+00", "2005-01-01 00:00:00", "2005-01-01"},
-              new String[]{"3", "2004-12-31 21:00:00+00", "2005-01-01 00:00:00", "2005-01-01"},
-              new String[]{"4", "2005-01-01 05:00:00+00", "2005-01-01 00:00:00", "2005-01-01"},
-              new String[]{"5", "2004-12-31 11:00:00+00", "2005-01-01 00:00:00", "2005-01-01"}});
-      //
-      // check results
-      //
-
-      seq = 1;
-      PreparedStatement ps =
-          con.prepareStatement("SELECT seq,tstz,ts,d FROM testtimezone ORDER BY seq");
-      ResultSet rs = ps.executeQuery();
-
-      assertTrue(rs.next());
-      assertEquals(seq++, rs.getInt(1));
-      assertEquals(dJVM, rs.getDate(2));
-      assertEquals(dJVM, rs.getDate(3));
-      assertEquals(dJVM, rs.getDate(4));
-
-      assertTrue(rs.next());
-      assertEquals(seq++, rs.getInt(1));
-      assertEquals(dUTC, rs.getDate(2, cUTC));
-      assertEquals(dUTC, rs.getDate(3, cUTC));
-      assertEquals(dUTC, rs.getDate(4, cUTC));
-
-      assertTrue(rs.next());
-      assertEquals(seq++, rs.getInt(1));
-      assertEquals(dGMT03, rs.getDate(2, cGMT03));
-      assertEquals(dGMT03, rs.getDate(3, cGMT03));
-      assertEquals(dGMT03, rs.getDate(4, cGMT03));
-
-      assertTrue(rs.next());
-      assertEquals(seq++, rs.getInt(1));
-      assertEquals(dGMT05, rs.getDate(2, cGMT05));
-      assertEquals(dGMT05, rs.getDate(3, cGMT05));
-      assertEquals(dGMT05, rs.getDate(4, cGMT05));
-
-      assertTrue(rs.next());
-      assertEquals(seq++, rs.getInt(1));
-      assertEquals(dGMT13, rs.getDate(2, cGMT13));
-      assertEquals(dGMT13, rs.getDate(3, cGMT13));
-      assertEquals(dGMT13, rs.getDate(4, cGMT13));
-
-      assertFalse(rs.next());
-      ps.close();
-    }
-  }
-
-  @Test
-  void setTime() throws Exception {
-    for (int i = 0; i < PREPARE_THRESHOLD; i++) {
-      con.createStatement().execute("delete from testtimezone");
-      PreparedStatement insertTimestamp =
-          con.prepareStatement("INSERT INTO testtimezone(seq,t,tz) VALUES (?,?,?)");
-
-      int seq = 1;
-
-      Time tJVM;
-      Time tUTC;
-      Time tGMT03;
-      Time tGMT05;
-      Time tGMT13;
-
-      // +0100 (JVM default)
-      tJVM = new Time(50400000L); // 1970-01-01 15:00:00 +0100
-      insertTimestamp.setInt(1, seq++);
-      insertTimestamp.setTime(2, tJVM); // 15:00:00
-      insertTimestamp.setTime(3, tJVM); // 15:00:00+03
-      insertTimestamp.executeUpdate();
-
-      // UTC
-      tUTC = new Time(54000000L); // 1970-01-01 15:00:00 +0000
-      insertTimestamp.setInt(1, seq++);
-      insertTimestamp.setTime(2, tUTC, cUTC); // 15:00:00
-      insertTimestamp.setTime(3, tUTC, cUTC); // 15:00:00+00
-      insertTimestamp.executeUpdate();
-
-      // +0300
-      tGMT03 = new Time(43200000L); // 1970-01-01 15:00:00 +0300
-      insertTimestamp.setInt(1, seq++);
-      insertTimestamp.setTime(2, tGMT03, cGMT03); // 15:00:00
-      insertTimestamp.setTime(3, tGMT03, cGMT03); // 15:00:00+03
-      insertTimestamp.executeUpdate();
-
-      // -0500
-      tGMT05 = new Time(72000000L); // 1970-01-01 15:00:00 -0500
-      insertTimestamp.setInt(1, seq++);
-      insertTimestamp.setTime(2, tGMT05, cGMT05); // 15:00:00
-      insertTimestamp.setTime(3, tGMT05, cGMT05); // 15:00:00-05
-      insertTimestamp.executeUpdate();
-
-      // +1300
-      tGMT13 = new Time(7200000L); // 1970-01-01 15:00:00 +1300
-      insertTimestamp.setInt(1, seq++);
-      insertTimestamp.setTime(2, tGMT13, cGMT13); // 15:00:00
-      insertTimestamp.setTime(3, tGMT13, cGMT13); // 15:00:00+13
-      insertTimestamp.executeUpdate();
-
-      insertTimestamp.close();
-
-      // check that insert went correctly by parsing the raw contents in UTC
-      checkDatabaseContents("SELECT seq::text,t::text,tz::text from testtimezone ORDER BY seq",
-          new String[][]{new String[]{"1", "15:00:00", "15:00:00+01",},
-              new String[]{"2", "15:00:00", "15:00:00+00",},
-              new String[]{"3", "15:00:00", "15:00:00+03",},
-              new String[]{"4", "15:00:00", "15:00:00-05",},
-              new String[]{"5", "15:00:00", "15:00:00+13",}});
-
-      //
-      // check results
-      //
-
-      seq = 1;
-      PreparedStatement ps = con.prepareStatement("SELECT seq,t,tz FROM testtimezone ORDER BY seq");
-      ResultSet rs = ps.executeQuery();
-
-      assertTrue(rs.next());
-      assertEquals(seq++, rs.getInt(1));
-      assertEquals(tJVM, rs.getTime(2));
-      assertEquals(tJVM, rs.getTime(3));
-
-      assertTrue(rs.next());
-      assertEquals(seq++, rs.getInt(1));
-      assertEquals(tUTC, rs.getTime(2, cUTC));
-      assertEquals(tUTC, rs.getTime(3, cUTC));
-
-      assertTrue(rs.next());
-      assertEquals(seq++, rs.getInt(1));
-      assertEquals(tGMT03, rs.getTime(2, cGMT03));
-      assertEquals(tGMT03, rs.getTime(3, cGMT03));
-
-      assertTrue(rs.next());
-      assertEquals(seq++, rs.getInt(1));
-      assertEquals(tGMT05, rs.getTime(2, cGMT05));
-      assertEquals(tGMT05, rs.getTime(3, cGMT05));
-
-      assertTrue(rs.next());
-      assertEquals(seq++, rs.getInt(1));
-      assertEquals(tGMT13, rs.getTime(2, cGMT13));
-      assertEquals(tGMT13, rs.getTime(3, cGMT13));
-
-      assertFalse(rs.next());
-      ps.close();
-    }
-  }
-
-  @Test
-  void halfHourTimezone() throws Exception {
-    Statement stmt = con.createStatement();
-    stmt.execute("SET TimeZone = 'GMT+3:30'");
-    for (int i = 0; i < PREPARE_THRESHOLD; i++) {
-      PreparedStatement ps = con.prepareStatement("SELECT '1969-12-31 20:30:00'::timestamptz");
-      ResultSet rs = ps.executeQuery();
-      assertTrue(rs.next());
-      assertEquals(0L, rs.getTimestamp(1).getTime());
-      ps.close();
-    }
-  }
-
-  @Test
-  void timezoneWithSeconds() throws SQLException {
-    Statement stmt = con.createStatement();
-    stmt.execute("SET TimeZone = 'Europe/Paris'");
-    for (int i = 0; i < PREPARE_THRESHOLD; i++) {
-      PreparedStatement ps = con.prepareStatement("SELECT '1920-01-01'::timestamptz");
-      ResultSet rs = ps.executeQuery();
-      rs.next();
-      // select extract(epoch from '1920-01-01'::timestamptz - 'epoch'::timestamptz) * 1000;
-
-      assertEquals(-1577923200000L, rs.getTimestamp(1).getTime());
-      ps.close();
-    }
-  }
-
-  @Test
-  void localTimestampsInNonDSTZones() throws Exception {
-    for (int i = -12; i <= 13; i++) {
-      localTimestamps(String.format("GMT%02d", i));
-    }
-  }
-
-  @Test
-  void localTimestampsInAfricaCasablanca() throws Exception {
-    localTimestamps("Africa/Casablanca"); // It is something like GMT+0..GMT+1
-  }
-
-  @Test
-  void localTimestampsInAtlanticAzores() throws Exception {
-    localTimestamps("Atlantic/Azores"); // It is something like GMT-1..GMT+0
-  }
-
-  @Test
-  void localTimestampsInEuropeMoscow() throws Exception {
-    localTimestamps("Europe/Moscow"); // It is something like GMT+3..GMT+4 for 2000s
-  }
-
-  @Test
-  void localTimestampsInPacificApia() throws Exception {
-    localTimestamps("Pacific/Apia"); // It is something like GMT+13..GMT+14
-  }
-
-  @Test
-  void localTimestampsInPacificNiue() throws Exception {
-    localTimestamps("Pacific/Niue"); // It is something like GMT-11..GMT-11
-  }
-
-  @Test
-  void localTimestampsInAmericaAdak() throws Exception {
-    localTimestamps("America/Adak"); // It is something like GMT-10..GMT-9
-  }
-
-  private String setTimeTo00_00_00(String timestamp) {
-    return timestamp.substring(0, 11) + "00:00:00";
-  }
-
-  public void localTimestamps(String timeZone) throws Exception {
-    TimeZone.setDefault(TimeZone.getTimeZone(timeZone));
-
-    final String testDateFormat = "yyyy-MM-dd HH:mm:ss";
-    final List<String> datesToTest = Arrays.asList("2015-09-03 12:00:00", "2015-06-30 23:59:58",
-        "1997-06-30 23:59:59", "1997-07-01 00:00:00", "2012-06-30 23:59:59", "2012-07-01 00:00:00",
-        "2015-06-30 23:59:59", "2015-07-01 00:00:00", "2005-12-31 23:59:59", "2006-01-01 00:00:00",
-        "2008-12-31 23:59:59", "2009-01-01 00:00:00", "2015-06-30 23:59:60", "2015-07-31 00:00:00",
-        "2015-07-31 00:00:01",
-
-        // On 2000-03-26 02:00:00 Moscow went to DST, thus local time became 03:00:00
-        "2000-03-26 01:59:59", "2000-03-26 02:00:00", "2000-03-26 02:00:01", "2000-03-26 02:59:59",
-        "2000-03-26 03:00:00", "2000-03-26 03:00:01", "2000-03-26 03:59:59", "2000-03-26 04:00:00",
-        "2000-03-26 04:00:01",
-
-        // This is a pre-1970 date, so check if it is rounded properly
-        "1950-07-20 02:00:00",
-
-        // On 2000-10-29 03:00:00 Moscow went to regular time, thus local time became 02:00:00
-        "2000-10-29 01:59:59", "2000-10-29 02:00:00", "2000-10-29 02:00:01", "2000-10-29 02:59:59",
-        "2000-10-29 03:00:00", "2000-10-29 03:00:01", "2000-10-29 03:59:59", "2000-10-29 04:00:00",
-        "2000-10-29 04:00:01");
-
-    con.createStatement().execute("delete from testtimezone");
-    Statement stmt = con.createStatement();
-
-    for (int i = 0; i < datesToTest.size(); i++) {
-      stmt.execute(
-          "insert into testtimezone (ts, d, seq) values ("
-              + "'" + datesToTest.get(i) + "'"
-              + ", '" + setTimeTo00_00_00(datesToTest.get(i)) + "'"
-              + ", " + i + ")");
+    private static final int DAY = 24 * 3600 * 1000;
+    private static final TimeZone saveTZ = TimeZone.getDefault();
+    private static final int PREPARE_THRESHOLD = 2;
+    private final Calendar cUTC;
+
+    //
+    // We set up everything in different timezones to try to exercise many cases:
+    //
+    // default JVM timezone: GMT+0100
+    // server timezone: GMT+0300
+    // test timezones: GMT+0000 GMT+0100 GMT+0300 GMT+1300 GMT-0500
+    private final Calendar cGMT03;
+    private final Calendar cGMT05;
+    private final Calendar cGMT13;
+    private Connection con;
+
+    public TimezoneTest() {
+        TimeZone tzUTC = TimeZone.getTimeZone("UTC"); // +0000 always
+        TimeZone tzGMT03 = TimeZone.getTimeZone("GMT+03"); // +0300 always
+        TimeZone tzGMT05 = TimeZone.getTimeZone("GMT-05"); // -0500 always
+        TimeZone tzGMT13 = TimeZone.getTimeZone("GMT+13"); // +1300 always
+
+        cUTC = Calendar.getInstance(tzUTC);
+        cGMT03 = Calendar.getInstance(tzGMT03);
+        cGMT05 = Calendar.getInstance(tzGMT05);
+        cGMT13 = Calendar.getInstance(tzGMT13);
     }
 
-    // Different timezone test should have different sql text, so we test both text and binary modes
-    PreparedStatement pstmt =
-        con.prepareStatement("SELECT ts, d FROM testtimezone order by seq /*" + timeZone + "*/");
+    @BeforeEach
+    void setUp() throws Exception {
+        // We must change the default TZ before establishing the connection.
+        // Arbitrary timezone that doesn't match our test timezones
+        TimeZone.setDefault(TimeZone.getTimeZone("GMT+01"));
 
-    Calendar expectedTimestamp = Calendar.getInstance();
+        connect();
+        TestUtil.createTable(con, "testtimezone",
+                "seq int4, tstz timestamp with time zone, ts timestamp without time zone, t time without time zone, tz time with time zone, d date");
 
-    SimpleDateFormat sdf = new SimpleDateFormat(testDateFormat);
+        // This is not obvious, but the "gmt-3" timezone is actually 3 hours *ahead* of GMT
+        // so will produce +03 timestamptz output
+        con.createStatement().executeUpdate("set timezone = 'gmt-3'");
 
-    for (int i = 0; i < PREPARE_THRESHOLD; i++) {
-      ResultSet rs = pstmt.executeQuery();
-      for (int j = 0; rs.next(); j++) {
-        String testDate = datesToTest.get(j);
-        Date getDate = rs.getDate(1);
-        Date getDateFromDateColumn = rs.getDate(2);
-        Timestamp getTimestamp = rs.getTimestamp(1);
-        String getString = rs.getString(1);
-        Time getTime = rs.getTime(1);
-        expectedTimestamp.setTime(sdf.parse(testDate));
+        // System.err.println("++++++ TESTS START (" + getName() + ") ++++++");
+    }
 
-        assertEquals(
-            sdf.format(expectedTimestamp.getTimeInMillis()), sdf.format(getTimestamp), "getTimestamp: " + testDate + ", transfer format: " + (i == 0 ? "text" : "binary")
-                + ", timeZone: " + timeZone);
+    private void connect() throws Exception {
+        Properties p = new Properties();
+        PGProperty.PREPARE_THRESHOLD.set(p, 1);
+        con = TestUtil.openDB(p);
+    }
 
-        assertEquals(
-            sdf.format(expectedTimestamp.getTimeInMillis()), sdf.format(sdf.parse(getString)), "getString: " + testDate + ", transfer format: " + (i == 0 ? "text" : "binary")
-                + ", timeZone: " + timeZone);
+    @AfterEach
+    void tearDown() throws Exception {
+        // System.err.println("++++++ TESTS END (" + getName() + ") ++++++");
+        TimeZone.setDefault(saveTZ);
 
-        expectedTimestamp.set(Calendar.HOUR_OF_DAY, 0);
-        expectedTimestamp.set(Calendar.MINUTE, 0);
-        expectedTimestamp.set(Calendar.SECOND, 0);
+        TestUtil.dropTable(con, "testtimezone");
+        TestUtil.closeDB(con);
+    }
 
-        assertEquals(
-            sdf.format(expectedTimestamp.getTimeInMillis()), sdf.format(getDate), "TIMESTAMP -> getDate: " + testDate + ", transfer format: " + (i == 0 ? "text" : "binary")
-                + ", timeZone: " + timeZone);
+    @Test
+    void getTimestamp() throws Exception {
+        con.createStatement().executeUpdate(
+                "INSERT INTO testtimezone(tstz,ts,t,tz,d) VALUES('2005-01-01 15:00:00 +0300', '2005-01-01 15:00:00', '15:00:00', '15:00:00 +0300', '2005-01-01')");
 
-        String expectedDateFromDateColumn = setTimeTo00_00_00(testDate);
-        if ("Atlantic/Azores".equals(timeZone) && testDate.startsWith("2000-03-26")) {
-          // Atlantic/Azores does not have 2000-03-26 00:00:00
-          // They go right to 2000-03-26 01:00:00 due to DST.
-          // Vladimir Sitnikov: I have no idea how do they represent 2000-03-26 00:00:00 :(
-          // So the assumption is 2000-03-26 01:00:00 is the expected for that time zone
-          expectedDateFromDateColumn = "2000-03-26 01:00:00";
+        for (int i = 0; i < PREPARE_THRESHOLD; i++) {
+            String format = i == 0 ? ", text" : ", binary";
+            PreparedStatement ps = con.prepareStatement("SELECT tstz,ts,t,tz,d from testtimezone");
+            ResultSet rs = ps.executeQuery();
+
+            assertTrue(rs.next());
+            checkDatabaseContents("SELECT tstz::text,ts::text,t::text,tz::text,d::text from testtimezone",
+                    new String[]{"2005-01-01 12:00:00+00", "2005-01-01 15:00:00", "15:00:00", "15:00:00+03",
+                            "2005-01-01"});
+
+            Timestamp ts;
+            String str;
+
+            // timestamptz: 2005-01-01 15:00:00+03
+            ts = rs.getTimestamp(1); // Represents an instant in time, timezone is irrelevant.
+            assertEquals(1104580800000L, ts.getTime()); // 2005-01-01 12:00:00 UTC
+            ts = rs.getTimestamp(1, cUTC); // Represents an instant in time, timezone is irrelevant.
+            assertEquals(1104580800000L, ts.getTime()); // 2005-01-01 12:00:00 UTC
+            ts = rs.getTimestamp(1, cGMT03); // Represents an instant in time, timezone is irrelevant.
+            assertEquals(1104580800000L, ts.getTime()); // 2005-01-01 12:00:00 UTC
+            ts = rs.getTimestamp(1, cGMT05); // Represents an instant in time, timezone is irrelevant.
+            assertEquals(1104580800000L, ts.getTime()); // 2005-01-01 12:00:00 UTC
+            ts = rs.getTimestamp(1, cGMT13); // Represents an instant in time, timezone is irrelevant.
+            assertEquals(1104580800000L, ts.getTime()); // 2005-01-01 12:00:00 UTC
+            str = rs.getString(1);
+            assertEquals("2005-01-01 15:00:00+03", str, "tstz -> getString" + format);
+
+            // timestamp: 2005-01-01 15:00:00
+            ts = rs.getTimestamp(2); // Convert timestamp to +0100
+            assertEquals(1104588000000L, ts.getTime()); // 2005-01-01 15:00:00 +0100
+            ts = rs.getTimestamp(2, cUTC); // Convert timestamp to UTC
+            assertEquals(1104591600000L, ts.getTime()); // 2005-01-01 15:00:00 +0000
+            ts = rs.getTimestamp(2, cGMT03); // Convert timestamp to +0300
+            assertEquals(1104580800000L, ts.getTime()); // 2005-01-01 15:00:00 +0300
+            ts = rs.getTimestamp(2, cGMT05); // Convert timestamp to -0500
+            assertEquals(1104609600000L, ts.getTime()); // 2005-01-01 15:00:00 -0500
+            ts = rs.getTimestamp(2, cGMT13); // Convert timestamp to +1300
+            assertEquals(1104544800000L, ts.getTime()); // 2005-01-01 15:00:00 +1300
+            str = rs.getString(2);
+            assertEquals("2005-01-01 15:00:00", str, "ts -> getString" + format);
+
+            // time: 15:00:00
+            ts = rs.getTimestamp(3);
+            assertEquals(50400000L, ts.getTime()); // 1970-01-01 15:00:00 +0100
+            ts = rs.getTimestamp(3, cUTC);
+            assertEquals(54000000L, ts.getTime()); // 1970-01-01 15:00:00 +0000
+            ts = rs.getTimestamp(3, cGMT03);
+            assertEquals(43200000L, ts.getTime()); // 1970-01-01 15:00:00 +0300
+            ts = rs.getTimestamp(3, cGMT05);
+            assertEquals(72000000L, ts.getTime()); // 1970-01-01 15:00:00 -0500
+            ts = rs.getTimestamp(3, cGMT13);
+            assertEquals(7200000L, ts.getTime()); // 1970-01-01 15:00:00 +1300
+            str = rs.getString(3);
+            assertEquals("15:00:00", str, "time -> getString" + format);
+
+            // timetz: 15:00:00+03
+            ts = rs.getTimestamp(4);
+            // 1970-01-01 15:00:00 +0300 -> 1970-01-01 13:00:00 +0100
+            assertEquals(43200000L, ts.getTime());
+            ts = rs.getTimestamp(4, cUTC);
+            // 1970-01-01 15:00:00 +0300 -> 1970-01-01 12:00:00 +0000
+            assertEquals(43200000L, ts.getTime());
+            ts = rs.getTimestamp(4, cGMT03);
+            // 1970-01-01 15:00:00 +0300 -> 1970-01-01 15:00:00 +0300
+            assertEquals(43200000L, ts.getTime());
+            ts = rs.getTimestamp(4, cGMT05);
+            // 1970-01-01 15:00:00 +0300 -> 1970-01-01 07:00:00 -0500
+            assertEquals(43200000L, ts.getTime());
+            ts = rs.getTimestamp(4, cGMT13);
+            // 1970-01-01 15:00:00 +0300 -> 1970-01-02 01:00:00 +1300
+            assertEquals(43200000L, ts.getTime());
+            str = rs.getString(4);
+            assertEquals("15:00:00+03", str, "timetz -> getString" + format);
+
+            // date: 2005-01-01
+            ts = rs.getTimestamp(5);
+            assertEquals(1104534000000L, ts.getTime()); // 2005-01-01 00:00:00 +0100
+            ts = rs.getTimestamp(5, cUTC);
+            assertEquals(1104537600000L, ts.getTime()); // 2005-01-01 00:00:00 +0000
+            ts = rs.getTimestamp(5, cGMT03);
+            assertEquals(1104526800000L, ts.getTime()); // 2005-01-01 00:00:00 +0300
+            ts = rs.getTimestamp(5, cGMT05);
+            assertEquals(1104555600000L, ts.getTime()); // 2005-01-01 00:00:00 -0500
+            ts = rs.getTimestamp(5, cGMT13);
+            assertEquals(1104490800000L, ts.getTime()); // 2005-01-01 00:00:00 +1300
+            str = rs.getString(5);
+            assertEquals("2005-01-01", str, "date -> getString" + format);
+
+            assertFalse(rs.next());
+            ps.close();
+        }
+    }
+
+    @Test
+    void getDate() throws Exception {
+        con.createStatement().executeUpdate(
+                "INSERT INTO testtimezone(tstz,ts,d) VALUES('2005-01-01 15:00:00 +0300', '2005-01-01 15:00:00', '2005-01-01')");
+
+        PreparedStatement ps = con.prepareStatement("SELECT tstz,ts,d from testtimezone");
+        for (int i = 0; i < PREPARE_THRESHOLD; i++) {
+            ResultSet rs = ps.executeQuery();
+
+            assertTrue(rs.next());
+            checkDatabaseContents("SELECT tstz::text,ts::text,d::text from testtimezone",
+                    new String[]{"2005-01-01 12:00:00+00", "2005-01-01 15:00:00", "2005-01-01"});
+
+            Date d;
+
+            // timestamptz: 2005-01-01 15:00:00+03
+            d = rs.getDate(1); // 2005-01-01 13:00:00 +0100 -> 2005-01-01 00:00:00 +0100
+            assertEquals(1104534000000L, d.getTime());
+            d = rs.getDate(1, cUTC); // 2005-01-01 12:00:00 +0000 -> 2005-01-01 00:00:00 +0000
+            assertEquals(1104537600000L, d.getTime());
+            d = rs.getDate(1, cGMT03); // 2005-01-01 15:00:00 +0300 -> 2005-01-01 00:00:00 +0300
+            assertEquals(1104526800000L, d.getTime());
+            d = rs.getDate(1, cGMT05); // 2005-01-01 07:00:00 -0500 -> 2005-01-01 00:00:00 -0500
+            assertEquals(1104555600000L, d.getTime());
+            d = rs.getDate(1, cGMT13); // 2005-01-02 01:00:00 +1300 -> 2005-01-02 00:00:00 +1300
+            assertEquals(1104577200000L, d.getTime());
+
+            // timestamp: 2005-01-01 15:00:00
+            d = rs.getDate(2); // 2005-01-01 00:00:00 +0100
+            assertEquals(1104534000000L, d.getTime());
+            d = rs.getDate(2, cUTC); // 2005-01-01 00:00:00 +0000
+            assertEquals(1104537600000L, d.getTime());
+            d = rs.getDate(2, cGMT03); // 2005-01-01 00:00:00 +0300
+            assertEquals(1104526800000L, d.getTime());
+            d = rs.getDate(2, cGMT05); // 2005-01-01 00:00:00 -0500
+            assertEquals(1104555600000L, d.getTime());
+            d = rs.getDate(2, cGMT13); // 2005-01-01 00:00:00 +1300
+            assertEquals(1104490800000L, d.getTime());
+
+            // date: 2005-01-01
+            d = rs.getDate(3); // 2005-01-01 00:00:00 +0100
+            assertEquals(1104534000000L, d.getTime());
+            d = rs.getDate(3, cUTC); // 2005-01-01 00:00:00 +0000
+            assertEquals(1104537600000L, d.getTime());
+            d = rs.getDate(3, cGMT03); // 2005-01-01 00:00:00 +0300
+            assertEquals(1104526800000L, d.getTime());
+            d = rs.getDate(3, cGMT05); // 2005-01-01 00:00:00 -0500
+            assertEquals(1104555600000L, d.getTime());
+            d = rs.getDate(3, cGMT13); // 2005-01-01 00:00:00 +1300
+            assertEquals(1104490800000L, d.getTime());
+
+            assertFalse(rs.next());
+            rs.close();
+        }
+    }
+
+    @Test
+    void getTime() throws Exception {
+        con.createStatement().executeUpdate(
+                "INSERT INTO testtimezone(tstz,ts,t,tz) VALUES('2005-01-01 15:00:00 +0300', '2005-01-01 15:00:00', '15:00:00', '15:00:00 +0300')");
+
+        PreparedStatement ps = con.prepareStatement("SELECT tstz,ts,t,tz from testtimezone");
+        for (int i = 0; i < PREPARE_THRESHOLD; i++) {
+            ResultSet rs = ps.executeQuery();
+
+            assertTrue(rs.next());
+            checkDatabaseContents("SELECT tstz::text,ts::text,t::text,tz::text,d::text from testtimezone",
+                    new String[]{"2005-01-01 12:00:00+00", "2005-01-01 15:00:00", "15:00:00", "15:00:00+03"});
+
+            Time t;
+
+            // timestamptz: 2005-01-01 15:00:00+03
+            t = rs.getTime(1);
+            // 2005-01-01 13:00:00 +0100 -> 1970-01-01 13:00:00 +0100
+            assertEquals(43200000L, t.getTime());
+            t = rs.getTime(1, cUTC);
+            // 2005-01-01 12:00:00 +0000 -> 1970-01-01 12:00:00 +0000
+            assertEquals(43200000L, t.getTime());
+            t = rs.getTime(1, cGMT03);
+            // 2005-01-01 15:00:00 +0300 -> 1970-01-01 15:00:00 +0300
+            assertEquals(43200000L, t.getTime());
+            t = rs.getTime(1, cGMT05);
+            // 2005-01-01 07:00:00 -0500 -> 1970-01-01 07:00:00 -0500
+            assertEquals(43200000L, t.getTime());
+            t = rs.getTime(1, cGMT13);
+            // 2005-01-02 01:00:00 +1300 -> 1970-01-01 01:00:00 +1300
+            assertEquals(43200000L, t.getTime());
+
+            // timestamp: 2005-01-01 15:00:00
+            t = rs.getTime(2);
+            assertEquals(50400000L, t.getTime()); // 1970-01-01 15:00:00 +0100
+            t = rs.getTime(2, cUTC);
+            assertEquals(54000000L, t.getTime()); // 1970-01-01 15:00:00 +0000
+            t = rs.getTime(2, cGMT03);
+            assertEquals(43200000L, t.getTime()); // 1970-01-01 15:00:00 +0300
+            t = rs.getTime(2, cGMT05);
+            assertEquals(72000000L, t.getTime()); // 1970-01-01 15:00:00 -0500
+            t = rs.getTime(2, cGMT13);
+            assertEquals(7200000L, t.getTime()); // 1970-01-01 15:00:00 +1300
+
+            // time: 15:00:00
+            t = rs.getTime(3);
+            assertEquals(50400000L, t.getTime()); // 1970-01-01 15:00:00 +0100
+            t = rs.getTime(3, cUTC);
+            assertEquals(54000000L, t.getTime()); // 1970-01-01 15:00:00 +0000
+            t = rs.getTime(3, cGMT03);
+            assertEquals(43200000L, t.getTime()); // 1970-01-01 15:00:00 +0300
+            t = rs.getTime(3, cGMT05);
+            assertEquals(72000000L, t.getTime()); // 1970-01-01 15:00:00 -0500
+            t = rs.getTime(3, cGMT13);
+            assertEquals(7200000L, t.getTime()); // 1970-01-01 15:00:00 +1300
+
+            // timetz: 15:00:00+03
+            t = rs.getTime(4);
+            assertEquals(43200000L, t.getTime()); // 1970-01-01 13:00:00 +0100
+            t = rs.getTime(4, cUTC);
+            assertEquals(43200000L, t.getTime()); // 1970-01-01 12:00:00 +0000
+            t = rs.getTime(4, cGMT03);
+            assertEquals(43200000L, t.getTime()); // 1970-01-01 15:00:00 +0300
+            t = rs.getTime(4, cGMT05);
+            assertEquals(43200000L, t.getTime()); // 1970-01-01 07:00:00 -0500
+            t = rs.getTime(4, cGMT13);
+            assertEquals(43200000L, t.getTime()); // 1970-01-01 01:00:00 +1300
+            rs.close();
+        }
+    }
+
+    /**
+     * This test is broken off from testSetTimestamp because it does not work for pre-7.4 servers and
+     * putting tons of conditionals in that test makes it largely unreadable. The time data type does
+     * not accept timestamp with time zone style input on these servers.
+     */
+    @Test
+    void setTimestampOnTime() throws Exception {
+        // Pre-7.4 servers cannot convert timestamps with timezones to times.
+        for (int i = 0; i < PREPARE_THRESHOLD; i++) {
+            con.createStatement().execute("delete from testtimezone");
+            PreparedStatement insertTimestamp =
+                    con.prepareStatement("INSERT INTO testtimezone(seq,t) VALUES (?,?)");
+            int seq = 1;
+
+            Timestamp instant = new Timestamp(1104580800000L); // 2005-01-01 12:00:00 UTC
+            Timestamp instantTime = new Timestamp(instant.getTime() % DAY);
+
+            // +0100 (JVM default)
+            insertTimestamp.setInt(1, seq++);
+            insertTimestamp.setTimestamp(2, instant); // 13:00:00
+            insertTimestamp.executeUpdate();
+
+            // UTC
+            insertTimestamp.setInt(1, seq++);
+            insertTimestamp.setTimestamp(2, instant, cUTC); // 12:00:00
+            insertTimestamp.executeUpdate();
+
+            // +0300
+            insertTimestamp.setInt(1, seq++);
+            insertTimestamp.setTimestamp(2, instant, cGMT03); // 15:00:00
+            insertTimestamp.executeUpdate();
+
+            // -0500
+            insertTimestamp.setInt(1, seq++);
+            insertTimestamp.setTimestamp(2, instant, cGMT05); // 07:00:00
+            insertTimestamp.executeUpdate();
+
+            // +1300
+            insertTimestamp.setInt(1, seq++);
+            insertTimestamp.setTimestamp(2, instant, cGMT13); // 01:00:00
+            insertTimestamp.executeUpdate();
+
+            insertTimestamp.close();
+
+            checkDatabaseContents("SELECT seq::text,t::text from testtimezone ORDER BY seq",
+                    new String[][]{new String[]{"1", "13:00:00"}, new String[]{"2", "12:00:00"},
+                            new String[]{"3", "15:00:00"}, new String[]{"4", "07:00:00"},
+                            new String[]{"5", "01:00:00"}});
+
+            seq = 1;
+            PreparedStatement ps = con.prepareStatement("SELECT seq,t FROM testtimezone ORDER BY seq");
+            ResultSet rs = ps.executeQuery();
+
+            assertTrue(rs.next());
+            assertEquals(seq++, rs.getInt(1));
+            assertEquals(instantTime, rs.getTimestamp(2));
+
+            assertTrue(rs.next());
+            assertEquals(seq++, rs.getInt(1));
+            assertEquals(instantTime, rs.getTimestamp(2, cUTC));
+
+            assertTrue(rs.next());
+            assertEquals(seq++, rs.getInt(1));
+            assertEquals(instantTime, rs.getTimestamp(2, cGMT03));
+
+            assertTrue(rs.next());
+            assertEquals(seq++, rs.getInt(1));
+            assertEquals(instantTime, rs.getTimestamp(2, cGMT05));
+
+            assertTrue(rs.next());
+            assertEquals(seq++, rs.getInt(1));
+            assertEquals(normalizeTimeOfDayPart(instantTime, cGMT13), rs.getTimestamp(2, cGMT13));
+
+            assertFalse(rs.next());
+            ps.close();
+        }
+    }
+
+    /**
+     * Stores one instant (2005-01-01 12:00:00 UTC) through {@code setTimestamp} using the default
+     * calendar and explicit UTC/+0300/-0500/+1300 calendars, verifies the raw backend text in UTC
+     * via {@code checkDatabaseContents}, then reads the values back with the matching calendars.
+     * Repeated PREPARE_THRESHOLD times so both text and binary transfer modes are exercised.
+     */
+    @Test
+    void setTimestamp() throws Exception {
+        for (int i = 0; i < PREPARE_THRESHOLD; i++) {
+            con.createStatement().execute("delete from testtimezone");
+            PreparedStatement insertTimestamp =
+                    con.prepareStatement("INSERT INTO testtimezone(seq,tstz,ts,tz,d) VALUES (?,?,?,?,?)");
+            int seq = 1;
+
+            Timestamp instant = new Timestamp(1104580800000L); // 2005-01-01 12:00:00 UTC
+            Timestamp instantTime = new Timestamp(instant.getTime() % DAY);
+            // Expected date-column values: the instant truncated to local midnight in each zone.
+            Timestamp instantDateJVM = new Timestamp(
+                    instant.getTime() - (instant.getTime() % DAY) - TimeZone.getDefault().getRawOffset());
+            Timestamp instantDateUTC = new Timestamp(
+                    instant.getTime() - (instant.getTime() % DAY) - cUTC.getTimeZone().getRawOffset());
+            Timestamp instantDateGMT03 = new Timestamp(
+                    instant.getTime() - (instant.getTime() % DAY) - cGMT03.getTimeZone().getRawOffset());
+            Timestamp instantDateGMT05 = new Timestamp(
+                    instant.getTime() - (instant.getTime() % DAY) - cGMT05.getTimeZone().getRawOffset());
+            Timestamp instantDateGMT13 = new Timestamp(instant.getTime() - (instant.getTime() % DAY)
+                    - cGMT13.getTimeZone().getRawOffset() + DAY);
+
+            // +0100 (JVM default)
+            insertTimestamp.setInt(1, seq++);
+            insertTimestamp.setTimestamp(2, instant); // 2005-01-01 13:00:00 +0100
+            insertTimestamp.setTimestamp(3, instant); // 2005-01-01 13:00:00
+            insertTimestamp.setTimestamp(4, instant); // 13:00:00 +0100
+            insertTimestamp.setTimestamp(5, instant); // 2005-01-01
+            insertTimestamp.executeUpdate();
+
+            // UTC
+            insertTimestamp.setInt(1, seq++);
+            insertTimestamp.setTimestamp(2, instant, cUTC); // 2005-01-01 12:00:00 +0000
+            insertTimestamp.setTimestamp(3, instant, cUTC); // 2005-01-01 12:00:00
+            insertTimestamp.setTimestamp(4, instant, cUTC); // 12:00:00 +0000
+            insertTimestamp.setTimestamp(5, instant, cUTC); // 2005-01-01
+            insertTimestamp.executeUpdate();
+
+            // +0300
+            insertTimestamp.setInt(1, seq++);
+            insertTimestamp.setTimestamp(2, instant, cGMT03); // 2005-01-01 15:00:00 +0300
+            insertTimestamp.setTimestamp(3, instant, cGMT03); // 2005-01-01 15:00:00
+            insertTimestamp.setTimestamp(4, instant, cGMT03); // 15:00:00 +0300
+            insertTimestamp.setTimestamp(5, instant, cGMT03); // 2005-01-01
+            insertTimestamp.executeUpdate();
+
+            // -0500
+            insertTimestamp.setInt(1, seq++);
+            insertTimestamp.setTimestamp(2, instant, cGMT05); // 2005-01-01 07:00:00 -0500
+            insertTimestamp.setTimestamp(3, instant, cGMT05); // 2005-01-01 07:00:00
+            insertTimestamp.setTimestamp(4, instant, cGMT05); // 07:00:00 -0500
+            insertTimestamp.setTimestamp(5, instant, cGMT05); // 2005-01-01
+            insertTimestamp.executeUpdate();
+
+            // +1300
+            insertTimestamp.setInt(1, seq++);
+            insertTimestamp.setTimestamp(2, instant, cGMT13); // 2005-01-02 01:00:00 +1300
+            insertTimestamp.setTimestamp(3, instant, cGMT13); // 2005-01-02 01:00:00
+            insertTimestamp.setTimestamp(4, instant, cGMT13); // 01:00:00 +1300
+            insertTimestamp.setTimestamp(5, instant, cGMT13); // 2005-01-02
+            insertTimestamp.executeUpdate();
+
+            insertTimestamp.close();
+
+            // check that insert went correctly by parsing the raw contents in UTC
+            checkDatabaseContents(
+                    "SELECT seq::text,tstz::text,ts::text,tz::text,d::text from testtimezone ORDER BY seq",
+                    new String[][]{
+                            new String[]{"1", "2005-01-01 12:00:00+00", "2005-01-01 13:00:00", "13:00:00+01",
+                                    "2005-01-01"},
+                            new String[]{"2", "2005-01-01 12:00:00+00", "2005-01-01 12:00:00", "12:00:00+00",
+                                    "2005-01-01"},
+                            new String[]{"3", "2005-01-01 12:00:00+00", "2005-01-01 15:00:00", "15:00:00+03",
+                                    "2005-01-01"},
+                            new String[]{"4", "2005-01-01 12:00:00+00", "2005-01-01 07:00:00", "07:00:00-05",
+                                    "2005-01-01"},
+                            new String[]{"5", "2005-01-01 12:00:00+00", "2005-01-02 01:00:00", "01:00:00+13",
+                                    "2005-01-02"}});
+
+            //
+            // check results
+            //
+
+            seq = 1;
+            PreparedStatement ps =
+                    con.prepareStatement("SELECT seq,tstz,ts,tz,d FROM testtimezone ORDER BY seq");
+            ResultSet rs = ps.executeQuery();
+
+            assertTrue(rs.next());
+            assertEquals(seq++, rs.getInt(1));
+            assertEquals(instant, rs.getTimestamp(2));
+            assertEquals(instant, rs.getTimestamp(3));
+            assertEquals(instantTime, rs.getTimestamp(4));
+            assertEquals(instantDateJVM, rs.getTimestamp(5));
+
+            assertTrue(rs.next());
+            assertEquals(seq++, rs.getInt(1));
+            assertEquals(instant, rs.getTimestamp(2, cUTC));
+            assertEquals(instant, rs.getTimestamp(3, cUTC));
+            assertEquals(instantTime, rs.getTimestamp(4, cUTC));
+            assertEquals(instantDateUTC, rs.getTimestamp(5, cUTC));
+
+            assertTrue(rs.next());
+            assertEquals(seq++, rs.getInt(1));
+            assertEquals(instant, rs.getTimestamp(2, cGMT03));
+            assertEquals(instant, rs.getTimestamp(3, cGMT03));
+            assertEquals(instantTime, rs.getTimestamp(4, cGMT03));
+            assertEquals(instantDateGMT03, rs.getTimestamp(5, cGMT03));
+
+            assertTrue(rs.next());
+            assertEquals(seq++, rs.getInt(1));
+            assertEquals(instant, rs.getTimestamp(2, cGMT05));
+            assertEquals(instant, rs.getTimestamp(3, cGMT05));
+            assertEquals(instantTime, rs.getTimestamp(4, cGMT05));
+            assertEquals(instantDateGMT05, rs.getTimestamp(5, cGMT05));
+
+            assertTrue(rs.next());
+            assertEquals(seq++, rs.getInt(1));
+            assertEquals(instant, rs.getTimestamp(2, cGMT13));
+            assertEquals(instant, rs.getTimestamp(3, cGMT13));
+            assertEquals(normalizeTimeOfDayPart(instantTime, cGMT13), rs.getTimestamp(4, cGMT13));
+            assertEquals(instantDateGMT13, rs.getTimestamp(5, cGMT13));
+
+            assertFalse(rs.next());
+            ps.close();
+        }
+    }
+
+    /**
+     * Stores local midnight of 2005-01-01 via {@code setDate} with the default and explicit
+     * calendars, verifies the raw UTC backend text, then reads the dates back with the matching
+     * calendars. Repeated PREPARE_THRESHOLD times to cover text and binary transfer modes.
+     */
+    @Test
+    void setDate() throws Exception {
+        for (int i = 0; i < PREPARE_THRESHOLD; i++) {
+            con.createStatement().execute("delete from testtimezone");
+            PreparedStatement insertTimestamp =
+                    con.prepareStatement("INSERT INTO testtimezone(seq,tstz,ts,d) VALUES (?,?,?,?)");
+
+            int seq = 1;
+
+            // All five are definitely assigned below; no initializer needed.
+            Date dJVM;
+            Date dUTC;
+            Date dGMT03;
+            Date dGMT05;
+            Date dGMT13;
+
+            // +0100 (JVM default)
+            dJVM = new Date(1104534000000L); // 2005-01-01 00:00:00 +0100
+            insertTimestamp.setInt(1, seq++);
+            insertTimestamp.setDate(2, dJVM); // 2005-01-01 00:00:00 +0100
+            insertTimestamp.setDate(3, dJVM); // 2005-01-01 00:00:00
+            insertTimestamp.setDate(4, dJVM); // 2005-01-01
+            insertTimestamp.executeUpdate();
+
+            // UTC
+            dUTC = new Date(1104537600000L); // 2005-01-01 00:00:00 +0000
+            insertTimestamp.setInt(1, seq++);
+            insertTimestamp.setDate(2, dUTC, cUTC); // 2005-01-01 00:00:00 +0000
+            insertTimestamp.setDate(3, dUTC, cUTC); // 2005-01-01 00:00:00
+            insertTimestamp.setDate(4, dUTC, cUTC); // 2005-01-01
+            insertTimestamp.executeUpdate();
+
+            // +0300
+            dGMT03 = new Date(1104526800000L); // 2005-01-01 00:00:00 +0300
+            insertTimestamp.setInt(1, seq++);
+            insertTimestamp.setDate(2, dGMT03, cGMT03); // 2005-01-01 00:00:00 +0300
+            insertTimestamp.setDate(3, dGMT03, cGMT03); // 2005-01-01 00:00:00
+            insertTimestamp.setDate(4, dGMT03, cGMT03); // 2005-01-01
+            insertTimestamp.executeUpdate();
+
+            // -0500
+            dGMT05 = new Date(1104555600000L); // 2005-01-01 00:00:00 -0500
+            insertTimestamp.setInt(1, seq++);
+            insertTimestamp.setDate(2, dGMT05, cGMT05); // 2005-01-01 00:00:00 -0500
+            insertTimestamp.setDate(3, dGMT05, cGMT05); // 2005-01-01 00:00:00
+            insertTimestamp.setDate(4, dGMT05, cGMT05); // 2005-01-01
+            insertTimestamp.executeUpdate();
+
+            // +1300
+            dGMT13 = new Date(1104490800000L); // 2005-01-01 00:00:00 +1300
+            insertTimestamp.setInt(1, seq++);
+            insertTimestamp.setDate(2, dGMT13, cGMT13); // 2005-01-01 00:00:00 +1300
+            insertTimestamp.setDate(3, dGMT13, cGMT13); // 2005-01-01 00:00:00
+            insertTimestamp.setDate(4, dGMT13, cGMT13); // 2005-01-01
+            insertTimestamp.executeUpdate();
+
+            insertTimestamp.close();
+
+            // check that insert went correctly by parsing the raw contents in UTC
+            checkDatabaseContents(
+                    "SELECT seq::text,tstz::text,ts::text,d::text from testtimezone ORDER BY seq",
+                    new String[][]{
+                            new String[]{"1", "2004-12-31 23:00:00+00", "2005-01-01 00:00:00", "2005-01-01"},
+                            new String[]{"2", "2005-01-01 00:00:00+00", "2005-01-01 00:00:00", "2005-01-01"},
+                            new String[]{"3", "2004-12-31 21:00:00+00", "2005-01-01 00:00:00", "2005-01-01"},
+                            new String[]{"4", "2005-01-01 05:00:00+00", "2005-01-01 00:00:00", "2005-01-01"},
+                            new String[]{"5", "2004-12-31 11:00:00+00", "2005-01-01 00:00:00", "2005-01-01"}});
+            //
+            // check results
+            //
+
+            seq = 1;
+            PreparedStatement ps =
+                    con.prepareStatement("SELECT seq,tstz,ts,d FROM testtimezone ORDER BY seq");
+            ResultSet rs = ps.executeQuery();
+
+            assertTrue(rs.next());
+            assertEquals(seq++, rs.getInt(1));
+            assertEquals(dJVM, rs.getDate(2));
+            assertEquals(dJVM, rs.getDate(3));
+            assertEquals(dJVM, rs.getDate(4));
+
+            assertTrue(rs.next());
+            assertEquals(seq++, rs.getInt(1));
+            assertEquals(dUTC, rs.getDate(2, cUTC));
+            assertEquals(dUTC, rs.getDate(3, cUTC));
+            assertEquals(dUTC, rs.getDate(4, cUTC));
+
+            assertTrue(rs.next());
+            assertEquals(seq++, rs.getInt(1));
+            assertEquals(dGMT03, rs.getDate(2, cGMT03));
+            assertEquals(dGMT03, rs.getDate(3, cGMT03));
+            assertEquals(dGMT03, rs.getDate(4, cGMT03));
+
+            assertTrue(rs.next());
+            assertEquals(seq++, rs.getInt(1));
+            assertEquals(dGMT05, rs.getDate(2, cGMT05));
+            assertEquals(dGMT05, rs.getDate(3, cGMT05));
+            assertEquals(dGMT05, rs.getDate(4, cGMT05));
+
+            assertTrue(rs.next());
+            assertEquals(seq++, rs.getInt(1));
+            assertEquals(dGMT13, rs.getDate(2, cGMT13));
+            assertEquals(dGMT13, rs.getDate(3, cGMT13));
+            assertEquals(dGMT13, rs.getDate(4, cGMT13));
+
+            assertFalse(rs.next());
+            ps.close();
+        }
+    }
+
+    /**
+     * Stores the wall-clock time 15:00:00 via {@code setTime} with the default and explicit
+     * calendars, verifies the raw UTC backend text, then reads the times back with the matching
+     * calendars. Repeated PREPARE_THRESHOLD times to cover text and binary transfer modes.
+     */
+    @Test
+    void setTime() throws Exception {
+        for (int i = 0; i < PREPARE_THRESHOLD; i++) {
+            con.createStatement().execute("delete from testtimezone");
+            PreparedStatement insertTimestamp =
+                    con.prepareStatement("INSERT INTO testtimezone(seq,t,tz) VALUES (?,?,?)");
+
+            int seq = 1;
+
+            Time tJVM;
+            Time tUTC;
+            Time tGMT03;
+            Time tGMT05;
+            Time tGMT13;
+
+            // +0100 (JVM default)
+            tJVM = new Time(50400000L); // 1970-01-01 15:00:00 +0100
+            insertTimestamp.setInt(1, seq++);
+            insertTimestamp.setTime(2, tJVM); // 15:00:00
+            insertTimestamp.setTime(3, tJVM); // 15:00:00+01
+            insertTimestamp.executeUpdate();
+
+            // UTC
+            tUTC = new Time(54000000L); // 1970-01-01 15:00:00 +0000
+            insertTimestamp.setInt(1, seq++);
+            insertTimestamp.setTime(2, tUTC, cUTC); // 15:00:00
+            insertTimestamp.setTime(3, tUTC, cUTC); // 15:00:00+00
+            insertTimestamp.executeUpdate();
+
+            // +0300
+            tGMT03 = new Time(43200000L); // 1970-01-01 15:00:00 +0300
+            insertTimestamp.setInt(1, seq++);
+            insertTimestamp.setTime(2, tGMT03, cGMT03); // 15:00:00
+            insertTimestamp.setTime(3, tGMT03, cGMT03); // 15:00:00+03
+            insertTimestamp.executeUpdate();
+
+            // -0500
+            tGMT05 = new Time(72000000L); // 1970-01-01 15:00:00 -0500
+            insertTimestamp.setInt(1, seq++);
+            insertTimestamp.setTime(2, tGMT05, cGMT05); // 15:00:00
+            insertTimestamp.setTime(3, tGMT05, cGMT05); // 15:00:00-05
+            insertTimestamp.executeUpdate();
+
+            // +1300
+            tGMT13 = new Time(7200000L); // 1970-01-01 15:00:00 +1300
+            insertTimestamp.setInt(1, seq++);
+            insertTimestamp.setTime(2, tGMT13, cGMT13); // 15:00:00
+            insertTimestamp.setTime(3, tGMT13, cGMT13); // 15:00:00+13
+            insertTimestamp.executeUpdate();
+
+            insertTimestamp.close();
+
+            // check that insert went correctly by parsing the raw contents in UTC
+            checkDatabaseContents("SELECT seq::text,t::text,tz::text from testtimezone ORDER BY seq",
+                    new String[][]{new String[]{"1", "15:00:00", "15:00:00+01",},
+                            new String[]{"2", "15:00:00", "15:00:00+00",},
+                            new String[]{"3", "15:00:00", "15:00:00+03",},
+                            new String[]{"4", "15:00:00", "15:00:00-05",},
+                            new String[]{"5", "15:00:00", "15:00:00+13",}});
+
+            //
+            // check results
+            //
+
+            seq = 1;
+            PreparedStatement ps = con.prepareStatement("SELECT seq,t,tz FROM testtimezone ORDER BY seq");
+            ResultSet rs = ps.executeQuery();
+
+            assertTrue(rs.next());
+            assertEquals(seq++, rs.getInt(1));
+            assertEquals(tJVM, rs.getTime(2));
+            assertEquals(tJVM, rs.getTime(3));
+
+            assertTrue(rs.next());
+            assertEquals(seq++, rs.getInt(1));
+            assertEquals(tUTC, rs.getTime(2, cUTC));
+            assertEquals(tUTC, rs.getTime(3, cUTC));
+
+            assertTrue(rs.next());
+            assertEquals(seq++, rs.getInt(1));
+            assertEquals(tGMT03, rs.getTime(2, cGMT03));
+            assertEquals(tGMT03, rs.getTime(3, cGMT03));
+
+            assertTrue(rs.next());
+            assertEquals(seq++, rs.getInt(1));
+            assertEquals(tGMT05, rs.getTime(2, cGMT05));
+            assertEquals(tGMT05, rs.getTime(3, cGMT05));
+
+            assertTrue(rs.next());
+            assertEquals(seq++, rs.getInt(1));
+            assertEquals(tGMT13, rs.getTime(2, cGMT13));
+            assertEquals(tGMT13, rs.getTime(3, cGMT13));
+
+            assertFalse(rs.next());
+            ps.close();
+        }
+    }
+
+    /** Verifies timestamptz round-trips correctly under a half-hour-offset zone (GMT+3:30). */
+    @Test
+    void halfHourTimezone() throws Exception {
+        Statement setTz = con.createStatement();
+        setTz.execute("SET TimeZone = 'GMT+3:30'");
+        // '1969-12-31 20:30:00' local at GMT+3:30 is exactly the epoch instant.
+        for (int attempt = 0; attempt < PREPARE_THRESHOLD; attempt++) {
+            PreparedStatement query = con.prepareStatement("SELECT '1969-12-31 20:30:00'::timestamptz");
+            ResultSet result = query.executeQuery();
+            assertTrue(result.next());
+            assertEquals(0L, result.getTimestamp(1).getTime());
+            query.close();
+        }
+    }
+
+    /**
+     * Verifies timestamptz parsing for a zone with historical sub-minute offsets.
+     * NOTE(review): Europe/Paris used LMT with a seconds component before 1911 — the point here is
+     * presumably that the driver must parse zone offsets that are not whole minutes; confirm.
+     */
+    @Test
+    void timezoneWithSeconds() throws SQLException {
+        Statement stmt = con.createStatement();
+        stmt.execute("SET TimeZone = 'Europe/Paris'");
+        for (int i = 0; i < PREPARE_THRESHOLD; i++) {
+            PreparedStatement ps = con.prepareStatement("SELECT '1920-01-01'::timestamptz");
+            ResultSet rs = ps.executeQuery();
+            rs.next();
+            // select extract(epoch from '1920-01-01'::timestamptz - 'epoch'::timestamptz) * 1000;
+
+            assertEquals(-1577923200000L, rs.getTimestamp(1).getTime());
+            ps.close();
+        }
+    }
+
+    /**
+     * Runs {@link #localTimestamps(String)} for fixed-offset zones GMT-12 through GMT+13.
+     *
+     * <p>Java's custom time zone ID syntax requires an explicit sign ({@code GMT+5}, {@code GMT-5});
+     * an unsigned ID such as {@code GMT05} is invalid and {@code TimeZone.getTimeZone} silently
+     * falls back to plain GMT, so the format string must force the sign.</p>
+     */
+    @Test
+    void localTimestampsInNonDSTZones() throws Exception {
+        for (int i = -12; i <= 13; i++) {
+            // %+02d always emits a sign: "GMT+5"/"GMT-12" are valid custom IDs; "GMT05" is not.
+            localTimestamps(String.format("GMT%+02d", i));
+        }
+    }
+
+    @Test
+    void localTimestampsInAfricaCasablanca() throws Exception {
+        localTimestamps("Africa/Casablanca"); // offset oscillates around UTC, roughly GMT+0..GMT+1
+    }
+
+    @Test
+    void localTimestampsInAtlanticAzores() throws Exception {
+        localTimestamps("Atlantic/Azores"); // roughly GMT-1..GMT+0; its DST jump crosses midnight
+    }
+
+    @Test
+    void localTimestampsInEuropeMoscow() throws Exception {
+        localTimestamps("Europe/Moscow"); // roughly GMT+3..GMT+4 during the 2000s DST era
+    }
+
+    @Test
+    void localTimestampsInPacificApia() throws Exception {
+        localTimestamps("Pacific/Apia"); // far-eastern offsets, roughly GMT+13..GMT+14
+    }
+
+    @Test
+    void localTimestampsInPacificNiue() throws Exception {
+        localTimestamps("Pacific/Niue"); // fixed GMT-11, no DST
+    }
+
+    @Test
+    void localTimestampsInAmericaAdak() throws Exception {
+        localTimestamps("America/Adak"); // roughly GMT-10..GMT-9
+    }
+
+    // Replaces the time-of-day part of a "yyyy-MM-dd HH:mm:ss" string with midnight.
+    // Assumes exactly that fixed-width format (substring(0, 11) keeps "yyyy-MM-dd ").
+    private String setTimeTo00_00_00(String timestamp) {
+        return timestamp.substring(0, 11) + "00:00:00";
+    }
+
+    /**
+     * Round-trips a set of awkward local timestamps (DST transitions, a leap-second value, a
+     * pre-1970 date) through timestamp/date columns with the JVM default zone set to
+     * {@code timeZone}, checking getTimestamp/getString/getDate/getTime across both text and
+     * binary transfer modes.
+     *
+     * <p>NOTE(review): this sets the JVM default time zone and does not restore it here;
+     * presumably the test harness resets it between tests — confirm.</p>
+     */
+    public void localTimestamps(String timeZone) throws Exception {
+        TimeZone.setDefault(TimeZone.getTimeZone(timeZone));
+
+        final String testDateFormat = "yyyy-MM-dd HH:mm:ss";
+        final List<String> datesToTest = Arrays.asList("2015-09-03 12:00:00", "2015-06-30 23:59:58",
+                "1997-06-30 23:59:59", "1997-07-01 00:00:00", "2012-06-30 23:59:59", "2012-07-01 00:00:00",
+                "2015-06-30 23:59:59", "2015-07-01 00:00:00", "2005-12-31 23:59:59", "2006-01-01 00:00:00",
+                "2008-12-31 23:59:59", "2009-01-01 00:00:00", "2015-06-30 23:59:60", "2015-07-31 00:00:00",
+                "2015-07-31 00:00:01",
+
+                // On 2000-03-26 02:00:00 Moscow went to DST, thus local time became 03:00:00
+                "2000-03-26 01:59:59", "2000-03-26 02:00:00", "2000-03-26 02:00:01", "2000-03-26 02:59:59",
+                "2000-03-26 03:00:00", "2000-03-26 03:00:01", "2000-03-26 03:59:59", "2000-03-26 04:00:00",
+                "2000-03-26 04:00:01",
+
+                // This is a pre-1970 date, so check if it is rounded properly
+                "1950-07-20 02:00:00",
+
+                // On 2000-10-29 03:00:00 Moscow went to regular time, thus local time became 02:00:00
+                "2000-10-29 01:59:59", "2000-10-29 02:00:00", "2000-10-29 02:00:01", "2000-10-29 02:59:59",
+                "2000-10-29 03:00:00", "2000-10-29 03:00:01", "2000-10-29 03:59:59", "2000-10-29 04:00:00",
+                "2000-10-29 04:00:01");
+
+        con.createStatement().execute("delete from testtimezone");
+        Statement stmt = con.createStatement();
+
+        for (int i = 0; i < datesToTest.size(); i++) {
+            stmt.execute(
+                    "insert into testtimezone (ts, d, seq) values ("
+                            + "'" + datesToTest.get(i) + "'"
+                            + ", '" + setTimeTo00_00_00(datesToTest.get(i)) + "'"
+                            + ", " + i + ")");
         }
 
-        assertEquals(
-            expectedDateFromDateColumn, sdf.format(getDateFromDateColumn), "DATE -> getDate: " + expectedDateFromDateColumn + ", transfer format: " + (i == 0 ? "text" : "binary")
-                + ", timeZone: " + timeZone);
+        // Different timezone test should have different sql text, so we test both text and binary modes
+        PreparedStatement pstmt =
+                con.prepareStatement("SELECT ts, d FROM testtimezone order by seq /*" + timeZone + "*/");
 
-        expectedTimestamp.setTime(sdf.parse(testDate));
-        expectedTimestamp.set(Calendar.YEAR, 1970);
-        expectedTimestamp.set(Calendar.MONTH, 0);
-        expectedTimestamp.set(Calendar.DAY_OF_MONTH, 1);
+        Calendar expectedTimestamp = Calendar.getInstance();
 
-        assertEquals(
-            sdf.format(expectedTimestamp.getTimeInMillis()), sdf.format(getTime), "getTime: " + testDate + ", transfer format: " + (i == 0 ? "text" : "binary")
-                + ", timeZone: " + timeZone);
+        SimpleDateFormat sdf = new SimpleDateFormat(testDateFormat);
 
-      }
-      rs.close();
+        for (int i = 0; i < PREPARE_THRESHOLD; i++) {
+            ResultSet rs = pstmt.executeQuery();
+            for (int j = 0; rs.next(); j++) {
+                String testDate = datesToTest.get(j);
+                Date getDate = rs.getDate(1);
+                Date getDateFromDateColumn = rs.getDate(2);
+                Timestamp getTimestamp = rs.getTimestamp(1);
+                String getString = rs.getString(1);
+                Time getTime = rs.getTime(1);
+                expectedTimestamp.setTime(sdf.parse(testDate));
+
+                assertEquals(
+                        sdf.format(expectedTimestamp.getTimeInMillis()), sdf.format(getTimestamp), "getTimestamp: " + testDate + ", transfer format: " + (i == 0 ? "text" : "binary")
+                                + ", timeZone: " + timeZone);
+
+                assertEquals(
+                        sdf.format(expectedTimestamp.getTimeInMillis()), sdf.format(sdf.parse(getString)), "getString: " + testDate + ", transfer format: " + (i == 0 ? "text" : "binary")
+                                + ", timeZone: " + timeZone);
+
+                expectedTimestamp.set(Calendar.HOUR_OF_DAY, 0);
+                expectedTimestamp.set(Calendar.MINUTE, 0);
+                expectedTimestamp.set(Calendar.SECOND, 0);
+
+                assertEquals(
+                        sdf.format(expectedTimestamp.getTimeInMillis()), sdf.format(getDate), "TIMESTAMP -> getDate: " + testDate + ", transfer format: " + (i == 0 ? "text" : "binary")
+                                + ", timeZone: " + timeZone);
+
+                String expectedDateFromDateColumn = setTimeTo00_00_00(testDate);
+                if ("Atlantic/Azores".equals(timeZone) && testDate.startsWith("2000-03-26")) {
+                    // Atlantic/Azores does not have 2000-03-26 00:00:00
+                    // They go right to 2000-03-26 01:00:00 due to DST.
+                    // Vladimir Sitnikov: I have no idea how do they represent 2000-03-26 00:00:00 :(
+                    // So the assumption is 2000-03-26 01:00:00 is the expected for that time zone
+                    expectedDateFromDateColumn = "2000-03-26 01:00:00";
+                }
+
+                assertEquals(
+                        expectedDateFromDateColumn, sdf.format(getDateFromDateColumn), "DATE -> getDate: " + expectedDateFromDateColumn + ", transfer format: " + (i == 0 ? "text" : "binary")
+                                + ", timeZone: " + timeZone);
+
+                expectedTimestamp.setTime(sdf.parse(testDate));
+                expectedTimestamp.set(Calendar.YEAR, 1970);
+                expectedTimestamp.set(Calendar.MONTH, 0);
+                expectedTimestamp.set(Calendar.DAY_OF_MONTH, 1);
+
+                assertEquals(
+                        sdf.format(expectedTimestamp.getTimeInMillis()), sdf.format(getTime), "getTime: " + testDate + ", transfer format: " + (i == 0 ? "text" : "binary")
+                                + ", timeZone: " + timeZone);
+
+            }
+            rs.close();
+        }
     }
-  }
 
-  /**
-   * Does a query in UTC time zone to database to check that the inserted values are correct.
-   *
-   * @param query The query to run.
-   * @param correct The correct answers in UTC time zone as formatted by backend.
-   */
-  private void checkDatabaseContents(String query, String[] correct) throws Exception {
-    checkDatabaseContents(query, new String[][]{correct});
-  }
-
-  private void checkDatabaseContents(String query, String[][] correct) throws Exception {
-    Connection con2 = TestUtil.openDB();
-    Statement s = con2.createStatement();
-    assertFalse(s.execute("set time zone 'UTC'"));
-    assertTrue(s.execute(query));
-    ResultSet rs = s.getResultSet();
-    for (int j = 0; j < correct.length; j++) {
-      assertTrue(rs.next());
-      for (int i = 0; i < correct[j].length; i++) {
-        assertEquals(correct[j][i], rs.getString(i + 1), "On row " + (j + 1));
-      }
+    /**
+     * Does a query in UTC time zone to database to check that the inserted values are correct.
+     * Convenience overload for a single expected row; delegates to the multi-row variant.
+     *
+     * @param query   The query to run.
+     * @param correct The correct answers in UTC time zone as formatted by backend.
+     * @throws Exception if the query fails or the contents do not match.
+     */
+    private void checkDatabaseContents(String query, String[] correct) throws Exception {
+        checkDatabaseContents(query, new String[][]{correct});
+    }
-    assertFalse(rs.next());
-    rs.close();
-    s.close();
-    con2.close();
-  }
 
-  /**
-   * Converts the given time.
-   *
-   * @param t The time of day. Must be within -24 and + 24 hours of epoc.
-   * @param tz The timezone to normalize to.
-   * @return the Time normalized to 0 to 24 hours of epoc adjusted with given timezone.
-   */
-  private Timestamp normalizeTimeOfDayPart(Timestamp t, Calendar tz) {
-    return new Timestamp(normalizeTimeOfDayPart(t.getTime(), tz.getTimeZone()));
-  }
-
-  private long normalizeTimeOfDayPart(long t, TimeZone tz) {
-    long millis = t;
-    long low = -tz.getOffset(millis);
-    long high = low + DAY;
-    if (millis < low) {
-      do {
-        millis += DAY;
-      } while (millis < low);
-    } else if (millis >= high) {
-      do {
-        millis -= DAY;
-      } while (millis > high);
+    /**
+     * Runs {@code query} on a second connection pinned to UTC and asserts every row/column
+     * matches {@code correct}. Uses try-with-resources so the extra connection, statement and
+     * result set are released even when an assertion fails mid-check.
+     *
+     * @param query   The query to run.
+     * @param correct The correct answers per row, in UTC, as formatted by the backend.
+     * @throws Exception if the query fails or the contents do not match.
+     */
+    private void checkDatabaseContents(String query, String[][] correct) throws Exception {
+        try (Connection con2 = TestUtil.openDB();
+             Statement s = con2.createStatement()) {
+            assertFalse(s.execute("set time zone 'UTC'"));
+            assertTrue(s.execute(query));
+            try (ResultSet rs = s.getResultSet()) {
+                for (int j = 0; j < correct.length; j++) {
+                    assertTrue(rs.next());
+                    for (int i = 0; i < correct[j].length; i++) {
+                        assertEquals(correct[j][i], rs.getString(i + 1), "On row " + (j + 1));
+                    }
+                }
+                assertFalse(rs.next());
+            }
+        }
+    }
+
+    /**
+     * Converts the given time.
+     *
+     * @param t  The time of day. Must be within -24 and +24 hours of epoch.
+     * @param tz The timezone to normalize to.
+     * @return the Time normalized to 0 to 24 hours of epoch, adjusted with the given timezone.
+     */
+    private Timestamp normalizeTimeOfDayPart(Timestamp t, Calendar tz) {
+        return new Timestamp(normalizeTimeOfDayPart(t.getTime(), tz.getTimeZone()));
+    }
+
+    // Shifts t by whole days until it falls within the local day of zone tz that contains epoch:
+    // the window starts at -offset (UTC millis) and spans one DAY.
+    private long normalizeTimeOfDayPart(long t, TimeZone tz) {
+        long millis = t;
+        // Offset is sampled once at the input instant, not re-evaluated after shifting.
+        long low = -tz.getOffset(millis);
+        long high = low + DAY;
+        if (millis < low) {
+            do {
+                millis += DAY;
+            } while (millis < low);
+        } else if (millis >= high) {
+            do {
+                millis -= DAY;
+                // NOTE(review): loop condition is "> high" (not ">="), so a value landing exactly
+                // on high is kept; asymmetric with the ">= high" entry test — confirm intended.
+            } while (millis > high);
+        }
+        return millis;
+    }
-    return millis;
-  }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/TypeCacheDLLStressTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/TypeCacheDLLStressTest.java
index 1402f8a..572a801 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/TypeCacheDLLStressTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/TypeCacheDLLStressTest.java
@@ -24,79 +24,79 @@ import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 
 public class TypeCacheDLLStressTest extends BaseTest4 {
-  private static final int DURATION = Integer.getInteger("TypeCacheDLLStressTest.DURATION", 5);
+    // Stress duration in seconds; override with -DTypeCacheDLLStressTest.DURATION=<n>.
+    private static final int DURATION = Integer.getInteger("TypeCacheDLLStressTest.DURATION", 5);
 
-  private Connection con2;
+    private Connection con2;
 
-  @Override
-  protected void updateProperties(Properties props) {
-    try {
-      con2 = TestUtil.openDB(props);
-    } catch (Exception e) {
-      throw new IllegalStateException("Unable to open second DB connection", e);
-    }
-  }
-
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-    TestUtil.createTable(con, "create_and_drop_table", "user_id serial PRIMARY KEY");
-  }
-
-  @Override
-  public void tearDown() throws SQLException {
-    TestUtil.closeDB(con2);
-  }
-
-  @Test
-  public void createDropTableAndGetTypeInfo() throws Throwable {
-    ExecutorService executor = Executors.newFixedThreadPool(2);
-
-    Future<Void> typeInfoCache = executor.submit(new Callable<Void>() {
-      public Void call() throws Exception {
-        while (!Thread.currentThread().isInterrupted()) {
-          ResultSet rs = con.getMetaData().getTypeInfo();
-          rs.close();
+    @Override
+    protected void updateProperties(Properties props) {
+        try {
+            con2 = TestUtil.openDB(props);
+        } catch (Exception e) {
+            throw new IllegalStateException("Unable to open second DB connection", e);
         }
-        return null;
-      }
-    });
+    }
 
-    Future<Void> createAndDrop = executor.submit(new Callable<Void>() {
-      public Void call() throws Exception {
-        Statement stmt = con2.createStatement();
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        TestUtil.createTable(con, "create_and_drop_table", "user_id serial PRIMARY KEY");
+    }
 
-        while (!Thread.currentThread().isInterrupted()) {
-          stmt.execute("drop TABLE create_and_drop_table");
-          stmt.execute("CREATE TABLE create_and_drop_table"
-              + "( user_id serial PRIMARY KEY, username VARCHAR (50) UNIQUE NOT NULL"
-              + ", password VARCHAR (50) NOT NULL, email VARCHAR (355) UNIQUE NOT NULL"
-              + ", created_on TIMESTAMP NOT NULL, last_login TIMESTAMP)");
+    @Override
+    public void tearDown() throws SQLException {
+        TestUtil.closeDB(con2);
+    }
+
+    // Races getTypeInfo() on one connection against continuous table drop/create on a second
+    // connection for DURATION seconds, surfacing type-cache concurrency failures.
+    @Test
+    public void createDropTableAndGetTypeInfo() throws Throwable {
+        ExecutorService executor = Executors.newFixedThreadPool(2);
+
+        Future<Void> typeInfoCache = executor.submit(new Callable<Void>() {
+            public Void call() throws Exception {
+                while (!Thread.currentThread().isInterrupted()) {
+                    ResultSet rs = con.getMetaData().getTypeInfo();
+                    rs.close();
+                }
+                return null;
+            }
+        });
+
+        Future<Void> createAndDrop = executor.submit(new Callable<Void>() {
+            public Void call() throws Exception {
+                Statement stmt = con2.createStatement();
+
+                while (!Thread.currentThread().isInterrupted()) {
+                    stmt.execute("drop TABLE create_and_drop_table");
+                    stmt.execute("CREATE TABLE create_and_drop_table"
+                            + "( user_id serial PRIMARY KEY, username VARCHAR (50) UNIQUE NOT NULL"
+                            + ", password VARCHAR (50) NOT NULL, email VARCHAR (355) UNIQUE NOT NULL"
+                            + ", created_on TIMESTAMP NOT NULL, last_login TIMESTAMP)");
+                }
+                return null;
+            }
+        });
+
+        try {
+            typeInfoCache.get(DURATION, TimeUnit.SECONDS);
+        } catch (ExecutionException e) {
+            createAndDrop.cancel(true);
+            throw e.getCause();
+        } catch (TimeoutException e) {
+            // Test is expected to run as long as it can
         }
-        return null;
-      }
-    });
 
-    try {
-      typeInfoCache.get(DURATION, TimeUnit.SECONDS);
-    } catch (ExecutionException e) {
-      createAndDrop.cancel(true);
-      throw e.getCause();
-    } catch (TimeoutException e) {
-      // Test is expected to run as long as it can
+        typeInfoCache.cancel(true);
+        createAndDrop.cancel(true);
+
+        try {
+            createAndDrop.get(DURATION, TimeUnit.SECONDS);
+        } catch (ExecutionException e) {
+            throw e.getCause();
+        } catch (TimeoutException e) {
+            // Test is expected to run as long as it can
+        } catch (CancellationException e) {
+            // Ignore
+        }
     }
-
-    typeInfoCache.cancel(true);
-    createAndDrop.cancel(true);
-
-    try {
-      createAndDrop.get(DURATION, TimeUnit.SECONDS);
-    } catch (ExecutionException e) {
-      throw e.getCause();
-    } catch (TimeoutException e) {
-      // Test is expected to run as long as it can
-    } catch (CancellationException e) {
-      // Ignore
-    }
-  }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/UpdateableResultTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/UpdateableResultTest.java
index c878b24..ce2838e 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/UpdateableResultTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/UpdateableResultTest.java
@@ -36,868 +36,868 @@ import java.util.TimeZone;
 
 public class UpdateableResultTest extends BaseTest4 {
 
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-    TestUtil.createTable(con, "updateable",
-        "id int primary key, name text, notselected text, ts timestamp with time zone, intarr int[]");
-    TestUtil.createTable(con, "hasdate", "id int primary key, dt date unique, name text");
-    TestUtil.createTable(con, "unique_null_constraint", "u1 int unique, name1 text");
-    TestUtil.createTable(con, "uniquekeys", "id int unique not null, id2 int unique, dt date");
-    TestUtil.createTable(con, "partialunique", "subject text, target text, success boolean");
-    TestUtil.execute(con, "CREATE UNIQUE INDEX tests_success_constraint ON partialunique (subject, target) WHERE success");
-    TestUtil.createTable(con, "second", "id1 int primary key, name1 text");
-    TestUtil.createTable(con, "primaryunique", "id int primary key, name text unique not null, dt date");
-    TestUtil.createTable(con, "serialtable", "gen_id serial primary key, name text");
-    TestUtil.createTable(con, "compositepktable", "gen_id serial, name text, dec_id serial");
-    TestUtil.execute(con, "alter sequence compositepktable_dec_id_seq increment by 10; alter sequence compositepktable_dec_id_seq restart with 10");
-    TestUtil.execute(con, "alter table compositepktable add primary key ( gen_id, dec_id )");
-    TestUtil.createTable(con, "stream", "id int primary key, asi text, chr text, bin bytea");
-    TestUtil.createTable(con, "multicol", "id1 int not null, id2 int not null, val text");
-    TestUtil.createTable(con, "nopkmulticol", "id1 int not null, id2 int not null, val text");
-    TestUtil.createTable(con, "booltable", "id int not null primary key, b boolean default false");
-    TestUtil.execute(con, "insert into booltable (id) values (1)");
-    TestUtil.execute(con, "insert into uniquekeys(id, id2, dt) values (1, 2, now())");
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        TestUtil.createTable(con, "updateable",
+                "id int primary key, name text, notselected text, ts timestamp with time zone, intarr int[]");
+        TestUtil.createTable(con, "hasdate", "id int primary key, dt date unique, name text");
+        TestUtil.createTable(con, "unique_null_constraint", "u1 int unique, name1 text");
+        TestUtil.createTable(con, "uniquekeys", "id int unique not null, id2 int unique, dt date");
+        TestUtil.createTable(con, "partialunique", "subject text, target text, success boolean");
+        TestUtil.execute(con, "CREATE UNIQUE INDEX tests_success_constraint ON partialunique (subject, target) WHERE success");
+        TestUtil.createTable(con, "second", "id1 int primary key, name1 text");
+        TestUtil.createTable(con, "primaryunique", "id int primary key, name text unique not null, dt date");
+        TestUtil.createTable(con, "serialtable", "gen_id serial primary key, name text");
+        TestUtil.createTable(con, "compositepktable", "gen_id serial, name text, dec_id serial");
+        TestUtil.execute(con, "alter sequence compositepktable_dec_id_seq increment by 10; alter sequence compositepktable_dec_id_seq restart with 10");
+        TestUtil.execute(con, "alter table compositepktable add primary key ( gen_id, dec_id )");
+        TestUtil.createTable(con, "stream", "id int primary key, asi text, chr text, bin bytea");
+        TestUtil.createTable(con, "multicol", "id1 int not null, id2 int not null, val text");
+        TestUtil.createTable(con, "nopkmulticol", "id1 int not null, id2 int not null, val text");
+        TestUtil.createTable(con, "booltable", "id int not null primary key, b boolean default false");
+        TestUtil.execute(con, "insert into booltable (id) values (1)");
+        TestUtil.execute(con, "insert into uniquekeys(id, id2, dt) values (1, 2, now())");
 
-    Statement st2 = con.createStatement();
-    // create pk for multicol table
-    st2.execute("ALTER TABLE multicol ADD CONSTRAINT multicol_pk PRIMARY KEY (id1, id2)");
-    // put some dummy data into second
-    st2.execute("insert into second values (1,'anyvalue' )");
-    st2.close();
-    TestUtil.execute(con, "insert into unique_null_constraint values (1, 'dave')");
-    TestUtil.execute(con, "insert into unique_null_constraint values (null, 'unknown')");
-    TestUtil.execute(con, "insert into primaryunique values (1, 'dave', now())");
+        Statement st2 = con.createStatement();
+        // create pk for multicol table
+        st2.execute("ALTER TABLE multicol ADD CONSTRAINT multicol_pk PRIMARY KEY (id1, id2)");
+        // put some dummy data into second
+        st2.execute("insert into second values (1,'anyvalue' )");
+        st2.close();
+        TestUtil.execute(con, "insert into unique_null_constraint values (1, 'dave')");
+        TestUtil.execute(con, "insert into unique_null_constraint values (null, 'unknown')");
+        TestUtil.execute(con, "insert into primaryunique values (1, 'dave', now())");
 
-  }
-
-  @Override
-  public void tearDown() throws SQLException {
-    TestUtil.dropTable(con, "updateable");
-    TestUtil.dropTable(con, "second");
-    TestUtil.dropTable(con, "serialtable");
-    TestUtil.dropTable(con, "compositepktable");
-    TestUtil.dropTable(con, "stream");
-    TestUtil.dropTable(con, "nopkmulticol");
-    TestUtil.dropTable(con, "booltable");
-    TestUtil.dropTable(con, "unique_null_constraint");
-    TestUtil.dropTable(con, "hasdate");
-    TestUtil.dropTable(con, "uniquekeys");
-    TestUtil.dropTable(con, "partialunique");
-    TestUtil.dropTable(con, "primaryunique");
-    super.tearDown();
-  }
-
-  @Test
-  public void testDeleteRows() throws SQLException {
-    Statement st = con.createStatement();
-    st.executeUpdate("INSERT INTO second values (2,'two')");
-    st.executeUpdate("INSERT INTO second values (3,'three')");
-    st.executeUpdate("INSERT INTO second values (4,'four')");
-    st.close();
-
-    st = con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE);
-    ResultSet rs = st.executeQuery("select id1,name1 from second order by id1");
-
-    assertTrue(rs.next());
-    assertEquals(1, rs.getInt("id1"));
-    rs.deleteRow();
-    assertTrue(rs.isBeforeFirst());
-
-    assertTrue(rs.next());
-    assertTrue(rs.next());
-    assertEquals(3, rs.getInt("id1"));
-    rs.deleteRow();
-    assertEquals(2, rs.getInt("id1"));
-
-    rs.close();
-    st.close();
-  }
-
-  @Test
-  public void testCancelRowUpdates() throws Exception {
-    Statement st =
-        con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE);
-    ResultSet rs = st.executeQuery("select * from second");
-
-    // make sure we're dealing with the correct row.
-    rs.first();
-    assertEquals(1, rs.getInt(1));
-    assertEquals("anyvalue", rs.getString(2));
-
-    // update, cancel and make sure nothings changed.
-    rs.updateInt(1, 99);
-    rs.cancelRowUpdates();
-    assertEquals(1, rs.getInt(1));
-    assertEquals("anyvalue", rs.getString(2));
-
-    // real update
-    rs.updateInt(1, 999);
-    rs.updateRow();
-    assertEquals(999, rs.getInt(1));
-    assertEquals("anyvalue", rs.getString(2));
-
-    // scroll some and make sure the update is still there
-    rs.beforeFirst();
-    rs.next();
-    assertEquals(999, rs.getInt(1));
-    assertEquals("anyvalue", rs.getString(2));
-
-    // make sure the update got to the db and the driver isn't lying to us.
-    rs.close();
-    rs = st.executeQuery("select * from second");
-    rs.first();
-    assertEquals(999, rs.getInt(1));
-    assertEquals("anyvalue", rs.getString(2));
-
-    rs.close();
-    st.close();
-  }
-
-  private void checkPositioning(ResultSet rs) throws SQLException {
-    try {
-      rs.getInt(1);
-      fail("Can't use an incorrectly positioned result set.");
-    } catch (SQLException sqle) {
     }
 
-    try {
-      rs.updateInt(1, 2);
-      fail("Can't use an incorrectly positioned result set.");
-    } catch (SQLException sqle) {
+    @Override
+    public void tearDown() throws SQLException {
+        TestUtil.dropTable(con, "updateable");
+        TestUtil.dropTable(con, "second");
+        TestUtil.dropTable(con, "serialtable");
+        TestUtil.dropTable(con, "compositepktable");
+        TestUtil.dropTable(con, "stream");
+        TestUtil.dropTable(con, "nopkmulticol");
+        TestUtil.dropTable(con, "booltable");
+        TestUtil.dropTable(con, "unique_null_constraint");
+        TestUtil.dropTable(con, "hasdate");
+        TestUtil.dropTable(con, "uniquekeys");
+        TestUtil.dropTable(con, "partialunique");
+        TestUtil.dropTable(con, "primaryunique");
+        super.tearDown();
     }
 
-    try {
-      rs.updateRow();
-      fail("Can't use an incorrectly positioned result set.");
-    } catch (SQLException sqle) {
+    @Test
+    public void testDeleteRows() throws SQLException {
+        Statement st = con.createStatement();
+        st.executeUpdate("INSERT INTO second values (2,'two')");
+        st.executeUpdate("INSERT INTO second values (3,'three')");
+        st.executeUpdate("INSERT INTO second values (4,'four')");
+        st.close();
+
+        st = con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE);
+        ResultSet rs = st.executeQuery("select id1,name1 from second order by id1");
+
+        assertTrue(rs.next());
+        assertEquals(1, rs.getInt("id1"));
+        rs.deleteRow();
+        assertTrue(rs.isBeforeFirst());
+
+        assertTrue(rs.next());
+        assertTrue(rs.next());
+        assertEquals(3, rs.getInt("id1"));
+        rs.deleteRow();
+        assertEquals(2, rs.getInt("id1"));
+
+        rs.close();
+        st.close();
     }
 
-    try {
-      rs.deleteRow();
-      fail("Can't use an incorrectly positioned result set.");
-    } catch (SQLException sqle) {
-    }
-  }
+    @Test
+    public void testCancelRowUpdates() throws Exception {
+        Statement st =
+                con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE);
+        ResultSet rs = st.executeQuery("select * from second");
 
-  @Test
-  public void testPositioning() throws SQLException {
-    Statement stmt =
-        con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE);
-    ResultSet rs = stmt.executeQuery("SELECT id1,name1 FROM second");
+        // make sure we're dealing with the correct row.
+        rs.first();
+        assertEquals(1, rs.getInt(1));
+        assertEquals("anyvalue", rs.getString(2));
 
-    checkPositioning(rs);
+        // update, cancel and make sure nothings changed.
+        rs.updateInt(1, 99);
+        rs.cancelRowUpdates();
+        assertEquals(1, rs.getInt(1));
+        assertEquals("anyvalue", rs.getString(2));
 
-    assertTrue(rs.next());
-    rs.beforeFirst();
-    checkPositioning(rs);
+        // real update
+        rs.updateInt(1, 999);
+        rs.updateRow();
+        assertEquals(999, rs.getInt(1));
+        assertEquals("anyvalue", rs.getString(2));
 
-    rs.afterLast();
-    checkPositioning(rs);
+        // scroll some and make sure the update is still there
+        rs.beforeFirst();
+        rs.next();
+        assertEquals(999, rs.getInt(1));
+        assertEquals("anyvalue", rs.getString(2));
 
-    rs.beforeFirst();
-    assertTrue(rs.next());
-    assertTrue(!rs.next());
-    checkPositioning(rs);
+        // make sure the update got to the db and the driver isn't lying to us.
+        rs.close();
+        rs = st.executeQuery("select * from second");
+        rs.first();
+        assertEquals(999, rs.getInt(1));
+        assertEquals("anyvalue", rs.getString(2));
 
-    rs.afterLast();
-    assertTrue(rs.previous());
-    assertTrue(!rs.previous());
-    checkPositioning(rs);
-
-    rs.close();
-    stmt.close();
-  }
-
-  @Test
-  public void testReturnSerial() throws Exception {
-    final String ole = "Ole";
-
-    Statement st = null;
-    ResultSet rs = null;
-    try {
-      st = con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE);
-      rs = st.executeQuery("SELECT * FROM serialtable");
-
-      rs.moveToInsertRow();
-      rs.updateString("name", ole);
-      rs.insertRow();
-
-      assertTrue(rs.first());
-      assertEquals(1, rs.getInt("gen_id"));
-      assertEquals(ole, rs.getString("name"));
-
-    } finally {
-      TestUtil.closeQuietly(rs);
-      TestUtil.closeQuietly(st);
+        rs.close();
+        st.close();
     }
 
-    final String ole2 = "OleOle";
-    try {
-      st = con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE);
-      rs = st.executeQuery("SELECT name, gen_id FROM serialtable");
+    private void checkPositioning(ResultSet rs) throws SQLException {
+        try {
+            rs.getInt(1);
+            fail("Can't use an incorrectly positioned result set.");
+        } catch (SQLException sqle) {
+        }
 
-      rs.moveToInsertRow();
-      rs.updateString("name", ole2);
-      rs.insertRow();
+        try {
+            rs.updateInt(1, 2);
+            fail("Can't use an incorrectly positioned result set.");
+        } catch (SQLException sqle) {
+        }
 
-      assertTrue(rs.first());
-      assertEquals(1, rs.getInt("gen_id"));
-      assertEquals(ole, rs.getString("name"));
+        try {
+            rs.updateRow();
+            fail("Can't use an incorrectly positioned result set.");
+        } catch (SQLException sqle) {
+        }
 
-      assertTrue(rs.last());
-      assertEquals(2, rs.getInt("gen_id"));
-      assertEquals(ole2, rs.getString("name"));
-
-    } finally {
-      TestUtil.closeQuietly(rs);
-      TestUtil.closeQuietly(st);
+        try {
+            rs.deleteRow();
+            fail("Can't use an incorrectly positioned result set.");
+        } catch (SQLException sqle) {
+        }
     }
 
-    final String dec = "Dec";
-    try {
-      st = con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE);
-      rs = st.executeQuery("SELECT * FROM compositepktable");
+    @Test
+    public void testPositioning() throws SQLException {
+        Statement stmt =
+                con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE);
+        ResultSet rs = stmt.executeQuery("SELECT id1,name1 FROM second");
 
-      rs.moveToInsertRow();
-      rs.updateString("name", dec);
-      rs.insertRow();
+        checkPositioning(rs);
 
-      assertTrue(rs.first());
-      assertEquals(1, rs.getInt("gen_id"));
-      assertEquals(dec, rs.getString("name"));
-      assertEquals(10, rs.getInt("dec_id"));
+        assertTrue(rs.next());
+        rs.beforeFirst();
+        checkPositioning(rs);
 
-      rs.moveToInsertRow();
-      rs.updateString("name", dec);
-      rs.insertRow();
+        rs.afterLast();
+        checkPositioning(rs);
 
-      assertTrue(rs.last());
-      assertEquals(2, rs.getInt("gen_id"));
-      assertEquals(dec, rs.getString("name"));
-      assertEquals(20, rs.getInt("dec_id"));
+        rs.beforeFirst();
+        assertTrue(rs.next());
+        assertTrue(!rs.next());
+        checkPositioning(rs);
 
-    } finally {
-      TestUtil.closeQuietly(rs);
-      TestUtil.closeQuietly(st);
-    }
-  }
+        rs.afterLast();
+        assertTrue(rs.previous());
+        assertTrue(!rs.previous());
+        checkPositioning(rs);
 
-  @Test
-  public void testUpdateTimestamp() throws SQLException {
-    TimeZone origTZ = TimeZone.getDefault();
-    try {
-      // We choose a timezone which has a partial hour portion
-      // Asia/Tehran is +3:30
-      TimeZone.setDefault(TimeZone.getTimeZone("Asia/Tehran"));
-      Timestamp ts = Timestamp.valueOf("2006-11-20 16:17:18");
-
-      Statement stmt =
-          con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE);
-      ResultSet rs = stmt.executeQuery("SELECT id, ts FROM updateable");
-      rs.moveToInsertRow();
-      rs.updateInt(1, 1);
-      rs.updateTimestamp(2, ts);
-      rs.insertRow();
-      rs.first();
-      assertEquals(ts, rs.getTimestamp(2));
-    } finally {
-      TimeZone.setDefault(origTZ);
-    }
-  }
-
-  @Test
-  public void testUpdateStreams() throws SQLException, UnsupportedEncodingException {
-    assumeByteaSupported();
-    String string = "Hello";
-    byte[] bytes = new byte[]{0, '\\', (byte) 128, (byte) 255};
-
-    Statement stmt =
-        con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE);
-    ResultSet rs = stmt.executeQuery("SELECT id, asi, chr, bin FROM stream");
-
-    rs.moveToInsertRow();
-    rs.updateInt(1, 1);
-    rs.updateAsciiStream("asi", null, 17);
-    rs.updateCharacterStream("chr", null, 81);
-    rs.updateBinaryStream("bin", null, 0);
-    rs.insertRow();
-
-    rs.moveToInsertRow();
-    rs.updateInt(1, 3);
-    rs.updateAsciiStream("asi", new ByteArrayInputStream(string.getBytes("US-ASCII")), 5);
-    rs.updateCharacterStream("chr", new StringReader(string), 5);
-    rs.updateBinaryStream("bin", new ByteArrayInputStream(bytes), bytes.length);
-    rs.insertRow();
-
-    rs.beforeFirst();
-    rs.next();
-
-    assertEquals(1, rs.getInt(1));
-    assertNull(rs.getString(2));
-    assertNull(rs.getString(3));
-    assertNull(rs.getBytes(4));
-
-    rs.updateInt("id", 2);
-    rs.updateAsciiStream("asi", new ByteArrayInputStream(string.getBytes("US-ASCII")), 5);
-    rs.updateCharacterStream("chr", new StringReader(string), 5);
-    rs.updateBinaryStream("bin", new ByteArrayInputStream(bytes), bytes.length);
-    rs.updateRow();
-
-    assertEquals(2, rs.getInt(1));
-    assertEquals(string, rs.getString(2));
-    assertEquals(string, rs.getString(3));
-    assertArrayEquals(bytes, rs.getBytes(4));
-
-    rs.refreshRow();
-
-    assertEquals(2, rs.getInt(1));
-    assertEquals(string, rs.getString(2));
-    assertEquals(string, rs.getString(3));
-    assertArrayEquals(bytes, rs.getBytes(4));
-
-    rs.next();
-
-    assertEquals(3, rs.getInt(1));
-    assertEquals(string, rs.getString(2));
-    assertEquals(string, rs.getString(3));
-    assertArrayEquals(bytes, rs.getBytes(4));
-
-    rs.close();
-    stmt.close();
-  }
-
-  @Test
-  public void testZeroRowResult() throws SQLException {
-    Statement st =
-        con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE);
-    ResultSet rs = st.executeQuery("select * from updateable WHERE 0 > 1");
-    assertTrue(!rs.next());
-    rs.moveToInsertRow();
-    rs.moveToCurrentRow();
-  }
-
-  @Test
-  public void testUpdateable() throws SQLException {
-    Statement st =
-        con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE);
-    ResultSet rs = st.executeQuery("select * from updateable");
-    assertNotNull(rs);
-    rs.moveToInsertRow();
-    rs.updateInt(1, 1);
-    rs.updateString(2, "jake");
-    rs.updateString(3, "avalue");
-    rs.insertRow();
-    rs.first();
-
-    rs.updateInt("id", 2);
-    rs.updateString("name", "dave");
-    rs.updateRow();
-
-    assertEquals(2, rs.getInt("id"));
-    assertEquals("dave", rs.getString("name"));
-    assertEquals("avalue", rs.getString("notselected"));
-
-    rs.deleteRow();
-    rs.moveToInsertRow();
-    rs.updateInt("id", 3);
-    rs.updateString("name", "paul");
-
-    rs.insertRow();
-
-    try {
-      rs.refreshRow();
-      fail("Can't refresh when on the insert row.");
-    } catch (SQLException sqle) {
+        rs.close();
+        stmt.close();
     }
 
-    assertEquals(3, rs.getInt("id"));
-    assertEquals("paul", rs.getString("name"));
-    assertNull(rs.getString("notselected"));
+    @Test
+    public void testReturnSerial() throws Exception {
+        final String ole = "Ole";
 
-    rs.close();
+        Statement st = null;
+        ResultSet rs = null;
+        try {
+            st = con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE);
+            rs = st.executeQuery("SELECT * FROM serialtable");
+
+            rs.moveToInsertRow();
+            rs.updateString("name", ole);
+            rs.insertRow();
+
+            assertTrue(rs.first());
+            assertEquals(1, rs.getInt("gen_id"));
+            assertEquals(ole, rs.getString("name"));
+
+        } finally {
+            TestUtil.closeQuietly(rs);
+            TestUtil.closeQuietly(st);
+        }
+
+        final String ole2 = "OleOle";
+        try {
+            st = con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE);
+            rs = st.executeQuery("SELECT name, gen_id FROM serialtable");
+
+            rs.moveToInsertRow();
+            rs.updateString("name", ole2);
+            rs.insertRow();
+
+            assertTrue(rs.first());
+            assertEquals(1, rs.getInt("gen_id"));
+            assertEquals(ole, rs.getString("name"));
+
+            assertTrue(rs.last());
+            assertEquals(2, rs.getInt("gen_id"));
+            assertEquals(ole2, rs.getString("name"));
+
+        } finally {
+            TestUtil.closeQuietly(rs);
+            TestUtil.closeQuietly(st);
+        }
+
+        final String dec = "Dec";
+        try {
+            st = con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE);
+            rs = st.executeQuery("SELECT * FROM compositepktable");
+
+            rs.moveToInsertRow();
+            rs.updateString("name", dec);
+            rs.insertRow();
+
+            assertTrue(rs.first());
+            assertEquals(1, rs.getInt("gen_id"));
+            assertEquals(dec, rs.getString("name"));
+            assertEquals(10, rs.getInt("dec_id"));
+
+            rs.moveToInsertRow();
+            rs.updateString("name", dec);
+            rs.insertRow();
+
+            assertTrue(rs.last());
+            assertEquals(2, rs.getInt("gen_id"));
+            assertEquals(dec, rs.getString("name"));
+            assertEquals(20, rs.getInt("dec_id"));
+
+        } finally {
+            TestUtil.closeQuietly(rs);
+            TestUtil.closeQuietly(st);
+        }
+    }
+
+    @Test
+    public void testUpdateTimestamp() throws SQLException {
+        TimeZone origTZ = TimeZone.getDefault();
+        try {
+            // We choose a timezone which has a partial hour portion
+            // Asia/Tehran is +3:30
+            TimeZone.setDefault(TimeZone.getTimeZone("Asia/Tehran"));
+            Timestamp ts = Timestamp.valueOf("2006-11-20 16:17:18");
+
+            Statement stmt =
+                    con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE);
+            ResultSet rs = stmt.executeQuery("SELECT id, ts FROM updateable");
+            rs.moveToInsertRow();
+            rs.updateInt(1, 1);
+            rs.updateTimestamp(2, ts);
+            rs.insertRow();
+            rs.first();
+            assertEquals(ts, rs.getTimestamp(2));
+        } finally {
+            TimeZone.setDefault(origTZ);
+        }
+    }
+
+    @Test
+    public void testUpdateStreams() throws SQLException, UnsupportedEncodingException {
+        assumeByteaSupported();
+        String string = "Hello";
+        byte[] bytes = new byte[]{0, '\\', (byte) 128, (byte) 255};
+
+        Statement stmt =
+                con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE);
+        ResultSet rs = stmt.executeQuery("SELECT id, asi, chr, bin FROM stream");
+
+        rs.moveToInsertRow();
+        rs.updateInt(1, 1);
+        rs.updateAsciiStream("asi", null, 17);
+        rs.updateCharacterStream("chr", null, 81);
+        rs.updateBinaryStream("bin", null, 0);
+        rs.insertRow();
+
+        rs.moveToInsertRow();
+        rs.updateInt(1, 3);
+        rs.updateAsciiStream("asi", new ByteArrayInputStream(string.getBytes("US-ASCII")), 5);
+        rs.updateCharacterStream("chr", new StringReader(string), 5);
+        rs.updateBinaryStream("bin", new ByteArrayInputStream(bytes), bytes.length);
+        rs.insertRow();
+
+        rs.beforeFirst();
+        rs.next();
+
+        assertEquals(1, rs.getInt(1));
+        assertNull(rs.getString(2));
+        assertNull(rs.getString(3));
+        assertNull(rs.getBytes(4));
+
+        rs.updateInt("id", 2);
+        rs.updateAsciiStream("asi", new ByteArrayInputStream(string.getBytes("US-ASCII")), 5);
+        rs.updateCharacterStream("chr", new StringReader(string), 5);
+        rs.updateBinaryStream("bin", new ByteArrayInputStream(bytes), bytes.length);
+        rs.updateRow();
+
+        assertEquals(2, rs.getInt(1));
+        assertEquals(string, rs.getString(2));
+        assertEquals(string, rs.getString(3));
+        assertArrayEquals(bytes, rs.getBytes(4));
+
+        rs.refreshRow();
+
+        assertEquals(2, rs.getInt(1));
+        assertEquals(string, rs.getString(2));
+        assertEquals(string, rs.getString(3));
+        assertArrayEquals(bytes, rs.getBytes(4));
+
+        rs.next();
+
+        assertEquals(3, rs.getInt(1));
+        assertEquals(string, rs.getString(2));
+        assertEquals(string, rs.getString(3));
+        assertArrayEquals(bytes, rs.getBytes(4));
+
+        rs.close();
+        stmt.close();
+    }
+
+    @Test
+    public void testZeroRowResult() throws SQLException {
+        Statement st =
+                con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE);
+        ResultSet rs = st.executeQuery("select * from updateable WHERE 0 > 1");
+        assertTrue(!rs.next());
+        rs.moveToInsertRow();
+        rs.moveToCurrentRow();
+    }
+
+    @Test
+    public void testUpdateable() throws SQLException {
+        Statement st =
+                con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE);
+        ResultSet rs = st.executeQuery("select * from updateable");
+        assertNotNull(rs);
+        rs.moveToInsertRow();
+        rs.updateInt(1, 1);
+        rs.updateString(2, "jake");
+        rs.updateString(3, "avalue");
+        rs.insertRow();
+        rs.first();
 
-    rs = st.executeQuery("select id1, id, name, name1 from updateable, second");
-    try {
-      while (rs.next()) {
         rs.updateInt("id", 2);
         rs.updateString("name", "dave");
         rs.updateRow();
-      }
-      fail("should not get here, update should fail");
-    } catch (SQLException ex) {
-    }
 
-    rs = st.executeQuery("select * from updateable");
-    assertTrue(rs.first());
-    rs.updateInt("id", 3);
-    rs.updateString("name", "dave3");
-    rs.updateRow();
-    assertEquals(3, rs.getInt("id"));
-    assertEquals("dave3", rs.getString("name"));
+        assertEquals(2, rs.getInt("id"));
+        assertEquals("dave", rs.getString("name"));
+        assertEquals("avalue", rs.getString("notselected"));
 
-    rs.moveToInsertRow();
-    rs.updateInt("id", 4);
-    rs.updateString("name", "dave4");
+        rs.deleteRow();
+        rs.moveToInsertRow();
+        rs.updateInt("id", 3);
+        rs.updateString("name", "paul");
 
-    rs.insertRow();
-    rs.updateInt("id", 5);
-    rs.updateString("name", "dave5");
-    rs.insertRow();
+        rs.insertRow();
 
-    rs.moveToCurrentRow();
-    assertEquals(3, rs.getInt("id"));
-    assertEquals("dave3", rs.getString("name"));
+        try {
+            rs.refreshRow();
+            fail("Can't refresh when on the insert row.");
+        } catch (SQLException sqle) {
+        }
 
-    assertTrue(rs.next());
-    assertEquals(4, rs.getInt("id"));
-    assertEquals("dave4", rs.getString("name"));
+        assertEquals(3, rs.getInt("id"));
+        assertEquals("paul", rs.getString("name"));
+        assertNull(rs.getString("notselected"));
 
-    assertTrue(rs.next());
-    assertEquals(5, rs.getInt("id"));
-    assertEquals("dave5", rs.getString("name"));
+        rs.close();
 
-    rs.close();
-    st.close();
-  }
+        rs = st.executeQuery("select id1, id, name, name1 from updateable, second");
+        try {
+            while (rs.next()) {
+                rs.updateInt("id", 2);
+                rs.updateString("name", "dave");
+                rs.updateRow();
+            }
+            fail("should not get here, update should fail");
+        } catch (SQLException ex) {
+        }
 
-  @Test
-  public void testUpdateDate() throws Exception {
-    Date testDate = Date.valueOf("2021-01-01");
-    TestUtil.execute(con, "insert into hasdate values (1,'2021-01-01'::date)");
-    con.setAutoCommit(false);
-    String sql = "SELECT * FROM hasdate where id=1";
-    ResultSet rs = con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE,
-        ResultSet.CONCUR_UPDATABLE).executeQuery(sql);
-    assertTrue(rs.next());
-    assertEquals(testDate, rs.getDate("dt"));
-    rs.updateDate("dt", Date.valueOf("2020-01-01"));
-    rs.updateRow();
-    assertEquals(Date.valueOf("2020-01-01"), rs.getDate("dt"));
-    con.commit();
-    rs = con.createStatement().executeQuery("select dt from hasdate where id=1");
-    assertTrue(rs.next());
-    assertEquals(Date.valueOf("2020-01-01"), rs.getDate("dt"));
-    rs.close();
-  }
-
-  @Test
-  public void test2193() throws Exception {
-    Statement st =
-        con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE);
-    ResultSet rs = st.executeQuery("select * from updateable");
-    assertNotNull(rs);
-    rs.moveToInsertRow();
-    rs.updateInt(1, 1);
-    rs.updateString(2, "jake");
-    rs.updateString(3, "avalue");
-    rs.insertRow();
-    rs.first();
-
-    rs.updateString(2, "bob");
-    rs.updateRow();
-    rs.refreshRow();
-    rs.updateString(2, "jake");
-    rs.updateRow();
-  }
-
-  @Test
-  public void testInsertRowIllegalMethods() throws Exception {
-    Statement st =
-        con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE);
-    ResultSet rs = st.executeQuery("select * from updateable");
-    assertNotNull(rs);
-    rs.moveToInsertRow();
-
-    try {
-      rs.cancelRowUpdates();
-      fail("expected an exception when calling cancelRowUpdates() on the insert row");
-    } catch (SQLException e) {
-    }
-
-    try {
-      rs.updateRow();
-      fail("expected an exception when calling updateRow() on the insert row");
-    } catch (SQLException e) {
-    }
-
-    try {
-      rs.deleteRow();
-      fail("expected an exception when calling deleteRow() on the insert row");
-    } catch (SQLException e) {
-    }
-
-    try {
-      rs.refreshRow();
-      fail("expected an exception when calling refreshRow() on the insert row");
-    } catch (SQLException e) {
-    }
-
-    rs.close();
-    st.close();
-  }
-
-  @Test
-  public void testUpdateablePreparedStatement() throws Exception {
-    // No args.
-    PreparedStatement st = con.prepareStatement("select * from updateable",
-        ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE);
-    ResultSet rs = st.executeQuery();
-    rs.moveToInsertRow();
-    rs.close();
-    st.close();
-
-    // With args.
-    st = con.prepareStatement("select * from updateable where id = ?",
-        ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE);
-    st.setInt(1, 1);
-    rs = st.executeQuery();
-    rs.moveToInsertRow();
-    rs.close();
-    st.close();
-  }
-
-  @Test
-  public void testUpdateSelectOnly() throws Exception {
-    Statement st =
-        con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE);
-
-    ResultSet rs = st.executeQuery("select * from only second");
-    assertTrue(rs.next());
-    rs.updateInt(1, 2);
-    rs.updateRow();
-  }
-
-  @Test
-  public void testUpdateReadOnlyResultSet() throws Exception {
-    Statement st =
-        con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY);
-    ResultSet rs = st.executeQuery("select * from updateable");
-    try {
-      rs.moveToInsertRow();
-      fail("expected an exception when calling moveToInsertRow() on a read-only resultset");
-    } catch (SQLException e) {
-    }
-  }
-
-  @Test
-  public void testBadColumnIndexes() throws Exception {
-    Statement st =
-        con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE);
-    ResultSet rs = st.executeQuery("select * from updateable");
-    rs.moveToInsertRow();
-    try {
-      rs.updateInt(0, 1);
-      fail("Should have thrown an exception on bad column index.");
-    } catch (SQLException sqle) {
-    }
-    try {
-      rs.updateString(1000, "hi");
-      fail("Should have thrown an exception on bad column index.");
-    } catch (SQLException sqle) {
-    }
-    try {
-      rs.updateNull(1000);
-      fail("Should have thrown an exception on bad column index.");
-    } catch (SQLException sqle) {
-    }
-  }
-
-  @Test
-  public void testArray() throws SQLException {
-    Statement stmt =
-        con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE);
-    stmt.executeUpdate("INSERT INTO updateable (id, intarr) VALUES (1, '{1,2,3}'::int4[])");
-    ResultSet rs = stmt.executeQuery("SELECT id, intarr FROM updateable");
-    assertTrue(rs.next());
-    rs.updateObject(2, rs.getArray(2));
-    rs.updateRow();
-
-    Array arr = rs.getArray(2);
-    assertEquals(Types.INTEGER, arr.getBaseType());
-    Integer[] intarr = (Integer[]) arr.getArray();
-    assertEquals(3, intarr.length);
-    assertEquals(1, intarr[0].intValue());
-    assertEquals(2, intarr[1].intValue());
-    assertEquals(3, intarr[2].intValue());
-    rs.close();
-
-    rs = stmt.executeQuery("SELECT id,intarr FROM updateable");
-    assertTrue(rs.next());
-    arr = rs.getArray(2);
-    assertEquals(Types.INTEGER, arr.getBaseType());
-    intarr = (Integer[]) arr.getArray();
-    assertEquals(3, intarr.length);
-    assertEquals(1, intarr[0].intValue());
-    assertEquals(2, intarr[1].intValue());
-    assertEquals(3, intarr[2].intValue());
-
-    rs.close();
-    stmt.close();
-  }
-
-  @Test
-  public void testMultiColumnUpdateWithoutAllColumns() throws Exception {
-    Statement st =
-        con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE);
-    ResultSet rs = st.executeQuery("select id1,val from multicol");
-    try {
-      rs.moveToInsertRow();
-      fail("Move to insert row succeeded. It should not");
-    } catch (SQLException sqle) {
-      // Ensure we're reporting that the RS is not updatable.
-      assertEquals("24000", sqle.getSQLState());
-    } finally {
-      TestUtil.closeQuietly(rs);
-      TestUtil.closeQuietly(st);
-    }
-  }
-
-  @Test
-  public void testMultiColumnUpdateWithoutPrimaryKey() throws Exception {
-    Statement st =
-        con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE);
-    ResultSet rs = st.executeQuery("select * from nopkmulticol");
-    try {
-      rs.moveToInsertRow();
-      fail("Move to insert row succeeded. It should not");
-    } catch (SQLException sqle) {
-      // Ensure we're reporting that the RS is not updatable.
-      assertEquals("24000", sqle.getSQLState());
-    } finally {
-      TestUtil.closeQuietly(rs);
-      TestUtil.closeQuietly(st);
-    }
-  }
-
-  @Test
-  public void testMultiColumnUpdate() throws Exception {
-    Statement st =
-        con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE);
-    st.executeUpdate("INSERT INTO multicol (id1,id2,val) VALUES (1,2,'val')");
-
-    ResultSet rs = st.executeQuery("SELECT id1, id2, val FROM multicol");
-    assertTrue(rs.next());
-    assertEquals("val", rs.getString("val"));
-    rs.updateString("val", "newval");
-    rs.updateRow();
-    rs.close();
-
-    rs = st.executeQuery("SELECT id1, id2, val FROM multicol");
-    assertTrue(rs.next());
-    assertEquals("newval", rs.getString("val"));
-    rs.close();
-    st.close();
-  }
-
-  @Test
-  public void simpleAndUpdateableSameQuery() throws Exception {
-    PGConnection unwrap = con.unwrap(PGConnection.class);
-    Assume.assumeNotNull(unwrap);
-    int prepareThreshold = unwrap.getPrepareThreshold();
-    String sql = "select * from second where id1=?";
-    for (int i = 0; i <= prepareThreshold; i++) {
-      PreparedStatement ps = null;
-      ResultSet rs = null;
-      try {
-        ps = con.prepareStatement(sql);
-        ps.setInt(1, 1);
-        rs = ps.executeQuery();
-        rs.next();
-        String name1 = rs.getString("name1");
-        Assert.assertEquals("anyvalue", name1);
-        int id1 = rs.getInt("id1");
-        Assert.assertEquals(1, id1);
-      } finally {
-        TestUtil.closeQuietly(rs);
-        TestUtil.closeQuietly(ps);
-      }
-    }
-    // The same SQL, and use updateable ResultSet
-    {
-      PreparedStatement ps = null;
-      ResultSet rs = null;
-      try {
-        ps = con.prepareStatement(sql, ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_UPDATABLE);
-        ps.setInt(1, 1);
-        rs = ps.executeQuery();
-        rs.next();
-        String name1 = rs.getString("name1");
-        Assert.assertEquals("anyvalue", name1);
-        int id1 = rs.getInt("id1");
-        Assert.assertEquals(1, id1);
-        rs.updateString("name1", "updatedValue");
+        rs = st.executeQuery("select * from updateable");
+        assertTrue(rs.first());
+        rs.updateInt("id", 3);
+        rs.updateString("name", "dave3");
         rs.updateRow();
-      } finally {
-        TestUtil.closeQuietly(rs);
-        TestUtil.closeQuietly(ps);
-      }
+        assertEquals(3, rs.getInt("id"));
+        assertEquals("dave3", rs.getString("name"));
+
+        rs.moveToInsertRow();
+        rs.updateInt("id", 4);
+        rs.updateString("name", "dave4");
+
+        rs.insertRow();
+        rs.updateInt("id", 5);
+        rs.updateString("name", "dave5");
+        rs.insertRow();
+
+        rs.moveToCurrentRow();
+        assertEquals(3, rs.getInt("id"));
+        assertEquals("dave3", rs.getString("name"));
+
+        assertTrue(rs.next());
+        assertEquals(4, rs.getInt("id"));
+        assertEquals("dave4", rs.getString("name"));
+
+        assertTrue(rs.next());
+        assertEquals(5, rs.getInt("id"));
+        assertEquals("dave5", rs.getString("name"));
+
+        rs.close();
+        st.close();
     }
-  }
 
-  @Test
-  public void testUpdateBoolean() throws Exception {
-
-    Statement st = con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE,
-        ResultSet.CONCUR_UPDATABLE);
-    ResultSet rs = st.executeQuery("SELECT * FROM booltable WHERE id=1");
-    assertTrue(rs.next());
-    assertFalse(rs.getBoolean("b"));
-    rs.updateBoolean("b", true);
-    rs.updateRow();
-    //rs.refreshRow(); //fetches the value stored
-    assertTrue(rs.getBoolean("b"));
-  }
-
-  @Test
-  public void testOidUpdatable() throws Exception {
-    Connection privilegedCon = TestUtil.openPrivilegedDB();
-    try {
-      Statement st = privilegedCon.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE,
-          ResultSet.CONCUR_UPDATABLE);
-      ResultSet rs = st.executeQuery("SELECT oid,* FROM pg_class WHERE relname = 'pg_class'");
-      assertTrue(rs.next());
-      assertTrue(rs.first());
-      rs.updateString("relname", "pg_class");
-      rs.updateRow();
-      rs.close();
-      st.close();
-    } finally {
-      privilegedCon.close();
+    @Test
+    public void testUpdateDate() throws Exception {
+        Date testDate = Date.valueOf("2021-01-01");
+        TestUtil.execute(con, "insert into hasdate values (1,'2021-01-01'::date)");
+        con.setAutoCommit(false);
+        String sql = "SELECT * FROM hasdate where id=1";
+        ResultSet rs = con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE,
+                ResultSet.CONCUR_UPDATABLE).executeQuery(sql);
+        assertTrue(rs.next());
+        assertEquals(testDate, rs.getDate("dt"));
+        rs.updateDate("dt", Date.valueOf("2020-01-01"));
+        rs.updateRow();
+        assertEquals(Date.valueOf("2020-01-01"), rs.getDate("dt"));
+        con.commit();
+        rs = con.createStatement().executeQuery("select dt from hasdate where id=1");
+        assertTrue(rs.next());
+        assertEquals(Date.valueOf("2020-01-01"), rs.getDate("dt"));
+        rs.close();
     }
-  }
 
-  @Test
-  public void testUniqueWithNullableColumnsNotUpdatable() throws Exception {
-    Statement st = con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE,
-        ResultSet.CONCUR_UPDATABLE);
-    ResultSet rs = st.executeQuery("SELECT u1, name1 from unique_null_constraint");
-    assertTrue(rs.next());
-    assertTrue(rs.first());
-    try {
-      rs.updateString("name1", "bob");
-      fail("Should have failed since unique column u1 is nullable");
-    } catch (SQLException ex) {
-      assertEquals("No eligible primary or unique key found for table unique_null_constraint.",
-          ex.getMessage());
+    @Test
+    public void test2193() throws Exception {
+        Statement st =
+                con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE);
+        ResultSet rs = st.executeQuery("select * from updateable");
+        assertNotNull(rs);
+        rs.moveToInsertRow();
+        rs.updateInt(1, 1);
+        rs.updateString(2, "jake");
+        rs.updateString(3, "avalue");
+        rs.insertRow();
+        rs.first();
+
+        rs.updateString(2, "bob");
+        rs.updateRow();
+        rs.refreshRow();
+        rs.updateString(2, "jake");
+        rs.updateRow();
     }
-    rs.close();
-    st.close();
-  }
 
-  @Test
-  public void testPrimaryAndUniqueUpdateableByPrimary() throws Exception {
-    Statement st = con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE,
-        ResultSet.CONCUR_UPDATABLE);
-    ResultSet rs = st.executeQuery("SELECT id, dt from primaryunique");
-    assertTrue(rs.next());
-    assertTrue(rs.first());
-    int id = rs.getInt("id");
-    rs.updateDate("dt", Date.valueOf("1999-01-01"));
-    rs.updateRow();
-    assertFalse(rs.next());
-    rs.close();
-    rs = st.executeQuery("select dt from primaryunique where id = " + id);
-    assertTrue(rs.next());
-    assertEquals(Date.valueOf("1999-01-01"), rs.getDate("dt"));
-    rs.close();
-    st.close();
-  }
+    @Test
+    public void testInsertRowIllegalMethods() throws Exception {
+        Statement st =
+                con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE);
+        ResultSet rs = st.executeQuery("select * from updateable");
+        assertNotNull(rs);
+        rs.moveToInsertRow();
 
-  @Test
-  public void testPrimaryAndUniqueUpdateableByUnique() throws Exception {
-    Statement st = con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE,
-        ResultSet.CONCUR_UPDATABLE);
-    ResultSet rs = st.executeQuery("SELECT name, dt from primaryunique");
-    assertTrue(rs.next());
-    assertTrue(rs.first());
-    String name = rs.getString("name");
-    rs.updateDate("dt", Date.valueOf("1999-01-01"));
-    rs.updateRow();
-    assertFalse(rs.next());
-    rs.close();
-    rs = st.executeQuery("select dt from primaryunique where name = '" + name + "'");
-    assertTrue(rs.next());
-    assertEquals(Date.valueOf("1999-01-01"), rs.getDate("dt"));
-    rs.close();
-    st.close();
-  }
+        try {
+            rs.cancelRowUpdates();
+            fail("expected an exception when calling cancelRowUpdates() on the insert row");
+        } catch (SQLException e) {
+        }
 
-  @Test
-  public void testUniqueWithNullAndNotNullableColumnUpdateable() throws Exception {
-    Statement st = con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE,
-        ResultSet.CONCUR_UPDATABLE);
-    int id = 0;
-    int id2 = 0;
-    ResultSet rs = st.executeQuery("SELECT id, id2, dt from uniquekeys");
-    assertTrue(rs.next());
-    assertTrue(rs.first());
-    id = rs.getInt("id");
-    id2 = rs.getInt("id2");
-    rs.updateDate("dt", Date.valueOf("1999-01-01"));
-    rs.updateRow();
-    rs.close();
-    rs = st.executeQuery("select dt from uniquekeys where id = " + id + " and id2 = " + id2);
-    assertNotNull(rs);
-    assertTrue(rs.next());
-    assertEquals(Date.valueOf("1999-01-01"), rs.getDate("dt"));
-    rs.close();
-    st.close();
-  }
+        try {
+            rs.updateRow();
+            fail("expected an exception when calling updateRow() on the insert row");
+        } catch (SQLException e) {
+        }
 
-  @Test
-  public void testUniqueWithNotNullableColumnUpdateable() throws Exception {
-    Statement st = con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE,
-        ResultSet.CONCUR_UPDATABLE);
-    int id = 0;
-    ResultSet rs = st.executeQuery("SELECT id, dt from uniquekeys");
-    assertTrue(rs.next());
-    assertTrue(rs.first());
-    id = rs.getInt("id");
-    rs.updateDate("dt", Date.valueOf("1999-01-01"));
-    rs.updateRow();
-    rs.close();
-    rs = st.executeQuery("select id, dt from uniquekeys where id = " + id);
-    assertNotNull(rs);
-    assertTrue(rs.next());
-    assertEquals(Date.valueOf("1999-01-01"), rs.getDate("dt"));
-    rs.close();
-    st.close();
-  }
+        try {
+            rs.deleteRow();
+            fail("expected an exception when calling deleteRow() on the insert row");
+        } catch (SQLException e) {
+        }
 
-  @Test
-  public void testUniqueWithNullableColumnNotUpdateable() throws Exception {
-    Statement st = con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE,
-        ResultSet.CONCUR_UPDATABLE);
-    ResultSet rs = st.executeQuery("SELECT id2, dt from uniquekeys");
-    assertTrue(rs.next());
-    assertTrue(rs.first());
-    try {
-      rs.updateDate("dt", Date.valueOf("1999-01-01"));
-      fail("Should have failed since id2 is nullable column");
-    } catch (SQLException ex) {
-      assertEquals("No eligible primary or unique key found for table uniquekeys.",
-          ex.getMessage());
+        try {
+            rs.refreshRow();
+            fail("expected an exception when calling refreshRow() on the insert row");
+        } catch (SQLException e) {
+        }
+
+        rs.close();
+        st.close();
     }
-    rs.close();
-    st.close();
-  }
 
-  @Test
-  public void testNoUniqueNotUpdateable() throws SQLException {
-    Statement st = con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE,
-        ResultSet.CONCUR_UPDATABLE);
-    ResultSet rs = st.executeQuery("SELECT dt from uniquekeys");
-    assertTrue(rs.next());
-    assertTrue(rs.first());
-    try {
-      rs.updateDate("dt", Date.valueOf("1999-01-01"));
-      fail("Should have failed since no UK/PK are in the select statement");
-    } catch (SQLException ex) {
-      assertEquals("No eligible primary or unique key found for table uniquekeys.",
-          ex.getMessage());
+    @Test
+    public void testUpdateablePreparedStatement() throws Exception {
+        // No args.
+        PreparedStatement st = con.prepareStatement("select * from updateable",
+                ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE);
+        ResultSet rs = st.executeQuery();
+        rs.moveToInsertRow();
+        rs.close();
+        st.close();
+
+        // With args.
+        st = con.prepareStatement("select * from updateable where id = ?",
+                ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE);
+        st.setInt(1, 1);
+        rs = st.executeQuery();
+        rs.moveToInsertRow();
+        rs.close();
+        st.close();
+    }
+
+    @Test
+    public void testUpdateSelectOnly() throws Exception {
+        Statement st =
+                con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE);
+
+        ResultSet rs = st.executeQuery("select * from only second");
+        assertTrue(rs.next());
+        rs.updateInt(1, 2);
+        rs.updateRow();
+    }
+
+    @Test
+    public void testUpdateReadOnlyResultSet() throws Exception {
+        Statement st =
+                con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY);
+        ResultSet rs = st.executeQuery("select * from updateable");
+        try {
+            rs.moveToInsertRow();
+            fail("expected an exception when calling moveToInsertRow() on a read-only resultset");
+        } catch (SQLException e) {
+        }
+    }
+
+    @Test
+    public void testBadColumnIndexes() throws Exception {
+        Statement st =
+                con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE);
+        ResultSet rs = st.executeQuery("select * from updateable");
+        rs.moveToInsertRow();
+        try {
+            rs.updateInt(0, 1);
+            fail("Should have thrown an exception on bad column index.");
+        } catch (SQLException sqle) {
+        }
+        try {
+            rs.updateString(1000, "hi");
+            fail("Should have thrown an exception on bad column index.");
+        } catch (SQLException sqle) {
+        }
+        try {
+            rs.updateNull(1000);
+            fail("Should have thrown an exception on bad column index.");
+        } catch (SQLException sqle) {
+        }
+    }
+
+    @Test
+    public void testArray() throws SQLException {
+        Statement stmt =
+                con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE);
+        stmt.executeUpdate("INSERT INTO updateable (id, intarr) VALUES (1, '{1,2,3}'::int4[])");
+        ResultSet rs = stmt.executeQuery("SELECT id, intarr FROM updateable");
+        assertTrue(rs.next());
+        rs.updateObject(2, rs.getArray(2));
+        rs.updateRow();
+
+        Array arr = rs.getArray(2);
+        assertEquals(Types.INTEGER, arr.getBaseType());
+        Integer[] intarr = (Integer[]) arr.getArray();
+        assertEquals(3, intarr.length);
+        assertEquals(1, intarr[0].intValue());
+        assertEquals(2, intarr[1].intValue());
+        assertEquals(3, intarr[2].intValue());
+        rs.close();
+
+        rs = stmt.executeQuery("SELECT id,intarr FROM updateable");
+        assertTrue(rs.next());
+        arr = rs.getArray(2);
+        assertEquals(Types.INTEGER, arr.getBaseType());
+        intarr = (Integer[]) arr.getArray();
+        assertEquals(3, intarr.length);
+        assertEquals(1, intarr[0].intValue());
+        assertEquals(2, intarr[1].intValue());
+        assertEquals(3, intarr[2].intValue());
+
+        rs.close();
+        stmt.close();
+    }
+
+    @Test
+    public void testMultiColumnUpdateWithoutAllColumns() throws Exception {
+        Statement st =
+                con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE);
+        ResultSet rs = st.executeQuery("select id1,val from multicol");
+        try {
+            rs.moveToInsertRow();
+            fail("Move to insert row succeeded. It should not");
+        } catch (SQLException sqle) {
+            // Ensure we're reporting that the RS is not updatable.
+            assertEquals("24000", sqle.getSQLState());
+        } finally {
+            TestUtil.closeQuietly(rs);
+            TestUtil.closeQuietly(st);
+        }
+    }
+
+    @Test
+    public void testMultiColumnUpdateWithoutPrimaryKey() throws Exception {
+        Statement st =
+                con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE);
+        ResultSet rs = st.executeQuery("select * from nopkmulticol");
+        try {
+            rs.moveToInsertRow();
+            fail("Move to insert row succeeded. It should not");
+        } catch (SQLException sqle) {
+            // Ensure we're reporting that the RS is not updatable.
+            assertEquals("24000", sqle.getSQLState());
+        } finally {
+            TestUtil.closeQuietly(rs);
+            TestUtil.closeQuietly(st);
+        }
+    }
+
+    @Test
+    public void testMultiColumnUpdate() throws Exception {
+        Statement st =
+                con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE);
+        st.executeUpdate("INSERT INTO multicol (id1,id2,val) VALUES (1,2,'val')");
+
+        ResultSet rs = st.executeQuery("SELECT id1, id2, val FROM multicol");
+        assertTrue(rs.next());
+        assertEquals("val", rs.getString("val"));
+        rs.updateString("val", "newval");
+        rs.updateRow();
+        rs.close();
+
+        rs = st.executeQuery("SELECT id1, id2, val FROM multicol");
+        assertTrue(rs.next());
+        assertEquals("newval", rs.getString("val"));
+        rs.close();
+        st.close();
+    }
+
+    @Test
+    public void simpleAndUpdateableSameQuery() throws Exception {
+        PGConnection unwrap = con.unwrap(PGConnection.class);
+        Assume.assumeNotNull(unwrap);
+        int prepareThreshold = unwrap.getPrepareThreshold();
+        String sql = "select * from second where id1=?";
+        for (int i = 0; i <= prepareThreshold; i++) {
+            PreparedStatement ps = null;
+            ResultSet rs = null;
+            try {
+                ps = con.prepareStatement(sql);
+                ps.setInt(1, 1);
+                rs = ps.executeQuery();
+                rs.next();
+                String name1 = rs.getString("name1");
+                Assert.assertEquals("anyvalue", name1);
+                int id1 = rs.getInt("id1");
+                Assert.assertEquals(1, id1);
+            } finally {
+                TestUtil.closeQuietly(rs);
+                TestUtil.closeQuietly(ps);
+            }
+        }
+        // The same SQL, and use updateable ResultSet
+        {
+            PreparedStatement ps = null;
+            ResultSet rs = null;
+            try {
+                ps = con.prepareStatement(sql, ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_UPDATABLE);
+                ps.setInt(1, 1);
+                rs = ps.executeQuery();
+                rs.next();
+                String name1 = rs.getString("name1");
+                Assert.assertEquals("anyvalue", name1);
+                int id1 = rs.getInt("id1");
+                Assert.assertEquals(1, id1);
+                rs.updateString("name1", "updatedValue");
+                rs.updateRow();
+            } finally {
+                TestUtil.closeQuietly(rs);
+                TestUtil.closeQuietly(ps);
+            }
+        }
+    }
+
+    @Test
+    public void testUpdateBoolean() throws Exception {
+
+        Statement st = con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE,
+                ResultSet.CONCUR_UPDATABLE);
+        ResultSet rs = st.executeQuery("SELECT * FROM booltable WHERE id=1");
+        assertTrue(rs.next());
+        assertFalse(rs.getBoolean("b"));
+        rs.updateBoolean("b", true);
+        rs.updateRow();
+        //rs.refreshRow(); //fetches the value stored
+        assertTrue(rs.getBoolean("b"));
+    }
+
+    @Test
+    public void testOidUpdatable() throws Exception {
+        Connection privilegedCon = TestUtil.openPrivilegedDB();
+        try {
+            Statement st = privilegedCon.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE,
+                    ResultSet.CONCUR_UPDATABLE);
+            ResultSet rs = st.executeQuery("SELECT oid,* FROM pg_class WHERE relname = 'pg_class'");
+            assertTrue(rs.next());
+            assertTrue(rs.first());
+            rs.updateString("relname", "pg_class");
+            rs.updateRow();
+            rs.close();
+            st.close();
+        } finally {
+            privilegedCon.close();
+        }
+    }
+
+    @Test
+    public void testUniqueWithNullableColumnsNotUpdatable() throws Exception {
+        Statement st = con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE,
+                ResultSet.CONCUR_UPDATABLE);
+        ResultSet rs = st.executeQuery("SELECT u1, name1 from unique_null_constraint");
+        assertTrue(rs.next());
+        assertTrue(rs.first());
+        try {
+            rs.updateString("name1", "bob");
+            fail("Should have failed since unique column u1 is nullable");
+        } catch (SQLException ex) {
+            assertEquals("No eligible primary or unique key found for table unique_null_constraint.",
+                    ex.getMessage());
+        }
+        rs.close();
+        st.close();
+    }
+
+    @Test
+    public void testPrimaryAndUniqueUpdateableByPrimary() throws Exception {
+        Statement st = con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE,
+                ResultSet.CONCUR_UPDATABLE);
+        ResultSet rs = st.executeQuery("SELECT id, dt from primaryunique");
+        assertTrue(rs.next());
+        assertTrue(rs.first());
+        int id = rs.getInt("id");
+        rs.updateDate("dt", Date.valueOf("1999-01-01"));
+        rs.updateRow();
+        assertFalse(rs.next());
+        rs.close();
+        rs = st.executeQuery("select dt from primaryunique where id = " + id);
+        assertTrue(rs.next());
+        assertEquals(Date.valueOf("1999-01-01"), rs.getDate("dt"));
+        rs.close();
+        st.close();
+    }
+
+    @Test
+    public void testPrimaryAndUniqueUpdateableByUnique() throws Exception {
+        Statement st = con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE,
+                ResultSet.CONCUR_UPDATABLE);
+        ResultSet rs = st.executeQuery("SELECT name, dt from primaryunique");
+        assertTrue(rs.next());
+        assertTrue(rs.first());
+        String name = rs.getString("name");
+        rs.updateDate("dt", Date.valueOf("1999-01-01"));
+        rs.updateRow();
+        assertFalse(rs.next());
+        rs.close();
+        rs = st.executeQuery("select dt from primaryunique where name = '" + name + "'");
+        assertTrue(rs.next());
+        assertEquals(Date.valueOf("1999-01-01"), rs.getDate("dt"));
+        rs.close();
+        st.close();
+    }
+
+    @Test
+    public void testUniqueWithNullAndNotNullableColumnUpdateable() throws Exception {
+        Statement st = con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE,
+                ResultSet.CONCUR_UPDATABLE);
+        int id = 0;
+        int id2 = 0;
+        ResultSet rs = st.executeQuery("SELECT id, id2, dt from uniquekeys");
+        assertTrue(rs.next());
+        assertTrue(rs.first());
+        id = rs.getInt("id");
+        id2 = rs.getInt("id2");
+        rs.updateDate("dt", Date.valueOf("1999-01-01"));
+        rs.updateRow();
+        rs.close();
+        rs = st.executeQuery("select dt from uniquekeys where id = " + id + " and id2 = " + id2);
+        assertNotNull(rs);
+        assertTrue(rs.next());
+        assertEquals(Date.valueOf("1999-01-01"), rs.getDate("dt"));
+        rs.close();
+        st.close();
+    }
+
+    @Test
+    public void testUniqueWithNotNullableColumnUpdateable() throws Exception {
+        Statement st = con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE,
+                ResultSet.CONCUR_UPDATABLE);
+        int id = 0;
+        ResultSet rs = st.executeQuery("SELECT id, dt from uniquekeys");
+        assertTrue(rs.next());
+        assertTrue(rs.first());
+        id = rs.getInt("id");
+        rs.updateDate("dt", Date.valueOf("1999-01-01"));
+        rs.updateRow();
+        rs.close();
+        rs = st.executeQuery("select id, dt from uniquekeys where id = " + id);
+        assertNotNull(rs);
+        assertTrue(rs.next());
+        assertEquals(Date.valueOf("1999-01-01"), rs.getDate("dt"));
+        rs.close();
+        st.close();
+    }
+
+    @Test
+    public void testUniqueWithNullableColumnNotUpdateable() throws Exception {
+        Statement st = con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE,
+                ResultSet.CONCUR_UPDATABLE);
+        ResultSet rs = st.executeQuery("SELECT id2, dt from uniquekeys");
+        assertTrue(rs.next());
+        assertTrue(rs.first());
+        try {
+            rs.updateDate("dt", Date.valueOf("1999-01-01"));
+            fail("Should have failed since id2 is nullable column");
+        } catch (SQLException ex) {
+            assertEquals("No eligible primary or unique key found for table uniquekeys.",
+                    ex.getMessage());
+        }
+        rs.close();
+        st.close();
+    }
+
+    @Test
+    public void testNoUniqueNotUpdateable() throws SQLException {
+        Statement st = con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE,
+                ResultSet.CONCUR_UPDATABLE);
+        ResultSet rs = st.executeQuery("SELECT dt from uniquekeys");
+        assertTrue(rs.next());
+        assertTrue(rs.first());
+        try {
+            rs.updateDate("dt", Date.valueOf("1999-01-01"));
+            fail("Should have failed since no UK/PK are in the select statement");
+        } catch (SQLException ex) {
+            assertEquals("No eligible primary or unique key found for table uniquekeys.",
+                    ex.getMessage());
+        }
+        rs.close();
+        st.close();
     }
-    rs.close();
-    st.close();
-  }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/UpsertTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/UpsertTest.java
index 6e713d4..d251970 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/UpsertTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/UpsertTest.java
@@ -5,188 +5,185 @@
 
 package org.postgresql.test.jdbc2;
 
-import static org.junit.Assert.assertEquals;
-
-import org.postgresql.core.ServerVersion;
-import org.postgresql.test.TestUtil;
-
-import org.junit.Assert;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.sql.Statement;
 import java.util.ArrayList;
 import java.util.Collection;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.postgresql.core.ServerVersion;
+import org.postgresql.test.TestUtil;
+import static org.junit.Assert.assertEquals;
 
 /**
  * Tests {@code INSERT .. ON CONFLICT} introduced in PostgreSQL 9.5.
  */
 @RunWith(Parameterized.class)
 public class UpsertTest extends BaseTest4 {
-  public UpsertTest(BinaryMode binaryMode, ReWriteBatchedInserts rewrite) {
-    setBinaryMode(binaryMode);
-    setReWriteBatchedInserts(rewrite);
-  }
-
-  @Parameterized.Parameters(name = "binary = {0}, reWriteBatchedInserts = {1}")
-  public static Iterable<Object[]> data() {
-    Collection<Object[]> ids = new ArrayList<>();
-    for (BinaryMode binaryMode : BinaryMode.values()) {
-      for (ReWriteBatchedInserts rewrite : ReWriteBatchedInserts.values()) {
-        ids.add(new Object[]{binaryMode, rewrite});
-      }
+    public UpsertTest(BinaryMode binaryMode, ReWriteBatchedInserts rewrite) {
+        setBinaryMode(binaryMode);
+        setReWriteBatchedInserts(rewrite);
     }
-    return ids;
-  }
 
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-    assumeMinimumServerVersion(ServerVersion.v9_5);
-
-    TestUtil.createTempTable(con, "test_statement", "i int primary key, t varchar(5)");
-    Statement stmt = con.createStatement();
-    stmt.executeUpdate("INSERT INTO test_statement(i, t) VALUES (42, '42')");
-    TestUtil.closeQuietly(stmt);
-  }
-
-  @Override
-  public void tearDown() throws SQLException {
-    TestUtil.dropTable(con, "test_statement");
-    super.tearDown();
-  }
-
-  protected int executeUpdate(String sql) throws SQLException {
-    PreparedStatement ps = con.prepareStatement(sql);
-    int count = ps.executeUpdate();
-    ps.close();
-    return count;
-  }
-
-  @Test
-  public void testUpsertDoNothingConflict() throws SQLException {
-    int count = executeUpdate(
-        "INSERT INTO test_statement(i, t) VALUES (42, '42') ON CONFLICT DO NOTHING");
-    assertEquals("insert on CONFLICT DO NOTHING should report 0 modified rows on CONFLICT",
-        0, count);
-  }
-
-  @Test
-  public void testUpsertDoNothingNoConflict() throws SQLException {
-    int count = executeUpdate(
-        "INSERT INTO test_statement(i, t) VALUES (43, '43') ON CONFLICT DO NOTHING");
-    assertEquals("insert on conflict DO NOTHING should report 1 modified row on plain insert",
-        1, count);
-  }
-
-  @Test
-  public void testUpsertDoUpdateConflict() throws SQLException {
-    int count = executeUpdate(
-        "INSERT INTO test_statement(i, t) VALUES (42, '42') ON CONFLICT(i) DO UPDATE SET t='43'");
-    assertEquals("insert ON CONFLICT DO UPDATE should report 1 modified row on CONFLICT",
-        1, count);
-  }
-
-  @Test
-  public void testUpsertDoUpdateNoConflict() throws SQLException {
-    int count = executeUpdate(
-        "INSERT INTO test_statement(i, t) VALUES (43, '43') ON CONFLICT(i) DO UPDATE SET t='43'");
-    assertEquals("insert on conflict do update should report 1 modified row on plain insert",
-        1, count);
-  }
-
-  @Test
-  public void testSingleValuedUpsertBatch() throws SQLException {
-    PreparedStatement ps = null;
-    try {
-      ps = con.prepareStatement(
-          "insert into test_statement(i, t) values (?,?) ON CONFLICT (i) DO NOTHING");
-      ps.setInt(1, 50);
-      ps.setString(2, "50");
-      ps.addBatch();
-      ps.setInt(1, 53);
-      ps.setString(2, "53");
-      ps.addBatch();
-      int[] actual = ps.executeBatch();
-      BatchExecuteTest.assertSimpleInsertBatch(2, actual);
-    } finally {
-      TestUtil.closeQuietly(ps);
+    @Parameterized.Parameters(name = "binary = {0}, reWriteBatchedInserts = {1}")
+    public static Iterable<Object[]> data() {
+        Collection<Object[]> ids = new ArrayList<>();
+        for (BinaryMode binaryMode : BinaryMode.values()) {
+            for (ReWriteBatchedInserts rewrite : ReWriteBatchedInserts.values()) {
+                ids.add(new Object[]{binaryMode, rewrite});
+            }
+        }
+        return ids;
     }
-  }
 
-  @Test
-  public void testMultiValuedUpsertBatch() throws SQLException {
-    PreparedStatement ps = null;
-    try {
-      ps = con.prepareStatement(
-          "insert into test_statement(i, t) values (?,?),(?,?) ON CONFLICT (i) DO NOTHING");
-      ps.setInt(1, 50);
-      ps.setString(2, "50");
-      ps.setInt(3, 51);
-      ps.setString(4, "51");
-      ps.addBatch();
-      ps.setInt(1, 52);
-      ps.setString(2, "52");
-      ps.setInt(3, 53);
-      ps.setString(4, "53");
-      ps.addBatch();
-      int[] actual = ps.executeBatch();
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        assumeMinimumServerVersion(ServerVersion.v9_5);
 
-      BatchExecuteTest.assertBatchResult("2 batched rows, 2-values each", new int[]{2, 2}, actual);
-
-      Statement st = con.createStatement();
-      ResultSet rs =
-          st.executeQuery("select count(*) from test_statement where i between 50 and 53");
-      rs.next();
-      Assert.assertEquals("test_statement should have 4 rows with 'i' of 50..53", 4, rs.getInt(1));
-    } finally {
-      TestUtil.closeQuietly(ps);
+        TestUtil.createTempTable(con, "test_statement", "i int primary key, t varchar(5)");
+        Statement stmt = con.createStatement();
+        stmt.executeUpdate("INSERT INTO test_statement(i, t) VALUES (42, '42')");
+        TestUtil.closeQuietly(stmt);
     }
-  }
 
-  @Test
-  public void testSingleValuedUpsertUpdateBatch() throws SQLException {
-    PreparedStatement ps = null;
-    try {
-      ps = con.prepareStatement(
-          "insert into test_statement(i, t) values (?,?) ON CONFLICT (i) DO update set t=?");
-      ps.setInt(1, 50);
-      ps.setString(2, "50U");
-      ps.setString(3, "50U");
-      ps.addBatch();
-      ps.setInt(1, 53);
-      ps.setString(2, "53U");
-      ps.setString(3, "53U");
-      ps.addBatch();
-      int[] actual = ps.executeBatch();
-      BatchExecuteTest.assertSimpleInsertBatch(2, actual);
-    } finally {
-      TestUtil.closeQuietly(ps);
+    @Override
+    public void tearDown() throws SQLException {
+        TestUtil.dropTable(con, "test_statement");
+        super.tearDown();
     }
-  }
 
-  @Test
-  public void testSingleValuedUpsertUpdateConstantBatch() throws SQLException {
-    PreparedStatement ps = null;
-    try {
-      // For reWriteBatchedInserts=YES the following is expected
-      // FE=> Parse(stmt=null,query="insert into test_statement(i, t) values ($1,$2),($3,$4) ON CONFLICT (i) DO update set t='DEF'",oids={23,1043,23,1043})
-      ps = con.prepareStatement(
-          "insert into test_statement(i, t) values (?,?) ON CONFLICT (i) DO update set t='DEF'");
-      ps.setInt(1, 50);
-      ps.setString(2, "50");
-      ps.addBatch();
-      ps.setInt(1, 53);
-      ps.setString(2, "53");
-      ps.addBatch();
-      int[] actual = ps.executeBatch();
-      BatchExecuteTest.assertSimpleInsertBatch(2, actual);
-    } finally {
-      TestUtil.closeQuietly(ps);
+    protected int executeUpdate(String sql) throws SQLException {
+        PreparedStatement ps = con.prepareStatement(sql);
+        int count = ps.executeUpdate();
+        ps.close();
+        return count;
+    }
+
+    @Test
+    public void testUpsertDoNothingConflict() throws SQLException {
+        int count = executeUpdate(
+                "INSERT INTO test_statement(i, t) VALUES (42, '42') ON CONFLICT DO NOTHING");
+        assertEquals("insert on CONFLICT DO NOTHING should report 0 modified rows on CONFLICT",
+                0, count);
+    }
+
+    @Test
+    public void testUpsertDoNothingNoConflict() throws SQLException {
+        int count = executeUpdate(
+                "INSERT INTO test_statement(i, t) VALUES (43, '43') ON CONFLICT DO NOTHING");
+        assertEquals("insert on conflict DO NOTHING should report 1 modified row on plain insert",
+                1, count);
+    }
+
+    @Test
+    public void testUpsertDoUpdateConflict() throws SQLException {
+        int count = executeUpdate(
+                "INSERT INTO test_statement(i, t) VALUES (42, '42') ON CONFLICT(i) DO UPDATE SET t='43'");
+        assertEquals("insert ON CONFLICT DO UPDATE should report 1 modified row on CONFLICT",
+                1, count);
+    }
+
+    @Test
+    public void testUpsertDoUpdateNoConflict() throws SQLException {
+        int count = executeUpdate(
+                "INSERT INTO test_statement(i, t) VALUES (43, '43') ON CONFLICT(i) DO UPDATE SET t='43'");
+        assertEquals("insert on conflict do update should report 1 modified row on plain insert",
+                1, count);
+    }
+
+    @Test
+    public void testSingleValuedUpsertBatch() throws SQLException {
+        PreparedStatement ps = null;
+        try {
+            ps = con.prepareStatement(
+                    "insert into test_statement(i, t) values (?,?) ON CONFLICT (i) DO NOTHING");
+            ps.setInt(1, 50);
+            ps.setString(2, "50");
+            ps.addBatch();
+            ps.setInt(1, 53);
+            ps.setString(2, "53");
+            ps.addBatch();
+            int[] actual = ps.executeBatch();
+            BatchExecuteTest.assertSimpleInsertBatch(2, actual);
+        } finally {
+            TestUtil.closeQuietly(ps);
+        }
+    }
+
+    @Test
+    public void testMultiValuedUpsertBatch() throws SQLException {
+        PreparedStatement ps = null;
+        try {
+            ps = con.prepareStatement(
+                    "insert into test_statement(i, t) values (?,?),(?,?) ON CONFLICT (i) DO NOTHING");
+            ps.setInt(1, 50);
+            ps.setString(2, "50");
+            ps.setInt(3, 51);
+            ps.setString(4, "51");
+            ps.addBatch();
+            ps.setInt(1, 52);
+            ps.setString(2, "52");
+            ps.setInt(3, 53);
+            ps.setString(4, "53");
+            ps.addBatch();
+            int[] actual = ps.executeBatch();
+
+            BatchExecuteTest.assertBatchResult("2 batched rows, 2-values each", new int[]{2, 2}, actual);
+
+            Statement st = con.createStatement();
+            ResultSet rs =
+                    st.executeQuery("select count(*) from test_statement where i between 50 and 53");
+            rs.next();
+            Assert.assertEquals("test_statement should have 4 rows with 'i' of 50..53", 4, rs.getInt(1));
+        } finally {
+            TestUtil.closeQuietly(ps);
+        }
+    }
+
+    @Test
+    public void testSingleValuedUpsertUpdateBatch() throws SQLException {
+        PreparedStatement ps = null;
+        try {
+            ps = con.prepareStatement(
+                    "insert into test_statement(i, t) values (?,?) ON CONFLICT (i) DO update set t=?");
+            ps.setInt(1, 50);
+            ps.setString(2, "50U");
+            ps.setString(3, "50U");
+            ps.addBatch();
+            ps.setInt(1, 53);
+            ps.setString(2, "53U");
+            ps.setString(3, "53U");
+            ps.addBatch();
+            int[] actual = ps.executeBatch();
+            BatchExecuteTest.assertSimpleInsertBatch(2, actual);
+        } finally {
+            TestUtil.closeQuietly(ps);
+        }
+    }
+
+    @Test
+    public void testSingleValuedUpsertUpdateConstantBatch() throws SQLException {
+        PreparedStatement ps = null;
+        try {
+            // For reWriteBatchedInserts=YES the following is expected
+            // FE=> Parse(stmt=null,query="insert into test_statement(i, t) values ($1,$2),($3,$4) ON CONFLICT (i) DO update set t='DEF'",oids={23,1043,23,1043})
+            ps = con.prepareStatement(
+                    "insert into test_statement(i, t) values (?,?) ON CONFLICT (i) DO update set t='DEF'");
+            ps.setInt(1, 50);
+            ps.setString(2, "50");
+            ps.addBatch();
+            ps.setInt(1, 53);
+            ps.setString(2, "53");
+            ps.addBatch();
+            int[] actual = ps.executeBatch();
+            BatchExecuteTest.assertSimpleInsertBatch(2, actual);
+        } finally {
+            TestUtil.closeQuietly(ps);
+        }
     }
-  }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/optional/BaseDataSourceFailoverUrlsTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/optional/BaseDataSourceFailoverUrlsTest.java
index 4e2bb37..ba32549 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/optional/BaseDataSourceFailoverUrlsTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/optional/BaseDataSourceFailoverUrlsTest.java
@@ -5,105 +5,101 @@
 
 package org.postgresql.test.jdbc2.optional;
 
+import java.io.IOException;
+import javax.naming.NamingException;
+import org.junit.jupiter.api.Test;
+import org.postgresql.ds.common.BaseDataSource;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertThrows;
 
-import org.postgresql.ds.common.BaseDataSource;
-
-import org.junit.jupiter.api.Test;
-
-import java.io.IOException;
-
-import javax.naming.NamingException;
-
 /**
  * tests that failover urls survive the parse/rebuild roundtrip with and without specific ports
  */
 class BaseDataSourceFailoverUrlsTest {
 
-  private static final String DEFAULT_PORT = "5432";
+    private static final String DEFAULT_PORT = "5432";
 
-  @Test
-  void fullDefault() throws ClassNotFoundException, NamingException, IOException {
-    roundTripFromUrl("jdbc:postgresql://server/database", "jdbc:postgresql://server:" + DEFAULT_PORT + "/database");
-  }
+    private static String jdbcUrlStripParams(String in) {
+        return in.replaceAll("\\?.*$", "");
+    }
 
-  @Test
-  void twoNoPorts() throws ClassNotFoundException, NamingException, IOException {
-    roundTripFromUrl("jdbc:postgresql://server1,server2/database", "jdbc:postgresql://server1:" + DEFAULT_PORT + ",server2:" + DEFAULT_PORT + "/database");
-  }
+    private static void assertUrlWithoutParamsEquals(String expected, String url) {
+        assertEquals(expected, jdbcUrlStripParams(url));
+    }
 
-  @Test
-  void twoWithPorts() throws ClassNotFoundException, NamingException, IOException {
-    roundTripFromUrl("jdbc:postgresql://server1:1234,server2:2345/database", "jdbc:postgresql://server1:1234,server2:2345/database");
-  }
+    @Test
+    void fullDefault() throws ClassNotFoundException, NamingException, IOException {
+        roundTripFromUrl("jdbc:postgresql://server/database", "jdbc:postgresql://server:" + DEFAULT_PORT + "/database");
+    }
 
-  @Test
-  void twoFirstPort() throws ClassNotFoundException, NamingException, IOException {
-    roundTripFromUrl("jdbc:postgresql://server1,server2:2345/database", "jdbc:postgresql://server1:" + DEFAULT_PORT + ",server2:2345/database");
-  }
+    @Test
+    void twoNoPorts() throws ClassNotFoundException, NamingException, IOException {
+        roundTripFromUrl("jdbc:postgresql://server1,server2/database", "jdbc:postgresql://server1:" + DEFAULT_PORT + ",server2:" + DEFAULT_PORT + "/database");
+    }
 
-  @Test
-  void twoLastPort() throws ClassNotFoundException, NamingException, IOException {
-    roundTripFromUrl("jdbc:postgresql://server1:2345,server2/database", "jdbc:postgresql://server1:2345,server2:" + DEFAULT_PORT + "/database");
-  }
+    @Test
+    void twoWithPorts() throws ClassNotFoundException, NamingException, IOException {
+        roundTripFromUrl("jdbc:postgresql://server1:1234,server2:2345/database", "jdbc:postgresql://server1:1234,server2:2345/database");
+    }
 
-  @Test
-  void nullPorts() {
-    BaseDataSource bds = newDS();
-    bds.setDatabaseName("database");
-    bds.setPortNumbers(null);
-    assertUrlWithoutParamsEquals("jdbc:postgresql://localhost/database", bds.getURL());
-    assertEquals(0, bds.getPortNumber());
-    assertEquals(0, bds.getPortNumbers()[0]);
-  }
+    @Test
+    void twoFirstPort() throws ClassNotFoundException, NamingException, IOException {
+        roundTripFromUrl("jdbc:postgresql://server1,server2:2345/database", "jdbc:postgresql://server1:" + DEFAULT_PORT + ",server2:2345/database");
+    }
 
-  @Test
-  void emptyPorts() {
-    BaseDataSource bds = newDS();
-    bds.setDatabaseName("database");
-    bds.setPortNumbers(new int[0]);
-    assertUrlWithoutParamsEquals("jdbc:postgresql://localhost/database", bds.getURL());
-    assertEquals(0, bds.getPortNumber());
-    assertEquals(0, bds.getPortNumbers()[0]);
-  }
+    @Test
+    void twoLastPort() throws ClassNotFoundException, NamingException, IOException {
+        roundTripFromUrl("jdbc:postgresql://server1:2345,server2/database", "jdbc:postgresql://server1:2345,server2:" + DEFAULT_PORT + "/database");
+    }
 
-  @Test
-  void wrongNumberOfPorts() {
-    BaseDataSource bds = newDS();
-    bds.setDatabaseName("database");
-    bds.setServerNames(new String[]{"localhost", "localhost1"});
-    bds.setPortNumbers(new int[]{6432});
-    assertThrows(IllegalArgumentException.class, bds::getUrl, "Number of ports not equal to the number of servers should throw an exception");
-  }
+    @Test
+    void nullPorts() {
+        BaseDataSource bds = newDS();
+        bds.setDatabaseName("database");
+        bds.setPortNumbers(null);
+        assertUrlWithoutParamsEquals("jdbc:postgresql://localhost/database", bds.getURL());
+        assertEquals(0, bds.getPortNumber());
+        assertEquals(0, bds.getPortNumbers()[0]);
+    }
 
-  private BaseDataSource newDS() {
-    return new BaseDataSource() {
-      @Override
-      public String getDescription() {
-        return "BaseDataSourceFailoverUrlsTest-DS";
-      }
-    };
-  }
+    @Test
+    void emptyPorts() {
+        BaseDataSource bds = newDS();
+        bds.setDatabaseName("database");
+        bds.setPortNumbers(new int[0]);
+        assertUrlWithoutParamsEquals("jdbc:postgresql://localhost/database", bds.getURL());
+        assertEquals(0, bds.getPortNumber());
+        assertEquals(0, bds.getPortNumbers()[0]);
+    }
 
-  private void roundTripFromUrl(String in, String expected) throws NamingException, ClassNotFoundException, IOException {
-    BaseDataSource bds = newDS();
+    @Test
+    void wrongNumberOfPorts() {
+        BaseDataSource bds = newDS();
+        bds.setDatabaseName("database");
+        bds.setServerNames(new String[]{"localhost", "localhost1"});
+        bds.setPortNumbers(new int[]{6432});
+        assertThrows(IllegalArgumentException.class, bds::getUrl, "Number of ports not equal to the number of servers should throw an exception");
+    }
 
-    bds.setUrl(in);
-    assertUrlWithoutParamsEquals(expected, bds.getURL());
+    private BaseDataSource newDS() {
+        return new BaseDataSource() {
+            @Override
+            public String getDescription() {
+                return "BaseDataSourceFailoverUrlsTest-DS";
+            }
+        };
+    }
 
-    bds.setFromReference(bds.getReference());
-    assertUrlWithoutParamsEquals(expected, bds.getURL());
+    private void roundTripFromUrl(String in, String expected) throws NamingException, ClassNotFoundException, IOException {
+        BaseDataSource bds = newDS();
 
-    bds.initializeFrom(bds);
-    assertUrlWithoutParamsEquals(expected, bds.getURL());
-  }
+        bds.setUrl(in);
+        assertUrlWithoutParamsEquals(expected, bds.getURL());
 
-  private static String jdbcUrlStripParams(String in) {
-    return in.replaceAll("\\?.*$", "");
-  }
+        bds.setFromReference(bds.getReference());
+        assertUrlWithoutParamsEquals(expected, bds.getURL());
 
-  private static void assertUrlWithoutParamsEquals(String expected, String url) {
-    assertEquals(expected, jdbcUrlStripParams(url));
-  }
+        bds.initializeFrom(bds);
+        assertUrlWithoutParamsEquals(expected, bds.getURL());
+    }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/optional/BaseDataSourceTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/optional/BaseDataSourceTest.java
index 17e3719..e6cbff9 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/optional/BaseDataSourceTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/optional/BaseDataSourceTest.java
@@ -5,30 +5,26 @@
 
 package org.postgresql.test.jdbc2.optional;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNotSame;
-import static org.junit.Assert.assertSame;
-import static org.junit.Assert.fail;
-
-import org.postgresql.PGConnection;
-import org.postgresql.ds.common.BaseDataSource;
-import org.postgresql.test.TestUtil;
-import org.postgresql.test.util.MiniJndiContextFactory;
-
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
 import java.sql.Connection;
 import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.sql.Statement;
 import java.util.Hashtable;
-
 import javax.naming.Context;
 import javax.naming.InitialContext;
 import javax.naming.NamingException;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.postgresql.PGConnection;
+import org.postgresql.ds.common.BaseDataSource;
+import org.postgresql.test.TestUtil;
+import org.postgresql.test.util.MiniJndiContextFactory;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNotSame;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.fail;
 
 /**
  * Common tests for all the BaseDataSource implementations. This is a small variety to make sure
@@ -38,199 +34,199 @@ import javax.naming.NamingException;
  * @author Aaron Mulder (ammulder@chariotsolutions.com)
  */
 public abstract class BaseDataSourceTest {
-  public static final String DATA_SOURCE_JNDI = "BaseDataSource";
+    public static final String DATA_SOURCE_JNDI = "BaseDataSource";
 
-  protected Connection con;
-  protected BaseDataSource bds;
+    protected Connection con;
+    protected BaseDataSource bds;
 
-  /**
-   * Creates a test table using a standard connection (not from a DataSource).
-   */
-  @Before
-  public void setUp() throws Exception {
-    con = TestUtil.openDB();
-    TestUtil.createTable(con, "poolingtest", "id int4 not null primary key, name varchar(50)");
-    Statement stmt = con.createStatement();
-    stmt.executeUpdate("INSERT INTO poolingtest VALUES (1, 'Test Row 1')");
-    stmt.executeUpdate("INSERT INTO poolingtest VALUES (2, 'Test Row 2')");
-    TestUtil.closeDB(con);
-  }
-
-  /**
-   * Removes the test table using a standard connection (not from a DataSource).
-   */
-  @After
-  public void tearDown() throws Exception {
-    TestUtil.closeDB(con);
-    con = TestUtil.openDB();
-    TestUtil.dropTable(con, "poolingtest");
-    TestUtil.closeDB(con);
-  }
-
-  /**
-   * Gets a connection from the current BaseDataSource.
-   */
-  protected Connection getDataSourceConnection() throws SQLException {
-    if (bds == null) {
-      initializeDataSource();
+    public static void setupDataSource(BaseDataSource bds) {
+        bds.setServerName(TestUtil.getServer());
+        bds.setPortNumber(TestUtil.getPort());
+        bds.setDatabaseName(TestUtil.getDatabase());
+        bds.setUser(TestUtil.getUser());
+        bds.setPassword(TestUtil.getPassword());
+        bds.setPrepareThreshold(TestUtil.getPrepareThreshold());
+        bds.setProtocolVersion(TestUtil.getProtocolVersion());
     }
-    return bds.getConnection();
-  }
 
-  /**
-   * Creates an instance of the current BaseDataSource for testing. Must be customized by each
-   * subclass.
-   */
-  protected abstract void initializeDataSource();
-
-  public static void setupDataSource(BaseDataSource bds) {
-    bds.setServerName(TestUtil.getServer());
-    bds.setPortNumber(TestUtil.getPort());
-    bds.setDatabaseName(TestUtil.getDatabase());
-    bds.setUser(TestUtil.getUser());
-    bds.setPassword(TestUtil.getPassword());
-    bds.setPrepareThreshold(TestUtil.getPrepareThreshold());
-    bds.setProtocolVersion(TestUtil.getProtocolVersion());
-  }
-
-  /**
-   * Test to make sure you can instantiate and configure the appropriate DataSource.
-   */
-  @Test
-  public void testCreateDataSource() {
-    initializeDataSource();
-  }
-
-  /**
-   * Test to make sure you can get a connection from the DataSource, which in turn means the
-   * DataSource was able to open it.
-   */
-  @Test
-  public void testGetConnection() {
-    try {
-      con = getDataSourceConnection();
-      con.close();
-    } catch (SQLException e) {
-      fail(e.getMessage());
+    /**
+     * Creates a test table using a standard connection (not from a DataSource).
+     */
+    @Before
+    public void setUp() throws Exception {
+        con = TestUtil.openDB();
+        TestUtil.createTable(con, "poolingtest", "id int4 not null primary key, name varchar(50)");
+        Statement stmt = con.createStatement();
+        stmt.executeUpdate("INSERT INTO poolingtest VALUES (1, 'Test Row 1')");
+        stmt.executeUpdate("INSERT INTO poolingtest VALUES (2, 'Test Row 2')");
+        TestUtil.closeDB(con);
     }
-  }
 
-  /**
-   * A simple test to make sure you can execute SQL using the Connection from the DataSource.
-   */
-  @Test
-  public void testUseConnection() {
-    try {
-      con = getDataSourceConnection();
-      Statement st = con.createStatement();
-      ResultSet rs = st.executeQuery("SELECT COUNT(*) FROM poolingtest");
-      if (rs.next()) {
-        int count = rs.getInt(1);
-        if (rs.next()) {
-          fail("Should only have one row in SELECT COUNT result set");
+    /**
+     * Removes the test table using a standard connection (not from a DataSource).
+     */
+    @After
+    public void tearDown() throws Exception {
+        TestUtil.closeDB(con);
+        con = TestUtil.openDB();
+        TestUtil.dropTable(con, "poolingtest");
+        TestUtil.closeDB(con);
+    }
+
+    /**
+     * Gets a connection from the current BaseDataSource.
+     */
+    protected Connection getDataSourceConnection() throws SQLException {
+        if (bds == null) {
+            initializeDataSource();
         }
-        if (count != 2) {
-          fail("Count returned " + count + " expecting 2");
+        return bds.getConnection();
+    }
+
+    /**
+     * Creates an instance of the current BaseDataSource for testing. Must be customized by each
+     * subclass.
+     */
+    protected abstract void initializeDataSource();
+
+    /**
+     * Test to make sure you can instantiate and configure the appropriate DataSource.
+     */
+    @Test
+    public void testCreateDataSource() {
+        initializeDataSource();
+    }
+
+    /**
+     * Test to make sure you can get a connection from the DataSource, which in turn means the
+     * DataSource was able to open it.
+     */
+    @Test
+    public void testGetConnection() {
+        try {
+            con = getDataSourceConnection();
+            con.close();
+        } catch (SQLException e) {
+            fail(e.getMessage());
         }
-      } else {
-        fail("Should have one row in SELECT COUNT result set");
-      }
-      rs.close();
-      st.close();
-      con.close();
-    } catch (SQLException e) {
-      fail(e.getMessage());
     }
-  }
 
-  /**
-   * A test to make sure you can execute DDL SQL using the Connection from the DataSource.
-   */
-  @Test
-  public void testDdlOverConnection() {
-    try {
-      con = getDataSourceConnection();
-      TestUtil.createTable(con, "poolingtest", "id int4 not null primary key, name varchar(50)");
-      con.close();
-    } catch (SQLException e) {
-      fail(e.getMessage());
+    /**
+     * A simple test to make sure you can execute SQL using the Connection from the DataSource.
+     */
+    @Test
+    public void testUseConnection() {
+        try {
+            con = getDataSourceConnection();
+            Statement st = con.createStatement();
+            ResultSet rs = st.executeQuery("SELECT COUNT(*) FROM poolingtest");
+            if (rs.next()) {
+                int count = rs.getInt(1);
+                if (rs.next()) {
+                    fail("Should only have one row in SELECT COUNT result set");
+                }
+                if (count != 2) {
+                    fail("Count returned " + count + " expecting 2");
+                }
+            } else {
+                fail("Should have one row in SELECT COUNT result set");
+            }
+            rs.close();
+            st.close();
+            con.close();
+        } catch (SQLException e) {
+            fail(e.getMessage());
+        }
     }
-  }
 
-  /**
-   * A test to make sure the connections are not being pooled by the current DataSource. Obviously
-   * need to be overridden in the case of a pooling Datasource.
-   */
-  @Test
-  public void testNotPooledConnection() throws SQLException {
-    Connection con1 = getDataSourceConnection();
-    con1.close();
-    Connection con2 = getDataSourceConnection();
-    con2.close();
-    assertNotSame(con1, con2);
-  }
-
-  /**
-   * Test to make sure that PGConnection methods can be called on the pooled Connection.
-   */
-  @Test
-  public void testPGConnection() {
-    try {
-      con = getDataSourceConnection();
-      ((PGConnection) con).getNotifications();
-      con.close();
-    } catch (Exception e) {
-      fail("Unable to call PGConnection method on pooled connection due to "
-          + e.getClass().getName() + " (" + e.getMessage() + ")");
+    /**
+     * A test to make sure you can execute DDL SQL using the Connection from the DataSource.
+     */
+    @Test
+    public void testDdlOverConnection() {
+        try {
+            con = getDataSourceConnection();
+            TestUtil.createTable(con, "poolingtest", "id int4 not null primary key, name varchar(50)");
+            con.close();
+        } catch (SQLException e) {
+            fail(e.getMessage());
+        }
     }
-  }
 
-  /**
-   * Eventually, we must test stuffing the DataSource in JNDI and then getting it back out and make
-   * sure it's still usable. This should ideally test both Serializable and Referenceable
-   * mechanisms. Will probably be multiple tests when implemented.
-   */
-  @Test
-  public void testJndi() {
-    initializeDataSource();
-    BaseDataSource oldbds = bds;
-    String oldurl = bds.getURL();
-    InitialContext ic = getInitialContext();
-    try {
-      ic.rebind(DATA_SOURCE_JNDI, bds);
-      bds = (BaseDataSource) ic.lookup(DATA_SOURCE_JNDI);
-      assertNotNull("Got null looking up DataSource from JNDI!", bds);
-      compareJndiDataSource(oldbds, bds);
-    } catch (NamingException e) {
-      fail(e.getMessage());
+    /**
+     * A test to make sure the connections are not being pooled by the current DataSource. Obviously
+     * need to be overridden in the case of a pooling Datasource.
+     */
+    @Test
+    public void testNotPooledConnection() throws SQLException {
+        Connection con1 = getDataSourceConnection();
+        con1.close();
+        Connection con2 = getDataSourceConnection();
+        con2.close();
+        assertNotSame(con1, con2);
     }
-    oldbds = bds;
-    String url = bds.getURL();
-    testUseConnection();
-    assertSame("Test should not have changed DataSource (" + bds + " != " + oldbds + ")!",
-        oldbds, bds);
-    assertEquals("Test should not have changed DataSource URL",
-        oldurl, url);
-  }
 
-  /**
-   * Uses the mini-JNDI implementation for testing purposes.
-   */
-  protected InitialContext getInitialContext() {
-    Hashtable<String, Object> env = new Hashtable<>();
-    env.put(Context.INITIAL_CONTEXT_FACTORY, MiniJndiContextFactory.class.getName());
-    try {
-      return new InitialContext(env);
-    } catch (NamingException e) {
-      fail("Unable to create InitialContext: " + e.getMessage());
-      return null;
+    /**
+     * Test to make sure that PGConnection methods can be called on the pooled Connection.
+     */
+    @Test
+    public void testPGConnection() {
+        try {
+            con = getDataSourceConnection();
+            ((PGConnection) con).getNotifications();
+            con.close();
+        } catch (Exception e) {
+            fail("Unable to call PGConnection method on pooled connection due to "
+                    + e.getClass().getName() + " (" + e.getMessage() + ")");
+        }
     }
-  }
 
-  /**
-   * Check whether a DS was dereferenced from JNDI or recreated.
-   */
-  protected void compareJndiDataSource(BaseDataSource oldbds, BaseDataSource bds) {
-    assertNotSame("DataSource was dereferenced, should have been serialized or recreated", oldbds, bds);
-  }
+    /**
+     * Eventually, we must test stuffing the DataSource in JNDI and then getting it back out and make
+     * sure it's still usable. This should ideally test both Serializable and Referenceable
+     * mechanisms. Will probably be multiple tests when implemented.
+     */
+    @Test
+    public void testJndi() {
+        initializeDataSource();
+        BaseDataSource oldbds = bds;
+        String oldurl = bds.getURL();
+        InitialContext ic = getInitialContext();
+        try {
+            ic.rebind(DATA_SOURCE_JNDI, bds);
+            bds = (BaseDataSource) ic.lookup(DATA_SOURCE_JNDI);
+            assertNotNull("Got null looking up DataSource from JNDI!", bds);
+            compareJndiDataSource(oldbds, bds);
+        } catch (NamingException e) {
+            fail(e.getMessage());
+        }
+        oldbds = bds;
+        String url = bds.getURL();
+        testUseConnection();
+        assertSame("Test should not have changed DataSource (" + bds + " != " + oldbds + ")!",
+                oldbds, bds);
+        assertEquals("Test should not have changed DataSource URL",
+                oldurl, url);
+    }
+
+    /**
+     * Uses the mini-JNDI implementation for testing purposes.
+     */
+    protected InitialContext getInitialContext() {
+        Hashtable<String, Object> env = new Hashtable<>();
+        env.put(Context.INITIAL_CONTEXT_FACTORY, MiniJndiContextFactory.class.getName());
+        try {
+            return new InitialContext(env);
+        } catch (NamingException e) {
+            fail("Unable to create InitialContext: " + e.getMessage());
+            return null;
+        }
+    }
+
+    /**
+     * Check whether a DS was dereferenced from JNDI or recreated.
+     */
+    protected void compareJndiDataSource(BaseDataSource oldbds, BaseDataSource bds) {
+        assertNotSame("DataSource was dereferenced, should have been serialized or recreated", oldbds, bds);
+    }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/optional/CaseOptimiserDataSourceTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/optional/CaseOptimiserDataSourceTest.java
index 8c750aa..da230b9 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/optional/CaseOptimiserDataSourceTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/optional/CaseOptimiserDataSourceTest.java
@@ -5,98 +5,95 @@
 
 package org.postgresql.test.jdbc2.optional;
 
-import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.junit.jupiter.api.Assertions.fail;
-
-import org.postgresql.core.BaseConnection;
-import org.postgresql.ds.common.BaseDataSource;
-import org.postgresql.jdbc2.optional.SimpleDataSource;
-import org.postgresql.test.TestUtil;
-
-import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.Test;
-
 import java.sql.Connection;
 import java.sql.PreparedStatement;
 import java.sql.SQLException;
 import java.sql.Statement;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.postgresql.core.BaseConnection;
+import org.postgresql.ds.common.BaseDataSource;
+import org.postgresql.jdbc2.optional.SimpleDataSource;
+import org.postgresql.test.TestUtil;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 
 /**
  * DataSource test to ensure the BaseConnection is configured with column sanitiser disabled.
  */
 public class CaseOptimiserDataSourceTest {
-  private BaseDataSource bds;
-  protected Connection conn;
+    protected Connection conn;
+    private BaseDataSource bds;
 
-  @BeforeEach
-  void setUp() throws SQLException {
-    Connection conn = getDataSourceConnection();
-    assertTrue(conn instanceof BaseConnection);
-    BaseConnection bc = (BaseConnection) conn;
-    assertTrue(bc.isColumnSanitiserDisabled(),
-        "Expected state [TRUE] of base connection configuration failed test.");
-    Statement insert = conn.createStatement();
-    TestUtil.createTable(conn, "allmixedup",
-        "id int primary key, \"DESCRIPTION\" varchar(40), \"fOo\" varchar(3)");
-    insert.execute(TestUtil.insertSQL("allmixedup", "1,'mixed case test', 'bar'"));
-    insert.close();
-    conn.close();
-  }
-
-  @AfterEach
-  void tearDown() throws SQLException {
-    Connection conn = getDataSourceConnection();
-    Statement drop = conn.createStatement();
-    drop.execute("drop table allmixedup");
-    drop.close();
-    conn.close();
-    bds.setDisableColumnSanitiser(false);
-  }
-
-  /*
-   * Test to ensure a datasource can be configured with the column sanitiser optimisation. This test
-   * checks for a side effect of the sanitiser being disabled. The column is not expected to be
-   * found.
-   */
-  @Test
-  void dataSourceDisabledSanitiserPropertySucceeds() throws SQLException {
-    String label = "FOO";
-    Connection conn = getDataSourceConnection();
-    PreparedStatement query =
-        conn.prepareStatement("select * from allmixedup");
-    if (0 < TestUtil.findColumn(query, label)) {
-      fail(String.format("Did not expect to find the column with the label [%1$s].", label));
+    public static void setupDataSource(BaseDataSource bds) {
+        bds.setServerName(TestUtil.getServer());
+        bds.setPortNumber(TestUtil.getPort());
+        bds.setDatabaseName(TestUtil.getDatabase());
+        bds.setUser(TestUtil.getUser());
+        bds.setPassword(TestUtil.getPassword());
+        bds.setPrepareThreshold(TestUtil.getPrepareThreshold());
+        bds.setProtocolVersion(TestUtil.getProtocolVersion());
     }
-    query.close();
-    conn.close();
-  }
 
-  /**
-   * Gets a connection from the current BaseDataSource.
-   */
-  protected Connection getDataSourceConnection() throws SQLException {
-    if (bds == null) {
-      initializeDataSource();
+    @BeforeEach
+    void setUp() throws SQLException {
+        Connection conn = getDataSourceConnection();
+        assertTrue(conn instanceof BaseConnection);
+        BaseConnection bc = (BaseConnection) conn;
+        assertTrue(bc.isColumnSanitiserDisabled(),
+                "Expected state [TRUE] of base connection configuration failed test.");
+        Statement insert = conn.createStatement();
+        TestUtil.createTable(conn, "allmixedup",
+                "id int primary key, \"DESCRIPTION\" varchar(40), \"fOo\" varchar(3)");
+        insert.execute(TestUtil.insertSQL("allmixedup", "1,'mixed case test', 'bar'"));
+        insert.close();
+        conn.close();
     }
-    return bds.getConnection();
-  }
 
-  protected void initializeDataSource() {
-    if (bds == null) {
-      bds = new SimpleDataSource();
-      setupDataSource(bds);
-      bds.setDisableColumnSanitiser(true);
+    @AfterEach
+    void tearDown() throws SQLException {
+        Connection conn = getDataSourceConnection();
+        Statement drop = conn.createStatement();
+        drop.execute("drop table allmixedup");
+        drop.close();
+        conn.close();
+        bds.setDisableColumnSanitiser(false);
     }
-  }
 
-  public static void setupDataSource(BaseDataSource bds) {
-    bds.setServerName(TestUtil.getServer());
-    bds.setPortNumber(TestUtil.getPort());
-    bds.setDatabaseName(TestUtil.getDatabase());
-    bds.setUser(TestUtil.getUser());
-    bds.setPassword(TestUtil.getPassword());
-    bds.setPrepareThreshold(TestUtil.getPrepareThreshold());
-    bds.setProtocolVersion(TestUtil.getProtocolVersion());
-  }
+    /*
+     * Test to ensure a datasource can be configured with the column sanitiser optimisation. This test
+     * checks for a side effect of the sanitiser being disabled. The column is not expected to be
+     * found.
+     */
+    @Test
+    void dataSourceDisabledSanitiserPropertySucceeds() throws SQLException {
+        String label = "FOO";
+        Connection conn = getDataSourceConnection();
+        PreparedStatement query =
+                conn.prepareStatement("select * from allmixedup");
+        if (0 < TestUtil.findColumn(query, label)) {
+            fail(String.format("Did not expect to find the column with the label [%1$s].", label));
+        }
+        query.close();
+        conn.close();
+    }
+
+    /**
+     * Gets a connection from the current BaseDataSource.
+     */
+    protected Connection getDataSourceConnection() throws SQLException {
+        if (bds == null) {
+            initializeDataSource();
+        }
+        return bds.getConnection();
+    }
+
+    protected void initializeDataSource() {
+        if (bds == null) {
+            bds = new SimpleDataSource();
+            setupDataSource(bds);
+            bds.setDisableColumnSanitiser(true);
+        }
+    }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/optional/ConnectionPoolTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/optional/ConnectionPoolTest.java
index 271fc25..46b4c74 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/optional/ConnectionPoolTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/optional/ConnectionPoolTest.java
@@ -5,18 +5,6 @@
 
 package org.postgresql.test.jdbc2.optional;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import org.postgresql.core.ServerVersion;
-import org.postgresql.ds.PGConnectionPoolDataSource;
-import org.postgresql.jdbc2.optional.ConnectionPool;
-import org.postgresql.test.TestUtil;
-
-import org.junit.Assume;
-import org.junit.Test;
-
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
 import java.io.IOException;
@@ -28,10 +16,18 @@ import java.sql.PreparedStatement;
 import java.sql.SQLException;
 import java.sql.Statement;
 import java.util.ArrayList;
-
 import javax.sql.ConnectionEvent;
 import javax.sql.ConnectionEventListener;
 import javax.sql.PooledConnection;
+import org.junit.Assume;
+import org.junit.Test;
+import org.postgresql.core.ServerVersion;
+import org.postgresql.ds.PGConnectionPoolDataSource;
+import org.postgresql.jdbc2.optional.ConnectionPool;
+import org.postgresql.test.TestUtil;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 /**
  * Tests for the ConnectionPoolDataSource and PooledConnection implementations. They are tested
@@ -40,482 +36,482 @@ import javax.sql.PooledConnection;
  * @author Aaron Mulder (ammulder@chariotsolutions.com)
  */
 public class ConnectionPoolTest extends BaseDataSourceTest {
-  private final ArrayList<PooledConnection> connections = new ArrayList<>();
+    private final ArrayList<PooledConnection> connections = new ArrayList<>();
 
-  /**
-   * Creates and configures a ConnectionPool.
-   */
-  @Override
-  protected void initializeDataSource() {
-    if (bds == null) {
-      bds = new ConnectionPool();
-      setupDataSource(bds);
-    }
-  }
-
-  @Override
-  public void tearDown() throws Exception {
-    for (PooledConnection c : connections) {
-      try {
-        c.close();
-      } catch (Exception ex) {
-        // close throws nullptr or other evil things if the connection
-        // is already closed
-      }
-    }
-  }
-
-  /**
-   * Instead of just fetching a Connection from the ConnectionPool, get a PooledConnection, add a
-   * listener to close it when the Connection is closed, and then get the Connection. Without the
-   * listener the PooledConnection (and thus the physical connection) would never by closed.
-   * Probably not a disaster during testing, but you never know.
-   */
-  @Override
-  protected Connection getDataSourceConnection() throws SQLException {
-    initializeDataSource();
-    final PooledConnection pc = getPooledConnection();
-    // Since the pooled connection won't be reused in these basic tests, close it when the
-    // connection is closed
-    pc.addConnectionEventListener(new ConnectionEventListener() {
-      public void connectionClosed(ConnectionEvent event) {
-        try {
-          pc.close();
-        } catch (SQLException e) {
-          fail("Unable to close PooledConnection: " + e);
+    /**
+     * Creates and configures a ConnectionPool.
+     */
+    @Override
+    protected void initializeDataSource() {
+        if (bds == null) {
+            bds = new ConnectionPool();
+            setupDataSource(bds);
         }
-      }
-
-      public void connectionErrorOccurred(ConnectionEvent event) {
-      }
-    });
-    return pc.getConnection();
-  }
-
-  /**
-   * Though the normal client interface is to grab a Connection, in order to test the
-   * middleware/server interface, we need to deal with PooledConnections. Some tests use each.
-   */
-  protected PooledConnection getPooledConnection() throws SQLException {
-    initializeDataSource();
-    // we need to recast to PGConnectionPool rather than
-    // jdbc.optional.ConnectionPool because our ObjectFactory
-    // returns only the top level class, not the specific
-    // jdbc2/jdbc3 implementations.
-    PooledConnection c = ((PGConnectionPoolDataSource) bds).getPooledConnection();
-    connections.add(c);
-    return c;
-  }
-
-  /**
-   * Makes sure that if you get a connection from a PooledConnection, close it, and then get another
-   * one, you're really using the same physical connection. Depends on the implementation of
-   * toString for the connection handle.
-   */
-  @Test
-  public void testPoolReuse() {
-    try {
-      PooledConnection pc = getPooledConnection();
-      con = pc.getConnection();
-      String name = con.toString();
-      con.close();
-      con = pc.getConnection();
-      String name2 = con.toString();
-      con.close();
-      pc.close();
-      assertTrue("Physical connection doesn't appear to be reused across PooledConnection wrappers",
-          name.equals(name2));
-    } catch (SQLException e) {
-      fail(e.getMessage());
-    }
-  }
-
-  /**
-   * Makes sure that when you request a connection from the PooledConnection, and previous
-   * connection it might have given out is closed. See JDBC 2.0 Optional Package spec section 6.2.3
-   */
-  @Test
-  public void testPoolCloseOldWrapper() {
-    try {
-      PooledConnection pc = getPooledConnection();
-      con = pc.getConnection();
-      Connection con2 = pc.getConnection();
-      try {
-        con.createStatement();
-        fail(
-            "Original connection wrapper should be closed when new connection wrapper is generated");
-      } catch (SQLException e) {
-      }
-      con2.close();
-      pc.close();
-    } catch (SQLException e) {
-      fail(e.getMessage());
-    }
-  }
-
-  /**
-   * Makes sure that if you get two connection wrappers from the same PooledConnection, they are
-   * different, even though the represent the same physical connection. See JDBC 2.0 Optional
-   * Package spec section 6.2.2
-   */
-  @Test
-  public void testPoolNewWrapper() {
-    try {
-      PooledConnection pc = getPooledConnection();
-      con = pc.getConnection();
-      Connection con2 = pc.getConnection();
-      con2.close();
-      pc.close();
-      assertTrue(
-          "Two calls to PooledConnection.getConnection should not return the same connection wrapper",
-          con != con2);
-    } catch (SQLException e) {
-      fail(e.getMessage());
-    }
-  }
-
-  /**
-   * Makes sure that exactly one close event is fired for each time a connection handle is closed.
-   * Also checks that events are not fired after a given handle has been closed once.
-   */
-  @Test
-  public void testCloseEvent() {
-    try {
-      PooledConnection pc = getPooledConnection();
-      CountClose cc = new CountClose();
-      pc.addConnectionEventListener(cc);
-      con = pc.getConnection();
-      assertEquals(0, cc.getCount());
-      assertEquals(0, cc.getErrorCount());
-      con.close();
-      assertEquals(1, cc.getCount());
-      assertEquals(0, cc.getErrorCount());
-      con = pc.getConnection();
-      assertEquals(1, cc.getCount());
-      assertEquals(0, cc.getErrorCount());
-      con.close();
-      assertEquals(2, cc.getCount());
-      assertEquals(0, cc.getErrorCount());
-      // a double close shouldn't fire additional events
-      con.close();
-      assertEquals(2, cc.getCount());
-      assertEquals(0, cc.getErrorCount());
-      pc.close();
-    } catch (SQLException e) {
-      fail(e.getMessage());
-    }
-  }
-
-  /**
-   * Makes sure that close events are not fired after a listener has been removed.
-   */
-  @Test
-  public void testNoCloseEvent() {
-    try {
-      PooledConnection pc = getPooledConnection();
-      CountClose cc = new CountClose();
-      pc.addConnectionEventListener(cc);
-      con = pc.getConnection();
-      assertEquals(0, cc.getCount());
-      assertEquals(0, cc.getErrorCount());
-      con.close();
-      assertEquals(1, cc.getCount());
-      assertEquals(0, cc.getErrorCount());
-      pc.removeConnectionEventListener(cc);
-      con = pc.getConnection();
-      assertEquals(1, cc.getCount());
-      assertEquals(0, cc.getErrorCount());
-      con.close();
-      assertEquals(1, cc.getCount());
-      assertEquals(0, cc.getErrorCount());
-    } catch (SQLException e) {
-      fail(e.getMessage());
-    }
-  }
-
-  /**
-   * Makes sure that a listener can be removed while dispatching events. Sometimes this causes a
-   * ConcurrentModificationException or something.
-   */
-  @Test
-  public void testInlineCloseEvent() {
-    try {
-      PooledConnection pc = getPooledConnection();
-      RemoveClose rc1 = new RemoveClose();
-      RemoveClose rc2 = new RemoveClose();
-      RemoveClose rc3 = new RemoveClose();
-      pc.addConnectionEventListener(rc1);
-      pc.addConnectionEventListener(rc2);
-      pc.addConnectionEventListener(rc3);
-      con = pc.getConnection();
-      con.close();
-      con = pc.getConnection();
-      con.close();
-    } catch (Exception e) {
-      fail(e.getMessage());
-    }
-  }
-
-  /**
-   * Tests that a close event is not generated when a connection handle is closed automatically due
-   * to a new connection handle being opened for the same PooledConnection. See JDBC 2.0 Optional
-   * Package spec section 6.3
-   */
-  @Test
-  public void testAutomaticCloseEvent() {
-    try {
-      PooledConnection pc = getPooledConnection();
-      CountClose cc = new CountClose();
-      pc.addConnectionEventListener(cc);
-      con = pc.getConnection();
-      assertEquals(0, cc.getCount());
-      assertEquals(0, cc.getErrorCount());
-      con.close();
-      assertEquals(1, cc.getCount());
-      assertEquals(0, cc.getErrorCount());
-      con = pc.getConnection();
-      assertEquals(1, cc.getCount());
-      assertEquals(0, cc.getErrorCount());
-      // Open a 2nd connection, causing the first to be closed. No even should be generated.
-      Connection con2 = pc.getConnection();
-      assertTrue("Connection handle was not closed when new handle was opened", con.isClosed());
-      assertEquals(1, cc.getCount());
-      assertEquals(0, cc.getErrorCount());
-      con2.close();
-      assertEquals(2, cc.getCount());
-      assertEquals(0, cc.getErrorCount());
-      pc.close();
-    } catch (SQLException e) {
-      fail(e.getMessage());
-    }
-  }
-
-  /**
-   * Makes sure the isClosed method on a connection wrapper does what you'd expect. Checks the usual
-   * case, as well as automatic closure when a new handle is opened on the same physical connection.
-   */
-  @Test
-  public void testIsClosed() {
-    try {
-      PooledConnection pc = getPooledConnection();
-      con = pc.getConnection();
-      assertTrue(!con.isClosed());
-      con.close();
-      assertTrue(con.isClosed());
-      con = pc.getConnection();
-      Connection con2 = pc.getConnection();
-      assertTrue(con.isClosed());
-      assertTrue(!con2.isClosed());
-      con2.close();
-      assertTrue(con.isClosed());
-      pc.close();
-    } catch (SQLException e) {
-      fail(e.getMessage());
-    }
-  }
-
-  /**
-   * Make sure that close status of pooled connection reflect the one of the underlying physical
-   * connection.
-   */
-  @Test
-  public void testBackendIsClosed() throws Exception {
-    try {
-      PooledConnection pc = getPooledConnection();
-      con = pc.getConnection();
-      assertTrue(!con.isClosed());
-
-      Assume.assumeTrue("pg_terminate_backend requires PostgreSQL 8.4+",
-          TestUtil.haveMinimumServerVersion(con, ServerVersion.v8_4));
-
-      TestUtil.terminateBackend(con);
-      try {
-        TestUtil.executeQuery(con, "SELECT 1");
-        fail("The connection should not be opened anymore. An exception was expected");
-      } catch (SQLException e) {
-        // this is expected as the connection has been forcibly closed from backend
-      }
-      assertTrue(con.isClosed());
-    } catch (SQLException e) {
-      fail(e.getMessage());
-    }
-  }
-
-  /**
-   * Ensures that a statement generated by a proxied connection returns the proxied connection from
-   * getConnection() [not the physical connection].
-   */
-  @Test
-  public void testStatementConnection() {
-    try {
-      PooledConnection pc = getPooledConnection();
-      con = pc.getConnection();
-      Statement s = con.createStatement();
-      Connection conRetrieved = s.getConnection();
-
-      assertEquals(con.getClass(), conRetrieved.getClass());
-      assertEquals(con, conRetrieved);
-    } catch (SQLException e) {
-      fail(e.getMessage());
-    }
-  }
-
-  /**
-   * Ensures that the Statement proxy generated by the Connection handle throws the correct kind of
-   * exception.
-   */
-  @Test
-  public void testStatementProxy() {
-    Statement s = null;
-    try {
-      PooledConnection pc = getPooledConnection();
-      con = pc.getConnection();
-      s = con.createStatement();
-    } catch (SQLException e) {
-      fail(e.getMessage());
-    }
-    try {
-      s.executeQuery("SELECT * FROM THIS_TABLE_SHOULD_NOT_EXIST");
-      fail("An SQL exception was not thrown that should have been");
-    } catch (SQLException e) {
-      // This is the expected and correct path
-    } catch (Exception e) {
-      fail("bad exception; was expecting SQLException, not" + e.getClass().getName());
-    }
-  }
-
-  /**
-   * Ensures that a prepared statement generated by a proxied connection returns the proxied
-   * connection from getConnection() [not the physical connection].
-   */
-  @Test
-  public void testPreparedStatementConnection() {
-    try {
-      PooledConnection pc = getPooledConnection();
-      con = pc.getConnection();
-      PreparedStatement s = con.prepareStatement("select 'x'");
-      Connection conRetrieved = s.getConnection();
-
-      assertEquals(con.getClass(), conRetrieved.getClass());
-      assertEquals(con, conRetrieved);
-    } catch (SQLException e) {
-      fail(e.getMessage());
-    }
-  }
-
-  /**
-   * Ensures that a callable statement generated by a proxied connection returns the proxied
-   * connection from getConnection() [not the physical connection].
-   */
-  @Test
-  public void testCallableStatementConnection() {
-    try {
-      PooledConnection pc = getPooledConnection();
-      con = pc.getConnection();
-      CallableStatement s = con.prepareCall("select 'x'");
-      Connection conRetrieved = s.getConnection();
-
-      assertEquals(con.getClass(), conRetrieved.getClass());
-      assertEquals(con, conRetrieved);
-    } catch (SQLException e) {
-      fail(e.getMessage());
-    }
-  }
-
-  /**
-   * Ensure that a statement created from a pool can be used like any other statement in regard to
-   * pg extensions.
-   */
-  @Test
-  public void testStatementsProxyPGStatement() {
-    try {
-      PooledConnection pc = getPooledConnection();
-      con = pc.getConnection();
-
-      Statement s = con.createStatement();
-      boolean b = ((org.postgresql.PGStatement) s).isUseServerPrepare();
-
-      PreparedStatement ps = con.prepareStatement("select 'x'");
-      b = ((org.postgresql.PGStatement) ps).isUseServerPrepare();
-
-      CallableStatement cs = con.prepareCall("select 'x'");
-      b = ((org.postgresql.PGStatement) cs).isUseServerPrepare();
-
-    } catch (SQLException e) {
-      fail(e.getMessage());
-    }
-  }
-
-  /**
-   * Helper class to remove a listener during event dispatching.
-   */
-  private class RemoveClose implements ConnectionEventListener {
-    @Override
-    public void connectionClosed(ConnectionEvent event) {
-      ((PooledConnection) event.getSource()).removeConnectionEventListener(this);
     }
 
     @Override
-    public void connectionErrorOccurred(ConnectionEvent event) {
-      ((PooledConnection) event.getSource()).removeConnectionEventListener(this);
+    public void tearDown() throws Exception {
+        for (PooledConnection c : connections) {
+            try {
+                c.close();
+            } catch (Exception ex) {
+                // close may throw NullPointerException or other errors if the connection
+                // is already closed
+            }
+        }
     }
-  }
-
-  /**
-   * Helper class that implements the event listener interface, and counts the number of events it
-   * sees.
-   */
-  private class CountClose implements ConnectionEventListener {
-    private int count;
-    private int errorCount;
 
+    /**
+     * Instead of just fetching a Connection from the ConnectionPool, get a PooledConnection, add a
+     * listener to close it when the Connection is closed, and then get the Connection. Without the
+     * listener the PooledConnection (and thus the physical connection) would never be closed.
+     * Probably not a disaster during testing, but you never know.
+     */
     @Override
-    public void connectionClosed(ConnectionEvent event) {
-      count++;
+    protected Connection getDataSourceConnection() throws SQLException {
+        initializeDataSource();
+        final PooledConnection pc = getPooledConnection();
+        // Since the pooled connection won't be reused in these basic tests, close it when the
+        // connection is closed
+        pc.addConnectionEventListener(new ConnectionEventListener() {
+            public void connectionClosed(ConnectionEvent event) {
+                try {
+                    pc.close();
+                } catch (SQLException e) {
+                    fail("Unable to close PooledConnection: " + e);
+                }
+            }
+
+            public void connectionErrorOccurred(ConnectionEvent event) {
+            }
+        });
+        return pc.getConnection();
     }
 
-    @Override
-    public void connectionErrorOccurred(ConnectionEvent event) {
-      errorCount++;
+    /**
+     * Though the normal client interface is to grab a Connection, in order to test the
+     * middleware/server interface, we need to deal with PooledConnections. Some tests use each.
+     */
+    protected PooledConnection getPooledConnection() throws SQLException {
+        initializeDataSource();
+        // we need to recast to PGConnectionPool rather than
+        // jdbc.optional.ConnectionPool because our ObjectFactory
+        // returns only the top level class, not the specific
+        // jdbc2/jdbc3 implementations.
+        PooledConnection c = ((PGConnectionPoolDataSource) bds).getPooledConnection();
+        connections.add(c);
+        return c;
     }
 
-    public int getCount() {
-      return count;
+    /**
+     * Makes sure that if you get a connection from a PooledConnection, close it, and then get another
+     * one, you're really using the same physical connection. Depends on the implementation of
+     * toString for the connection handle.
+     */
+    @Test
+    public void testPoolReuse() {
+        try {
+            PooledConnection pc = getPooledConnection();
+            con = pc.getConnection();
+            String name = con.toString();
+            con.close();
+            con = pc.getConnection();
+            String name2 = con.toString();
+            con.close();
+            pc.close();
+            assertTrue("Physical connection doesn't appear to be reused across PooledConnection wrappers",
+                    name.equals(name2));
+        } catch (SQLException e) {
+            fail(e.getMessage());
+        }
     }
 
-    public int getErrorCount() {
-      return errorCount;
+    /**
+     * Makes sure that when you request a connection from the PooledConnection, any previous
+     * connection it might have given out is closed. See JDBC 2.0 Optional Package spec section 6.2.3
+     */
+    @Test
+    public void testPoolCloseOldWrapper() {
+        try {
+            PooledConnection pc = getPooledConnection();
+            con = pc.getConnection();
+            Connection con2 = pc.getConnection();
+            try {
+                con.createStatement();
+                fail(
+                        "Original connection wrapper should be closed when new connection wrapper is generated");
+            } catch (SQLException e) {
+            }
+            con2.close();
+            pc.close();
+        } catch (SQLException e) {
+            fail(e.getMessage());
+        }
     }
 
-    public void clear() {
-      count = errorCount = 0;
+    /**
+     * Makes sure that if you get two connection wrappers from the same PooledConnection, they are
+     * different, even though they represent the same physical connection. See JDBC 2.0 Optional
+     * Package spec section 6.2.2
+     */
+    @Test
+    public void testPoolNewWrapper() {
+        try {
+            PooledConnection pc = getPooledConnection();
+            con = pc.getConnection();
+            Connection con2 = pc.getConnection();
+            con2.close();
+            pc.close();
+            assertTrue(
+                    "Two calls to PooledConnection.getConnection should not return the same connection wrapper",
+                    con != con2);
+        } catch (SQLException e) {
+            fail(e.getMessage());
+        }
     }
-  }
 
-  @Test
-  public void testSerializable() throws IOException, ClassNotFoundException {
-    ConnectionPool pool = new ConnectionPool();
-    pool.setDefaultAutoCommit(false);
-    pool.setServerName("db.myhost.com");
-    pool.setDatabaseName("mydb");
-    pool.setUser("user");
-    pool.setPassword("pass");
-    pool.setPortNumber(1111);
+    /**
+     * Makes sure that exactly one close event is fired for each time a connection handle is closed.
+     * Also checks that events are not fired after a given handle has been closed once.
+     */
+    @Test
+    public void testCloseEvent() {
+        try {
+            PooledConnection pc = getPooledConnection();
+            CountClose cc = new CountClose();
+            pc.addConnectionEventListener(cc);
+            con = pc.getConnection();
+            assertEquals(0, cc.getCount());
+            assertEquals(0, cc.getErrorCount());
+            con.close();
+            assertEquals(1, cc.getCount());
+            assertEquals(0, cc.getErrorCount());
+            con = pc.getConnection();
+            assertEquals(1, cc.getCount());
+            assertEquals(0, cc.getErrorCount());
+            con.close();
+            assertEquals(2, cc.getCount());
+            assertEquals(0, cc.getErrorCount());
+            // a double close shouldn't fire additional events
+            con.close();
+            assertEquals(2, cc.getCount());
+            assertEquals(0, cc.getErrorCount());
+            pc.close();
+        } catch (SQLException e) {
+            fail(e.getMessage());
+        }
+    }
 
-    ByteArrayOutputStream baos = new ByteArrayOutputStream();
-    ObjectOutputStream oos = new ObjectOutputStream(baos);
-    oos.writeObject(pool);
+    /**
+     * Makes sure that close events are not fired after a listener has been removed.
+     */
+    @Test
+    public void testNoCloseEvent() {
+        try {
+            PooledConnection pc = getPooledConnection();
+            CountClose cc = new CountClose();
+            pc.addConnectionEventListener(cc);
+            con = pc.getConnection();
+            assertEquals(0, cc.getCount());
+            assertEquals(0, cc.getErrorCount());
+            con.close();
+            assertEquals(1, cc.getCount());
+            assertEquals(0, cc.getErrorCount());
+            pc.removeConnectionEventListener(cc);
+            con = pc.getConnection();
+            assertEquals(1, cc.getCount());
+            assertEquals(0, cc.getErrorCount());
+            con.close();
+            assertEquals(1, cc.getCount());
+            assertEquals(0, cc.getErrorCount());
+        } catch (SQLException e) {
+            fail(e.getMessage());
+        }
+    }
 
-    ByteArrayInputStream bais = new ByteArrayInputStream(baos.toByteArray());
-    ObjectInputStream ois = new ObjectInputStream(bais);
-    ConnectionPool pool2 = (ConnectionPool) ois.readObject();
+    /**
+     * Makes sure that a listener can be removed while dispatching events. Sometimes this causes a
+     * ConcurrentModificationException or something.
+     */
+    @Test
+    public void testInlineCloseEvent() {
+        try {
+            PooledConnection pc = getPooledConnection();
+            RemoveClose rc1 = new RemoveClose();
+            RemoveClose rc2 = new RemoveClose();
+            RemoveClose rc3 = new RemoveClose();
+            pc.addConnectionEventListener(rc1);
+            pc.addConnectionEventListener(rc2);
+            pc.addConnectionEventListener(rc3);
+            con = pc.getConnection();
+            con.close();
+            con = pc.getConnection();
+            con.close();
+        } catch (Exception e) {
+            fail(e.getMessage());
+        }
+    }
 
-    assertEquals(pool.isDefaultAutoCommit(), pool2.isDefaultAutoCommit());
-    assertEquals(pool.getServerName(), pool2.getServerName());
-    assertEquals(pool.getDatabaseName(), pool2.getDatabaseName());
-    assertEquals(pool.getUser(), pool2.getUser());
-    assertEquals(pool.getPassword(), pool2.getPassword());
-    assertEquals(pool.getPortNumber(), pool2.getPortNumber());
-  }
+    /**
+     * Tests that a close event is not generated when a connection handle is closed automatically due
+     * to a new connection handle being opened for the same PooledConnection. See JDBC 2.0 Optional
+     * Package spec section 6.3
+     */
+    @Test
+    public void testAutomaticCloseEvent() {
+        try {
+            PooledConnection pc = getPooledConnection();
+            CountClose cc = new CountClose();
+            pc.addConnectionEventListener(cc);
+            con = pc.getConnection();
+            assertEquals(0, cc.getCount());
+            assertEquals(0, cc.getErrorCount());
+            con.close();
+            assertEquals(1, cc.getCount());
+            assertEquals(0, cc.getErrorCount());
+            con = pc.getConnection();
+            assertEquals(1, cc.getCount());
+            assertEquals(0, cc.getErrorCount());
+            // Open a 2nd connection, causing the first to be closed. No event should be generated.
+            Connection con2 = pc.getConnection();
+            assertTrue("Connection handle was not closed when new handle was opened", con.isClosed());
+            assertEquals(1, cc.getCount());
+            assertEquals(0, cc.getErrorCount());
+            con2.close();
+            assertEquals(2, cc.getCount());
+            assertEquals(0, cc.getErrorCount());
+            pc.close();
+        } catch (SQLException e) {
+            fail(e.getMessage());
+        }
+    }
+
+    /**
+     * Makes sure the isClosed method on a connection wrapper does what you'd expect. Checks the usual
+     * case, as well as automatic closure when a new handle is opened on the same physical connection.
+     */
+    @Test
+    public void testIsClosed() {
+        try {
+            PooledConnection pc = getPooledConnection();
+            con = pc.getConnection();
+            assertTrue(!con.isClosed());
+            con.close();
+            assertTrue(con.isClosed());
+            con = pc.getConnection();
+            Connection con2 = pc.getConnection();
+            assertTrue(con.isClosed());
+            assertTrue(!con2.isClosed());
+            con2.close();
+            assertTrue(con.isClosed());
+            pc.close();
+        } catch (SQLException e) {
+            fail(e.getMessage());
+        }
+    }
+
+    /**
+     * Make sure that the close status of a pooled connection reflects that of the underlying
+     * physical connection.
+     */
+    @Test
+    public void testBackendIsClosed() throws Exception {
+        try {
+            PooledConnection pc = getPooledConnection();
+            con = pc.getConnection();
+            assertTrue(!con.isClosed());
+
+            Assume.assumeTrue("pg_terminate_backend requires PostgreSQL 8.4+",
+                    TestUtil.haveMinimumServerVersion(con, ServerVersion.v8_4));
+
+            TestUtil.terminateBackend(con);
+            try {
+                TestUtil.executeQuery(con, "SELECT 1");
+                fail("The connection should not be opened anymore. An exception was expected");
+            } catch (SQLException e) {
+                // this is expected as the connection has been forcibly closed from backend
+            }
+            assertTrue(con.isClosed());
+        } catch (SQLException e) {
+            fail(e.getMessage());
+        }
+    }
+
+    /**
+     * Ensures that a statement generated by a proxied connection returns the proxied connection from
+     * getConnection() [not the physical connection].
+     */
+    @Test
+    public void testStatementConnection() {
+        try {
+            PooledConnection pc = getPooledConnection();
+            con = pc.getConnection();
+            Statement s = con.createStatement();
+            Connection conRetrieved = s.getConnection();
+
+            assertEquals(con.getClass(), conRetrieved.getClass());
+            assertEquals(con, conRetrieved);
+        } catch (SQLException e) {
+            fail(e.getMessage());
+        }
+    }
+
+    /**
+     * Ensures that the Statement proxy generated by the Connection handle throws the correct kind of
+     * exception.
+     */
+    @Test
+    public void testStatementProxy() {
+        Statement s = null;
+        try {
+            PooledConnection pc = getPooledConnection();
+            con = pc.getConnection();
+            s = con.createStatement();
+        } catch (SQLException e) {
+            fail(e.getMessage());
+        }
+        try {
+            s.executeQuery("SELECT * FROM THIS_TABLE_SHOULD_NOT_EXIST");
+            fail("An SQL exception was not thrown that should have been");
+        } catch (SQLException e) {
+            // This is the expected and correct path
+        } catch (Exception e) {
+            fail("bad exception; was expecting SQLException, not" + e.getClass().getName());
+        }
+    }
+
+    /**
+     * Ensures that a prepared statement generated by a proxied connection returns the proxied
+     * connection from getConnection() [not the physical connection].
+     */
+    @Test
+    public void testPreparedStatementConnection() {
+        try {
+            PooledConnection pc = getPooledConnection();
+            con = pc.getConnection();
+            PreparedStatement s = con.prepareStatement("select 'x'");
+            Connection conRetrieved = s.getConnection();
+
+            assertEquals(con.getClass(), conRetrieved.getClass());
+            assertEquals(con, conRetrieved);
+        } catch (SQLException e) {
+            fail(e.getMessage());
+        }
+    }
+
+    /**
+     * Ensures that a callable statement generated by a proxied connection returns the proxied
+     * connection from getConnection() [not the physical connection].
+     */
+    @Test
+    public void testCallableStatementConnection() {
+        try {
+            PooledConnection pc = getPooledConnection();
+            con = pc.getConnection();
+            CallableStatement s = con.prepareCall("select 'x'");
+            Connection conRetrieved = s.getConnection();
+
+            assertEquals(con.getClass(), conRetrieved.getClass());
+            assertEquals(con, conRetrieved);
+        } catch (SQLException e) {
+            fail(e.getMessage());
+        }
+    }
+
+    /**
+     * Ensure that a statement created from a pool can be used like any other statement in regard to
+     * pg extensions.
+     */
+    @Test
+    public void testStatementsProxyPGStatement() {
+        try {
+            PooledConnection pc = getPooledConnection();
+            con = pc.getConnection();
+
+            Statement s = con.createStatement();
+            boolean b = ((org.postgresql.PGStatement) s).isUseServerPrepare();
+
+            PreparedStatement ps = con.prepareStatement("select 'x'");
+            b = ((org.postgresql.PGStatement) ps).isUseServerPrepare();
+
+            CallableStatement cs = con.prepareCall("select 'x'");
+            b = ((org.postgresql.PGStatement) cs).isUseServerPrepare();
+
+        } catch (SQLException e) {
+            fail(e.getMessage());
+        }
+    }
+
+    @Test
+    public void testSerializable() throws IOException, ClassNotFoundException {
+        ConnectionPool pool = new ConnectionPool();
+        pool.setDefaultAutoCommit(false);
+        pool.setServerName("db.myhost.com");
+        pool.setDatabaseName("mydb");
+        pool.setUser("user");
+        pool.setPassword("pass");
+        pool.setPortNumber(1111);
+
+        ByteArrayOutputStream baos = new ByteArrayOutputStream();
+        ObjectOutputStream oos = new ObjectOutputStream(baos);
+        oos.writeObject(pool);
+
+        ByteArrayInputStream bais = new ByteArrayInputStream(baos.toByteArray());
+        ObjectInputStream ois = new ObjectInputStream(bais);
+        ConnectionPool pool2 = (ConnectionPool) ois.readObject();
+
+        assertEquals(pool.isDefaultAutoCommit(), pool2.isDefaultAutoCommit());
+        assertEquals(pool.getServerName(), pool2.getServerName());
+        assertEquals(pool.getDatabaseName(), pool2.getDatabaseName());
+        assertEquals(pool.getUser(), pool2.getUser());
+        assertEquals(pool.getPassword(), pool2.getPassword());
+        assertEquals(pool.getPortNumber(), pool2.getPortNumber());
+    }
+
+    /**
+     * Helper class to remove a listener during event dispatching.
+     */
+    private class RemoveClose implements ConnectionEventListener {
+        @Override
+        public void connectionClosed(ConnectionEvent event) {
+            ((PooledConnection) event.getSource()).removeConnectionEventListener(this);
+        }
+
+        @Override
+        public void connectionErrorOccurred(ConnectionEvent event) {
+            ((PooledConnection) event.getSource()).removeConnectionEventListener(this);
+        }
+    }
+
+    /**
+     * Helper class that implements the event listener interface, and counts the number of events it
+     * sees.
+     */
+    private class CountClose implements ConnectionEventListener {
+        private int count;
+        private int errorCount;
+
+        @Override
+        public void connectionClosed(ConnectionEvent event) {
+            count++;
+        }
+
+        @Override
+        public void connectionErrorOccurred(ConnectionEvent event) {
+            errorCount++;
+        }
+
+        public int getCount() {
+            return count;
+        }
+
+        public int getErrorCount() {
+            return errorCount;
+        }
+
+        public void clear() {
+            count = errorCount = 0;
+        }
+    }
 
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/optional/OptionalTestSuite.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/optional/OptionalTestSuite.java
index 703b310..b9ca724 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/optional/OptionalTestSuite.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/optional/OptionalTestSuite.java
@@ -16,13 +16,13 @@ import org.junit.runners.Suite;
  */
 @RunWith(Suite.class)
 @Suite.SuiteClasses({
-    BaseDataSourceFailoverUrlsTest.class,
-    CaseOptimiserDataSourceTest.class,
-    ConnectionPoolTest.class,
-    PoolingDataSourceTest.class,
-    SimpleDataSourceTest.class,
-    SimpleDataSourceWithSetURLTest.class,
-    SimpleDataSourceWithUrlTest.class,
+        BaseDataSourceFailoverUrlsTest.class,
+        CaseOptimiserDataSourceTest.class,
+        ConnectionPoolTest.class,
+        PoolingDataSourceTest.class,
+        SimpleDataSourceTest.class,
+        SimpleDataSourceWithSetURLTest.class,
+        SimpleDataSourceWithUrlTest.class,
 })
 public class OptionalTestSuite {
 
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/optional/PoolingDataSourceTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/optional/PoolingDataSourceTest.java
index 73a9824..6cbe191 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/optional/PoolingDataSourceTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/optional/PoolingDataSourceTest.java
@@ -5,19 +5,16 @@
 
 package org.postgresql.test.jdbc2.optional;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertSame;
-import static org.junit.Assert.fail;
-
-import org.postgresql.ds.common.BaseDataSource;
-import org.postgresql.jdbc2.optional.PoolingDataSource;
-
-import org.junit.Test;
-
 import java.sql.Connection;
 import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.sql.Statement;
+import org.junit.Test;
+import org.postgresql.ds.common.BaseDataSource;
+import org.postgresql.jdbc2.optional.PoolingDataSource;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.fail;
 
 /**
  * Minimal tests for pooling DataSource. Needs many more.
@@ -25,123 +22,123 @@ import java.sql.Statement;
  * @author Aaron Mulder (ammulder@chariotsolutions.com)
  */
 public class PoolingDataSourceTest extends BaseDataSourceTest {
-  private static final String DS_NAME = "JDBC 2 SE Test DataSource";
+    private static final String DS_NAME = "JDBC 2 SE Test DataSource";
 
-  @Override
-  public void tearDown() throws Exception {
-    if (bds instanceof PoolingDataSource) {
-      ((PoolingDataSource) bds).close();
+    @Override
+    public void tearDown() throws Exception {
+        if (bds instanceof PoolingDataSource) {
+            ((PoolingDataSource) bds).close();
+        }
+        super.tearDown();
     }
-    super.tearDown();
-  }
 
-  /**
-   * Creates and configures a new SimpleDataSource.
-   */
-  @Override
-  protected void initializeDataSource() {
-    if (bds == null) {
-      bds = new PoolingDataSource();
-      setupDataSource(bds);
-      ((PoolingDataSource) bds).setDataSourceName(DS_NAME);
-      ((PoolingDataSource) bds).setInitialConnections(2);
-      ((PoolingDataSource) bds).setMaxConnections(10);
+    /**
+     * Creates and configures a new SimpleDataSource.
+     */
+    @Override
+    protected void initializeDataSource() {
+        if (bds == null) {
+            bds = new PoolingDataSource();
+            setupDataSource(bds);
+            ((PoolingDataSource) bds).setDataSourceName(DS_NAME);
+            ((PoolingDataSource) bds).setInitialConnections(2);
+            ((PoolingDataSource) bds).setMaxConnections(10);
+        }
     }
-  }
 
-  /**
-   * In this case, we *do* want it to be pooled.
-   */
-  @Override
-  public void testNotPooledConnection() throws SQLException {
-    con = getDataSourceConnection();
-    String name = con.toString();
-    con.close();
-    con = getDataSourceConnection();
-    String name2 = con.toString();
-    con.close();
-    assertEquals("Pooled DS doesn't appear to be pooling connections!", name, name2);
-  }
-
-  /**
-   * In this case, the desired behavior is dereferencing.
-   */
-  @Override
-  protected void compareJndiDataSource(BaseDataSource oldbds, BaseDataSource bds) {
-    assertSame("DataSource was serialized or recreated, should have been dereferenced",
-        bds, oldbds);
-  }
-
-  /**
-   * Check that 2 DS instances can't use the same name.
-   */
-  @Test
-  public void testCantReuseName() {
-    initializeDataSource();
-    PoolingDataSource pds = new PoolingDataSource();
-    try {
-      pds.setDataSourceName(DS_NAME);
-      fail("Should have denied 2nd DataSource with same name");
-    } catch (IllegalArgumentException e) {
+    /**
+     * In this case, we *do* want it to be pooled.
+     */
+    @Override
+    public void testNotPooledConnection() throws SQLException {
+        con = getDataSourceConnection();
+        String name = con.toString();
+        con.close();
+        con = getDataSourceConnection();
+        String name2 = con.toString();
+        con.close();
+        assertEquals("Pooled DS doesn't appear to be pooling connections!", name, name2);
     }
-  }
 
-  /**
-   * Closing a Connection twice is not an error.
-   */
-  @Test
-  public void testDoubleConnectionClose() throws SQLException {
-    con = getDataSourceConnection();
-    con.close();
-    con.close();
-  }
+    /**
+     * In this case, the desired behavior is dereferencing.
+     */
+    @Override
+    protected void compareJndiDataSource(BaseDataSource oldbds, BaseDataSource bds) {
+        assertSame("DataSource was serialized or recreated, should have been dereferenced",
+                bds, oldbds);
+    }
 
-  /**
-   * Closing a Statement twice is not an error.
-   */
-  @Test
-  public void testDoubleStatementClose() throws SQLException {
-    con = getDataSourceConnection();
-    Statement stmt = con.createStatement();
-    stmt.close();
-    stmt.close();
-    con.close();
-  }
+    /**
+     * Check that 2 DS instances can't use the same name.
+     */
+    @Test
+    public void testCantReuseName() {
+        initializeDataSource();
+        PoolingDataSource pds = new PoolingDataSource();
+        try {
+            pds.setDataSourceName(DS_NAME);
+            fail("Should have denied 2nd DataSource with same name");
+        } catch (IllegalArgumentException e) {
+        }
+    }
 
-  @Test
-  public void testConnectionObjectMethods() throws SQLException {
-    con = getDataSourceConnection();
+    /**
+     * Closing a Connection twice is not an error.
+     */
+    @Test
+    public void testDoubleConnectionClose() throws SQLException {
+        con = getDataSourceConnection();
+        con.close();
+        con.close();
+    }
 
-    Connection conRef = con;
-    assertEquals(con, conRef);
+    /**
+     * Closing a Statement twice is not an error.
+     */
+    @Test
+    public void testDoubleStatementClose() throws SQLException {
+        con = getDataSourceConnection();
+        Statement stmt = con.createStatement();
+        stmt.close();
+        stmt.close();
+        con.close();
+    }
 
-    int hc1 = con.hashCode();
-    con.close();
-    int hc2 = con.hashCode();
+    @Test
+    public void testConnectionObjectMethods() throws SQLException {
+        con = getDataSourceConnection();
 
-    assertEquals(con, conRef);
-    assertEquals(hc1, hc2);
-  }
+        Connection conRef = con;
+        assertEquals(con, conRef);
 
-  @Test
-  public void testStatementObjectMethods() throws SQLException {
-    con = getDataSourceConnection();
+        int hc1 = con.hashCode();
+        con.close();
+        int hc2 = con.hashCode();
 
-    Statement stmt = con.createStatement();
-    ResultSet rs = stmt.executeQuery("SELECT 1");
-    Statement stmtRef = stmt;
+        assertEquals(con, conRef);
+        assertEquals(hc1, hc2);
+    }
 
-    assertEquals(stmt, stmtRef);
-    // Currently we aren't proxying ResultSet, so this doesn't
-    // work, see Bug #1010542.
-    // assertEquals(stmt, rs.getStatement());
+    @Test
+    public void testStatementObjectMethods() throws SQLException {
+        con = getDataSourceConnection();
 
-    int hc1 = stmt.hashCode();
-    stmt.close();
-    int hc2 = stmt.hashCode();
+        Statement stmt = con.createStatement();
+        ResultSet rs = stmt.executeQuery("SELECT 1");
+        Statement stmtRef = stmt;
 
-    assertEquals(stmt, stmtRef);
-    assertEquals(hc1, hc2);
-  }
+        assertEquals(stmt, stmtRef);
+        // Currently we aren't proxying ResultSet, so this doesn't
+        // work, see Bug #1010542.
+        // assertEquals(stmt, rs.getStatement());
+
+        int hc1 = stmt.hashCode();
+        stmt.close();
+        int hc2 = stmt.hashCode();
+
+        assertEquals(stmt, stmtRef);
+        assertEquals(hc1, hc2);
+    }
 
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/optional/SimpleDataSourceTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/optional/SimpleDataSourceTest.java
index 644d273..91b2d5f 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/optional/SimpleDataSourceTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/optional/SimpleDataSourceTest.java
@@ -5,11 +5,10 @@
 
 package org.postgresql.test.jdbc2.optional;
 
+import org.junit.Test;
 import org.postgresql.ds.PGSimpleDataSource;
 import org.postgresql.jdbc2.optional.SimpleDataSource;
 
-import org.junit.Test;
-
 /**
  * Performs the basic tests defined in the superclass. Just adds the configuration logic.
  *
@@ -17,21 +16,21 @@ import org.junit.Test;
  */
 public class SimpleDataSourceTest extends BaseDataSourceTest {
 
-  /**
-   * Creates and configures a new SimpleDataSource.
-   */
-  @Override
-  protected void initializeDataSource() {
-    if (bds == null) {
-      bds = new SimpleDataSource();
-      setupDataSource(bds);
+    /**
+     * Creates and configures a new SimpleDataSource.
+     */
+    @Override
+    protected void initializeDataSource() {
+        if (bds == null) {
+            bds = new SimpleDataSource();
+            setupDataSource(bds);
+        }
     }
-  }
 
-  @Test(expected = IllegalArgumentException.class)
-  public void testTypoPostgresUrl() {
-    PGSimpleDataSource ds = new PGSimpleDataSource();
-    // this should fail because the protocol is wrong.
-    ds.setUrl("jdbc:postgres://localhost:5432/test");
-  }
+    @Test(expected = IllegalArgumentException.class)
+    public void testTypoPostgresUrl() {
+        PGSimpleDataSource ds = new PGSimpleDataSource();
+        // this should fail because the protocol is wrong.
+        ds.setUrl("jdbc:postgres://localhost:5432/test");
+    }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/optional/SimpleDataSourceWithSetURLTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/optional/SimpleDataSourceWithSetURLTest.java
index 17ed36b..016f821 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/optional/SimpleDataSourceWithSetURLTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/optional/SimpleDataSourceWithSetURLTest.java
@@ -5,55 +5,52 @@
 
 package org.postgresql.test.jdbc2.optional;
 
-import static org.junit.Assert.assertEquals;
-import static org.postgresql.Driver.parseURL;
-
+import java.util.Properties;
+import org.junit.Test;
 import org.postgresql.PGProperty;
 import org.postgresql.jdbc2.optional.SimpleDataSource;
 import org.postgresql.test.TestUtil;
-
-import org.junit.Test;
-
-import java.util.Properties;
+import static org.junit.Assert.assertEquals;
+import static org.postgresql.Driver.parseURL;
 
 /**
  * Performs the basic tests defined in the superclass. Just adds the configuration logic.
  */
 public class SimpleDataSourceWithSetURLTest extends BaseDataSourceTest {
-  /**
-   * Creates and configures a new SimpleDataSource using setURL method.
-   */
-  @Override
-  protected void initializeDataSource() {
-    if (bds == null) {
-      bds = new SimpleDataSource();
-      bds.setURL(String.format("jdbc:postgresql://%s:%d/%s?prepareThreshold=%d", TestUtil.getServer(), TestUtil.getPort(), TestUtil.getDatabase(), TestUtil.getPrepareThreshold()));
-      bds.setUser(TestUtil.getUser());
-      bds.setPassword(TestUtil.getPassword());
-      bds.setProtocolVersion(TestUtil.getProtocolVersion());
+    /**
+     * Creates and configures a new SimpleDataSource using setURL method.
+     */
+    @Override
+    protected void initializeDataSource() {
+        if (bds == null) {
+            bds = new SimpleDataSource();
+            bds.setURL(String.format("jdbc:postgresql://%s:%d/%s?prepareThreshold=%d", TestUtil.getServer(), TestUtil.getPort(), TestUtil.getDatabase(), TestUtil.getPrepareThreshold()));
+            bds.setUser(TestUtil.getUser());
+            bds.setPassword(TestUtil.getPassword());
+            bds.setProtocolVersion(TestUtil.getProtocolVersion());
+        }
     }
-  }
 
-  @Test
-  public void testGetURL() throws Exception {
-    con = getDataSourceConnection();
+    @Test
+    public void testGetURL() throws Exception {
+        con = getDataSourceConnection();
 
-    String url = bds.getURL();
-    Properties properties = parseURL(url, null);
+        String url = bds.getURL();
+        Properties properties = parseURL(url, null);
 
-    assertEquals(TestUtil.getServer(), properties.getProperty(PGProperty.PG_HOST.getName()));
-    assertEquals(Integer.toString(TestUtil.getPort()), properties.getProperty(PGProperty.PG_PORT.getName()));
-    assertEquals(TestUtil.getDatabase(), properties.getProperty(PGProperty.PG_DBNAME.getName()));
-    assertEquals(Integer.toString(TestUtil.getPrepareThreshold()), properties.getProperty(PGProperty.PREPARE_THRESHOLD.getName()));
-  }
+        assertEquals(TestUtil.getServer(), properties.getProperty(PGProperty.PG_HOST.getName()));
+        assertEquals(Integer.toString(TestUtil.getPort()), properties.getProperty(PGProperty.PG_PORT.getName()));
+        assertEquals(TestUtil.getDatabase(), properties.getProperty(PGProperty.PG_DBNAME.getName()));
+        assertEquals(Integer.toString(TestUtil.getPrepareThreshold()), properties.getProperty(PGProperty.PREPARE_THRESHOLD.getName()));
+    }
 
-  @Test
-  public void testSetURL() throws Exception {
-    initializeDataSource();
+    @Test
+    public void testSetURL() throws Exception {
+        initializeDataSource();
 
-    assertEquals(TestUtil.getServer(), bds.getServerName());
-    assertEquals(TestUtil.getPort(), bds.getPortNumber());
-    assertEquals(TestUtil.getDatabase(), bds.getDatabaseName());
-    assertEquals(TestUtil.getPrepareThreshold(), bds.getPrepareThreshold());
-  }
+        assertEquals(TestUtil.getServer(), bds.getServerName());
+        assertEquals(TestUtil.getPort(), bds.getPortNumber());
+        assertEquals(TestUtil.getDatabase(), bds.getDatabaseName());
+        assertEquals(TestUtil.getPrepareThreshold(), bds.getPrepareThreshold());
+    }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/optional/SimpleDataSourceWithUrlTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/optional/SimpleDataSourceWithUrlTest.java
index ad7106b..8a6dbac 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc2/optional/SimpleDataSourceWithUrlTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc2/optional/SimpleDataSourceWithUrlTest.java
@@ -14,18 +14,18 @@ import org.postgresql.test.TestUtil;
  * @author Aaron Mulder (ammulder@chariotsolutions.com)
  */
 public class SimpleDataSourceWithUrlTest extends BaseDataSourceTest {
-  /**
-   * Creates and configures a new SimpleDataSource.
-   */
-  @Override
-  protected void initializeDataSource() {
-    if (bds == null) {
-      bds = new SimpleDataSource();
-      bds.setUrl("jdbc:postgresql://" + TestUtil.getServer() + ":" + TestUtil.getPort() + "/"
-          + TestUtil.getDatabase() + "?prepareThreshold=" + TestUtil.getPrepareThreshold());
-      bds.setUser(TestUtil.getUser());
-      bds.setPassword(TestUtil.getPassword());
-      bds.setProtocolVersion(TestUtil.getProtocolVersion());
+    /**
+     * Creates and configures a new SimpleDataSource.
+     */
+    @Override
+    protected void initializeDataSource() {
+        if (bds == null) {
+            bds = new SimpleDataSource();
+            bds.setUrl("jdbc:postgresql://" + TestUtil.getServer() + ":" + TestUtil.getPort() + "/"
+                    + TestUtil.getDatabase() + "?prepareThreshold=" + TestUtil.getPrepareThreshold());
+            bds.setUser(TestUtil.getUser());
+            bds.setPassword(TestUtil.getPassword());
+            bds.setProtocolVersion(TestUtil.getProtocolVersion());
+        }
     }
-  }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc3/CompositeQueryParseTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc3/CompositeQueryParseTest.java
index 58c7f8c..51cddf8 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc3/CompositeQueryParseTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc3/CompositeQueryParseTest.java
@@ -20,201 +20,201 @@ import java.util.List;
 
 class CompositeQueryParseTest {
 
-  @Test
-  void emptyQuery() {
-    assertEquals("", reparse("", true, false, true));
-  }
-
-  @Test
-  void whitespaceQuery() {
-    assertEquals("", reparse("     ", true, false, true));
-  }
-
-  @Test
-  void onlyEmptyQueries() {
-    assertEquals("", reparse(";;;;  ;  \n;\n", true, false, true));
-  }
-
-  @Test
-  void simpleQuery() {
-    assertEquals("select 1", reparse("select 1", true, false, true));
-  }
-
-  @Test
-  void simpleBind() {
-    assertEquals("select $1", reparse("select ?", true, true, true));
-  }
-
-  @Test
-  void unquotedQuestionmark() {
-    assertEquals("select '{\"key\": \"val\"}'::jsonb ? 'key'",
-        reparse("select '{\"key\": \"val\"}'::jsonb ? 'key'", true, false, true));
-  }
-
-  @Test
-  void repeatedQuestionmark() {
-    assertEquals("select '{\"key\": \"val\"}'::jsonb ? 'key'",
-        reparse("select '{\"key\": \"val\"}'::jsonb ?? 'key'", true, false, true));
-  }
-
-  @Test
-  void quotedQuestionmark() {
-    assertEquals("select '?'", reparse("select '?'", true, false, true));
-  }
-
-  @Test
-  void doubleQuestionmark() {
-    assertEquals("select '?', $1 ?=> $2", reparse("select '?', ? ??=> ?", true, true, true));
-  }
-
-  @Test
-  void compositeBasic() {
-    assertEquals("select 1;/*cut*/\n select 2", reparse("select 1; select 2", true, false, true));
-  }
-
-  @Test
-  void compositeWithBinds() {
-    assertEquals("select $1;/*cut*/\n select $1", reparse("select ?; select ?", true, true, true));
-  }
-
-  @Test
-  void trailingSemicolon() {
-    assertEquals("select 1", reparse("select 1;", true, false, true));
-  }
-
-  @Test
-  void trailingSemicolonAndSpace() {
-    assertEquals("select 1", reparse("select 1; ", true, false, true));
-  }
-
-  @Test
-  void multipleTrailingSemicolons() {
-    assertEquals("select 1", reparse("select 1;;;", true, false, true));
-  }
-
-  @Test
-  void hasReturning() throws SQLException {
-    List<NativeQuery> queries = Parser.parseJdbcSql("insert into foo (a,b,c) values (?,?,?) RetuRning a", true, true, false,
-        true, true);
-    NativeQuery query = queries.get(0);
-    assertTrue(query.command.isReturningKeywordPresent(), "The parser should find the word returning");
-
-    queries = Parser.parseJdbcSql("insert into foo (a,b,c) values (?,?,?)", true, true, false, true, true);
-    query = queries.get(0);
-    assertFalse(query.command.isReturningKeywordPresent(), "The parser should not find the word returning");
-
-    queries = Parser.parseJdbcSql("insert into foo (a,b,c) values ('returning',?,?)", true, true, false,
-        true, true);
-    query = queries.get(0);
-    assertFalse(query.command.isReturningKeywordPresent(), "The parser should not find the word returning as it is in quotes ");
-  }
-
-  @Test
-  void select() throws SQLException {
-    List<NativeQuery> queries;
-    queries = Parser.parseJdbcSql("select 1 as returning from (update table)", true, true, false, true, true);
-    NativeQuery query = queries.get(0);
-    assertEquals(SqlCommandType.SELECT, query.command.getType(), "This is a select ");
-    assertTrue(query.command.isReturningKeywordPresent(), "Returning is OK here as it is not an insert command ");
-  }
-
-  @Test
-  void delete() throws SQLException {
-    List<NativeQuery> queries = Parser.parseJdbcSql("DeLeTe from foo where a=1", true, true, false,
-        true, true);
-    NativeQuery query = queries.get(0);
-    assertEquals(SqlCommandType.DELETE, query.command.getType(), "This is a delete command");
-  }
-
-  @Test
-  void multiQueryWithBind() throws SQLException {
-    // braces around (42) are required to puzzle the parser
-    String sql = "INSERT INTO inttable(a) VALUES (?);SELECT (42)";
-    List<NativeQuery> queries = Parser.parseJdbcSql(sql, true, true, true, true, true);
-    NativeQuery query = queries.get(0);
-    assertEquals("INSERT: INSERT INTO inttable(a) VALUES ($1)",
-        query.command.getType() + ": " + query.nativeSql,
-        "query(0) of " + sql);
-    query = queries.get(1);
-    assertEquals("SELECT: SELECT (42)",
-        query.command.getType() + ": " + query.nativeSql,
-        "query(1) of " + sql);
-  }
-
-  @Test
-  void move() throws SQLException {
-    List<NativeQuery> queries = Parser.parseJdbcSql("MoVe NEXT FROM FOO", true, true, false, true, true);
-    NativeQuery query = queries.get(0);
-    assertEquals(SqlCommandType.MOVE, query.command.getType(), "This is a move command");
-  }
-
-  @Test
-  void update() throws SQLException {
-    List<NativeQuery> queries;
-    NativeQuery query;
-    queries = Parser.parseJdbcSql("update foo set (a=?,b=?,c=?)", true, true, false, true, true);
-    query = queries.get(0);
-    assertEquals(SqlCommandType.UPDATE, query.command.getType(), "This is an UPDATE command");
-  }
-
-  @Test
-  void insert() throws SQLException {
-    List<NativeQuery> queries = Parser.parseJdbcSql("InSeRt into foo (a,b,c) values (?,?,?) returning a", true, true, false,
-        true, true);
-    NativeQuery query = queries.get(0);
-    assertEquals(SqlCommandType.INSERT, query.command.getType(), "This is an INSERT command");
-
-    queries = Parser.parseJdbcSql("select 1 as insert", true, true, false, true, true);
-    query = queries.get(0);
-    assertEquals(SqlCommandType.SELECT, query.command.getType(), "This is a SELECT command");
-  }
-
-  @Test
-  void withSelect() throws SQLException {
-    List<NativeQuery> queries;
-    queries = Parser.parseJdbcSql("with update as (update foo set (a=?,b=?,c=?)) select * from update", true, true, false, true, true);
-    NativeQuery query = queries.get(0);
-    assertEquals(SqlCommandType.SELECT, query.command.getType(), "with ... () select");
-  }
-
-  @Test
-  void withInsert() throws SQLException {
-    List<NativeQuery> queries;
-    queries = Parser.parseJdbcSql("with update as (update foo set (a=?,b=?,c=?)) insert into table(select) values(1)", true, true, false, true, true);
-    NativeQuery query = queries.get(0);
-    assertEquals(SqlCommandType.INSERT, query.command.getType(), "with ... () insert");
-  }
-
-  @Test
-  void multipleEmptyQueries() {
-    assertEquals("select 1;/*cut*/\n" + "select 2",
-        reparse("select 1; ;\t;select 2", true, false, true));
-  }
-
-  @Test
-  void compositeWithComments() {
-    assertEquals("select 1;/*cut*/\n" + "/* noop */;/*cut*/\n" + "select 2",
-        reparse("select 1;/* noop */;select 2", true, false, true));
-  }
-
-  private String reparse(String query, boolean standardConformingStrings, boolean withParameters,
-      boolean splitStatements) {
-    try {
-      return toString(
-          Parser.parseJdbcSql(query, standardConformingStrings, withParameters, splitStatements, false, true));
-    } catch (SQLException e) {
-      throw new IllegalStateException("Parser.parseJdbcSql: " + e.getMessage(), e);
+    @Test
+    void emptyQuery() {
+        assertEquals("", reparse("", true, false, true));
     }
-  }
 
-  private String toString(List<NativeQuery> queries) {
-    StringBuilder sb = new StringBuilder();
-    for (NativeQuery query : queries) {
-      if (sb.length() != 0) {
-        sb.append(";/*cut*/\n");
-      }
-      sb.append(query.nativeSql);
+    @Test
+    void whitespaceQuery() {
+        assertEquals("", reparse("     ", true, false, true));
+    }
+
+    @Test
+    void onlyEmptyQueries() {
+        assertEquals("", reparse(";;;;  ;  \n;\n", true, false, true));
+    }
+
+    @Test
+    void simpleQuery() {
+        assertEquals("select 1", reparse("select 1", true, false, true));
+    }
+
+    @Test
+    void simpleBind() {
+        assertEquals("select $1", reparse("select ?", true, true, true));
+    }
+
+    @Test
+    void unquotedQuestionmark() {
+        assertEquals("select '{\"key\": \"val\"}'::jsonb ? 'key'",
+                reparse("select '{\"key\": \"val\"}'::jsonb ? 'key'", true, false, true));
+    }
+
+    @Test
+    void repeatedQuestionmark() {
+        assertEquals("select '{\"key\": \"val\"}'::jsonb ? 'key'",
+                reparse("select '{\"key\": \"val\"}'::jsonb ?? 'key'", true, false, true));
+    }
+
+    @Test
+    void quotedQuestionmark() {
+        assertEquals("select '?'", reparse("select '?'", true, false, true));
+    }
+
+    @Test
+    void doubleQuestionmark() {
+        assertEquals("select '?', $1 ?=> $2", reparse("select '?', ? ??=> ?", true, true, true));
+    }
+
+    @Test
+    void compositeBasic() {
+        assertEquals("select 1;/*cut*/\n select 2", reparse("select 1; select 2", true, false, true));
+    }
+
+    @Test
+    void compositeWithBinds() {
+        assertEquals("select $1;/*cut*/\n select $1", reparse("select ?; select ?", true, true, true));
+    }
+
+    @Test
+    void trailingSemicolon() {
+        assertEquals("select 1", reparse("select 1;", true, false, true));
+    }
+
+    @Test
+    void trailingSemicolonAndSpace() {
+        assertEquals("select 1", reparse("select 1; ", true, false, true));
+    }
+
+    @Test
+    void multipleTrailingSemicolons() {
+        assertEquals("select 1", reparse("select 1;;;", true, false, true));
+    }
+
+    @Test
+    void hasReturning() throws SQLException {
+        List<NativeQuery> queries = Parser.parseJdbcSql("insert into foo (a,b,c) values (?,?,?) RetuRning a", true, true, false,
+                true, true);
+        NativeQuery query = queries.get(0);
+        assertTrue(query.command.isReturningKeywordPresent(), "The parser should find the word returning");
+
+        queries = Parser.parseJdbcSql("insert into foo (a,b,c) values (?,?,?)", true, true, false, true, true);
+        query = queries.get(0);
+        assertFalse(query.command.isReturningKeywordPresent(), "The parser should not find the word returning");
+
+        queries = Parser.parseJdbcSql("insert into foo (a,b,c) values ('returning',?,?)", true, true, false,
+                true, true);
+        query = queries.get(0);
+        assertFalse(query.command.isReturningKeywordPresent(), "The parser should not find the word returning as it is in quotes ");
+    }
+
+    @Test
+    void select() throws SQLException {
+        List<NativeQuery> queries;
+        queries = Parser.parseJdbcSql("select 1 as returning from (update table)", true, true, false, true, true);
+        NativeQuery query = queries.get(0);
+        assertEquals(SqlCommandType.SELECT, query.command.getType(), "This is a select ");
+        assertTrue(query.command.isReturningKeywordPresent(), "Returning is OK here as it is not an insert command ");
+    }
+
+    @Test
+    void delete() throws SQLException {
+        List<NativeQuery> queries = Parser.parseJdbcSql("DeLeTe from foo where a=1", true, true, false,
+                true, true);
+        NativeQuery query = queries.get(0);
+        assertEquals(SqlCommandType.DELETE, query.command.getType(), "This is a delete command");
+    }
+
+    @Test
+    void multiQueryWithBind() throws SQLException {
+        // braces around (42) are required to puzzle the parser
+        String sql = "INSERT INTO inttable(a) VALUES (?);SELECT (42)";
+        List<NativeQuery> queries = Parser.parseJdbcSql(sql, true, true, true, true, true);
+        NativeQuery query = queries.get(0);
+        assertEquals("INSERT: INSERT INTO inttable(a) VALUES ($1)",
+                query.command.getType() + ": " + query.nativeSql,
+                "query(0) of " + sql);
+        query = queries.get(1);
+        assertEquals("SELECT: SELECT (42)",
+                query.command.getType() + ": " + query.nativeSql,
+                "query(1) of " + sql);
+    }
+
+    @Test
+    void move() throws SQLException {
+        List<NativeQuery> queries = Parser.parseJdbcSql("MoVe NEXT FROM FOO", true, true, false, true, true);
+        NativeQuery query = queries.get(0);
+        assertEquals(SqlCommandType.MOVE, query.command.getType(), "This is a move command");
+    }
+
+    @Test
+    void update() throws SQLException {
+        List<NativeQuery> queries;
+        NativeQuery query;
+        queries = Parser.parseJdbcSql("update foo set (a=?,b=?,c=?)", true, true, false, true, true);
+        query = queries.get(0);
+        assertEquals(SqlCommandType.UPDATE, query.command.getType(), "This is an UPDATE command");
+    }
+
+    @Test
+    void insert() throws SQLException {
+        List<NativeQuery> queries = Parser.parseJdbcSql("InSeRt into foo (a,b,c) values (?,?,?) returning a", true, true, false,
+                true, true);
+        NativeQuery query = queries.get(0);
+        assertEquals(SqlCommandType.INSERT, query.command.getType(), "This is an INSERT command");
+
+        queries = Parser.parseJdbcSql("select 1 as insert", true, true, false, true, true);
+        query = queries.get(0);
+        assertEquals(SqlCommandType.SELECT, query.command.getType(), "This is a SELECT command");
+    }
+
+    @Test
+    void withSelect() throws SQLException {
+        List<NativeQuery> queries;
+        queries = Parser.parseJdbcSql("with update as (update foo set (a=?,b=?,c=?)) select * from update", true, true, false, true, true);
+        NativeQuery query = queries.get(0);
+        assertEquals(SqlCommandType.SELECT, query.command.getType(), "with ... () select");
+    }
+
+    @Test
+    void withInsert() throws SQLException {
+        List<NativeQuery> queries;
+        queries = Parser.parseJdbcSql("with update as (update foo set (a=?,b=?,c=?)) insert into table(select) values(1)", true, true, false, true, true);
+        NativeQuery query = queries.get(0);
+        assertEquals(SqlCommandType.INSERT, query.command.getType(), "with ... () insert");
+    }
+
+    @Test
+    void multipleEmptyQueries() {
+        assertEquals("select 1;/*cut*/\n" + "select 2",
+                reparse("select 1; ;\t;select 2", true, false, true));
+    }
+
+    @Test
+    void compositeWithComments() {
+        assertEquals("select 1;/*cut*/\n" + "/* noop */;/*cut*/\n" + "select 2",
+                reparse("select 1;/* noop */;select 2", true, false, true));
+    }
+
+    private String reparse(String query, boolean standardConformingStrings, boolean withParameters,
+                           boolean splitStatements) {
+        try {
+            return toString(
+                    Parser.parseJdbcSql(query, standardConformingStrings, withParameters, splitStatements, false, true));
+        } catch (SQLException e) {
+            throw new IllegalStateException("Parser.parseJdbcSql: " + e.getMessage(), e);
+        }
+    }
+
+    private String toString(List<NativeQuery> queries) {
+        StringBuilder sb = new StringBuilder();
+        for (NativeQuery query : queries) {
+            if (sb.length() != 0) {
+                sb.append(";/*cut*/\n");
+            }
+            sb.append(query.nativeSql);
+        }
+        return sb.toString();
     }
-    return sb.toString();
-  }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc3/CompositeTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc3/CompositeTest.java
index 34c31d6..36ce064 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc3/CompositeTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc3/CompositeTest.java
@@ -29,178 +29,178 @@ import java.sql.SQLException;
 
 class CompositeTest {
 
-  private Connection conn;
+    private Connection conn;
 
-  @BeforeAll
-  static void beforeClass() throws Exception {
-    Connection conn = TestUtil.openDB();
-    try {
-      Assumptions.assumeTrue(TestUtil.haveMinimumServerVersion(conn, ServerVersion.v8_3), "uuid requires PostgreSQL 8.3+");
-    } finally {
-      conn.close();
+    @BeforeAll
+    static void beforeClass() throws Exception {
+        Connection conn = TestUtil.openDB();
+        try {
+            Assumptions.assumeTrue(TestUtil.haveMinimumServerVersion(conn, ServerVersion.v8_3), "uuid requires PostgreSQL 8.3+");
+        } finally {
+            conn.close();
+        }
     }
-  }
 
-  @BeforeEach
-  void setUp() throws Exception {
-    conn = TestUtil.openDB();
-    TestUtil.createSchema(conn, "\"Composites\"");
-    TestUtil.createCompositeType(conn, "simplecompositetest", "i int, d decimal, u uuid");
-    TestUtil.createCompositeType(conn, "nestedcompositetest", "t text, s simplecompositetest");
-    TestUtil.createCompositeType(conn, "\"Composites\".\"ComplexCompositeTest\"",
-        "l bigint[], n nestedcompositetest[], s simplecompositetest");
-    TestUtil.createTable(conn, "compositetabletest",
-        "s simplecompositetest, cc \"Composites\".\"ComplexCompositeTest\"[]");
-    TestUtil.createTable(conn, "\"Composites\".\"Table\"",
-        "s simplecompositetest, cc \"Composites\".\"ComplexCompositeTest\"[]");
-  }
+    @BeforeEach
+    void setUp() throws Exception {
+        conn = TestUtil.openDB();
+        TestUtil.createSchema(conn, "\"Composites\"");
+        TestUtil.createCompositeType(conn, "simplecompositetest", "i int, d decimal, u uuid");
+        TestUtil.createCompositeType(conn, "nestedcompositetest", "t text, s simplecompositetest");
+        TestUtil.createCompositeType(conn, "\"Composites\".\"ComplexCompositeTest\"",
+                "l bigint[], n nestedcompositetest[], s simplecompositetest");
+        TestUtil.createTable(conn, "compositetabletest",
+                "s simplecompositetest, cc \"Composites\".\"ComplexCompositeTest\"[]");
+        TestUtil.createTable(conn, "\"Composites\".\"Table\"",
+                "s simplecompositetest, cc \"Composites\".\"ComplexCompositeTest\"[]");
+    }
 
-  @AfterEach
-  void tearDown() throws SQLException {
-    TestUtil.dropTable(conn, "\"Composites\".\"Table\"");
-    TestUtil.dropTable(conn, "compositetabletest");
-    TestUtil.dropType(conn, "\"Composites\".\"ComplexCompositeTest\"");
-    TestUtil.dropType(conn, "nestedcompositetest");
-    TestUtil.dropType(conn, "simplecompositetest");
-    TestUtil.dropSchema(conn, "\"Composites\"");
-    TestUtil.closeDB(conn);
-  }
+    @AfterEach
+    void tearDown() throws SQLException {
+        TestUtil.dropTable(conn, "\"Composites\".\"Table\"");
+        TestUtil.dropTable(conn, "compositetabletest");
+        TestUtil.dropType(conn, "\"Composites\".\"ComplexCompositeTest\"");
+        TestUtil.dropType(conn, "nestedcompositetest");
+        TestUtil.dropType(conn, "simplecompositetest");
+        TestUtil.dropSchema(conn, "\"Composites\"");
+        TestUtil.closeDB(conn);
+    }
 
-  @Test
-  void simpleSelect() throws SQLException {
-    PreparedStatement pstmt = conn.prepareStatement("SELECT '(1,2.2,)'::simplecompositetest");
-    ResultSet rs = pstmt.executeQuery();
-    assertTrue(rs.next());
-    PGobject pgo = (PGobject) rs.getObject(1);
-    assertEquals("simplecompositetest", pgo.getType());
-    assertEquals("(1,2.2,)", pgo.getValue());
-  }
+    @Test
+    void simpleSelect() throws SQLException {
+        PreparedStatement pstmt = conn.prepareStatement("SELECT '(1,2.2,)'::simplecompositetest");
+        ResultSet rs = pstmt.executeQuery();
+        assertTrue(rs.next());
+        PGobject pgo = (PGobject) rs.getObject(1);
+        assertEquals("simplecompositetest", pgo.getType());
+        assertEquals("(1,2.2,)", pgo.getValue());
+    }
 
-  @Test
-  void complexSelect() throws SQLException {
-    PreparedStatement pstmt = conn.prepareStatement(
-        "SELECT '(\"{1,2}\",{},\"(1,2.2,)\")'::\"Composites\".\"ComplexCompositeTest\"");
-    ResultSet rs = pstmt.executeQuery();
-    assertTrue(rs.next());
-    PGobject pgo = (PGobject) rs.getObject(1);
-    assertEquals("\"Composites\".\"ComplexCompositeTest\"", pgo.getType());
-    assertEquals("(\"{1,2}\",{},\"(1,2.2,)\")", pgo.getValue());
-  }
+    @Test
+    void complexSelect() throws SQLException {
+        PreparedStatement pstmt = conn.prepareStatement(
+                "SELECT '(\"{1,2}\",{},\"(1,2.2,)\")'::\"Composites\".\"ComplexCompositeTest\"");
+        ResultSet rs = pstmt.executeQuery();
+        assertTrue(rs.next());
+        PGobject pgo = (PGobject) rs.getObject(1);
+        assertEquals("\"Composites\".\"ComplexCompositeTest\"", pgo.getType());
+        assertEquals("(\"{1,2}\",{},\"(1,2.2,)\")", pgo.getValue());
+    }
 
-  @Test
-  void simpleArgumentSelect() throws SQLException {
-    Assumptions.assumeTrue(conn.unwrap(PGConnection.class).getPreferQueryMode() != PreferQueryMode.SIMPLE, "Skip if running in simple query mode");
-    PreparedStatement pstmt = conn.prepareStatement("SELECT ?");
-    PGobject pgo = new PGobject();
-    pgo.setType("simplecompositetest");
-    pgo.setValue("(1,2.2,)");
-    pstmt.setObject(1, pgo);
-    ResultSet rs = pstmt.executeQuery();
-    assertTrue(rs.next());
-    PGobject pgo2 = (PGobject) rs.getObject(1);
-    assertEquals(pgo, pgo2);
-  }
+    @Test
+    void simpleArgumentSelect() throws SQLException {
+        Assumptions.assumeTrue(conn.unwrap(PGConnection.class).getPreferQueryMode() != PreferQueryMode.SIMPLE, "Skip if running in simple query mode");
+        PreparedStatement pstmt = conn.prepareStatement("SELECT ?");
+        PGobject pgo = new PGobject();
+        pgo.setType("simplecompositetest");
+        pgo.setValue("(1,2.2,)");
+        pstmt.setObject(1, pgo);
+        ResultSet rs = pstmt.executeQuery();
+        assertTrue(rs.next());
+        PGobject pgo2 = (PGobject) rs.getObject(1);
+        assertEquals(pgo, pgo2);
+    }
 
-  @Test
-  void complexArgumentSelect() throws SQLException {
-    Assumptions.assumeTrue(conn.unwrap(PGConnection.class).getPreferQueryMode() != PreferQueryMode.SIMPLE, "Skip if running in simple query mode");
-    PreparedStatement pstmt = conn.prepareStatement("SELECT ?");
-    PGobject pgo = new PGobject();
-    pgo.setType("\"Composites\".\"ComplexCompositeTest\"");
-    pgo.setValue("(\"{1,2}\",{},\"(1,2.2,)\")");
-    pstmt.setObject(1, pgo);
-    ResultSet rs = pstmt.executeQuery();
-    assertTrue(rs.next());
-    PGobject pgo2 = (PGobject) rs.getObject(1);
-    assertEquals(pgo, pgo2);
-  }
+    @Test
+    void complexArgumentSelect() throws SQLException {
+        Assumptions.assumeTrue(conn.unwrap(PGConnection.class).getPreferQueryMode() != PreferQueryMode.SIMPLE, "Skip if running in simple query mode");
+        PreparedStatement pstmt = conn.prepareStatement("SELECT ?");
+        PGobject pgo = new PGobject();
+        pgo.setType("\"Composites\".\"ComplexCompositeTest\"");
+        pgo.setValue("(\"{1,2}\",{},\"(1,2.2,)\")");
+        pstmt.setObject(1, pgo);
+        ResultSet rs = pstmt.executeQuery();
+        assertTrue(rs.next());
+        PGobject pgo2 = (PGobject) rs.getObject(1);
+        assertEquals(pgo, pgo2);
+    }
 
-  @Test
-  void compositeFromTable() throws SQLException {
-    PreparedStatement pstmt = conn.prepareStatement("INSERT INTO compositetabletest VALUES(?, ?)");
-    PGobject pgo1 = new PGobject();
-    pgo1.setType("public.simplecompositetest");
-    pgo1.setValue("(1,2.2,)");
-    pstmt.setObject(1, pgo1);
-    String[] ctArr = new String[1];
-    ctArr[0] = "(\"{1,2}\",{},\"(1,2.2,)\")";
-    Array pgarr1 = conn.createArrayOf("\"Composites\".\"ComplexCompositeTest\"", ctArr);
-    pstmt.setArray(2, pgarr1);
-    int res = pstmt.executeUpdate();
-    assertEquals(1, res);
-    pstmt = conn.prepareStatement("SELECT * FROM compositetabletest");
-    ResultSet rs = pstmt.executeQuery();
-    assertTrue(rs.next());
-    PGobject pgo2 = (PGobject) rs.getObject(1);
-    Array pgarr2 = (Array) rs.getObject(2);
-    assertEquals("simplecompositetest", pgo2.getType());
-    assertEquals("\"Composites\".\"ComplexCompositeTest\"", pgarr2.getBaseTypeName());
-    Object[] pgobjarr2 = (Object[]) pgarr2.getArray();
-    assertEquals(1, pgobjarr2.length);
-    PGobject arr2Elem = (PGobject) pgobjarr2[0];
-    assertEquals("\"Composites\".\"ComplexCompositeTest\"", arr2Elem.getType());
-    assertEquals("(\"{1,2}\",{},\"(1,2.2,)\")", arr2Elem.getValue());
-    rs.close();
-    pstmt = conn.prepareStatement("SELECT c FROM compositetabletest c");
-    rs = pstmt.executeQuery();
-    assertTrue(rs.next());
-    PGobject pgo3 = (PGobject) rs.getObject(1);
-    assertEquals("compositetabletest", pgo3.getType());
-    assertEquals("(\"(1,2.2,)\",\"{\"\"(\\\\\"\"{1,2}\\\\\"\",{},\\\\\"\"(1,2.2,)\\\\\"\")\"\"}\")",
-        pgo3.getValue());
-  }
+    @Test
+    void compositeFromTable() throws SQLException {
+        PreparedStatement pstmt = conn.prepareStatement("INSERT INTO compositetabletest VALUES(?, ?)");
+        PGobject pgo1 = new PGobject();
+        pgo1.setType("public.simplecompositetest");
+        pgo1.setValue("(1,2.2,)");
+        pstmt.setObject(1, pgo1);
+        String[] ctArr = new String[1];
+        ctArr[0] = "(\"{1,2}\",{},\"(1,2.2,)\")";
+        Array pgarr1 = conn.createArrayOf("\"Composites\".\"ComplexCompositeTest\"", ctArr);
+        pstmt.setArray(2, pgarr1);
+        int res = pstmt.executeUpdate();
+        assertEquals(1, res);
+        pstmt = conn.prepareStatement("SELECT * FROM compositetabletest");
+        ResultSet rs = pstmt.executeQuery();
+        assertTrue(rs.next());
+        PGobject pgo2 = (PGobject) rs.getObject(1);
+        Array pgarr2 = (Array) rs.getObject(2);
+        assertEquals("simplecompositetest", pgo2.getType());
+        assertEquals("\"Composites\".\"ComplexCompositeTest\"", pgarr2.getBaseTypeName());
+        Object[] pgobjarr2 = (Object[]) pgarr2.getArray();
+        assertEquals(1, pgobjarr2.length);
+        PGobject arr2Elem = (PGobject) pgobjarr2[0];
+        assertEquals("\"Composites\".\"ComplexCompositeTest\"", arr2Elem.getType());
+        assertEquals("(\"{1,2}\",{},\"(1,2.2,)\")", arr2Elem.getValue());
+        rs.close();
+        pstmt = conn.prepareStatement("SELECT c FROM compositetabletest c");
+        rs = pstmt.executeQuery();
+        assertTrue(rs.next());
+        PGobject pgo3 = (PGobject) rs.getObject(1);
+        assertEquals("compositetabletest", pgo3.getType());
+        assertEquals("(\"(1,2.2,)\",\"{\"\"(\\\\\"\"{1,2}\\\\\"\",{},\\\\\"\"(1,2.2,)\\\\\"\")\"\"}\")",
+                pgo3.getValue());
+    }
 
-  @Test
-  void nullArrayElement() throws SQLException {
-    PreparedStatement pstmt =
-        conn.prepareStatement("SELECT array[NULL, NULL]::compositetabletest[]");
-    ResultSet rs = pstmt.executeQuery();
-    assertTrue(rs.next());
-    Array arr = rs.getArray(1);
-    assertEquals("compositetabletest", arr.getBaseTypeName());
-    Object[] items = (Object[]) arr.getArray();
-    assertEquals(2, items.length);
-    assertNull(items[0]);
-    assertNull(items[1]);
-  }
+    @Test
+    void nullArrayElement() throws SQLException {
+        PreparedStatement pstmt =
+                conn.prepareStatement("SELECT array[NULL, NULL]::compositetabletest[]");
+        ResultSet rs = pstmt.executeQuery();
+        assertTrue(rs.next());
+        Array arr = rs.getArray(1);
+        assertEquals("compositetabletest", arr.getBaseTypeName());
+        Object[] items = (Object[]) arr.getArray();
+        assertEquals(2, items.length);
+        assertNull(items[0]);
+        assertNull(items[1]);
+    }
 
-  @Test
-  void tableMetadata() throws SQLException {
-    PreparedStatement pstmt = conn.prepareStatement("INSERT INTO compositetabletest VALUES(?, ?)");
-    PGobject pgo1 = new PGobject();
-    pgo1.setType("public.simplecompositetest");
-    pgo1.setValue("(1,2.2,)");
-    pstmt.setObject(1, pgo1);
-    String[] ctArr = new String[1];
-    ctArr[0] = "(\"{1,2}\",{},\"(1,2.2,)\")";
-    Array pgarr1 = conn.createArrayOf("\"Composites\".\"ComplexCompositeTest\"", ctArr);
-    pstmt.setArray(2, pgarr1);
-    int res = pstmt.executeUpdate();
-    assertEquals(1, res);
-    pstmt = conn.prepareStatement("SELECT t FROM compositetabletest t");
-    ResultSet rs = pstmt.executeQuery();
-    assertTrue(rs.next());
-    String name = rs.getMetaData().getColumnTypeName(1);
-    assertEquals("compositetabletest", name);
-  }
+    @Test
+    void tableMetadata() throws SQLException {
+        PreparedStatement pstmt = conn.prepareStatement("INSERT INTO compositetabletest VALUES(?, ?)");
+        PGobject pgo1 = new PGobject();
+        pgo1.setType("public.simplecompositetest");
+        pgo1.setValue("(1,2.2,)");
+        pstmt.setObject(1, pgo1);
+        String[] ctArr = new String[1];
+        ctArr[0] = "(\"{1,2}\",{},\"(1,2.2,)\")";
+        Array pgarr1 = conn.createArrayOf("\"Composites\".\"ComplexCompositeTest\"", ctArr);
+        pstmt.setArray(2, pgarr1);
+        int res = pstmt.executeUpdate();
+        assertEquals(1, res);
+        pstmt = conn.prepareStatement("SELECT t FROM compositetabletest t");
+        ResultSet rs = pstmt.executeQuery();
+        assertTrue(rs.next());
+        String name = rs.getMetaData().getColumnTypeName(1);
+        assertEquals("compositetabletest", name);
+    }
 
-  @Test
-  void complexTableNameMetadata() throws SQLException {
-    PreparedStatement pstmt = conn.prepareStatement("INSERT INTO \"Composites\".\"Table\" VALUES(?, ?)");
-    PGobject pgo1 = new PGobject();
-    pgo1.setType("public.simplecompositetest");
-    pgo1.setValue("(1,2.2,)");
-    pstmt.setObject(1, pgo1);
-    String[] ctArr = new String[1];
-    ctArr[0] = "(\"{1,2}\",{},\"(1,2.2,)\")";
-    Array pgarr1 = conn.createArrayOf("\"Composites\".\"ComplexCompositeTest\"", ctArr);
-    pstmt.setArray(2, pgarr1);
-    int res = pstmt.executeUpdate();
-    assertEquals(1, res);
-    pstmt = conn.prepareStatement("SELECT t FROM \"Composites\".\"Table\" t");
-    ResultSet rs = pstmt.executeQuery();
-    assertTrue(rs.next());
-    String name = rs.getMetaData().getColumnTypeName(1);
-    assertEquals("\"Composites\".\"Table\"", name);
-  }
+    @Test
+    void complexTableNameMetadata() throws SQLException {
+        PreparedStatement pstmt = conn.prepareStatement("INSERT INTO \"Composites\".\"Table\" VALUES(?, ?)");
+        PGobject pgo1 = new PGobject();
+        pgo1.setType("public.simplecompositetest");
+        pgo1.setValue("(1,2.2,)");
+        pstmt.setObject(1, pgo1);
+        String[] ctArr = new String[1];
+        ctArr[0] = "(\"{1,2}\",{},\"(1,2.2,)\")";
+        Array pgarr1 = conn.createArrayOf("\"Composites\".\"ComplexCompositeTest\"", ctArr);
+        pstmt.setArray(2, pgarr1);
+        int res = pstmt.executeUpdate();
+        assertEquals(1, res);
+        pstmt = conn.prepareStatement("SELECT t FROM \"Composites\".\"Table\" t");
+        ResultSet rs = pstmt.executeQuery();
+        assertTrue(rs.next());
+        String name = rs.getMetaData().getColumnTypeName(1);
+        assertEquals("\"Composites\".\"Table\"", name);
+    }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc3/DatabaseMetaDataTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc3/DatabaseMetaDataTest.java
index 05c5b3d..281cb66 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc3/DatabaseMetaDataTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc3/DatabaseMetaDataTest.java
@@ -23,35 +23,35 @@ import java.sql.Types;
 
 class DatabaseMetaDataTest {
 
-  private Connection conn;
+    private Connection conn;
 
-  @BeforeEach
-  void setUp() throws Exception {
-    conn = TestUtil.openDB();
-    Statement stmt = conn.createStatement();
-    stmt.execute("CREATE DOMAIN mydom AS int");
-    stmt.execute("CREATE TABLE domtab (a mydom)");
-  }
+    @BeforeEach
+    void setUp() throws Exception {
+        conn = TestUtil.openDB();
+        Statement stmt = conn.createStatement();
+        stmt.execute("CREATE DOMAIN mydom AS int");
+        stmt.execute("CREATE TABLE domtab (a mydom)");
+    }
 
-  @AfterEach
-  void tearDown() throws Exception {
-    Statement stmt = conn.createStatement();
-    stmt.execute("DROP TABLE domtab");
-    stmt.execute("DROP DOMAIN mydom");
-    TestUtil.closeDB(conn);
-  }
+    @AfterEach
+    void tearDown() throws Exception {
+        Statement stmt = conn.createStatement();
+        stmt.execute("DROP TABLE domtab");
+        stmt.execute("DROP DOMAIN mydom");
+        TestUtil.closeDB(conn);
+    }
 
-  @Test
-  void getColumnsForDomain() throws Exception {
-    DatabaseMetaData dbmd = conn.getMetaData();
+    @Test
+    void getColumnsForDomain() throws Exception {
+        DatabaseMetaData dbmd = conn.getMetaData();
 
-    ResultSet rs = dbmd.getColumns("%", "%", "domtab", "%");
-    assertTrue(rs.next());
-    assertEquals("a", rs.getString("COLUMN_NAME"));
-    assertEquals(Types.DISTINCT, rs.getInt("DATA_TYPE"));
-    assertEquals("mydom", rs.getString("TYPE_NAME"));
-    assertEquals(Types.INTEGER, rs.getInt("SOURCE_DATA_TYPE"));
-    assertFalse(rs.next());
-  }
+        ResultSet rs = dbmd.getColumns("%", "%", "domtab", "%");
+        assertTrue(rs.next());
+        assertEquals("a", rs.getString("COLUMN_NAME"));
+        assertEquals(Types.DISTINCT, rs.getInt("DATA_TYPE"));
+        assertEquals("mydom", rs.getString("TYPE_NAME"));
+        assertEquals(Types.INTEGER, rs.getInt("SOURCE_DATA_TYPE"));
+        assertFalse(rs.next());
+    }
 
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc3/EscapeSyntaxCallModeBaseTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc3/EscapeSyntaxCallModeBaseTest.java
index ae2db30..2a8feae 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc3/EscapeSyntaxCallModeBaseTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc3/EscapeSyntaxCallModeBaseTest.java
@@ -14,30 +14,30 @@ import java.sql.Statement;
 
 public class EscapeSyntaxCallModeBaseTest extends BaseTest4 {
 
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-    Statement stmt = con.createStatement();
-    stmt.execute(
-        "CREATE OR REPLACE FUNCTION myiofunc(a INOUT int, b OUT int) AS 'BEGIN b := a; a := 1; END;' LANGUAGE plpgsql");
-    stmt.execute(
-        "CREATE OR REPLACE FUNCTION mysumfunc(a int, b int) returns int AS 'BEGIN return a + b; END;' LANGUAGE plpgsql");
-    if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v11)) {
-      stmt.execute(
-          "CREATE OR REPLACE PROCEDURE myioproc(a INOUT int, b INOUT int) AS 'BEGIN b := a; a := 1; END;' LANGUAGE plpgsql");
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        Statement stmt = con.createStatement();
+        stmt.execute(
+                "CREATE OR REPLACE FUNCTION myiofunc(a INOUT int, b OUT int) AS 'BEGIN b := a; a := 1; END;' LANGUAGE plpgsql");
+        stmt.execute(
+                "CREATE OR REPLACE FUNCTION mysumfunc(a int, b int) returns int AS 'BEGIN return a + b; END;' LANGUAGE plpgsql");
+        if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v11)) {
+            stmt.execute(
+                    "CREATE OR REPLACE PROCEDURE myioproc(a INOUT int, b INOUT int) AS 'BEGIN b := a; a := 1; END;' LANGUAGE plpgsql");
+        }
     }
-  }
 
-  @Override
-  public void tearDown() throws SQLException {
-    Statement stmt = con.createStatement();
-    stmt.execute("drop function myiofunc(a INOUT int, b OUT int) ");
-    stmt.execute("drop function mysumfunc(a int, b int) ");
-    if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v11)) {
-      stmt.execute("drop procedure myioproc(a INOUT int, b INOUT int) ");
+    @Override
+    public void tearDown() throws SQLException {
+        Statement stmt = con.createStatement();
+        stmt.execute("drop function myiofunc(a INOUT int, b OUT int) ");
+        stmt.execute("drop function mysumfunc(a int, b int) ");
+        if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v11)) {
+            stmt.execute("drop procedure myioproc(a INOUT int, b INOUT int) ");
+        }
+        stmt.close();
+        super.tearDown();
     }
-    stmt.close();
-    super.tearDown();
-  }
 
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc3/EscapeSyntaxCallModeCallIfNoReturnTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc3/EscapeSyntaxCallModeCallIfNoReturnTest.java
index 94f012d..d18d4b4 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc3/EscapeSyntaxCallModeCallIfNoReturnTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc3/EscapeSyntaxCallModeCallIfNoReturnTest.java
@@ -23,65 +23,65 @@ import java.util.Properties;
 
 public class EscapeSyntaxCallModeCallIfNoReturnTest extends EscapeSyntaxCallModeBaseTest {
 
-  @Override
-  protected void updateProperties(Properties props) {
-    super.updateProperties(props);
-    PGProperty.ESCAPE_SYNTAX_CALL_MODE.set(props, EscapeSyntaxCallMode.CALL_IF_NO_RETURN.value());
-  }
-
-  @Test
-  public void testInvokeFunction() throws Throwable {
-    // escapeSyntaxCallMode=callIfNoReturn will cause a CALL statement to be used for the JDBC escape call
-    // syntax used below (since no return parameter is specified). "myiofunc" is a function, so the
-    // attempted invocation should fail.
-    PSQLState expected = PSQLState.WRONG_OBJECT_TYPE;
-    assumeCallableStatementsSupported();
-    assumeMinimumServerVersion(ServerVersion.v11);
-
-    CallableStatement cs = con.prepareCall("{ call myiofunc(?,?) }");
-    cs.registerOutParameter(1, Types.INTEGER);
-    cs.registerOutParameter(2, Types.INTEGER);
-    cs.setInt(1, 10);
-    try {
-      cs.execute();
-      fail("Should throw an exception");
-    } catch (SQLException ex) {
-      assertEquals(expected.getState(), ex.getSQLState());
+    @Override
+    protected void updateProperties(Properties props) {
+        super.updateProperties(props);
+        PGProperty.ESCAPE_SYNTAX_CALL_MODE.set(props, EscapeSyntaxCallMode.CALL_IF_NO_RETURN.value());
     }
-  }
 
-  @Test
-  public void testInvokeFunctionHavingReturnParameter() throws Throwable {
-    // escapeSyntaxCallMode=callIfNoReturn will cause a SELECT statement to be used for the JDBC escape call
-    // syntax used below (since a return parameter is specified). "mysumfunc" is a function, so the
-    // invocation should succeed.
-    assumeCallableStatementsSupported();
-    CallableStatement cs = con.prepareCall("{ ? = call mysumfunc(?,?) }");
-    cs.registerOutParameter(1, Types.INTEGER);
-    cs.setInt(2, 10);
-    cs.setInt(3, 20);
-    cs.execute();
-    int ret = cs.getInt(1);
-    assertTrue("Expected mysumproc(10,20) to return 30 but returned " + ret, ret == 30);
-  }
+    @Test
+    public void testInvokeFunction() throws Throwable {
+        // escapeSyntaxCallMode=callIfNoReturn will cause a CALL statement to be used for the JDBC escape call
+        // syntax used below (since no return parameter is specified). "myiofunc" is a function, so the
+        // attempted invocation should fail.
+        PSQLState expected = PSQLState.WRONG_OBJECT_TYPE;
+        assumeCallableStatementsSupported();
+        assumeMinimumServerVersion(ServerVersion.v11);
 
-  @Test
-  public void testInvokeProcedure() throws Throwable {
-    // escapeSyntaxCallMode=callIfNoReturn will cause a CALL statement to be used for the JDBC escape call
-    // syntax used below (since there is no return parameter specified). "myioproc" is a procedure, so the
-    // invocation should succeed.
-    assumeCallableStatementsSupported();
-    assumeMinimumServerVersion(ServerVersion.v11);
-    CallableStatement cs = con.prepareCall("{call myioproc(?,?)}");
-    cs.registerOutParameter(1, Types.INTEGER);
-    cs.registerOutParameter(2, Types.INTEGER);
-    cs.setInt(1, 10);
-    cs.setInt(2, 20);
-    cs.execute();
-    // Expected output: a==1 (param 1), b==10 (param 2)
-    int a = cs.getInt(1);
-    int b = cs.getInt(2);
-    assertTrue("Expected myioproc() to return output parameter values 1,10 but returned " + a + "," + b, (a == 1 && b == 10));
-  }
+        CallableStatement cs = con.prepareCall("{ call myiofunc(?,?) }");
+        cs.registerOutParameter(1, Types.INTEGER);
+        cs.registerOutParameter(2, Types.INTEGER);
+        cs.setInt(1, 10);
+        try {
+            cs.execute();
+            fail("Should throw an exception");
+        } catch (SQLException ex) {
+            assertEquals(expected.getState(), ex.getSQLState());
+        }
+    }
+
+    @Test
+    public void testInvokeFunctionHavingReturnParameter() throws Throwable {
+        // escapeSyntaxCallMode=callIfNoReturn will cause a SELECT statement to be used for the JDBC escape call
+        // syntax used below (since a return parameter is specified). "mysumfunc" is a function, so the
+        // invocation should succeed.
+        assumeCallableStatementsSupported();
+        CallableStatement cs = con.prepareCall("{ ? = call mysumfunc(?,?) }");
+        cs.registerOutParameter(1, Types.INTEGER);
+        cs.setInt(2, 10);
+        cs.setInt(3, 20);
+        cs.execute();
+        int ret = cs.getInt(1);
+        assertTrue("Expected mysumproc(10,20) to return 30 but returned " + ret, ret == 30);
+    }
+
+    @Test
+    public void testInvokeProcedure() throws Throwable {
+        // escapeSyntaxCallMode=callIfNoReturn will cause a CALL statement to be used for the JDBC escape call
+        // syntax used below (since there is no return parameter specified). "myioproc" is a procedure, so the
+        // invocation should succeed.
+        assumeCallableStatementsSupported();
+        assumeMinimumServerVersion(ServerVersion.v11);
+        CallableStatement cs = con.prepareCall("{call myioproc(?,?)}");
+        cs.registerOutParameter(1, Types.INTEGER);
+        cs.registerOutParameter(2, Types.INTEGER);
+        cs.setInt(1, 10);
+        cs.setInt(2, 20);
+        cs.execute();
+        // Expected output: a==1 (param 1), b==10 (param 2)
+        int a = cs.getInt(1);
+        int b = cs.getInt(2);
+        assertTrue("Expected myioproc() to return output parameter values 1,10 but returned " + a + "," + b, (a == 1 && b == 10));
+    }
 
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc3/EscapeSyntaxCallModeCallTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc3/EscapeSyntaxCallModeCallTest.java
index 93bd5c1..ed2e16d 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc3/EscapeSyntaxCallModeCallTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc3/EscapeSyntaxCallModeCallTest.java
@@ -24,74 +24,74 @@ import java.util.Properties;
 
 public class EscapeSyntaxCallModeCallTest extends EscapeSyntaxCallModeBaseTest {
 
-  @Override
-  protected void updateProperties(Properties props) {
-    super.updateProperties(props);
-    PGProperty.ESCAPE_SYNTAX_CALL_MODE.set(props, EscapeSyntaxCallMode.CALL.value());
-  }
-
-  @Test
-  public void testInvokeFunction() throws Throwable {
-    // escapeSyntaxCallMode=call will cause a CALL statement to be used for the JDBC escape call
-    // syntax used below. "myiofunc" is a function, so the attempted invocation should fail.
-    PSQLState expected = PSQLState.WRONG_OBJECT_TYPE;
-    assumeCallableStatementsSupported();
-    assumeMinimumServerVersion(ServerVersion.v11);
-
-    CallableStatement cs = con.prepareCall("{ call myiofunc(?,?) }");
-    cs.registerOutParameter(1, Types.INTEGER);
-    cs.registerOutParameter(2, Types.INTEGER);
-    cs.setInt(1, 10);
-    try {
-      cs.execute();
-      fail("Should throw an exception");
-    } catch (SQLException ex) {
-      assertEquals(expected.getState(), ex.getSQLState());
-    }
-  }
-
-  @Test
-  public void testInvokeFunctionHavingReturnParameter() throws Throwable {
-    // escapeSyntaxCallMode=call will cause a CALL statement to be used for the JDBC escape call
-    // syntax used below. "mysumfunc" is a function, so the attempted invocation should fail.
-
-    //version 14 changes this to undefined function
-    PSQLState expected = PSQLState.WRONG_OBJECT_TYPE;
-
-    if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v14)) {
-      expected = PSQLState.UNDEFINED_FUNCTION;
+    @Override
+    protected void updateProperties(Properties props) {
+        super.updateProperties(props);
+        PGProperty.ESCAPE_SYNTAX_CALL_MODE.set(props, EscapeSyntaxCallMode.CALL.value());
     }
 
-    assumeCallableStatementsSupported();
-    assumeMinimumServerVersion(ServerVersion.v11);
-    CallableStatement cs = con.prepareCall("{ ? = call mysumfunc(?,?) }");
-    cs.registerOutParameter(1, Types.INTEGER);
-    cs.setInt(2, 10);
-    cs.setInt(3, 20);
-    try {
-      cs.execute();
-      fail("Should throw an exception");
-    } catch (SQLException ex) {
-      assertEquals(expected.getState(), ex.getSQLState());
-    }
-  }
+    @Test
+    public void testInvokeFunction() throws Throwable {
+        // escapeSyntaxCallMode=call will cause a CALL statement to be used for the JDBC escape call
+        // syntax used below. "myiofunc" is a function, so the attempted invocation should fail.
+        PSQLState expected = PSQLState.WRONG_OBJECT_TYPE;
+        assumeCallableStatementsSupported();
+        assumeMinimumServerVersion(ServerVersion.v11);
 
-  @Test
-  public void testInvokeProcedure() throws Throwable {
-    // escapeSyntaxCallMode=call will cause a CALL statement to be used for the JDBC escape call
-    // syntax used below. "myioproc" is a procedure, so the invocation should succeed.
-    assumeCallableStatementsSupported();
-    assumeMinimumServerVersion(ServerVersion.v11);
-    CallableStatement cs = con.prepareCall("{call myioproc(?,?)}");
-    cs.registerOutParameter(1, Types.INTEGER);
-    cs.registerOutParameter(2, Types.INTEGER);
-    cs.setInt(1, 10);
-    cs.setInt(2, 20);
-    cs.execute();
-    // Expected output: a==1 (param 1), b==10 (param 2)
-    int a = cs.getInt(1);
-    int b = cs.getInt(2);
-    assertTrue("Expected myioproc() to return output parameter values 1,10 but returned " + a + "," + b, (a == 1 && b == 10));
-  }
+        CallableStatement cs = con.prepareCall("{ call myiofunc(?,?) }");
+        cs.registerOutParameter(1, Types.INTEGER);
+        cs.registerOutParameter(2, Types.INTEGER);
+        cs.setInt(1, 10);
+        try {
+            cs.execute();
+            fail("Should throw an exception");
+        } catch (SQLException ex) {
+            assertEquals(expected.getState(), ex.getSQLState());
+        }
+    }
+
+    @Test
+    public void testInvokeFunctionHavingReturnParameter() throws Throwable {
+        // escapeSyntaxCallMode=call will cause a CALL statement to be used for the JDBC escape call
+        // syntax used below. "mysumfunc" is a function, so the attempted invocation should fail.
+
+        //version 14 changes this to undefined function
+        PSQLState expected = PSQLState.WRONG_OBJECT_TYPE;
+
+        if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v14)) {
+            expected = PSQLState.UNDEFINED_FUNCTION;
+        }
+
+        assumeCallableStatementsSupported();
+        assumeMinimumServerVersion(ServerVersion.v11);
+        CallableStatement cs = con.prepareCall("{ ? = call mysumfunc(?,?) }");
+        cs.registerOutParameter(1, Types.INTEGER);
+        cs.setInt(2, 10);
+        cs.setInt(3, 20);
+        try {
+            cs.execute();
+            fail("Should throw an exception");
+        } catch (SQLException ex) {
+            assertEquals(expected.getState(), ex.getSQLState());
+        }
+    }
+
+    @Test
+    public void testInvokeProcedure() throws Throwable {
+        // escapeSyntaxCallMode=call will cause a CALL statement to be used for the JDBC escape call
+        // syntax used below. "myioproc" is a procedure, so the invocation should succeed.
+        assumeCallableStatementsSupported();
+        assumeMinimumServerVersion(ServerVersion.v11);
+        CallableStatement cs = con.prepareCall("{call myioproc(?,?)}");
+        cs.registerOutParameter(1, Types.INTEGER);
+        cs.registerOutParameter(2, Types.INTEGER);
+        cs.setInt(1, 10);
+        cs.setInt(2, 20);
+        cs.execute();
+        // Expected output: a==1 (param 1), b==10 (param 2)
+        int a = cs.getInt(1);
+        int b = cs.getInt(2);
+        assertTrue("Expected myioproc() to return output parameter values 1,10 but returned " + a + "," + b, (a == 1 && b == 10));
+    }
 
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc3/EscapeSyntaxCallModeSelectTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc3/EscapeSyntaxCallModeSelectTest.java
index 73b4008..c608728 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc3/EscapeSyntaxCallModeSelectTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc3/EscapeSyntaxCallModeSelectTest.java
@@ -23,59 +23,59 @@ import java.util.Properties;
 
 public class EscapeSyntaxCallModeSelectTest extends EscapeSyntaxCallModeBaseTest {
 
-  @Override
-  protected void updateProperties(Properties props) {
-    super.updateProperties(props);
-    PGProperty.ESCAPE_SYNTAX_CALL_MODE.set(props, EscapeSyntaxCallMode.SELECT.value());
-  }
-
-  @Test
-  public void testInvokeFunction() throws Throwable {
-    // escapeSyntaxCallMode=select will cause a SELECT statement to be used for the JDBC escape call
-    // syntax used below. "myiofunc" is a function, so the invocation should succeed.
-    assumeCallableStatementsSupported();
-    CallableStatement cs = con.prepareCall("{ call myiofunc(?,?) }");
-    cs.registerOutParameter(1, Types.INTEGER);
-    cs.registerOutParameter(2, Types.INTEGER);
-    cs.setInt(1, 10);
-    cs.execute();
-    // Expected output: a==1 (param 1), b==10 (param 2)
-    int a = cs.getInt(1);
-    int b = cs.getInt(2);
-    assertTrue("Expected myiofunc() to return output parameter values 1,10 but returned " + a + "," + b, (a == 1 && b == 10));
-  }
-
-  @Test
-  public void testInvokeFunctionHavingReturnParameter() throws Throwable {
-    // escapeSyntaxCallMode=select will cause a SELECT statement to be used for the JDBC escape call
-    // syntax used below. "mysumfunc" is a function, so the invocation should succeed.
-    assumeCallableStatementsSupported();
-    CallableStatement cs = con.prepareCall("{ ? = call mysumfunc(?,?) }");
-    cs.registerOutParameter(1, Types.INTEGER);
-    cs.setInt(2, 10);
-    cs.setInt(3, 20);
-    cs.execute();
-    int ret = cs.getInt(1);
-    assertTrue("Expected mysumfunc(10,20) to return 30 but returned " + ret, ret == 30);
-  }
-
-  @Test
-  public void testInvokeProcedure() throws Throwable {
-    // escapeSyntaxCallMode=select will cause a SELECT statement to be used for the JDBC escape call
-    // syntax used below. "myioproc" is a procedure, so the attempted invocation should fail.
-    assumeCallableStatementsSupported();
-    assumeMinimumServerVersion(ServerVersion.v11);
-    CallableStatement cs = con.prepareCall("{call myioproc(?,?)}");
-    cs.registerOutParameter(1, Types.INTEGER);
-    cs.registerOutParameter(2, Types.INTEGER);
-    cs.setInt(1, 10);
-    cs.setInt(2, 20);
-    try {
-      cs.execute();
-      fail("Should throw an exception");
-    } catch (SQLException ex) {
-      assertEquals(PSQLState.WRONG_OBJECT_TYPE.getState(), ex.getSQLState());
+    @Override
+    protected void updateProperties(Properties props) {
+        super.updateProperties(props);
+        PGProperty.ESCAPE_SYNTAX_CALL_MODE.set(props, EscapeSyntaxCallMode.SELECT.value());
+    }
+
+    @Test
+    public void testInvokeFunction() throws Throwable {
+        // escapeSyntaxCallMode=select will cause a SELECT statement to be used for the JDBC escape call
+        // syntax used below. "myiofunc" is a function, so the invocation should succeed.
+        assumeCallableStatementsSupported();
+        CallableStatement cs = con.prepareCall("{ call myiofunc(?,?) }");
+        cs.registerOutParameter(1, Types.INTEGER);
+        cs.registerOutParameter(2, Types.INTEGER);
+        cs.setInt(1, 10);
+        cs.execute();
+        // Expected output: a==1 (param 1), b==10 (param 2)
+        int a = cs.getInt(1);
+        int b = cs.getInt(2);
+        assertTrue("Expected myiofunc() to return output parameter values 1,10 but returned " + a + "," + b, (a == 1 && b == 10));
+    }
+
+    @Test
+    public void testInvokeFunctionHavingReturnParameter() throws Throwable {
+        // escapeSyntaxCallMode=select will cause a SELECT statement to be used for the JDBC escape call
+        // syntax used below. "mysumfunc" is a function, so the invocation should succeed.
+        assumeCallableStatementsSupported();
+        CallableStatement cs = con.prepareCall("{ ? = call mysumfunc(?,?) }");
+        cs.registerOutParameter(1, Types.INTEGER);
+        cs.setInt(2, 10);
+        cs.setInt(3, 20);
+        cs.execute();
+        int ret = cs.getInt(1);
+        assertTrue("Expected mysumfunc(10,20) to return 30 but returned " + ret, ret == 30);
+    }
+
+    @Test
+    public void testInvokeProcedure() throws Throwable {
+        // escapeSyntaxCallMode=select will cause a SELECT statement to be used for the JDBC escape call
+        // syntax used below. "myioproc" is a procedure, so the attempted invocation should fail.
+        assumeCallableStatementsSupported();
+        assumeMinimumServerVersion(ServerVersion.v11);
+        CallableStatement cs = con.prepareCall("{call myioproc(?,?)}");
+        cs.registerOutParameter(1, Types.INTEGER);
+        cs.registerOutParameter(2, Types.INTEGER);
+        cs.setInt(1, 10);
+        cs.setInt(2, 20);
+        try {
+            cs.execute();
+            fail("Should throw an exception");
+        } catch (SQLException ex) {
+            assertEquals(PSQLState.WRONG_OBJECT_TYPE.getState(), ex.getSQLState());
+        }
     }
-  }
 
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc3/GeneratedKeysTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc3/GeneratedKeysTest.java
index a7498ef..aa5d427 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc3/GeneratedKeysTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc3/GeneratedKeysTest.java
@@ -33,487 +33,486 @@ import java.util.Collection;
 
 @RunWith(Parameterized.class)
 public class GeneratedKeysTest extends BaseTest4 {
-  public enum ReturningInQuery {
-    A("a"),
-    AB("a", "b"),
-    STAR("*"),
-    NO();
-    final String[] columns;
-
-    ReturningInQuery(String... columns) {
-      this.columns = columns;
+    private final ReturningInQuery returningInQuery;
+    private final String returningClause;
+    public GeneratedKeysTest(ReturningInQuery returningInQuery, BinaryMode binaryMode) throws Exception {
+        this.returningInQuery = returningInQuery;
+        this.returningClause = returningInQuery.getClause();
+        setBinaryMode(binaryMode);
     }
 
-    public int columnsReturned() {
-      if (columns.length == 1 && columns[0].charAt(0) == '*') {
-        return 100500; // does not matter much, the meaning is "every possible column"
-      }
-      return columns.length;
-    }
-
-    public String getClause() {
-      if (columnsReturned() == 0) {
-        return "";
-      }
-      StringBuilder sb = new StringBuilder(" returning ");
-      for (int i = 0; i < columns.length; i++) {
-        String column = columns[i];
-        if (i != 0) {
-          sb.append(", ");
+    @Parameterized.Parameters(name = "returningInQuery = {0}, binary = {1}")
+    public static Iterable<Object[]> data() {
+        Collection<Object[]> ids = new ArrayList<>();
+        for (ReturningInQuery returningInQuery : ReturningInQuery.values()) {
+            for (BinaryMode binaryMode : BinaryMode.values()) {
+                ids.add(new Object[]{returningInQuery, binaryMode});
+            }
         }
-        sb.append(column);
-      }
-      return sb.toString();
+        return ids;
     }
-  }
 
-  private final ReturningInQuery returningInQuery;
-  private final String returningClause;
-
-  public GeneratedKeysTest(ReturningInQuery returningInQuery, BinaryMode binaryMode) throws Exception {
-    this.returningInQuery = returningInQuery;
-    this.returningClause = returningInQuery.getClause();
-    setBinaryMode(binaryMode);
-  }
-
-  @Parameterized.Parameters(name = "returningInQuery = {0}, binary = {1}")
-  public static Iterable<Object[]> data() {
-    Collection<Object[]> ids = new ArrayList<>();
-    for (ReturningInQuery returningInQuery : ReturningInQuery.values()) {
-      for (BinaryMode binaryMode : BinaryMode.values()) {
-        ids.add(new Object[]{returningInQuery, binaryMode});
-      }
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        TestUtil.createTempTable(con, "genkeys", "a serial, b varchar(5), c int");
     }
-    return ids;
-  }
 
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-    TestUtil.createTempTable(con, "genkeys", "a serial, b varchar(5), c int");
-  }
-
-  @Override
-  public void tearDown() throws SQLException {
-    TestUtil.dropTable(con, "genkeys");
-    super.tearDown();
-  }
-
-  @Test
-  public void testGeneratedKeys() throws SQLException {
-    testGeneratedKeysWithSuffix("");
-  }
-
-  private void testGeneratedKeysWithSuffix(String suffix) throws SQLException {
-    Statement stmt = con.createStatement();
-    int count = stmt.executeUpdate("INSERT INTO genkeys VALUES (1, 'a', 2)" + returningClause + suffix,
-        Statement.RETURN_GENERATED_KEYS);
-    assertEquals(1, count);
-    ResultSet rs = stmt.getGeneratedKeys();
-    assert1a2(rs);
-  }
-
-  private void assert1a2(ResultSet rs) throws SQLException {
-    assertTrue(rs.next());
-    assertEquals(1, rs.getInt(1));
-    assertEquals(1, rs.getInt("a"));
-    if (returningInQuery.columnsReturned() >= 2) {
-      assertEquals("a", rs.getString(2));
-      assertEquals("a", rs.getString("b"));
+    @Override
+    public void tearDown() throws SQLException {
+        TestUtil.dropTable(con, "genkeys");
+        super.tearDown();
     }
-    if (returningInQuery.columnsReturned() >= 3) {
-      assertEquals("2", rs.getString(3));
-      assertEquals(2, rs.getInt("c"));
+
+    @Test
+    public void testGeneratedKeys() throws SQLException {
+        testGeneratedKeysWithSuffix("");
     }
-    assertTrue(!rs.next());
-  }
 
-  @Test
-  public void testStatementUpdateCount() throws SQLException {
-    Statement stmt = con.createStatement();
-    stmt.executeUpdate("INSERT INTO genkeys VALUES (1, 'a', 2)" + returningClause,
-        Statement.RETURN_GENERATED_KEYS);
-    assertEquals(1, stmt.getUpdateCount());
-    assertNull(stmt.getResultSet());
-    assertTrue(!stmt.getMoreResults());
-  }
-
-  @Test
-  public void testCloseStatementClosesRS() throws SQLException {
-    Statement stmt = con.createStatement();
-    stmt.executeUpdate("INSERT INTO genkeys VALUES (1, 'a', 2)" + returningClause,
-        Statement.RETURN_GENERATED_KEYS);
-    ResultSet rs = stmt.getGeneratedKeys();
-    stmt.close();
-    assertTrue("statement was closed, thus the resultset should be closed as well", rs.isClosed());
-    try {
-      rs.next();
-      fail("Can't operate on a closed result set.");
-    } catch (SQLException sqle) {
+    private void testGeneratedKeysWithSuffix(String suffix) throws SQLException {
+        Statement stmt = con.createStatement();
+        int count = stmt.executeUpdate("INSERT INTO genkeys VALUES (1, 'a', 2)" + returningClause + suffix,
+                Statement.RETURN_GENERATED_KEYS);
+        assertEquals(1, count);
+        ResultSet rs = stmt.getGeneratedKeys();
+        assert1a2(rs);
     }
-  }
 
-  @Test
-  public void testReturningWithTrailingSemicolon() throws SQLException {
-    testGeneratedKeysWithSuffix("; ");
-  }
-
-  @Test
-  public void testEmptyRSWithoutReturning() throws SQLException {
-    Statement stmt = con.createStatement();
-    try {
-      int count =
-          stmt.executeUpdate("INSERT INTO genkeys VALUES (1, 'a', 2)" + returningClause + "; ",
-              Statement.NO_GENERATED_KEYS);
-      assertEquals(1, count);
-      if (returningInQuery.columnsReturned() > 0) {
-        fail(
-            "A result was returned when none was expected error should happen when executing executeUpdate('... returning ...')");
-      }
-    } catch (SQLException e) {
-      if (returningInQuery.columnsReturned() > 0 && "0100E".equals(e.getSQLState())) {
-        // A result was returned when none was expected
-        return; // just as expected
-      }
-      throw e;
-    }
-    ResultSet rs = stmt.getGeneratedKeys();
-    assertFalse("Statement.NO_GENERATED_KEYS => stmt.getGeneratedKeys() should be empty", rs.next());
-  }
-
-  @Test
-  public void testMultipleRows() throws SQLException {
-    Statement stmt = con.createStatement();
-    int count = stmt.executeUpdate(
-        "INSERT INTO genkeys VALUES (1, 'a', 2), (2, 'b', 4)" + returningClause + "; ",
-        new String[]{"c", "b"});
-    assertEquals(2, count);
-    ResultSet rs = stmt.getGeneratedKeys();
-    assertTrue(rs.next());
-    assertCB1(rs);
-    assertTrue(rs.next());
-    assertCB2(rs);
-    assertTrue(!rs.next());
-  }
-
-  @Test
-  public void testSerialWorks() throws SQLException {
-    Statement stmt = con.createStatement();
-    int count = stmt.executeUpdate(
-        "INSERT/*fool parser*/ INTO genkeys (b,c) VALUES ('a', 2), ('b', 4)" + returningClause + "; ",
-        new String[]{"a"});
-    assertEquals(2, count);
-    ResultSet rs = stmt.getGeneratedKeys();
-    assertTrue(rs.next());
-    assertEquals(1, rs.getInt(1));
-    assertTrue(rs.next());
-    assertEquals(2, rs.getInt(1));
-    assertTrue(!rs.next());
-  }
-
-  @Test
-  public void testUpdate() throws SQLException {
-    Statement stmt = con.createStatement();
-    stmt.executeUpdate("INSERT INTO genkeys VALUES (1, 'a', 3)");
-    stmt.executeUpdate("INSERT INTO genkeys VALUES (2, 'b', 4)");
-    stmt.executeUpdate("UPDATE genkeys SET c=2 WHERE a = 1" + returningClause,
-        new String[]{"c", "b"});
-    ResultSet rs = stmt.getGeneratedKeys();
-    assertTrue(rs.next());
-    assertCB1(rs);
-    assertTrue(!rs.next());
-  }
-
-  @Test
-  public void testWithInsertInsert() throws SQLException {
-    assumeMinimumServerVersion(ServerVersion.v9_1);
-    Statement stmt = con.createStatement();
-    int count = stmt.executeUpdate(
-        "WITH x as (INSERT INTO genkeys (b,c) VALUES ('a', 2) returning c) insert into genkeys(a,b,c) VALUES (1, 'a', 2)" + returningClause + "",
-        new String[]{"c", "b"});
-    assertEquals(1, count);
-    ResultSet rs = stmt.getGeneratedKeys();
-    assertTrue(rs.next());
-    assertCB1(rs);
-    assertTrue(!rs.next());
-  }
-
-  @Test
-  public void testWithInsertSelect() throws SQLException {
-    assumeMinimumServerVersion(ServerVersion.v9_1);
-    Assume.assumeTrue(returningInQuery != ReturningInQuery.NO);
-    Statement stmt = con.createStatement();
-    int count = stmt.executeUpdate(
-        "WITH x as (INSERT INTO genkeys(a,b,c) VALUES (1, 'a', 2) " + returningClause
-            + ") select * from x",
-        new String[]{"c", "b"});
-    assertEquals("rowcount", -1, count);
-    // TODO: should SELECT produce rows through getResultSet or getGeneratedKeys?
-    ResultSet rs = stmt.getResultSet();
-    assertTrue(rs.next());
-    assertCB1(rs);
-    assertTrue(!rs.next());
-  }
-
-  @Test
-  public void testDelete() throws SQLException {
-    Statement stmt = con.createStatement();
-    stmt.executeUpdate("INSERT INTO genkeys VALUES (1, 'a', 2)");
-    stmt.executeUpdate("INSERT INTO genkeys VALUES (2, 'b', 4)");
-    stmt.executeUpdate("DELETE FROM genkeys WHERE a = 1" + returningClause,
-        new String[]{"c", "b"});
-    ResultSet rs = stmt.getGeneratedKeys();
-    assertTrue(rs.next());
-    assertCB1(rs);
-    assertTrue(!rs.next());
-  }
-
-  @Test
-  public void testPSUpdate() throws SQLException {
-    Statement stmt = con.createStatement();
-    stmt.executeUpdate("INSERT INTO genkeys VALUES (1, 'a', -3)");
-    stmt.executeUpdate("INSERT INTO genkeys VALUES (2, 'b', 4)");
-    stmt.close();
-
-    PreparedStatement ps =
-        con.prepareStatement("UPDATE genkeys SET c=? WHERE a = ?" + returningClause, new String[]{"c", "b"});
-    ps.setInt(1, 2);
-    ps.setInt(2, 1);
-    assertEquals(1, ps.executeUpdate());
-    ResultSet rs = ps.getGeneratedKeys();
-    assertTrue(rs.next());
-    assertCB1(rs);
-    assertTrue(!rs.next());
-  }
-
-  @Test
-  public void testPSDelete() throws SQLException {
-    Statement stmt = con.createStatement();
-    stmt.executeUpdate("INSERT INTO genkeys VALUES (1, 'a', 2)");
-    stmt.executeUpdate("INSERT INTO genkeys VALUES (2, 'b', 4)");
-    stmt.close();
-
-    PreparedStatement ps =
-        con.prepareStatement("DELETE FROM genkeys WHERE a = ?" + returningClause, new String[]{"c", "b"});
-
-    ps.setInt(1, 1);
-    assertEquals(1, ps.executeUpdate());
-    ResultSet rs = ps.getGeneratedKeys();
-    assertTrue(rs.next());
-    assertCB1(rs);
-    assertTrue(!rs.next());
-
-    ps.setInt(1, 2);
-    assertEquals(1, ps.executeUpdate());
-    rs = ps.getGeneratedKeys();
-    assertTrue(rs.next());
-    assertCB2(rs);
-    assertTrue(!rs.next());
-  }
-
-  private void assertCB1(ResultSet rs) throws SQLException {
-    ResultSetMetaData rsmd = rs.getMetaData();
-    StringBuilder sb = new StringBuilder();
-    for (int i = 1; i <= rsmd.getColumnCount(); i++) {
-      if (i > 1) {
-        sb.append(", ");
-      }
-      sb.append(rsmd.getColumnName(i));
-    }
-    String columnNames = sb.toString();
-    switch (returningInQuery) {
-      case NO:
-        assertEquals("Two columns should be returned since returning clause was empty and {c, b} was requested via API",
-            "c, b", columnNames);
-        assertEquals(2, rs.getInt(1));
-        assertEquals("a", rs.getString(2));
-        assertEquals(2, rs.getInt("c"));
-        assertEquals("a", rs.getString("b"));
-        break;
-      case A:
-        assertEquals("Just one column should be returned since returning clause was " + returningClause,
-            "a", columnNames);
+    private void assert1a2(ResultSet rs) throws SQLException {
+        assertTrue(rs.next());
         assertEquals(1, rs.getInt(1));
         assertEquals(1, rs.getInt("a"));
-        break;
-      case AB:
-        assertEquals("Two columns should be returned since returning clause was " + returningClause,
-            "a, b", columnNames);
+        if (returningInQuery.columnsReturned() >= 2) {
+            assertEquals("a", rs.getString(2));
+            assertEquals("a", rs.getString("b"));
+        }
+        if (returningInQuery.columnsReturned() >= 3) {
+            assertEquals("2", rs.getString(3));
+            assertEquals(2, rs.getInt("c"));
+        }
+        assertTrue(!rs.next());
+    }
+
+    @Test
+    public void testStatementUpdateCount() throws SQLException {
+        Statement stmt = con.createStatement();
+        stmt.executeUpdate("INSERT INTO genkeys VALUES (1, 'a', 2)" + returningClause,
+                Statement.RETURN_GENERATED_KEYS);
+        assertEquals(1, stmt.getUpdateCount());
+        assertNull(stmt.getResultSet());
+        assertTrue(!stmt.getMoreResults());
+    }
+
+    @Test
+    public void testCloseStatementClosesRS() throws SQLException {
+        Statement stmt = con.createStatement();
+        stmt.executeUpdate("INSERT INTO genkeys VALUES (1, 'a', 2)" + returningClause,
+                Statement.RETURN_GENERATED_KEYS);
+        ResultSet rs = stmt.getGeneratedKeys();
+        stmt.close();
+        assertTrue("statement was closed, thus the resultset should be closed as well", rs.isClosed());
+        try {
+            rs.next();
+            fail("Can't operate on a closed result set.");
+        } catch (SQLException sqle) {
+        }
+    }
+
+    @Test
+    public void testReturningWithTrailingSemicolon() throws SQLException {
+        testGeneratedKeysWithSuffix("; ");
+    }
+
+    @Test
+    public void testEmptyRSWithoutReturning() throws SQLException {
+        Statement stmt = con.createStatement();
+        try {
+            int count =
+                    stmt.executeUpdate("INSERT INTO genkeys VALUES (1, 'a', 2)" + returningClause + "; ",
+                            Statement.NO_GENERATED_KEYS);
+            assertEquals(1, count);
+            if (returningInQuery.columnsReturned() > 0) {
+                fail(
+                        "A result was returned when none was expected error should happen when executing executeUpdate('... returning ...')");
+            }
+        } catch (SQLException e) {
+            if (returningInQuery.columnsReturned() > 0 && "0100E".equals(e.getSQLState())) {
+                // A result was returned when none was expected
+                return; // just as expected
+            }
+            throw e;
+        }
+        ResultSet rs = stmt.getGeneratedKeys();
+        assertFalse("Statement.NO_GENERATED_KEYS => stmt.getGeneratedKeys() should be empty", rs.next());
+    }
+
+    @Test
+    public void testMultipleRows() throws SQLException {
+        Statement stmt = con.createStatement();
+        int count = stmt.executeUpdate(
+                "INSERT INTO genkeys VALUES (1, 'a', 2), (2, 'b', 4)" + returningClause + "; ",
+                new String[]{"c", "b"});
+        assertEquals(2, count);
+        ResultSet rs = stmt.getGeneratedKeys();
+        assertTrue(rs.next());
+        assertCB1(rs);
+        assertTrue(rs.next());
+        assertCB2(rs);
+        assertTrue(!rs.next());
+    }
+
+    @Test
+    public void testSerialWorks() throws SQLException {
+        Statement stmt = con.createStatement();
+        int count = stmt.executeUpdate(
+                "INSERT/*fool parser*/ INTO genkeys (b,c) VALUES ('a', 2), ('b', 4)" + returningClause + "; ",
+                new String[]{"a"});
+        assertEquals(2, count);
+        ResultSet rs = stmt.getGeneratedKeys();
+        assertTrue(rs.next());
         assertEquals(1, rs.getInt(1));
-        assertEquals("a", rs.getString(2));
+        assertTrue(rs.next());
+        assertEquals(2, rs.getInt(1));
+        assertTrue(!rs.next());
+    }
+
+    @Test
+    public void testUpdate() throws SQLException {
+        Statement stmt = con.createStatement();
+        stmt.executeUpdate("INSERT INTO genkeys VALUES (1, 'a', 3)");
+        stmt.executeUpdate("INSERT INTO genkeys VALUES (2, 'b', 4)");
+        stmt.executeUpdate("UPDATE genkeys SET c=2 WHERE a = 1" + returningClause,
+                new String[]{"c", "b"});
+        ResultSet rs = stmt.getGeneratedKeys();
+        assertTrue(rs.next());
+        assertCB1(rs);
+        assertTrue(!rs.next());
+    }
+
+    @Test
+    public void testWithInsertInsert() throws SQLException {
+        assumeMinimumServerVersion(ServerVersion.v9_1);
+        Statement stmt = con.createStatement();
+        int count = stmt.executeUpdate(
+                "WITH x as (INSERT INTO genkeys (b,c) VALUES ('a', 2) returning c) insert into genkeys(a,b,c) VALUES (1, 'a', 2)" + returningClause + "",
+                new String[]{"c", "b"});
+        assertEquals(1, count);
+        ResultSet rs = stmt.getGeneratedKeys();
+        assertTrue(rs.next());
+        assertCB1(rs);
+        assertTrue(!rs.next());
+    }
+
+    @Test
+    public void testWithInsertSelect() throws SQLException {
+        assumeMinimumServerVersion(ServerVersion.v9_1);
+        Assume.assumeTrue(returningInQuery != ReturningInQuery.NO);
+        Statement stmt = con.createStatement();
+        int count = stmt.executeUpdate(
+                "WITH x as (INSERT INTO genkeys(a,b,c) VALUES (1, 'a', 2) " + returningClause
+                        + ") select * from x",
+                new String[]{"c", "b"});
+        assertEquals("rowcount", -1, count);
+        // TODO: should SELECT produce rows through getResultSet or getGeneratedKeys?
+        ResultSet rs = stmt.getResultSet();
+        assertTrue(rs.next());
+        assertCB1(rs);
+        assertTrue(!rs.next());
+    }
+
+    @Test
+    public void testDelete() throws SQLException {
+        Statement stmt = con.createStatement();
+        stmt.executeUpdate("INSERT INTO genkeys VALUES (1, 'a', 2)");
+        stmt.executeUpdate("INSERT INTO genkeys VALUES (2, 'b', 4)");
+        stmt.executeUpdate("DELETE FROM genkeys WHERE a = 1" + returningClause,
+                new String[]{"c", "b"});
+        ResultSet rs = stmt.getGeneratedKeys();
+        assertTrue(rs.next());
+        assertCB1(rs);
+        assertTrue(!rs.next());
+    }
+
+    @Test
+    public void testPSUpdate() throws SQLException {
+        Statement stmt = con.createStatement();
+        stmt.executeUpdate("INSERT INTO genkeys VALUES (1, 'a', -3)");
+        stmt.executeUpdate("INSERT INTO genkeys VALUES (2, 'b', 4)");
+        stmt.close();
+
+        PreparedStatement ps =
+                con.prepareStatement("UPDATE genkeys SET c=? WHERE a = ?" + returningClause, new String[]{"c", "b"});
+        ps.setInt(1, 2);
+        ps.setInt(2, 1);
+        assertEquals(1, ps.executeUpdate());
+        ResultSet rs = ps.getGeneratedKeys();
+        assertTrue(rs.next());
+        assertCB1(rs);
+        assertTrue(!rs.next());
+    }
+
+    @Test
+    public void testPSDelete() throws SQLException {
+        Statement stmt = con.createStatement();
+        stmt.executeUpdate("INSERT INTO genkeys VALUES (1, 'a', 2)");
+        stmt.executeUpdate("INSERT INTO genkeys VALUES (2, 'b', 4)");
+        stmt.close();
+
+        PreparedStatement ps =
+                con.prepareStatement("DELETE FROM genkeys WHERE a = ?" + returningClause, new String[]{"c", "b"});
+
+        ps.setInt(1, 1);
+        assertEquals(1, ps.executeUpdate());
+        ResultSet rs = ps.getGeneratedKeys();
+        assertTrue(rs.next());
+        assertCB1(rs);
+        assertTrue(!rs.next());
+
+        ps.setInt(1, 2);
+        assertEquals(1, ps.executeUpdate());
+        rs = ps.getGeneratedKeys();
+        assertTrue(rs.next());
+        assertCB2(rs);
+        assertTrue(!rs.next());
+    }
+
+    private void assertCB1(ResultSet rs) throws SQLException {
+        ResultSetMetaData rsmd = rs.getMetaData();
+        StringBuilder sb = new StringBuilder();
+        for (int i = 1; i <= rsmd.getColumnCount(); i++) {
+            if (i > 1) {
+                sb.append(", ");
+            }
+            sb.append(rsmd.getColumnName(i));
+        }
+        String columnNames = sb.toString();
+        switch (returningInQuery) {
+            case NO:
+                assertEquals("Two columns should be returned since returning clause was empty and {c, b} was requested via API",
+                        "c, b", columnNames);
+                assertEquals(2, rs.getInt(1));
+                assertEquals("a", rs.getString(2));
+                assertEquals(2, rs.getInt("c"));
+                assertEquals("a", rs.getString("b"));
+                break;
+            case A:
+                assertEquals("Just one column should be returned since returning clause was " + returningClause,
+                        "a", columnNames);
+                assertEquals(1, rs.getInt(1));
+                assertEquals(1, rs.getInt("a"));
+                break;
+            case AB:
+                assertEquals("Two columns should be returned since returning clause was " + returningClause,
+                        "a, b", columnNames);
+                assertEquals(1, rs.getInt(1));
+                assertEquals("a", rs.getString(2));
+                assertEquals(1, rs.getInt("a"));
+                assertEquals("a", rs.getString("b"));
+                break;
+            case STAR:
+                assertEquals("Three columns should be returned since returning clause was " + returningClause,
+                        "a, b, c", columnNames);
+                assertEquals(1, rs.getInt(1));
+                assertEquals("a", rs.getString(2));
+                assertEquals(2, rs.getInt(3));
+                assertEquals(1, rs.getInt("a"));
+                assertEquals("a", rs.getString("b"));
+                assertEquals(2, rs.getInt("c"));
+                break;
+            default:
+                fail("Unexpected test kind: " + returningInQuery);
+        }
+    }
+
+    private void assertCB2(ResultSet rs) throws SQLException {
+        switch (returningInQuery) {
+            case NO:
+                assertEquals("Two columns should be returned since returning clause was empty and {c, b} was requested via API",
+                        2, rs.getMetaData().getColumnCount());
+                assertEquals(4, rs.getInt(1));
+                assertEquals("b", rs.getString(2));
+                break;
+            case A:
+                assertEquals("Just one column should be returned since returning clause was " + returningClause,
+                        1, rs.getMetaData().getColumnCount());
+                assertEquals(2, rs.getInt(1));
+                break;
+            case AB:
+                assertEquals("Two columns should be returned since returning clause was " + returningClause,
+                        2, rs.getMetaData().getColumnCount());
+                assertEquals(2, rs.getInt(1));
+                assertEquals("b", rs.getString(2));
+                break;
+            case STAR:
+                assertEquals("Three columns should be returned since returning clause was " + returningClause,
+                        3, rs.getMetaData().getColumnCount());
+                assertEquals(2, rs.getInt(1));
+                assertEquals("b", rs.getString(2));
+                assertEquals(4, rs.getInt(3));
+                break;
+            default:
+                fail("Unexpected test kind: " + returningInQuery);
+        }
+    }
+
+    @Test
+    public void testGeneratedKeysCleared() throws SQLException {
+        Statement stmt = con.createStatement();
+        stmt.executeUpdate("INSERT INTO genkeys VALUES (1, 'a', 2)" + returningClause + "; ", Statement.RETURN_GENERATED_KEYS);
+        ResultSet rs = stmt.getGeneratedKeys();
+        assertTrue(rs.next());
+        try {
+            stmt.executeUpdate("INSERT INTO genkeys VALUES (2, 'b', 3)" + returningClause);
+            if (returningInQuery.columnsReturned() > 0) {
+                fail("A result was returned when none was expected error should happen when executing executeUpdate('... returning ...')");
+            }
+        } catch (SQLException e) {
+            if (returningInQuery.columnsReturned() > 0 && "0100E".equals(e.getSQLState())) {
+                // A result was returned when none was expected
+                return; // just as expected
+            }
+            throw e;
+        }
+        rs = stmt.getGeneratedKeys();
+        assertTrue(!rs.next());
+    }
+
+    @Test
+    public void testBatchGeneratedKeys() throws SQLException {
+        PreparedStatement ps = con.prepareStatement("INSERT INTO genkeys(c) VALUES (?)" + returningClause + "",
+                Statement.RETURN_GENERATED_KEYS);
+        ps.setInt(1, 4);
+        ps.addBatch();
+        ps.setInt(1, 7);
+        ps.addBatch();
+        ps.executeBatch();
+        ResultSet rs = ps.getGeneratedKeys();
+        assertTrue("getGeneratedKeys.next() should be non-empty", rs.next());
         assertEquals(1, rs.getInt("a"));
-        assertEquals("a", rs.getString("b"));
-        break;
-      case STAR:
-        assertEquals("Three columns should be returned since returning clause was " + returningClause,
-            "a, b, c", columnNames);
-        assertEquals(1, rs.getInt(1));
-        assertEquals("a", rs.getString(2));
-        assertEquals(2, rs.getInt(3));
-        assertEquals(1, rs.getInt("a"));
-        assertEquals("a", rs.getString("b"));
-        assertEquals(2, rs.getInt("c"));
-        break;
-      default:
-        fail("Unexpected test kind: " + returningInQuery);
+        assertTrue(rs.next());
+        assertEquals(2, rs.getInt("a"));
+        assertTrue(!rs.next());
     }
-  }
 
-  private void assertCB2(ResultSet rs) throws SQLException {
-    switch (returningInQuery) {
-      case NO:
-        assertEquals("Two columns should be returned since returning clause was empty and {c, b} was requested via API",
-            2, rs.getMetaData().getColumnCount());
-        assertEquals(4, rs.getInt(1));
-        assertEquals("b", rs.getString(2));
-        break;
-      case A:
-        assertEquals("Just one column should be returned since returning clause was " + returningClause,
-            1, rs.getMetaData().getColumnCount());
-        assertEquals(2, rs.getInt(1));
-        break;
-      case AB:
-        assertEquals("Two columns should be returned since returning clause was " + returningClause,
-            2, rs.getMetaData().getColumnCount());
-        assertEquals(2, rs.getInt(1));
-        assertEquals("b", rs.getString(2));
-        break;
-      case STAR:
-        assertEquals("Three columns should be returned since returning clause was " + returningClause,
-            3, rs.getMetaData().getColumnCount());
-        assertEquals(2, rs.getInt(1));
-        assertEquals("b", rs.getString(2));
-        assertEquals(4, rs.getInt(3));
-        break;
-      default:
-        fail("Unexpected test kind: " + returningInQuery);
+    private PreparedStatement prepareSelect() throws SQLException {
+        PreparedStatement ps;
+        String sql = "select c from genkeys";
+        switch (returningInQuery) {
+            case NO:
+                ps = con.prepareStatement(sql);
+                break;
+            case STAR:
+                ps = con.prepareStatement(sql, Statement.RETURN_GENERATED_KEYS);
+                break;
+            default:
+                ps = con.prepareStatement(sql, returningInQuery.columns);
+        }
+        return ps;
     }
-  }
 
-  @Test
-  public void testGeneratedKeysCleared() throws SQLException {
-    Statement stmt = con.createStatement();
-    stmt.executeUpdate("INSERT INTO genkeys VALUES (1, 'a', 2)" + returningClause + "; ", Statement.RETURN_GENERATED_KEYS);
-    ResultSet rs = stmt.getGeneratedKeys();
-    assertTrue(rs.next());
-    try {
-      stmt.executeUpdate("INSERT INTO genkeys VALUES (2, 'b', 3)" + returningClause);
-      if (returningInQuery.columnsReturned() > 0) {
-        fail("A result was returned when none was expected error should happen when executing executeUpdate('... returning ...')");
-      }
-    } catch (SQLException e) {
-      if (returningInQuery.columnsReturned() > 0 && "0100E".equals(e.getSQLState())) {
-        // A result was returned when none was expected
-        return; // just as expected
-      }
-      throw e;
+    @Test
+    public void selectWithGeneratedKeysViaPreparedExecuteQuery() throws SQLException {
+        PreparedStatement ps = prepareSelect();
+        ResultSet rs = ps.executeQuery();
+        assertFalse("genkeys table is empty, thus rs.next() should return false", rs.next());
+        ps.close();
     }
-    rs = stmt.getGeneratedKeys();
-    assertTrue(!rs.next());
-  }
 
-  @Test
-  public void testBatchGeneratedKeys() throws SQLException {
-    PreparedStatement ps = con.prepareStatement("INSERT INTO genkeys(c) VALUES (?)" + returningClause + "",
-        Statement.RETURN_GENERATED_KEYS);
-    ps.setInt(1, 4);
-    ps.addBatch();
-    ps.setInt(1, 7);
-    ps.addBatch();
-    ps.executeBatch();
-    ResultSet rs = ps.getGeneratedKeys();
-    assertTrue("getGeneratedKeys.next() should be non-empty", rs.next());
-    assertEquals(1, rs.getInt("a"));
-    assertTrue(rs.next());
-    assertEquals(2, rs.getInt("a"));
-    assertTrue(!rs.next());
-  }
-
-  private PreparedStatement prepareSelect() throws SQLException {
-    PreparedStatement ps;
-    String sql = "select c from genkeys";
-    switch (returningInQuery) {
-      case NO:
-        ps = con.prepareStatement(sql);
-        break;
-      case STAR:
-        ps = con.prepareStatement(sql, Statement.RETURN_GENERATED_KEYS);
-        break;
-      default:
-        ps = con.prepareStatement(sql, returningInQuery.columns);
+    @Test
+    public void selectWithGeneratedKeysViaPreparedExecute() throws SQLException {
+        PreparedStatement ps = prepareSelect();
+        ps.execute();
+        ResultSet rs = ps.getResultSet();
+        assertFalse("genkeys table is empty, thus rs.next() should return false", rs.next());
+        ps.close();
     }
-    return ps;
-  }
 
-  @Test
-  public void selectWithGeneratedKeysViaPreparedExecuteQuery() throws SQLException {
-    PreparedStatement ps = prepareSelect();
-    ResultSet rs = ps.executeQuery();
-    assertFalse("genkeys table is empty, thus rs.next() should return false", rs.next());
-    ps.close();
-  }
-
-  @Test
-  public void selectWithGeneratedKeysViaPreparedExecute() throws SQLException {
-    PreparedStatement ps = prepareSelect();
-    ps.execute();
-    ResultSet rs = ps.getResultSet();
-    assertFalse("genkeys table is empty, thus rs.next() should return false", rs.next());
-    ps.close();
-  }
-
-  @Test
-  public void selectWithGeneratedKeysViaNonPrepared() throws SQLException {
-    Statement s = con.createStatement();
-    String sql = "select c from genkeys";
-    ResultSet rs;
-    switch (returningInQuery) {
-      case NO:
-        s.execute(sql);
-        rs = s.getResultSet();
-        break;
-      case STAR:
-        s.execute(sql, Statement.RETURN_GENERATED_KEYS);
-        rs = s.getResultSet();
-        break;
-      default:
-        s.execute(sql, returningInQuery.columns);
-        rs = s.getResultSet();
+    @Test
+    public void selectWithGeneratedKeysViaNonPrepared() throws SQLException {
+        Statement s = con.createStatement();
+        String sql = "select c from genkeys";
+        ResultSet rs;
+        switch (returningInQuery) {
+            case NO:
+                s.execute(sql);
+                rs = s.getResultSet();
+                break;
+            case STAR:
+                s.execute(sql, Statement.RETURN_GENERATED_KEYS);
+                rs = s.getResultSet();
+                break;
+            default:
+                s.execute(sql, returningInQuery.columns);
+                rs = s.getResultSet();
+        }
+        assertNotNull("SELECT statement should return results via getResultSet, not getGeneratedKeys", rs);
+        assertFalse("genkeys table is empty, thus rs.next() should return false", rs.next());
+        s.close();
     }
-    assertNotNull("SELECT statement should return results via getResultSet, not getGeneratedKeys", rs);
-    assertFalse("genkeys table is empty, thus rs.next() should return false", rs.next());
-    s.close();
-  }
 
-  @Test
-  public void breakDescribeOnFirstServerPreparedExecution() throws SQLException {
-    // Test code is adapted from https://github.com/pgjdbc/pgjdbc/issues/811#issuecomment-352468388
+    @Test
+    public void breakDescribeOnFirstServerPreparedExecution() throws SQLException {
+        // Test code is adapted from https://github.com/pgjdbc/pgjdbc/issues/811#issuecomment-352468388
 
-    PreparedStatement ps =
-        con.prepareStatement("insert into genkeys(b) values(?)" + returningClause,
-            Statement.RETURN_GENERATED_KEYS);
-    ps.setString(1, "TEST");
+        PreparedStatement ps =
+                con.prepareStatement("insert into genkeys(b) values(?)" + returningClause,
+                        Statement.RETURN_GENERATED_KEYS);
+        ps.setString(1, "TEST");
 
-    // The below "prepareThreshold - 1" executions ensure that bind failure would happen
-    // exactly on prepareThreshold execution (the first one when server flips to server-prepared)
-    int prepareThreshold = ps.unwrap(PGStatement.class).getPrepareThreshold();
-    for (int i = 0; i < prepareThreshold - 1; i++) {
-      ps.executeUpdate();
+        // The below "prepareThreshold - 1" executions ensure that bind failure would happen
+        // exactly on prepareThreshold execution (the first one when server flips to server-prepared)
+        int prepareThreshold = ps.unwrap(PGStatement.class).getPrepareThreshold();
+        for (int i = 0; i < prepareThreshold - 1; i++) {
+            ps.executeUpdate();
+        }
+        try {
+            // Send a value that's too long on the 5th request
+            ps.setString(1, "TESTTESTTEST");
+            ps.executeUpdate();
+        } catch (SQLException e) {
+            // Expected error: org.postgresql.util.PSQLException: ERROR: value
+            // too long for type character varying(10)
+            if (!PSQLState.STRING_DATA_RIGHT_TRUNCATION.getState().equals(e.getSQLState())) {
+                throw e;
+            }
+        }
+        // Send a valid value on the next request
+        ps.setString(1, "TEST");
+        ps.executeUpdate();
     }
-    try {
-      // Send a value that's too long on the 5th request
-      ps.setString(1, "TESTTESTTEST");
-      ps.executeUpdate();
-    } catch (SQLException e) {
-      // Expected error: org.postgresql.util.PSQLException: ERROR: value
-      // too long for type character varying(10)
-      if (!PSQLState.STRING_DATA_RIGHT_TRUNCATION.getState().equals(e.getSQLState())) {
-        throw e;
-      }
+
+    public enum ReturningInQuery {
+        A("a"),
+        AB("a", "b"),
+        STAR("*"),
+        NO();
+        final String[] columns;
+
+        ReturningInQuery(String... columns) {
+            this.columns = columns;
+        }
+
+        public int columnsReturned() {
+            if (columns.length == 1 && columns[0].charAt(0) == '*') {
+                return 100500; // does not matter much, the meaning is "every possible column"
+            }
+            return columns.length;
+        }
+
+        public String getClause() {
+            if (columnsReturned() == 0) {
+                return "";
+            }
+            StringBuilder sb = new StringBuilder(" returning ");
+            for (int i = 0; i < columns.length; i++) {
+                String column = columns[i];
+                if (i != 0) {
+                    sb.append(", ");
+                }
+                sb.append(column);
+            }
+            return sb.toString();
+        }
     }
-    // Send a valid value on the next request
-    ps.setString(1, "TEST");
-    ps.executeUpdate();
-  }
 
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc3/Jdbc3BlobTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc3/Jdbc3BlobTest.java
index 3ad1786..5276732 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc3/Jdbc3BlobTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc3/Jdbc3BlobTest.java
@@ -27,284 +27,282 @@ import java.sql.SQLException;
 import java.sql.Statement;
 
 public class Jdbc3BlobTest {
-  private static final String TABLE = "blobtest";
-  private static final String INSERT = "INSERT INTO " + TABLE + " VALUES (1, lo_creat(-1))";
-  private static final String SELECT = "SELECT ID, DATA FROM " + TABLE + " WHERE ID = 1";
+    private static final String TABLE = "blobtest";
+    private static final String INSERT = "INSERT INTO " + TABLE + " VALUES (1, lo_creat(-1))";
+    private static final String SELECT = "SELECT ID, DATA FROM " + TABLE + " WHERE ID = 1";
 
-  private Connection conn;
+    private Connection conn;
 
-  @BeforeEach
-  void setUp() throws Exception {
-    conn = TestUtil.openDB();
-    TestUtil.createTable(conn, TABLE, "ID INT PRIMARY KEY, DATA OID");
-    conn.setAutoCommit(false);
-  }
+    @BeforeEach
+    void setUp() throws Exception {
+        conn = TestUtil.openDB();
+        TestUtil.createTable(conn, TABLE, "ID INT PRIMARY KEY, DATA OID");
+        conn.setAutoCommit(false);
+    }
 
-  @AfterEach
-  void tearDown() throws SQLException {
-    conn.setAutoCommit(true);
-    try {
-      Statement stmt = conn.createStatement();
-      try {
-        stmt.execute("SELECT lo_unlink(DATA) FROM " + TABLE);
-      } finally {
+    @AfterEach
+    void tearDown() throws SQLException {
+        conn.setAutoCommit(true);
         try {
-          stmt.close();
-        } catch (Exception e) {
+            Statement stmt = conn.createStatement();
+            try {
+                stmt.execute("SELECT lo_unlink(DATA) FROM " + TABLE);
+            } finally {
+                try {
+                    stmt.close();
+                } catch (Exception e) {
+                }
+            }
+        } finally {
+            TestUtil.dropTable(conn, TABLE);
+            TestUtil.closeDB(conn);
         }
-      }
-    } finally {
-      TestUtil.dropTable(conn, TABLE);
-      TestUtil.closeDB(conn);
-    }
-  }
-
-  /**
-   * Test the writing and reading of a single byte.
-   */
-  @Test
-  void test1Byte() throws SQLException {
-    byte[] data = {(byte) 'a'};
-    readWrite(data);
-  }
-
-  /**
-   * Test the writing and reading of a few bytes.
-   */
-  @Test
-  void manyBytes() throws SQLException {
-    byte[] data = "aaaaaaaaaa".getBytes();
-    readWrite(data);
-  }
-
-  /**
-   * Test writing a single byte with an offset.
-   */
-  @Test
-  void test1ByteOffset() throws SQLException {
-    byte[] data = {(byte) 'a'};
-    readWrite(10, data);
-  }
-
-  /**
-   * Test the writing and reading of a few bytes with an offset.
-   */
-  @Test
-  void manyBytesOffset() throws SQLException {
-    byte[] data = "aaaaaaaaaa".getBytes();
-    readWrite(10, data);
-  }
-
-  /**
-   * Tests all of the byte values from 0 - 255.
-   */
-  @Test
-  void allBytes() throws SQLException {
-    byte[] data = new byte[256];
-    for (int i = 0; i < data.length; i++) {
-      data[i] = (byte) i;
-    }
-    readWrite(data);
-  }
-
-  @Test
-  void truncate() throws SQLException {
-    if (!TestUtil.haveMinimumServerVersion(conn, ServerVersion.v8_3)) {
-      return;
     }
 
-    byte[] data = new byte[100];
-    for (byte i = 0; i < data.length; i++) {
-      data[i] = i;
-    }
-    readWrite(data);
-
-    PreparedStatement ps = conn.prepareStatement(SELECT);
-    ResultSet rs = ps.executeQuery();
-
-    assertTrue(rs.next());
-    Blob blob = rs.getBlob("DATA");
-
-    assertEquals(100, blob.length());
-
-    blob.truncate(50);
-    assertEquals(50, blob.length());
-
-    blob.truncate(150);
-    assertEquals(150, blob.length());
-
-    data = blob.getBytes(1, 200);
-    assertEquals(150, data.length);
-    for (byte i = 0; i < 50; i++) {
-      assertEquals(i, data[i]);
+    /**
+     * Test the writing and reading of a single byte.
+     */
+    @Test
+    void test1Byte() throws SQLException {
+        byte[] data = {(byte) 'a'};
+        readWrite(data);
     }
 
-    for (int i = 50; i < 150; i++) {
-      assertEquals(0, data[i]);
+    /**
+     * Test the writing and reading of a few bytes.
+     */
+    @Test
+    void manyBytes() throws SQLException {
+        byte[] data = "aaaaaaaaaa".getBytes();
+        readWrite(data);
     }
-  }
 
-  /**
-   *
-   * @param data data to write
-   * @throws SQLException if something goes wrong
-   */
-  public void readWrite(byte[] data) throws SQLException {
-    readWrite(1, data);
-  }
-
-  /**
-   *
-   * @param offset data offset
-   * @param data data to write
-   * @throws SQLException if something goes wrong
-   */
-  public void readWrite(int offset, byte[] data) throws SQLException {
-    PreparedStatement ps = conn.prepareStatement(INSERT);
-    ps.executeUpdate();
-    ps.close();
-
-    ps = conn.prepareStatement(SELECT);
-    ResultSet rs = ps.executeQuery();
-
-    assertTrue(rs.next());
-    Blob b = rs.getBlob("DATA");
-    b.setBytes(offset, data);
-
-    rs.close();
-    ps.close();
-
-    ps = conn.prepareStatement(SELECT);
-    rs = ps.executeQuery();
-
-    assertTrue(rs.next());
-    b = rs.getBlob("DATA");
-    byte[] rspData = b.getBytes(offset, data.length);
-    assertArrayEquals(data, rspData, "Request should be the same as the response");
-
-    rs.close();
-    ps.close();
-  }
-
-  /**
-   * Test the writing and reading of a single byte.
-   */
-  @Test
-  void test1ByteStream() throws SQLException, IOException {
-    byte[] data = {(byte) 'a'};
-    readWriteStream(data);
-  }
-
-  /**
-   * Test the writing and reading of a few bytes.
-   */
-  @Test
-  void manyBytesStream() throws SQLException, IOException {
-    byte[] data = "aaaaaaaaaa".getBytes();
-    readWriteStream(data);
-  }
-
-  /**
-   * Test writing a single byte with an offset.
-   */
-  @Test
-  void test1ByteOffsetStream() throws SQLException, IOException {
-    byte[] data = {(byte) 'a'};
-    readWriteStream(10, data);
-  }
-
-  /**
-   * Test the writing and reading of a few bytes with an offset.
-   */
-  @Test
-  void manyBytesOffsetStream() throws SQLException, IOException {
-    byte[] data = "aaaaaaaaaa".getBytes();
-    readWriteStream(10, data);
-  }
-
-  /**
-   * Tests all of the byte values from 0 - 255.
-   */
-  @Test
-  void allBytesStream() throws SQLException, IOException {
-    byte[] data = new byte[256];
-    for (int i = 0; i < data.length; i++) {
-      data[i] = (byte) i;
+    /**
+     * Test writing a single byte with an offset.
+     */
+    @Test
+    void test1ByteOffset() throws SQLException {
+        byte[] data = {(byte) 'a'};
+        readWrite(10, data);
     }
-    readWriteStream(data);
-  }
 
-  public void readWriteStream(byte[] data) throws SQLException, IOException {
-    readWriteStream(1, data);
-  }
+    /**
+     * Test the writing and reading of a few bytes with an offset.
+     */
+    @Test
+    void manyBytesOffset() throws SQLException {
+        byte[] data = "aaaaaaaaaa".getBytes();
+        readWrite(10, data);
+    }
 
-  /**
-   * Reads then writes data to the blob via a stream.
-   */
-  public void readWriteStream(int offset, byte[] data) throws SQLException, IOException {
-    PreparedStatement ps = conn.prepareStatement(INSERT);
-    ps.executeUpdate();
-    ps.close();
+    /**
+     * Tests all of the byte values from 0 to 255.
+     */
+    @Test
+    void allBytes() throws SQLException {
+        byte[] data = new byte[256];
+        for (int i = 0; i < data.length; i++) {
+            data[i] = (byte) i;
+        }
+        readWrite(data);
+    }
 
-    ps = conn.prepareStatement(SELECT);
-    ResultSet rs = ps.executeQuery();
+    @Test
+    void truncate() throws SQLException {
+        if (!TestUtil.haveMinimumServerVersion(conn, ServerVersion.v8_3)) {
+            return;
+        }
 
-    assertTrue(rs.next());
-    Blob b = rs.getBlob("DATA");
-    OutputStream out = b.setBinaryStream(offset);
-    out.write(data);
-    out.flush();
-    out.close();
+        byte[] data = new byte[100];
+        for (byte i = 0; i < data.length; i++) {
+            data[i] = i;
+        }
+        readWrite(data);
 
-    rs.close();
-    ps.close();
+        PreparedStatement ps = conn.prepareStatement(SELECT);
+        ResultSet rs = ps.executeQuery();
 
-    ps = conn.prepareStatement(SELECT);
-    rs = ps.executeQuery();
+        assertTrue(rs.next());
+        Blob blob = rs.getBlob("DATA");
 
-    assertTrue(rs.next());
-    b = rs.getBlob("DATA");
-    InputStream in = b.getBinaryStream();
-    byte[] rspData = new byte[data.length];
-    in.skip(offset - 1);
-    in.read(rspData);
-    in.close();
+        assertEquals(100, blob.length());
 
-    assertArrayEquals(data, rspData, "Request should be the same as the response");
+        blob.truncate(50);
+        assertEquals(50, blob.length());
 
-    rs.close();
-    ps.close();
-  }
+        blob.truncate(150);
+        assertEquals(150, blob.length());
 
-  @Test
-  void pattern() throws SQLException {
-    byte[] data = "abcdefghijklmnopqrstuvwxyz0123456789".getBytes();
-    byte[] pattern = "def".getBytes();
+        data = blob.getBytes(1, 200);
+        assertEquals(150, data.length);
+        for (byte i = 0; i < 50; i++) {
+            assertEquals(i, data[i]);
+        }
 
-    PreparedStatement ps = conn.prepareStatement(INSERT);
-    ps.executeUpdate();
-    ps.close();
+        for (int i = 50; i < 150; i++) {
+            assertEquals(0, data[i]);
+        }
+    }
 
-    ps = conn.prepareStatement(SELECT);
-    ResultSet rs = ps.executeQuery();
+    /**
+     * @param data data to write
+     * @throws SQLException if something goes wrong
+     */
+    public void readWrite(byte[] data) throws SQLException {
+        readWrite(1, data);
+    }
 
-    assertTrue(rs.next());
-    Blob b = rs.getBlob("DATA");
-    b.setBytes(1, data);
+    /**
+     * @param offset data offset
+     * @param data   data to write
+     * @throws SQLException if something goes wrong
+     */
+    public void readWrite(int offset, byte[] data) throws SQLException {
+        PreparedStatement ps = conn.prepareStatement(INSERT);
+        ps.executeUpdate();
+        ps.close();
 
-    rs.close();
-    ps.close();
+        ps = conn.prepareStatement(SELECT);
+        ResultSet rs = ps.executeQuery();
 
-    ps = conn.prepareStatement(SELECT);
-    rs = ps.executeQuery();
+        assertTrue(rs.next());
+        Blob b = rs.getBlob("DATA");
+        b.setBytes(offset, data);
 
-    assertTrue(rs.next());
-    b = rs.getBlob("DATA");
-    long position = b.position(pattern, 1);
-    byte[] rspData = b.getBytes(position, pattern.length);
-    assertArrayEquals(pattern, rspData, "Request should be the same as the response");
+        rs.close();
+        ps.close();
 
-    rs.close();
-    ps.close();
-  }
+        ps = conn.prepareStatement(SELECT);
+        rs = ps.executeQuery();
+
+        assertTrue(rs.next());
+        b = rs.getBlob("DATA");
+        byte[] rspData = b.getBytes(offset, data.length);
+        assertArrayEquals(data, rspData, "Request should be the same as the response");
+
+        rs.close();
+        ps.close();
+    }
+
+    /**
+     * Test the writing and reading of a single byte.
+     */
+    @Test
+    void test1ByteStream() throws SQLException, IOException {
+        byte[] data = {(byte) 'a'};
+        readWriteStream(data);
+    }
+
+    /**
+     * Test the writing and reading of a few bytes.
+     */
+    @Test
+    void manyBytesStream() throws SQLException, IOException {
+        byte[] data = "aaaaaaaaaa".getBytes();
+        readWriteStream(data);
+    }
+
+    /**
+     * Test writing a single byte with an offset.
+     */
+    @Test
+    void test1ByteOffsetStream() throws SQLException, IOException {
+        byte[] data = {(byte) 'a'};
+        readWriteStream(10, data);
+    }
+
+    /**
+     * Test the writing and reading of a few bytes with an offset.
+     */
+    @Test
+    void manyBytesOffsetStream() throws SQLException, IOException {
+        byte[] data = "aaaaaaaaaa".getBytes();
+        readWriteStream(10, data);
+    }
+
+    /**
+     * Tests all of the byte values from 0 to 255.
+     */
+    @Test
+    void allBytesStream() throws SQLException, IOException {
+        byte[] data = new byte[256];
+        for (int i = 0; i < data.length; i++) {
+            data[i] = (byte) i;
+        }
+        readWriteStream(data);
+    }
+
+    public void readWriteStream(byte[] data) throws SQLException, IOException {
+        readWriteStream(1, data);
+    }
+
+    /**
+     * Writes data to the blob via a stream, then reads it back and verifies it.
+     */
+    public void readWriteStream(int offset, byte[] data) throws SQLException, IOException {
+        PreparedStatement ps = conn.prepareStatement(INSERT);
+        ps.executeUpdate();
+        ps.close();
+
+        ps = conn.prepareStatement(SELECT);
+        ResultSet rs = ps.executeQuery();
+
+        assertTrue(rs.next());
+        Blob b = rs.getBlob("DATA");
+        OutputStream out = b.setBinaryStream(offset);
+        out.write(data);
+        out.flush();
+        out.close();
+
+        rs.close();
+        ps.close();
+
+        ps = conn.prepareStatement(SELECT);
+        rs = ps.executeQuery();
+
+        assertTrue(rs.next());
+        b = rs.getBlob("DATA");
+        InputStream in = b.getBinaryStream();
+        byte[] rspData = new byte[data.length];
+        in.skip(offset - 1);
+        in.read(rspData);
+        in.close();
+
+        assertArrayEquals(data, rspData, "Request should be the same as the response");
+
+        rs.close();
+        ps.close();
+    }
+
+    @Test
+    void pattern() throws SQLException {
+        byte[] data = "abcdefghijklmnopqrstuvwxyz0123456789".getBytes();
+        byte[] pattern = "def".getBytes();
+
+        PreparedStatement ps = conn.prepareStatement(INSERT);
+        ps.executeUpdate();
+        ps.close();
+
+        ps = conn.prepareStatement(SELECT);
+        ResultSet rs = ps.executeQuery();
+
+        assertTrue(rs.next());
+        Blob b = rs.getBlob("DATA");
+        b.setBytes(1, data);
+
+        rs.close();
+        ps.close();
+
+        ps = conn.prepareStatement(SELECT);
+        rs = ps.executeQuery();
+
+        assertTrue(rs.next());
+        b = rs.getBlob("DATA");
+        long position = b.position(pattern, 1);
+        byte[] rspData = b.getBytes(position, pattern.length);
+        assertArrayEquals(pattern, rspData, "Request should be the same as the response");
+
+        rs.close();
+        ps.close();
+    }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc3/Jdbc3CallableStatementTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc3/Jdbc3CallableStatementTest.java
index f0f4b82..dc2ebc4 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc3/Jdbc3CallableStatementTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc3/Jdbc3CallableStatementTest.java
@@ -33,1103 +33,1099 @@ import java.time.LocalDate;
  * @author davec
  */
 public class Jdbc3CallableStatementTest extends BaseTest4 {
-  @BeforeClass
-  public static void beforeClass() throws Exception {
-    try (Connection con = TestUtil.openDB()) {
-      assumeCallableStatementsSupported(con);
-    }
-  }
-
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-    Statement stmt = con.createStatement();
-    stmt.execute(
-        "create temp table numeric_tab (MAX_VAL NUMERIC(30,15), MIN_VAL NUMERIC(30,15), NULL_VAL NUMERIC(30,15) NULL)");
-    stmt.execute("insert into numeric_tab values ( 999999999999999,0.000000000000001, null)");
-    stmt.execute(
-        "CREATE OR REPLACE FUNCTION mysum(a int, b int) returns int AS 'BEGIN return a + b; END;' LANGUAGE plpgsql");
-    stmt.execute(
-        "CREATE OR REPLACE FUNCTION myiofunc(a INOUT int, b OUT int) AS 'BEGIN b := a; a := 1; END;' LANGUAGE plpgsql");
-    stmt.execute(
-        "CREATE OR REPLACE FUNCTION myif(a INOUT int, b IN int) AS 'BEGIN a := b; END;' LANGUAGE plpgsql");
-    stmt.execute(
-            "CREATE OR REPLACE FUNCTION mynoparams() returns int AS 'BEGIN return 733; END;' LANGUAGE plpgsql");
-    stmt.execute(
-            "CREATE OR REPLACE FUNCTION mynoparamsproc() returns void AS 'BEGIN NULL; END;' LANGUAGE plpgsql");
-
-    stmt.execute("create or replace function "
-        + "Numeric_Proc( OUT IMAX NUMERIC(30,15), OUT IMIN NUMERIC(30,15), OUT INUL NUMERIC(30,15))  as "
-        + "'begin "
-        + "select max_val into imax from numeric_tab;"
-        + "select min_val into imin from numeric_tab;"
-        + "select null_val into inul from numeric_tab;"
-
-        + " end;' "
-        + "language plpgsql;");
-
-    stmt.execute("CREATE OR REPLACE FUNCTION test_somein_someout("
-        + "pa IN int4,"
-        + "pb OUT varchar,"
-        + "pc OUT int8)"
-        + " AS "
-
-        + "'begin "
-        + "pb := ''out'';"
-        + "pc := pa + 1;"
-        + "end;'"
-
-        + "LANGUAGE plpgsql VOLATILE;"
-
-    );
-    stmt.execute("CREATE OR REPLACE FUNCTION test_allinout("
-        + "pa INOUT int4,"
-        + "pb INOUT varchar,"
-        + "pc INOUT int8)"
-        + " AS "
-        + "'begin "
-        + "pa := pa + 1;"
-        + "pb := ''foo out'';"
-        + "pc := pa + 1;"
-        + "end;'"
-        + "LANGUAGE plpgsql VOLATILE;"
-    );
-    stmt.execute(
-        "CREATE OR REPLACE FUNCTION testspg__getBooleanWithoutArg() "
-                + "RETURNS boolean AS '  "
-                + "begin return true; end; ' LANGUAGE plpgsql;");
-    stmt.execute(
-            "CREATE OR REPLACE FUNCTION testspg__getBit1WithoutArg() "
-                    + "RETURNS bit(1) AS '  "
-                    + "begin return B''1''; end; ' LANGUAGE plpgsql;");
-    stmt.execute(
-            "CREATE OR REPLACE FUNCTION testspg__getBit2WithoutArg() "
-                    + "RETURNS bit(2) AS '  "
-                    + "begin return B''10''; end; ' LANGUAGE plpgsql;");
-    if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v11)) {
-      stmt.execute(
-          "CREATE OR REPLACE PROCEDURE inonlyprocedure(a IN int) AS 'BEGIN NULL; END;' LANGUAGE plpgsql");
-      stmt.execute(
-          "CREATE OR REPLACE PROCEDURE inoutprocedure(a INOUT int) AS 'BEGIN a := a + a; END;' LANGUAGE plpgsql");
-
-    }
-    if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v14)) {
-      stmt.execute("create or replace PROCEDURE testspg_refcursor(bar date, out cur1 refcursor) "
-          + " as $$ declare begin "
-          + "OPEN cur1 FOR "
-          + "SELECT now() as now; end $$ language plpgsql");
-    }
-  }
-
-  @Override
-  public void tearDown() throws SQLException {
-    Statement stmt = con.createStatement();
-    stmt.execute("drop function Numeric_Proc(out decimal, out decimal, out decimal)");
-    stmt.execute("drop function test_somein_someout(int4)");
-    stmt.execute("drop function test_allinout( inout int4, inout varchar, inout int8)");
-    stmt.execute("drop function mysum(a int, b int)");
-    stmt.execute("drop function myiofunc(a INOUT int, b OUT int) ");
-    stmt.execute("drop function myif(a INOUT int, b IN int)");
-    stmt.execute("drop function mynoparams()");
-    stmt.execute("drop function mynoparamsproc()");
-    stmt.execute("drop function testspg__getBooleanWithoutArg ();");
-    stmt.execute("drop function testspg__getBit1WithoutArg ();");
-    stmt.execute("drop function testspg__getBit2WithoutArg ();");
-    if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v11)) {
-      stmt.execute("drop procedure inonlyprocedure(a IN int)");
-      stmt.execute("drop procedure inoutprocedure(a INOUT int)");
-    }
-    if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v14)) {
-      stmt.execute("DROP PROCEDURE testspg_refcursor(date);");
-    }
-    stmt.close();
-    super.tearDown();
-  }
-
-  @Test
-  public void testSomeInOut() throws Throwable {
-    CallableStatement call = con.prepareCall("{ call test_somein_someout(?,?,?) }");
-
-    call.registerOutParameter(2, Types.VARCHAR);
-    call.registerOutParameter(3, Types.BIGINT);
-    call.setInt(1, 20);
-    call.execute();
-
-  }
-
-  @Test
-  public void testNotEnoughParameters() throws Throwable {
-    CallableStatement cs = con.prepareCall("{call myiofunc(?,?)}");
-    cs.setInt(1, 2);
-    cs.registerOutParameter(2, Types.INTEGER);
-    try {
-      cs.execute();
-      fail("Should throw an exception ");
-    } catch (SQLException ex) {
-      assertTrue(ex.getSQLState().equalsIgnoreCase(PSQLState.SYNTAX_ERROR.getState()));
-    }
-
-  }
-
-  @Test
-  public void testTooManyParameters() throws Throwable {
-    CallableStatement cs = con.prepareCall("{call myif(?,?)}");
-    try {
-      cs.setInt(1, 1);
-      cs.setInt(2, 2);
-      cs.registerOutParameter(1, Types.INTEGER);
-      cs.registerOutParameter(2, Types.INTEGER);
-      cs.execute();
-      fail("should throw an exception");
-    } catch (SQLException ex) {
-      assertTrue(ex.getSQLState().equalsIgnoreCase(PSQLState.SYNTAX_ERROR.getState()));
-    }
-
-  }
-
-  @Test
-  public void testAllInOut() throws Throwable {
-    CallableStatement call = con.prepareCall("{ call test_allinout(?,?,?) }");
-
-    call.registerOutParameter(1, Types.INTEGER);
-    call.registerOutParameter(2, Types.VARCHAR);
-    call.registerOutParameter(3, Types.BIGINT);
-    call.setInt(1, 20);
-    call.setString(2, "hi");
-    call.setInt(3, 123);
-    call.execute();
-    call.getInt(1);
-    call.getString(2);
-    call.getLong(3);
-
-  }
-
-  @Test
-  public void testNumeric() throws Throwable {
-    CallableStatement call = con.prepareCall("{ call Numeric_Proc(?,?,?) }");
-
-    call.registerOutParameter(1, Types.NUMERIC, 15);
-    call.registerOutParameter(2, Types.NUMERIC, 15);
-    call.registerOutParameter(3, Types.NUMERIC, 15);
-
-    call.executeUpdate();
-    BigDecimal ret = call.getBigDecimal(1);
-    assertTrue(
-        "correct return from getNumeric () should be 999999999999999.000000000000000 but returned "
-            + ret.toString(),
-        ret.equals(new BigDecimal("999999999999999.000000000000000")));
-
-    ret = call.getBigDecimal(2);
-    assertTrue("correct return from getNumeric ()",
-        ret.equals(new BigDecimal("0.000000000000001")));
-    try {
-      ret = call.getBigDecimal(3);
-    } catch (NullPointerException ex) {
-      assertTrue("This should be null", call.wasNull());
-    }
-  }
-
-  @Test
-  public void testGetObjectDecimal() throws Throwable {
-    try {
-      Statement stmt = con.createStatement();
-      stmt.execute(
-          "create temp table decimal_tab ( max_val numeric(30,15), min_val numeric(30,15), nul_val numeric(30,15) )");
-      stmt.execute(
-          "insert into decimal_tab values (999999999999999.000000000000000,0.000000000000001,null)");
-
-      boolean ret = stmt.execute("create or replace function "
-          + "decimal_proc( OUT pmax numeric, OUT pmin numeric, OUT nval numeric)  as "
-          + "'begin "
-          + "select max_val into pmax from decimal_tab;"
-          + "select min_val into pmin from decimal_tab;"
-          + "select nul_val into nval from decimal_tab;"
-
-          + " end;' "
-          + "language plpgsql;");
-    } catch (Exception ex) {
-      fail(ex.getMessage());
-      throw ex;
-    }
-    try {
-      CallableStatement cstmt = con.prepareCall("{ call decimal_proc(?,?,?) }");
-      cstmt.registerOutParameter(1, Types.DECIMAL);
-      cstmt.registerOutParameter(2, Types.DECIMAL);
-      cstmt.registerOutParameter(3, Types.DECIMAL);
-      cstmt.executeUpdate();
-      BigDecimal val = (BigDecimal) cstmt.getObject(1);
-      assertEquals(0, val.compareTo(new BigDecimal("999999999999999.000000000000000")));
-      val = (BigDecimal) cstmt.getObject(2);
-      assertEquals(0, val.compareTo(new BigDecimal("0.000000000000001")));
-      val = (BigDecimal) cstmt.getObject(3);
-      assertNull(val);
-    } catch (Exception ex) {
-      fail(ex.getMessage());
-    } finally {
-      try {
-        Statement dstmt = con.createStatement();
-        dstmt.execute("drop function decimal_proc()");
-      } catch (Exception ex) {
-      }
-    }
-  }
-
-  @Test
-  public void testVarcharBool() throws Throwable {
-    try {
-      Statement stmt = con.createStatement();
-      stmt.execute("create temp table vartab( max_val text, min_val text)");
-      stmt.execute("insert into vartab values ('a','b')");
-      boolean ret = stmt.execute("create or replace function "
-          + "updatevarchar( in imax text, in imin text)  returns int as "
-          + "'begin "
-          + "update vartab set max_val = imax;"
-          + "update vartab set min_val = imin;"
-          + "return 0;"
-          + " end;' "
-          + "language plpgsql;");
-      stmt.close();
-    } catch (Exception ex) {
-      fail(ex.getMessage());
-      throw ex;
-    }
-    try {
-      CallableStatement cstmt = con.prepareCall("{ call updatevarchar(?,?) }");
-      cstmt.setObject(1, Boolean.TRUE, Types.VARCHAR);
-      cstmt.setObject(2, Boolean.FALSE, Types.VARCHAR);
-
-      cstmt.executeUpdate();
-      cstmt.close();
-      ResultSet rs = con.createStatement().executeQuery("select * from vartab");
-      assertTrue(rs.next());
-      assertTrue(rs.getString(1).equals(Boolean.TRUE.toString()));
-
-      assertTrue(rs.getString(2).equals(Boolean.FALSE.toString()));
-      rs.close();
-    } catch (Exception ex) {
-      fail(ex.getMessage());
-    } finally {
-      try {
-        Statement dstmt = con.createStatement();
-        dstmt.execute("drop function updatevarchar(text,text)");
-      } catch (Exception ex) {
-      }
-    }
-  }
-
-  @Test
-  public void testInOut() throws Throwable {
-    try {
-      Statement stmt = con.createStatement();
-      stmt.execute(createBitTab);
-      stmt.execute(insertBitTab);
-      boolean ret = stmt.execute("create or replace function "
-          + "insert_bit( inout IMAX boolean, inout IMIN boolean, inout INUL boolean)  as "
-          + "'begin "
-          + "insert into bit_tab values( imax, imin, inul);"
-          + "select max_val into imax from bit_tab;"
-          + "select min_val into imin from bit_tab;"
-          + "select null_val into inul from bit_tab;"
-          + " end;' "
-          + "language plpgsql;");
-    } catch (Exception ex) {
-      fail(ex.getMessage());
-      throw ex;
-    }
-    try {
-      CallableStatement cstmt = con.prepareCall("{ call insert_bit(?,?,?) }");
-      cstmt.setObject(1, "true", Types.BIT);
-      cstmt.setObject(2, "false", Types.BIT);
-      cstmt.setNull(3, Types.BIT);
-      cstmt.registerOutParameter(1, Types.BIT);
-      cstmt.registerOutParameter(2, Types.BIT);
-      cstmt.registerOutParameter(3, Types.BIT);
-      cstmt.executeUpdate();
-
-      assertTrue(cstmt.getBoolean(1));
-      assertFalse(cstmt.getBoolean(2));
-      cstmt.getBoolean(3);
-      assertTrue(cstmt.wasNull());
-    } finally {
-      try {
-        Statement dstmt = con.createStatement();
-        dstmt.execute("drop function insert_bit(boolean, boolean, boolean)");
-      } catch (Exception ex) {
-      }
-    }
-  }
-
-  private final String createBitTab =
-      "create temp table bit_tab ( max_val boolean, min_val boolean, null_val boolean )";
-  private final String insertBitTab = "insert into bit_tab values (true,false,null)";
-
-  @Test
-  public void testSetObjectBit() throws Throwable {
-    try {
-      Statement stmt = con.createStatement();
-      stmt.execute(createBitTab);
-      stmt.execute(insertBitTab);
-      boolean ret = stmt.execute("create or replace function "
-          + "update_bit( in IMAX boolean, in IMIN boolean, in INUL boolean) returns int as "
-          + "'begin "
-          + "update bit_tab set  max_val = imax;"
-          + "update bit_tab set  min_val = imin;"
-          + "update bit_tab set  min_val = inul;"
-          + " return 0;"
-          + " end;' "
-          + "language plpgsql;");
-    } catch (Exception ex) {
-      fail(ex.getMessage());
-      throw ex;
-    }
-    try {
-      CallableStatement cstmt = con.prepareCall("{ call update_bit(?,?,?) }");
-      cstmt.setObject(1, "true", Types.BIT);
-      cstmt.setObject(2, "false", Types.BIT);
-      cstmt.setNull(3, Types.BIT);
-      cstmt.executeUpdate();
-      cstmt.close();
-      ResultSet rs = con.createStatement().executeQuery("select * from bit_tab");
-
-      assertTrue(rs.next());
-      assertTrue(rs.getBoolean(1));
-      assertFalse(rs.getBoolean(2));
-      rs.getBoolean(3);
-      assertTrue(rs.wasNull());
-    } catch (Exception ex) {
-      fail(ex.getMessage());
-    } finally {
-      try {
-        Statement dstmt = con.createStatement();
-        dstmt.execute("drop function update_bit(boolean, boolean, boolean)");
-      } catch (Exception ex) {
-      }
-    }
-  }
-
-  @Test
-  public void testGetBit1WithoutArg() throws SQLException {
-    assumeNotSimpleQueryMode();
-    try (CallableStatement call = con.prepareCall("{ ? = call testspg__getBit1WithoutArg () }")) {
-      call.registerOutParameter(1, Types.BOOLEAN);
-      call.execute();
-      assertTrue(call.getBoolean(1));
-    }
-  }
-
-  @Test
-  public void testGetBit2WithoutArg() throws SQLException {
-    assumeNotSimpleQueryMode();
-    try (CallableStatement call = con.prepareCall("{ ? = call testspg__getBit2WithoutArg () }")) {
-      call.registerOutParameter(1, Types.BOOLEAN);
-      try {
-        call.execute();
-        assertTrue(call.getBoolean(1));
-        fail("#getBoolean(int) on bit(2) should throw");
-      } catch (SQLException e) {
-        assertEquals(PSQLState.CANNOT_COERCE.getState(), e.getSQLState());
-      }
-    }
-  }
-
-  @Test
-  public void testGetObjectLongVarchar() throws Throwable {
-    try {
-      Statement stmt = con.createStatement();
-      stmt.execute("create temp table longvarchar_tab ( t text, null_val text )");
-      stmt.execute("insert into longvarchar_tab values ('testdata',null)");
-      boolean ret = stmt.execute("create or replace function "
-          + "longvarchar_proc( OUT pcn text, OUT nval text)  as "
-          + "'begin "
-          + "select t into pcn from longvarchar_tab;"
-          + "select null_val into nval from longvarchar_tab;"
-
-          + " end;' "
-          + "language plpgsql;");
-
-      ret = stmt.execute("create or replace function "
-          + "lvarchar_in_name( IN pcn text) returns int as "
-          + "'begin "
-          + "update longvarchar_tab set t=pcn;"
-          + "return 0;"
-          + " end;' "
-          + "language plpgsql;");
-    } catch (Exception ex) {
-      fail(ex.getMessage());
-      throw ex;
-    }
-    try {
-      CallableStatement cstmt = con.prepareCall("{ call longvarchar_proc(?,?) }");
-      cstmt.registerOutParameter(1, Types.LONGVARCHAR);
-      cstmt.registerOutParameter(2, Types.LONGVARCHAR);
-      cstmt.executeUpdate();
-      String val = (String) cstmt.getObject(1);
-      assertEquals("testdata", val);
-      val = (String) cstmt.getObject(2);
-      assertNull(val);
-      cstmt.close();
-      cstmt = con.prepareCall("{ call lvarchar_in_name(?) }");
-      String maxFloat = "3.4E38";
-      cstmt.setObject(1, Float.valueOf(maxFloat), Types.LONGVARCHAR);
-      cstmt.executeUpdate();
-      cstmt.close();
-      Statement stmt = con.createStatement();
-      ResultSet rs = stmt.executeQuery("select * from longvarchar_tab");
-      assertTrue(rs.next());
-      String rval = (String) rs.getObject(1);
-      assertEquals(rval.trim(), maxFloat.trim());
-    } catch (Exception ex) {
-      fail(ex.getMessage());
-    } finally {
-      try {
-        Statement dstmt = con.createStatement();
-        dstmt.execute("drop function longvarchar_proc()");
-        dstmt.execute("drop function lvarchar_in_name(text)");
-      } catch (Exception ex) {
-      }
-    }
-  }
-
-  @Test
-  public void testGetBytes01() throws Throwable {
-    assumeByteaSupported();
-    byte[] testdata = "TestData".getBytes();
-    try {
-      Statement stmt = con.createStatement();
-      stmt.execute("create temp table varbinary_tab ( vbinary bytea, null_val bytea )");
-      boolean ret = stmt.execute("create or replace function "
-          + "varbinary_proc( OUT pcn bytea, OUT nval bytea)  as "
-          + "'begin "
-          + "select vbinary into pcn from varbinary_tab;"
-          + "select null_val into nval from varbinary_tab;"
-
-          + " end;' "
-          + "language plpgsql;");
-      stmt.close();
-      PreparedStatement pstmt = con.prepareStatement("insert into varbinary_tab values (?,?)");
-      pstmt.setBytes(1, testdata);
-      pstmt.setBytes(2, null);
-
-      pstmt.executeUpdate();
-      pstmt.close();
-    } catch (Exception ex) {
-      fail(ex.getMessage());
-      throw ex;
-    }
-    try {
-      CallableStatement cstmt = con.prepareCall("{ call varbinary_proc(?,?) }");
-      cstmt.registerOutParameter(1, Types.VARBINARY);
-      cstmt.registerOutParameter(2, Types.VARBINARY);
-      cstmt.executeUpdate();
-      byte[] retval = cstmt.getBytes(1);
-      for (int i = 0; i < testdata.length; i++) {
-        assertEquals(testdata[i], retval[i]);
-      }
-
-      retval = cstmt.getBytes(2);
-      assertNull(retval);
-    } catch (Exception ex) {
-      fail(ex.getMessage());
-    } finally {
-      try {
-        Statement dstmt = con.createStatement();
-        dstmt.execute("drop function varbinary_proc()");
-      } catch (Exception ex) {
-      }
-    }
-  }
-
-  private final String createDecimalTab =
-      "create temp table decimal_tab ( max_val float, min_val float, null_val float )";
-  private final String insertDecimalTab = "insert into decimal_tab values (1.0E125,1.0E-130,null)";
-  private final String createFloatProc = "create or replace function "
-      + "float_proc( OUT IMAX float, OUT IMIN float, OUT INUL float)  as "
-      + "'begin "
-      + "select max_val into imax from decimal_tab;"
-      + "select min_val into imin from decimal_tab;"
-      + "select null_val into inul from decimal_tab;"
-      + " end;' "
-      + "language plpgsql;";
-
-  private final String createUpdateFloat = "create or replace function "
-      + "updatefloat_proc ( IN maxparm float, IN minparm float ) returns int as "
-      + "'begin "
-      + "update decimal_tab set max_val=maxparm;"
-      + "update decimal_tab set min_val=minparm;"
-      + "return 0;"
-      + " end;' "
-      + "language plpgsql;";
-
-  private final String createRealTab =
-      "create temp table real_tab ( max_val float(25), min_val float(25), null_val float(25) )";
-  private final String insertRealTab = "insert into real_tab values (1.0E37,1.0E-37, null)";
-
-  private final String dropFloatProc = "drop function float_proc()";
-  private final String createUpdateReal = "create or replace function "
-      + "update_real_proc ( IN maxparm float(25), IN minparm float(25) ) returns int as "
-      + "'begin "
-      + "update real_tab set max_val=maxparm;"
-      + "update real_tab set min_val=minparm;"
-      + "return 0;"
-      + " end;' "
-      + "language plpgsql;";
-  private final String dropUpdateReal = "drop function update_real_proc(float, float)";
-  private final double[] doubleValues = {1.0E125, 1.0E-130};
-  private final float[] realValues = {(float) 1.0E37, (float) 1.0E-37};
-  private final int[] intValues = {2147483647, -2147483648};
-
-  @Test
-  public void testUpdateReal() throws Throwable {
-    try {
-      Statement stmt = con.createStatement();
-      stmt.execute(createRealTab);
-      boolean ret = stmt.execute(createUpdateReal);
-
-      stmt.execute(insertRealTab);
-      stmt.close();
-    } catch (Exception ex) {
-      fail(ex.getMessage());
-      throw ex;
-    }
-    try {
-      CallableStatement cstmt = con.prepareCall("{ call update_real_proc(?,?) }");
-      BigDecimal val = new BigDecimal(intValues[0]);
-      float x = val.floatValue();
-      cstmt.setObject(1, val, Types.REAL);
-      val = new BigDecimal(intValues[1]);
-      cstmt.setObject(2, val, Types.REAL);
-      cstmt.executeUpdate();
-      cstmt.close();
-      ResultSet rs = con.createStatement().executeQuery("select * from real_tab");
-      assertTrue(rs.next());
-      Float oVal = (float) intValues[0];
-      Float rVal = Float.valueOf(rs.getObject(1).toString());
-      assertTrue(oVal.equals(rVal));
-      oVal = (float) intValues[1];
-      rVal = Float.valueOf(rs.getObject(2).toString());
-      assertTrue(oVal.equals(rVal));
-      rs.close();
-    } catch (Exception ex) {
-      fail(ex.getMessage());
-    } finally {
-      try {
-        Statement dstmt = con.createStatement();
-        dstmt.execute(dropUpdateReal);
-        dstmt.close();
-      } catch (Exception ex) {
-      }
-    }
-  }
-
-  @Test
-  public void testUpdateDecimal() throws Throwable {
-    try {
-      Statement stmt = con.createStatement();
-      stmt.execute(createDecimalTab);
-      boolean ret = stmt.execute(createUpdateFloat);
-      stmt.close();
-      PreparedStatement pstmt = con.prepareStatement("insert into decimal_tab values (?,?)");
-      // note these are reversed on purpose
-      pstmt.setDouble(1, doubleValues[1]);
-      pstmt.setDouble(2, doubleValues[0]);
-
-      pstmt.executeUpdate();
-      pstmt.close();
-    } catch (Exception ex) {
-      fail(ex.getMessage());
-      throw ex;
-    }
-    try {
-      CallableStatement cstmt = con.prepareCall("{ call updatefloat_proc(?,?) }");
-      cstmt.setDouble(1, doubleValues[0]);
-      cstmt.setDouble(2, doubleValues[1]);
-      cstmt.executeUpdate();
-      cstmt.close();
-      ResultSet rs = con.createStatement().executeQuery("select * from decimal_tab");
-      assertTrue(rs.next());
-      assertTrue(rs.getDouble(1) == doubleValues[0]);
-      assertTrue(rs.getDouble(2) == doubleValues[1]);
-      rs.close();
-    } catch (Exception ex) {
-      fail(ex.getMessage());
-    } finally {
-      try {
-        Statement dstmt = con.createStatement();
-        dstmt.execute("drop function updatefloat_proc(float, float)");
-      } catch (Exception ex) {
-      }
-    }
-  }
-
-  @Test
-  public void testGetBytes02() throws Throwable {
-    assumeByteaSupported();
-    byte[] testdata = "TestData".getBytes();
-    try {
-      Statement stmt = con.createStatement();
-      stmt.execute("create temp table longvarbinary_tab ( vbinary bytea, null_val bytea )");
-      boolean ret = stmt.execute("create or replace function "
-          + "longvarbinary_proc( OUT pcn bytea, OUT nval bytea)  as "
-          + "'begin "
-          + "select vbinary into pcn from longvarbinary_tab;"
-          + "select null_val into nval from longvarbinary_tab;"
-
-          + " end;' "
-          + "language plpgsql;");
-      stmt.close();
-      PreparedStatement pstmt = con.prepareStatement("insert into longvarbinary_tab values (?,?)");
-      pstmt.setBytes(1, testdata);
-      pstmt.setBytes(2, null);
-
-      pstmt.executeUpdate();
-      pstmt.close();
-    } catch (Exception ex) {
-      fail(ex.getMessage());
-      throw ex;
-    }
-    try {
-      CallableStatement cstmt = con.prepareCall("{ call longvarbinary_proc(?,?) }");
-      cstmt.registerOutParameter(1, Types.LONGVARBINARY);
-      cstmt.registerOutParameter(2, Types.LONGVARBINARY);
-      cstmt.executeUpdate();
-      byte[] retval = cstmt.getBytes(1);
-      for (int i = 0; i < testdata.length; i++) {
-        assertEquals(testdata[i], retval[i]);
-      }
-
-      retval = cstmt.getBytes(2);
-      assertNull(retval);
-    } catch (Exception ex) {
-      fail(ex.getMessage());
-    } finally {
-      try {
-        Statement dstmt = con.createStatement();
-        dstmt.execute("drop function longvarbinary_proc()");
-      } catch (Exception ex) {
-      }
-    }
-  }
-
-  @Test
-  public void testGetObjectFloat() throws Throwable {
-    try {
-      Statement stmt = con.createStatement();
-      stmt.execute(createDecimalTab);
-      stmt.execute(insertDecimalTab);
-      boolean ret = stmt.execute(createFloatProc);
-    } catch (Exception ex) {
-      fail(ex.getMessage());
-      throw ex;
-    }
-    try {
-      CallableStatement cstmt = con.prepareCall("{ call float_proc(?,?,?) }");
-      cstmt.registerOutParameter(1, java.sql.Types.FLOAT);
-      cstmt.registerOutParameter(2, java.sql.Types.FLOAT);
-      cstmt.registerOutParameter(3, java.sql.Types.FLOAT);
-      cstmt.executeUpdate();
-      Double val = (Double) cstmt.getObject(1);
-      assertTrue(val.doubleValue() == doubleValues[0]);
-
-      val = (Double) cstmt.getObject(2);
-      assertTrue(val.doubleValue() == doubleValues[1]);
-
-      val = (Double) cstmt.getObject(3);
-      assertTrue(cstmt.wasNull());
-    } catch (Exception ex) {
-      fail(ex.getMessage());
-    } finally {
-      try {
-        Statement dstmt = con.createStatement();
-        dstmt.execute(dropFloatProc);
-      } catch (Exception ex) {
-      }
-    }
-  }
-
-  @Test
-  public void testGetDouble01() throws Throwable {
-    try {
-      Statement stmt = con.createStatement();
-      stmt.execute("create temp table d_tab ( max_val float, min_val float, null_val float )");
-      stmt.execute("insert into d_tab values (1.0E125,1.0E-130,null)");
-      boolean ret = stmt.execute("create or replace function "
-          + "double_proc( OUT IMAX float, OUT IMIN float, OUT INUL float)  as "
-          + "'begin "
-          + "select max_val into imax from d_tab;"
-          + "select min_val into imin from d_tab;"
-          + "select null_val into inul from d_tab;"
-
-          + " end;' "
-          + "language plpgsql;");
-    } catch (Exception ex) {
-      fail(ex.getMessage());
-      throw ex;
-    }
-    try {
-      CallableStatement cstmt = con.prepareCall("{ call double_proc(?,?,?) }");
-      cstmt.registerOutParameter(1, java.sql.Types.DOUBLE);
-      cstmt.registerOutParameter(2, java.sql.Types.DOUBLE);
-      cstmt.registerOutParameter(3, java.sql.Types.DOUBLE);
-      cstmt.executeUpdate();
-      assertTrue(cstmt.getDouble(1) == 1.0E125);
-      assertTrue(cstmt.getDouble(2) == 1.0E-130);
-      cstmt.getDouble(3);
-      assertTrue(cstmt.wasNull());
-    } catch (Exception ex) {
-      fail(ex.getMessage());
-    } finally {
-      try {
-        Statement dstmt = con.createStatement();
-        dstmt.execute("drop function double_proc()");
-      } catch (Exception ex) {
-      }
-    }
-  }
-
-  @Test
-  public void testGetDoubleAsReal() throws Throwable {
-    try {
-      Statement stmt = con.createStatement();
-      stmt.execute("create temp table d_tab ( max_val float, min_val float, null_val float )");
-      stmt.execute("insert into d_tab values (3.4E38,1.4E-45,null)");
-      boolean ret = stmt.execute("create or replace function "
-          + "double_proc( OUT IMAX float, OUT IMIN float, OUT INUL float)  as "
-          + "'begin "
-          + "select max_val into imax from d_tab;"
-          + "select min_val into imin from d_tab;"
-          + "select null_val into inul from d_tab;"
-
-          + " end;' "
-          + "language plpgsql;");
-    } catch (Exception ex) {
-      fail(ex.getMessage());
-      throw ex;
-    }
-    try {
-      CallableStatement cstmt = con.prepareCall("{ call double_proc(?,?,?) }");
-      cstmt.registerOutParameter(1, java.sql.Types.REAL);
-      cstmt.registerOutParameter(2, java.sql.Types.REAL);
-      cstmt.registerOutParameter(3, java.sql.Types.REAL);
-      cstmt.executeUpdate();
-      assertTrue(cstmt.getFloat(1) == 3.4E38f);
-      assertTrue(cstmt.getFloat(2) == 1.4E-45f);
-      cstmt.getFloat(3);
-      assertTrue(cstmt.wasNull());
-    } catch (Exception ex) {
-      fail(ex.getMessage());
-    } finally {
-      try {
-        Statement dstmt = con.createStatement();
-        dstmt.execute("drop function double_proc()");
-      } catch (Exception ex) {
-      }
-    }
-  }
-
-  @Test
-  public void testGetShort01() throws Throwable {
-    try {
-      Statement stmt = con.createStatement();
-      stmt.execute("create temp table short_tab ( max_val int2, min_val int2, null_val int2 )");
-      stmt.execute("insert into short_tab values (32767,-32768,null)");
-      boolean ret = stmt.execute("create or replace function "
-          + "short_proc( OUT IMAX int2, OUT IMIN int2, OUT INUL int2)  as "
-          + "'begin "
-          + "select max_val into imax from short_tab;"
-          + "select min_val into imin from short_tab;"
-          + "select null_val into inul from short_tab;"
-
-          + " end;' "
-          + "language plpgsql;");
-    } catch (Exception ex) {
-      fail(ex.getMessage());
-      throw ex;
-    }
-    try {
-      CallableStatement cstmt = con.prepareCall("{ call short_proc(?,?,?) }");
-      cstmt.registerOutParameter(1, java.sql.Types.SMALLINT);
-      cstmt.registerOutParameter(2, java.sql.Types.SMALLINT);
-      cstmt.registerOutParameter(3, java.sql.Types.SMALLINT);
-      cstmt.executeUpdate();
-      assertEquals(32767, cstmt.getShort(1));
-      assertEquals(-32768, cstmt.getShort(2));
-      cstmt.getShort(3);
-      assertTrue(cstmt.wasNull());
-    } catch (Exception ex) {
-      fail(ex.getMessage());
-    } finally {
-      try {
-        Statement dstmt = con.createStatement();
-        dstmt.execute("drop function short_proc()");
-      } catch (Exception ex) {
-      }
-    }
-  }
-
-  @Test
-  public void testGetInt01() throws Throwable {
-    try {
-      Statement stmt = con.createStatement();
-      stmt.execute("create temp table i_tab ( max_val int, min_val int, null_val int )");
-      stmt.execute("insert into i_tab values (2147483647,-2147483648,null)");
-      boolean ret = stmt.execute("create or replace function "
-          + "int_proc( OUT IMAX int, OUT IMIN int, OUT INUL int)  as "
-          + "'begin "
-          + "select max_val into imax from i_tab;"
-          + "select min_val into imin from i_tab;"
-          + "select null_val into inul from i_tab;"
-
-          + " end;' "
-          + "language plpgsql;");
-    } catch (Exception ex) {
-      fail(ex.getMessage());
-      throw ex;
-    }
-    try {
-      CallableStatement cstmt = con.prepareCall("{ call int_proc(?,?,?) }");
-      cstmt.registerOutParameter(1, java.sql.Types.INTEGER);
-      cstmt.registerOutParameter(2, java.sql.Types.INTEGER);
-      cstmt.registerOutParameter(3, java.sql.Types.INTEGER);
-      cstmt.executeUpdate();
-      assertEquals(2147483647, cstmt.getInt(1));
-      assertEquals(-2147483648, cstmt.getInt(2));
-      cstmt.getInt(3);
-      assertTrue(cstmt.wasNull());
-    } catch (Exception ex) {
-      fail(ex.getMessage());
-    } finally {
-      try {
-        Statement dstmt = con.createStatement();
-        dstmt.execute("drop function int_proc()");
-      } catch (Exception ex) {
-      }
-    }
-  }
-
-  @Test
-  public void testGetLong01() throws Throwable {
-    try {
-      Statement stmt = con.createStatement();
-      stmt.execute("create temp table l_tab ( max_val int8, min_val int8, null_val int8 )");
-      stmt.execute("insert into l_tab values (9223372036854775807,-9223372036854775808,null)");
-      boolean ret = stmt.execute("create or replace function "
-          + "bigint_proc( OUT IMAX int8, OUT IMIN int8, OUT INUL int8)  as "
-          + "'begin "
-          + "select max_val into imax from l_tab;"
-          + "select min_val into imin from l_tab;"
-          + "select null_val into inul from l_tab;"
-
-          + " end;' "
-          + "language plpgsql;");
-    } catch (Exception ex) {
-      fail(ex.getMessage());
-      throw ex;
-    }
-    try {
-      CallableStatement cstmt = con.prepareCall("{ call bigint_proc(?,?,?) }");
-      cstmt.registerOutParameter(1, java.sql.Types.BIGINT);
-      cstmt.registerOutParameter(2, java.sql.Types.BIGINT);
-      cstmt.registerOutParameter(3, java.sql.Types.BIGINT);
-      cstmt.executeUpdate();
-      assertEquals(9223372036854775807L, cstmt.getLong(1));
-      assertEquals(-9223372036854775808L, cstmt.getLong(2));
-      cstmt.getLong(3);
-      assertTrue(cstmt.wasNull());
-    } catch (Exception ex) {
-      fail(ex.getMessage());
-    } finally {
-      try {
-        Statement dstmt = con.createStatement();
-        dstmt.execute("drop function bigint_proc()");
-      } catch (Exception ex) {
-      }
-    }
-  }
-
-  @Test
-  public void testGetBoolean01() throws Throwable {
-    try {
-      Statement stmt = con.createStatement();
-      stmt.execute(createBitTab);
-      stmt.execute(insertBitTab);
-      boolean ret = stmt.execute("create or replace function "
-          + "bit_proc( OUT IMAX boolean, OUT IMIN boolean, OUT INUL boolean)  as "
-          + "'begin "
-          + "select max_val into imax from bit_tab;"
-          + "select min_val into imin from bit_tab;"
-          + "select null_val into inul from bit_tab;"
-
-          + " end;' "
-          + "language plpgsql;");
-    } catch (Exception ex) {
-      fail(ex.getMessage());
-      throw ex;
-    }
-    try {
-      CallableStatement cstmt = con.prepareCall("{ call bit_proc(?,?,?) }");
-      cstmt.registerOutParameter(1, java.sql.Types.BIT);
-      cstmt.registerOutParameter(2, java.sql.Types.BIT);
-      cstmt.registerOutParameter(3, java.sql.Types.BIT);
-      cstmt.executeUpdate();
-      assertTrue(cstmt.getBoolean(1));
-      assertFalse(cstmt.getBoolean(2));
-      cstmt.getBoolean(3);
-      assertTrue(cstmt.wasNull());
-    } catch (Exception ex) {
-      fail(ex.getMessage());
-    } finally {
-      try {
-        Statement dstmt = con.createStatement();
-        dstmt.execute("drop function bit_proc()");
-      } catch (Exception ex) {
-      }
-    }
-  }
-
-  @Test
-  public void testGetBooleanWithoutArg() throws SQLException {
-    assumeNotSimpleQueryMode();
-    try (CallableStatement call = con.prepareCall("{ ? = call testspg__getBooleanWithoutArg () }")) {
-      call.registerOutParameter(1, Types.BOOLEAN);
-      call.execute();
-      assertTrue(call.getBoolean(1));
-    }
-  }
-
-  @Test
-  public void testGetByte01() throws Throwable {
-    try {
-      Statement stmt = con.createStatement();
-      stmt.execute("create temp table byte_tab ( max_val int2, min_val int2, null_val int2 )");
-      stmt.execute("insert into byte_tab values (127,-128,null)");
-      boolean ret = stmt.execute("create or replace function "
-          + "byte_proc( OUT IMAX int2, OUT IMIN int2, OUT INUL int2)  as "
-          + "'begin "
-          + "select max_val into imax from byte_tab;"
-          + "select min_val into imin from byte_tab;"
-          + "select null_val into inul from byte_tab;"
-
-          + " end;' "
-          + "language plpgsql;");
-    } catch (Exception ex) {
-      fail(ex.getMessage());
-      throw ex;
-    }
-    try {
-      CallableStatement cstmt = con.prepareCall("{ call byte_proc(?,?,?) }");
-      cstmt.registerOutParameter(1, java.sql.Types.TINYINT);
-      cstmt.registerOutParameter(2, java.sql.Types.TINYINT);
-      cstmt.registerOutParameter(3, java.sql.Types.TINYINT);
-      cstmt.executeUpdate();
-      assertEquals(127, cstmt.getByte(1));
-      assertEquals(-128, cstmt.getByte(2));
-      cstmt.getByte(3);
-      assertTrue(cstmt.wasNull());
-    } catch (Exception ex) {
-      fail(ex.getMessage());
-    } finally {
-      try {
-        Statement dstmt = con.createStatement();
-        dstmt.execute("drop function byte_proc()");
-      } catch (Exception ex) {
-      }
-    }
-  }
-
-  @Test
-  public void testMultipleOutExecutions() throws SQLException {
-    CallableStatement cs = con.prepareCall("{call myiofunc(?, ?)}");
-    for (int i = 0; i < 10; i++) {
-      cs.registerOutParameter(1, Types.INTEGER);
-      cs.registerOutParameter(2, Types.INTEGER);
-      cs.setInt(1, i);
-      cs.execute();
-      assertEquals(1, cs.getInt(1));
-      assertEquals(i, cs.getInt(2));
-      cs.clearParameters();
-    }
-  }
-
-  @Test
-  public void testSum() throws SQLException {
-    CallableStatement cs = con.prepareCall("{?= call mysum(?, ?)}");
-    cs.registerOutParameter(1, Types.INTEGER);
-    cs.setInt(2, 2);
-    cs.setInt(3, 3);
-    cs.execute();
-    assertEquals("2+3 should be 5 when executed via {?= call mysum(?, ?)}", 5, cs.getInt(1));
-  }
-
-  @Test
-  public void testFunctionNoParametersWithParentheses() throws SQLException {
-    CallableStatement cs = con.prepareCall("{?= call mynoparams()}");
-    cs.registerOutParameter(1, Types.INTEGER);
-    cs.execute();
-    assertEquals("{?= call mynoparam()} should return 733, but did not.", 733, cs.getInt(1));
-    TestUtil.closeQuietly(cs);
-  }
-
-  @Test
-  public void testFunctionNoParametersWithoutParentheses() throws SQLException {
-    CallableStatement cs = con.prepareCall("{?= call mynoparams}");
-    cs.registerOutParameter(1, Types.INTEGER);
-    cs.execute();
-    assertEquals("{?= call mynoparam()} should return 733, but did not.", 733, cs.getInt(1));
-    TestUtil.closeQuietly(cs);
-  }
-
-  @Test
-  public void testProcedureNoParametersWithParentheses() throws SQLException {
-    CallableStatement cs = con.prepareCall("{ call mynoparamsproc()}");
-    cs.execute();
-    TestUtil.closeQuietly(cs);
-  }
-
-  @Test
-  public void testProcedureNoParametersWithoutParentheses() throws SQLException {
-    CallableStatement cs = con.prepareCall("{ call mynoparamsproc}");
-    cs.execute();
-    TestUtil.closeQuietly(cs);
-  }
-
-  @Test
-  public void testProcedureInOnlyNativeCall() throws SQLException {
-    assumeMinimumServerVersion(ServerVersion.v11);
-    CallableStatement cs = con.prepareCall("call inonlyprocedure(?)");
-    cs.setInt(1, 5);
-    cs.execute();
-    TestUtil.closeQuietly(cs);
-  }
-
-  @Test
-  public void testProcedureInOutNativeCall() throws SQLException {
-    assumeMinimumServerVersion(ServerVersion.v11);
-    // inoutprocedure(a INOUT int) returns a*2 via the INOUT parameter
-    CallableStatement cs = con.prepareCall("call inoutprocedure(?)");
-    cs.setInt(1, 5);
-    cs.registerOutParameter(1, Types.INTEGER);
-    cs.execute();
-    assertEquals("call inoutprocedure(?) should return 10 (when input param = 5) via the INOUT parameter, but did not.", 10, cs.getInt(1));
-    TestUtil.closeQuietly(cs);
-  }
-
-  @Test
-  public void testCall5Times() throws SQLException {
-    assumeMinimumServerVersion(ServerVersion.v14);
-    // call this enough times to change to binary mode
-    for (int i = 0; i < 6; i++) {
-      con.setAutoCommit(false);
-      try (CallableStatement proc = con.prepareCall("call testspg_refcursor( ? , ? )")) {
-        proc.setDate(1, java.sql.Date.valueOf(LocalDate.now()));
-        proc.registerOutParameter(2, Types.REF_CURSOR);
-        proc.execute();
-        try (ResultSet results = (ResultSet) proc.getObject(2)) {
-          while (results.next()) {
-            System.out.println("  " + i + " " + results.getTimestamp("now").toLocalDateTime());
-          }
+    private final String createBitTab =
+            "create temp table bit_tab ( max_val boolean, min_val boolean, null_val boolean )";
+    private final String insertBitTab = "insert into bit_tab values (true,false,null)";
+    private final String createDecimalTab =
+            "create temp table decimal_tab ( max_val float, min_val float, null_val float )";
+    private final String insertDecimalTab = "insert into decimal_tab values (1.0E125,1.0E-130,null)";
+    private final String createFloatProc = "create or replace function "
+            + "float_proc( OUT IMAX float, OUT IMIN float, OUT INUL float)  as "
+            + "'begin "
+            + "select max_val into imax from decimal_tab;"
+            + "select min_val into imin from decimal_tab;"
+            + "select null_val into inul from decimal_tab;"
+            + " end;' "
+            + "language plpgsql;";
+    private final String createUpdateFloat = "create or replace function "
+            + "updatefloat_proc ( IN maxparm float, IN minparm float ) returns int as "
+            + "'begin "
+            + "update decimal_tab set max_val=maxparm;"
+            + "update decimal_tab set min_val=minparm;"
+            + "return 0;"
+            + " end;' "
+            + "language plpgsql;";
+    private final String createRealTab =
+            "create temp table real_tab ( max_val float(25), min_val float(25), null_val float(25) )";
+    private final String insertRealTab = "insert into real_tab values (1.0E37,1.0E-37, null)";
+    private final String dropFloatProc = "drop function float_proc()";
+    private final String createUpdateReal = "create or replace function "
+            + "update_real_proc ( IN maxparm float(25), IN minparm float(25) ) returns int as "
+            + "'begin "
+            + "update real_tab set max_val=maxparm;"
+            + "update real_tab set min_val=minparm;"
+            + "return 0;"
+            + " end;' "
+            + "language plpgsql;";
+    private final String dropUpdateReal = "drop function update_real_proc(float, float)";
+    private final double[] doubleValues = {1.0E125, 1.0E-130};
+    private final float[] realValues = {(float) 1.0E37, (float) 1.0E-37};
+    private final int[] intValues = {2147483647, -2147483648};
+
+    @BeforeClass
+    public static void beforeClass() throws Exception {
+        try (Connection con = TestUtil.openDB()) {
+            assumeCallableStatementsSupported(con);
+        }
+    }
+
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        Statement stmt = con.createStatement();
+        stmt.execute(
+                "create temp table numeric_tab (MAX_VAL NUMERIC(30,15), MIN_VAL NUMERIC(30,15), NULL_VAL NUMERIC(30,15) NULL)");
+        stmt.execute("insert into numeric_tab values ( 999999999999999,0.000000000000001, null)");
+        stmt.execute(
+                "CREATE OR REPLACE FUNCTION mysum(a int, b int) returns int AS 'BEGIN return a + b; END;' LANGUAGE plpgsql");
+        stmt.execute(
+                "CREATE OR REPLACE FUNCTION myiofunc(a INOUT int, b OUT int) AS 'BEGIN b := a; a := 1; END;' LANGUAGE plpgsql");
+        stmt.execute(
+                "CREATE OR REPLACE FUNCTION myif(a INOUT int, b IN int) AS 'BEGIN a := b; END;' LANGUAGE plpgsql");
+        stmt.execute(
+                "CREATE OR REPLACE FUNCTION mynoparams() returns int AS 'BEGIN return 733; END;' LANGUAGE plpgsql");
+        stmt.execute(
+                "CREATE OR REPLACE FUNCTION mynoparamsproc() returns void AS 'BEGIN NULL; END;' LANGUAGE plpgsql");
+
+        stmt.execute("create or replace function "
+                + "Numeric_Proc( OUT IMAX NUMERIC(30,15), OUT IMIN NUMERIC(30,15), OUT INUL NUMERIC(30,15))  as "
+                + "'begin "
+                + "select max_val into imax from numeric_tab;"
+                + "select min_val into imin from numeric_tab;"
+                + "select null_val into inul from numeric_tab;"
+
+                + " end;' "
+                + "language plpgsql;");
+
+        stmt.execute("CREATE OR REPLACE FUNCTION test_somein_someout("
+                + "pa IN int4,"
+                + "pb OUT varchar,"
+                + "pc OUT int8)"
+                + " AS "
+
+                + "'begin "
+                + "pb := ''out'';"
+                + "pc := pa + 1;"
+                + "end;'"
+
+                + "LANGUAGE plpgsql VOLATILE;"
+
+        );
+        stmt.execute("CREATE OR REPLACE FUNCTION test_allinout("
+                + "pa INOUT int4,"
+                + "pb INOUT varchar,"
+                + "pc INOUT int8)"
+                + " AS "
+                + "'begin "
+                + "pa := pa + 1;"
+                + "pb := ''foo out'';"
+                + "pc := pa + 1;"
+                + "end;'"
+                + "LANGUAGE plpgsql VOLATILE;"
+        );
+        stmt.execute(
+                "CREATE OR REPLACE FUNCTION testspg__getBooleanWithoutArg() "
+                        + "RETURNS boolean AS '  "
+                        + "begin return true; end; ' LANGUAGE plpgsql;");
+        stmt.execute(
+                "CREATE OR REPLACE FUNCTION testspg__getBit1WithoutArg() "
+                        + "RETURNS bit(1) AS '  "
+                        + "begin return B''1''; end; ' LANGUAGE plpgsql;");
+        stmt.execute(
+                "CREATE OR REPLACE FUNCTION testspg__getBit2WithoutArg() "
+                        + "RETURNS bit(2) AS '  "
+                        + "begin return B''10''; end; ' LANGUAGE plpgsql;");
+        if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v11)) {
+            stmt.execute(
+                    "CREATE OR REPLACE PROCEDURE inonlyprocedure(a IN int) AS 'BEGIN NULL; END;' LANGUAGE plpgsql");
+            stmt.execute(
+                    "CREATE OR REPLACE PROCEDURE inoutprocedure(a INOUT int) AS 'BEGIN a := a + a; END;' LANGUAGE plpgsql");
+
+        }
+        if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v14)) {
+            stmt.execute("create or replace PROCEDURE testspg_refcursor(bar date, out cur1 refcursor) "
+                    + " as $$ declare begin "
+                    + "OPEN cur1 FOR "
+                    + "SELECT now() as now; end $$ language plpgsql");
+        }
+    }
+
+    @Override
+    public void tearDown() throws SQLException {
+        Statement stmt = con.createStatement();
+        stmt.execute("drop function Numeric_Proc(out decimal, out decimal, out decimal)");
+        stmt.execute("drop function test_somein_someout(int4)");
+        stmt.execute("drop function test_allinout( inout int4, inout varchar, inout int8)");
+        stmt.execute("drop function mysum(a int, b int)");
+        stmt.execute("drop function myiofunc(a INOUT int, b OUT int) ");
+        stmt.execute("drop function myif(a INOUT int, b IN int)");
+        stmt.execute("drop function mynoparams()");
+        stmt.execute("drop function mynoparamsproc()");
+        stmt.execute("drop function testspg__getBooleanWithoutArg ();");
+        stmt.execute("drop function testspg__getBit1WithoutArg ();");
+        stmt.execute("drop function testspg__getBit2WithoutArg ();");
+        if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v11)) {
+            stmt.execute("drop procedure inonlyprocedure(a IN int)");
+            stmt.execute("drop procedure inoutprocedure(a INOUT int)");
+        }
+        if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v14)) {
+            stmt.execute("DROP PROCEDURE testspg_refcursor(date);");
+        }
+        stmt.close();
+        super.tearDown();
+    }
+
+    @Test
+    public void testSomeInOut() throws Throwable {
+        CallableStatement call = con.prepareCall("{ call test_somein_someout(?,?,?) }");
+
+        call.registerOutParameter(2, Types.VARCHAR);
+        call.registerOutParameter(3, Types.BIGINT);
+        call.setInt(1, 20);
+        call.execute();
+
+    }
+
+    @Test
+    public void testNotEnoughParameters() throws Throwable {
+        CallableStatement cs = con.prepareCall("{call myiofunc(?,?)}");
+        cs.setInt(1, 2);
+        cs.registerOutParameter(2, Types.INTEGER);
+        try {
+            cs.execute();
+            fail("Should throw an exception ");
+        } catch (SQLException ex) {
+            assertTrue(ex.getSQLState().equalsIgnoreCase(PSQLState.SYNTAX_ERROR.getState()));
+        }
+
+    }
+
+    @Test
+    public void testTooManyParameters() throws Throwable {
+        CallableStatement cs = con.prepareCall("{call myif(?,?)}");
+        try {
+            cs.setInt(1, 1);
+            cs.setInt(2, 2);
+            cs.registerOutParameter(1, Types.INTEGER);
+            cs.registerOutParameter(2, Types.INTEGER);
+            cs.execute();
+            fail("should throw an exception");
+        } catch (SQLException ex) {
+            assertTrue(ex.getSQLState().equalsIgnoreCase(PSQLState.SYNTAX_ERROR.getState()));
+        }
+
+    }
+
+    @Test
+    public void testAllInOut() throws Throwable {
+        CallableStatement call = con.prepareCall("{ call test_allinout(?,?,?) }");
+
+        call.registerOutParameter(1, Types.INTEGER);
+        call.registerOutParameter(2, Types.VARCHAR);
+        call.registerOutParameter(3, Types.BIGINT);
+        call.setInt(1, 20);
+        call.setString(2, "hi");
+        call.setInt(3, 123);
+        call.execute();
+        call.getInt(1);
+        call.getString(2);
+        call.getLong(3);
+
+    }
+
+    @Test
+    public void testNumeric() throws Throwable {
+        CallableStatement call = con.prepareCall("{ call Numeric_Proc(?,?,?) }");
+
+        call.registerOutParameter(1, Types.NUMERIC, 15);
+        call.registerOutParameter(2, Types.NUMERIC, 15);
+        call.registerOutParameter(3, Types.NUMERIC, 15);
+
+        call.executeUpdate();
+        BigDecimal ret = call.getBigDecimal(1);
+        assertTrue(
+                "correct return from getNumeric () should be 999999999999999.000000000000000 but returned "
+                        + ret.toString(),
+                ret.equals(new BigDecimal("999999999999999.000000000000000")));
+
+        ret = call.getBigDecimal(2);
+        assertTrue("correct return from getNumeric ()",
+                ret.equals(new BigDecimal("0.000000000000001")));
+        try {
+            ret = call.getBigDecimal(3);
+        } catch (NullPointerException ex) {
+            assertTrue("This should be null", call.wasNull());
+        }
+    }
+
+    @Test
+    public void testGetObjectDecimal() throws Throwable {
+        try {
+            Statement stmt = con.createStatement();
+            stmt.execute(
+                    "create temp table decimal_tab ( max_val numeric(30,15), min_val numeric(30,15), nul_val numeric(30,15) )");
+            stmt.execute(
+                    "insert into decimal_tab values (999999999999999.000000000000000,0.000000000000001,null)");
+
+            boolean ret = stmt.execute("create or replace function "
+                    + "decimal_proc( OUT pmax numeric, OUT pmin numeric, OUT nval numeric)  as "
+                    + "'begin "
+                    + "select max_val into pmax from decimal_tab;"
+                    + "select min_val into pmin from decimal_tab;"
+                    + "select nul_val into nval from decimal_tab;"
+
+                    + " end;' "
+                    + "language plpgsql;");
+        } catch (Exception ex) {
+            fail(ex.getMessage());
+            throw ex;
+        }
+        try {
+            CallableStatement cstmt = con.prepareCall("{ call decimal_proc(?,?,?) }");
+            cstmt.registerOutParameter(1, Types.DECIMAL);
+            cstmt.registerOutParameter(2, Types.DECIMAL);
+            cstmt.registerOutParameter(3, Types.DECIMAL);
+            cstmt.executeUpdate();
+            BigDecimal val = (BigDecimal) cstmt.getObject(1);
+            assertEquals(0, val.compareTo(new BigDecimal("999999999999999.000000000000000")));
+            val = (BigDecimal) cstmt.getObject(2);
+            assertEquals(0, val.compareTo(new BigDecimal("0.000000000000001")));
+            val = (BigDecimal) cstmt.getObject(3);
+            assertNull(val);
+        } catch (Exception ex) {
+            fail(ex.getMessage());
+        } finally {
+            try {
+                Statement dstmt = con.createStatement();
+                dstmt.execute("drop function decimal_proc()");
+            } catch (Exception ex) {
+            }
+        }
+    }
+
+    /**
+     * Verifies that setObject with a Boolean and target type Types.VARCHAR
+     * converts the value to its string form ("true"/"false") before the
+     * plpgsql function writes it into the table.
+     */
+    @Test
+    public void testVarcharBool() throws Throwable {
+        try {
+            Statement stmt = con.createStatement();
+            stmt.execute("create temp table vartab( max_val text, min_val text)");
+            stmt.execute("insert into vartab values ('a','b')");
+            boolean ret = stmt.execute("create or replace function "
+                    + "updatevarchar( in imax text, in imin text)  returns int as "
+                    + "'begin "
+                    + "update vartab set max_val = imax;"
+                    + "update vartab set min_val = imin;"
+                    + "return 0;"
+                    + " end;' "
+                    + "language plpgsql;");
+            stmt.close();
+        } catch (Exception ex) {
+            fail(ex.getMessage());
+            // NOTE(review): fail() already throws, so this rethrow is unreachable.
+            throw ex;
+        }
+        try {
+            CallableStatement cstmt = con.prepareCall("{ call updatevarchar(?,?) }");
+            cstmt.setObject(1, Boolean.TRUE, Types.VARCHAR);
+            cstmt.setObject(2, Boolean.FALSE, Types.VARCHAR);
+
+            cstmt.executeUpdate();
+            cstmt.close();
+            ResultSet rs = con.createStatement().executeQuery("select * from vartab");
+            assertTrue(rs.next());
+            // Both columns must now hold the Boolean string representations.
+            assertTrue(rs.getString(1).equals(Boolean.TRUE.toString()));
+
+            assertTrue(rs.getString(2).equals(Boolean.FALSE.toString()));
+            rs.close();
+        } catch (Exception ex) {
+            fail(ex.getMessage());
+        } finally {
+            // Best-effort cleanup; failures here must not mask the test result.
+            try {
+                Statement dstmt = con.createStatement();
+                dstmt.execute("drop function updatevarchar(text,text)");
+            } catch (Exception ex) {
+            }
+        }
+    }
+
+    /**
+     * Exercises INOUT boolean parameters: values are set as input, registered
+     * as output, and after execution the OUT side must reflect what the
+     * function selected back from bit_tab (including SQL NULL via wasNull()).
+     */
+    @Test
+    public void testInOut() throws Throwable {
+        try {
+            Statement stmt = con.createStatement();
+            stmt.execute(createBitTab);
+            stmt.execute(insertBitTab);
+            boolean ret = stmt.execute("create or replace function "
+                    + "insert_bit( inout IMAX boolean, inout IMIN boolean, inout INUL boolean)  as "
+                    + "'begin "
+                    + "insert into bit_tab values( imax, imin, inul);"
+                    + "select max_val into imax from bit_tab;"
+                    + "select min_val into imin from bit_tab;"
+                    + "select null_val into inul from bit_tab;"
+                    + " end;' "
+                    + "language plpgsql;");
+        } catch (Exception ex) {
+            fail(ex.getMessage());
+            // NOTE(review): unreachable, fail() throws first.
+            throw ex;
+        }
+        try {
+            CallableStatement cstmt = con.prepareCall("{ call insert_bit(?,?,?) }");
+            // INOUT parameters need both a bound input value and an OUT registration.
+            cstmt.setObject(1, "true", Types.BIT);
+            cstmt.setObject(2, "false", Types.BIT);
+            cstmt.setNull(3, Types.BIT);
+            cstmt.registerOutParameter(1, Types.BIT);
+            cstmt.registerOutParameter(2, Types.BIT);
+            cstmt.registerOutParameter(3, Types.BIT);
+            cstmt.executeUpdate();
+
+            assertTrue(cstmt.getBoolean(1));
+            assertFalse(cstmt.getBoolean(2));
+            // getBoolean on a NULL OUT value returns false; wasNull() must report the NULL.
+            cstmt.getBoolean(3);
+            assertTrue(cstmt.wasNull());
+        } finally {
+            try {
+                Statement dstmt = con.createStatement();
+                dstmt.execute("drop function insert_bit(boolean, boolean, boolean)");
+            } catch (Exception ex) {
+            }
+        }
+    }
+
+    /**
+     * Verifies setObject with Types.BIT: string inputs "true"/"false" and an
+     * explicit SQL NULL must reach the boolean columns of bit_tab through the
+     * plpgsql function's IN parameters.
+     */
+    @Test
+    public void testSetObjectBit() throws Throwable {
+        try {
+            Statement stmt = con.createStatement();
+            stmt.execute(createBitTab);
+            stmt.execute(insertBitTab);
+            boolean ret = stmt.execute("create or replace function "
+                    + "update_bit( in IMAX boolean, in IMIN boolean, in INUL boolean) returns int as "
+                    + "'begin "
+                    + "update bit_tab set  max_val = imax;"
+                    + "update bit_tab set  min_val = imin;"
+                    // Fixed: the third parameter must update null_val; the original
+                    // overwrote min_val a second time and never touched null_val.
+                    + "update bit_tab set  null_val = inul;"
+                    + " return 0;"
+                    + " end;' "
+                    + "language plpgsql;");
+        } catch (Exception ex) {
+            fail(ex.getMessage());
+            throw ex;
+        }
+        try {
+            CallableStatement cstmt = con.prepareCall("{ call update_bit(?,?,?) }");
+            cstmt.setObject(1, "true", Types.BIT);
+            cstmt.setObject(2, "false", Types.BIT);
+            cstmt.setNull(3, Types.BIT);
+            cstmt.executeUpdate();
+            cstmt.close();
+            ResultSet rs = con.createStatement().executeQuery("select * from bit_tab");
+
+            assertTrue(rs.next());
+            assertTrue(rs.getBoolean(1));
+            assertFalse(rs.getBoolean(2));
+            // getBoolean on NULL returns false; wasNull() confirms the NULL.
+            rs.getBoolean(3);
+            assertTrue(rs.wasNull());
+        } catch (Exception ex) {
+            fail(ex.getMessage());
+        } finally {
+            try {
+                Statement dstmt = con.createStatement();
+                dstmt.execute("drop function update_bit(boolean, boolean, boolean)");
+            } catch (Exception ex) {
+            }
+        }
+    }
+
+    /**
+     * A bit(1) function result must be readable as a boolean OUT parameter.
+     */
+    @Test
+    public void testGetBit1WithoutArg() throws SQLException {
+        // Skipped in simple-query mode (OUT-only registration unsupported there).
+        assumeNotSimpleQueryMode();
+        try (CallableStatement call = con.prepareCall("{ ? = call testspg__getBit1WithoutArg () }")) {
+            call.registerOutParameter(1, Types.BOOLEAN);
+            call.execute();
+            assertTrue(call.getBoolean(1));
+        }
+    }
+
+    /**
+     * getBoolean(int) on a bit(2) result cannot be coerced to boolean and must
+     * raise an SQLException with SQLState CANNOT_COERCE.
+     */
+    @Test
+    public void testGetBit2WithoutArg() throws SQLException {
+        assumeNotSimpleQueryMode();
+        try (CallableStatement call = con.prepareCall("{ ? = call testspg__getBit2WithoutArg () }")) {
+            call.registerOutParameter(1, Types.BOOLEAN);
+            try {
+                call.execute();
+                // Do not assert on the value: the call itself must throw. Wrapping
+                // it in assertTrue would fail with a misleading message if a false
+                // value were (incorrectly) returned instead of an exception.
+                call.getBoolean(1);
+                fail("#getBoolean(int) on bit(2) should throw");
+            } catch (SQLException e) {
+                assertEquals(PSQLState.CANNOT_COERCE.getState(), e.getSQLState());
+            }
+        }
+    }
+
+    /**
+     * Verifies LONGVARCHAR handling in both directions: reading text OUT
+     * parameters (including NULL) via getObject, and writing a Float through
+     * setObject with target type LONGVARCHAR.
+     */
+    @Test
+    public void testGetObjectLongVarchar() throws Throwable {
+        try {
+            Statement stmt = con.createStatement();
+            stmt.execute("create temp table longvarchar_tab ( t text, null_val text )");
+            stmt.execute("insert into longvarchar_tab values ('testdata',null)");
+            boolean ret = stmt.execute("create or replace function "
+                    + "longvarchar_proc( OUT pcn text, OUT nval text)  as "
+                    + "'begin "
+                    + "select t into pcn from longvarchar_tab;"
+                    + "select null_val into nval from longvarchar_tab;"
+
+                    + " end;' "
+                    + "language plpgsql;");
+
+            ret = stmt.execute("create or replace function "
+                    + "lvarchar_in_name( IN pcn text) returns int as "
+                    + "'begin "
+                    + "update longvarchar_tab set t=pcn;"
+                    + "return 0;"
+                    + " end;' "
+                    + "language plpgsql;");
+        } catch (Exception ex) {
+            fail(ex.getMessage());
+            throw ex;
+        }
+        try {
+            CallableStatement cstmt = con.prepareCall("{ call longvarchar_proc(?,?) }");
+            cstmt.registerOutParameter(1, Types.LONGVARCHAR);
+            cstmt.registerOutParameter(2, Types.LONGVARCHAR);
+            cstmt.executeUpdate();
+            String val = (String) cstmt.getObject(1);
+            assertEquals("testdata", val);
+            val = (String) cstmt.getObject(2);
+            assertNull(val);
+            cstmt.close();
+            cstmt = con.prepareCall("{ call lvarchar_in_name(?) }");
+            String maxFloat = "3.4E38";
+            cstmt.setObject(1, Float.valueOf(maxFloat), Types.LONGVARCHAR);
+            cstmt.executeUpdate();
+            cstmt.close();
+            Statement stmt = con.createStatement();
+            ResultSet rs = stmt.executeQuery("select * from longvarchar_tab");
+            assertTrue(rs.next());
+            String rval = (String) rs.getObject(1);
+            // Fixed argument order: JUnit's assertEquals takes (expected, actual),
+            // so a failure message now reports the values correctly.
+            assertEquals(maxFloat.trim(), rval.trim());
+        } catch (Exception ex) {
+            fail(ex.getMessage());
+        } finally {
+            try {
+                Statement dstmt = con.createStatement();
+                dstmt.execute("drop function longvarchar_proc()");
+                dstmt.execute("drop function lvarchar_in_name(text)");
+            } catch (Exception ex) {
+            }
+        }
+    }
+
+    /**
+     * Reads bytea OUT parameters registered as Types.VARBINARY via getBytes,
+     * including a NULL column which must come back as a null array.
+     */
+    @Test
+    public void testGetBytes01() throws Throwable {
+        assumeByteaSupported();
+        byte[] testdata = "TestData".getBytes();
+        try {
+            Statement stmt = con.createStatement();
+            stmt.execute("create temp table varbinary_tab ( vbinary bytea, null_val bytea )");
+            boolean ret = stmt.execute("create or replace function "
+                    + "varbinary_proc( OUT pcn bytea, OUT nval bytea)  as "
+                    + "'begin "
+                    + "select vbinary into pcn from varbinary_tab;"
+                    + "select null_val into nval from varbinary_tab;"
+
+                    + " end;' "
+                    + "language plpgsql;");
+            stmt.close();
+            PreparedStatement pstmt = con.prepareStatement("insert into varbinary_tab values (?,?)");
+            pstmt.setBytes(1, testdata);
+            pstmt.setBytes(2, null);
+
+            pstmt.executeUpdate();
+            pstmt.close();
+        } catch (Exception ex) {
+            fail(ex.getMessage());
+            // NOTE(review): unreachable, fail() throws first.
+            throw ex;
+        }
+        try {
+            CallableStatement cstmt = con.prepareCall("{ call varbinary_proc(?,?) }");
+            cstmt.registerOutParameter(1, Types.VARBINARY);
+            cstmt.registerOutParameter(2, Types.VARBINARY);
+            cstmt.executeUpdate();
+            byte[] retval = cstmt.getBytes(1);
+            // The round-tripped bytes must match the inserted data byte-for-byte.
+            for (int i = 0; i < testdata.length; i++) {
+                assertEquals(testdata[i], retval[i]);
+            }
+
+            retval = cstmt.getBytes(2);
+            assertNull(retval);
+        } catch (Exception ex) {
+            fail(ex.getMessage());
+        } finally {
+            try {
+                Statement dstmt = con.createStatement();
+                dstmt.execute("drop function varbinary_proc()");
+            } catch (Exception ex) {
+            }
+        }
+    }
+
+    /**
+     * Writes BigDecimal values through setObject with target type Types.REAL
+     * and verifies the stored float values round-trip via the update proc.
+     */
+    @Test
+    public void testUpdateReal() throws Throwable {
+        try {
+            Statement stmt = con.createStatement();
+            stmt.execute(createRealTab);
+            stmt.execute(createUpdateReal);
+
+            stmt.execute(insertRealTab);
+            stmt.close();
+        } catch (Exception ex) {
+            fail(ex.getMessage());
+            throw ex;
+        }
+        try {
+            CallableStatement cstmt = con.prepareCall("{ call update_real_proc(?,?) }");
+            BigDecimal val = new BigDecimal(intValues[0]);
+            cstmt.setObject(1, val, Types.REAL);
+            val = new BigDecimal(intValues[1]);
+            cstmt.setObject(2, val, Types.REAL);
+            cstmt.executeUpdate();
+            cstmt.close();
+            ResultSet rs = con.createStatement().executeQuery("select * from real_tab");
+            assertTrue(rs.next());
+            // Compare through Float to normalize the textual representation.
+            Float oVal = (float) intValues[0];
+            Float rVal = Float.valueOf(rs.getObject(1).toString());
+            assertTrue(oVal.equals(rVal));
+            oVal = (float) intValues[1];
+            rVal = Float.valueOf(rs.getObject(2).toString());
+            assertTrue(oVal.equals(rVal));
+            rs.close();
+        } catch (Exception ex) {
+            fail(ex.getMessage());
+        } finally {
+            try {
+                Statement dstmt = con.createStatement();
+                dstmt.execute(dropUpdateReal);
+                dstmt.close();
+            } catch (Exception ex) {
+            }
+        }
+    }
+
+    /**
+     * Inserts reversed double values, then calls the update proc with the
+     * correct ordering and verifies the columns were swapped back.
+     */
+    @Test
+    public void testUpdateDecimal() throws Throwable {
+        try {
+            Statement stmt = con.createStatement();
+            stmt.execute(createDecimalTab);
+            boolean ret = stmt.execute(createUpdateFloat);
+            stmt.close();
+            PreparedStatement pstmt = con.prepareStatement("insert into decimal_tab values (?,?)");
+            // note these are reversed on purpose
+            pstmt.setDouble(1, doubleValues[1]);
+            pstmt.setDouble(2, doubleValues[0]);
+
+            pstmt.executeUpdate();
+            pstmt.close();
+        } catch (Exception ex) {
+            fail(ex.getMessage());
+            // NOTE(review): unreachable, fail() throws first.
+            throw ex;
+        }
+        try {
+            CallableStatement cstmt = con.prepareCall("{ call updatefloat_proc(?,?) }");
+            cstmt.setDouble(1, doubleValues[0]);
+            cstmt.setDouble(2, doubleValues[1]);
+            cstmt.executeUpdate();
+            cstmt.close();
+            ResultSet rs = con.createStatement().executeQuery("select * from decimal_tab");
+            assertTrue(rs.next());
+            // Exact equality is safe: the same double literals round-trip unchanged.
+            assertTrue(rs.getDouble(1) == doubleValues[0]);
+            assertTrue(rs.getDouble(2) == doubleValues[1]);
+            rs.close();
+        } catch (Exception ex) {
+            fail(ex.getMessage());
+        } finally {
+            try {
+                Statement dstmt = con.createStatement();
+                dstmt.execute("drop function updatefloat_proc(float, float)");
+            } catch (Exception ex) {
+            }
+        }
+    }
+
+    /**
+     * Same as testGetBytes01 but registers the bytea OUT parameters as
+     * Types.LONGVARBINARY instead of VARBINARY.
+     */
+    @Test
+    public void testGetBytes02() throws Throwable {
+        assumeByteaSupported();
+        byte[] testdata = "TestData".getBytes();
+        try {
+            Statement stmt = con.createStatement();
+            stmt.execute("create temp table longvarbinary_tab ( vbinary bytea, null_val bytea )");
+            boolean ret = stmt.execute("create or replace function "
+                    + "longvarbinary_proc( OUT pcn bytea, OUT nval bytea)  as "
+                    + "'begin "
+                    + "select vbinary into pcn from longvarbinary_tab;"
+                    + "select null_val into nval from longvarbinary_tab;"
+
+                    + " end;' "
+                    + "language plpgsql;");
+            stmt.close();
+            PreparedStatement pstmt = con.prepareStatement("insert into longvarbinary_tab values (?,?)");
+            pstmt.setBytes(1, testdata);
+            pstmt.setBytes(2, null);
+
+            pstmt.executeUpdate();
+            pstmt.close();
+        } catch (Exception ex) {
+            fail(ex.getMessage());
+            // NOTE(review): unreachable, fail() throws first.
+            throw ex;
+        }
+        try {
+            CallableStatement cstmt = con.prepareCall("{ call longvarbinary_proc(?,?) }");
+            cstmt.registerOutParameter(1, Types.LONGVARBINARY);
+            cstmt.registerOutParameter(2, Types.LONGVARBINARY);
+            cstmt.executeUpdate();
+            byte[] retval = cstmt.getBytes(1);
+            // The round-tripped bytes must match the inserted data byte-for-byte.
+            for (int i = 0; i < testdata.length; i++) {
+                assertEquals(testdata[i], retval[i]);
+            }
+
+            retval = cstmt.getBytes(2);
+            assertNull(retval);
+        } catch (Exception ex) {
+            fail(ex.getMessage());
+        } finally {
+            try {
+                Statement dstmt = con.createStatement();
+                dstmt.execute("drop function longvarbinary_proc()");
+            } catch (Exception ex) {
+            }
+        }
+    }
+
+    /**
+     * OUT parameters registered as Types.FLOAT must come back from getObject
+     * as Double values matching the stored decimals, with NULL detectable via
+     * wasNull().
+     */
+    @Test
+    public void testGetObjectFloat() throws Throwable {
+        try {
+            Statement stmt = con.createStatement();
+            stmt.execute(createDecimalTab);
+            stmt.execute(insertDecimalTab);
+            boolean ret = stmt.execute(createFloatProc);
+        } catch (Exception ex) {
+            fail(ex.getMessage());
+            // NOTE(review): unreachable, fail() throws first.
+            throw ex;
+        }
+        try {
+            CallableStatement cstmt = con.prepareCall("{ call float_proc(?,?,?) }");
+            cstmt.registerOutParameter(1, java.sql.Types.FLOAT);
+            cstmt.registerOutParameter(2, java.sql.Types.FLOAT);
+            cstmt.registerOutParameter(3, java.sql.Types.FLOAT);
+            cstmt.executeUpdate();
+            Double val = (Double) cstmt.getObject(1);
+            assertTrue(val.doubleValue() == doubleValues[0]);
+
+            val = (Double) cstmt.getObject(2);
+            assertTrue(val.doubleValue() == doubleValues[1]);
+
+            // Third OUT parameter is NULL; only wasNull() is asserted.
+            val = (Double) cstmt.getObject(3);
+            assertTrue(cstmt.wasNull());
+        } catch (Exception ex) {
+            fail(ex.getMessage());
+        } finally {
+            try {
+                Statement dstmt = con.createStatement();
+                dstmt.execute(dropFloatProc);
+            } catch (Exception ex) {
+            }
+        }
+    }
+
+    /**
+     * getDouble on DOUBLE OUT parameters must return the extreme float values
+     * stored in the table, and report NULL through wasNull().
+     */
+    @Test
+    public void testGetDouble01() throws Throwable {
+        try {
+            Statement stmt = con.createStatement();
+            stmt.execute("create temp table d_tab ( max_val float, min_val float, null_val float )");
+            stmt.execute("insert into d_tab values (1.0E125,1.0E-130,null)");
+            boolean ret = stmt.execute("create or replace function "
+                    + "double_proc( OUT IMAX float, OUT IMIN float, OUT INUL float)  as "
+                    + "'begin "
+                    + "select max_val into imax from d_tab;"
+                    + "select min_val into imin from d_tab;"
+                    + "select null_val into inul from d_tab;"
+
+                    + " end;' "
+                    + "language plpgsql;");
+        } catch (Exception ex) {
+            fail(ex.getMessage());
+            // NOTE(review): unreachable, fail() throws first.
+            throw ex;
+        }
+        try {
+            CallableStatement cstmt = con.prepareCall("{ call double_proc(?,?,?) }");
+            cstmt.registerOutParameter(1, java.sql.Types.DOUBLE);
+            cstmt.registerOutParameter(2, java.sql.Types.DOUBLE);
+            cstmt.registerOutParameter(3, java.sql.Types.DOUBLE);
+            cstmt.executeUpdate();
+            assertTrue(cstmt.getDouble(1) == 1.0E125);
+            assertTrue(cstmt.getDouble(2) == 1.0E-130);
+            // getDouble on NULL returns 0.0; wasNull() confirms the NULL.
+            cstmt.getDouble(3);
+            assertTrue(cstmt.wasNull());
+        } catch (Exception ex) {
+            fail(ex.getMessage());
+        } finally {
+            try {
+                Statement dstmt = con.createStatement();
+                dstmt.execute("drop function double_proc()");
+            } catch (Exception ex) {
+            }
+        }
+    }
+
+    /**
+     * DOUBLE OUT parameters registered as Types.REAL must be readable with
+     * getFloat using float range extremes (3.4E38 / 1.4E-45).
+     */
+    @Test
+    public void testGetDoubleAsReal() throws Throwable {
+        try {
+            Statement stmt = con.createStatement();
+            stmt.execute("create temp table d_tab ( max_val float, min_val float, null_val float )");
+            stmt.execute("insert into d_tab values (3.4E38,1.4E-45,null)");
+            boolean ret = stmt.execute("create or replace function "
+                    + "double_proc( OUT IMAX float, OUT IMIN float, OUT INUL float)  as "
+                    + "'begin "
+                    + "select max_val into imax from d_tab;"
+                    + "select min_val into imin from d_tab;"
+                    + "select null_val into inul from d_tab;"
+
+                    + " end;' "
+                    + "language plpgsql;");
+        } catch (Exception ex) {
+            fail(ex.getMessage());
+            // NOTE(review): unreachable, fail() throws first.
+            throw ex;
+        }
+        try {
+            CallableStatement cstmt = con.prepareCall("{ call double_proc(?,?,?) }");
+            cstmt.registerOutParameter(1, java.sql.Types.REAL);
+            cstmt.registerOutParameter(2, java.sql.Types.REAL);
+            cstmt.registerOutParameter(3, java.sql.Types.REAL);
+            cstmt.executeUpdate();
+            assertTrue(cstmt.getFloat(1) == 3.4E38f);
+            assertTrue(cstmt.getFloat(2) == 1.4E-45f);
+            // getFloat on NULL returns 0.0f; wasNull() confirms the NULL.
+            cstmt.getFloat(3);
+            assertTrue(cstmt.wasNull());
+        } catch (Exception ex) {
+            fail(ex.getMessage());
+        } finally {
+            try {
+                Statement dstmt = con.createStatement();
+                dstmt.execute("drop function double_proc()");
+            } catch (Exception ex) {
+            }
+        }
+    }
+
+    /**
+     * getShort on SMALLINT OUT parameters must return the int2 range limits
+     * (32767/-32768) and report NULL via wasNull().
+     */
+    @Test
+    public void testGetShort01() throws Throwable {
+        try {
+            Statement stmt = con.createStatement();
+            stmt.execute("create temp table short_tab ( max_val int2, min_val int2, null_val int2 )");
+            stmt.execute("insert into short_tab values (32767,-32768,null)");
+            boolean ret = stmt.execute("create or replace function "
+                    + "short_proc( OUT IMAX int2, OUT IMIN int2, OUT INUL int2)  as "
+                    + "'begin "
+                    + "select max_val into imax from short_tab;"
+                    + "select min_val into imin from short_tab;"
+                    + "select null_val into inul from short_tab;"
+
+                    + " end;' "
+                    + "language plpgsql;");
+        } catch (Exception ex) {
+            fail(ex.getMessage());
+            // NOTE(review): unreachable, fail() throws first.
+            throw ex;
+        }
+        try {
+            CallableStatement cstmt = con.prepareCall("{ call short_proc(?,?,?) }");
+            cstmt.registerOutParameter(1, java.sql.Types.SMALLINT);
+            cstmt.registerOutParameter(2, java.sql.Types.SMALLINT);
+            cstmt.registerOutParameter(3, java.sql.Types.SMALLINT);
+            cstmt.executeUpdate();
+            assertEquals(32767, cstmt.getShort(1));
+            assertEquals(-32768, cstmt.getShort(2));
+            // getShort on NULL returns 0; wasNull() confirms the NULL.
+            cstmt.getShort(3);
+            assertTrue(cstmt.wasNull());
+        } catch (Exception ex) {
+            fail(ex.getMessage());
+        } finally {
+            try {
+                Statement dstmt = con.createStatement();
+                dstmt.execute("drop function short_proc()");
+            } catch (Exception ex) {
+            }
+        }
+    }
+
+    /**
+     * getInt on INTEGER OUT parameters must return the int4 range limits
+     * (2147483647/-2147483648) and report NULL via wasNull().
+     */
+    @Test
+    public void testGetInt01() throws Throwable {
+        try {
+            Statement stmt = con.createStatement();
+            stmt.execute("create temp table i_tab ( max_val int, min_val int, null_val int )");
+            stmt.execute("insert into i_tab values (2147483647,-2147483648,null)");
+            boolean ret = stmt.execute("create or replace function "
+                    + "int_proc( OUT IMAX int, OUT IMIN int, OUT INUL int)  as "
+                    + "'begin "
+                    + "select max_val into imax from i_tab;"
+                    + "select min_val into imin from i_tab;"
+                    + "select null_val into inul from i_tab;"
+
+                    + " end;' "
+                    + "language plpgsql;");
+        } catch (Exception ex) {
+            fail(ex.getMessage());
+            // NOTE(review): unreachable, fail() throws first.
+            throw ex;
+        }
+        try {
+            CallableStatement cstmt = con.prepareCall("{ call int_proc(?,?,?) }");
+            cstmt.registerOutParameter(1, java.sql.Types.INTEGER);
+            cstmt.registerOutParameter(2, java.sql.Types.INTEGER);
+            cstmt.registerOutParameter(3, java.sql.Types.INTEGER);
+            cstmt.executeUpdate();
+            assertEquals(2147483647, cstmt.getInt(1));
+            assertEquals(-2147483648, cstmt.getInt(2));
+            // getInt on NULL returns 0; wasNull() confirms the NULL.
+            cstmt.getInt(3);
+            assertTrue(cstmt.wasNull());
+        } catch (Exception ex) {
+            fail(ex.getMessage());
+        } finally {
+            try {
+                Statement dstmt = con.createStatement();
+                dstmt.execute("drop function int_proc()");
+            } catch (Exception ex) {
+            }
+        }
+    }
+
+    /**
+     * getLong on BIGINT OUT parameters must return the int8 range limits and
+     * report NULL via wasNull().
+     */
+    @Test
+    public void testGetLong01() throws Throwable {
+        try {
+            Statement stmt = con.createStatement();
+            stmt.execute("create temp table l_tab ( max_val int8, min_val int8, null_val int8 )");
+            stmt.execute("insert into l_tab values (9223372036854775807,-9223372036854775808,null)");
+            boolean ret = stmt.execute("create or replace function "
+                    + "bigint_proc( OUT IMAX int8, OUT IMIN int8, OUT INUL int8)  as "
+                    + "'begin "
+                    + "select max_val into imax from l_tab;"
+                    + "select min_val into imin from l_tab;"
+                    + "select null_val into inul from l_tab;"
+
+                    + " end;' "
+                    + "language plpgsql;");
+        } catch (Exception ex) {
+            fail(ex.getMessage());
+            // NOTE(review): unreachable, fail() throws first.
+            throw ex;
+        }
+        try {
+            CallableStatement cstmt = con.prepareCall("{ call bigint_proc(?,?,?) }");
+            cstmt.registerOutParameter(1, java.sql.Types.BIGINT);
+            cstmt.registerOutParameter(2, java.sql.Types.BIGINT);
+            cstmt.registerOutParameter(3, java.sql.Types.BIGINT);
+            cstmt.executeUpdate();
+            assertEquals(9223372036854775807L, cstmt.getLong(1));
+            assertEquals(-9223372036854775808L, cstmt.getLong(2));
+            // getLong on NULL returns 0; wasNull() confirms the NULL.
+            cstmt.getLong(3);
+            assertTrue(cstmt.wasNull());
+        } catch (Exception ex) {
+            fail(ex.getMessage());
+        } finally {
+            try {
+                Statement dstmt = con.createStatement();
+                dstmt.execute("drop function bigint_proc()");
+            } catch (Exception ex) {
+            }
+        }
+    }
+
+    /**
+     * getBoolean on BIT OUT parameters must return the stored true/false
+     * values and report NULL via wasNull().
+     */
+    @Test
+    public void testGetBoolean01() throws Throwable {
+        try {
+            Statement stmt = con.createStatement();
+            stmt.execute(createBitTab);
+            stmt.execute(insertBitTab);
+            boolean ret = stmt.execute("create or replace function "
+                    + "bit_proc( OUT IMAX boolean, OUT IMIN boolean, OUT INUL boolean)  as "
+                    + "'begin "
+                    + "select max_val into imax from bit_tab;"
+                    + "select min_val into imin from bit_tab;"
+                    + "select null_val into inul from bit_tab;"
+
+                    + " end;' "
+                    + "language plpgsql;");
+        } catch (Exception ex) {
+            fail(ex.getMessage());
+            // NOTE(review): unreachable, fail() throws first.
+            throw ex;
+        }
+        try {
+            CallableStatement cstmt = con.prepareCall("{ call bit_proc(?,?,?) }");
+            cstmt.registerOutParameter(1, java.sql.Types.BIT);
+            cstmt.registerOutParameter(2, java.sql.Types.BIT);
+            cstmt.registerOutParameter(3, java.sql.Types.BIT);
+            cstmt.executeUpdate();
+            assertTrue(cstmt.getBoolean(1));
+            assertFalse(cstmt.getBoolean(2));
+            // getBoolean on NULL returns false; wasNull() confirms the NULL.
+            cstmt.getBoolean(3);
+            assertTrue(cstmt.wasNull());
+        } catch (Exception ex) {
+            fail(ex.getMessage());
+        } finally {
+            try {
+                Statement dstmt = con.createStatement();
+                dstmt.execute("drop function bit_proc()");
+            } catch (Exception ex) {
+            }
+        }
+    }
+
+    /**
+     * A no-argument boolean function must be callable via the escape syntax
+     * and readable as a BOOLEAN OUT parameter.
+     */
+    @Test
+    public void testGetBooleanWithoutArg() throws SQLException {
+        // Skipped in simple-query mode (OUT-only registration unsupported there).
+        assumeNotSimpleQueryMode();
+        try (CallableStatement call = con.prepareCall("{ ? = call testspg__getBooleanWithoutArg () }")) {
+            call.registerOutParameter(1, Types.BOOLEAN);
+            call.execute();
+            assertTrue(call.getBoolean(1));
+        }
+    }
+
+    /**
+     * getByte on TINYINT-registered int2 OUT parameters must return the byte
+     * range limits (127/-128) and report NULL via wasNull().
+     */
+    @Test
+    public void testGetByte01() throws Throwable {
+        try {
+            Statement stmt = con.createStatement();
+            stmt.execute("create temp table byte_tab ( max_val int2, min_val int2, null_val int2 )");
+            stmt.execute("insert into byte_tab values (127,-128,null)");
+            boolean ret = stmt.execute("create or replace function "
+                    + "byte_proc( OUT IMAX int2, OUT IMIN int2, OUT INUL int2)  as "
+                    + "'begin "
+                    + "select max_val into imax from byte_tab;"
+                    + "select min_val into imin from byte_tab;"
+                    + "select null_val into inul from byte_tab;"
+
+                    + " end;' "
+                    + "language plpgsql;");
+        } catch (Exception ex) {
+            fail(ex.getMessage());
+            // NOTE(review): unreachable, fail() throws first.
+            throw ex;
+        }
+        try {
+            CallableStatement cstmt = con.prepareCall("{ call byte_proc(?,?,?) }");
+            cstmt.registerOutParameter(1, java.sql.Types.TINYINT);
+            cstmt.registerOutParameter(2, java.sql.Types.TINYINT);
+            cstmt.registerOutParameter(3, java.sql.Types.TINYINT);
+            cstmt.executeUpdate();
+            assertEquals(127, cstmt.getByte(1));
+            assertEquals(-128, cstmt.getByte(2));
+            // getByte on NULL returns 0; wasNull() confirms the NULL.
+            cstmt.getByte(3);
+            assertTrue(cstmt.wasNull());
+        } catch (Exception ex) {
+            fail(ex.getMessage());
+        } finally {
+            try {
+                Statement dstmt = con.createStatement();
+                dstmt.execute("drop function byte_proc()");
+            } catch (Exception ex) {
+            }
+        }
+    }
+
+    /**
+     * A CallableStatement must support repeated register/set/execute cycles;
+     * each iteration's OUT values must reflect that iteration's input.
+     */
+    @Test
+    public void testMultipleOutExecutions() throws SQLException {
+        // try-with-resources added so the statement is closed even on failure.
+        try (CallableStatement cs = con.prepareCall("{call myiofunc(?, ?)}")) {
+            for (int i = 0; i < 10; i++) {
+                cs.registerOutParameter(1, Types.INTEGER);
+                cs.registerOutParameter(2, Types.INTEGER);
+                cs.setInt(1, i);
+                cs.execute();
+                assertEquals(1, cs.getInt(1));
+                assertEquals(i, cs.getInt(2));
+                cs.clearParameters();
+            }
+        }
+    }
+
+    /**
+     * mysum(a, b) called through the function-return escape syntax must
+     * deliver a + b in the first (OUT) placeholder.
+     */
+    @Test
+    public void testSum() throws SQLException {
+        CallableStatement cs = con.prepareCall("{?= call mysum(?, ?)}");
+        cs.registerOutParameter(1, Types.INTEGER);
+        cs.setInt(2, 2);
+        cs.setInt(3, 3);
+        cs.execute();
+        assertEquals("2+3 should be 5 when executed via {?= call mysum(?, ?)}", 5, cs.getInt(1));
+        // Close like the sibling tests do so the statement is not leaked.
+        TestUtil.closeQuietly(cs);
+    }
+
+    /**
+     * A zero-argument function invoked WITH parentheses in the escape syntax
+     * must execute and return its value.
+     */
+    @Test
+    public void testFunctionNoParametersWithParentheses() throws SQLException {
+        CallableStatement cs = con.prepareCall("{?= call mynoparams()}");
+        cs.registerOutParameter(1, Types.INTEGER);
+        cs.execute();
+        // Fixed message: the function is named mynoparams, not mynoparam.
+        assertEquals("{?= call mynoparams()} should return 733, but did not.", 733, cs.getInt(1));
+        TestUtil.closeQuietly(cs);
+    }
+
+    /**
+     * A zero-argument function invoked WITHOUT parentheses in the escape
+     * syntax must also execute and return its value.
+     */
+    @Test
+    public void testFunctionNoParametersWithoutParentheses() throws SQLException {
+        CallableStatement cs = con.prepareCall("{?= call mynoparams}");
+        cs.registerOutParameter(1, Types.INTEGER);
+        cs.execute();
+        // Fixed message: name is mynoparams and this variant has no parentheses.
+        assertEquals("{?= call mynoparams} should return 733, but did not.", 733, cs.getInt(1));
+        TestUtil.closeQuietly(cs);
+    }
+
+    /**
+     * A zero-argument procedure invoked WITH parentheses must execute without
+     * error (no result to assert).
+     */
+    @Test
+    public void testProcedureNoParametersWithParentheses() throws SQLException {
+        CallableStatement cs = con.prepareCall("{ call mynoparamsproc()}");
+        cs.execute();
+        TestUtil.closeQuietly(cs);
+    }
+
+    /**
+     * A zero-argument procedure invoked WITHOUT parentheses must execute
+     * without error (no result to assert).
+     */
+    @Test
+    public void testProcedureNoParametersWithoutParentheses() throws SQLException {
+        CallableStatement cs = con.prepareCall("{ call mynoparamsproc}");
+        cs.execute();
+        TestUtil.closeQuietly(cs);
+    }
+
+    /**
+     * Native CALL syntax (no JDBC escape) with an IN-only parameter must work
+     * on servers that support procedures (v11+).
+     */
+    @Test
+    public void testProcedureInOnlyNativeCall() throws SQLException {
+        assumeMinimumServerVersion(ServerVersion.v11);
+        CallableStatement cs = con.prepareCall("call inonlyprocedure(?)");
+        cs.setInt(1, 5);
+        cs.execute();
+        TestUtil.closeQuietly(cs);
+    }
+
+    /**
+     * Native CALL syntax with an INOUT parameter: the same placeholder is
+     * bound as input and registered as output, and must return the doubled
+     * value after execution.
+     */
+    @Test
+    public void testProcedureInOutNativeCall() throws SQLException {
+        assumeMinimumServerVersion(ServerVersion.v11);
+        // inoutprocedure(a INOUT int) returns a*2 via the INOUT parameter
+        CallableStatement cs = con.prepareCall("call inoutprocedure(?)");
+        cs.setInt(1, 5);
+        cs.registerOutParameter(1, Types.INTEGER);
+        cs.execute();
+        assertEquals("call inoutprocedure(?) should return 10 (when input param = 5) via the INOUT parameter, but did not.", 10, cs.getInt(1));
+        TestUtil.closeQuietly(cs);
+    }
+
+    @Test
+    public void testCall5Times() throws SQLException {
+        assumeMinimumServerVersion(ServerVersion.v14);
+        // call this enough times to change to binary mode
+        for (int i = 0; i < 6; i++) {
+            con.setAutoCommit(false);
+            try (CallableStatement proc = con.prepareCall("call testspg_refcursor( ? , ? )")) {
+                proc.setDate(1, java.sql.Date.valueOf(LocalDate.now()));
+                proc.registerOutParameter(2, Types.REF_CURSOR);
+                proc.execute();
+                try (ResultSet results = (ResultSet) proc.getObject(2)) {
+                    while (results.next()) {
+                        System.out.println("  " + i + " " + results.getTimestamp("now").toLocalDateTime());
+                    }
+                }
+            }
+            con.commit();
         }
-      }
-      con.commit();
     }
-  }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc3/Jdbc3SavepointTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc3/Jdbc3SavepointTest.java
index 6e82a3d..c6689d2 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc3/Jdbc3SavepointTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc3/Jdbc3SavepointTest.java
@@ -23,213 +23,213 @@ import java.sql.Statement;
 
 class Jdbc3SavepointTest {
 
-  private Connection conn;
+    private Connection conn;
 
-  @BeforeEach
-  void setUp() throws Exception {
-    conn = TestUtil.openDB();
-    TestUtil.createTable(conn, "savepointtable", "id int primary key");
-    conn.setAutoCommit(false);
-  }
-
-  @AfterEach
-  void tearDown() throws SQLException {
-    conn.setAutoCommit(true);
-    TestUtil.dropTable(conn, "savepointtable");
-    TestUtil.closeDB(conn);
-  }
-
-  private boolean hasSavepoints() throws SQLException {
-    return true;
-  }
-
-  private void addRow(int id) throws SQLException {
-    PreparedStatement pstmt = conn.prepareStatement("INSERT INTO savepointtable VALUES (?)");
-    pstmt.setInt(1, id);
-    pstmt.executeUpdate();
-    pstmt.close();
-  }
-
-  private int countRows() throws SQLException {
-    Statement stmt = conn.createStatement();
-    ResultSet rs = stmt.executeQuery("SELECT COUNT(*) FROM savepointtable");
-    rs.next();
-    int count = rs.getInt(1);
-    rs.close();
-    return count;
-  }
-
-  @Test
-  void autoCommitFails() throws SQLException {
-    if (!hasSavepoints()) {
-      return;
+    @BeforeEach
+    void setUp() throws Exception {
+        conn = TestUtil.openDB();
+        TestUtil.createTable(conn, "savepointtable", "id int primary key");
+        conn.setAutoCommit(false);
     }
 
-    conn.setAutoCommit(true);
-
-    try {
-      conn.setSavepoint();
-      fail("Can't create a savepoint with autocommit.");
-    } catch (SQLException sqle) {
+    @AfterEach
+    void tearDown() throws SQLException {
+        conn.setAutoCommit(true);
+        TestUtil.dropTable(conn, "savepointtable");
+        TestUtil.closeDB(conn);
     }
 
-    try {
-      conn.setSavepoint("spname");
-      fail("Can't create a savepoint with autocommit.");
-    } catch (SQLException sqle) {
-    }
-  }
-
-  @Test
-  void cantMixSavepointTypes() throws SQLException {
-    if (!hasSavepoints()) {
-      return;
+    private boolean hasSavepoints() throws SQLException {
+        return true;
     }
 
-    Savepoint namedSavepoint = conn.setSavepoint("named");
-    Savepoint unNamedSavepoint = conn.setSavepoint();
-
-    try {
-      namedSavepoint.getSavepointId();
-      fail("Can't get id from named savepoint.");
-    } catch (SQLException sqle) {
+    private void addRow(int id) throws SQLException {
+        PreparedStatement pstmt = conn.prepareStatement("INSERT INTO savepointtable VALUES (?)");
+        pstmt.setInt(1, id);
+        pstmt.executeUpdate();
+        pstmt.close();
     }
 
-    try {
-      unNamedSavepoint.getSavepointName();
-      fail("Can't get name from unnamed savepoint.");
-    } catch (SQLException sqle) {
+    private int countRows() throws SQLException {
+        Statement stmt = conn.createStatement();
+        ResultSet rs = stmt.executeQuery("SELECT COUNT(*) FROM savepointtable");
+        rs.next();
+        int count = rs.getInt(1);
+        rs.close();
+        return count;
     }
 
-  }
+    @Test
+    void autoCommitFails() throws SQLException {
+        if (!hasSavepoints()) {
+            return;
+        }
 
-  @Test
-  void rollingBackToSavepoints() throws SQLException {
-    if (!hasSavepoints()) {
-      return;
+        conn.setAutoCommit(true);
+
+        try {
+            conn.setSavepoint();
+            fail("Can't create a savepoint with autocommit.");
+        } catch (SQLException sqle) {
+        }
+
+        try {
+            conn.setSavepoint("spname");
+            fail("Can't create a savepoint with autocommit.");
+        } catch (SQLException sqle) {
+        }
     }
 
-    Savepoint empty = conn.setSavepoint();
-    addRow(1);
-    Savepoint onerow = conn.setSavepoint("onerow");
-    addRow(2);
+    @Test
+    void cantMixSavepointTypes() throws SQLException {
+        if (!hasSavepoints()) {
+            return;
+        }
 
-    assertEquals(2, countRows());
-    conn.rollback(onerow);
-    assertEquals(1, countRows());
-    conn.rollback(empty);
-    assertEquals(0, countRows());
-  }
+        Savepoint namedSavepoint = conn.setSavepoint("named");
+        Savepoint unNamedSavepoint = conn.setSavepoint();
+
+        try {
+            namedSavepoint.getSavepointId();
+            fail("Can't get id from named savepoint.");
+        } catch (SQLException sqle) {
+        }
+
+        try {
+            unNamedSavepoint.getSavepointName();
+            fail("Can't get name from unnamed savepoint.");
+        } catch (SQLException sqle) {
+        }
 
-  @Test
-  void globalRollbackWorks() throws SQLException {
-    if (!hasSavepoints()) {
-      return;
     }
 
-    conn.setSavepoint();
-    addRow(1);
-    conn.setSavepoint("onerow");
-    addRow(2);
+    @Test
+    void rollingBackToSavepoints() throws SQLException {
+        if (!hasSavepoints()) {
+            return;
+        }
 
-    assertEquals(2, countRows());
-    conn.rollback();
-    assertEquals(0, countRows());
-  }
+        Savepoint empty = conn.setSavepoint();
+        addRow(1);
+        Savepoint onerow = conn.setSavepoint("onerow");
+        addRow(2);
 
-  @Test
-  void continueAfterError() throws SQLException {
-    if (!hasSavepoints()) {
-      return;
+        assertEquals(2, countRows());
+        conn.rollback(onerow);
+        assertEquals(1, countRows());
+        conn.rollback(empty);
+        assertEquals(0, countRows());
     }
 
-    addRow(1);
-    Savepoint savepoint = conn.setSavepoint();
-    try {
-      addRow(1);
-      fail("Should have thrown duplicate key exception");
-    } catch (SQLException sqle) {
-      conn.rollback(savepoint);
+    @Test
+    void globalRollbackWorks() throws SQLException {
+        if (!hasSavepoints()) {
+            return;
+        }
+
+        conn.setSavepoint();
+        addRow(1);
+        conn.setSavepoint("onerow");
+        addRow(2);
+
+        assertEquals(2, countRows());
+        conn.rollback();
+        assertEquals(0, countRows());
     }
 
-    assertEquals(1, countRows());
-    addRow(2);
-    assertEquals(2, countRows());
-  }
+    @Test
+    void continueAfterError() throws SQLException {
+        if (!hasSavepoints()) {
+            return;
+        }
 
-  @Test
-  void releaseSavepoint() throws SQLException {
-    if (!hasSavepoints()) {
-      return;
+        addRow(1);
+        Savepoint savepoint = conn.setSavepoint();
+        try {
+            addRow(1);
+            fail("Should have thrown duplicate key exception");
+        } catch (SQLException sqle) {
+            conn.rollback(savepoint);
+        }
+
+        assertEquals(1, countRows());
+        addRow(2);
+        assertEquals(2, countRows());
     }
 
-    Savepoint savepoint = conn.setSavepoint("mysavepoint");
-    conn.releaseSavepoint(savepoint);
-    try {
-      savepoint.getSavepointName();
-      fail("Can't use savepoint after release.");
-    } catch (SQLException sqle) {
+    @Test
+    void releaseSavepoint() throws SQLException {
+        if (!hasSavepoints()) {
+            return;
+        }
+
+        Savepoint savepoint = conn.setSavepoint("mysavepoint");
+        conn.releaseSavepoint(savepoint);
+        try {
+            savepoint.getSavepointName();
+            fail("Can't use savepoint after release.");
+        } catch (SQLException sqle) {
+        }
+
+        savepoint = conn.setSavepoint();
+        conn.releaseSavepoint(savepoint);
+        try {
+            savepoint.getSavepointId();
+            fail("Can't use savepoint after release.");
+        } catch (SQLException sqle) {
+        }
     }
 
-    savepoint = conn.setSavepoint();
-    conn.releaseSavepoint(savepoint);
-    try {
-      savepoint.getSavepointId();
-      fail("Can't use savepoint after release.");
-    } catch (SQLException sqle) {
-    }
-  }
+    @Test
+    void complicatedSavepointName() throws SQLException {
+        if (!hasSavepoints()) {
+            return;
+        }
 
-  @Test
-  void complicatedSavepointName() throws SQLException {
-    if (!hasSavepoints()) {
-      return;
+        Savepoint savepoint = conn.setSavepoint("name with spaces + \"quotes\"");
+        conn.rollback(savepoint);
+        conn.releaseSavepoint(savepoint);
     }
 
-    Savepoint savepoint = conn.setSavepoint("name with spaces + \"quotes\"");
-    conn.rollback(savepoint);
-    conn.releaseSavepoint(savepoint);
-  }
+    @Test
+    void rollingBackToInvalidSavepointFails() throws SQLException {
+        if (!hasSavepoints()) {
+            return;
+        }
 
-  @Test
-  void rollingBackToInvalidSavepointFails() throws SQLException {
-    if (!hasSavepoints()) {
-      return;
+        Savepoint sp1 = conn.setSavepoint();
+        Savepoint sp2 = conn.setSavepoint();
+
+        conn.rollback(sp1);
+        try {
+            conn.rollback(sp2);
+            fail("Can't rollback to a savepoint that's invalid.");
+        } catch (SQLException sqle) {
+        }
     }
 
-    Savepoint sp1 = conn.setSavepoint();
-    Savepoint sp2 = conn.setSavepoint();
+    @Test
+    void rollbackMultipleTimes() throws SQLException {
+        if (!hasSavepoints()) {
+            return;
+        }
 
-    conn.rollback(sp1);
-    try {
-      conn.rollback(sp2);
-      fail("Can't rollback to a savepoint that's invalid.");
-    } catch (SQLException sqle) {
+        addRow(1);
+        Savepoint savepoint = conn.setSavepoint();
+
+        addRow(2);
+        conn.rollback(savepoint);
+        assertEquals(1, countRows());
+
+        conn.rollback(savepoint);
+        assertEquals(1, countRows());
+
+        addRow(2);
+        conn.rollback(savepoint);
+        assertEquals(1, countRows());
+
+        conn.releaseSavepoint(savepoint);
+        assertEquals(1, countRows());
     }
-  }
-
-  @Test
-  void rollbackMultipleTimes() throws SQLException {
-    if (!hasSavepoints()) {
-      return;
-    }
-
-    addRow(1);
-    Savepoint savepoint = conn.setSavepoint();
-
-    addRow(2);
-    conn.rollback(savepoint);
-    assertEquals(1, countRows());
-
-    conn.rollback(savepoint);
-    assertEquals(1, countRows());
-
-    addRow(2);
-    conn.rollback(savepoint);
-    assertEquals(1, countRows());
-
-    conn.releaseSavepoint(savepoint);
-    assertEquals(1, countRows());
-  }
 
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc3/Jdbc3TestSuite.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc3/Jdbc3TestSuite.java
index a059d5f..5083e15 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc3/Jdbc3TestSuite.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc3/Jdbc3TestSuite.java
@@ -13,23 +13,23 @@ import org.junit.runners.Suite;
  */
 @RunWith(Suite.class)
 @Suite.SuiteClasses({
-    CompositeQueryParseTest.class,
-    CompositeTest.class,
-    DatabaseMetaDataTest.class,
-    EscapeSyntaxCallModeCallTest.class,
-    EscapeSyntaxCallModeCallIfNoReturnTest.class,
-    EscapeSyntaxCallModeSelectTest.class,
-    GeneratedKeysTest.class,
-    Jdbc3BlobTest.class,
-    Jdbc3CallableStatementTest.class,
-    Jdbc3SavepointTest.class,
-    ParameterMetaDataTest.class,
-    ProcedureTransactionTest.class,
-    ResultSetTest.class,
-    SendRecvBufferSizeTest.class,
-    SqlCommandParseTest.class,
-    StringTypeParameterTest.class,
-    TypesTest.class,
+        CompositeQueryParseTest.class,
+        CompositeTest.class,
+        DatabaseMetaDataTest.class,
+        EscapeSyntaxCallModeCallTest.class,
+        EscapeSyntaxCallModeCallIfNoReturnTest.class,
+        EscapeSyntaxCallModeSelectTest.class,
+        GeneratedKeysTest.class,
+        Jdbc3BlobTest.class,
+        Jdbc3CallableStatementTest.class,
+        Jdbc3SavepointTest.class,
+        ParameterMetaDataTest.class,
+        ProcedureTransactionTest.class,
+        ResultSetTest.class,
+        SendRecvBufferSizeTest.class,
+        SqlCommandParseTest.class,
+        StringTypeParameterTest.class,
+        TypesTest.class,
 })
 public class Jdbc3TestSuite {
 
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc3/ParameterMetaDataTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc3/ParameterMetaDataTest.java
index b3b86d3..24d987e 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc3/ParameterMetaDataTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc3/ParameterMetaDataTest.java
@@ -28,105 +28,105 @@ import java.util.Collection;
 
 @RunWith(Parameterized.class)
 public class ParameterMetaDataTest extends BaseTest4 {
-  public ParameterMetaDataTest(BinaryMode binaryMode) {
-    setBinaryMode(binaryMode);
-  }
-
-  @Parameterized.Parameters(name = "binary = {0}")
-  public static Iterable<Object[]> data() {
-    Collection<Object[]> ids = new ArrayList<>();
-    for (BinaryMode binaryMode : BinaryMode.values()) {
-      ids.add(new Object[]{binaryMode});
+    public ParameterMetaDataTest(BinaryMode binaryMode) {
+        setBinaryMode(binaryMode);
     }
-    return ids;
-  }
 
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-    Assume.assumeTrue("simple protocol only does not support describe statement requests",
-        preferQueryMode != PreferQueryMode.SIMPLE);
-    TestUtil.createTable(con, "parametertest",
-        "a int4, b float8, c text, d point, e timestamp with time zone");
-  }
-
-  @Override
-  public void tearDown() throws SQLException {
-    TestUtil.dropTable(con, "parametertest");
-    super.tearDown();
-  }
-
-  @Test
-  public void testParameterMD() throws SQLException {
-    PreparedStatement pstmt =
-        con.prepareStatement("SELECT a FROM parametertest WHERE b = ? AND c = ? AND d >^ ? ");
-    ParameterMetaData pmd = pstmt.getParameterMetaData();
-
-    assertEquals(3, pmd.getParameterCount());
-    assertEquals(Types.DOUBLE, pmd.getParameterType(1));
-    assertEquals("float8", pmd.getParameterTypeName(1));
-    assertEquals("java.lang.Double", pmd.getParameterClassName(1));
-    assertEquals(Types.VARCHAR, pmd.getParameterType(2));
-    assertEquals("text", pmd.getParameterTypeName(2));
-    assertEquals("java.lang.String", pmd.getParameterClassName(2));
-    assertEquals(Types.OTHER, pmd.getParameterType(3));
-    assertEquals("point", pmd.getParameterTypeName(3));
-    assertEquals("org.postgresql.geometric.PGpoint", pmd.getParameterClassName(3));
-
-    pstmt.close();
-  }
-
-  @Test
-  public void testFailsOnBadIndex() throws SQLException {
-    PreparedStatement pstmt =
-        con.prepareStatement("SELECT a FROM parametertest WHERE b = ? AND c = ?");
-    ParameterMetaData pmd = pstmt.getParameterMetaData();
-    try {
-      pmd.getParameterType(0);
-      fail("Can't get parameter for index < 1.");
-    } catch (SQLException sqle) {
+    @Parameterized.Parameters(name = "binary = {0}")
+    public static Iterable<Object[]> data() {
+        Collection<Object[]> ids = new ArrayList<>();
+        for (BinaryMode binaryMode : BinaryMode.values()) {
+            ids.add(new Object[]{binaryMode});
+        }
+        return ids;
     }
-    try {
-      pmd.getParameterType(3);
-      fail("Can't get parameter for index 3 with only two parameters.");
-    } catch (SQLException sqle) {
+
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        Assume.assumeTrue("simple protocol only does not support describe statement requests",
+                preferQueryMode != PreferQueryMode.SIMPLE);
+        TestUtil.createTable(con, "parametertest",
+                "a int4, b float8, c text, d point, e timestamp with time zone");
     }
-  }
 
-  // Make sure we work when mashing two queries into a single statement.
-  @Test
-  public void testMultiStatement() throws SQLException {
-    PreparedStatement pstmt = con.prepareStatement(
-        "SELECT a FROM parametertest WHERE b = ? AND c = ? ; SELECT b FROM parametertest WHERE a = ?");
-    ParameterMetaData pmd = pstmt.getParameterMetaData();
+    @Override
+    public void tearDown() throws SQLException {
+        TestUtil.dropTable(con, "parametertest");
+        super.tearDown();
+    }
 
-    assertEquals(3, pmd.getParameterCount());
-    assertEquals(Types.DOUBLE, pmd.getParameterType(1));
-    assertEquals("float8", pmd.getParameterTypeName(1));
-    assertEquals(Types.VARCHAR, pmd.getParameterType(2));
-    assertEquals("text", pmd.getParameterTypeName(2));
-    assertEquals(Types.INTEGER, pmd.getParameterType(3));
-    assertEquals("int4", pmd.getParameterTypeName(3));
+    @Test
+    public void testParameterMD() throws SQLException {
+        PreparedStatement pstmt =
+                con.prepareStatement("SELECT a FROM parametertest WHERE b = ? AND c = ? AND d >^ ? ");
+        ParameterMetaData pmd = pstmt.getParameterMetaData();
 
-    pstmt.close();
+        assertEquals(3, pmd.getParameterCount());
+        assertEquals(Types.DOUBLE, pmd.getParameterType(1));
+        assertEquals("float8", pmd.getParameterTypeName(1));
+        assertEquals("java.lang.Double", pmd.getParameterClassName(1));
+        assertEquals(Types.VARCHAR, pmd.getParameterType(2));
+        assertEquals("text", pmd.getParameterTypeName(2));
+        assertEquals("java.lang.String", pmd.getParameterClassName(2));
+        assertEquals(Types.OTHER, pmd.getParameterType(3));
+        assertEquals("point", pmd.getParameterTypeName(3));
+        assertEquals("org.postgresql.geometric.PGpoint", pmd.getParameterClassName(3));
 
-  }
+        pstmt.close();
+    }
 
-  // Here we test that we can legally change the resolved type
-  // from text to varchar with the complicating factor that there
-  // is also an unknown parameter.
-  //
-  @Test
-  public void testTypeChangeWithUnknown() throws SQLException {
-    PreparedStatement pstmt =
-        con.prepareStatement("SELECT a FROM parametertest WHERE c = ? AND e = ?");
-    ParameterMetaData pmd = pstmt.getParameterMetaData();
+    @Test
+    public void testFailsOnBadIndex() throws SQLException {
+        PreparedStatement pstmt =
+                con.prepareStatement("SELECT a FROM parametertest WHERE b = ? AND c = ?");
+        ParameterMetaData pmd = pstmt.getParameterMetaData();
+        try {
+            pmd.getParameterType(0);
+            fail("Can't get parameter for index < 1.");
+        } catch (SQLException sqle) {
+        }
+        try {
+            pmd.getParameterType(3);
+            fail("Can't get parameter for index 3 with only two parameters.");
+        } catch (SQLException sqle) {
+        }
+    }
 
-    pstmt.setString(1, "Hi");
-    pstmt.setTimestamp(2, new Timestamp(0L));
+    // Make sure we work when mashing two queries into a single statement.
+    @Test
+    public void testMultiStatement() throws SQLException {
+        PreparedStatement pstmt = con.prepareStatement(
+                "SELECT a FROM parametertest WHERE b = ? AND c = ? ; SELECT b FROM parametertest WHERE a = ?");
+        ParameterMetaData pmd = pstmt.getParameterMetaData();
 
-    ResultSet rs = pstmt.executeQuery();
-    rs.close();
-  }
+        assertEquals(3, pmd.getParameterCount());
+        assertEquals(Types.DOUBLE, pmd.getParameterType(1));
+        assertEquals("float8", pmd.getParameterTypeName(1));
+        assertEquals(Types.VARCHAR, pmd.getParameterType(2));
+        assertEquals("text", pmd.getParameterTypeName(2));
+        assertEquals(Types.INTEGER, pmd.getParameterType(3));
+        assertEquals("int4", pmd.getParameterTypeName(3));
+
+        pstmt.close();
+
+    }
+
+    // Here we test that we can legally change the resolved type
+    // from text to varchar with the complicating factor that there
+    // is also an unknown parameter.
+    //
+    @Test
+    public void testTypeChangeWithUnknown() throws SQLException {
+        PreparedStatement pstmt =
+                con.prepareStatement("SELECT a FROM parametertest WHERE c = ? AND e = ?");
+        ParameterMetaData pmd = pstmt.getParameterMetaData();
+
+        pstmt.setString(1, "Hi");
+        pstmt.setTimestamp(2, new Timestamp(0L));
+
+        ResultSet rs = pstmt.executeQuery();
+        rs.close();
+    }
 
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc3/ProcedureTransactionTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc3/ProcedureTransactionTest.java
index 07c85c3..b16555e 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc3/ProcedureTransactionTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc3/ProcedureTransactionTest.java
@@ -27,147 +27,147 @@ import java.sql.Statement;
 import java.util.Properties;
 
 public class ProcedureTransactionTest extends BaseTest4 {
-  @BeforeClass
-  public static void beforeClass() throws Exception {
-    try (Connection con = TestUtil.openDB()) {
-      assumeCallableStatementsSupported(con);
-    }
-  }
-
-  @Override
-  protected void updateProperties(Properties props) {
-    super.updateProperties(props);
-    PGProperty.ESCAPE_SYNTAX_CALL_MODE.set(props, EscapeSyntaxCallMode.CALL_IF_NO_RETURN.value());
-  }
-
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-    Statement stmt = con.createStatement();
-    if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v11)) {
-      stmt.execute("create temp table proc_test ( some_val bigint )");
-      stmt.execute(
-          "CREATE OR REPLACE PROCEDURE mycommitproc(a INOUT bigint) AS 'BEGIN INSERT INTO proc_test values(a); commit; END;' LANGUAGE plpgsql");
-      stmt.execute(
-          "CREATE OR REPLACE PROCEDURE myrollbackproc(a INOUT bigint) AS 'BEGIN INSERT INTO proc_test values(a); rollback; END;' LANGUAGE plpgsql");
-      stmt.execute(
-          "CREATE OR REPLACE PROCEDURE mynotxnproc(a INOUT bigint) AS 'BEGIN INSERT INTO proc_test values(a); END;' LANGUAGE plpgsql");
-    }
-  }
-
-  @Override
-  public void tearDown() throws SQLException {
-    Statement stmt = con.createStatement();
-    if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v11)) {
-      stmt.execute("drop procedure mycommitproc(a INOUT bigint) ");
-      stmt.execute("drop procedure myrollbackproc(a INOUT bigint) ");
-      stmt.execute("drop procedure mynotxnproc(a INOUT bigint) ");
-      stmt.execute("drop table proc_test ");
-    }
-    stmt.close();
-    super.tearDown();
-  }
-
-  @Test
-  public void testProcWithNoTxnControl() throws SQLException {
-    assumeMinimumServerVersion(ServerVersion.v11);
-    CallableStatement cs = con.prepareCall("call mynotxnproc(?)");
-    int val = 1;
-    cs.setInt(1, val);
-    cs.execute();
-    TestUtil.closeQuietly(cs);
-
-    cs = con.prepareCall("select some_val from proc_test where some_val = ?");
-    cs.setInt(1, val);
-    ResultSet rs = cs.executeQuery();
-
-    assertTrue(rs.next());
-    assertTrue(rs.getInt(1) == val);
-
-    TestUtil.closeQuietly(rs);
-    TestUtil.closeQuietly(cs);
-  }
-
-  @Test
-  public void testProcWithCommitInside() throws SQLException {
-    assumeMinimumServerVersion(ServerVersion.v11);
-    CallableStatement cs = con.prepareCall("call mycommitproc(?)");
-    int val = 2;
-    cs.setInt(1, val);
-    cs.execute();
-    TestUtil.closeQuietly(cs);
-
-    cs = con.prepareCall("select some_val from proc_test where some_val = ?");
-    cs.setInt(1, val);
-    ResultSet rs = cs.executeQuery();
-
-    assertTrue(rs.next());
-    assertTrue(rs.getInt(1) == val);
-
-    TestUtil.closeQuietly(rs);
-    TestUtil.closeQuietly(cs);
-  }
-
-  @Test
-  public void testProcWithRollbackInside() throws SQLException {
-    assumeMinimumServerVersion(ServerVersion.v11);
-    CallableStatement cs = con.prepareCall("call myrollbackproc(?)");
-    int val = 3;
-    cs.setInt(1, val);
-    cs.execute();
-    TestUtil.closeQuietly(cs);
-
-    cs = con.prepareCall("select some_val from proc_test where some_val = ?");
-    cs.setInt(1, val);
-    ResultSet rs = cs.executeQuery();
-
-    assertFalse(rs.next());
-
-    TestUtil.closeQuietly(rs);
-    TestUtil.closeQuietly(cs);
-  }
-
-  @Test
-  public void testProcAutoCommitTrue() throws SQLException {
-    con.setAutoCommit(true);
-    testProcAutoCommit();
-  }
-
-  @Test
-  public void testProcAutoCommitFalse() throws SQLException {
-    // setting autocommit false enables application transaction control, meaning JDBC driver issues a BEGIN
-    // as of PostgreSQL 11, Stored Procedures with transaction control inside the procedure cannot be
-    // invoked inside a transaction, the procedure must start the top level transaction
-    // see: https://www.postgresql.org/docs/current/plpgsql-transactions.html
-    con.setAutoCommit(false);
-    try {
-      testProcAutoCommit();
-      fail("Should throw an exception");
-    } catch (SQLException ex) {
-      //2D000 invalid_transaction_termination
-      assertTrue(ex.getSQLState().equalsIgnoreCase(PSQLState.INVALID_TRANSACTION_TERMINATION.getState()));
-      con.rollback();
+    @BeforeClass
+    public static void beforeClass() throws Exception {
+        try (Connection con = TestUtil.openDB()) {
+            assumeCallableStatementsSupported(con);
+        }
     }
 
-  }
+    @Override
+    protected void updateProperties(Properties props) {
+        super.updateProperties(props);
+        PGProperty.ESCAPE_SYNTAX_CALL_MODE.set(props, EscapeSyntaxCallMode.CALL_IF_NO_RETURN.value());
+    }
 
-  private void testProcAutoCommit() throws SQLException {
-    assumeMinimumServerVersion(ServerVersion.v11);
-    CallableStatement cs = con.prepareCall("call mycommitproc(?)");
-    int val = 4;
-    cs.setInt(1, val);
-    cs.execute();
-    TestUtil.closeQuietly(cs);
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        Statement stmt = con.createStatement();
+        if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v11)) {
+            stmt.execute("create temp table proc_test ( some_val bigint )");
+            stmt.execute(
+                    "CREATE OR REPLACE PROCEDURE mycommitproc(a INOUT bigint) AS 'BEGIN INSERT INTO proc_test values(a); commit; END;' LANGUAGE plpgsql");
+            stmt.execute(
+                    "CREATE OR REPLACE PROCEDURE myrollbackproc(a INOUT bigint) AS 'BEGIN INSERT INTO proc_test values(a); rollback; END;' LANGUAGE plpgsql");
+            stmt.execute(
+                    "CREATE OR REPLACE PROCEDURE mynotxnproc(a INOUT bigint) AS 'BEGIN INSERT INTO proc_test values(a); END;' LANGUAGE plpgsql");
+        }
+    }
 
-    cs = con.prepareCall("select some_val from proc_test where some_val = ?");
-    cs.setInt(1, val);
-    ResultSet rs = cs.executeQuery();
+    @Override
+    public void tearDown() throws SQLException {
+        Statement stmt = con.createStatement();
+        if (TestUtil.haveMinimumServerVersion(con, ServerVersion.v11)) {
+            stmt.execute("drop procedure mycommitproc(a INOUT bigint) ");
+            stmt.execute("drop procedure myrollbackproc(a INOUT bigint) ");
+            stmt.execute("drop procedure mynotxnproc(a INOUT bigint) ");
+            stmt.execute("drop table proc_test ");
+        }
+        stmt.close();
+        super.tearDown();
+    }
 
-    assertTrue(rs.next());
-    assertTrue(rs.getInt(1) == val);
+    @Test
+    public void testProcWithNoTxnControl() throws SQLException {
+        assumeMinimumServerVersion(ServerVersion.v11);
+        CallableStatement cs = con.prepareCall("call mynotxnproc(?)");
+        int val = 1;
+        cs.setInt(1, val);
+        cs.execute();
+        TestUtil.closeQuietly(cs);
 
-    TestUtil.closeQuietly(rs);
-    TestUtil.closeQuietly(cs);
-  }
+        cs = con.prepareCall("select some_val from proc_test where some_val = ?");
+        cs.setInt(1, val);
+        ResultSet rs = cs.executeQuery();
+
+        assertTrue(rs.next());
+        assertTrue(rs.getInt(1) == val);
+
+        TestUtil.closeQuietly(rs);
+        TestUtil.closeQuietly(cs);
+    }
+
+    @Test
+    public void testProcWithCommitInside() throws SQLException {
+        assumeMinimumServerVersion(ServerVersion.v11);
+        CallableStatement cs = con.prepareCall("call mycommitproc(?)");
+        int val = 2;
+        cs.setInt(1, val);
+        cs.execute();
+        TestUtil.closeQuietly(cs);
+
+        cs = con.prepareCall("select some_val from proc_test where some_val = ?");
+        cs.setInt(1, val);
+        ResultSet rs = cs.executeQuery();
+
+        assertTrue(rs.next());
+        assertTrue(rs.getInt(1) == val);
+
+        TestUtil.closeQuietly(rs);
+        TestUtil.closeQuietly(cs);
+    }
+
+    @Test
+    public void testProcWithRollbackInside() throws SQLException {
+        assumeMinimumServerVersion(ServerVersion.v11);
+        CallableStatement cs = con.prepareCall("call myrollbackproc(?)");
+        int val = 3;
+        cs.setInt(1, val);
+        cs.execute();
+        TestUtil.closeQuietly(cs);
+
+        cs = con.prepareCall("select some_val from proc_test where some_val = ?");
+        cs.setInt(1, val);
+        ResultSet rs = cs.executeQuery();
+
+        assertFalse(rs.next());
+
+        TestUtil.closeQuietly(rs);
+        TestUtil.closeQuietly(cs);
+    }
+
+    @Test
+    public void testProcAutoCommitTrue() throws SQLException {
+        con.setAutoCommit(true);
+        testProcAutoCommit();
+    }
+
+    @Test
+    public void testProcAutoCommitFalse() throws SQLException {
+        // setting autocommit false enables application transaction control, meaning JDBC driver issues a BEGIN
+        // as of PostgreSQL 11, Stored Procedures with transaction control inside the procedure cannot be
+        // invoked inside a transaction, the procedure must start the top level transaction
+        // see: https://www.postgresql.org/docs/current/plpgsql-transactions.html
+        con.setAutoCommit(false);
+        try {
+            testProcAutoCommit();
+            fail("Should throw an exception");
+        } catch (SQLException ex) {
+            //2D000 invalid_transaction_termination
+            assertTrue(ex.getSQLState().equalsIgnoreCase(PSQLState.INVALID_TRANSACTION_TERMINATION.getState()));
+            con.rollback();
+        }
+
+    }
+
+    private void testProcAutoCommit() throws SQLException {
+        assumeMinimumServerVersion(ServerVersion.v11);
+        CallableStatement cs = con.prepareCall("call mycommitproc(?)");
+        int val = 4;
+        cs.setInt(1, val);
+        cs.execute();
+        TestUtil.closeQuietly(cs);
+
+        cs = con.prepareCall("select some_val from proc_test where some_val = ?");
+        cs.setInt(1, val);
+        ResultSet rs = cs.executeQuery();
+
+        assertTrue(rs.next());
+        assertTrue(rs.getInt(1) == val);
+
+        TestUtil.closeQuietly(rs);
+        TestUtil.closeQuietly(cs);
+    }
 
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc3/ResultSetTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc3/ResultSetTest.java
index d2286d3..1de9bf2 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc3/ResultSetTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc3/ResultSetTest.java
@@ -22,47 +22,47 @@ import java.sql.Statement;
 
 class ResultSetTest {
 
-  private Connection conn;
+    private Connection conn;
 
-  @BeforeEach
-  void setUp() throws Exception {
-    conn = TestUtil.openDB();
-    Statement stmt = conn.createStatement();
-    stmt.execute("CREATE TEMP TABLE hold(a int)");
-    stmt.execute("INSERT INTO hold VALUES (1)");
-    stmt.execute("INSERT INTO hold VALUES (2)");
-    stmt.close();
-  }
+    @BeforeEach
+    void setUp() throws Exception {
+        conn = TestUtil.openDB();
+        Statement stmt = conn.createStatement();
+        stmt.execute("CREATE TEMP TABLE hold(a int)");
+        stmt.execute("INSERT INTO hold VALUES (1)");
+        stmt.execute("INSERT INTO hold VALUES (2)");
+        stmt.close();
+    }
 
-  @AfterEach
-  void tearDown() throws SQLException {
-    Statement stmt = conn.createStatement();
-    stmt.execute("DROP TABLE hold");
-    stmt.close();
-    TestUtil.closeDB(conn);
-  }
+    @AfterEach
+    void tearDown() throws SQLException {
+        Statement stmt = conn.createStatement();
+        stmt.execute("DROP TABLE hold");
+        stmt.close();
+        TestUtil.closeDB(conn);
+    }
 
-  @Test
-  void holdableResultSet() throws SQLException {
-    Statement stmt = conn.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY,
-        ResultSet.HOLD_CURSORS_OVER_COMMIT);
+    @Test
+    void holdableResultSet() throws SQLException {
+        Statement stmt = conn.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY,
+                ResultSet.HOLD_CURSORS_OVER_COMMIT);
 
-    conn.setAutoCommit(false);
-    stmt.setFetchSize(1);
+        conn.setAutoCommit(false);
+        stmt.setFetchSize(1);
 
-    ResultSet rs = stmt.executeQuery("SELECT a FROM hold ORDER BY a");
+        ResultSet rs = stmt.executeQuery("SELECT a FROM hold ORDER BY a");
 
-    assertTrue(rs.next());
-    assertEquals(1, rs.getInt(1));
+        assertTrue(rs.next());
+        assertEquals(1, rs.getInt(1));
 
-    conn.commit();
+        conn.commit();
 
-    assertTrue(rs.next());
-    assertEquals(2, rs.getInt(1));
-    assertFalse(rs.next());
+        assertTrue(rs.next());
+        assertEquals(2, rs.getInt(1));
+        assertFalse(rs.next());
 
-    rs.close();
-    stmt.close();
-  }
+        rs.close();
+        stmt.close();
+    }
 
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc3/SendRecvBufferSizeTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc3/SendRecvBufferSizeTest.java
index 96034c9..4ed89ef 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc3/SendRecvBufferSizeTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc3/SendRecvBufferSizeTest.java
@@ -19,34 +19,34 @@ import java.util.Properties;
 
 public class SendRecvBufferSizeTest extends BaseTest4 {
 
-  @Override
-  protected void updateProperties(Properties props) {
-    super.updateProperties(props);
-    PGProperty.SEND_BUFFER_SIZE.set(props, "1024");
-    PGProperty.RECEIVE_BUFFER_SIZE.set(props, "1024");
-  }
+    @Override
+    protected void updateProperties(Properties props) {
+        super.updateProperties(props);
+        PGProperty.SEND_BUFFER_SIZE.set(props, "1024");
+        PGProperty.RECEIVE_BUFFER_SIZE.set(props, "1024");
+    }
 
-  @Before
-  public void setUp() throws Exception {
-    super.setUp();
-    TestUtil.createTable(con, "hold", "a int");
-    Statement stmt = con.createStatement();
-    stmt.execute("INSERT INTO hold VALUES (1)");
-    stmt.execute("INSERT INTO hold VALUES (2)");
-    stmt.close();
-  }
+    @Before
+    public void setUp() throws Exception {
+        super.setUp();
+        TestUtil.createTable(con, "hold", "a int");
+        Statement stmt = con.createStatement();
+        stmt.execute("INSERT INTO hold VALUES (1)");
+        stmt.execute("INSERT INTO hold VALUES (2)");
+        stmt.close();
+    }
 
-  @After
-  public void tearDown() throws SQLException {
-    TestUtil.dropTable(con, "hold");
-    super.tearDown();
-  }
+    @After
+    public void tearDown() throws SQLException {
+        TestUtil.dropTable(con, "hold");
+        super.tearDown();
+    }
 
-  // dummy test
-  @Test
-  public void testSelect() throws SQLException {
-    Statement stmt = con.createStatement();
-    stmt.execute("select * from hold");
-    stmt.close();
-  }
+    // dummy test
+    @Test
+    public void testSelect() throws SQLException {
+        Statement stmt = con.createStatement();
+        stmt.execute("select * from hold");
+        stmt.close();
+    }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc3/SqlCommandParseTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc3/SqlCommandParseTest.java
index cb2c64e..1b0c761 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc3/SqlCommandParseTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc3/SqlCommandParseTest.java
@@ -19,29 +19,29 @@ import java.util.Arrays;
 import java.util.List;
 
 public class SqlCommandParseTest {
-  public static Iterable<Object[]> data() {
-    return Arrays.asList(new Object[][]{
-        {SqlCommandType.INSERT, "insert/**/ into table(select) values(1)"},
-        {SqlCommandType.SELECT, "select'abc'/**/ as insert"},
-        {SqlCommandType.INSERT, "INSERT/*fool /*nest comments -- parser*/*/ INTO genkeys (b,c) VALUES ('a', 2), ('b', 4) SELECT"},
-        {SqlCommandType.INSERT, "with update as (update foo set (a=?,b=?,c=?)) insert into table(select) values(1)"},
-        {SqlCommandType.INSERT, "with update as (update foo set (a=?,b=?,c=?)) insert into table(select) select * from update"},
-        {SqlCommandType.INSERT, "with update as (update foo set (a=?,b=?,c=?)) insert/**/ into table(select) values(1)"},
-        {SqlCommandType.INSERT, "with update as (update foo set (a=?,b=?,c=?)) insert /**/ into table(select) values(1)"},
-        {SqlCommandType.SELECT, "with update as (update foo set (a=?,b=?,c=?)) insert --\nas () select 1"},
-        {SqlCommandType.SELECT, "with update as (update foo set (a=?,b=?,c=?)) insert --\n/* dfhg \n*/\nas () select 1"},
-        {SqlCommandType.SELECT, "WITH x as (INSERT INTO genkeys(a,b,c) VALUES (1, 'a', 2) returning  returning a, b) select * from x"},
-        // No idea if it works, but it should be parsed as WITH
-        {SqlCommandType.WITH, "with update as (update foo set (a=?,b=?,c=?)) copy from stdin"},
-    });
-  }
+    public static Iterable<Object[]> data() {
+        return Arrays.asList(new Object[][]{
+                {SqlCommandType.INSERT, "insert/**/ into table(select) values(1)"},
+                {SqlCommandType.SELECT, "select'abc'/**/ as insert"},
+                {SqlCommandType.INSERT, "INSERT/*fool /*nest comments -- parser*/*/ INTO genkeys (b,c) VALUES ('a', 2), ('b', 4) SELECT"},
+                {SqlCommandType.INSERT, "with update as (update foo set (a=?,b=?,c=?)) insert into table(select) values(1)"},
+                {SqlCommandType.INSERT, "with update as (update foo set (a=?,b=?,c=?)) insert into table(select) select * from update"},
+                {SqlCommandType.INSERT, "with update as (update foo set (a=?,b=?,c=?)) insert/**/ into table(select) values(1)"},
+                {SqlCommandType.INSERT, "with update as (update foo set (a=?,b=?,c=?)) insert /**/ into table(select) values(1)"},
+                {SqlCommandType.SELECT, "with update as (update foo set (a=?,b=?,c=?)) insert --\nas () select 1"},
+                {SqlCommandType.SELECT, "with update as (update foo set (a=?,b=?,c=?)) insert --\n/* dfhg \n*/\nas () select 1"},
+                {SqlCommandType.SELECT, "WITH x as (INSERT INTO genkeys(a,b,c) VALUES (1, 'a', 2) returning  returning a, b) select * from x"},
+                // No idea if it works, but it should be parsed as WITH
+                {SqlCommandType.WITH, "with update as (update foo set (a=?,b=?,c=?)) copy from stdin"},
+        });
+    }
 
-  @MethodSource("data")
-  @ParameterizedTest(name = "expected={0}, sql={1}")
-  void run(SqlCommandType type, String sql) throws SQLException {
-    List<NativeQuery> queries;
-    queries = Parser.parseJdbcSql(sql, true, true, false, true, true);
-    NativeQuery query = queries.get(0);
-    assertEquals(type, query.command.getType(), sql);
-  }
+    @MethodSource("data")
+    @ParameterizedTest(name = "expected={0}, sql={1}")
+    void run(SqlCommandType type, String sql) throws SQLException {
+        List<NativeQuery> queries;
+        queries = Parser.parseJdbcSql(sql, true, true, false, true, true);
+        NativeQuery query = queries.get(0);
+        assertEquals(type, query.command.getType(), sql);
+    }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc3/StringTypeParameterTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc3/StringTypeParameterTest.java
index 569031a..ccb9967 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc3/StringTypeParameterTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc3/StringTypeParameterTest.java
@@ -30,136 +30,136 @@ import java.util.Properties;
 
 @RunWith(Parameterized.class)
 public class StringTypeParameterTest extends BaseTest4 {
-  private static final String UNSPECIFIED_STRING_TYPE = "unspecified";
+    private static final String UNSPECIFIED_STRING_TYPE = "unspecified";
 
-  private final String stringType;
+    private final String stringType;
 
-  public StringTypeParameterTest(String stringType) {
-    this.stringType = stringType;
-  }
-
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-    // Assume enum supported
-    Assume.assumeTrue(TestUtil.haveMinimumServerVersion(con, ServerVersion.v8_3));
-    TestUtil.createEnumType(con, "mood", "'happy', 'sad'");
-    TestUtil.createTable(con, "stringtypetest", "m mood");
-  }
-
-  @Override
-  protected void updateProperties(Properties props) {
-    super.updateProperties(props);
-    if (stringType != null) {
-      props.put("stringtype", stringType);
+    public StringTypeParameterTest(String stringType) {
+        this.stringType = stringType;
     }
-  }
 
-  @Override
-  public void tearDown() throws SQLException {
-    TestUtil.dropTable(con, "stringtypetest");
-    TestUtil.dropType(con, "mood");
-    super.tearDown();
-  }
-
-  @Parameterized.Parameters(name = "stringType = {0}")
-  public static Iterable<Object[]> data() {
-    Collection<Object[]> ids = new ArrayList<>();
-    for (String stringType : new String[]{null, "varchar", UNSPECIFIED_STRING_TYPE}) {
-      ids.add(new Object[]{stringType});
-    }
-    return ids;
-  }
-
-  @Test
-  public void testVarcharAsEnum() throws Exception {
-    Assume.assumeFalse(UNSPECIFIED_STRING_TYPE.equals(stringType));
-    Assume.assumeTrue(preferQueryMode != PreferQueryMode.SIMPLE);
-
-    PreparedStatement update = con.prepareStatement("insert into stringtypetest (m) values (?)");
-    for (int i = 0; i < 2; i++) {
-      update.clearParameters();
-      if (i == 0) {
-        update.setString(1, "sad");
-      } else {
-        update.setObject(1, "sad", Types.VARCHAR);
-      }
-      try {
-        update.executeUpdate();
-        fail("Expected 'column \"m\" is of type mood but expression is of type character varying', "
-            + (i == 0 ? "setString(1, \"sad\")" : "setObject(1, \"sad\", Types.VARCHAR)"));
-      } catch (SQLException e) {
-        // Exception exception is
-        // ERROR: column "m" is of type mood but expression is of type character varying
-        if (!PSQLState.DATATYPE_MISMATCH.getState().equals(e.getSQLState())) {
-          throw e;
+    @Parameterized.Parameters(name = "stringType = {0}")
+    public static Iterable<Object[]> data() {
+        Collection<Object[]> ids = new ArrayList<>();
+        for (String stringType : new String[]{null, "varchar", UNSPECIFIED_STRING_TYPE}) {
+            ids.add(new Object[]{stringType});
         }
-      }
+        return ids;
     }
-    TestUtil.closeQuietly(update);
-  }
 
-  @Test
-  public void testOtherAsEnum() throws Exception {
-    PreparedStatement update = con.prepareStatement("insert into stringtypetest (m) values (?)");
-    update.setObject(1, "happy", Types.OTHER);
-    update.executeUpdate();
-    // all good
-    TestUtil.closeQuietly(update);
-  }
-
-  @Test
-  public void testMultipleEnumBinds() throws Exception {
-    Assume.assumeFalse(UNSPECIFIED_STRING_TYPE.equals(stringType));
-    Assume.assumeTrue(preferQueryMode != PreferQueryMode.SIMPLE);
-
-    PreparedStatement query =
-        con.prepareStatement("select * from stringtypetest where m = ? or m = ?");
-    query.setString(1, "sad");
-    query.setObject(2, "sad", Types.VARCHAR);
-    try {
-      query.executeQuery();
-      fail("Expected 'operator does not exist: mood = character varying'");
-    } catch (SQLException e) {
-      // Exception exception is
-      // ERROR: operator does not exist: mood = character varying
-      if (!PSQLState.UNDEFINED_FUNCTION.getState().equals(e.getSQLState())) {
-        throw e;
-      }
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        // Assume enum supported
+        Assume.assumeTrue(TestUtil.haveMinimumServerVersion(con, ServerVersion.v8_3));
+        TestUtil.createEnumType(con, "mood", "'happy', 'sad'");
+        TestUtil.createTable(con, "stringtypetest", "m mood");
     }
-    TestUtil.closeQuietly(query);
-  }
 
-  @Test
-  public void testParameterUnspecified() throws Exception {
-    Assume.assumeTrue(UNSPECIFIED_STRING_TYPE.equals(stringType));
+    @Override
+    protected void updateProperties(Properties props) {
+        super.updateProperties(props);
+        if (stringType != null) {
+            props.put("stringtype", stringType);
+        }
+    }
 
-    PreparedStatement update = con.prepareStatement("insert into stringtypetest (m) values (?)");
-    update.setString(1, "happy");
-    update.executeUpdate();
-    // all good
+    @Override
+    public void tearDown() throws SQLException {
+        TestUtil.dropTable(con, "stringtypetest");
+        TestUtil.dropType(con, "mood");
+        super.tearDown();
+    }
 
-    update.clearParameters();
-    update.setObject(1, "happy", Types.VARCHAR);
-    update.executeUpdate();
-    // all good
-    update.close();
+    @Test
+    public void testVarcharAsEnum() throws Exception {
+        Assume.assumeFalse(UNSPECIFIED_STRING_TYPE.equals(stringType));
+        Assume.assumeTrue(preferQueryMode != PreferQueryMode.SIMPLE);
 
-    PreparedStatement query = con.prepareStatement("select * from stringtypetest where m = ?");
-    query.setString(1, "happy");
-    ResultSet rs = query.executeQuery();
-    assertTrue(rs.next());
-    assertEquals("happy", rs.getObject("m"));
-    rs.close();
+        PreparedStatement update = con.prepareStatement("insert into stringtypetest (m) values (?)");
+        for (int i = 0; i < 2; i++) {
+            update.clearParameters();
+            if (i == 0) {
+                update.setString(1, "sad");
+            } else {
+                update.setObject(1, "sad", Types.VARCHAR);
+            }
+            try {
+                update.executeUpdate();
+                fail("Expected 'column \"m\" is of type mood but expression is of type character varying', "
+                        + (i == 0 ? "setString(1, \"sad\")" : "setObject(1, \"sad\", Types.VARCHAR)"));
+            } catch (SQLException e) {
+                // Expected exception is
+                // ERROR: column "m" is of type mood but expression is of type character varying
+                if (!PSQLState.DATATYPE_MISMATCH.getState().equals(e.getSQLState())) {
+                    throw e;
+                }
+            }
+        }
+        TestUtil.closeQuietly(update);
+    }
 
-    query.clearParameters();
-    query.setObject(1, "happy", Types.VARCHAR);
-    rs = query.executeQuery();
-    assertTrue(rs.next());
-    assertEquals("happy", rs.getObject("m"));
+    @Test
+    public void testOtherAsEnum() throws Exception {
+        PreparedStatement update = con.prepareStatement("insert into stringtypetest (m) values (?)");
+        update.setObject(1, "happy", Types.OTHER);
+        update.executeUpdate();
+        // all good
+        TestUtil.closeQuietly(update);
+    }
 
-    // all good
-    rs.close();
-    query.close();
-  }
+    @Test
+    public void testMultipleEnumBinds() throws Exception {
+        Assume.assumeFalse(UNSPECIFIED_STRING_TYPE.equals(stringType));
+        Assume.assumeTrue(preferQueryMode != PreferQueryMode.SIMPLE);
+
+        PreparedStatement query =
+                con.prepareStatement("select * from stringtypetest where m = ? or m = ?");
+        query.setString(1, "sad");
+        query.setObject(2, "sad", Types.VARCHAR);
+        try {
+            query.executeQuery();
+            fail("Expected 'operator does not exist: mood = character varying'");
+        } catch (SQLException e) {
+            // Expected exception is
+            // ERROR: operator does not exist: mood = character varying
+            if (!PSQLState.UNDEFINED_FUNCTION.getState().equals(e.getSQLState())) {
+                throw e;
+            }
+        }
+        TestUtil.closeQuietly(query);
+    }
+
+    @Test
+    public void testParameterUnspecified() throws Exception {
+        Assume.assumeTrue(UNSPECIFIED_STRING_TYPE.equals(stringType));
+
+        PreparedStatement update = con.prepareStatement("insert into stringtypetest (m) values (?)");
+        update.setString(1, "happy");
+        update.executeUpdate();
+        // all good
+
+        update.clearParameters();
+        update.setObject(1, "happy", Types.VARCHAR);
+        update.executeUpdate();
+        // all good
+        update.close();
+
+        PreparedStatement query = con.prepareStatement("select * from stringtypetest where m = ?");
+        query.setString(1, "happy");
+        ResultSet rs = query.executeQuery();
+        assertTrue(rs.next());
+        assertEquals("happy", rs.getObject("m"));
+        rs.close();
+
+        query.clearParameters();
+        query.setObject(1, "happy", Types.VARCHAR);
+        rs = query.executeQuery();
+        assertTrue(rs.next());
+        assertEquals("happy", rs.getObject("m"));
+
+        // all good
+        rs.close();
+        query.close();
+    }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc3/TestReturning.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc3/TestReturning.java
index d792d5c..a992753 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc3/TestReturning.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc3/TestReturning.java
@@ -29,96 +29,95 @@ import java.util.Properties;
 @RunWith(Parameterized.class)
 public class TestReturning extends BaseTest4 {
 
-  public enum ColumnsReturned {
-    Id("Id"),
-    id("id"),
-    ID("*"),
-    QUOTED("\"Id\""),
-    NO();
+    static String[] returningOptions = {"true", "false"};
+    private final ColumnsReturned columnsReturned;
+    private final String quoteReturning;
 
-    final String[] columns;
-
-    ColumnsReturned(String... columns) {
-      this.columns =  columns;
+    public TestReturning(ColumnsReturned columnsReturned, String quoteReturning) throws Exception {
+        this.columnsReturned = columnsReturned;
+        this.quoteReturning = quoteReturning;
     }
 
-    public int columnsReturned() {
-      if (columns.length == 1 && columns[0].charAt(0) == '*') {
-        return 100500; // does not matter much, the meaning is "every possible column"
-      }
-      return columns.length;
+    @Parameterized.Parameters(name = "returningInQuery = {0}, quoteReturning = {1}")
+    public static Iterable<Object[]> data() {
+        Collection<Object[]> ids = new ArrayList<>();
+        for (ColumnsReturned columnsReturned : ColumnsReturned.values()) {
+            for (String q : returningOptions) {
+                ids.add(new Object[]{columnsReturned, q});
+            }
+        }
+        return ids;
     }
 
-    public String[] getColumnNames() {
-      if (columnsReturned() == 0) {
-        return new String[]{};
-      }
-
-      return columns;
+    protected void updateProperties(Properties props) {
+        PGProperty.QUOTE_RETURNING_IDENTIFIERS.set(props, quoteReturning);
     }
-  }
 
-  static String []returningOptions = {"true", "false"};
-
-  @Parameterized.Parameters(name = "returningInQuery = {0}, quoteReturning = {1}")
-  public static Iterable<Object[]> data() {
-    Collection<Object[]> ids = new ArrayList<>();
-    for (ColumnsReturned columnsReturned : ColumnsReturned.values()) {
-      for (String q : returningOptions) {
-        ids.add(new Object[]{columnsReturned, q});
-      }
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        TestUtil.createTempTable(con, "genkeys", "\"Id\" serial, b varchar(5), c int");
     }
-    return ids;
-  }
 
-  private final ColumnsReturned columnsReturned;
-  private final String quoteReturning;
-
-  public TestReturning(ColumnsReturned columnsReturned, String quoteReturning) throws Exception {
-    this.columnsReturned = columnsReturned;
-    this.quoteReturning = quoteReturning;
-  }
-
-  protected void updateProperties(Properties props) {
-    PGProperty.QUOTE_RETURNING_IDENTIFIERS.set(props, quoteReturning);
-  }
-
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-    TestUtil.createTempTable(con, "genkeys", "\"Id\" serial, b varchar(5), c int");
-  }
-
-  @Override
-  public void tearDown() throws SQLException {
-    TestUtil.dropTable(con, "genkeys");
-    super.tearDown();
-  }
-
-  private void testGeneratedKeys(Connection conn, String sql, String[] columnNames, boolean exceptionExpected) throws SQLException {
-
-    try (PreparedStatement stmt = conn.prepareStatement(sql, columnNames)) {
-      stmt.execute();
-      ResultSet rs = stmt.getGeneratedKeys();
-      assertNotNull(rs);
-      assertTrue(rs.next());
-      assertEquals(1, rs.getInt(1));
-    } catch (SQLException e) {
-      if ( !exceptionExpected ) {
-        fail("error getting column names: " + e.getMessage());
-      }
+    @Override
+    public void tearDown() throws SQLException {
+        TestUtil.dropTable(con, "genkeys");
+        super.tearDown();
     }
-  }
 
-  @Test
-  public void testMixedCase() throws SQLException {
+    private void testGeneratedKeys(Connection conn, String sql, String[] columnNames, boolean exceptionExpected) throws SQLException {
 
-    String insertSql = "INSERT INTO genkeys (b,c) VALUES ('hello', 1)";
+        try (PreparedStatement stmt = conn.prepareStatement(sql, columnNames)) {
+            stmt.execute();
+            ResultSet rs = stmt.getGeneratedKeys();
+            assertNotNull(rs);
+            assertTrue(rs.next());
+            assertEquals(1, rs.getInt(1));
+        } catch (SQLException e) {
+            if (!exceptionExpected) {
+                fail("error getting column names: " + e.getMessage());
+            }
+        }
+    }
 
-    testGeneratedKeys(con, insertSql, new String[]{"Id"}, "false".equals(quoteReturning));
-    testGeneratedKeys(con, insertSql, new String[]{"id"}, true);
-    testGeneratedKeys(con, insertSql, new String[]{"ID"}, true);
-    testGeneratedKeys(con, insertSql, new String[]{"\"Id\""}, "true".equals(quoteReturning));
-    testGeneratedKeys(con, insertSql, new String[]{"bad"}, true);
-  }
+    @Test
+    public void testMixedCase() throws SQLException {
+
+        String insertSql = "INSERT INTO genkeys (b,c) VALUES ('hello', 1)";
+
+        testGeneratedKeys(con, insertSql, new String[]{"Id"}, "false".equals(quoteReturning));
+        testGeneratedKeys(con, insertSql, new String[]{"id"}, true);
+        testGeneratedKeys(con, insertSql, new String[]{"ID"}, true);
+        testGeneratedKeys(con, insertSql, new String[]{"\"Id\""}, "true".equals(quoteReturning));
+        testGeneratedKeys(con, insertSql, new String[]{"bad"}, true);
+    }
+
+    public enum ColumnsReturned {
+        Id("Id"),
+        id("id"),
+        ID("*"),
+        QUOTED("\"Id\""),
+        NO();
+
+        final String[] columns;
+
+        ColumnsReturned(String... columns) {
+            this.columns = columns;
+        }
+
+        public int columnsReturned() {
+            if (columns.length == 1 && columns[0].charAt(0) == '*') {
+                return 100500; // does not matter much, the meaning is "every possible column"
+            }
+            return columns.length;
+        }
+
+        public String[] getColumnNames() {
+            if (columnsReturned() == 0) {
+                return new String[]{};
+            }
+
+            return columns;
+        }
+    }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc3/TypesTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc3/TypesTest.java
index f1bb91e..8236c43 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc3/TypesTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc3/TypesTest.java
@@ -25,81 +25,81 @@ import java.sql.Types;
 
 public class TypesTest extends BaseTest4 {
 
-  private Connection conn;
+    private Connection conn;
 
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-    conn = con;
-    Statement stmt = conn.createStatement();
-    stmt.execute(
-        "CREATE OR REPLACE FUNCTION return_bool(boolean) RETURNS boolean AS 'BEGIN RETURN $1; END;' LANGUAGE plpgsql");
-    stmt.close();
-  }
-
-  @Override
-  public void tearDown() throws SQLException {
-    Statement stmt = conn.createStatement();
-    stmt.execute("DROP FUNCTION return_bool(boolean)");
-    stmt.close();
-    super.tearDown();
-  }
-
-  @Test
-  public void testPreparedBoolean() throws SQLException {
-    PreparedStatement pstmt = conn.prepareStatement("SELECT ?,?,?,?");
-    pstmt.setNull(1, Types.BOOLEAN);
-    pstmt.setObject(2, null, Types.BOOLEAN);
-    pstmt.setBoolean(3, true);
-    pstmt.setObject(4, Boolean.FALSE);
-    ResultSet rs = pstmt.executeQuery();
-    assertTrue(rs.next());
-    assertTrue(!rs.getBoolean(1));
-    assertTrue(rs.wasNull());
-    assertNull(rs.getObject(2));
-    assertTrue(rs.getBoolean(3));
-    // Only the V3 protocol return will be strongly typed.
-    // The V2 path will return a String because it doesn't know
-    // any better.
-    if (preferQueryMode != PreferQueryMode.SIMPLE) {
-      assertTrue(!((Boolean) rs.getObject(4)).booleanValue());
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        conn = con;
+        Statement stmt = conn.createStatement();
+        stmt.execute(
+                "CREATE OR REPLACE FUNCTION return_bool(boolean) RETURNS boolean AS 'BEGIN RETURN $1; END;' LANGUAGE plpgsql");
+        stmt.close();
     }
-  }
 
-  @Test
-  public void testPreparedByte() throws SQLException {
-    PreparedStatement pstmt = conn.prepareStatement("SELECT ?,?");
-    pstmt.setByte(1, (byte) 1);
-    pstmt.setObject(2, (byte) 2);
-    ResultSet rs = pstmt.executeQuery();
-    assertTrue(rs.next());
-    assertEquals((byte) 1, rs.getByte(1));
-    assertFalse(rs.wasNull());
-    assertEquals((byte) 2, rs.getByte(2));
-    assertFalse(rs.wasNull());
-    rs.close();
-    pstmt.close();
-  }
+    @Override
+    public void tearDown() throws SQLException {
+        Statement stmt = conn.createStatement();
+        stmt.execute("DROP FUNCTION return_bool(boolean)");
+        stmt.close();
+        super.tearDown();
+    }
 
-  @Test
-  public void testCallableBoolean() throws SQLException {
-    assumeCallableStatementsSupported();
-    CallableStatement cs = conn.prepareCall("{? = call return_bool(?)}");
-    cs.registerOutParameter(1, Types.BOOLEAN);
-    cs.setBoolean(2, true);
-    cs.execute();
-    assertEquals(true, cs.getBoolean(1));
-    cs.close();
-  }
+    @Test
+    public void testPreparedBoolean() throws SQLException {
+        PreparedStatement pstmt = conn.prepareStatement("SELECT ?,?,?,?");
+        pstmt.setNull(1, Types.BOOLEAN);
+        pstmt.setObject(2, null, Types.BOOLEAN);
+        pstmt.setBoolean(3, true);
+        pstmt.setObject(4, Boolean.FALSE);
+        ResultSet rs = pstmt.executeQuery();
+        assertTrue(rs.next());
+        assertTrue(!rs.getBoolean(1));
+        assertTrue(rs.wasNull());
+        assertNull(rs.getObject(2));
+        assertTrue(rs.getBoolean(3));
+        // Only the V3 protocol return will be strongly typed.
+        // The V2 path will return a String because it doesn't know
+        // any better.
+        if (preferQueryMode != PreferQueryMode.SIMPLE) {
+            assertTrue(!((Boolean) rs.getObject(4)).booleanValue());
+        }
+    }
 
-  @Test
-  public void testUnknownType() throws SQLException {
-    Statement stmt = conn.createStatement();
+    @Test
+    public void testPreparedByte() throws SQLException {
+        PreparedStatement pstmt = conn.prepareStatement("SELECT ?,?");
+        pstmt.setByte(1, (byte) 1);
+        pstmt.setObject(2, (byte) 2);
+        ResultSet rs = pstmt.executeQuery();
+        assertTrue(rs.next());
+        assertEquals((byte) 1, rs.getByte(1));
+        assertFalse(rs.wasNull());
+        assertEquals((byte) 2, rs.getByte(2));
+        assertFalse(rs.wasNull());
+        rs.close();
+        pstmt.close();
+    }
 
-    ResultSet rs = stmt.executeQuery("select 'foo1' as icon1, 'foo2' as icon2 ");
-    assertTrue(rs.next());
-    assertEquals("failed", "foo1", rs.getString("icon1"));
-    assertEquals("failed", "foo2", rs.getString("icon2"));
-  }
+    @Test
+    public void testCallableBoolean() throws SQLException {
+        assumeCallableStatementsSupported();
+        CallableStatement cs = conn.prepareCall("{? = call return_bool(?)}");
+        cs.registerOutParameter(1, Types.BOOLEAN);
+        cs.setBoolean(2, true);
+        cs.execute();
+        assertEquals(true, cs.getBoolean(1));
+        cs.close();
+    }
+
+    @Test
+    public void testUnknownType() throws SQLException {
+        Statement stmt = conn.createStatement();
+
+        ResultSet rs = stmt.executeQuery("select 'foo1' as icon1, 'foo2' as icon2 ");
+        assertTrue(rs.next());
+        assertEquals("failed", "foo1", rs.getString("icon1"));
+        assertEquals("failed", "foo2", rs.getString("icon2"));
+    }
 
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc4/ArrayTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/ArrayTest.java
index db1db95..83de5fb 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc4/ArrayTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/ArrayTest.java
@@ -41,667 +41,667 @@ import java.util.UUID;
 @RunWith(Parameterized.class)
 public class ArrayTest extends BaseTest4 {
 
-  private Connection conn;
+    private Connection conn;
 
-  public ArrayTest(BinaryMode binaryMode) {
-    setBinaryMode(binaryMode);
-  }
-
-  @Parameterized.Parameters(name = "binary = {0}")
-  public static Iterable<Object[]> data() {
-    Collection<Object[]> ids = new ArrayList<>();
-    for (BinaryMode binaryMode : BinaryMode.values()) {
-      ids.add(new Object[]{binaryMode});
-    }
-    return ids;
-  }
-
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-    conn = con;
-
-    TestUtil.createTable(conn, "arrtest",
-        "intarr int[], decarr decimal(2,1)[], strarr text[]"
-        + (TestUtil.haveMinimumServerVersion(conn, ServerVersion.v8_3) ? ", uuidarr uuid[]" : "")
-        + ", floatarr float8[]"
-        + ", intarr2 int4[][]");
-    TestUtil.createTable(conn, "arrcompprnttest", "id serial, name character(10)");
-    TestUtil.createTable(conn, "arrcompchldttest",
-        "id serial, name character(10), description character varying, parent integer");
-    TestUtil.createTable(conn, "\"CorrectCasing\"", "id serial");
-    TestUtil.createTable(conn, "\"Evil.Table\"", "id serial");
-  }
-
-  @Override
-  public void tearDown() throws SQLException {
-    TestUtil.dropTable(conn, "arrtest");
-    TestUtil.dropTable(conn, "arrcompprnttest");
-    TestUtil.dropTable(conn, "arrcompchldttest");
-    TestUtil.dropTable(conn, "\"CorrectCasing\"");
-
-    super.tearDown();
-  }
-
-  @Test
-  public void testCreateArrayOfBool() throws SQLException {
-    PreparedStatement pstmt = conn.prepareStatement("SELECT ?::bool[]");
-    pstmt.setArray(1, conn.unwrap(PgConnection.class).createArrayOf("boolean", new boolean[]{true, true, false}));
-
-    ResultSet rs = pstmt.executeQuery();
-    Assert.assertTrue(rs.next());
-    Array arr = rs.getArray(1);
-    Boolean[] out = (Boolean[]) arr.getArray();
-
-    Assert.assertEquals(3, out.length);
-    Assert.assertEquals(Boolean.TRUE, out[0]);
-    Assert.assertEquals(Boolean.TRUE, out[1]);
-    Assert.assertEquals(Boolean.FALSE, out[2]);
-  }
-
-  @Test
-  public void testCreateArrayOfInt() throws SQLException {
-    PreparedStatement pstmt = conn.prepareStatement("SELECT ?::int[]");
-    Integer[] in = new Integer[3];
-    in[0] = 0;
-    in[1] = -1;
-    in[2] = 2;
-    pstmt.setArray(1, conn.createArrayOf("int4", in));
-
-    ResultSet rs = pstmt.executeQuery();
-    Assert.assertTrue(rs.next());
-    Array arr = rs.getArray(1);
-    Integer[] out = (Integer[]) arr.getArray();
-
-    Assert.assertEquals(3, out.length);
-    Assert.assertEquals(0, out[0].intValue());
-    Assert.assertEquals(-1, out[1].intValue());
-    Assert.assertEquals(2, out[2].intValue());
-  }
-
-  @Test
-  public void testCreateArrayOfBytes() throws SQLException {
-
-    PreparedStatement pstmt = conn.prepareStatement("SELECT ?::bytea[]");
-    final byte[][] in = new byte[][]{{0x01, (byte) 0xFF, (byte) 0x12}, {}, {(byte) 0xAC, (byte) 0xE4}, null};
-    final Array createdArray = conn.createArrayOf("bytea", in);
-
-    byte[][] inCopy = (byte[][]) createdArray.getArray();
-
-    Assert.assertEquals(4, inCopy.length);
-
-    Assert.assertArrayEquals(in[0], inCopy[0]);
-    Assert.assertArrayEquals(in[1], inCopy[1]);
-    Assert.assertArrayEquals(in[2], inCopy[2]);
-    Assert.assertArrayEquals(in[3], inCopy[3]);
-    Assert.assertNull(inCopy[3]);
-
-    pstmt.setArray(1, createdArray);
-
-    ResultSet rs = pstmt.executeQuery();
-    Assert.assertTrue(rs.next());
-    Array arr = rs.getArray(1);
-
-    byte[][] out = (byte[][]) arr.getArray();
-
-    Assert.assertEquals(4, out.length);
-
-    Assert.assertArrayEquals(in[0], out[0]);
-    Assert.assertArrayEquals(in[1], out[1]);
-    Assert.assertArrayEquals(in[2], out[2]);
-    Assert.assertArrayEquals(in[3], out[3]);
-    Assert.assertNull(out[3]);
-  }
-
-  @Test
-  public void testCreateArrayOfBytesFromString() throws SQLException {
-
-    assumeMinimumServerVersion("support for bytea[] as string requires hex string support from 9.0",
-        ServerVersion.v9_0);
-
-    PreparedStatement pstmt = conn.prepareStatement("SELECT ?::bytea[]");
-    final byte[][] in = new byte[][]{{0x01, (byte) 0xFF, (byte) 0x12}, {}, {(byte) 0xAC, (byte) 0xE4}, null};
-
-    pstmt.setString(1, "{\"\\\\x01ff12\",\"\\\\x\",\"\\\\xace4\",NULL}");
-
-    ResultSet rs = pstmt.executeQuery();
-    Assert.assertTrue(rs.next());
-    Array arr = rs.getArray(1);
-
-    byte[][] out = (byte[][]) arr.getArray();
-
-    Assert.assertEquals(4, out.length);
-
-    Assert.assertArrayEquals(in[0], out[0]);
-    Assert.assertArrayEquals(in[1], out[1]);
-    Assert.assertArrayEquals(in[2], out[2]);
-    Assert.assertArrayEquals(in[3], out[3]);
-    Assert.assertNull(out[3]);
-  }
-
-  @Test
-  public void testCreateArrayOfSmallInt() throws SQLException {
-    PreparedStatement pstmt = conn.prepareStatement("SELECT ?::smallint[]");
-    Short[] in = new Short[3];
-    in[0] = 0;
-    in[1] = -1;
-    in[2] = 2;
-    pstmt.setArray(1, conn.createArrayOf("int2", in));
-
-    ResultSet rs = pstmt.executeQuery();
-    Assert.assertTrue(rs.next());
-    Array arr = rs.getArray(1);
-    Short[] out = (Short[]) arr.getArray();
-
-    Assert.assertEquals(3, out.length);
-    Assert.assertEquals(0, out[0].shortValue());
-    Assert.assertEquals(-1, out[1].shortValue());
-    Assert.assertEquals(2, out[2].shortValue());
-  }
-
-  @Test
-  public void testCreateArrayOfMultiString() throws SQLException {
-    PreparedStatement pstmt = conn.prepareStatement("SELECT ?::text[]");
-    String[][] in = new String[2][2];
-    in[0][0] = "a";
-    in[0][1] = "";
-    in[1][0] = "\\";
-    in[1][1] = "\"\\'z";
-    pstmt.setArray(1, conn.createArrayOf("text", in));
-
-    ResultSet rs = pstmt.executeQuery();
-    Assert.assertTrue(rs.next());
-    Array arr = rs.getArray(1);
-    String[][] out = (String[][]) arr.getArray();
-
-    Assert.assertEquals(2, out.length);
-    Assert.assertEquals(2, out[0].length);
-    Assert.assertEquals("a", out[0][0]);
-    Assert.assertEquals("", out[0][1]);
-    Assert.assertEquals("\\", out[1][0]);
-    Assert.assertEquals("\"\\'z", out[1][1]);
-  }
-
-  @Test
-  public void testCreateArrayOfMultiJson() throws SQLException {
-    if (!TestUtil.haveMinimumServerVersion(conn, ServerVersion.v9_2)) {
-      return;
-    }
-    PreparedStatement pstmt = conn.prepareStatement("SELECT ?::json[]");
-    PGobject p1 = new PGobject();
-    p1.setType("json");
-    p1.setValue("{\"x\": 10}");
-
-    PGobject p2 = new PGobject();
-    p2.setType("json");
-    p2.setValue("{\"x\": 20}");
-    PGobject[] in = new PGobject[]{p1, p2};
-    pstmt.setArray(1, conn.createArrayOf("json", in));
-
-    ResultSet rs = pstmt.executeQuery();
-    Assert.assertTrue(rs.next());
-    Array arr = rs.getArray(1);
-    ResultSet arrRs = arr.getResultSet();
-    Assert.assertTrue(arrRs.next());
-    Assert.assertEquals(in[0], arrRs.getObject(2));
-
-    Assert.assertTrue(arrRs.next());
-    Assert.assertEquals(in[1], arrRs.getObject(2));
-  }
-
-  @Test
-  public void testCreateArrayWithNonStandardDelimiter() throws SQLException {
-    PGbox[] in = new PGbox[2];
-    in[0] = new PGbox(1, 2, 3, 4);
-    in[1] = new PGbox(5, 6, 7, 8);
-
-    PreparedStatement pstmt = conn.prepareStatement("SELECT ?::box[]");
-    pstmt.setArray(1, conn.createArrayOf("box", in));
-    ResultSet rs = pstmt.executeQuery();
-    Assert.assertTrue(rs.next());
-    Array arr = rs.getArray(1);
-    ResultSet arrRs = arr.getResultSet();
-    Assert.assertTrue(arrRs.next());
-    Assert.assertEquals(in[0], arrRs.getObject(2));
-    Assert.assertTrue(arrRs.next());
-    Assert.assertEquals(in[1], arrRs.getObject(2));
-    Assert.assertFalse(arrRs.next());
-  }
-
-  @Test
-  public void testCreateArrayOfNull() throws SQLException {
-    String sql = "SELECT ?";
-    // We must provide the type information for V2 protocol
-    if (preferQueryMode == PreferQueryMode.SIMPLE) {
-      sql = "SELECT ?::int8[]";
+    public ArrayTest(BinaryMode binaryMode) {
+        setBinaryMode(binaryMode);
     }
 
-    PreparedStatement pstmt = conn.prepareStatement(sql);
-    String[] in = new String[2];
-    in[0] = null;
-    in[1] = null;
-    pstmt.setArray(1, conn.createArrayOf("int8", in));
-
-    ResultSet rs = pstmt.executeQuery();
-    Assert.assertTrue(rs.next());
-    Array arr = rs.getArray(1);
-    Long[] out = (Long[]) arr.getArray();
-
-    Assert.assertEquals(2, out.length);
-    Assert.assertNull(out[0]);
-    Assert.assertNull(out[1]);
-  }
-
-  @Test
-  public void testCreateEmptyArrayOfIntViaAlias() throws SQLException {
-    PreparedStatement pstmt = conn.prepareStatement("SELECT ?::int[]");
-    Integer[] in = new Integer[0];
-    pstmt.setArray(1, conn.createArrayOf("integer", in));
-
-    ResultSet rs = pstmt.executeQuery();
-    Assert.assertTrue(rs.next());
-    Array arr = rs.getArray(1);
-    Integer[] out = (Integer[]) arr.getArray();
-
-    Assert.assertEquals(0, out.length);
-
-    ResultSet arrRs = arr.getResultSet();
-    Assert.assertFalse(arrRs.next());
-  }
-
-  @Test
-  public void testCreateArrayWithoutServer() throws SQLException {
-    String[][] in = new String[2][2];
-    in[0][0] = "a";
-    in[0][1] = "";
-    in[1][0] = "\\";
-    in[1][1] = "\"\\'z";
-
-    Array arr = conn.createArrayOf("varchar", in);
-    String[][] out = (String[][]) arr.getArray();
-
-    Assert.assertEquals(2, out.length);
-    Assert.assertEquals(2, out[0].length);
-    Assert.assertEquals("a", out[0][0]);
-    Assert.assertEquals("", out[0][1]);
-    Assert.assertEquals("\\", out[1][0]);
-    Assert.assertEquals("\"\\'z", out[1][1]);
-  }
-
-  @Test
-  public void testCreatePrimitiveArray() throws SQLException {
-    double[][] in = new double[2][2];
-    in[0][0] = 3.5;
-    in[0][1] = -4.5;
-    in[1][0] = 10.0 / 3;
-    in[1][1] = 77;
-
-    Array arr = conn.createArrayOf("float8", in);
-    Double[][] out = (Double[][]) arr.getArray();
-
-    Assert.assertEquals(2, out.length);
-    Assert.assertEquals(2, out[0].length);
-    Assert.assertEquals(3.5, out[0][0], 0.00001);
-    Assert.assertEquals(-4.5, out[0][1], 0.00001);
-    Assert.assertEquals(10.0 / 3, out[1][0], 0.00001);
-    Assert.assertEquals(77, out[1][1], 0.00001);
-  }
-
-  @Test
-  public void testUUIDArray() throws SQLException {
-    Assume.assumeTrue("UUID is not supported in PreferQueryMode.SIMPLE",
-        preferQueryMode != PreferQueryMode.SIMPLE);
-    Assume.assumeTrue("UUID requires PostgreSQL 8.3+",
-        TestUtil.haveMinimumServerVersion(conn, ServerVersion.v8_3));
-    UUID uuid1 = UUID.randomUUID();
-    UUID uuid2 = UUID.randomUUID();
-    UUID uuid3 = UUID.randomUUID();
-
-    // insert a uuid array, and check
-    PreparedStatement pstmt1 = conn.prepareStatement("INSERT INTO arrtest(uuidarr) VALUES (?)");
-    pstmt1.setArray(1, conn.createArrayOf("uuid", new UUID[]{uuid1, uuid2, uuid3}));
-    pstmt1.executeUpdate();
-
-    PreparedStatement pstmt2 =
-        conn.prepareStatement("SELECT uuidarr FROM arrtest WHERE uuidarr @> ?");
-    pstmt2.setObject(1, conn.createArrayOf("uuid", new UUID[]{uuid1}), Types.OTHER);
-    ResultSet rs = pstmt2.executeQuery();
-    Assert.assertTrue(rs.next());
-    Array arr = rs.getArray(1);
-    UUID[] out = (UUID[]) arr.getArray();
-
-    Assert.assertEquals(3, out.length);
-    Assert.assertEquals(uuid1, out[0]);
-    Assert.assertEquals(uuid2, out[1]);
-    Assert.assertEquals(uuid3, out[2]);
-
-    // concatenate a uuid, and check
-    UUID uuid4 = UUID.randomUUID();
-    PreparedStatement pstmt3 =
-        conn.prepareStatement("UPDATE arrtest SET uuidarr = uuidarr || ? WHERE uuidarr @> ?");
-    pstmt3.setObject(1, uuid4, Types.OTHER);
-    pstmt3.setArray(2, conn.createArrayOf("uuid", new UUID[]{uuid1}));
-    pstmt3.executeUpdate();
-
-    // --
-    pstmt2.setObject(1, conn.createArrayOf("uuid", new UUID[]{uuid4}), Types.OTHER);
-    rs = pstmt2.executeQuery();
-    Assert.assertTrue(rs.next());
-    arr = rs.getArray(1);
-    out = (UUID[]) arr.getArray();
-
-    Assert.assertEquals(4, out.length);
-    Assert.assertEquals(uuid1, out[0]);
-    Assert.assertEquals(uuid2, out[1]);
-    Assert.assertEquals(uuid3, out[2]);
-    Assert.assertEquals(uuid4, out[3]);
-  }
-
-  @Test
-  public void testSetObjectFromJavaArray() throws SQLException {
-    String[] strArray = new String[]{"a", "b", "c"};
-    Object[] objCopy = Arrays.copyOf(strArray, strArray.length, Object[].class);
-
-    PreparedStatement pstmt = conn.prepareStatement("INSERT INTO arrtest(strarr) VALUES (?)");
-
-    // cannot handle generic Object[]
-    try {
-      pstmt.setObject(1, objCopy, Types.ARRAY);
-      pstmt.executeUpdate();
-      Assert.fail("setObject() with a Java array parameter and Types.ARRAY shouldn't succeed");
-    } catch (org.postgresql.util.PSQLException ex) {
-      // Expected failure.
-    }
-
-    try {
-      pstmt.setObject(1, objCopy);
-      pstmt.executeUpdate();
-      Assert.fail("setObject() with a Java array parameter and no Types argument shouldn't succeed");
-    } catch (org.postgresql.util.PSQLException ex) {
-      // Expected failure.
-    }
-
-    pstmt.setObject(1, strArray);
-    pstmt.executeUpdate();
-
-    pstmt.setObject(1, strArray, Types.ARRAY);
-    pstmt.executeUpdate();
-
-    // Correct way, though the use of "text" as a type is non-portable.
-    // Only supported for JDK 1.6 and JDBC4
-    Array sqlArray = conn.createArrayOf("text", strArray);
-    pstmt.setArray(1, sqlArray);
-    pstmt.executeUpdate();
-
-    pstmt.close();
-  }
-
-  @Test
-  public void testGetArrayOfComposites() throws SQLException {
-    Assume.assumeTrue("array_agg(expression) requires PostgreSQL 8.4+",
-        TestUtil.haveMinimumServerVersion(conn, ServerVersion.v8_4));
-
-    PreparedStatement insertParentPstmt =
-        conn.prepareStatement("INSERT INTO arrcompprnttest (name) "
-            + "VALUES ('aParent');");
-    insertParentPstmt.execute();
-
-    String[] children = {
-        "November 5, 2013",
-        "\"A Book Title\"",
-        "4\" by 6\"",
-        "5\",3\""};
-
-    PreparedStatement insertChildrenPstmt =
-        conn.prepareStatement("INSERT INTO arrcompchldttest (name,description,parent) "
-            + "VALUES ('child1',?,1),"
-            + "('child2',?,1),"
-            + "('child3',?,1),"
-            + "('child4',?,1);");
-
-    insertChildrenPstmt.setString(1, children[0]);
-    insertChildrenPstmt.setString(2, children[1]);
-    insertChildrenPstmt.setString(3, children[2]);
-    insertChildrenPstmt.setString(4, children[3]);
-
-    insertChildrenPstmt.execute();
-
-    PreparedStatement pstmt = conn.prepareStatement(
-        "SELECT arrcompprnttest.name, "
-            + "array_agg("
-            + "DISTINCT(arrcompchldttest.id, "
-            + "arrcompchldttest.name, "
-            + "arrcompchldttest.description)) "
-            + "AS children "
-            + "FROM arrcompprnttest "
-            + "LEFT JOIN arrcompchldttest "
-            + "ON (arrcompchldttest.parent = arrcompprnttest.id) "
-            + "WHERE arrcompprnttest.id=? "
-            + "GROUP BY arrcompprnttest.name;");
-    pstmt.setInt(1, 1);
-    ResultSet rs = pstmt.executeQuery();
-
-    assertNotNull(rs);
-    Assert.assertTrue(rs.next());
-
-    Array childrenArray = rs.getArray("children");
-    assertNotNull(childrenArray);
-
-    ResultSet rsChildren = childrenArray.getResultSet();
-    assertNotNull(rsChildren);
-    while (rsChildren.next()) {
-      String comp = rsChildren.getString(2);
-      PGtokenizer token = new PGtokenizer(PGtokenizer.removePara(comp), ',');
-      token.remove("\"", "\""); // remove surrounding double quotes
-      if (2 < token.getSize()) {
-        int childID = Integer.parseInt(token.getToken(0));
-        // remove double quotes escaping with double quotes
-        String value = token.getToken(2).replace("\"\"", "\"");
-        Assert.assertEquals(children[childID - 1], value);
-      } else {
-        Assert.fail("Needs to have 3 tokens");
-      }
-    }
-  }
-
-  @Test
-  public void testCasingComposite() throws SQLException {
-    Assume.assumeTrue("Arrays of composite types requires PostgreSQL 8.3+",
-        TestUtil.haveMinimumServerVersion(conn, ServerVersion.v8_3));
-
-    PGobject cc = new PGobject();
-    cc.setType("\"CorrectCasing\"");
-    cc.setValue("(1)");
-    Object[] in = new Object[1];
-    in[0] = cc;
-
-    Array arr = conn.createArrayOf("\"CorrectCasing\"", in);
-    PreparedStatement pstmt = conn.prepareStatement("SELECT ?::\"CorrectCasing\"[]");
-    pstmt.setArray(1, arr);
-    ResultSet rs = pstmt.executeQuery();
-
-    Assert.assertTrue(rs.next());
-    Object[] resArr = (Object[]) rs.getArray(1).getArray();
-
-    Assert.assertTrue(resArr[0] instanceof PGobject);
-    PGobject resObj = (PGobject) resArr[0];
-    Assert.assertEquals("(1)", resObj.getValue());
-  }
-
-  @Test
-  public void testCasingBuiltinAlias() throws SQLException {
-    Array arr = conn.createArrayOf("INT", new Integer[]{1, 2, 3});
-    PreparedStatement pstmt = conn.prepareStatement("SELECT ?::INT[]");
-    pstmt.setArray(1, arr);
-    ResultSet rs = pstmt.executeQuery();
-
-    Assert.assertTrue(rs.next());
-    Integer[] resArr = (Integer[]) rs.getArray(1).getArray();
-
-    Assert.assertArrayEquals(new Integer[]{1, 2, 3}, resArr);
-  }
-
-  @Test
-  public void testCasingBuiltinNonAlias() throws SQLException {
-    Array arr = conn.createArrayOf("INT4", new Integer[]{1, 2, 3});
-    PreparedStatement pstmt = conn.prepareStatement("SELECT ?::INT4[]");
-    pstmt.setArray(1, arr);
-    ResultSet rs = pstmt.executeQuery();
-
-    Assert.assertTrue(rs.next());
-    Integer[] resArr = (Integer[]) rs.getArray(1).getArray();
-
-    Assert.assertArrayEquals(new Integer[]{1, 2, 3}, resArr);
-  }
-
-  @Test
-  public void testEvilCasing() throws SQLException {
-    Assume.assumeTrue("Arrays of composite types requires PostgreSQL 8.3+",
-        TestUtil.haveMinimumServerVersion(conn, ServerVersion.v8_3));
-
-    PGobject cc = new PGobject();
-    cc.setType("\"Evil.Table\"");
-    cc.setValue("(1)");
-    Object[] in = new Object[1];
-    in[0] = cc;
-
-    Array arr = conn.createArrayOf("\"Evil.Table\"", in);
-    PreparedStatement pstmt = conn.prepareStatement("SELECT ?::\"Evil.Table\"[]");
-    pstmt.setArray(1, arr);
-    ResultSet rs = pstmt.executeQuery();
-
-    Assert.assertTrue(rs.next());
-    Object[] resArr = (Object[]) rs.getArray(1).getArray();
-
-    Assert.assertTrue(resArr[0] instanceof PGobject);
-    PGobject resObj = (PGobject) resArr[0];
-    Assert.assertEquals("(1)", resObj.getValue());
-  }
-
-  @Test
-  public void testToString() throws SQLException {
-    Double[] d = new Double[4];
-
-    d[0] = 3.5;
-    d[1] = -4.5;
-    d[2] = null;
-    d[3] = 77.0;
-
-    Array arr = con.createArrayOf("float8", d);
-    PreparedStatement pstmt = con.prepareStatement("INSERT INTO arrtest(floatarr) VALUES (?)");
-    ResultSet rs = null;
-
-    try {
-      pstmt.setArray(1, arr);
-      pstmt.execute();
-    } finally {
-      TestUtil.closeQuietly(pstmt);
-    }
-
-    Statement stmt = null;
-    try {
-      stmt = con.createStatement();
-
-      rs = stmt.executeQuery("select floatarr from arrtest");
-
-      while (rs.next()) {
-        Array doubles = rs.getArray(1);
-        String actual = doubles.toString();
-        if (actual != null) {
-          // if a binary array is provided, the string representation looks like [0:1][0:1]={{1,2},{3,4}}
-          int idx = actual.indexOf('=');
-          if (idx > 0) {
-            actual = actual.substring(idx + 1);
-          }
-          // Remove all double quotes. They do not make a difference here.
-          actual = actual.replaceAll("\"", "");
+    @Parameterized.Parameters(name = "binary = {0}")
+    public static Iterable<Object[]> data() {
+        Collection<Object[]> ids = new ArrayList<>();
+        for (BinaryMode binaryMode : BinaryMode.values()) {
+            ids.add(new Object[]{binaryMode});
         }
-        //the string format may vary based on how data stored
-        MatcherAssert.assertThat(actual, RegexMatcher.matchesPattern("\\{3\\.5,-4\\.5,NULL,77(.0)?\\}"));
-      }
-
-    } finally {
-      TestUtil.closeQuietly(rs);
-      TestUtil.closeQuietly(stmt);
+        return ids;
     }
-  }
 
-  @Test
-  public void nullArray() throws SQLException {
-    PreparedStatement ps = con.prepareStatement("INSERT INTO arrtest(floatarr) VALUES (?)");
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        conn = con;
 
-    ps.setNull(1, Types.ARRAY, "float8[]");
-    ps.execute();
-
-    ps.close();
-    ps = con.prepareStatement("select floatarr from arrtest");
-    ResultSet rs = ps.executeQuery();
-    Assert.assertTrue("arrtest should contain a row", rs.next());
-    Array getArray = rs.getArray(1);
-    Assert.assertNull("null array should return null value on getArray", getArray);
-    Object getObject = rs.getObject(1);
-    Assert.assertNull("null array should return null on getObject", getObject);
-  }
-
-  @Test
-  public void createNullArray() throws SQLException {
-    Array arr = con.createArrayOf("float8", null);
-    assertNotNull(arr);
-    Assert.assertNull(arr.getArray());
-  }
-
-  @Test
-  public void multiDimIntArray() throws SQLException {
-    Array arr = con.createArrayOf("int4", new int[][]{{1, 2}, {3, 4}});
-    PreparedStatement ps = con.prepareStatement("select ?::int4[][]");
-    ps.setArray(1, arr);
-    ResultSet rs = ps.executeQuery();
-    rs.next();
-    Array resArray = rs.getArray(1);
-    String stringValue = resArray.toString();
-    // if a binary array is provided, the string representation looks like [0:1][0:1]={{1,2},{3,4}}
-    int idx = stringValue.indexOf('=');
-    if (idx > 0) {
-      stringValue = stringValue.substring(idx + 1);
+        TestUtil.createTable(conn, "arrtest",
+                "intarr int[], decarr decimal(2,1)[], strarr text[]"
+                        + (TestUtil.haveMinimumServerVersion(conn, ServerVersion.v8_3) ? ", uuidarr uuid[]" : "")
+                        + ", floatarr float8[]"
+                        + ", intarr2 int4[][]");
+        TestUtil.createTable(conn, "arrcompprnttest", "id serial, name character(10)");
+        TestUtil.createTable(conn, "arrcompchldttest",
+                "id serial, name character(10), description character varying, parent integer");
+        TestUtil.createTable(conn, "\"CorrectCasing\"", "id serial");
+        TestUtil.createTable(conn, "\"Evil.Table\"", "id serial");
     }
-    // Both {{"1","2"},{"3","4"}} and {{1,2},{3,4}} are the same array representation
-    stringValue = stringValue.replaceAll("\"", "");
-    Assert.assertEquals("{{1,2},{3,4}}", stringValue);
-    TestUtil.closeQuietly(rs);
-    TestUtil.closeQuietly(ps);
-  }
 
-  @Test
-  public void insertAndQueryMultiDimArray() throws SQLException {
-    Array arr = con.createArrayOf("int4", new int[][]{{1, 2}, {3, 4}});
-    PreparedStatement insertPs = con.prepareStatement("INSERT INTO arrtest(intarr2) VALUES (?)");
-    insertPs.setArray(1, arr);
-    insertPs.execute();
-    insertPs.close();
+    @Override
+    public void tearDown() throws SQLException {
+        TestUtil.dropTable(conn, "arrtest");
+        TestUtil.dropTable(conn, "arrcompprnttest");
+        TestUtil.dropTable(conn, "arrcompchldttest");
+        TestUtil.dropTable(conn, "\"CorrectCasing\"");
 
-    PreparedStatement selectPs = con.prepareStatement("SELECT intarr2 FROM arrtest");
-    ResultSet rs = selectPs.executeQuery();
-    rs.next();
-
-    Array array = rs.getArray(1);
-    Integer[][] secondRowValues = (Integer[][]) array.getArray(2, 1);
-
-    Assert.assertEquals(3, secondRowValues[0][0].intValue());
-    Assert.assertEquals(4, secondRowValues[0][1].intValue());
-  }
-
-  @Test
-  public void testJsonbArray() throws  SQLException {
-    Assume.assumeTrue("jsonb requires PostgreSQL 9.4+", TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_4));
-    TestUtil.createTempTable(con, "jsonbarray", "jbarray jsonb[]");
-    try (Statement stmt = con.createStatement()) {
-      stmt.executeUpdate("insert into jsonbarray values( ARRAY['{\"a\":\"a\"}'::jsonb, '{\"b\":\"b\"}'::jsonb] )");
-      try (ResultSet rs = stmt.executeQuery("select jbarray from jsonbarray")) {
-        assertTrue(rs.next());
-        Array jsonArray = rs.getArray(1);
-        assertNotNull(jsonArray);
-        assertEquals("jsonb", jsonArray.getBaseTypeName());
-      }
+        super.tearDown();
+    }
+
+    @Test
+    public void testCreateArrayOfBool() throws SQLException {
+        PreparedStatement pstmt = conn.prepareStatement("SELECT ?::bool[]");
+        pstmt.setArray(1, conn.unwrap(PgConnection.class).createArrayOf("boolean", new boolean[]{true, true, false}));
+
+        ResultSet rs = pstmt.executeQuery();
+        Assert.assertTrue(rs.next());
+        Array arr = rs.getArray(1);
+        Boolean[] out = (Boolean[]) arr.getArray();
+
+        Assert.assertEquals(3, out.length);
+        Assert.assertEquals(Boolean.TRUE, out[0]);
+        Assert.assertEquals(Boolean.TRUE, out[1]);
+        Assert.assertEquals(Boolean.FALSE, out[2]);
+    }
+
+    @Test
+    public void testCreateArrayOfInt() throws SQLException {
+        PreparedStatement pstmt = conn.prepareStatement("SELECT ?::int[]");
+        Integer[] in = new Integer[3];
+        in[0] = 0;
+        in[1] = -1;
+        in[2] = 2;
+        pstmt.setArray(1, conn.createArrayOf("int4", in));
+
+        ResultSet rs = pstmt.executeQuery();
+        Assert.assertTrue(rs.next());
+        Array arr = rs.getArray(1);
+        Integer[] out = (Integer[]) arr.getArray();
+
+        Assert.assertEquals(3, out.length);
+        Assert.assertEquals(0, out[0].intValue());
+        Assert.assertEquals(-1, out[1].intValue());
+        Assert.assertEquals(2, out[2].intValue());
+    }
+
+    @Test
+    public void testCreateArrayOfBytes() throws SQLException {
+
+        PreparedStatement pstmt = conn.prepareStatement("SELECT ?::bytea[]");
+        final byte[][] in = new byte[][]{{0x01, (byte) 0xFF, (byte) 0x12}, {}, {(byte) 0xAC, (byte) 0xE4}, null};
+        final Array createdArray = conn.createArrayOf("bytea", in);
+
+        byte[][] inCopy = (byte[][]) createdArray.getArray();
+
+        Assert.assertEquals(4, inCopy.length);
+
+        Assert.assertArrayEquals(in[0], inCopy[0]);
+        Assert.assertArrayEquals(in[1], inCopy[1]);
+        Assert.assertArrayEquals(in[2], inCopy[2]);
+        Assert.assertArrayEquals(in[3], inCopy[3]);
+        Assert.assertNull(inCopy[3]);
+
+        pstmt.setArray(1, createdArray);
+
+        ResultSet rs = pstmt.executeQuery();
+        Assert.assertTrue(rs.next());
+        Array arr = rs.getArray(1);
+
+        byte[][] out = (byte[][]) arr.getArray();
+
+        Assert.assertEquals(4, out.length);
+
+        Assert.assertArrayEquals(in[0], out[0]);
+        Assert.assertArrayEquals(in[1], out[1]);
+        Assert.assertArrayEquals(in[2], out[2]);
+        Assert.assertArrayEquals(in[3], out[3]);
+        Assert.assertNull(out[3]);
+    }
+
+    @Test
+    public void testCreateArrayOfBytesFromString() throws SQLException {
+
+        assumeMinimumServerVersion("support for bytea[] as string requires hex string support from 9.0",
+                ServerVersion.v9_0);
+
+        PreparedStatement pstmt = conn.prepareStatement("SELECT ?::bytea[]");
+        final byte[][] in = new byte[][]{{0x01, (byte) 0xFF, (byte) 0x12}, {}, {(byte) 0xAC, (byte) 0xE4}, null};
+
+        pstmt.setString(1, "{\"\\\\x01ff12\",\"\\\\x\",\"\\\\xace4\",NULL}");
+
+        ResultSet rs = pstmt.executeQuery();
+        Assert.assertTrue(rs.next());
+        Array arr = rs.getArray(1);
+
+        byte[][] out = (byte[][]) arr.getArray();
+
+        Assert.assertEquals(4, out.length);
+
+        Assert.assertArrayEquals(in[0], out[0]);
+        Assert.assertArrayEquals(in[1], out[1]);
+        Assert.assertArrayEquals(in[2], out[2]);
+        Assert.assertArrayEquals(in[3], out[3]);
+        Assert.assertNull(out[3]);
+    }
+
+    @Test
+    public void testCreateArrayOfSmallInt() throws SQLException {
+        PreparedStatement pstmt = conn.prepareStatement("SELECT ?::smallint[]");
+        Short[] in = new Short[3];
+        in[0] = 0;
+        in[1] = -1;
+        in[2] = 2;
+        pstmt.setArray(1, conn.createArrayOf("int2", in));
+
+        ResultSet rs = pstmt.executeQuery();
+        Assert.assertTrue(rs.next());
+        Array arr = rs.getArray(1);
+        Short[] out = (Short[]) arr.getArray();
+
+        Assert.assertEquals(3, out.length);
+        Assert.assertEquals(0, out[0].shortValue());
+        Assert.assertEquals(-1, out[1].shortValue());
+        Assert.assertEquals(2, out[2].shortValue());
+    }
+
+    @Test
+    public void testCreateArrayOfMultiString() throws SQLException {
+        PreparedStatement pstmt = conn.prepareStatement("SELECT ?::text[]");
+        String[][] in = new String[2][2];
+        in[0][0] = "a";
+        in[0][1] = "";
+        in[1][0] = "\\";
+        in[1][1] = "\"\\'z";
+        pstmt.setArray(1, conn.createArrayOf("text", in));
+
+        ResultSet rs = pstmt.executeQuery();
+        Assert.assertTrue(rs.next());
+        Array arr = rs.getArray(1);
+        String[][] out = (String[][]) arr.getArray();
+
+        Assert.assertEquals(2, out.length);
+        Assert.assertEquals(2, out[0].length);
+        Assert.assertEquals("a", out[0][0]);
+        Assert.assertEquals("", out[0][1]);
+        Assert.assertEquals("\\", out[1][0]);
+        Assert.assertEquals("\"\\'z", out[1][1]);
+    }
+
+    @Test
+    public void testCreateArrayOfMultiJson() throws SQLException {
+        if (!TestUtil.haveMinimumServerVersion(conn, ServerVersion.v9_2)) {
+            return;
+        }
+        PreparedStatement pstmt = conn.prepareStatement("SELECT ?::json[]");
+        PGobject p1 = new PGobject();
+        p1.setType("json");
+        p1.setValue("{\"x\": 10}");
+
+        PGobject p2 = new PGobject();
+        p2.setType("json");
+        p2.setValue("{\"x\": 20}");
+        PGobject[] in = new PGobject[]{p1, p2};
+        pstmt.setArray(1, conn.createArrayOf("json", in));
+
+        ResultSet rs = pstmt.executeQuery();
+        Assert.assertTrue(rs.next());
+        Array arr = rs.getArray(1);
+        ResultSet arrRs = arr.getResultSet();
+        Assert.assertTrue(arrRs.next());
+        Assert.assertEquals(in[0], arrRs.getObject(2));
+
+        Assert.assertTrue(arrRs.next());
+        Assert.assertEquals(in[1], arrRs.getObject(2));
+    }
+
+    @Test
+    public void testCreateArrayWithNonStandardDelimiter() throws SQLException {
+        PGbox[] in = new PGbox[2];
+        in[0] = new PGbox(1, 2, 3, 4);
+        in[1] = new PGbox(5, 6, 7, 8);
+
+        PreparedStatement pstmt = conn.prepareStatement("SELECT ?::box[]");
+        pstmt.setArray(1, conn.createArrayOf("box", in));
+        ResultSet rs = pstmt.executeQuery();
+        Assert.assertTrue(rs.next());
+        Array arr = rs.getArray(1);
+        ResultSet arrRs = arr.getResultSet();
+        Assert.assertTrue(arrRs.next());
+        Assert.assertEquals(in[0], arrRs.getObject(2));
+        Assert.assertTrue(arrRs.next());
+        Assert.assertEquals(in[1], arrRs.getObject(2));
+        Assert.assertFalse(arrRs.next());
+    }
+
+    @Test
+    public void testCreateArrayOfNull() throws SQLException {
+        String sql = "SELECT ?";
+        // We must provide the type information for V2 protocol
+        if (preferQueryMode == PreferQueryMode.SIMPLE) {
+            sql = "SELECT ?::int8[]";
+        }
+
+        PreparedStatement pstmt = conn.prepareStatement(sql);
+        String[] in = new String[2];
+        in[0] = null;
+        in[1] = null;
+        pstmt.setArray(1, conn.createArrayOf("int8", in));
+
+        ResultSet rs = pstmt.executeQuery();
+        Assert.assertTrue(rs.next());
+        Array arr = rs.getArray(1);
+        Long[] out = (Long[]) arr.getArray();
+
+        Assert.assertEquals(2, out.length);
+        Assert.assertNull(out[0]);
+        Assert.assertNull(out[1]);
+    }
+
+    @Test
+    public void testCreateEmptyArrayOfIntViaAlias() throws SQLException {
+        PreparedStatement pstmt = conn.prepareStatement("SELECT ?::int[]");
+        Integer[] in = new Integer[0];
+        pstmt.setArray(1, conn.createArrayOf("integer", in));
+
+        ResultSet rs = pstmt.executeQuery();
+        Assert.assertTrue(rs.next());
+        Array arr = rs.getArray(1);
+        Integer[] out = (Integer[]) arr.getArray();
+
+        Assert.assertEquals(0, out.length);
+
+        ResultSet arrRs = arr.getResultSet();
+        Assert.assertFalse(arrRs.next());
+    }
+
+    @Test
+    public void testCreateArrayWithoutServer() throws SQLException {
+        String[][] in = new String[2][2];
+        in[0][0] = "a";
+        in[0][1] = "";
+        in[1][0] = "\\";
+        in[1][1] = "\"\\'z";
+
+        Array arr = conn.createArrayOf("varchar", in);
+        String[][] out = (String[][]) arr.getArray();
+
+        Assert.assertEquals(2, out.length);
+        Assert.assertEquals(2, out[0].length);
+        Assert.assertEquals("a", out[0][0]);
+        Assert.assertEquals("", out[0][1]);
+        Assert.assertEquals("\\", out[1][0]);
+        Assert.assertEquals("\"\\'z", out[1][1]);
+    }
+
+    @Test
+    public void testCreatePrimitiveArray() throws SQLException {
+        double[][] in = new double[2][2];
+        in[0][0] = 3.5;
+        in[0][1] = -4.5;
+        in[1][0] = 10.0 / 3;
+        in[1][1] = 77;
+
+        Array arr = conn.createArrayOf("float8", in);
+        Double[][] out = (Double[][]) arr.getArray();
+
+        Assert.assertEquals(2, out.length);
+        Assert.assertEquals(2, out[0].length);
+        Assert.assertEquals(3.5, out[0][0], 0.00001);
+        Assert.assertEquals(-4.5, out[0][1], 0.00001);
+        Assert.assertEquals(10.0 / 3, out[1][0], 0.00001);
+        Assert.assertEquals(77, out[1][1], 0.00001);
+    }
+
+    @Test
+    public void testUUIDArray() throws SQLException {
+        Assume.assumeTrue("UUID is not supported in PreferQueryMode.SIMPLE",
+                preferQueryMode != PreferQueryMode.SIMPLE);
+        Assume.assumeTrue("UUID requires PostgreSQL 8.3+",
+                TestUtil.haveMinimumServerVersion(conn, ServerVersion.v8_3));
+        UUID uuid1 = UUID.randomUUID();
+        UUID uuid2 = UUID.randomUUID();
+        UUID uuid3 = UUID.randomUUID();
+
+        // insert a uuid array, and check
+        PreparedStatement pstmt1 = conn.prepareStatement("INSERT INTO arrtest(uuidarr) VALUES (?)");
+        pstmt1.setArray(1, conn.createArrayOf("uuid", new UUID[]{uuid1, uuid2, uuid3}));
+        pstmt1.executeUpdate();
+
+        PreparedStatement pstmt2 =
+                conn.prepareStatement("SELECT uuidarr FROM arrtest WHERE uuidarr @> ?");
+        pstmt2.setObject(1, conn.createArrayOf("uuid", new UUID[]{uuid1}), Types.OTHER);
+        ResultSet rs = pstmt2.executeQuery();
+        Assert.assertTrue(rs.next());
+        Array arr = rs.getArray(1);
+        UUID[] out = (UUID[]) arr.getArray();
+
+        Assert.assertEquals(3, out.length);
+        Assert.assertEquals(uuid1, out[0]);
+        Assert.assertEquals(uuid2, out[1]);
+        Assert.assertEquals(uuid3, out[2]);
+
+        // concatenate a uuid, and check
+        UUID uuid4 = UUID.randomUUID();
+        PreparedStatement pstmt3 =
+                conn.prepareStatement("UPDATE arrtest SET uuidarr = uuidarr || ? WHERE uuidarr @> ?");
+        pstmt3.setObject(1, uuid4, Types.OTHER);
+        pstmt3.setArray(2, conn.createArrayOf("uuid", new UUID[]{uuid1}));
+        pstmt3.executeUpdate();
+
+        // --
+        pstmt2.setObject(1, conn.createArrayOf("uuid", new UUID[]{uuid4}), Types.OTHER);
+        rs = pstmt2.executeQuery();
+        Assert.assertTrue(rs.next());
+        arr = rs.getArray(1);
+        out = (UUID[]) arr.getArray();
+
+        Assert.assertEquals(4, out.length);
+        Assert.assertEquals(uuid1, out[0]);
+        Assert.assertEquals(uuid2, out[1]);
+        Assert.assertEquals(uuid3, out[2]);
+        Assert.assertEquals(uuid4, out[3]);
+    }
+
+    @Test
+    public void testSetObjectFromJavaArray() throws SQLException {
+        String[] strArray = new String[]{"a", "b", "c"};
+        Object[] objCopy = Arrays.copyOf(strArray, strArray.length, Object[].class);
+
+        PreparedStatement pstmt = conn.prepareStatement("INSERT INTO arrtest(strarr) VALUES (?)");
+
+        // cannot handle generic Object[]
+        try {
+            pstmt.setObject(1, objCopy, Types.ARRAY);
+            pstmt.executeUpdate();
+            Assert.fail("setObject() with a Java array parameter and Types.ARRAY shouldn't succeed");
+        } catch (org.postgresql.util.PSQLException ex) {
+            // Expected failure.
+        }
+
+        try {
+            pstmt.setObject(1, objCopy);
+            pstmt.executeUpdate();
+            Assert.fail("setObject() with a Java array parameter and no Types argument shouldn't succeed");
+        } catch (org.postgresql.util.PSQLException ex) {
+            // Expected failure.
+        }
+
+        pstmt.setObject(1, strArray);
+        pstmt.executeUpdate();
+
+        pstmt.setObject(1, strArray, Types.ARRAY);
+        pstmt.executeUpdate();
+
+        // Correct way, though the use of "text" as a type is non-portable.
+        // createArrayOf requires JDBC 4 (Java 6+).
+        Array sqlArray = conn.createArrayOf("text", strArray);
+        pstmt.setArray(1, sqlArray);
+        pstmt.executeUpdate();
+
+        pstmt.close();
+    }
+
+    @Test
+    public void testGetArrayOfComposites() throws SQLException {
+        Assume.assumeTrue("array_agg(expression) requires PostgreSQL 8.4+",
+                TestUtil.haveMinimumServerVersion(conn, ServerVersion.v8_4));
+
+        PreparedStatement insertParentPstmt =
+                conn.prepareStatement("INSERT INTO arrcompprnttest (name) "
+                        + "VALUES ('aParent');");
+        insertParentPstmt.execute();
+
+        String[] children = {
+                "November 5, 2013",
+                "\"A Book Title\"",
+                "4\" by 6\"",
+                "5\",3\""};
+
+        PreparedStatement insertChildrenPstmt =
+                conn.prepareStatement("INSERT INTO arrcompchldttest (name,description,parent) "
+                        + "VALUES ('child1',?,1),"
+                        + "('child2',?,1),"
+                        + "('child3',?,1),"
+                        + "('child4',?,1);");
+
+        insertChildrenPstmt.setString(1, children[0]);
+        insertChildrenPstmt.setString(2, children[1]);
+        insertChildrenPstmt.setString(3, children[2]);
+        insertChildrenPstmt.setString(4, children[3]);
+
+        insertChildrenPstmt.execute();
+
+        PreparedStatement pstmt = conn.prepareStatement(
+                "SELECT arrcompprnttest.name, "
+                        + "array_agg("
+                        + "DISTINCT(arrcompchldttest.id, "
+                        + "arrcompchldttest.name, "
+                        + "arrcompchldttest.description)) "
+                        + "AS children "
+                        + "FROM arrcompprnttest "
+                        + "LEFT JOIN arrcompchldttest "
+                        + "ON (arrcompchldttest.parent = arrcompprnttest.id) "
+                        + "WHERE arrcompprnttest.id=? "
+                        + "GROUP BY arrcompprnttest.name;");
+        pstmt.setInt(1, 1);
+        ResultSet rs = pstmt.executeQuery();
+
+        assertNotNull(rs);
+        Assert.assertTrue(rs.next());
+
+        Array childrenArray = rs.getArray("children");
+        assertNotNull(childrenArray);
+
+        ResultSet rsChildren = childrenArray.getResultSet();
+        assertNotNull(rsChildren);
+        while (rsChildren.next()) {
+            String comp = rsChildren.getString(2);
+            PGtokenizer token = new PGtokenizer(PGtokenizer.removePara(comp), ',');
+            token.remove("\"", "\""); // remove surrounding double quotes
+            if (2 < token.getSize()) {
+                int childID = Integer.parseInt(token.getToken(0));
+                // remove double quotes escaping with double quotes
+                String value = token.getToken(2).replace("\"\"", "\"");
+                Assert.assertEquals(children[childID - 1], value);
+            } else {
+                Assert.fail("Needs to have 3 tokens");
+            }
+        }
+    }
+
+    @Test
+    public void testCasingComposite() throws SQLException {
+        Assume.assumeTrue("Arrays of composite types requires PostgreSQL 8.3+",
+                TestUtil.haveMinimumServerVersion(conn, ServerVersion.v8_3));
+
+        PGobject cc = new PGobject();
+        cc.setType("\"CorrectCasing\"");
+        cc.setValue("(1)");
+        Object[] in = new Object[1];
+        in[0] = cc;
+
+        Array arr = conn.createArrayOf("\"CorrectCasing\"", in);
+        PreparedStatement pstmt = conn.prepareStatement("SELECT ?::\"CorrectCasing\"[]");
+        pstmt.setArray(1, arr);
+        ResultSet rs = pstmt.executeQuery();
+
+        Assert.assertTrue(rs.next());
+        Object[] resArr = (Object[]) rs.getArray(1).getArray();
+
+        Assert.assertTrue(resArr[0] instanceof PGobject);
+        PGobject resObj = (PGobject) resArr[0];
+        Assert.assertEquals("(1)", resObj.getValue());
+    }
+
+    @Test
+    public void testCasingBuiltinAlias() throws SQLException {
+        Array arr = conn.createArrayOf("INT", new Integer[]{1, 2, 3});
+        PreparedStatement pstmt = conn.prepareStatement("SELECT ?::INT[]");
+        pstmt.setArray(1, arr);
+        ResultSet rs = pstmt.executeQuery();
+
+        Assert.assertTrue(rs.next());
+        Integer[] resArr = (Integer[]) rs.getArray(1).getArray();
+
+        Assert.assertArrayEquals(new Integer[]{1, 2, 3}, resArr);
+    }
+
+    @Test
+    public void testCasingBuiltinNonAlias() throws SQLException {
+        Array arr = conn.createArrayOf("INT4", new Integer[]{1, 2, 3});
+        PreparedStatement pstmt = conn.prepareStatement("SELECT ?::INT4[]");
+        pstmt.setArray(1, arr);
+        ResultSet rs = pstmt.executeQuery();
+
+        Assert.assertTrue(rs.next());
+        Integer[] resArr = (Integer[]) rs.getArray(1).getArray();
+
+        Assert.assertArrayEquals(new Integer[]{1, 2, 3}, resArr);
+    }
+
+    @Test
+    public void testEvilCasing() throws SQLException {
+        Assume.assumeTrue("Arrays of composite types requires PostgreSQL 8.3+",
+                TestUtil.haveMinimumServerVersion(conn, ServerVersion.v8_3));
+
+        PGobject cc = new PGobject();
+        cc.setType("\"Evil.Table\"");
+        cc.setValue("(1)");
+        Object[] in = new Object[1];
+        in[0] = cc;
+
+        Array arr = conn.createArrayOf("\"Evil.Table\"", in);
+        PreparedStatement pstmt = conn.prepareStatement("SELECT ?::\"Evil.Table\"[]");
+        pstmt.setArray(1, arr);
+        ResultSet rs = pstmt.executeQuery();
+
+        Assert.assertTrue(rs.next());
+        Object[] resArr = (Object[]) rs.getArray(1).getArray();
+
+        Assert.assertTrue(resArr[0] instanceof PGobject);
+        PGobject resObj = (PGobject) resArr[0];
+        Assert.assertEquals("(1)", resObj.getValue());
+    }
+
+    @Test
+    public void testToString() throws SQLException {
+        Double[] d = new Double[4];
+
+        d[0] = 3.5;
+        d[1] = -4.5;
+        d[2] = null;
+        d[3] = 77.0;
+
+        Array arr = con.createArrayOf("float8", d);
+        PreparedStatement pstmt = con.prepareStatement("INSERT INTO arrtest(floatarr) VALUES (?)");
+        ResultSet rs = null;
+
+        try {
+            pstmt.setArray(1, arr);
+            pstmt.execute();
+        } finally {
+            TestUtil.closeQuietly(pstmt);
+        }
+
+        Statement stmt = null;
+        try {
+            stmt = con.createStatement();
+
+            rs = stmt.executeQuery("select floatarr from arrtest");
+
+            while (rs.next()) {
+                Array doubles = rs.getArray(1);
+                String actual = doubles.toString();
+                if (actual != null) {
+                    // if a binary array is provided, the string representation looks like [0:1][0:1]={{1,2},{3,4}}
+                    int idx = actual.indexOf('=');
+                    if (idx > 0) {
+                        actual = actual.substring(idx + 1);
+                    }
+                    // Remove all double quotes. They do not make a difference here.
+                    actual = actual.replaceAll("\"", "");
+                }
+                //the string format may vary based on how data stored
+                MatcherAssert.assertThat(actual, RegexMatcher.matchesPattern("\\{3\\.5,-4\\.5,NULL,77(.0)?\\}"));
+            }
+
+        } finally {
+            TestUtil.closeQuietly(rs);
+            TestUtil.closeQuietly(stmt);
+        }
+    }
+
+    @Test
+    public void nullArray() throws SQLException {
+        PreparedStatement ps = con.prepareStatement("INSERT INTO arrtest(floatarr) VALUES (?)");
+
+        ps.setNull(1, Types.ARRAY, "float8[]");
+        ps.execute();
+
+        ps.close();
+        ps = con.prepareStatement("select floatarr from arrtest");
+        ResultSet rs = ps.executeQuery();
+        Assert.assertTrue("arrtest should contain a row", rs.next());
+        Array getArray = rs.getArray(1);
+        Assert.assertNull("null array should return null value on getArray", getArray);
+        Object getObject = rs.getObject(1);
+        Assert.assertNull("null array should return null on getObject", getObject);
+    }
+
+    @Test
+    public void createNullArray() throws SQLException {
+        Array arr = con.createArrayOf("float8", null);
+        assertNotNull(arr);
+        Assert.assertNull(arr.getArray());
+    }
+
+    @Test
+    public void multiDimIntArray() throws SQLException {
+        Array arr = con.createArrayOf("int4", new int[][]{{1, 2}, {3, 4}});
+        PreparedStatement ps = con.prepareStatement("select ?::int4[][]");
+        ps.setArray(1, arr);
+        ResultSet rs = ps.executeQuery();
+        rs.next();
+        Array resArray = rs.getArray(1);
+        String stringValue = resArray.toString();
+        // if a binary array is provided, the string representation looks like [0:1][0:1]={{1,2},{3,4}}
+        int idx = stringValue.indexOf('=');
+        if (idx > 0) {
+            stringValue = stringValue.substring(idx + 1);
+        }
+        // Both {{"1","2"},{"3","4"}} and {{1,2},{3,4}} are the same array representation
+        stringValue = stringValue.replaceAll("\"", "");
+        Assert.assertEquals("{{1,2},{3,4}}", stringValue);
+        TestUtil.closeQuietly(rs);
+        TestUtil.closeQuietly(ps);
+    }
+
+    @Test
+    public void insertAndQueryMultiDimArray() throws SQLException {
+        Array arr = con.createArrayOf("int4", new int[][]{{1, 2}, {3, 4}});
+        PreparedStatement insertPs = con.prepareStatement("INSERT INTO arrtest(intarr2) VALUES (?)");
+        insertPs.setArray(1, arr);
+        insertPs.execute();
+        insertPs.close();
+
+        PreparedStatement selectPs = con.prepareStatement("SELECT intarr2 FROM arrtest");
+        ResultSet rs = selectPs.executeQuery();
+        rs.next();
+
+        Array array = rs.getArray(1);
+        Integer[][] secondRowValues = (Integer[][]) array.getArray(2, 1);
+
+        Assert.assertEquals(3, secondRowValues[0][0].intValue());
+        Assert.assertEquals(4, secondRowValues[0][1].intValue());
+    }
+
+    @Test
+    public void testJsonbArray() throws SQLException {
+        Assume.assumeTrue("jsonb requires PostgreSQL 9.4+", TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_4));
+        TestUtil.createTempTable(con, "jsonbarray", "jbarray jsonb[]");
+        try (Statement stmt = con.createStatement()) {
+            stmt.executeUpdate("insert into jsonbarray values( ARRAY['{\"a\":\"a\"}'::jsonb, '{\"b\":\"b\"}'::jsonb] )");
+            try (ResultSet rs = stmt.executeQuery("select jbarray from jsonbarray")) {
+                assertTrue(rs.next());
+                Array jsonArray = rs.getArray(1);
+                assertNotNull(jsonArray);
+                assertEquals("jsonb", jsonArray.getBaseTypeName());
+            }
+        }
     }
-  }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc4/BinaryStreamTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/BinaryStreamTest.java
index 9ea02d2..a8fdbf0 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc4/BinaryStreamTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/BinaryStreamTest.java
@@ -20,144 +20,144 @@ import java.util.Random;
 
 public class BinaryStreamTest extends BaseTest4 {
 
-  private ByteBuffer testData;
+    private ByteBuffer testData;
 
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-    assumeByteaSupported();
-    TestUtil.createTable(con, "images", "img bytea");
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        assumeByteaSupported();
+        TestUtil.createTable(con, "images", "img bytea");
 
-    Random random = new Random(31459);
-    testData = ByteBuffer.allocate(200 * 1024);
-    while (testData.remaining() > 0) {
-      testData.putLong(random.nextLong());
-    }
-  }
-
-  @Override
-  public void tearDown() throws SQLException {
-    TestUtil.dropTable(con, "images");
-    super.tearDown();
-  }
-
-  private void insertStreamKownLength(byte[] data) throws Exception {
-    PreparedStatement updatePS = con.prepareStatement(TestUtil.insertSQL("images", "img", "?"));
-    try {
-      updatePS.setBinaryStream(1, new ByteArrayInputStream(data), data.length);
-      updatePS.executeUpdate();
-    } finally {
-      updatePS.close();
-    }
-  }
-
-  private void insertStreamUnknownLength(byte[] data) throws Exception {
-    PreparedStatement updatePS = con.prepareStatement(TestUtil.insertSQL("images", "img", "?"));
-    try {
-      updatePS.setBinaryStream(1, new ByteArrayInputStream(data));
-      updatePS.executeUpdate();
-    } finally {
-      updatePS.close();
-    }
-  }
-
-  private void validateContent(byte[] data) throws Exception {
-    PreparedStatement selectPS = con.prepareStatement(TestUtil.selectSQL("images", "img"));
-    try {
-      ResultSet rs = selectPS.executeQuery();
-      try {
-        rs.next();
-        byte[] actualData = rs.getBytes(1);
-        Assert.assertArrayEquals("Sent and received data are not the same", data, actualData);
-      } finally {
-        rs.close();
-      }
-    } finally {
-      selectPS.close();
+        Random random = new Random(31459);
+        testData = ByteBuffer.allocate(200 * 1024);
+        while (testData.remaining() > 0) {
+            testData.putLong(random.nextLong());
+        }
     }
 
-    PreparedStatement deletePS = con.prepareStatement("DELETE FROM images");
-    try {
-      deletePS.executeUpdate();
-    } finally {
-      deletePS.close();
+    @Override
+    public void tearDown() throws SQLException {
+        TestUtil.dropTable(con, "images");
+        super.tearDown();
     }
-  }
 
-  private byte[] getTestData(int size) {
-    testData.rewind();
-    byte[] data = new byte[size];
-    testData.get(data);
-    return data;
-  }
+    private void insertStreamKownLength(byte[] data) throws Exception {
+        PreparedStatement updatePS = con.prepareStatement(TestUtil.insertSQL("images", "img", "?"));
+        try {
+            updatePS.setBinaryStream(1, new ByteArrayInputStream(data), data.length);
+            updatePS.executeUpdate();
+        } finally {
+            updatePS.close();
+        }
+    }
 
-  @Test
-  public void testKnownLengthEmpty() throws Exception {
-    byte[] data = new byte[0];
-    insertStreamKownLength(data);
-    validateContent(data);
-  }
+    private void insertStreamUnknownLength(byte[] data) throws Exception {
+        PreparedStatement updatePS = con.prepareStatement(TestUtil.insertSQL("images", "img", "?"));
+        try {
+            updatePS.setBinaryStream(1, new ByteArrayInputStream(data));
+            updatePS.executeUpdate();
+        } finally {
+            updatePS.close();
+        }
+    }
 
-  @Test
-  public void testKnownLength2Kb() throws Exception {
-    byte[] data = getTestData(2 * 1024);
-    insertStreamKownLength(data);
-    validateContent(data);
-  }
+    private void validateContent(byte[] data) throws Exception {
+        PreparedStatement selectPS = con.prepareStatement(TestUtil.selectSQL("images", "img"));
+        try {
+            ResultSet rs = selectPS.executeQuery();
+            try {
+                rs.next();
+                byte[] actualData = rs.getBytes(1);
+                Assert.assertArrayEquals("Sent and received data are not the same", data, actualData);
+            } finally {
+                rs.close();
+            }
+        } finally {
+            selectPS.close();
+        }
 
-  @Test
-  public void testKnownLength10Kb() throws Exception {
-    byte[] data = getTestData(10 * 1024);
-    insertStreamKownLength(data);
-    validateContent(data);
-  }
+        PreparedStatement deletePS = con.prepareStatement("DELETE FROM images");
+        try {
+            deletePS.executeUpdate();
+        } finally {
+            deletePS.close();
+        }
+    }
 
-  @Test
-  public void testKnownLength100Kb() throws Exception {
-    byte[] data = getTestData(100 * 1024);
-    insertStreamKownLength(data);
-    validateContent(data);
-  }
+    private byte[] getTestData(int size) {
+        testData.rewind();
+        byte[] data = new byte[size];
+        testData.get(data);
+        return data;
+    }
 
-  @Test
-  public void testKnownLength200Kb() throws Exception {
-    byte[] data = getTestData(200 * 1024);
-    insertStreamKownLength(data);
-    validateContent(data);
-  }
+    @Test
+    public void testKnownLengthEmpty() throws Exception {
+        byte[] data = new byte[0];
+        insertStreamKownLength(data);
+        validateContent(data);
+    }
 
-  @Test
-  public void testUnknownLengthEmpty() throws Exception {
-    byte[] data = getTestData(2 * 1024);
-    insertStreamUnknownLength(data);
-    validateContent(data);
-  }
+    @Test
+    public void testKnownLength2Kb() throws Exception {
+        byte[] data = getTestData(2 * 1024);
+        insertStreamKownLength(data);
+        validateContent(data);
+    }
 
-  @Test
-  public void testUnknownLength2Kb() throws Exception {
-    byte[] data = getTestData(2 * 1024);
-    insertStreamUnknownLength(data);
-    validateContent(data);
-  }
+    @Test
+    public void testKnownLength10Kb() throws Exception {
+        byte[] data = getTestData(10 * 1024);
+        insertStreamKownLength(data);
+        validateContent(data);
+    }
 
-  @Test
-  public void testUnknownLength10Kb() throws Exception {
-    byte[] data = getTestData(10 * 1024);
-    insertStreamUnknownLength(data);
-    validateContent(data);
-  }
+    @Test
+    public void testKnownLength100Kb() throws Exception {
+        byte[] data = getTestData(100 * 1024);
+        insertStreamKownLength(data);
+        validateContent(data);
+    }
 
-  @Test
-  public void testUnknownLength100Kb() throws Exception {
-    byte[] data = getTestData(100 * 1024);
-    insertStreamUnknownLength(data);
-    validateContent(data);
-  }
+    @Test
+    public void testKnownLength200Kb() throws Exception {
+        byte[] data = getTestData(200 * 1024);
+        insertStreamKownLength(data);
+        validateContent(data);
+    }
 
-  @Test
-  public void testUnknownLength200Kb() throws Exception {
-    byte[] data = getTestData(200 * 1024);
-    insertStreamUnknownLength(data);
-    validateContent(data);
-  }
+    @Test
+    public void testUnknownLengthEmpty() throws Exception {
+        byte[] data = getTestData(2 * 1024);
+        insertStreamUnknownLength(data);
+        validateContent(data);
+    }
+
+    @Test
+    public void testUnknownLength2Kb() throws Exception {
+        byte[] data = getTestData(2 * 1024);
+        insertStreamUnknownLength(data);
+        validateContent(data);
+    }
+
+    @Test
+    public void testUnknownLength10Kb() throws Exception {
+        byte[] data = getTestData(10 * 1024);
+        insertStreamUnknownLength(data);
+        validateContent(data);
+    }
+
+    @Test
+    public void testUnknownLength100Kb() throws Exception {
+        byte[] data = getTestData(100 * 1024);
+        insertStreamUnknownLength(data);
+        validateContent(data);
+    }
+
+    @Test
+    public void testUnknownLength200Kb() throws Exception {
+        byte[] data = getTestData(200 * 1024);
+        insertStreamUnknownLength(data);
+        validateContent(data);
+    }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc4/BinaryTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/BinaryTest.java
index 1089e3a..85b8263 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc4/BinaryTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/BinaryTest.java
@@ -28,109 +28,109 @@ import java.sql.SQLException;
  * save bandwidth and reduce decoding time.
  */
 public class BinaryTest extends BaseTest4 {
-  private ResultSet results;
-  private PreparedStatement statement;
+    private ResultSet results;
+    private PreparedStatement statement;
 
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-    Assume.assumeTrue("Server-prepared statements are not supported in 'simple protocol only'",
-        preferQueryMode != PreferQueryMode.SIMPLE);
-    statement = con.prepareStatement("select 1");
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        Assume.assumeTrue("Server-prepared statements are not supported in 'simple protocol only'",
+                preferQueryMode != PreferQueryMode.SIMPLE);
+        statement = con.prepareStatement("select 1");
 
-    ((PGStatement) statement).setPrepareThreshold(5);
-  }
-
-  @Test
-  public void testPreparedStatement_3() throws Exception {
-    ((PGStatement) statement).setPrepareThreshold(3);
-
-    results = statement.executeQuery();
-    assertEquals(Field.TEXT_FORMAT, getFormat(results));
-
-    results = statement.executeQuery();
-    assertEquals(Field.TEXT_FORMAT, getFormat(results));
-
-    results = statement.executeQuery();
-    assertEquals(Field.TEXT_FORMAT, getFormat(results));
-
-    results = statement.executeQuery();
-    assertEquals(Field.BINARY_FORMAT, getFormat(results));
-
-    ((PGStatement) statement).setPrepareThreshold(5);
-  }
-
-  @Test
-  public void testPreparedStatement_1() throws Exception {
-    ((PGStatement) statement).setPrepareThreshold(1);
-
-    results = statement.executeQuery();
-    assertEquals(Field.TEXT_FORMAT, getFormat(results));
-
-    results = statement.executeQuery();
-    assertEquals(Field.BINARY_FORMAT, getFormat(results));
-
-    results = statement.executeQuery();
-    assertEquals(Field.BINARY_FORMAT, getFormat(results));
-
-    results = statement.executeQuery();
-    assertEquals(Field.BINARY_FORMAT, getFormat(results));
-
-    ((PGStatement) statement).setPrepareThreshold(5);
-  }
-
-  @Test
-  public void testPreparedStatement_0() throws Exception {
-    ((PGStatement) statement).setPrepareThreshold(0);
-
-    results = statement.executeQuery();
-    assertEquals(Field.TEXT_FORMAT, getFormat(results));
-
-    results = statement.executeQuery();
-    assertEquals(Field.TEXT_FORMAT, getFormat(results));
-
-    results = statement.executeQuery();
-    assertEquals(Field.TEXT_FORMAT, getFormat(results));
-
-    results = statement.executeQuery();
-    assertEquals(Field.TEXT_FORMAT, getFormat(results));
-
-    ((PGStatement) statement).setPrepareThreshold(5);
-  }
-
-  @Test
-  public void testPreparedStatement_negative1() throws Exception {
-    ((PGStatement) statement).setPrepareThreshold(-1);
-
-    results = statement.executeQuery();
-    assertEquals(Field.BINARY_FORMAT, getFormat(results));
-
-    results = statement.executeQuery();
-    assertEquals(Field.BINARY_FORMAT, getFormat(results));
-
-    results = statement.executeQuery();
-    assertEquals(Field.BINARY_FORMAT, getFormat(results));
-
-    results = statement.executeQuery();
-    assertEquals(Field.BINARY_FORMAT, getFormat(results));
-
-    ((PGStatement) statement).setPrepareThreshold(5);
-  }
-
-  @Test
-  public void testReceiveBinary() throws Exception {
-    PreparedStatement ps = con.prepareStatement("select ?");
-    for (int i = 0; i < 10; i++) {
-      ps.setInt(1, 42 + i);
-      ResultSet rs = ps.executeQuery();
-      assertEquals("One row should be returned", true, rs.next());
-      assertEquals(42 + i, rs.getInt(1));
-      rs.close();
+        ((PGStatement) statement).setPrepareThreshold(5);
     }
-    ps.close();
-  }
 
-  private int getFormat(ResultSet results) throws SQLException {
-    return ((PGResultSetMetaData) results.getMetaData()).getFormat(1);
-  }
+    @Test
+    public void testPreparedStatement_3() throws Exception {
+        ((PGStatement) statement).setPrepareThreshold(3);
+
+        results = statement.executeQuery();
+        assertEquals(Field.TEXT_FORMAT, getFormat(results));
+
+        results = statement.executeQuery();
+        assertEquals(Field.TEXT_FORMAT, getFormat(results));
+
+        results = statement.executeQuery();
+        assertEquals(Field.TEXT_FORMAT, getFormat(results));
+
+        results = statement.executeQuery();
+        assertEquals(Field.BINARY_FORMAT, getFormat(results));
+
+        ((PGStatement) statement).setPrepareThreshold(5);
+    }
+
+    @Test
+    public void testPreparedStatement_1() throws Exception {
+        ((PGStatement) statement).setPrepareThreshold(1);
+
+        results = statement.executeQuery();
+        assertEquals(Field.TEXT_FORMAT, getFormat(results));
+
+        results = statement.executeQuery();
+        assertEquals(Field.BINARY_FORMAT, getFormat(results));
+
+        results = statement.executeQuery();
+        assertEquals(Field.BINARY_FORMAT, getFormat(results));
+
+        results = statement.executeQuery();
+        assertEquals(Field.BINARY_FORMAT, getFormat(results));
+
+        ((PGStatement) statement).setPrepareThreshold(5);
+    }
+
+    @Test
+    public void testPreparedStatement_0() throws Exception {
+        ((PGStatement) statement).setPrepareThreshold(0);
+
+        results = statement.executeQuery();
+        assertEquals(Field.TEXT_FORMAT, getFormat(results));
+
+        results = statement.executeQuery();
+        assertEquals(Field.TEXT_FORMAT, getFormat(results));
+
+        results = statement.executeQuery();
+        assertEquals(Field.TEXT_FORMAT, getFormat(results));
+
+        results = statement.executeQuery();
+        assertEquals(Field.TEXT_FORMAT, getFormat(results));
+
+        ((PGStatement) statement).setPrepareThreshold(5);
+    }
+
+    @Test
+    public void testPreparedStatement_negative1() throws Exception {
+        ((PGStatement) statement).setPrepareThreshold(-1);
+
+        results = statement.executeQuery();
+        assertEquals(Field.BINARY_FORMAT, getFormat(results));
+
+        results = statement.executeQuery();
+        assertEquals(Field.BINARY_FORMAT, getFormat(results));
+
+        results = statement.executeQuery();
+        assertEquals(Field.BINARY_FORMAT, getFormat(results));
+
+        results = statement.executeQuery();
+        assertEquals(Field.BINARY_FORMAT, getFormat(results));
+
+        ((PGStatement) statement).setPrepareThreshold(5);
+    }
+
+    @Test
+    public void testReceiveBinary() throws Exception {
+        PreparedStatement ps = con.prepareStatement("select ?");
+        for (int i = 0; i < 10; i++) {
+            ps.setInt(1, 42 + i);
+            ResultSet rs = ps.executeQuery();
+            assertEquals("One row should be returned", true, rs.next());
+            assertEquals(42 + i, rs.getInt(1));
+            rs.close();
+        }
+        ps.close();
+    }
+
+    private int getFormat(ResultSet results) throws SQLException {
+        return ((PGResultSetMetaData) results.getMetaData()).getFormat(1);
+    }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc4/BlobTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/BlobTest.java
index 70dd3cb..6d72f63 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc4/BlobTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/BlobTest.java
@@ -31,152 +31,152 @@ import java.sql.Statement;
  */
 class BlobTest {
 
-  private Connection conn;
+    private Connection conn;
 
-  @BeforeEach
-  void setUp() throws Exception {
-    conn = TestUtil.openDB();
-    TestUtil.createTable(conn, "testblob", "id name,lo oid");
-    conn.setAutoCommit(false);
-  }
+    @BeforeEach
+    void setUp() throws Exception {
+        conn = TestUtil.openDB();
+        TestUtil.createTable(conn, "testblob", "id name,lo oid");
+        conn.setAutoCommit(false);
+    }
 
-  @AfterEach
-  void tearDown() throws Exception {
-    conn.setAutoCommit(true);
-    try {
-      Statement stmt = conn.createStatement();
-      try {
-        stmt.execute("SELECT lo_unlink(lo) FROM testblob");
-      } finally {
+    @AfterEach
+    void tearDown() throws Exception {
+        conn.setAutoCommit(true);
         try {
-          stmt.close();
-        } catch (Exception e) {
-        }
-      }
-    } finally {
-      TestUtil.dropTable(conn, "testblob");
-      TestUtil.closeDB(conn);
-    }
-  }
-
-  @Test
-  void setBlobWithStream() throws Exception {
-    byte[] data = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque bibendum dapibus varius."
-        .getBytes("UTF-8");
-    try ( PreparedStatement insertPS = conn.prepareStatement(TestUtil.insertSQL("testblob", "lo", "?")) ) {
-      insertPS.setBlob(1, new ByteArrayInputStream(data));
-      insertPS.executeUpdate();
-    }
-
-    try (Statement selectStmt = conn.createStatement() ) {
-      try (ResultSet rs = selectStmt.executeQuery(TestUtil.selectSQL("testblob", "lo"))) {
-        assertTrue(rs.next());
-
-        Blob actualBlob = rs.getBlob(1);
-        byte[] actualBytes = actualBlob.getBytes(1, (int) actualBlob.length());
-
-        assertArrayEquals(data, actualBytes);
-      }
-    }
-  }
-
-  @Test
-  void setBlobWithStreamAndLength() throws Exception {
-    byte[] fullData = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Suspendisse placerat tristique tellus, id tempus lectus."
-            .getBytes("UTF-8");
-    byte[] data =
-        "Lorem ipsum dolor sit amet, consectetur adipiscing elit.".getBytes("UTF-8");
-
-    try ( PreparedStatement insertPS = conn.prepareStatement(TestUtil.insertSQL("testblob", "lo", "?")) ) {
-      insertPS.setBlob(1, new ByteArrayInputStream(fullData), data.length);
-      insertPS.executeUpdate();
-    }
-
-    try ( Statement selectStmt = conn.createStatement() ) {
-      try (ResultSet rs = selectStmt.executeQuery(TestUtil.selectSQL("testblob", "lo"))) {
-        assertTrue(rs.next());
-
-        Blob actualBlob = rs.getBlob(1);
-        byte[] actualBytes = actualBlob.getBytes(1, (int) actualBlob.length());
-
-        assertArrayEquals(data, actualBytes);
-      }
-    }
-  }
-
-  @Test
-  void getBinaryStreamWithBoundaries() throws Exception {
-    byte[] data =
-        "Cras vestibulum tellus eu sapien imperdiet ornare.".getBytes("UTF-8");
-    try ( PreparedStatement insertPS = conn.prepareStatement(TestUtil.insertSQL("testblob", "lo", "?")) ) {
-      insertPS.setBlob(1, new ByteArrayInputStream(data), data.length);
-      insertPS.executeUpdate();
-    }
-    try ( Statement selectStmt = conn.createStatement() ) {
-      try (ResultSet rs = selectStmt.executeQuery(TestUtil.selectSQL("testblob", "lo"))) {
-        assertTrue(rs.next());
-
-        byte[] actualData = new byte[10];
-        Blob actualBlob = rs.getBlob(1);
-        InputStream stream = actualBlob.getBinaryStream(6, 10);
-        try {
-          stream.read(actualData);
-          assertEquals(-1, stream.read(new byte[1]), "Stream should be at end");
+            Statement stmt = conn.createStatement();
+            try {
+                stmt.execute("SELECT lo_unlink(lo) FROM testblob");
+            } finally {
+                try {
+                    stmt.close();
+                } catch (Exception e) {
+                }
+            }
         } finally {
-          stream.close();
+            TestUtil.dropTable(conn, "testblob");
+            TestUtil.closeDB(conn);
         }
-        assertEquals("vestibulum", new String(actualData, "UTF-8"));
-      }
-    }
-  }
-
-  @Test
-  void getBinaryStreamWithBoundaries2() throws Exception {
-    byte[] data =
-        "Cras vestibulum tellus eu sapien imperdiet ornare.".getBytes("UTF-8");
-
-    try ( PreparedStatement insertPS = conn.prepareStatement(TestUtil.insertSQL("testblob", "lo", "?")) ) {
-      insertPS.setBlob(1, new ByteArrayInputStream(data), data.length);
-      insertPS.executeUpdate();
     }
 
-    try ( Statement selectStmt = conn.createStatement() ) {
-      try (ResultSet rs = selectStmt.executeQuery(TestUtil.selectSQL("testblob", "lo"))) {
-        assertTrue(rs.next());
-
-        byte[] actualData = new byte[9];
-        Blob actualBlob = rs.getBlob(1);
-        try ( InputStream stream = actualBlob.getBinaryStream(6, 10) ) {
-          // read 9 bytes 1 at a time
-          for (int i = 0; i < 9; i++) {
-            actualData[i] = (byte) stream.read();
-          }
-          /* try to read past the end and make sure we get 1 byte */
-          assertEquals(1, stream.read(new byte[2]), "There should be 1 byte left");
-          /* now read one more and we should get an EOF */
-          assertEquals(-1, stream.read(new byte[1]), "Stream should be at end");
+    @Test
+    void setBlobWithStream() throws Exception {
+        byte[] data = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque bibendum dapibus varius."
+                .getBytes("UTF-8");
+        try (PreparedStatement insertPS = conn.prepareStatement(TestUtil.insertSQL("testblob", "lo", "?"))) {
+            insertPS.setBlob(1, new ByteArrayInputStream(data));
+            insertPS.executeUpdate();
         }
-        assertEquals("vestibulu", new String(actualData, "UTF-8"));
-      }
-    }
-  }
 
-  @Test
-  void free() throws SQLException {
-    try ( Statement stmt = conn.createStatement() ) {
-      stmt.execute("INSERT INTO testblob(lo) VALUES(lo_creat(-1))");
-      try (ResultSet rs = stmt.executeQuery("SELECT lo FROM testblob")) {
-        assertTrue(rs.next());
+        try (Statement selectStmt = conn.createStatement()) {
+            try (ResultSet rs = selectStmt.executeQuery(TestUtil.selectSQL("testblob", "lo"))) {
+                assertTrue(rs.next());
 
-        Blob blob = rs.getBlob(1);
-        blob.free();
-        try {
-          blob.length();
-          fail("Should have thrown an Exception because it was freed.");
-        } catch (SQLException sqle) {
-          // expected
+                Blob actualBlob = rs.getBlob(1);
+                byte[] actualBytes = actualBlob.getBytes(1, (int) actualBlob.length());
+
+                assertArrayEquals(data, actualBytes);
+            }
+        }
+    }
+
+    @Test
+    void setBlobWithStreamAndLength() throws Exception {
+        byte[] fullData = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Suspendisse placerat tristique tellus, id tempus lectus."
+                .getBytes("UTF-8");
+        byte[] data =
+                "Lorem ipsum dolor sit amet, consectetur adipiscing elit.".getBytes("UTF-8");
+
+        try (PreparedStatement insertPS = conn.prepareStatement(TestUtil.insertSQL("testblob", "lo", "?"))) {
+            insertPS.setBlob(1, new ByteArrayInputStream(fullData), data.length);
+            insertPS.executeUpdate();
+        }
+
+        try (Statement selectStmt = conn.createStatement()) {
+            try (ResultSet rs = selectStmt.executeQuery(TestUtil.selectSQL("testblob", "lo"))) {
+                assertTrue(rs.next());
+
+                Blob actualBlob = rs.getBlob(1);
+                byte[] actualBytes = actualBlob.getBytes(1, (int) actualBlob.length());
+
+                assertArrayEquals(data, actualBytes);
+            }
+        }
+    }
+
+    @Test
+    void getBinaryStreamWithBoundaries() throws Exception {
+        byte[] data =
+                "Cras vestibulum tellus eu sapien imperdiet ornare.".getBytes("UTF-8");
+        try (PreparedStatement insertPS = conn.prepareStatement(TestUtil.insertSQL("testblob", "lo", "?"))) {
+            insertPS.setBlob(1, new ByteArrayInputStream(data), data.length);
+            insertPS.executeUpdate();
+        }
+        try (Statement selectStmt = conn.createStatement()) {
+            try (ResultSet rs = selectStmt.executeQuery(TestUtil.selectSQL("testblob", "lo"))) {
+                assertTrue(rs.next());
+
+                byte[] actualData = new byte[10];
+                Blob actualBlob = rs.getBlob(1);
+                InputStream stream = actualBlob.getBinaryStream(6, 10);
+                try {
+                    stream.read(actualData);
+                    assertEquals(-1, stream.read(new byte[1]), "Stream should be at end");
+                } finally {
+                    stream.close();
+                }
+                assertEquals("vestibulum", new String(actualData, "UTF-8"));
+            }
+        }
+    }
+
+    @Test
+    void getBinaryStreamWithBoundaries2() throws Exception {
+        byte[] data =
+                "Cras vestibulum tellus eu sapien imperdiet ornare.".getBytes("UTF-8");
+
+        try (PreparedStatement insertPS = conn.prepareStatement(TestUtil.insertSQL("testblob", "lo", "?"))) {
+            insertPS.setBlob(1, new ByteArrayInputStream(data), data.length);
+            insertPS.executeUpdate();
+        }
+
+        try (Statement selectStmt = conn.createStatement()) {
+            try (ResultSet rs = selectStmt.executeQuery(TestUtil.selectSQL("testblob", "lo"))) {
+                assertTrue(rs.next());
+
+                byte[] actualData = new byte[9];
+                Blob actualBlob = rs.getBlob(1);
+                try (InputStream stream = actualBlob.getBinaryStream(6, 10)) {
+                    // read 9 bytes 1 at a time
+                    for (int i = 0; i < 9; i++) {
+                        actualData[i] = (byte) stream.read();
+                    }
+                    /* try to read past the end and make sure we get 1 byte */
+                    assertEquals(1, stream.read(new byte[2]), "There should be 1 byte left");
+                    /* now read one more and we should get an EOF */
+                    assertEquals(-1, stream.read(new byte[1]), "Stream should be at end");
+                }
+                assertEquals("vestibulu", new String(actualData, "UTF-8"));
+            }
+        }
+    }
+
+    @Test
+    void free() throws SQLException {
+        try (Statement stmt = conn.createStatement()) {
+            stmt.execute("INSERT INTO testblob(lo) VALUES(lo_creat(-1))");
+            try (ResultSet rs = stmt.executeQuery("SELECT lo FROM testblob")) {
+                assertTrue(rs.next());
+
+                Blob blob = rs.getBlob(1);
+                blob.free();
+                try {
+                    blob.length();
+                    fail("Should have thrown an Exception because it was freed.");
+                } catch (SQLException sqle) {
+                    // expected
+                }
+            }
         }
-      }
     }
-  }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc4/CharacterStreamTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/CharacterStreamTest.java
index 64da80f..de065bc 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc4/CharacterStreamTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/CharacterStreamTest.java
@@ -18,205 +18,205 @@ import java.sql.SQLFeatureNotSupportedException;
 
 public class CharacterStreamTest extends BaseTest4 {
 
-  private static final String TEST_TABLE_NAME = "charstream";
-  private static final String TEST_COLUMN_NAME = "cs";
+    private static final String TEST_TABLE_NAME = "charstream";
+    private static final String TEST_COLUMN_NAME = "cs";
 
-  private static final String _insert;
-  private static final String _select;
+    private static final String _insert;
+    private static final String _select;
 
-  static {
-    _insert = String.format("INSERT INTO %s (%s) VALUES (?)", TEST_TABLE_NAME, TEST_COLUMN_NAME);
-    _select = String.format("SELECT %s FROM %s", TEST_COLUMN_NAME, TEST_TABLE_NAME);
-  }
-
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-    TestUtil.createTempTable(con, TEST_TABLE_NAME, "cs text");
-  }
-
-  private void insertStreamKnownIntLength(String data) throws Exception {
-    PreparedStatement insertPS = con.prepareStatement(_insert);
-    try {
-      Reader reader = data != null ? new StringReader(data) : null;
-      int length = data != null ? data.length() : 0;
-      insertPS.setCharacterStream(1, reader, length);
-      insertPS.executeUpdate();
-    } finally {
-      TestUtil.closeQuietly(insertPS);
-    }
-  }
-
-  private void insertStreamKnownLongLength(String data) throws Exception {
-    PreparedStatement insertPS = con.prepareStatement(_insert);
-    try {
-      Reader reader = data != null ? new StringReader(data) : null;
-      long length = data != null ? data.length() : 0;
-      insertPS.setCharacterStream(1, reader, length);
-      insertPS.executeUpdate();
-    } finally {
-      TestUtil.closeQuietly(insertPS);
-    }
-  }
-
-  private void insertStreamUnknownLength(String data) throws Exception {
-    PreparedStatement insertPS = con.prepareStatement(_insert);
-    try {
-      Reader reader = data != null ? new StringReader(data) : null;
-      insertPS.setCharacterStream(1, reader);
-      insertPS.executeUpdate();
-    } finally {
-      TestUtil.closeQuietly(insertPS);
-    }
-  }
-
-  private void validateContent(String data) throws Exception {
-    String actualData = TestUtil.queryForString(con, _select);
-    Assert.assertEquals("Sent and received data are not the same", data, actualData);
-  }
-
-  private String getTestData(int size) {
-    StringBuilder buf = new StringBuilder(size);
-    String s = "This is a test string.\n";
-    int slen = s.length();
-    int len = 0;
-
-    while ((len + slen) < size) {
-      buf.append(s);
-      len += slen;
+    static {
+        _insert = String.format("INSERT INTO %s (%s) VALUES (?)", TEST_TABLE_NAME, TEST_COLUMN_NAME);
+        _select = String.format("SELECT %s FROM %s", TEST_COLUMN_NAME, TEST_TABLE_NAME);
     }
 
-    while (len < size) {
-      buf.append('.');
-      len++;
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        TestUtil.createTempTable(con, TEST_TABLE_NAME, "cs text");
     }
 
-    return buf.toString();
-  }
+    private void insertStreamKnownIntLength(String data) throws Exception {
+        PreparedStatement insertPS = con.prepareStatement(_insert);
+        try {
+            Reader reader = data != null ? new StringReader(data) : null;
+            int length = data != null ? data.length() : 0;
+            insertPS.setCharacterStream(1, reader, length);
+            insertPS.executeUpdate();
+        } finally {
+            TestUtil.closeQuietly(insertPS);
+        }
+    }
 
-  @Test
-  public void testKnownIntLengthNull() throws Exception {
-    String data = null;
-    insertStreamKnownIntLength(data);
-    validateContent(data);
-  }
+    private void insertStreamKnownLongLength(String data) throws Exception {
+        PreparedStatement insertPS = con.prepareStatement(_insert);
+        try {
+            Reader reader = data != null ? new StringReader(data) : null;
+            long length = data != null ? data.length() : 0;
+            insertPS.setCharacterStream(1, reader, length);
+            insertPS.executeUpdate();
+        } finally {
+            TestUtil.closeQuietly(insertPS);
+        }
+    }
 
-  @Test(expected = SQLFeatureNotSupportedException.class)
-  public void testKnownLongLengthNull() throws Exception {
-    String data = null;
-    insertStreamKnownLongLength(data);
-    validateContent(data);
-  }
+    private void insertStreamUnknownLength(String data) throws Exception {
+        PreparedStatement insertPS = con.prepareStatement(_insert);
+        try {
+            Reader reader = data != null ? new StringReader(data) : null;
+            insertPS.setCharacterStream(1, reader);
+            insertPS.executeUpdate();
+        } finally {
+            TestUtil.closeQuietly(insertPS);
+        }
+    }
 
-  @Test
-  public void testUnknownLengthNull() throws Exception {
-    String data = null;
-    insertStreamUnknownLength(data);
-    validateContent(data);
-  }
+    private void validateContent(String data) throws Exception {
+        String actualData = TestUtil.queryForString(con, _select);
+        Assert.assertEquals("Sent and received data are not the same", data, actualData);
+    }
 
-  @Test
-  public void testKnownIntLengthEmpty() throws Exception {
-    String data = "";
-    insertStreamKnownIntLength(data);
-    validateContent(data);
-  }
+    private String getTestData(int size) {
+        StringBuilder buf = new StringBuilder(size);
+        String s = "This is a test string.\n";
+        int slen = s.length();
+        int len = 0;
 
-  @Test(expected = SQLFeatureNotSupportedException.class)
-  public void testKnownLongLengthEmpty() throws Exception {
-    String data = "";
-    insertStreamKnownLongLength(data);
-    validateContent(data);
-  }
+        while ((len + slen) < size) {
+            buf.append(s);
+            len += slen;
+        }
 
-  @Test
-  public void testUnknownLengthEmpty() throws Exception {
-    String data = "";
-    insertStreamUnknownLength(data);
-    validateContent(data);
-  }
+        while (len < size) {
+            buf.append('.');
+            len++;
+        }
 
-  @Test
-  public void testKnownIntLength2Kb() throws Exception {
-    String data = getTestData(2 * 1024);
-    insertStreamKnownIntLength(data);
-    validateContent(data);
-  }
+        return buf.toString();
+    }
 
-  @Test(expected = SQLFeatureNotSupportedException.class)
-  public void testKnownLongLength2Kb() throws Exception {
-    String data = getTestData(2 * 1024);
-    insertStreamKnownLongLength(data);
-    validateContent(data);
-  }
+    @Test
+    public void testKnownIntLengthNull() throws Exception {
+        String data = null;
+        insertStreamKnownIntLength(data);
+        validateContent(data);
+    }
 
-  @Test
-  public void testUnknownLength2Kb() throws Exception {
-    String data = getTestData(2 * 1024);
-    insertStreamUnknownLength(data);
-    validateContent(data);
-  }
+    @Test(expected = SQLFeatureNotSupportedException.class)
+    public void testKnownLongLengthNull() throws Exception {
+        String data = null;
+        insertStreamKnownLongLength(data);
+        validateContent(data);
+    }
 
-  @Test
-  public void testKnownIntLength10Kb() throws Exception {
-    String data = getTestData(10 * 1024);
-    insertStreamKnownIntLength(data);
-    validateContent(data);
-  }
+    @Test
+    public void testUnknownLengthNull() throws Exception {
+        String data = null;
+        insertStreamUnknownLength(data);
+        validateContent(data);
+    }
 
-  @Test(expected = SQLFeatureNotSupportedException.class)
-  public void testKnownLongLength10Kb() throws Exception {
-    String data = getTestData(10 * 1024);
-    insertStreamKnownLongLength(data);
-    validateContent(data);
-  }
+    @Test
+    public void testKnownIntLengthEmpty() throws Exception {
+        String data = "";
+        insertStreamKnownIntLength(data);
+        validateContent(data);
+    }
 
-  @Test
-  public void testUnknownLength10Kb() throws Exception {
-    String data = getTestData(10 * 1024);
-    insertStreamUnknownLength(data);
-    validateContent(data);
-  }
+    @Test(expected = SQLFeatureNotSupportedException.class)
+    public void testKnownLongLengthEmpty() throws Exception {
+        String data = "";
+        insertStreamKnownLongLength(data);
+        validateContent(data);
+    }
 
-  @Test
-  public void testKnownIntLength100Kb() throws Exception {
-    String data = getTestData(100 * 1024);
-    insertStreamKnownIntLength(data);
-    validateContent(data);
-  }
+    @Test
+    public void testUnknownLengthEmpty() throws Exception {
+        String data = "";
+        insertStreamUnknownLength(data);
+        validateContent(data);
+    }
 
-  @Test(expected = SQLFeatureNotSupportedException.class)
-  public void testKnownLongLength100Kb() throws Exception {
-    String data = getTestData(100 * 1024);
-    insertStreamKnownLongLength(data);
-    validateContent(data);
-  }
+    @Test
+    public void testKnownIntLength2Kb() throws Exception {
+        String data = getTestData(2 * 1024);
+        insertStreamKnownIntLength(data);
+        validateContent(data);
+    }
 
-  @Test
-  public void testUnknownLength100Kb() throws Exception {
-    String data = getTestData(100 * 1024);
-    insertStreamUnknownLength(data);
-    validateContent(data);
-  }
+    @Test(expected = SQLFeatureNotSupportedException.class)
+    public void testKnownLongLength2Kb() throws Exception {
+        String data = getTestData(2 * 1024);
+        insertStreamKnownLongLength(data);
+        validateContent(data);
+    }
 
-  @Test
-  public void testKnownIntLength200Kb() throws Exception {
-    String data = getTestData(200 * 1024);
-    insertStreamKnownIntLength(data);
-    validateContent(data);
-  }
+    @Test
+    public void testUnknownLength2Kb() throws Exception {
+        String data = getTestData(2 * 1024);
+        insertStreamUnknownLength(data);
+        validateContent(data);
+    }
 
-  @Test(expected = SQLFeatureNotSupportedException.class)
-  public void testKnownLongLength200Kb() throws Exception {
-    String data = getTestData(200 * 1024);
-    insertStreamKnownLongLength(data);
-    validateContent(data);
-  }
+    @Test
+    public void testKnownIntLength10Kb() throws Exception {
+        String data = getTestData(10 * 1024);
+        insertStreamKnownIntLength(data);
+        validateContent(data);
+    }
 
-  @Test
-  public void testUnknownLength200Kb() throws Exception {
-    String data = getTestData(200 * 1024);
-    insertStreamUnknownLength(data);
-    validateContent(data);
-  }
+    @Test(expected = SQLFeatureNotSupportedException.class)
+    public void testKnownLongLength10Kb() throws Exception {
+        String data = getTestData(10 * 1024);
+        insertStreamKnownLongLength(data);
+        validateContent(data);
+    }
+
+    @Test
+    public void testUnknownLength10Kb() throws Exception {
+        String data = getTestData(10 * 1024);
+        insertStreamUnknownLength(data);
+        validateContent(data);
+    }
+
+    @Test
+    public void testKnownIntLength100Kb() throws Exception {
+        String data = getTestData(100 * 1024);
+        insertStreamKnownIntLength(data);
+        validateContent(data);
+    }
+
+    @Test(expected = SQLFeatureNotSupportedException.class)
+    public void testKnownLongLength100Kb() throws Exception {
+        String data = getTestData(100 * 1024);
+        insertStreamKnownLongLength(data);
+        validateContent(data);
+    }
+
+    @Test
+    public void testUnknownLength100Kb() throws Exception {
+        String data = getTestData(100 * 1024);
+        insertStreamUnknownLength(data);
+        validateContent(data);
+    }
+
+    @Test
+    public void testKnownIntLength200Kb() throws Exception {
+        String data = getTestData(200 * 1024);
+        insertStreamKnownIntLength(data);
+        validateContent(data);
+    }
+
+    @Test(expected = SQLFeatureNotSupportedException.class)
+    public void testKnownLongLength200Kb() throws Exception {
+        String data = getTestData(200 * 1024);
+        insertStreamKnownLongLength(data);
+        validateContent(data);
+    }
+
+    @Test
+    public void testUnknownLength200Kb() throws Exception {
+        String data = getTestData(200 * 1024);
+        insertStreamUnknownLength(data);
+        validateContent(data);
+    }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc4/ClientInfoTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/ClientInfoTest.java
index 84fb9b6..192406f 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc4/ClientInfoTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/ClientInfoTest.java
@@ -24,87 +24,87 @@ import java.util.Properties;
 
 public class ClientInfoTest extends BaseTest4 {
 
-  private String getAppName() throws SQLException {
-    Statement stmt = con.createStatement();
-    ResultSet rs = stmt.executeQuery("SHOW application_name");
-    rs.next();
-    String appName = rs.getString(1);
-    rs.close();
-    stmt.close();
-    return appName;
-  }
-
-  @Test
-  public void testSetAppName() throws SQLException {
-    if (!TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_0)) {
-      return;
+    private String getAppName() throws SQLException {
+        Statement stmt = con.createStatement();
+        ResultSet rs = stmt.executeQuery("SHOW application_name");
+        rs.next();
+        String appName = rs.getString(1);
+        rs.close();
+        stmt.close();
+        return appName;
     }
 
-    con.setClientInfo("ApplicationName", "my app");
-    assertEquals("my app", getAppName());
-    assertEquals("my app", con.getClientInfo("ApplicationName"));
-    assertEquals("my app", con.getClientInfo().getProperty("ApplicationName"));
-  }
+    @Test
+    public void testSetAppName() throws SQLException {
+        if (!TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_0)) {
+            return;
+        }
 
-  @Test
-  public void testExplicitSetAppNameNotificationIsParsed() throws SQLException {
-    if (!TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_0)) {
-      return;
+        con.setClientInfo("ApplicationName", "my app");
+        assertEquals("my app", getAppName());
+        assertEquals("my app", con.getClientInfo("ApplicationName"));
+        assertEquals("my app", con.getClientInfo().getProperty("ApplicationName"));
     }
 
-    String appName = "test-42";
+    @Test
+    public void testExplicitSetAppNameNotificationIsParsed() throws SQLException {
+        if (!TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_0)) {
+            return;
+        }
 
-    Statement s = con.createStatement();
-    s.execute("set application_name='" + appName + "'");
-    s.close();
-    assertEquals("application_name was set to " + appName + ", and it should be visible via "
-        + "con.getClientInfo", appName, con.getClientInfo("ApplicationName"));
-    assertEquals("application_name was set to " + appName + ", and it should be visible via "
-        + "con.getClientInfo", appName, con.getClientInfo().get("ApplicationName"));
-  }
+        String appName = "test-42";
 
-  @Test
-  public void testSetAppNameProps() throws SQLException {
-    if (!TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_0)) {
-      return;
+        Statement s = con.createStatement();
+        s.execute("set application_name='" + appName + "'");
+        s.close();
+        assertEquals("application_name was set to " + appName + ", and it should be visible via "
+                + "con.getClientInfo", appName, con.getClientInfo("ApplicationName"));
+        assertEquals("application_name was set to " + appName + ", and it should be visible via "
+                + "con.getClientInfo", appName, con.getClientInfo().get("ApplicationName"));
     }
 
-    Properties props = new Properties();
-    props.put("ApplicationName", "my app");
-    con.setClientInfo(props);
-    assertEquals("my app", getAppName());
-    assertEquals("my app", con.getClientInfo("ApplicationName"));
-    assertEquals("my app", con.getClientInfo().getProperty("ApplicationName"));
-  }
+    @Test
+    public void testSetAppNameProps() throws SQLException {
+        if (!TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_0)) {
+            return;
+        }
 
-  /**
-   * Test that no exception is thrown when an unknown property is set.
-   */
-  @Test
-  public void testWarningOnUnknownName() throws SQLException {
-    try {
-      con.setClientInfo("NonexistentClientInfoName", "NoValue");
-    } catch (SQLClientInfoException e) {
-      fail("Trying to set a nonexistent name must not throw an exception (spec)");
-    }
-    assertNotNull(con.getWarnings());
-  }
-
-  /**
-   * Test that a name missing in the properties given to setClientInfo should be unset (spec).
-   */
-  @Test
-  public void testMissingName() throws SQLException {
-    if (!TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_0)) {
-      return;
+        Properties props = new Properties();
+        props.put("ApplicationName", "my app");
+        con.setClientInfo(props);
+        assertEquals("my app", getAppName());
+        assertEquals("my app", con.getClientInfo("ApplicationName"));
+        assertEquals("my app", con.getClientInfo().getProperty("ApplicationName"));
     }
 
-    con.setClientInfo("ApplicationName", "my app");
+    /**
+     * Test that no exception is thrown when an unknown property is set.
+     */
+    @Test
+    public void testWarningOnUnknownName() throws SQLException {
+        try {
+            con.setClientInfo("NonexistentClientInfoName", "NoValue");
+        } catch (SQLClientInfoException e) {
+            fail("Trying to set a nonexistent name must not throw an exception (spec)");
+        }
+        assertNotNull(con.getWarnings());
+    }
 
-    // According to the spec, empty properties must clear all (because all names are missing)
-    con.setClientInfo(new Properties());
+    /**
+     * Test that a name missing in the properties given to setClientInfo should be unset (spec).
+     */
+    @Test
+    public void testMissingName() throws SQLException {
+        if (!TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_0)) {
+            return;
+        }
 
-    String applicationName = con.getClientInfo("ApplicationName");
-    assertTrue("".equals(applicationName) || applicationName == null);
-  }
+        con.setClientInfo("ApplicationName", "my app");
+
+        // According to the spec, empty properties must clear all (because all names are missing)
+        con.setClientInfo(new Properties());
+
+        String applicationName = con.getClientInfo("ApplicationName");
+        assertTrue("".equals(applicationName) || applicationName == null);
+    }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc4/ConnectionValidTimeoutTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/ConnectionValidTimeoutTest.java
index 5fda51f..a44b80e 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc4/ConnectionValidTimeoutTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/ConnectionValidTimeoutTest.java
@@ -23,47 +23,47 @@ import java.util.Properties;
 @DisabledIfServerVersionBelow("9.4")
 public class ConnectionValidTimeoutTest {
 
-  public static Iterable<Object[]> data() {
-    return Arrays.asList(new Object[][]{
-      {500, 1, 600},
-      {1500, 1, 1100},
-      {0, 1, 1100},
-      {500, 0, 600},
-    });
-  }
-
-  @MethodSource("data")
-  @ParameterizedTest(name = "networkTimeoutMillis={0}, validationTimeoutSeconds={1}, expectedMaxValidationTimeMillis={2}")
-  @Timeout(30)
-  void isValidRespectsSmallerTimeout(int networkTimeoutMillis, int validationTimeoutSeconds, int expectedMaxValidationTimeMillis) throws Exception {
-    try (StrangeProxyServer proxyServer = new StrangeProxyServer(TestUtil.getServer(), TestUtil.getPort())) {
-      final Properties props = new Properties();
-      props.setProperty(TestUtil.SERVER_HOST_PORT_PROP, String.format("%s:%s", "localhost", proxyServer.getServerPort()));
-      try (Connection conn = TestUtil.openDB(props)) {
-        assertTrue(conn.isValid(validationTimeoutSeconds), "Connection through proxy should be valid");
-
-        conn.setNetworkTimeout(null, networkTimeoutMillis);
-        assertTrue(conn.isValid(validationTimeoutSeconds), "Connection through proxy should still be valid");
-
-        proxyServer.stopForwardingOlderClients();
-
-        long start = System.currentTimeMillis();
-        boolean result = conn.isValid(validationTimeoutSeconds);
-        long elapsed = System.currentTimeMillis() - start;
-
-        assertFalse(result, "Broken connection should not be valid");
-
-        assertTrue(elapsed <= expectedMaxValidationTimeMillis,
-            String.format(
-            "Connection validation should not take longer than %d ms"
-                + " when network timeout is %d ms and validation timeout is %d s"
-                + " (actual result: %d ms)",
-            expectedMaxValidationTimeMillis,
-            networkTimeoutMillis,
-            validationTimeoutSeconds,
-            elapsed)
-        );
-      }
+    public static Iterable<Object[]> data() {
+        return Arrays.asList(new Object[][]{
+                {500, 1, 600},
+                {1500, 1, 1100},
+                {0, 1, 1100},
+                {500, 0, 600},
+        });
+    }
+
+    @MethodSource("data")
+    @ParameterizedTest(name = "networkTimeoutMillis={0}, validationTimeoutSeconds={1}, expectedMaxValidationTimeMillis={2}")
+    @Timeout(30)
+    void isValidRespectsSmallerTimeout(int networkTimeoutMillis, int validationTimeoutSeconds, int expectedMaxValidationTimeMillis) throws Exception {
+        try (StrangeProxyServer proxyServer = new StrangeProxyServer(TestUtil.getServer(), TestUtil.getPort())) {
+            final Properties props = new Properties();
+            props.setProperty(TestUtil.SERVER_HOST_PORT_PROP, String.format("%s:%s", "localhost", proxyServer.getServerPort()));
+            try (Connection conn = TestUtil.openDB(props)) {
+                assertTrue(conn.isValid(validationTimeoutSeconds), "Connection through proxy should be valid");
+
+                conn.setNetworkTimeout(null, networkTimeoutMillis);
+                assertTrue(conn.isValid(validationTimeoutSeconds), "Connection through proxy should still be valid");
+
+                proxyServer.stopForwardingOlderClients();
+
+                long start = System.currentTimeMillis();
+                boolean result = conn.isValid(validationTimeoutSeconds);
+                long elapsed = System.currentTimeMillis() - start;
+
+                assertFalse(result, "Broken connection should not be valid");
+
+                assertTrue(elapsed <= expectedMaxValidationTimeMillis,
+                        String.format(
+                                "Connection validation should not take longer than %d ms"
+                                        + " when network timeout is %d ms and validation timeout is %d s"
+                                        + " (actual result: %d ms)",
+                                expectedMaxValidationTimeMillis,
+                                networkTimeoutMillis,
+                                validationTimeoutSeconds,
+                                elapsed)
+                );
+            }
+        }
     }
-  }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc4/DatabaseMetaDataHideUnprivilegedObjectsTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/DatabaseMetaDataHideUnprivilegedObjectsTest.java
index 8c7afe4..d14fe2f 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc4/DatabaseMetaDataHideUnprivilegedObjectsTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/DatabaseMetaDataHideUnprivilegedObjectsTest.java
@@ -35,536 +35,536 @@ import java.util.Properties;
  * set to true.
  */
 public class DatabaseMetaDataHideUnprivilegedObjectsTest {
-  public static final String COLUMNS = "digit int4, name text";
-  private static Connection hidingCon;
-  private static Connection nonHidingCon;
-  private static Connection privilegedCon;
-  private static PgConnection pgConnection;
-  private static DatabaseMetaData hidingDatabaseMetaData;
-  private static DatabaseMetaData nonHidingDatabaseMetaData;
-
-  @BeforeAll
-  static void setUp() throws Exception {
-    Properties props = new Properties();
-    privilegedCon = TestUtil.openPrivilegedDB();
-    pgConnection = privilegedCon.unwrap(PgConnection.class);
-    Statement stmt = privilegedCon.createStatement();
-
-    createTestDataObjectsWithRangeOfPrivilegesInSchema("high_privileges_schema");
-    // Grant Test User ALL privileges on schema.
-    stmt.executeUpdate("GRANT ALL ON SCHEMA high_privileges_schema TO " + TestUtil.getUser());
-    stmt.executeUpdate("REVOKE ALL ON SCHEMA high_privileges_schema FROM public");
-
-    createTestDataObjectsWithRangeOfPrivilegesInSchema("low_privileges_schema");
-    // Grant Test User USAGE privileges on schema.
-    stmt.executeUpdate("GRANT USAGE ON SCHEMA low_privileges_schema TO " + TestUtil.getUser());
-    stmt.executeUpdate("REVOKE ALL ON SCHEMA low_privileges_schema FROM public");
-
-    createTestDataObjectsWithRangeOfPrivilegesInSchema("no_privileges_schema");
-    // Revoke ALL privileges from Test User USAGE on schema.
-    stmt.executeUpdate("REVOKE ALL ON SCHEMA no_privileges_schema FROM " + TestUtil.getUser());
-    stmt.executeUpdate("REVOKE ALL ON SCHEMA no_privileges_schema FROM public");
-
-    stmt.close();
-
-    nonHidingDatabaseMetaData = getNonHidingDatabaseMetaData(props);
-    hidingDatabaseMetaData = getHidingDatabaseMetaData(props);
-  }
-
-  private static DatabaseMetaData getHidingDatabaseMetaData(Properties props) throws Exception {
-    PGProperty.HIDE_UNPRIVILEGED_OBJECTS.set(props, true);
-    hidingCon = TestUtil.openDB(props);
-    if (isSuperUser(hidingCon)) {
-      fail("Test for hiding database objects will not work while:" + TestUtil.getUser()
-          + " has a SUPERUSER role.");
-    }
-    return hidingCon.getMetaData();
-  }
-
-  private static DatabaseMetaData getNonHidingDatabaseMetaData(Properties props) throws Exception {
-    nonHidingCon = TestUtil.openDB(props);
-    return nonHidingCon.getMetaData();
-  }
-
-  private static void createTestDataObjectsWithRangeOfPrivilegesInSchema(String schema)
-      throws SQLException {
-    TestUtil.createSchema(privilegedCon, schema);
-    createSimpleTablesInSchema(schema,
-        new String[]{"owned_table", "all_grants_table", "insert_granted_table",
-          "select_granted_table", "no_grants_table"});
-
-    Statement stmt = privilegedCon.createStatement();
-    stmt.executeUpdate(
-        "CREATE FUNCTION " + schema + "."
-          + "execute_granted_add_function(integer, integer) RETURNS integer AS 'select $1 + $2;' LANGUAGE SQL IMMUTABLE  RETURNS NULL ON NULL INPUT");
-    stmt.executeUpdate(
-        "CREATE FUNCTION " + schema + "."
-          + "no_grants_add_function(integer, integer) RETURNS integer AS 'select $1 + $2;' LANGUAGE SQL IMMUTABLE  RETURNS NULL ON NULL INPUT");
-
-    if (pgConnection.haveMinimumServerVersion(ServerVersion.v11)) {
-      stmt.executeUpdate(
-          "CREATE PROCEDURE " + schema + "."
-          + "execute_granted_insert_procedure( a integer, b integer) LANGUAGE SQL AS 'select $1 + $2;'");
-      stmt.executeUpdate(
-          "CREATE PROCEDURE " + schema + "."
-          + "no_grants_insert_procedure( a integer, b integer) LANGUAGE SQL AS 'select $1 + $2;'");
-
-    }
-    stmt.executeUpdate(
-        "CREATE OR REPLACE VIEW " + schema + "." + "select_granted_view AS SELECT name FROM "
-          + schema + "." + "select_granted_table");
-    stmt.executeUpdate(
-        "CREATE OR REPLACE VIEW " + schema + "." + "no_grants_view AS SELECT name FROM " + schema
-          + "." + "owned_table");
-    stmt.executeUpdate(
-        "CREATE TYPE " + schema + "." + "usage_granted_composite_type AS (f1 int, f2 text)");
-    stmt.executeUpdate(
-        "CREATE TYPE " + schema + "." + "no_grants_composite_type AS (f1 int, f2 text)");
-    stmt.executeUpdate(
-        "CREATE DOMAIN " + schema + "." + "usage_granted_us_postal_code_domain CHAR(5) NOT NULL");
-    stmt.executeUpdate(
-        "CREATE DOMAIN " + schema + "." + "no_grants_us_postal_code_domain AS CHAR(5) NOT NULL");
-
-    if (pgConnection.haveMinimumServerVersion(ServerVersion.v9_2)) {
-      stmt.executeUpdate(
-          "REVOKE ALL ON TYPE " + schema + "."
-          + "usage_granted_composite_type FROM public RESTRICT");
-      stmt.executeUpdate(
-          "REVOKE ALL ON TYPE " + schema + "." + "no_grants_composite_type FROM public RESTRICT");
-      stmt.executeUpdate("GRANT USAGE on TYPE " + schema + "." + "usage_granted_composite_type TO "
-          + TestUtil.getUser());
-      stmt.executeUpdate(
-          "REVOKE ALL ON TYPE " + schema + "."
-            + "usage_granted_us_postal_code_domain FROM public RESTRICT");
-      stmt.executeUpdate(
-          "REVOKE ALL ON TYPE " + schema + "."
-            + "no_grants_us_postal_code_domain FROM public RESTRICT");
-      stmt.executeUpdate(
-          "GRANT USAGE on TYPE " + schema + "." + "usage_granted_us_postal_code_domain TO "
-            + TestUtil.getUser());
-    }
-    revokeAllOnFunctions(schema, new String[]{"execute_granted_add_function(integer, integer)",
-        "no_grants_add_function(integer, integer)"});
-
-    revokeAllOnTables(schema,
-        new String[]{"owned_table", "all_grants_table", "insert_granted_table",
-          "select_granted_table", "no_grants_table", "select_granted_view", "no_grants_view"});
-
-    stmt.executeUpdate(
-        "GRANT ALL ON FUNCTION " + schema + "."
-          + "execute_granted_add_function(integer, integer) TO "
-          + TestUtil.getUser());
-
-    if (pgConnection.haveMinimumServerVersion(ServerVersion.v11)) {
-      revokeAllOnProcedures(schema, new String[]{"execute_granted_insert_procedure(integer, integer)",
-          "no_grants_insert_procedure(integer, integer)"});
-      stmt.executeUpdate(
-          "GRANT ALL ON PROCEDURE " + schema + "."
-            + "execute_granted_insert_procedure(integer, integer) TO "
-            + TestUtil.getUser());
-
-    }
-    stmt.executeUpdate(
-          "ALTER TABLE " + schema + "." + "owned_table OWNER TO " + TestUtil.getUser());
-    stmt.executeUpdate(
-        "GRANT ALL ON TABLE " + schema + "." + "all_grants_table TO " + TestUtil.getUser());
-    stmt.executeUpdate("GRANT INSERT ON TABLE " + schema + "." + "insert_granted_table TO "
-        + TestUtil.getUser());
-    stmt.executeUpdate("GRANT SELECT ON TABLE " + schema + "." + "select_granted_table TO "
-        + TestUtil.getUser());
-    stmt.executeUpdate("GRANT SELECT ON TABLE " + schema + "." + "select_granted_view TO "
-        + TestUtil.getUser());
-    stmt.close();
-  }
-
-  private static void revokeAllOnProcedures(String schema, String[] procedures
-  ) throws SQLException {
-    Statement stmt = privilegedCon.createStatement();
-    for (String procedure : procedures) {
-      stmt.executeUpdate(
-          "REVOKE ALL ON PROCEDURE " + schema + "." + procedure + " FROM public RESTRICT");
-      stmt.executeUpdate(
-          "REVOKE ALL ON PROCEDURE  " + schema + "." + procedure + " FROM " + TestUtil.getUser()
-            + " RESTRICT");
-    }
-    stmt.close();
-  }
-
-  private static void revokeAllOnFunctions(String schema, String[] functions
-  ) throws SQLException {
-    Statement stmt = privilegedCon.createStatement();
-    for (String function : functions) {
-      stmt.executeUpdate(
-          "REVOKE ALL ON FUNCTION " + schema + "." + function + " FROM public RESTRICT");
-      stmt.executeUpdate("REVOKE ALL ON FUNCTION  " + schema + "."
-            + function + " FROM " + TestUtil.getUser()
-            + " RESTRICT");
-    }
-    stmt.close();
-  }
-
-  private static void revokeAllOnTables(String schema, String[] tables
-  ) throws SQLException {
-    Statement stmt = privilegedCon.createStatement();
-    for (String table : tables) {
-      stmt.executeUpdate("REVOKE ALL ON TABLE " + schema + "." + table + " FROM public RESTRICT");
-      stmt.executeUpdate(
-          "REVOKE ALL ON TABLE  " + schema + "." + table + " FROM " + TestUtil.getUser()
-          + " RESTRICT");
-    }
-    stmt.close();
-  }
-
-  private static void createSimpleTablesInSchema(String schema, String[] tables
-  ) throws SQLException {
-    for (String tableName : tables) {
-      TestUtil.createTable(privilegedCon, schema + "." + tableName, COLUMNS);
-    }
-  }
-
-  @AfterAll
-  static void tearDown() throws SQLException {
-    TestUtil.closeDB(hidingCon);
-    TestUtil.closeDB(nonHidingCon);
-    TestUtil.dropSchema(privilegedCon, "high_privileges_schema");
-    TestUtil.dropSchema(privilegedCon, "low_privileges_schema");
-    TestUtil.dropSchema(privilegedCon, "no_privileges_schema");
-    TestUtil.closeDB(privilegedCon);
-  }
-
-  private static boolean isSuperUser(Connection connection) throws SQLException {
-    // Check if we're operating as a superuser.
-    Statement st = connection.createStatement();
-    st.executeQuery("SHOW is_superuser;");
-    ResultSet rs = st.getResultSet();
-    rs.next(); // One row is guaranteed
-    boolean connIsSuper = "on".equalsIgnoreCase(rs.getString(1));
-    st.close();
-    return connIsSuper;
-  }
-
-  @Test
-  void getSchemas() throws SQLException {
-    List<String> schemasWithHiding = getSchemaNames(hidingDatabaseMetaData);
-    assertThat(schemasWithHiding,
-        hasItems("pg_catalog", "information_schema",
-        "high_privileges_schema", "low_privileges_schema"));
-    assertThat(schemasWithHiding,
-        not(hasItem("no_privileges_schema")));
-
-    List<String> schemasWithNoHiding = getSchemaNames(nonHidingDatabaseMetaData);
-    assertThat(schemasWithNoHiding,
-        hasItems("pg_catalog", "information_schema",
-        "high_privileges_schema", "low_privileges_schema", "no_privileges_schema"));
-  }
-
-  List<String> getSchemaNames(DatabaseMetaData databaseMetaData) throws SQLException {
-    List<String> schemaNames = new ArrayList<>();
-    ResultSet rs = databaseMetaData.getSchemas();
-    while (rs.next()) {
-      schemaNames.add(rs.getString("TABLE_SCHEM"));
-    }
-    return schemaNames;
-  }
-
-  @Test
-  void getTables() throws SQLException {
-    List<String> tablesWithHiding = getTableNames(hidingDatabaseMetaData, "high_privileges_schema");
-
-    assertThat(tablesWithHiding,
-        hasItems(
-        "owned_table",
-        "all_grants_table",
-        "insert_granted_table",
-        "select_granted_table"));
-    assertThat(tablesWithHiding,
-        not(hasItem("no_grants_table")));
-
-    List<String> tablesWithNoHiding =
-        getTableNames(nonHidingDatabaseMetaData, "high_privileges_schema");
-    assertThat(tablesWithNoHiding,
-        hasItems(
-        "owned_table",
-        "all_grants_table",
-        "insert_granted_table",
-        "select_granted_table",
-        "no_grants_table"));
-
-    tablesWithHiding = getTableNames(hidingDatabaseMetaData, "low_privileges_schema");
-
-    assertThat(tablesWithHiding,
-        hasItems(
-        "owned_table",
-        "all_grants_table",
-        "insert_granted_table",
-        "select_granted_table"));
-    assertThat(tablesWithHiding,
-        not(hasItem("no_grants_table")));
-
-    tablesWithNoHiding =
-        getTableNames(nonHidingDatabaseMetaData, "low_privileges_schema");
-    assertThat(tablesWithNoHiding,
-        hasItems(
-        "owned_table",
-        "all_grants_table",
-        "insert_granted_table",
-        "select_granted_table",
-        "no_grants_table"));
-
-    // Or should the tables names not be returned because the schema is not visible?
-    tablesWithHiding = getTableNames(hidingDatabaseMetaData, "no_privileges_schema");
-
-    assertThat(tablesWithHiding,
-        hasItems(
-        "owned_table",
-        "all_grants_table",
-        "insert_granted_table",
-        "select_granted_table"));
-    assertThat(tablesWithHiding,
-        not(hasItem("no_grants_table")));
-
-    tablesWithNoHiding =
-        getTableNames(nonHidingDatabaseMetaData, "no_privileges_schema");
-    assertThat(tablesWithNoHiding,
-        hasItems(
-        "owned_table",
-        "all_grants_table",
-        "insert_granted_table",
-        "select_granted_table",
-        "no_grants_table"));
-
-  }
-
-  List<String> getTableNames(DatabaseMetaData databaseMetaData, String schemaPattern)
-      throws SQLException {
-    List<String> tableNames = new ArrayList<>();
-    ResultSet rs = databaseMetaData.getTables(null, schemaPattern, null, new String[]{"TABLE"});
-    while (rs.next()) {
-      tableNames.add(rs.getString("TABLE_NAME"));
-    }
-    return tableNames;
-  }
-
-  @Test
-  void getViews() throws SQLException {
-    List<String> viewsWithHiding = getViewNames(hidingDatabaseMetaData, "high_privileges_schema");
-
-    assertThat(viewsWithHiding,
-        hasItems(
-        "select_granted_view"));
-    assertThat(viewsWithHiding,
-        not(hasItem("no_grants_view")));
-
-    List<String> viewsWithNoHiding =
-        getViewNames(nonHidingDatabaseMetaData, "high_privileges_schema");
-    assertThat(viewsWithNoHiding,
-        hasItems(
-        "select_granted_view",
-        "no_grants_view"));
-
-    viewsWithHiding = getViewNames(hidingDatabaseMetaData, "low_privileges_schema");
-
-    assertThat(viewsWithHiding,
-        hasItems(
-        "select_granted_view"));
-    assertThat(viewsWithHiding,
-        not(hasItem("no_grants_view")));
-
-    viewsWithNoHiding =
-        getViewNames(nonHidingDatabaseMetaData, "low_privileges_schema");
-    assertThat(viewsWithNoHiding,
-        hasItems(
-        "select_granted_view",
-        "no_grants_view"));
-
-    // Or should the view names not be returned because the schema is not visible?
-    viewsWithHiding = getViewNames(hidingDatabaseMetaData, "no_privileges_schema");
-
-    assertThat(viewsWithHiding,
-        hasItems(
-        "select_granted_view"));
-    assertThat(viewsWithHiding,
-        not(hasItem("no_grants_view")));
-
-    viewsWithNoHiding =
-        getViewNames(nonHidingDatabaseMetaData, "no_privileges_schema");
-    assertThat(viewsWithNoHiding,
-        hasItems(
-        "select_granted_view",
-        "no_grants_view"));
-
-  }
-
-  List<String> getViewNames(DatabaseMetaData databaseMetaData, String schemaPattern)
-      throws SQLException {
-    List<String> viewNames = new ArrayList<>();
-    ResultSet rs = databaseMetaData.getTables(null, schemaPattern, null, new String[]{"VIEW"});
-    while (rs.next()) {
-      viewNames.add(rs.getString("TABLE_NAME"));
-    }
-    return viewNames;
-  }
-
-  @Test
-  void getFunctions() throws SQLException {
-    List<String> functionsWithHiding =
-        getFunctionNames(hidingDatabaseMetaData, "high_privileges_schema");
-    assertThat(functionsWithHiding,
-        hasItem("execute_granted_add_function"));
-    assertThat(functionsWithHiding,
-        not(hasItem("no_grants_add_function")));
-
-    List<String> functionsWithNoHiding =
-        getFunctionNames(nonHidingDatabaseMetaData, "high_privileges_schema");
-    assertThat(functionsWithNoHiding,
-        hasItems("execute_granted_add_function", "no_grants_add_function"));
-
-    functionsWithHiding =
-        getFunctionNames(hidingDatabaseMetaData, "low_privileges_schema");
-    assertThat(functionsWithHiding,
-        hasItem("execute_granted_add_function"));
-    assertThat(functionsWithHiding,
-        not(hasItem("no_grants_add_function")));
-
-    functionsWithNoHiding =
-        getFunctionNames(nonHidingDatabaseMetaData, "low_privileges_schema");
-    assertThat(functionsWithNoHiding,
-        hasItems("execute_granted_add_function", "no_grants_add_function"));
-
-    // Or should the function names not be returned because the schema is not visible?
-    functionsWithHiding =
-        getFunctionNames(hidingDatabaseMetaData, "no_privileges_schema");
-    assertThat(functionsWithHiding,
-        hasItem("execute_granted_add_function"));
-    assertThat(functionsWithHiding,
-        not(hasItem("no_grants_add_function")));
-
-    functionsWithNoHiding =
-        getFunctionNames(nonHidingDatabaseMetaData, "no_privileges_schema");
-    assertThat(functionsWithNoHiding,
-        hasItems("execute_granted_add_function", "no_grants_add_function"));
-  }
-
-  List<String> getFunctionNames(DatabaseMetaData databaseMetaData, String schemaPattern)
-      throws SQLException {
-    List<String> functionNames = new ArrayList<>();
-    ResultSet rs = databaseMetaData.getFunctions(null, schemaPattern, null);
-    while (rs.next()) {
-      functionNames.add(rs.getString("FUNCTION_NAME"));
-    }
-    return functionNames;
-  }
-
-  @Test
-  void getProcedures() throws SQLException {
-    String executeGranted = TestUtil.haveMinimumServerVersion(hidingCon, ServerVersion.v11) ? "execute_granted_insert_procedure" : "execute_granted_add_function";
-    String noGrants = TestUtil.haveMinimumServerVersion(hidingCon, ServerVersion.v11) ? "no_grants_insert_procedure" : "no_grants_add_function";
-
-    List<String> proceduresWithHiding =
-        getProcedureNames(hidingDatabaseMetaData, "high_privileges_schema");
-    assertThat(proceduresWithHiding,
-        hasItem(executeGranted));
-    assertThat(proceduresWithHiding,
-        not(hasItem(noGrants)));
-
-    List<String> proceduresWithNoHiding =
-        getProcedureNames(nonHidingDatabaseMetaData, "high_privileges_schema");
-    assertThat(proceduresWithNoHiding,
-        hasItems(executeGranted, noGrants));
-
-    proceduresWithHiding =
-        getProcedureNames(hidingDatabaseMetaData, "low_privileges_schema");
-    assertThat(proceduresWithHiding,
-        hasItem(executeGranted));
-    assertThat(proceduresWithHiding,
-        not(hasItem(noGrants)));
-
-    proceduresWithNoHiding =
-        getProcedureNames(nonHidingDatabaseMetaData, "low_privileges_schema");
-    assertThat(proceduresWithNoHiding,
-        hasItems(executeGranted, noGrants));
-
-    // Or should the function names not be returned because the schema is not visible?
-    proceduresWithHiding =
-        getProcedureNames(hidingDatabaseMetaData, "no_privileges_schema");
-    assertThat(proceduresWithHiding,
-        hasItem(executeGranted));
-    assertThat(proceduresWithHiding,
-        not(hasItem(noGrants)));
-
-    proceduresWithNoHiding =
-        getProcedureNames(nonHidingDatabaseMetaData, "no_privileges_schema");
-    assertThat(proceduresWithNoHiding,
-        hasItems(executeGranted, noGrants));
-
-  }
-
-  List<String> getProcedureNames(DatabaseMetaData databaseMetaData, String schemaPattern)
-      throws SQLException {
-    List<String> procedureNames = new ArrayList<>();
-    ResultSet rs = databaseMetaData.getProcedures(null, schemaPattern, null);
-    while (rs.next()) {
-      procedureNames.add(rs.getString("PROCEDURE_NAME"));
-    }
-    return procedureNames;
-  }
-
-  /*
-   *  According to the JDBC JavaDoc, the applicable UDTs are: JAVA_OBJECT, STRUCT, or DISTINCT.
-   */
-  @Test
-  void getUDTs() throws SQLException {
-    if (pgConnection.haveMinimumServerVersion(ServerVersion.v9_2)) {
-      List<String> typesWithHiding = getTypeNames(hidingDatabaseMetaData, "high_privileges_schema");
-      assertThat(typesWithHiding,
-          hasItems("usage_granted_composite_type", "usage_granted_us_postal_code_domain"));
-      assertThat(typesWithHiding,
-          not(hasItems("no_grants_composite_type", "no_grants_us_postal_code_domain")));
-
-      typesWithHiding = getTypeNames(hidingDatabaseMetaData, "low_privileges_schema");
-      assertThat(typesWithHiding,
-          hasItems("usage_granted_composite_type", "usage_granted_us_postal_code_domain"));
-      assertThat(typesWithHiding,
-          not(hasItems("no_grants_composite_type", "no_grants_us_postal_code_domain")));
-
-      // Or should the types names not be returned because the schema is not visible?
-      typesWithHiding = getTypeNames(hidingDatabaseMetaData, "no_privileges_schema");
-      assertThat(typesWithHiding,
-          hasItems("usage_granted_composite_type", "usage_granted_us_postal_code_domain"));
-      assertThat(typesWithHiding,
-          not(hasItems("no_grants_composite_type", "no_grants_us_postal_code_domain")));
+    public static final String COLUMNS = "digit int4, name text";
+    private static Connection hidingCon;
+    private static Connection nonHidingCon;
+    private static Connection privilegedCon;
+    private static PgConnection pgConnection;
+    private static DatabaseMetaData hidingDatabaseMetaData;
+    private static DatabaseMetaData nonHidingDatabaseMetaData;
+
+    @BeforeAll
+    static void setUp() throws Exception {
+        Properties props = new Properties();
+        privilegedCon = TestUtil.openPrivilegedDB();
+        pgConnection = privilegedCon.unwrap(PgConnection.class);
+        Statement stmt = privilegedCon.createStatement();
+
+        createTestDataObjectsWithRangeOfPrivilegesInSchema("high_privileges_schema");
+        // Grant Test User ALL privileges on schema.
+        stmt.executeUpdate("GRANT ALL ON SCHEMA high_privileges_schema TO " + TestUtil.getUser());
+        stmt.executeUpdate("REVOKE ALL ON SCHEMA high_privileges_schema FROM public");
+
+        createTestDataObjectsWithRangeOfPrivilegesInSchema("low_privileges_schema");
+        // Grant Test User USAGE privileges on schema.
+        stmt.executeUpdate("GRANT USAGE ON SCHEMA low_privileges_schema TO " + TestUtil.getUser());
+        stmt.executeUpdate("REVOKE ALL ON SCHEMA low_privileges_schema FROM public");
+
+        createTestDataObjectsWithRangeOfPrivilegesInSchema("no_privileges_schema");
+        // Revoke ALL privileges on schema from Test User.
+        stmt.executeUpdate("REVOKE ALL ON SCHEMA no_privileges_schema FROM " + TestUtil.getUser());
+        stmt.executeUpdate("REVOKE ALL ON SCHEMA no_privileges_schema FROM public");
+
+        stmt.close();
+
+        nonHidingDatabaseMetaData = getNonHidingDatabaseMetaData(props);
+        hidingDatabaseMetaData = getHidingDatabaseMetaData(props);
     }
 
-    List<String> typesWithNoHiding =
-        getTypeNames(nonHidingDatabaseMetaData, "high_privileges_schema");
-    assertThat(typesWithNoHiding,
-        hasItems("usage_granted_composite_type", "no_grants_composite_type",
-          "usage_granted_us_postal_code_domain", "no_grants_us_postal_code_domain"));
-
-    typesWithNoHiding =
-        getTypeNames(nonHidingDatabaseMetaData, "low_privileges_schema");
-    assertThat(typesWithNoHiding,
-        hasItems("usage_granted_composite_type", "no_grants_composite_type",
-          "usage_granted_us_postal_code_domain", "no_grants_us_postal_code_domain"));
-
-    typesWithNoHiding =
-        getTypeNames(nonHidingDatabaseMetaData, "no_privileges_schema");
-    assertThat(typesWithNoHiding,
-        hasItems("usage_granted_composite_type", "no_grants_composite_type",
-          "usage_granted_us_postal_code_domain", "no_grants_us_postal_code_domain"));
-  }
-
-  /*
-  From the Postgres JDBC driver source code, we are mapping the types:
-      java.sql.Types.DISTINCT to the Postgres type:  TYPTYPE_COMPOSITE  'c'   # composite (e.g., table's rowtype)
-      java.sql.Types.STRUCT   to the Postgres type:  TYPTYPE_DOMAIN     'd'   # domain over another type
-   */
-  List<String> getTypeNames(DatabaseMetaData databaseMetaData, String schemaPattern) throws SQLException {
-    List<String> typeNames = new ArrayList<>();
-    ResultSet rs = databaseMetaData.getUDTs(null, schemaPattern, null, null);
-    while (rs.next()) {
-      typeNames.add(rs.getString("TYPE_NAME"));
+    private static DatabaseMetaData getHidingDatabaseMetaData(Properties props) throws Exception {
+        PGProperty.HIDE_UNPRIVILEGED_OBJECTS.set(props, true);
+        hidingCon = TestUtil.openDB(props);
+        if (isSuperUser(hidingCon)) {
+            fail("Test for hiding database objects will not work while:" + TestUtil.getUser()
+                    + " has a SUPERUSER role.");
+        }
+        return hidingCon.getMetaData();
+    }
+
+    private static DatabaseMetaData getNonHidingDatabaseMetaData(Properties props) throws Exception {
+        nonHidingCon = TestUtil.openDB(props);
+        return nonHidingCon.getMetaData();
+    }
+
+    private static void createTestDataObjectsWithRangeOfPrivilegesInSchema(String schema)
+            throws SQLException {
+        TestUtil.createSchema(privilegedCon, schema);
+        createSimpleTablesInSchema(schema,
+                new String[]{"owned_table", "all_grants_table", "insert_granted_table",
+                        "select_granted_table", "no_grants_table"});
+
+        Statement stmt = privilegedCon.createStatement();
+        stmt.executeUpdate(
+                "CREATE FUNCTION " + schema + "."
+                        + "execute_granted_add_function(integer, integer) RETURNS integer AS 'select $1 + $2;' LANGUAGE SQL IMMUTABLE  RETURNS NULL ON NULL INPUT");
+        stmt.executeUpdate(
+                "CREATE FUNCTION " + schema + "."
+                        + "no_grants_add_function(integer, integer) RETURNS integer AS 'select $1 + $2;' LANGUAGE SQL IMMUTABLE  RETURNS NULL ON NULL INPUT");
+
+        if (pgConnection.haveMinimumServerVersion(ServerVersion.v11)) {
+            stmt.executeUpdate(
+                    "CREATE PROCEDURE " + schema + "."
+                            + "execute_granted_insert_procedure( a integer, b integer) LANGUAGE SQL AS 'select $1 + $2;'");
+            stmt.executeUpdate(
+                    "CREATE PROCEDURE " + schema + "."
+                            + "no_grants_insert_procedure( a integer, b integer) LANGUAGE SQL AS 'select $1 + $2;'");
+
+        }
+        stmt.executeUpdate(
+                "CREATE OR REPLACE VIEW " + schema + "." + "select_granted_view AS SELECT name FROM "
+                        + schema + "." + "select_granted_table");
+        stmt.executeUpdate(
+                "CREATE OR REPLACE VIEW " + schema + "." + "no_grants_view AS SELECT name FROM " + schema
+                        + "." + "owned_table");
+        stmt.executeUpdate(
+                "CREATE TYPE " + schema + "." + "usage_granted_composite_type AS (f1 int, f2 text)");
+        stmt.executeUpdate(
+                "CREATE TYPE " + schema + "." + "no_grants_composite_type AS (f1 int, f2 text)");
+        stmt.executeUpdate(
+                "CREATE DOMAIN " + schema + "." + "usage_granted_us_postal_code_domain CHAR(5) NOT NULL");
+        stmt.executeUpdate(
+                "CREATE DOMAIN " + schema + "." + "no_grants_us_postal_code_domain AS CHAR(5) NOT NULL");
+
+        if (pgConnection.haveMinimumServerVersion(ServerVersion.v9_2)) {
+            stmt.executeUpdate(
+                    "REVOKE ALL ON TYPE " + schema + "."
+                            + "usage_granted_composite_type FROM public RESTRICT");
+            stmt.executeUpdate(
+                    "REVOKE ALL ON TYPE " + schema + "." + "no_grants_composite_type FROM public RESTRICT");
+            stmt.executeUpdate("GRANT USAGE on TYPE " + schema + "." + "usage_granted_composite_type TO "
+                    + TestUtil.getUser());
+            stmt.executeUpdate(
+                    "REVOKE ALL ON TYPE " + schema + "."
+                            + "usage_granted_us_postal_code_domain FROM public RESTRICT");
+            stmt.executeUpdate(
+                    "REVOKE ALL ON TYPE " + schema + "."
+                            + "no_grants_us_postal_code_domain FROM public RESTRICT");
+            stmt.executeUpdate(
+                    "GRANT USAGE on TYPE " + schema + "." + "usage_granted_us_postal_code_domain TO "
+                            + TestUtil.getUser());
+        }
+        revokeAllOnFunctions(schema, new String[]{"execute_granted_add_function(integer, integer)",
+                "no_grants_add_function(integer, integer)"});
+
+        revokeAllOnTables(schema,
+                new String[]{"owned_table", "all_grants_table", "insert_granted_table",
+                        "select_granted_table", "no_grants_table", "select_granted_view", "no_grants_view"});
+
+        stmt.executeUpdate(
+                "GRANT ALL ON FUNCTION " + schema + "."
+                        + "execute_granted_add_function(integer, integer) TO "
+                        + TestUtil.getUser());
+
+        if (pgConnection.haveMinimumServerVersion(ServerVersion.v11)) {
+            revokeAllOnProcedures(schema, new String[]{"execute_granted_insert_procedure(integer, integer)",
+                    "no_grants_insert_procedure(integer, integer)"});
+            stmt.executeUpdate(
+                    "GRANT ALL ON PROCEDURE " + schema + "."
+                            + "execute_granted_insert_procedure(integer, integer) TO "
+                            + TestUtil.getUser());
+
+        }
+        stmt.executeUpdate(
+                "ALTER TABLE " + schema + "." + "owned_table OWNER TO " + TestUtil.getUser());
+        stmt.executeUpdate(
+                "GRANT ALL ON TABLE " + schema + "." + "all_grants_table TO " + TestUtil.getUser());
+        stmt.executeUpdate("GRANT INSERT ON TABLE " + schema + "." + "insert_granted_table TO "
+                + TestUtil.getUser());
+        stmt.executeUpdate("GRANT SELECT ON TABLE " + schema + "." + "select_granted_table TO "
+                + TestUtil.getUser());
+        stmt.executeUpdate("GRANT SELECT ON TABLE " + schema + "." + "select_granted_view TO "
+                + TestUtil.getUser());
+        stmt.close();
+    }
+
+    private static void revokeAllOnProcedures(String schema, String[] procedures
+    ) throws SQLException {
+        Statement stmt = privilegedCon.createStatement();
+        for (String procedure : procedures) {
+            stmt.executeUpdate(
+                    "REVOKE ALL ON PROCEDURE " + schema + "." + procedure + " FROM public RESTRICT");
+            stmt.executeUpdate(
+                    "REVOKE ALL ON PROCEDURE  " + schema + "." + procedure + " FROM " + TestUtil.getUser()
+                            + " RESTRICT");
+        }
+        stmt.close();
+    }
+
+    private static void revokeAllOnFunctions(String schema, String[] functions
+    ) throws SQLException {
+        Statement stmt = privilegedCon.createStatement();
+        for (String function : functions) {
+            stmt.executeUpdate(
+                    "REVOKE ALL ON FUNCTION " + schema + "." + function + " FROM public RESTRICT");
+            stmt.executeUpdate("REVOKE ALL ON FUNCTION  " + schema + "."
+                    + function + " FROM " + TestUtil.getUser()
+                    + " RESTRICT");
+        }
+        stmt.close();
+    }
+
+    private static void revokeAllOnTables(String schema, String[] tables
+    ) throws SQLException {
+        Statement stmt = privilegedCon.createStatement();
+        for (String table : tables) {
+            stmt.executeUpdate("REVOKE ALL ON TABLE " + schema + "." + table + " FROM public RESTRICT");
+            stmt.executeUpdate(
+                    "REVOKE ALL ON TABLE  " + schema + "." + table + " FROM " + TestUtil.getUser()
+                            + " RESTRICT");
+        }
+        stmt.close();
+    }
+
+    private static void createSimpleTablesInSchema(String schema, String[] tables
+    ) throws SQLException {
+        for (String tableName : tables) {
+            TestUtil.createTable(privilegedCon, schema + "." + tableName, COLUMNS);
+        }
+    }
+
+    @AfterAll
+    static void tearDown() throws SQLException {
+        TestUtil.closeDB(hidingCon);
+        TestUtil.closeDB(nonHidingCon);
+        TestUtil.dropSchema(privilegedCon, "high_privileges_schema");
+        TestUtil.dropSchema(privilegedCon, "low_privileges_schema");
+        TestUtil.dropSchema(privilegedCon, "no_privileges_schema");
+        TestUtil.closeDB(privilegedCon);
+    }
+
+    private static boolean isSuperUser(Connection connection) throws SQLException {
+        // Check if we're operating as a superuser.
+        Statement st = connection.createStatement();
+        st.executeQuery("SHOW is_superuser;");
+        ResultSet rs = st.getResultSet();
+        rs.next(); // One row is guaranteed
+        boolean connIsSuper = "on".equalsIgnoreCase(rs.getString(1));
+        st.close();
+        return connIsSuper;
+    }
+
+    @Test
+    void getSchemas() throws SQLException {
+        List<String> schemasWithHiding = getSchemaNames(hidingDatabaseMetaData);
+        assertThat(schemasWithHiding,
+                hasItems("pg_catalog", "information_schema",
+                        "high_privileges_schema", "low_privileges_schema"));
+        assertThat(schemasWithHiding,
+                not(hasItem("no_privileges_schema")));
+
+        List<String> schemasWithNoHiding = getSchemaNames(nonHidingDatabaseMetaData);
+        assertThat(schemasWithNoHiding,
+                hasItems("pg_catalog", "information_schema",
+                        "high_privileges_schema", "low_privileges_schema", "no_privileges_schema"));
+    }
+
+    List<String> getSchemaNames(DatabaseMetaData databaseMetaData) throws SQLException {
+        List<String> schemaNames = new ArrayList<>();
+        ResultSet rs = databaseMetaData.getSchemas();
+        while (rs.next()) {
+            schemaNames.add(rs.getString("TABLE_SCHEM"));
+        }
+        return schemaNames;
+    }
+
+    @Test
+    void getTables() throws SQLException {
+        List<String> tablesWithHiding = getTableNames(hidingDatabaseMetaData, "high_privileges_schema");
+
+        assertThat(tablesWithHiding,
+                hasItems(
+                        "owned_table",
+                        "all_grants_table",
+                        "insert_granted_table",
+                        "select_granted_table"));
+        assertThat(tablesWithHiding,
+                not(hasItem("no_grants_table")));
+
+        List<String> tablesWithNoHiding =
+                getTableNames(nonHidingDatabaseMetaData, "high_privileges_schema");
+        assertThat(tablesWithNoHiding,
+                hasItems(
+                        "owned_table",
+                        "all_grants_table",
+                        "insert_granted_table",
+                        "select_granted_table",
+                        "no_grants_table"));
+
+        tablesWithHiding = getTableNames(hidingDatabaseMetaData, "low_privileges_schema");
+
+        assertThat(tablesWithHiding,
+                hasItems(
+                        "owned_table",
+                        "all_grants_table",
+                        "insert_granted_table",
+                        "select_granted_table"));
+        assertThat(tablesWithHiding,
+                not(hasItem("no_grants_table")));
+
+        tablesWithNoHiding =
+                getTableNames(nonHidingDatabaseMetaData, "low_privileges_schema");
+        assertThat(tablesWithNoHiding,
+                hasItems(
+                        "owned_table",
+                        "all_grants_table",
+                        "insert_granted_table",
+                        "select_granted_table",
+                        "no_grants_table"));
+
+        // Or should the table names not be returned because the schema is not visible?
+        tablesWithHiding = getTableNames(hidingDatabaseMetaData, "no_privileges_schema");
+
+        assertThat(tablesWithHiding,
+                hasItems(
+                        "owned_table",
+                        "all_grants_table",
+                        "insert_granted_table",
+                        "select_granted_table"));
+        assertThat(tablesWithHiding,
+                not(hasItem("no_grants_table")));
+
+        tablesWithNoHiding =
+                getTableNames(nonHidingDatabaseMetaData, "no_privileges_schema");
+        assertThat(tablesWithNoHiding,
+                hasItems(
+                        "owned_table",
+                        "all_grants_table",
+                        "insert_granted_table",
+                        "select_granted_table",
+                        "no_grants_table"));
+
+    }
+
+    List<String> getTableNames(DatabaseMetaData databaseMetaData, String schemaPattern)
+            throws SQLException {
+        List<String> tableNames = new ArrayList<>();
+        ResultSet rs = databaseMetaData.getTables(null, schemaPattern, null, new String[]{"TABLE"});
+        while (rs.next()) {
+            tableNames.add(rs.getString("TABLE_NAME"));
+        }
+        return tableNames;
+    }
+
+    @Test
+    void getViews() throws SQLException {
+        List<String> viewsWithHiding = getViewNames(hidingDatabaseMetaData, "high_privileges_schema");
+
+        assertThat(viewsWithHiding,
+                hasItems(
+                        "select_granted_view"));
+        assertThat(viewsWithHiding,
+                not(hasItem("no_grants_view")));
+
+        List<String> viewsWithNoHiding =
+                getViewNames(nonHidingDatabaseMetaData, "high_privileges_schema");
+        assertThat(viewsWithNoHiding,
+                hasItems(
+                        "select_granted_view",
+                        "no_grants_view"));
+
+        viewsWithHiding = getViewNames(hidingDatabaseMetaData, "low_privileges_schema");
+
+        assertThat(viewsWithHiding,
+                hasItems(
+                        "select_granted_view"));
+        assertThat(viewsWithHiding,
+                not(hasItem("no_grants_view")));
+
+        viewsWithNoHiding =
+                getViewNames(nonHidingDatabaseMetaData, "low_privileges_schema");
+        assertThat(viewsWithNoHiding,
+                hasItems(
+                        "select_granted_view",
+                        "no_grants_view"));
+
+        // Or should the view names not be returned because the schema is not visible?
+        viewsWithHiding = getViewNames(hidingDatabaseMetaData, "no_privileges_schema");
+
+        assertThat(viewsWithHiding,
+                hasItems(
+                        "select_granted_view"));
+        assertThat(viewsWithHiding,
+                not(hasItem("no_grants_view")));
+
+        viewsWithNoHiding =
+                getViewNames(nonHidingDatabaseMetaData, "no_privileges_schema");
+        assertThat(viewsWithNoHiding,
+                hasItems(
+                        "select_granted_view",
+                        "no_grants_view"));
+
+    }
+
+    List<String> getViewNames(DatabaseMetaData databaseMetaData, String schemaPattern)
+            throws SQLException {
+        List<String> viewNames = new ArrayList<>();
+        ResultSet rs = databaseMetaData.getTables(null, schemaPattern, null, new String[]{"VIEW"});
+        while (rs.next()) {
+            viewNames.add(rs.getString("TABLE_NAME"));
+        }
+        return viewNames;
+    }
+
+    @Test
+    void getFunctions() throws SQLException {
+        List<String> functionsWithHiding =
+                getFunctionNames(hidingDatabaseMetaData, "high_privileges_schema");
+        assertThat(functionsWithHiding,
+                hasItem("execute_granted_add_function"));
+        assertThat(functionsWithHiding,
+                not(hasItem("no_grants_add_function")));
+
+        List<String> functionsWithNoHiding =
+                getFunctionNames(nonHidingDatabaseMetaData, "high_privileges_schema");
+        assertThat(functionsWithNoHiding,
+                hasItems("execute_granted_add_function", "no_grants_add_function"));
+
+        functionsWithHiding =
+                getFunctionNames(hidingDatabaseMetaData, "low_privileges_schema");
+        assertThat(functionsWithHiding,
+                hasItem("execute_granted_add_function"));
+        assertThat(functionsWithHiding,
+                not(hasItem("no_grants_add_function")));
+
+        functionsWithNoHiding =
+                getFunctionNames(nonHidingDatabaseMetaData, "low_privileges_schema");
+        assertThat(functionsWithNoHiding,
+                hasItems("execute_granted_add_function", "no_grants_add_function"));
+
+        // Or should the function names not be returned because the schema is not visible?
+        functionsWithHiding =
+                getFunctionNames(hidingDatabaseMetaData, "no_privileges_schema");
+        assertThat(functionsWithHiding,
+                hasItem("execute_granted_add_function"));
+        assertThat(functionsWithHiding,
+                not(hasItem("no_grants_add_function")));
+
+        functionsWithNoHiding =
+                getFunctionNames(nonHidingDatabaseMetaData, "no_privileges_schema");
+        assertThat(functionsWithNoHiding,
+                hasItems("execute_granted_add_function", "no_grants_add_function"));
+    }
+
+    List<String> getFunctionNames(DatabaseMetaData databaseMetaData, String schemaPattern)
+            throws SQLException {
+        List<String> functionNames = new ArrayList<>();
+        ResultSet rs = databaseMetaData.getFunctions(null, schemaPattern, null);
+        while (rs.next()) {
+            functionNames.add(rs.getString("FUNCTION_NAME"));
+        }
+        return functionNames;
+    }
+
+    @Test
+    void getProcedures() throws SQLException {
+        String executeGranted = TestUtil.haveMinimumServerVersion(hidingCon, ServerVersion.v11) ? "execute_granted_insert_procedure" : "execute_granted_add_function";
+        String noGrants = TestUtil.haveMinimumServerVersion(hidingCon, ServerVersion.v11) ? "no_grants_insert_procedure" : "no_grants_add_function";
+
+        List<String> proceduresWithHiding =
+                getProcedureNames(hidingDatabaseMetaData, "high_privileges_schema");
+        assertThat(proceduresWithHiding,
+                hasItem(executeGranted));
+        assertThat(proceduresWithHiding,
+                not(hasItem(noGrants)));
+
+        List<String> proceduresWithNoHiding =
+                getProcedureNames(nonHidingDatabaseMetaData, "high_privileges_schema");
+        assertThat(proceduresWithNoHiding,
+                hasItems(executeGranted, noGrants));
+
+        proceduresWithHiding =
+                getProcedureNames(hidingDatabaseMetaData, "low_privileges_schema");
+        assertThat(proceduresWithHiding,
+                hasItem(executeGranted));
+        assertThat(proceduresWithHiding,
+                not(hasItem(noGrants)));
+
+        proceduresWithNoHiding =
+                getProcedureNames(nonHidingDatabaseMetaData, "low_privileges_schema");
+        assertThat(proceduresWithNoHiding,
+                hasItems(executeGranted, noGrants));
+
+        // Or should the function names not be returned because the schema is not visible?
+        proceduresWithHiding =
+                getProcedureNames(hidingDatabaseMetaData, "no_privileges_schema");
+        assertThat(proceduresWithHiding,
+                hasItem(executeGranted));
+        assertThat(proceduresWithHiding,
+                not(hasItem(noGrants)));
+
+        proceduresWithNoHiding =
+                getProcedureNames(nonHidingDatabaseMetaData, "no_privileges_schema");
+        assertThat(proceduresWithNoHiding,
+                hasItems(executeGranted, noGrants));
+
+    }
+
+    List<String> getProcedureNames(DatabaseMetaData databaseMetaData, String schemaPattern)
+            throws SQLException {
+        List<String> procedureNames = new ArrayList<>();
+        ResultSet rs = databaseMetaData.getProcedures(null, schemaPattern, null);
+        while (rs.next()) {
+            procedureNames.add(rs.getString("PROCEDURE_NAME"));
+        }
+        return procedureNames;
+    }
+
+    /*
+     *  According to the JDBC JavaDoc, the applicable UDTs are: JAVA_OBJECT, STRUCT, or DISTINCT.
+     */
+    @Test
+    void getUDTs() throws SQLException {
+        if (pgConnection.haveMinimumServerVersion(ServerVersion.v9_2)) {
+            List<String> typesWithHiding = getTypeNames(hidingDatabaseMetaData, "high_privileges_schema");
+            assertThat(typesWithHiding,
+                    hasItems("usage_granted_composite_type", "usage_granted_us_postal_code_domain"));
+            assertThat(typesWithHiding,
+                    not(hasItems("no_grants_composite_type", "no_grants_us_postal_code_domain")));
+
+            typesWithHiding = getTypeNames(hidingDatabaseMetaData, "low_privileges_schema");
+            assertThat(typesWithHiding,
+                    hasItems("usage_granted_composite_type", "usage_granted_us_postal_code_domain"));
+            assertThat(typesWithHiding,
+                    not(hasItems("no_grants_composite_type", "no_grants_us_postal_code_domain")));
+
+            // Or should the types names not be returned because the schema is not visible?
+            typesWithHiding = getTypeNames(hidingDatabaseMetaData, "no_privileges_schema");
+            assertThat(typesWithHiding,
+                    hasItems("usage_granted_composite_type", "usage_granted_us_postal_code_domain"));
+            assertThat(typesWithHiding,
+                    not(hasItems("no_grants_composite_type", "no_grants_us_postal_code_domain")));
+        }
+
+        List<String> typesWithNoHiding =
+                getTypeNames(nonHidingDatabaseMetaData, "high_privileges_schema");
+        assertThat(typesWithNoHiding,
+                hasItems("usage_granted_composite_type", "no_grants_composite_type",
+                        "usage_granted_us_postal_code_domain", "no_grants_us_postal_code_domain"));
+
+        typesWithNoHiding =
+                getTypeNames(nonHidingDatabaseMetaData, "low_privileges_schema");
+        assertThat(typesWithNoHiding,
+                hasItems("usage_granted_composite_type", "no_grants_composite_type",
+                        "usage_granted_us_postal_code_domain", "no_grants_us_postal_code_domain"));
+
+        typesWithNoHiding =
+                getTypeNames(nonHidingDatabaseMetaData, "no_privileges_schema");
+        assertThat(typesWithNoHiding,
+                hasItems("usage_granted_composite_type", "no_grants_composite_type",
+                        "usage_granted_us_postal_code_domain", "no_grants_us_postal_code_domain"));
+    }
+
+    /*
+    From the Postgres JDBC driver source code, we are mapping the types:
+        java.sql.Types.STRUCT   to the Postgres type:  TYPTYPE_COMPOSITE  'c'   # composite (e.g., table's rowtype)
+        java.sql.Types.DISTINCT to the Postgres type:  TYPTYPE_DOMAIN     'd'   # domain over another type
+     */
+    List<String> getTypeNames(DatabaseMetaData databaseMetaData, String schemaPattern) throws SQLException {
+        List<String> typeNames = new ArrayList<>();
+        ResultSet rs = databaseMetaData.getUDTs(null, schemaPattern, null, null);
+        while (rs.next()) {
+            typeNames.add(rs.getString("TYPE_NAME"));
+        }
+        return typeNames;
     }
-    return typeNames;
-  }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc4/DatabaseMetaDataTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/DatabaseMetaDataTest.java
index 80e16e4..5fbe08e 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc4/DatabaseMetaDataTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/DatabaseMetaDataTest.java
@@ -33,465 +33,467 @@ import java.util.List;
 
 class DatabaseMetaDataTest {
 
-  private Connection conn;
+    private Connection conn;
 
-  @BeforeEach
-  void setUp() throws Exception {
-    conn = TestUtil.openDB();
-    TestUtil.dropSequence(conn, "sercoltest_a_seq");
-    TestUtil.createTable(conn, "sercoltest", "a serial, b int");
-    TestUtil.createSchema(conn, "hasfunctions");
-    TestUtil.createSchema(conn, "nofunctions");
-    TestUtil.createSchema(conn, "hasprocedures");
-    TestUtil.createSchema(conn, "noprocedures");
-    TestUtil.execute(conn, "create function hasfunctions.addfunction (integer, integer) "
-        + "RETURNS integer AS 'select $1 + $2;' LANGUAGE SQL IMMUTABLE");
-    if (TestUtil.haveMinimumServerVersion(conn, ServerVersion.v11)) {
-      TestUtil.execute(conn, "create procedure hasprocedures.addprocedure() "
-          + "LANGUAGE plpgsql AS $$ BEGIN SELECT 1; END; $$");
-    }
-  }
-
-  @AfterEach
-  void tearDown() throws Exception {
-    TestUtil.dropSequence(conn, "sercoltest_a_seq");
-    TestUtil.dropTable(conn, "sercoltest");
-    TestUtil.dropSchema(conn, "hasfunctions");
-    TestUtil.dropSchema(conn, "nofunctions");
-    TestUtil.dropSchema(conn, "hasprocedures");
-    TestUtil.dropSchema(conn, "noprocedures");
-    TestUtil.closeDB(conn);
-  }
-
-  @Test
-  void getClientInfoProperties() throws Exception {
-    DatabaseMetaData dbmd = conn.getMetaData();
-
-    ResultSet rs = dbmd.getClientInfoProperties();
-    if (!TestUtil.haveMinimumServerVersion(conn, ServerVersion.v9_0)) {
-      assertFalse(rs.next());
-      return;
-    }
-
-    assertTrue(rs.next());
-    assertEquals("ApplicationName", rs.getString("NAME"));
-  }
-
-  @Test
-  void getColumnsForAutoIncrement() throws Exception {
-    DatabaseMetaData dbmd = conn.getMetaData();
-
-    ResultSet rs = dbmd.getColumns("%", "%", "sercoltest", "%");
-    assertTrue(rs.next());
-    assertEquals("a", rs.getString("COLUMN_NAME"));
-    assertEquals("YES", rs.getString("IS_AUTOINCREMENT"));
-
-    assertTrue(rs.next());
-    assertEquals("b", rs.getString("COLUMN_NAME"));
-    assertEquals("NO", rs.getString("IS_AUTOINCREMENT"));
-
-    assertFalse(rs.next());
-  }
-
-  @Test
-  void getSchemas() throws SQLException {
-    DatabaseMetaData dbmd = conn.getMetaData();
-
-    ResultSet rs = dbmd.getSchemas("", "publ%");
-
-    assertTrue(rs.next());
-    assertEquals("public", rs.getString("TABLE_SCHEM"));
-    assertNull(rs.getString("TABLE_CATALOG"));
-    assertFalse(rs.next());
-  }
-
-  @Test
-  void getFunctionsInSchemaForFunctions() throws SQLException {
-    DatabaseMetaData dbmd = conn.getMetaData();
-
-    try (ResultSet rs = dbmd.getFunctions("", "hasfunctions", "")) {
-      List<CatalogObject> list = assertFunctionRSAndReturnList(rs);
-      assertEquals(1, list.size(), "There should be one function in the hasfunctions schema");
-      assertListContains("getFunctions('', 'hasfunctions', '') must contain addfunction", list, "hasfunctions", "addfunction");
-    }
-
-    try (ResultSet rs = dbmd.getFunctions("", "hasfunctions", "addfunction")) {
-      List<CatalogObject> list = assertFunctionRSAndReturnList(rs);
-      assertEquals(1, list.size(), "There should be one function in the hasfunctions schema with name addfunction");
-      assertListContains("getFunctions('', 'hasfunctions', 'addfunction') must contain addfunction", list, "hasfunctions", "addfunction");
-    }
-
-    try (ResultSet rs = dbmd.getFunctions("", "nofunctions", "")) {
-      boolean hasFunctions = rs.next();
-      assertFalse(hasFunctions, "There should be no functions in the nofunctions schema");
-    }
-  }
-
-  @Test
-  void getFunctionsInSchemaForProcedures() throws SQLException {
-    // Due to the introduction of actual stored procedures in PostgreSQL 11, getFunctions should not return procedures for PostgreSQL versions 11+
-    // On older installation we do not create the procedures so the below schemas should all be empty
-    DatabaseMetaData dbmd = conn.getMetaData();
-
-    // Search for functions in schema "hasprocedures"
-    try (ResultSet rs = dbmd.getFunctions("", "hasprocedures", null)) {
-      assertFalse(rs.next(), "The hasprocedures schema not return procedures from getFunctions");
-    }
-    // Search for functions in schema "noprocedures" (which should never expect records)
-    try (ResultSet rs = dbmd.getFunctions("", "noprocedures", null)) {
-      assertFalse(rs.next(), "The noprocedures schema should not have functions");
-    }
-    // Search for functions by procedure name "addprocedure"
-    try (ResultSet rs = dbmd.getFunctions("", "hasprocedures", "addprocedure")) {
-      assertFalse(rs.next(), "Should not return procedures from getFunctions by schema + name");
-    }
-  }
-
-  @Test
-  void getProceduresInSchemaForFunctions() throws SQLException {
-    // Due to the introduction of actual stored procedures in PostgreSQL 11, getProcedures should not return functions for PostgreSQL versions 11+
-    DatabaseMetaData dbmd = conn.getMetaData();
-
-    // Search for procedures in schema "hasfunctions" (which should expect a record only for PostgreSQL < 11)
-    try (ResultSet rs = dbmd.getProcedures("", "hasfunctions", null)) {
-      if (TestUtil.haveMinimumServerVersion(conn, ServerVersion.v11)) {
-        assertFalse(rs.next(), "PostgreSQL11+ should not return functions from getProcedures");
-      } else {
-        // PostgreSQL prior to 11 should return functions from getProcedures
-        assertProcedureRS(rs);
-      }
-    }
-
-    // Search for procedures in schema "nofunctions" (which should never expect records)
-    try (ResultSet rs = dbmd.getProcedures("", "nofunctions", null)) {
-      assertFalse(rs.next(), "getProcedures(...) should not return procedures for schema nofunctions");
-    }
-
-    // Search for procedures by function name "addfunction" within schema "hasfunctions" (which should expect a record for PostgreSQL < 11)
-    try (ResultSet rs = dbmd.getProcedures("", "hasfunctions", "addfunction")) {
-      if (TestUtil.haveMinimumServerVersion(conn, ServerVersion.v11)) {
-        assertFalse(rs.next(), "PostgreSQL11+ should not return functions from getProcedures");
-      } else {
-        // PostgreSQL prior to 11 should return functions from getProcedures
-        assertProcedureRS(rs);
-      }
-    }
-
-    // Search for procedures by function name "addfunction" within schema "nofunctions"  (which should never expect records)
-    try (ResultSet rs = dbmd.getProcedures("", "nofunctions", "addfunction")) {
-      assertFalse(rs.next(), "getProcedures(...) should not return procedures for schema nofunctions + addfunction");
-    }
-  }
-
-  @Test
-  void getProceduresInSchemaForProcedures() throws SQLException {
-    // Only run this test for PostgreSQL version 11+; assertions for versions prior would be vacuously true as we don't create a procedure in the setup for older versions
-    Assumptions.assumeTrue(TestUtil.haveMinimumServerVersion(conn, ServerVersion.v11));
-
-    DatabaseMetaData dbmd = conn.getMetaData();
-
-    try (ResultSet rs = dbmd.getProcedures("", "hasprocedures", null)) {
-      int count = assertProcedureRS(rs);
-      assertEquals(1, count, "getProcedures() should be non-empty for the hasprocedures schema");
-    }
-
-    try (ResultSet rs = dbmd.getProcedures("", "noprocedures", null)) {
-      assertFalse(rs.next(), "getProcedures() should be empty for the hasprocedures schema");
-    }
-
-    try (ResultSet rs = dbmd.getProcedures("", "hasfunctions", null)) {
-      assertFalse(rs.next(), "getProcedures() should be empty for the nofunctions schema");
-    }
-
-    try (ResultSet rs = dbmd.getProcedures("", "nofunctions", null)) {
-      assertFalse(rs.next(), "getProcedures() should be empty for the nofunctions schema");
-    }
-  }
-
-  @Test
-  void getFunctionsWithBlankPatterns() throws SQLException {
-    int minFuncCount = 1000;
-    DatabaseMetaData dbmd = conn.getMetaData();
-
-    final int totalCount;
-    try (ResultSet rs = dbmd.getFunctions("", "", "")) {
-      List<CatalogObject> list = assertFunctionRSAndReturnList(rs);
-      totalCount = list.size(); // Rest of this test will validate against this value
-      assertThat(totalCount > minFuncCount, is(true));
-      assertListContains("getFunctions('', '', '') must contain addfunction", list, "hasfunctions", "addfunction");
-    }
-
-    // Should be same as blank pattern
-    try (ResultSet rs = dbmd.getFunctions(null, null, null)) {
-      int count = assertGetFunctionRS(rs);
-      assertThat(count, is(totalCount));
-    }
-
-    // Catalog parameter has no affect on our getFunctions filtering
-    try (ResultSet rs = dbmd.getFunctions("ANYTHING_WILL_WORK", null, null)) {
-      int count = assertGetFunctionRS(rs);
-      assertThat(count, is(totalCount));
-    }
-
-    // Filter by schema
-    try (ResultSet rs = dbmd.getFunctions("", "pg_catalog", null)) {
-      int count = assertGetFunctionRS(rs);
-      assertThat(count > minFuncCount, is(true));
-    }
-
-    // Filter by schema and function name
-    try (ResultSet rs = dbmd.getFunctions("", "pg_catalog", "abs")) {
-      int count = assertGetFunctionRS(rs);
-      assertThat(count >= 1, is(true));
-    }
-
-    // Filter by function name only
-    try (ResultSet rs = dbmd.getFunctions("", "", "abs")) {
-      int count = assertGetFunctionRS(rs);
-      assertThat(count >= 1, is(true));
-    }
-  }
-
-  private static class CatalogObject implements Comparable<CatalogObject> {
-    private final String catalog;
-    private final String schema;
-    private final String name;
-    private final String specificName;
-
-    private CatalogObject(String catalog, String schema, String name, String specificName) {
-      this.catalog = catalog;
-      this.schema = schema;
-      this.name = name;
-      this.specificName = specificName;
-    }
-
-    @Override
-    public int hashCode() {
-      final int prime = 31;
-      int result = 1;
-      result = prime * result + (catalog == null ? 0 : catalog.hashCode());
-      result = prime * result + (name == null ? 0 : name.hashCode());
-      result = prime * result + (schema == null ? 0 : schema.hashCode());
-      result = prime * result + (specificName == null ? 0 : specificName.hashCode());
-      return result;
-    }
-
-    @Override
-    public boolean equals(Object obj) {
-      if (obj == null || getClass() != obj.getClass()) {
-        return false;
-      } else if (obj == this) {
-        return true;
-      }
-      return compareTo((CatalogObject) obj) == 0;
-    }
-
-    @Override
-    public int compareTo(CatalogObject other) {
-      int comp = catalog.compareTo(other.catalog);
-      if (comp != 0) {
-        return comp;
-      }
-      comp = schema.compareTo(other.schema);
-      if (comp != 0) {
-        return comp;
-      }
-      comp = name.compareTo(other.name);
-      if (comp != 0) {
-        return comp;
-      }
-      comp = specificName.compareTo(other.specificName);
-      if (comp != 0) {
-        return comp;
-      }
-      return 0;
-    }
-  }
-
-  /** Assert some basic result from ResultSet of a GetFunctions method. Return the total row count. */
-  private int assertGetFunctionRS(ResultSet rs) throws SQLException {
-    return assertFunctionRSAndReturnList(rs).size();
-  }
-
-  private List<CatalogObject> assertFunctionRSAndReturnList(ResultSet rs) throws SQLException {
-    // There should be at least one row
-    assertThat(rs.next(), is(true));
-    assertThat(rs.getString("FUNCTION_CAT"), is(System.getProperty("database")));
-    assertThat(rs.getString("FUNCTION_SCHEM"), notNullValue());
-    assertThat(rs.getString("FUNCTION_NAME"), notNullValue());
-    assertThat(rs.getShort("FUNCTION_TYPE") >= 0, is(true));
-    assertThat(rs.getString("SPECIFIC_NAME"), notNullValue());
-
-    // Ensure there is enough column and column value retrieve by index should be same as column name (ordered)
-    assertThat(rs.getMetaData().getColumnCount(), is(6));
-    assertThat(rs.getString(1), is(rs.getString("FUNCTION_CAT")));
-    assertThat(rs.getString(2), is(rs.getString("FUNCTION_SCHEM")));
-    assertThat(rs.getString(3), is(rs.getString("FUNCTION_NAME")));
-    assertThat(rs.getString(4), is(rs.getString("REMARKS")));
-    assertThat(rs.getShort(5), is(rs.getShort("FUNCTION_TYPE")));
-    assertThat(rs.getString(6), is(rs.getString("SPECIFIC_NAME")));
-
-    // Get all result and assert they are ordered per javadoc spec:
-    //   FUNCTION_CAT, FUNCTION_SCHEM, FUNCTION_NAME and SPECIFIC_NAME
-    List<CatalogObject> result = new ArrayList<>();
-    do {
-      CatalogObject obj = new CatalogObject(
-          rs.getString("FUNCTION_CAT"),
-          rs.getString("FUNCTION_SCHEM"),
-          rs.getString("FUNCTION_NAME"),
-          rs.getString("SPECIFIC_NAME"));
-      result.add(obj);
-    } while (rs.next());
-
-    List<CatalogObject> orderedResult = new ArrayList<>(result);
-    Collections.sort(orderedResult);
-    assertThat(result, is(orderedResult));
-
-    return result;
-  }
-
-  private int assertProcedureRS(ResultSet rs) throws SQLException {
-    return assertProcedureRSAndReturnList(rs).size();
-  }
-
-  private List<CatalogObject> assertProcedureRSAndReturnList(ResultSet rs) throws SQLException {
-    // There should be at least one row
-    assertThat(rs.next(), is(true));
-    assertThat(rs.getString("PROCEDURE_CAT"), nullValue());
-    assertThat(rs.getString("PROCEDURE_SCHEM"), notNullValue());
-    assertThat(rs.getString("PROCEDURE_NAME"), notNullValue());
-    assertThat(rs.getShort("PROCEDURE_TYPE") >= 0, is(true));
-    assertThat(rs.getString("SPECIFIC_NAME"), notNullValue());
-
-    // Ensure there is enough column and column value retrieve by index should be same as column name (ordered)
-    assertThat(rs.getMetaData().getColumnCount(), is(9));
-    assertThat(rs.getString(1), is(rs.getString("PROCEDURE_CAT")));
-    assertThat(rs.getString(2), is(rs.getString("PROCEDURE_SCHEM")));
-    assertThat(rs.getString(3), is(rs.getString("PROCEDURE_NAME")));
-    // Per JDBC spec, indexes 4, 5, and 6 are reserved for future use
-    assertThat(rs.getString(7), is(rs.getString("REMARKS")));
-    assertThat(rs.getShort(8), is(rs.getShort("PROCEDURE_TYPE")));
-    assertThat(rs.getString(9), is(rs.getString("SPECIFIC_NAME")));
-
-    // Get all result and assert they are ordered per javadoc spec:
-    //   FUNCTION_CAT, FUNCTION_SCHEM, FUNCTION_NAME and SPECIFIC_NAME
-    List<CatalogObject> result = new ArrayList<>();
-    do {
-      CatalogObject obj = new CatalogObject(
-          rs.getString("PROCEDURE_CAT"),
-          rs.getString("PROCEDURE_SCHEM"),
-          rs.getString("PROCEDURE_NAME"),
-          rs.getString("SPECIFIC_NAME"));
-      result.add(obj);
-    } while (rs.next());
-
-    List<CatalogObject> orderedResult = new ArrayList<>(result);
-    Collections.sort(orderedResult);
-    assertThat(result, is(orderedResult));
-
-    return result;
-  }
-
-  private void assertListContains(String message, List<CatalogObject> list, String schema, String name) throws SQLException {
-    boolean found = list.stream().anyMatch(item -> item.schema.equals(schema) && item.name.equals(name));
-    assertTrue(found, message + "; schema=" + schema + " name=" + name);
-  }
-
-  @Test
-  void getFunctionsWithSpecificTypes() throws SQLException {
-    // These function creation are borrow from jdbc2/DatabaseMetaDataTest
-    // We modify to ensure new function created are returned by getFunctions()
-
-    DatabaseMetaData dbmd = conn.getMetaData();
-    if (TestUtil.haveMinimumServerVersion(conn, ServerVersion.v8_4)) {
-      Statement stmt = conn.createStatement();
-      stmt.execute(
-              "CREATE OR REPLACE FUNCTION getfunc_f1(int, varchar) RETURNS int AS 'SELECT 1;' LANGUAGE SQL");
-      ResultSet rs = dbmd.getFunctions("", "", "getfunc_f1");
-      assertThat(rs.next(), is(true));
-      assertThat(rs.getString("FUNCTION_NAME"), is("getfunc_f1"));
-      assertThat(rs.getShort("FUNCTION_TYPE"), is((short) DatabaseMetaData.functionNoTable));
-      assertThat(rs.next(), is(false));
-      rs.close();
-      stmt.execute("DROP FUNCTION getfunc_f1(int, varchar)");
-
-      stmt.execute(
-              "CREATE OR REPLACE FUNCTION getfunc_f3(IN a int, INOUT b varchar, OUT c timestamptz) AS $f$ BEGIN b := 'a'; c := now(); return; END; $f$ LANGUAGE plpgsql");
-      rs = dbmd.getFunctions("", "", "getfunc_f3");
-      assertThat(rs.next(), is(true));
-      assertThat(rs.getString("FUNCTION_NAME"), is("getfunc_f3"));
-      assertThat(rs.getShort("FUNCTION_TYPE"), is((short) DatabaseMetaData.functionNoTable));
-      assertThat(rs.next(), is(false));
-      rs.close();
-      stmt.execute("DROP FUNCTION getfunc_f3(int, varchar)");
-
-      // RETURNS TABLE requires PostgreSQL 8.4+
-      stmt.execute(
-              "CREATE OR REPLACE FUNCTION getfunc_f5() RETURNS TABLE (i int) LANGUAGE sql AS 'SELECT 1'");
-
-      rs = dbmd.getFunctions("", "", "getfunc_f5");
-      assertThat(rs.next(), is(true));
-      assertThat(rs.getString("FUNCTION_NAME"), is("getfunc_f5"));
-      assertThat(rs.getShort("FUNCTION_TYPE"), is((short) DatabaseMetaData.functionReturnsTable));
-      assertThat(rs.next(), is(false));
-      rs.close();
-      stmt.execute("DROP FUNCTION getfunc_f5()");
-    } else {
-      // For PG 8.3 or 8.2 it will resulted in unknown function type
-      Statement stmt = conn.createStatement();
-      stmt.execute(
-              "CREATE OR REPLACE FUNCTION getfunc_f1(int, varchar) RETURNS int AS 'SELECT 1;' LANGUAGE SQL");
-      ResultSet rs = dbmd.getFunctions("", "", "getfunc_f1");
-      assertThat(rs.next(), is(true));
-      assertThat(rs.getString("FUNCTION_NAME"), is("getfunc_f1"));
-      assertThat(rs.getShort("FUNCTION_TYPE"), is((short) DatabaseMetaData.functionResultUnknown));
-      assertThat(rs.next(), is(false));
-      rs.close();
-      stmt.execute("DROP FUNCTION getfunc_f1(int, varchar)");
-
-      stmt.execute(
-              "CREATE OR REPLACE FUNCTION getfunc_f3(IN a int, INOUT b varchar, OUT c timestamptz) AS $f$ BEGIN b := 'a'; c := now(); return; END; $f$ LANGUAGE plpgsql");
-      rs = dbmd.getFunctions("", "", "getfunc_f3");
-      assertThat(rs.next(), is(true));
-      assertThat(rs.getString("FUNCTION_NAME"), is("getfunc_f3"));
-      assertThat(rs.getShort("FUNCTION_TYPE"), is((short) DatabaseMetaData.functionResultUnknown));
-      assertThat(rs.next(), is(false));
-      rs.close();
-      stmt.execute("DROP FUNCTION getfunc_f3(int, varchar)");
-    }
-  }
-
-  @Test
-  void sortedDataTypes() throws SQLException {
-    // https://github.com/pgjdbc/pgjdbc/issues/716
-    DatabaseMetaData dbmd = conn.getMetaData();
-    ResultSet rs = dbmd.getTypeInfo();
-    int lastType = Integer.MIN_VALUE;
-    while (rs.next()) {
-      int type = rs.getInt("DATA_TYPE");
-      assertTrue(lastType <= type);
-      lastType = type;
-    }
-  }
-
-  @Test
-  void getSqlTypes() throws SQLException {
-    if (TestUtil.haveMinimumServerVersion(conn, ServerVersion.v10)) {
-      try (Connection privileged = TestUtil.openPrivilegedDB()) {
-        try (Statement stmt = privileged.createStatement()) {
-          // create a function called array_in
-          stmt.execute("CREATE OR REPLACE FUNCTION public.array_in(anyarray, oid, integer)\n"
-              + " RETURNS anyarray\n"
-              + " LANGUAGE internal\n"
-              + " STABLE PARALLEL SAFE STRICT\n"
-              + "AS $function$array_in$function$");
+    @BeforeEach
+    void setUp() throws Exception {
+        conn = TestUtil.openDB();
+        TestUtil.dropSequence(conn, "sercoltest_a_seq");
+        TestUtil.createTable(conn, "sercoltest", "a serial, b int");
+        TestUtil.createSchema(conn, "hasfunctions");
+        TestUtil.createSchema(conn, "nofunctions");
+        TestUtil.createSchema(conn, "hasprocedures");
+        TestUtil.createSchema(conn, "noprocedures");
+        TestUtil.execute(conn, "create function hasfunctions.addfunction (integer, integer) "
+                + "RETURNS integer AS 'select $1 + $2;' LANGUAGE SQL IMMUTABLE");
+        if (TestUtil.haveMinimumServerVersion(conn, ServerVersion.v11)) {
+            TestUtil.execute(conn, "create procedure hasprocedures.addprocedure() "
+                    + "LANGUAGE plpgsql AS $$ BEGIN SELECT 1; END; $$");
         }
-        DatabaseMetaData dbmd = privileged.getMetaData();
+    }
+
+    @AfterEach
+    void tearDown() throws Exception {
+        TestUtil.dropSequence(conn, "sercoltest_a_seq");
+        TestUtil.dropTable(conn, "sercoltest");
+        TestUtil.dropSchema(conn, "hasfunctions");
+        TestUtil.dropSchema(conn, "nofunctions");
+        TestUtil.dropSchema(conn, "hasprocedures");
+        TestUtil.dropSchema(conn, "noprocedures");
+        TestUtil.closeDB(conn);
+    }
+
+    @Test
+    void getClientInfoProperties() throws Exception {
+        DatabaseMetaData dbmd = conn.getMetaData();
+
+        ResultSet rs = dbmd.getClientInfoProperties();
+        if (!TestUtil.haveMinimumServerVersion(conn, ServerVersion.v9_0)) {
+            assertFalse(rs.next());
+            return;
+        }
+
+        assertTrue(rs.next());
+        assertEquals("ApplicationName", rs.getString("NAME"));
+    }
+
+    @Test
+    void getColumnsForAutoIncrement() throws Exception {
+        DatabaseMetaData dbmd = conn.getMetaData();
+
+        ResultSet rs = dbmd.getColumns("%", "%", "sercoltest", "%");
+        assertTrue(rs.next());
+        assertEquals("a", rs.getString("COLUMN_NAME"));
+        assertEquals("YES", rs.getString("IS_AUTOINCREMENT"));
+
+        assertTrue(rs.next());
+        assertEquals("b", rs.getString("COLUMN_NAME"));
+        assertEquals("NO", rs.getString("IS_AUTOINCREMENT"));
+
+        assertFalse(rs.next());
+    }
+
+    @Test
+    void getSchemas() throws SQLException {
+        DatabaseMetaData dbmd = conn.getMetaData();
+
+        ResultSet rs = dbmd.getSchemas("", "publ%");
+
+        assertTrue(rs.next());
+        assertEquals("public", rs.getString("TABLE_SCHEM"));
+        assertNull(rs.getString("TABLE_CATALOG"));
+        assertFalse(rs.next());
+    }
+
+    @Test
+    void getFunctionsInSchemaForFunctions() throws SQLException {
+        DatabaseMetaData dbmd = conn.getMetaData();
+
+        try (ResultSet rs = dbmd.getFunctions("", "hasfunctions", "")) {
+            List<CatalogObject> list = assertFunctionRSAndReturnList(rs);
+            assertEquals(1, list.size(), "There should be one function in the hasfunctions schema");
+            assertListContains("getFunctions('', 'hasfunctions', '') must contain addfunction", list, "hasfunctions", "addfunction");
+        }
+
+        try (ResultSet rs = dbmd.getFunctions("", "hasfunctions", "addfunction")) {
+            List<CatalogObject> list = assertFunctionRSAndReturnList(rs);
+            assertEquals(1, list.size(), "There should be one function in the hasfunctions schema with name addfunction");
+            assertListContains("getFunctions('', 'hasfunctions', 'addfunction') must contain addfunction", list, "hasfunctions", "addfunction");
+        }
+
+        try (ResultSet rs = dbmd.getFunctions("", "nofunctions", "")) {
+            boolean hasFunctions = rs.next();
+            assertFalse(hasFunctions, "There should be no functions in the nofunctions schema");
+        }
+    }
+
+    @Test
+    void getFunctionsInSchemaForProcedures() throws SQLException {
+        // Due to the introduction of actual stored procedures in PostgreSQL 11, getFunctions should not return procedures for PostgreSQL versions 11+
+        // On older installations we do not create the procedures, so the schemas below should all be empty
+        DatabaseMetaData dbmd = conn.getMetaData();
+
+        // Search for functions in schema "hasprocedures"
+        try (ResultSet rs = dbmd.getFunctions("", "hasprocedures", null)) {
+            assertFalse(rs.next(), "The hasprocedures schema should not return procedures from getFunctions");
+        }
+        // Search for functions in schema "noprocedures" (which should never expect records)
+        try (ResultSet rs = dbmd.getFunctions("", "noprocedures", null)) {
+            assertFalse(rs.next(), "The noprocedures schema should not have functions");
+        }
+        // Search for functions by procedure name "addprocedure"
+        try (ResultSet rs = dbmd.getFunctions("", "hasprocedures", "addprocedure")) {
+            assertFalse(rs.next(), "Should not return procedures from getFunctions by schema + name");
+        }
+    }
+
+    @Test
+    void getProceduresInSchemaForFunctions() throws SQLException {
+        // Due to the introduction of actual stored procedures in PostgreSQL 11, getProcedures should not return functions for PostgreSQL versions 11+
+        DatabaseMetaData dbmd = conn.getMetaData();
+
+        // Search for procedures in schema "hasfunctions" (which should expect a record only for PostgreSQL < 11)
+        try (ResultSet rs = dbmd.getProcedures("", "hasfunctions", null)) {
+            if (TestUtil.haveMinimumServerVersion(conn, ServerVersion.v11)) {
+                assertFalse(rs.next(), "PostgreSQL11+ should not return functions from getProcedures");
+            } else {
+                // PostgreSQL prior to 11 should return functions from getProcedures
+                assertProcedureRS(rs);
+            }
+        }
+
+        // Search for procedures in schema "nofunctions" (which should never expect records)
+        try (ResultSet rs = dbmd.getProcedures("", "nofunctions", null)) {
+            assertFalse(rs.next(), "getProcedures(...) should not return procedures for schema nofunctions");
+        }
+
+        // Search for procedures by function name "addfunction" within schema "hasfunctions" (which should expect a record for PostgreSQL < 11)
+        try (ResultSet rs = dbmd.getProcedures("", "hasfunctions", "addfunction")) {
+            if (TestUtil.haveMinimumServerVersion(conn, ServerVersion.v11)) {
+                assertFalse(rs.next(), "PostgreSQL11+ should not return functions from getProcedures");
+            } else {
+                // PostgreSQL prior to 11 should return functions from getProcedures
+                assertProcedureRS(rs);
+            }
+        }
+
+        // Search for procedures by function name "addfunction" within schema "nofunctions"  (which should never expect records)
+        try (ResultSet rs = dbmd.getProcedures("", "nofunctions", "addfunction")) {
+            assertFalse(rs.next(), "getProcedures(...) should not return procedures for schema nofunctions + addfunction");
+        }
+    }
+
+    @Test
+    void getProceduresInSchemaForProcedures() throws SQLException {
+        // Only run this test for PostgreSQL version 11+; assertions for versions prior would be vacuously true as we don't create a procedure in the setup for older versions
+        Assumptions.assumeTrue(TestUtil.haveMinimumServerVersion(conn, ServerVersion.v11));
+
+        DatabaseMetaData dbmd = conn.getMetaData();
+
+        try (ResultSet rs = dbmd.getProcedures("", "hasprocedures", null)) {
+            int count = assertProcedureRS(rs);
+            assertEquals(1, count, "getProcedures() should be non-empty for the hasprocedures schema");
+        }
+
+        try (ResultSet rs = dbmd.getProcedures("", "noprocedures", null)) {
+            assertFalse(rs.next(), "getProcedures() should be empty for the noprocedures schema");
+        }
+
+        try (ResultSet rs = dbmd.getProcedures("", "hasfunctions", null)) {
+            assertFalse(rs.next(), "getProcedures() should be empty for the hasfunctions schema");
+        }
+
+        try (ResultSet rs = dbmd.getProcedures("", "nofunctions", null)) {
+            assertFalse(rs.next(), "getProcedures() should be empty for the nofunctions schema");
+        }
+    }
+
+    @Test
+    void getFunctionsWithBlankPatterns() throws SQLException {
+        int minFuncCount = 1000;
+        DatabaseMetaData dbmd = conn.getMetaData();
+
+        final int totalCount;
+        try (ResultSet rs = dbmd.getFunctions("", "", "")) {
+            List<CatalogObject> list = assertFunctionRSAndReturnList(rs);
+            totalCount = list.size(); // Rest of this test will validate against this value
+            assertThat(totalCount > minFuncCount, is(true));
+            assertListContains("getFunctions('', '', '') must contain addfunction", list, "hasfunctions", "addfunction");
+        }
+
+        // Should be same as blank pattern
+        try (ResultSet rs = dbmd.getFunctions(null, null, null)) {
+            int count = assertGetFunctionRS(rs);
+            assertThat(count, is(totalCount));
+        }
+
+    // Catalog parameter has no effect on our getFunctions filtering
+        try (ResultSet rs = dbmd.getFunctions("ANYTHING_WILL_WORK", null, null)) {
+            int count = assertGetFunctionRS(rs);
+            assertThat(count, is(totalCount));
+        }
+
+        // Filter by schema
+        try (ResultSet rs = dbmd.getFunctions("", "pg_catalog", null)) {
+            int count = assertGetFunctionRS(rs);
+            assertThat(count > minFuncCount, is(true));
+        }
+
+        // Filter by schema and function name
+        try (ResultSet rs = dbmd.getFunctions("", "pg_catalog", "abs")) {
+            int count = assertGetFunctionRS(rs);
+            assertThat(count >= 1, is(true));
+        }
+
+        // Filter by function name only
+        try (ResultSet rs = dbmd.getFunctions("", "", "abs")) {
+            int count = assertGetFunctionRS(rs);
+            assertThat(count >= 1, is(true));
+        }
+    }
+
+    /**
+     * Assert some basic result from ResultSet of a GetFunctions method. Return the total row count.
+     */
+    private int assertGetFunctionRS(ResultSet rs) throws SQLException {
+        return assertFunctionRSAndReturnList(rs).size();
+    }
+
+    private List<CatalogObject> assertFunctionRSAndReturnList(ResultSet rs) throws SQLException {
+        // There should be at least one row
+        assertThat(rs.next(), is(true));
+        assertThat(rs.getString("FUNCTION_CAT"), is(System.getProperty("database")));
+        assertThat(rs.getString("FUNCTION_SCHEM"), notNullValue());
+        assertThat(rs.getString("FUNCTION_NAME"), notNullValue());
+        assertThat(rs.getShort("FUNCTION_TYPE") >= 0, is(true));
+        assertThat(rs.getString("SPECIFIC_NAME"), notNullValue());
+
+        // Ensure there are enough columns and that each value retrieved by index matches the one retrieved by column name (ordered)
+        assertThat(rs.getMetaData().getColumnCount(), is(6));
+        assertThat(rs.getString(1), is(rs.getString("FUNCTION_CAT")));
+        assertThat(rs.getString(2), is(rs.getString("FUNCTION_SCHEM")));
+        assertThat(rs.getString(3), is(rs.getString("FUNCTION_NAME")));
+        assertThat(rs.getString(4), is(rs.getString("REMARKS")));
+        assertThat(rs.getShort(5), is(rs.getShort("FUNCTION_TYPE")));
+        assertThat(rs.getString(6), is(rs.getString("SPECIFIC_NAME")));
+
+        // Get all results and assert they are ordered per javadoc spec:
+        //   FUNCTION_CAT, FUNCTION_SCHEM, FUNCTION_NAME and SPECIFIC_NAME
+        List<CatalogObject> result = new ArrayList<>();
+        do {
+            CatalogObject obj = new CatalogObject(
+                    rs.getString("FUNCTION_CAT"),
+                    rs.getString("FUNCTION_SCHEM"),
+                    rs.getString("FUNCTION_NAME"),
+                    rs.getString("SPECIFIC_NAME"));
+            result.add(obj);
+        } while (rs.next());
+
+        List<CatalogObject> orderedResult = new ArrayList<>(result);
+        Collections.sort(orderedResult);
+        assertThat(result, is(orderedResult));
+
+        return result;
+    }
+
+    private int assertProcedureRS(ResultSet rs) throws SQLException {
+        return assertProcedureRSAndReturnList(rs).size();
+    }
+
+    private List<CatalogObject> assertProcedureRSAndReturnList(ResultSet rs) throws SQLException {
+        // There should be at least one row
+        assertThat(rs.next(), is(true));
+        assertThat(rs.getString("PROCEDURE_CAT"), nullValue());
+        assertThat(rs.getString("PROCEDURE_SCHEM"), notNullValue());
+        assertThat(rs.getString("PROCEDURE_NAME"), notNullValue());
+        assertThat(rs.getShort("PROCEDURE_TYPE") >= 0, is(true));
+        assertThat(rs.getString("SPECIFIC_NAME"), notNullValue());
+
+        // Ensure there are enough columns and that each value retrieved by index matches the one retrieved by column name (ordered)
+        assertThat(rs.getMetaData().getColumnCount(), is(9));
+        assertThat(rs.getString(1), is(rs.getString("PROCEDURE_CAT")));
+        assertThat(rs.getString(2), is(rs.getString("PROCEDURE_SCHEM")));
+        assertThat(rs.getString(3), is(rs.getString("PROCEDURE_NAME")));
+        // Per JDBC spec, indexes 4, 5, and 6 are reserved for future use
+        assertThat(rs.getString(7), is(rs.getString("REMARKS")));
+        assertThat(rs.getShort(8), is(rs.getShort("PROCEDURE_TYPE")));
+        assertThat(rs.getString(9), is(rs.getString("SPECIFIC_NAME")));
+
+        // Get all results and assert they are ordered per javadoc spec:
+        //   PROCEDURE_CAT, PROCEDURE_SCHEM, PROCEDURE_NAME and SPECIFIC_NAME
+        List<CatalogObject> result = new ArrayList<>();
+        do {
+            CatalogObject obj = new CatalogObject(
+                    rs.getString("PROCEDURE_CAT"),
+                    rs.getString("PROCEDURE_SCHEM"),
+                    rs.getString("PROCEDURE_NAME"),
+                    rs.getString("SPECIFIC_NAME"));
+            result.add(obj);
+        } while (rs.next());
+
+        List<CatalogObject> orderedResult = new ArrayList<>(result);
+        Collections.sort(orderedResult);
+        assertThat(result, is(orderedResult));
+
+        return result;
+    }
+
+    private void assertListContains(String message, List<CatalogObject> list, String schema, String name) throws SQLException {
+        boolean found = list.stream().anyMatch(item -> item.schema.equals(schema) && item.name.equals(name));
+        assertTrue(found, message + "; schema=" + schema + " name=" + name);
+    }
+
+    @Test
+    void getFunctionsWithSpecificTypes() throws SQLException {
+        // These function creations are borrowed from jdbc2/DatabaseMetaDataTest
+        // We modify them to ensure the newly created functions are returned by getFunctions()
+
+        DatabaseMetaData dbmd = conn.getMetaData();
+        if (TestUtil.haveMinimumServerVersion(conn, ServerVersion.v8_4)) {
+            Statement stmt = conn.createStatement();
+            stmt.execute(
+                    "CREATE OR REPLACE FUNCTION getfunc_f1(int, varchar) RETURNS int AS 'SELECT 1;' LANGUAGE SQL");
+            ResultSet rs = dbmd.getFunctions("", "", "getfunc_f1");
+            assertThat(rs.next(), is(true));
+            assertThat(rs.getString("FUNCTION_NAME"), is("getfunc_f1"));
+            assertThat(rs.getShort("FUNCTION_TYPE"), is((short) DatabaseMetaData.functionNoTable));
+            assertThat(rs.next(), is(false));
+            rs.close();
+            stmt.execute("DROP FUNCTION getfunc_f1(int, varchar)");
+
+            stmt.execute(
+                    "CREATE OR REPLACE FUNCTION getfunc_f3(IN a int, INOUT b varchar, OUT c timestamptz) AS $f$ BEGIN b := 'a'; c := now(); return; END; $f$ LANGUAGE plpgsql");
+            rs = dbmd.getFunctions("", "", "getfunc_f3");
+            assertThat(rs.next(), is(true));
+            assertThat(rs.getString("FUNCTION_NAME"), is("getfunc_f3"));
+            assertThat(rs.getShort("FUNCTION_TYPE"), is((short) DatabaseMetaData.functionNoTable));
+            assertThat(rs.next(), is(false));
+            rs.close();
+            stmt.execute("DROP FUNCTION getfunc_f3(int, varchar)");
+
+            // RETURNS TABLE requires PostgreSQL 8.4+
+            stmt.execute(
+                    "CREATE OR REPLACE FUNCTION getfunc_f5() RETURNS TABLE (i int) LANGUAGE sql AS 'SELECT 1'");
+
+            rs = dbmd.getFunctions("", "", "getfunc_f5");
+            assertThat(rs.next(), is(true));
+            assertThat(rs.getString("FUNCTION_NAME"), is("getfunc_f5"));
+            assertThat(rs.getShort("FUNCTION_TYPE"), is((short) DatabaseMetaData.functionReturnsTable));
+            assertThat(rs.next(), is(false));
+            rs.close();
+            stmt.execute("DROP FUNCTION getfunc_f5()");
+        } else {
+            // For PG 8.3 or 8.2 it will result in an unknown function type
+            Statement stmt = conn.createStatement();
+            stmt.execute(
+                    "CREATE OR REPLACE FUNCTION getfunc_f1(int, varchar) RETURNS int AS 'SELECT 1;' LANGUAGE SQL");
+            ResultSet rs = dbmd.getFunctions("", "", "getfunc_f1");
+            assertThat(rs.next(), is(true));
+            assertThat(rs.getString("FUNCTION_NAME"), is("getfunc_f1"));
+            assertThat(rs.getShort("FUNCTION_TYPE"), is((short) DatabaseMetaData.functionResultUnknown));
+            assertThat(rs.next(), is(false));
+            rs.close();
+            stmt.execute("DROP FUNCTION getfunc_f1(int, varchar)");
+
+            stmt.execute(
+                    "CREATE OR REPLACE FUNCTION getfunc_f3(IN a int, INOUT b varchar, OUT c timestamptz) AS $f$ BEGIN b := 'a'; c := now(); return; END; $f$ LANGUAGE plpgsql");
+            rs = dbmd.getFunctions("", "", "getfunc_f3");
+            assertThat(rs.next(), is(true));
+            assertThat(rs.getString("FUNCTION_NAME"), is("getfunc_f3"));
+            assertThat(rs.getShort("FUNCTION_TYPE"), is((short) DatabaseMetaData.functionResultUnknown));
+            assertThat(rs.next(), is(false));
+            rs.close();
+            stmt.execute("DROP FUNCTION getfunc_f3(int, varchar)");
+        }
+    }
+
+    @Test
+    void sortedDataTypes() throws SQLException {
+        // https://github.com/pgjdbc/pgjdbc/issues/716
+        DatabaseMetaData dbmd = conn.getMetaData();
         ResultSet rs = dbmd.getTypeInfo();
-        try (Statement stmt = privileged.createStatement()) {
-          stmt.execute("drop function public.array_in(anyarray, oid, integer)");
+        int lastType = Integer.MIN_VALUE;
+        while (rs.next()) {
+            int type = rs.getInt("DATA_TYPE");
+            assertTrue(lastType <= type);
+            lastType = type;
+        }
+    }
+
+    @Test
+    void getSqlTypes() throws SQLException {
+        if (TestUtil.haveMinimumServerVersion(conn, ServerVersion.v10)) {
+            try (Connection privileged = TestUtil.openPrivilegedDB()) {
+                try (Statement stmt = privileged.createStatement()) {
+                    // create a function called array_in
+                    stmt.execute("CREATE OR REPLACE FUNCTION public.array_in(anyarray, oid, integer)\n"
+                            + " RETURNS anyarray\n"
+                            + " LANGUAGE internal\n"
+                            + " STABLE PARALLEL SAFE STRICT\n"
+                            + "AS $function$array_in$function$");
+                }
+                DatabaseMetaData dbmd = privileged.getMetaData();
+                ResultSet rs = dbmd.getTypeInfo();
+                try (Statement stmt = privileged.createStatement()) {
+                    stmt.execute("drop function public.array_in(anyarray, oid, integer)");
+                }
+            }
+        }
+    }
+
+    private static class CatalogObject implements Comparable<CatalogObject> {
+        private final String catalog;
+        private final String schema;
+        private final String name;
+        private final String specificName;
+
+        private CatalogObject(String catalog, String schema, String name, String specificName) {
+            this.catalog = catalog;
+            this.schema = schema;
+            this.name = name;
+            this.specificName = specificName;
+        }
+
+        @Override
+        public int hashCode() {
+            final int prime = 31;
+            int result = 1;
+            result = prime * result + (catalog == null ? 0 : catalog.hashCode());
+            result = prime * result + (name == null ? 0 : name.hashCode());
+            result = prime * result + (schema == null ? 0 : schema.hashCode());
+            result = prime * result + (specificName == null ? 0 : specificName.hashCode());
+            return result;
+        }
+
+        @Override
+        public boolean equals(Object obj) {
+            if (obj == null || getClass() != obj.getClass()) {
+                return false;
+            } else if (obj == this) {
+                return true;
+            }
+            return compareTo((CatalogObject) obj) == 0;
+        }
+
+        @Override
+        public int compareTo(CatalogObject other) {
+            int comp = catalog.compareTo(other.catalog);
+            if (comp != 0) {
+                return comp;
+            }
+            comp = schema.compareTo(other.schema);
+            if (comp != 0) {
+                return comp;
+            }
+            comp = name.compareTo(other.name);
+            if (comp != 0) {
+                return comp;
+            }
+            comp = specificName.compareTo(other.specificName);
+            if (comp != 0) {
+                return comp;
+            }
+            return 0;
         }
-      }
     }
-  }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc4/IsValidTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/IsValidTest.java
index 5be6ad8..0c71a6e 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc4/IsValidTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/IsValidTest.java
@@ -20,47 +20,47 @@ import java.sql.Connection;
 import java.sql.SQLException;
 
 public class IsValidTest extends BaseTest4 {
-  @Test
-  public void testIsValidShouldNotModifyTransactionStateOutsideTransaction() throws SQLException {
-    TransactionState initialTransactionState = TestUtil.getTransactionState(con);
-    assertTrue("Connection should be valid", con.isValid(0));
-    TestUtil.assertTransactionState("Transaction state should not be modified by non-transactional Connection.isValid(...)", con, initialTransactionState);
-  }
-
-  @Test
-  public void testIsValidShouldNotModifyTransactionStateInEmptyTransaction() throws SQLException {
-    con.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED);
-    con.setAutoCommit(false);
-    TransactionState transactionState = TestUtil.getTransactionState(con);
-    assertTrue("Connection should be valid", con.isValid(0));
-    TestUtil.assertTransactionState("Transaction state should not be modified by Connection.isValid(...) within an empty transaction", con, transactionState);
-  }
-
-  @Test
-  public void testIsValidShouldNotModifyTransactionStateInNonEmptyTransaction() throws SQLException {
-    con.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED);
-    con.setAutoCommit(false);
-    TestUtil.executeQuery(con, "SELECT 1");
-    TransactionState transactionState = TestUtil.getTransactionState(con);
-    assertTrue("Connection should be valid", con.isValid(0));
-    TestUtil.assertTransactionState("Transaction state should not be modified by Connection.isValid(...) within a non-empty transaction", con, transactionState);
-  }
-
-  @Test
-  public void testIsValidRemoteClose() throws SQLException, InterruptedException {
-    Assume.assumeTrue("Unable to use pg_terminate_backend(...) before version 8.4", TestUtil.haveMinimumServerVersion(con, ServerVersion.v8_4));
-
-    boolean wasTerminated = TestUtil.terminateBackend(con);
-    assertTrue("The backend should be terminated", wasTerminated);
-
-    // Keeps checking for up to 5-seconds that the connection is marked invalid
-    for (int i = 0; i < 500; i++) {
-      if (!con.isValid(0)) {
-        break;
-      }
-      // Wait a bit to give the connection a chance to gracefully handle the termination
-      Thread.sleep(10);
+    @Test
+    public void testIsValidShouldNotModifyTransactionStateOutsideTransaction() throws SQLException {
+        TransactionState initialTransactionState = TestUtil.getTransactionState(con);
+        assertTrue("Connection should be valid", con.isValid(0));
+        TestUtil.assertTransactionState("Transaction state should not be modified by non-transactional Connection.isValid(...)", con, initialTransactionState);
+    }
+
+    @Test
+    public void testIsValidShouldNotModifyTransactionStateInEmptyTransaction() throws SQLException {
+        con.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED);
+        con.setAutoCommit(false);
+        TransactionState transactionState = TestUtil.getTransactionState(con);
+        assertTrue("Connection should be valid", con.isValid(0));
+        TestUtil.assertTransactionState("Transaction state should not be modified by Connection.isValid(...) within an empty transaction", con, transactionState);
+    }
+
+    @Test
+    public void testIsValidShouldNotModifyTransactionStateInNonEmptyTransaction() throws SQLException {
+        con.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED);
+        con.setAutoCommit(false);
+        TestUtil.executeQuery(con, "SELECT 1");
+        TransactionState transactionState = TestUtil.getTransactionState(con);
+        assertTrue("Connection should be valid", con.isValid(0));
+        TestUtil.assertTransactionState("Transaction state should not be modified by Connection.isValid(...) within a non-empty transaction", con, transactionState);
+    }
+
+    @Test
+    public void testIsValidRemoteClose() throws SQLException, InterruptedException {
+        Assume.assumeTrue("Unable to use pg_terminate_backend(...) before version 8.4", TestUtil.haveMinimumServerVersion(con, ServerVersion.v8_4));
+
+        boolean wasTerminated = TestUtil.terminateBackend(con);
+        assertTrue("The backend should be terminated", wasTerminated);
+
+        // Keeps checking for up to 5-seconds that the connection is marked invalid
+        for (int i = 0; i < 500; i++) {
+            if (!con.isValid(0)) {
+                break;
+            }
+            // Wait a bit to give the connection a chance to gracefully handle the termination
+            Thread.sleep(10);
+        }
+        assertFalse("The terminated connection should not be valid", con.isValid(0));
     }
-    assertFalse("The terminated connection should not be valid", con.isValid(0));
-  }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc4/Jdbc4TestSuite.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/Jdbc4TestSuite.java
index 908f5f8..56089fa 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc4/Jdbc4TestSuite.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/Jdbc4TestSuite.java
@@ -13,21 +13,21 @@ import org.junit.runners.Suite;
  */
 @RunWith(Suite.class)
 @Suite.SuiteClasses({
-    ArrayTest.class,
-    BinaryStreamTest.class,
-    BinaryTest.class,
-    BlobTest.class,
-    CharacterStreamTest.class,
-    ClientInfoTest.class,
-    ConnectionValidTimeoutTest.class,
-    DatabaseMetaDataHideUnprivilegedObjectsTest.class,
-    DatabaseMetaDataTest.class,
-    IsValidTest.class,
-    JsonbTest.class,
-    PGCopyInputStreamTest.class,
-    UUIDTest.class,
-    WrapperTest.class,
-    XmlTest.class,
+        ArrayTest.class,
+        BinaryStreamTest.class,
+        BinaryTest.class,
+        BlobTest.class,
+        CharacterStreamTest.class,
+        ClientInfoTest.class,
+        ConnectionValidTimeoutTest.class,
+        DatabaseMetaDataHideUnprivilegedObjectsTest.class,
+        DatabaseMetaDataTest.class,
+        IsValidTest.class,
+        JsonbTest.class,
+        PGCopyInputStreamTest.class,
+        UUIDTest.class,
+        WrapperTest.class,
+        XmlTest.class,
 })
 public class Jdbc4TestSuite {
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc4/JsonbTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/JsonbTest.java
index 6248ed2..aada706 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc4/JsonbTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/JsonbTest.java
@@ -29,86 +29,86 @@ import java.util.Collection;
 
 @RunWith(Parameterized.class)
 public class JsonbTest extends BaseTest4 {
-  public JsonbTest(BinaryMode binaryMode) {
-    setBinaryMode(binaryMode);
-  }
-
-  @Parameterized.Parameters(name = "binary = {0}")
-  public static Iterable<Object[]> data() {
-    Collection<Object[]> ids = new ArrayList<>();
-    for (BinaryMode binaryMode : BinaryMode.values()) {
-      ids.add(new Object[]{binaryMode});
+    public JsonbTest(BinaryMode binaryMode) {
+        setBinaryMode(binaryMode);
     }
-    return ids;
-  }
 
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-    Assume.assumeTrue("jsonb requires PostgreSQL 9.4+", TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_4));
-    TestUtil.createTable(con, "jsonbtest", "detail jsonb");
-    Statement stmt = con.createStatement();
-    stmt.executeUpdate("INSERT INTO jsonbtest (detail) VALUES ('{\"a\": 1}')");
-    stmt.executeUpdate("INSERT INTO jsonbtest (detail) VALUES ('{\"b\": 1}')");
-    stmt.executeUpdate("INSERT INTO jsonbtest (detail) VALUES ('{\"c\": 1}')");
-    stmt.close();
-  }
+    @Parameterized.Parameters(name = "binary = {0}")
+    public static Iterable<Object[]> data() {
+        Collection<Object[]> ids = new ArrayList<>();
+        for (BinaryMode binaryMode : BinaryMode.values()) {
+            ids.add(new Object[]{binaryMode});
+        }
+        return ids;
+    }
 
-  @Override
-  public void tearDown() throws SQLException {
-    TestUtil.dropTable(con, "jsonbtest");
-    super.tearDown();
-  }
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        Assume.assumeTrue("jsonb requires PostgreSQL 9.4+", TestUtil.haveMinimumServerVersion(con, ServerVersion.v9_4));
+        TestUtil.createTable(con, "jsonbtest", "detail jsonb");
+        Statement stmt = con.createStatement();
+        stmt.executeUpdate("INSERT INTO jsonbtest (detail) VALUES ('{\"a\": 1}')");
+        stmt.executeUpdate("INSERT INTO jsonbtest (detail) VALUES ('{\"b\": 1}')");
+        stmt.executeUpdate("INSERT INTO jsonbtest (detail) VALUES ('{\"c\": 1}')");
+        stmt.close();
+    }
 
-  @Test
-  public void testJsonbNonPreparedStatement() throws SQLException {
-    Statement stmt = con.createStatement();
+    @Override
+    public void tearDown() throws SQLException {
+        TestUtil.dropTable(con, "jsonbtest");
+        super.tearDown();
+    }
 
-    ResultSet rs = stmt.executeQuery("SELECT count(1) FROM jsonbtest WHERE detail ? 'a' = false;");
-    assertTrue(rs.next());
-    assertEquals(2, rs.getInt(1));
-    rs.close();
-    stmt.close();
-  }
+    @Test
+    public void testJsonbNonPreparedStatement() throws SQLException {
+        Statement stmt = con.createStatement();
 
-  @Test
-  public void testJsonbPreparedStatement() throws SQLException {
-    PreparedStatement stmt = con.prepareStatement("SELECT count(1) FROM jsonbtest WHERE detail ?? 'a' = false;");
-    ResultSet rs = stmt.executeQuery();
-    assertTrue(rs.next());
-    assertEquals(2, rs.getInt(1));
-    rs.close();
-    stmt.close();
-  }
+        ResultSet rs = stmt.executeQuery("SELECT count(1) FROM jsonbtest WHERE detail ? 'a' = false;");
+        assertTrue(rs.next());
+        assertEquals(2, rs.getInt(1));
+        rs.close();
+        stmt.close();
+    }
 
-  @Test
-  public void jsonbArray() throws SQLException {
-    jsonArrayGet("jsonb", String.class);
-  }
+    @Test
+    public void testJsonbPreparedStatement() throws SQLException {
+        PreparedStatement stmt = con.prepareStatement("SELECT count(1) FROM jsonbtest WHERE detail ?? 'a' = false;");
+        ResultSet rs = stmt.executeQuery();
+        assertTrue(rs.next());
+        assertEquals(2, rs.getInt(1));
+        rs.close();
+        stmt.close();
+    }
 
-  @Test
-  public void jsonArray() throws SQLException {
-    jsonArrayGet("json", String.class);
-  }
+    @Test
+    public void jsonbArray() throws SQLException {
+        jsonArrayGet("jsonb", String.class);
+    }
 
-  private void jsonArrayGet(String type, Class<?> arrayElement) throws SQLException {
-    PreparedStatement stmt = con.prepareStatement("SELECT '{[2],[3]}'::" + type + "[]");
-    ResultSet rs = stmt.executeQuery();
-    assertTrue(rs.next());
-    Array array = rs.getArray(1);
-    Object[] objectArray = (Object[]) array.getArray();
-    Assert.assertEquals(
-        "'{[2],[3]}'::" + type + "[] should come up as Java array with two entries",
-        "[[2], [3]]",
-        Arrays.deepToString(objectArray)
-    );
+    @Test
+    public void jsonArray() throws SQLException {
+        jsonArrayGet("json", String.class);
+    }
 
-    Assert.assertEquals(
-        type + " array entries should come up as strings",
-        arrayElement.getName() + ", " + arrayElement.getName(),
-        objectArray[0].getClass().getName() + ", " + objectArray[1].getClass().getName()
-    );
-    rs.close();
-    stmt.close();
-  }
+    private void jsonArrayGet(String type, Class<?> arrayElement) throws SQLException {
+        PreparedStatement stmt = con.prepareStatement("SELECT '{[2],[3]}'::" + type + "[]");
+        ResultSet rs = stmt.executeQuery();
+        assertTrue(rs.next());
+        Array array = rs.getArray(1);
+        Object[] objectArray = (Object[]) array.getArray();
+        Assert.assertEquals(
+                "'{[2],[3]}'::" + type + "[] should come up as Java array with two entries",
+                "[[2], [3]]",
+                Arrays.deepToString(objectArray)
+        );
+
+        Assert.assertEquals(
+                type + " array entries should come up as strings",
+                arrayElement.getName() + ", " + arrayElement.getName(),
+                objectArray[0].getClass().getName() + ", " + objectArray[1].getClass().getName()
+        );
+        rs.close();
+        stmt.close();
+    }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc4/PGCopyInputStreamTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/PGCopyInputStreamTest.java
index c92cc50..2fb9546 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc4/PGCopyInputStreamTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/PGCopyInputStreamTest.java
@@ -25,102 +25,102 @@ import java.util.ArrayList;
 import java.util.List;
 
 class PGCopyInputStreamTest {
-  private static final int NUM_TEST_ROWS = 4;
-  /**
-   * COPY .. TO STDOUT terminates each row of data with a LF regardless of platform so the size of
-   * each output row will always be two, one byte for the character and one for the LF.
-   */
-  private static final int COPY_ROW_SIZE = 2; // One character plus newline
-  private static final int COPY_DATA_SIZE = NUM_TEST_ROWS * COPY_ROW_SIZE;
-  private static final String COPY_SQL = String.format("COPY (SELECT i FROM generate_series(0, %d - 1) i) TO STDOUT", NUM_TEST_ROWS);
+    private static final int NUM_TEST_ROWS = 4;
+    /**
+     * COPY .. TO STDOUT terminates each row of data with a LF regardless of platform so the size of
+     * each output row will always be two, one byte for the character and one for the LF.
+     */
+    private static final int COPY_ROW_SIZE = 2; // One character plus newline
+    private static final int COPY_DATA_SIZE = NUM_TEST_ROWS * COPY_ROW_SIZE;
+    private static final String COPY_SQL = String.format("COPY (SELECT i FROM generate_series(0, %d - 1) i) TO STDOUT", NUM_TEST_ROWS);
 
-  private Connection conn;
+    private Connection conn;
 
-  @BeforeEach
-  void setUp() throws Exception {
-    conn = TestUtil.openDB();
-  }
-
-  @AfterEach
-  void tearDown() throws SQLException {
-    TestUtil.closeDB(conn);
-  }
-
-  @Test
-  void readBytesCorrectlyHandlesEof() throws SQLException, IOException {
-    PGConnection pgConn = conn.unwrap(PGConnection.class);
-    try (PGCopyInputStream in = new PGCopyInputStream(pgConn, COPY_SQL)) {
-      // large enough to read everything on the next step
-      byte[] buf = new byte[COPY_DATA_SIZE + 100];
-      assertEquals(COPY_DATA_SIZE, in.read(buf), "First read should get the entire table into the byte array");
-      assertEquals(-1, in.read(buf), "Subsequent read should return -1 to indicate stream is finished");
+    private static List<byte[]> readFully(PGCopyInputStream in, int size) throws SQLException, IOException {
+        List<byte[]> chunks = new ArrayList<>();
+        do {
+            byte[] buf = new byte[size];
+            if (in.read(buf) <= 0) {
+                break;
+            }
+            chunks.add(buf);
+        } while (true);
+        return chunks;
     }
-  }
 
-  @Test
-  void readBytesCorrectlyReadsDataInChunks() throws SQLException, IOException {
-    PGConnection pgConn = conn.unwrap(PGConnection.class);
-    try (PGCopyInputStream in = new PGCopyInputStream(pgConn, COPY_SQL)) {
-      // Read in row sized chunks
-      List<byte[]> chunks = readFully(in, COPY_ROW_SIZE);
-      assertEquals(NUM_TEST_ROWS, chunks.size(), "Should read one chunk per row");
-      assertEquals("0\n1\n2\n3\n", chunksToString(chunks), "Entire table should have be read");
+    private static List<byte[]> readFromCopyFully(PGCopyInputStream in) throws SQLException, IOException {
+        List<byte[]> chunks = new ArrayList<>();
+        byte[] buf;
+        while ((buf = in.readFromCopy()) != null) {
+            chunks.add(buf);
+        }
+        return chunks;
     }
-  }
 
-  @Test
-  void copyAPI() throws SQLException, IOException {
-    PGConnection pgConn = conn.unwrap(PGConnection.class);
-    try (PGCopyInputStream in = new PGCopyInputStream(pgConn, COPY_SQL)) {
-      List<byte[]> chunks = readFromCopyFully(in);
-      assertEquals(NUM_TEST_ROWS, chunks.size(), "Should read one chunk per row");
-      assertEquals("0\n1\n2\n3\n", chunksToString(chunks), "Entire table should have be read");
+    private static String chunksToString(List<byte[]> chunks) {
+        ByteArrayOutputStream out = new ByteArrayOutputStream();
+        chunks.forEach(chunk -> out.write(chunk, 0, chunk.length));
+        return new String(out.toByteArray(), StandardCharsets.UTF_8);
     }
-  }
 
-  @Test
-  void mixedAPI() throws SQLException, IOException {
-    PGConnection pgConn = conn.unwrap(PGConnection.class);
-    try (PGCopyInputStream in = new PGCopyInputStream(pgConn, COPY_SQL)) {
-      // First read using java.io.InputStream API
-      byte[] firstChar = new byte[1];
-      in.read(firstChar);
-      assertArrayEquals("0".getBytes(), firstChar, "IO API should read first character");
-
-      // Read remainder of first row using CopyOut API
-      assertArrayEquals("\n".getBytes(), in.readFromCopy(), "readFromCopy() should return remainder of first row");
-
-      // Then read the rest using CopyOut API
-      List<byte[]> chunks = readFromCopyFully(in);
-      assertEquals(NUM_TEST_ROWS - 1, chunks.size(), "Should read one chunk per row");
-      assertEquals("1\n2\n3\n", chunksToString(chunks), "Rest of table should have be read");
+    @BeforeEach
+    void setUp() throws Exception {
+        conn = TestUtil.openDB();
     }
-  }
 
-  private static List<byte[]> readFully(PGCopyInputStream in, int size) throws SQLException, IOException {
-    List<byte[]> chunks = new ArrayList<>();
-    do {
-      byte[] buf = new byte[size];
-      if (in.read(buf) <= 0) {
-        break;
-      }
-      chunks.add(buf);
-    } while (true);
-    return chunks;
-  }
-
-  private static List<byte[]> readFromCopyFully(PGCopyInputStream in) throws SQLException, IOException {
-    List<byte[]> chunks = new ArrayList<>();
-    byte[] buf;
-    while ((buf = in.readFromCopy()) != null) {
-      chunks.add(buf);
+    @AfterEach
+    void tearDown() throws SQLException {
+        TestUtil.closeDB(conn);
     }
-    return chunks;
-  }
 
-  private static String chunksToString(List<byte[]> chunks) {
-    ByteArrayOutputStream out = new ByteArrayOutputStream();
-    chunks.forEach(chunk -> out.write(chunk, 0, chunk.length));
-    return new String(out.toByteArray(), StandardCharsets.UTF_8);
-  }
+    @Test
+    void readBytesCorrectlyHandlesEof() throws SQLException, IOException {
+        PGConnection pgConn = conn.unwrap(PGConnection.class);
+        try (PGCopyInputStream in = new PGCopyInputStream(pgConn, COPY_SQL)) {
+            // large enough to read everything on the next step
+            byte[] buf = new byte[COPY_DATA_SIZE + 100];
+            assertEquals(COPY_DATA_SIZE, in.read(buf), "First read should get the entire table into the byte array");
+            assertEquals(-1, in.read(buf), "Subsequent read should return -1 to indicate stream is finished");
+        }
+    }
+
+    @Test
+    void readBytesCorrectlyReadsDataInChunks() throws SQLException, IOException {
+        PGConnection pgConn = conn.unwrap(PGConnection.class);
+        try (PGCopyInputStream in = new PGCopyInputStream(pgConn, COPY_SQL)) {
+            // Read in row sized chunks
+            List<byte[]> chunks = readFully(in, COPY_ROW_SIZE);
+            assertEquals(NUM_TEST_ROWS, chunks.size(), "Should read one chunk per row");
+            assertEquals("0\n1\n2\n3\n", chunksToString(chunks), "Entire table should have been read");
+        }
+    }
+
+    @Test
+    void copyAPI() throws SQLException, IOException {
+        PGConnection pgConn = conn.unwrap(PGConnection.class);
+        try (PGCopyInputStream in = new PGCopyInputStream(pgConn, COPY_SQL)) {
+            List<byte[]> chunks = readFromCopyFully(in);
+            assertEquals(NUM_TEST_ROWS, chunks.size(), "Should read one chunk per row");
+            assertEquals("0\n1\n2\n3\n", chunksToString(chunks), "Entire table should have been read");
+        }
+    }
+
+    @Test
+    void mixedAPI() throws SQLException, IOException {
+        PGConnection pgConn = conn.unwrap(PGConnection.class);
+        try (PGCopyInputStream in = new PGCopyInputStream(pgConn, COPY_SQL)) {
+            // First read using java.io.InputStream API
+            byte[] firstChar = new byte[1];
+            in.read(firstChar);
+            assertArrayEquals("0".getBytes(), firstChar, "IO API should read first character");
+
+            // Read remainder of first row using CopyOut API
+            assertArrayEquals("\n".getBytes(), in.readFromCopy(), "readFromCopy() should return remainder of first row");
+
+            // Then read the rest using CopyOut API
+            List<byte[]> chunks = readFromCopyFully(in);
+            assertEquals(NUM_TEST_ROWS - 1, chunks.size(), "Should read one chunk per row");
+            assertEquals("1\n2\n3\n", chunksToString(chunks), "Rest of table should have been read");
+        }
+    }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc4/UUIDTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/UUIDTest.java
index 05d834c..2898764 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc4/UUIDTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/UUIDTest.java
@@ -31,83 +31,83 @@ import java.util.UUID;
 @RunWith(Parameterized.class)
 public class UUIDTest extends BaseTest4 {
 
-  public UUIDTest(BinaryMode binaryMode, StringType stringType) {
-    setBinaryMode(binaryMode);
-    setStringType(stringType);
-  }
-
-  @Parameterized.Parameters(name = "binary={0}, stringType={1}")
-  public static Iterable<Object[]> data() {
-    Collection<Object[]> ids = new ArrayList<>();
-    for (BinaryMode binaryMode : BinaryMode.values()) {
-      for (StringType stringType : StringType.values()) {
-        ids.add(new Object[]{binaryMode, stringType});
-      }
+    public UUIDTest(BinaryMode binaryMode, StringType stringType) {
+        setBinaryMode(binaryMode);
+        setStringType(stringType);
     }
-    return ids;
-  }
 
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-    assumeMinimumServerVersion(ServerVersion.v8_3);
-
-    Statement stmt = con.createStatement();
-    stmt.execute("CREATE TEMP TABLE uuidtest(id uuid)");
-    stmt.close();
-  }
-
-  @Override
-  public void tearDown() throws SQLException {
-    Statement stmt = con.createStatement();
-    stmt.execute("DROP TABLE IF EXISTS uuidtest");
-    stmt.close();
-    super.tearDown();
-  }
-
-  @Test
-  public void testUUID() throws SQLException {
-    UUID uuid = UUID.randomUUID();
-    PreparedStatement ps = con.prepareStatement("INSERT INTO uuidtest VALUES (?)");
-    ps.setObject(1, uuid, Types.OTHER);
-    ps.executeUpdate();
-    ps.close();
-
-    Statement stmt = con.createStatement();
-    ResultSet rs = stmt.executeQuery("SELECT id FROM uuidtest");
-    assertTrue(rs.next());
-
-    UUID uuid2 = (UUID) rs.getObject(1);
-    assertEquals(uuid, rs.getObject(1));
-    assertEquals(uuid.toString(), rs.getString(1));
-
-    rs.close();
-    stmt.close();
-  }
-
-  @Test
-  public void testUUIDString() throws SQLException {
-    String uuid = "0dcdf03a-058c-4fa3-b210-8385cb6810d5";
-    PreparedStatement ps = con.prepareStatement("INSERT INTO uuidtest VALUES (?)");
-    ps.setString(1, uuid);
-    try {
-      ps.executeUpdate();
-      if (getStringType() == StringType.VARCHAR && preferQueryMode != PreferQueryMode.SIMPLE) {
-        Assert.fail(
-            "setString(, uuid) should fail to insert value into UUID column when stringType=varchar."
-                + " Expecting error <<column \"id\" is of type uuid but expression is of type character varying>>");
-      }
-    } catch (SQLException e) {
-      if (getStringType() == StringType.VARCHAR
-          && PSQLState.DATATYPE_MISMATCH.getState().equals(e.getSQLState())) {
-        // The following error is expected in stringType=varchar mode
-        // ERROR: column "id" is of type uuid but expression is of type character varying
-        return;
-      }
-      throw e;
-    } finally {
-      TestUtil.closeQuietly(ps);
+    @Parameterized.Parameters(name = "binary={0}, stringType={1}")
+    public static Iterable<Object[]> data() {
+        Collection<Object[]> ids = new ArrayList<>();
+        for (BinaryMode binaryMode : BinaryMode.values()) {
+            for (StringType stringType : StringType.values()) {
+                ids.add(new Object[]{binaryMode, stringType});
+            }
+        }
+        return ids;
+    }
+
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        assumeMinimumServerVersion(ServerVersion.v8_3);
+
+        Statement stmt = con.createStatement();
+        stmt.execute("CREATE TEMP TABLE uuidtest(id uuid)");
+        stmt.close();
+    }
+
+    @Override
+    public void tearDown() throws SQLException {
+        Statement stmt = con.createStatement();
+        stmt.execute("DROP TABLE IF EXISTS uuidtest");
+        stmt.close();
+        super.tearDown();
+    }
+
+    @Test
+    public void testUUID() throws SQLException {
+        UUID uuid = UUID.randomUUID();
+        PreparedStatement ps = con.prepareStatement("INSERT INTO uuidtest VALUES (?)");
+        ps.setObject(1, uuid, Types.OTHER);
+        ps.executeUpdate();
+        ps.close();
+
+        Statement stmt = con.createStatement();
+        ResultSet rs = stmt.executeQuery("SELECT id FROM uuidtest");
+        assertTrue(rs.next());
+
+        UUID uuid2 = (UUID) rs.getObject(1);
+        assertEquals(uuid, rs.getObject(1));
+        assertEquals(uuid.toString(), rs.getString(1));
+
+        rs.close();
+        stmt.close();
+    }
+
+    @Test
+    public void testUUIDString() throws SQLException {
+        String uuid = "0dcdf03a-058c-4fa3-b210-8385cb6810d5";
+        PreparedStatement ps = con.prepareStatement("INSERT INTO uuidtest VALUES (?)");
+        ps.setString(1, uuid);
+        try {
+            ps.executeUpdate();
+            if (getStringType() == StringType.VARCHAR && preferQueryMode != PreferQueryMode.SIMPLE) {
+                Assert.fail(
+                        "setString(, uuid) should fail to insert value into UUID column when stringType=varchar."
+                                + " Expecting error <<column \"id\" is of type uuid but expression is of type character varying>>");
+            }
+        } catch (SQLException e) {
+            if (getStringType() == StringType.VARCHAR
+                    && PSQLState.DATATYPE_MISMATCH.getState().equals(e.getSQLState())) {
+                // The following error is expected in stringType=varchar mode
+                // ERROR: column "id" is of type uuid but expression is of type character varying
+                return;
+            }
+            throw e;
+        } finally {
+            TestUtil.closeQuietly(ps);
+        }
     }
-  }
 
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc4/WrapperTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/WrapperTest.java
index 4d631f3..a0de237 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc4/WrapperTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/WrapperTest.java
@@ -25,117 +25,117 @@ import java.sql.Statement;
 
 class WrapperTest {
 
-  private Connection conn;
-  private Statement statement;
+    private Connection conn;
+    private Statement statement;
 
-  @BeforeEach
-  void setUp() throws Exception {
-    conn = TestUtil.openDB();
-    statement = conn.prepareStatement("SELECT 1");
-  }
-
-  @AfterEach
-  void tearDown() throws SQLException {
-    statement.close();
-    TestUtil.closeDB(conn);
-  }
-
-  /**
-   * This interface is private, and so cannot be supported by any wrapper.
-   */
-  private interface PrivateInterface {
-  }
-
-  @Test
-  void connectionIsWrapperForPrivate() throws SQLException {
-    assertFalse(conn.isWrapperFor(PrivateInterface.class));
-  }
-
-  @Test
-  void connectionIsWrapperForConnection() throws SQLException {
-    assertTrue(conn.isWrapperFor(Connection.class));
-  }
-
-  @Test
-  void connectionIsWrapperForPGConnection() throws SQLException {
-    assertTrue(conn.isWrapperFor(PGConnection.class));
-  }
-
-  @Test
-  void connectionUnwrapPrivate() throws SQLException {
-    try {
-      conn.unwrap(PrivateInterface.class);
-      fail("unwrap of non-wrapped interface should fail");
-    } catch (SQLException e) {
+    @BeforeEach
+    void setUp() throws Exception {
+        conn = TestUtil.openDB();
+        statement = conn.prepareStatement("SELECT 1");
     }
-  }
 
-  @Test
-  void connectionUnwrapConnection() throws SQLException {
-    Object v = conn.unwrap(Connection.class);
-    assertNotNull(v);
-    assertTrue(v instanceof Connection, "connection.unwrap(PGConnection.class) should return PGConnection instance"
-        + ", actual instance is " + v);
-  }
-
-  @Test
-  void connectionUnwrapPGConnection() throws SQLException {
-    Object v = conn.unwrap(PGConnection.class);
-    assertNotNull(v);
-    assertTrue(v instanceof PGConnection, "connection.unwrap(PGConnection.class) should return PGConnection instance"
-        + ", actual instance is " + v);
-  }
-
-  @Test
-  void connectionUnwrapPGDataSource() throws SQLException {
-    PGSimpleDataSource dataSource = new PGSimpleDataSource();
-    dataSource.setDatabaseName(TestUtil.getDatabase());
-    dataSource.setServerName(TestUtil.getServer());
-    dataSource.setPortNumber(TestUtil.getPort());
-    Connection connection = dataSource.getConnection(TestUtil.getUser(), TestUtil.getPassword());
-    assertNotNull(connection, "Unable to obtain a connection from PGSimpleDataSource");
-    Object v = connection.unwrap(PGConnection.class);
-    assertTrue(v instanceof PGConnection,
-        "connection.unwrap(PGConnection.class) should return PGConnection instance"
-            + ", actual instance is " + v);
-  }
-
-  @Test
-  void statementIsWrapperForPrivate() throws SQLException {
-    assertFalse(statement.isWrapperFor(PrivateInterface.class), "Should not be a wrapper for PrivateInterface");
-  }
-
-  @Test
-  void statementIsWrapperForStatement() throws SQLException {
-    assertTrue(statement.isWrapperFor(Statement.class), "Should be a wrapper for Statement");
-  }
-
-  @Test
-  void statementIsWrapperForPGStatement() throws SQLException {
-    assertTrue(statement.isWrapperFor(PGStatement.class), "Should be a wrapper for PGStatement");
-  }
-
-  @Test
-  void statementUnwrapPrivate() throws SQLException {
-    try {
-      statement.unwrap(PrivateInterface.class);
-      fail("unwrap of non-wrapped interface should fail");
-    } catch (SQLException e) {
+    @AfterEach
+    void tearDown() throws SQLException {
+        statement.close();
+        TestUtil.closeDB(conn);
     }
-  }
 
-  @Test
-  void statementUnwrapStatement() throws SQLException {
-    Object v = statement.unwrap(Statement.class);
-    assertNotNull(v);
-    assertTrue(v instanceof Statement, "Should be instance of Statement, actual instance of " + v);
-  }
+    @Test
+    void connectionIsWrapperForPrivate() throws SQLException {
+        assertFalse(conn.isWrapperFor(PrivateInterface.class));
+    }
 
-  @Test
-  void statementUnwrapPGStatement() throws SQLException {
-    Object v = statement.unwrap(PGStatement.class);
-    assertNotNull(v);
-    assertTrue(v instanceof PGStatement, "Should be instance of PGStatement, actual instance of " + v);
-  }
+    @Test
+    void connectionIsWrapperForConnection() throws SQLException {
+        assertTrue(conn.isWrapperFor(Connection.class));
+    }
+
+    @Test
+    void connectionIsWrapperForPGConnection() throws SQLException {
+        assertTrue(conn.isWrapperFor(PGConnection.class));
+    }
+
+    @Test
+    void connectionUnwrapPrivate() throws SQLException {
+        try {
+            conn.unwrap(PrivateInterface.class);
+            fail("unwrap of non-wrapped interface should fail");
+        } catch (SQLException e) {
+        }
+    }
+
+    @Test
+    void connectionUnwrapConnection() throws SQLException {
+        Object v = conn.unwrap(Connection.class);
+        assertNotNull(v);
+        assertTrue(v instanceof Connection, "connection.unwrap(PGConnection.class) should return PGConnection instance"
+                + ", actual instance is " + v);
+    }
+
+    @Test
+    void connectionUnwrapPGConnection() throws SQLException {
+        Object v = conn.unwrap(PGConnection.class);
+        assertNotNull(v);
+        assertTrue(v instanceof PGConnection, "connection.unwrap(PGConnection.class) should return PGConnection instance"
+                + ", actual instance is " + v);
+    }
+
+    @Test
+    void connectionUnwrapPGDataSource() throws SQLException {
+        PGSimpleDataSource dataSource = new PGSimpleDataSource();
+        dataSource.setDatabaseName(TestUtil.getDatabase());
+        dataSource.setServerName(TestUtil.getServer());
+        dataSource.setPortNumber(TestUtil.getPort());
+        Connection connection = dataSource.getConnection(TestUtil.getUser(), TestUtil.getPassword());
+        assertNotNull(connection, "Unable to obtain a connection from PGSimpleDataSource");
+        Object v = connection.unwrap(PGConnection.class);
+        assertTrue(v instanceof PGConnection,
+                "connection.unwrap(PGConnection.class) should return PGConnection instance"
+                        + ", actual instance is " + v);
+    }
+
+    @Test
+    void statementIsWrapperForPrivate() throws SQLException {
+        assertFalse(statement.isWrapperFor(PrivateInterface.class), "Should not be a wrapper for PrivateInterface");
+    }
+
+    @Test
+    void statementIsWrapperForStatement() throws SQLException {
+        assertTrue(statement.isWrapperFor(Statement.class), "Should be a wrapper for Statement");
+    }
+
+    @Test
+    void statementIsWrapperForPGStatement() throws SQLException {
+        assertTrue(statement.isWrapperFor(PGStatement.class), "Should be a wrapper for PGStatement");
+    }
+
+    @Test
+    void statementUnwrapPrivate() throws SQLException {
+        try {
+            statement.unwrap(PrivateInterface.class);
+            fail("unwrap of non-wrapped interface should fail");
+        } catch (SQLException e) {
+        }
+    }
+
+    @Test
+    void statementUnwrapStatement() throws SQLException {
+        Object v = statement.unwrap(Statement.class);
+        assertNotNull(v);
+        assertTrue(v instanceof Statement, "Should be instance of Statement, actual instance of " + v);
+    }
+
+    @Test
+    void statementUnwrapPGStatement() throws SQLException {
+        Object v = statement.unwrap(PGStatement.class);
+        assertNotNull(v);
+        assertTrue(v instanceof PGStatement, "Should be instance of PGStatement, actual instance of " + v);
+    }
+
+    /**
+     * This interface is private, and so cannot be supported by any wrapper.
+     */
+    private interface PrivateInterface {
+    }
 
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc4/XmlTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/XmlTest.java
index 74a4925..4d0b97c 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc4/XmlTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/XmlTest.java
@@ -50,347 +50,347 @@ import javax.xml.transform.stream.StreamResult;
 import javax.xml.transform.stream.StreamSource;
 
 public class XmlTest extends BaseTest4 {
-  private static final String _xsl =
-          "<xsl:stylesheet version=\"1.0\" xmlns:xsl=\"http://www.w3.org/1999/XSL/Transform\"><xsl:output method=\"text\" indent=\"no\" /><xsl:template match=\"/a\"><xsl:for-each select=\"/a/b\">B<xsl:value-of select=\".\" /></xsl:for-each></xsl:template></xsl:stylesheet>";
-  private static final String _xmlDocument = "<a><b>1</b><b>2</b></a>";
-  private static final String _xmlFragment = "<a>f</a><b>g</b>";
+    private static final String _xsl =
+            "<xsl:stylesheet version=\"1.0\" xmlns:xsl=\"http://www.w3.org/1999/XSL/Transform\"><xsl:output method=\"text\" indent=\"no\" /><xsl:template match=\"/a\"><xsl:for-each select=\"/a/b\">B<xsl:value-of select=\".\" /></xsl:for-each></xsl:template></xsl:stylesheet>";
+    private static final String _xmlDocument = "<a><b>1</b><b>2</b></a>";
+    private static final String _xmlFragment = "<a>f</a><b>g</b>";
 
-  private final Transformer xslTransformer;
-  private final Transformer identityTransformer;
+    private final Transformer xslTransformer;
+    private final Transformer identityTransformer;
 
-  public XmlTest() throws Exception {
-    TransformerFactory factory = TransformerFactory.newInstance();
-    xslTransformer = factory.newTransformer(new StreamSource(new StringReader(_xsl)));
-    xslTransformer.setErrorListener(new Ignorer());
-    identityTransformer = factory.newTransformer();
-  }
-
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-    assumeMinimumServerVersion(ServerVersion.v8_3);
-    assumeTrue("Server has been compiled --with-libxml", isXmlEnabled(con));
-
-    Statement stmt = con.createStatement();
-    stmt.execute("CREATE TEMP TABLE xmltest(id int primary key, val xml)");
-    stmt.execute("INSERT INTO xmltest VALUES (1, '" + _xmlDocument + "')");
-    stmt.execute("INSERT INTO xmltest VALUES (2, '" + _xmlFragment + "')");
-    stmt.close();
-  }
-
-  private static boolean isXmlEnabled(Connection conn) {
-    try {
-      Statement stmt = conn.createStatement();
-      ResultSet rs = stmt.executeQuery("SELECT '<a>b</a>'::xml");
-      rs.close();
-      stmt.close();
-      return true;
-    } catch (SQLException sqle) {
-      return false;
-    }
-  }
-
-  @Override
-  public void tearDown() throws SQLException {
-    Statement stmt = con.createStatement();
-    stmt.execute("DROP TABLE IF EXISTS xmltest");
-    stmt.close();
-    super.tearDown();
-  }
-
-  private ResultSet getRS() throws SQLException {
-    Statement stmt = con.createStatement();
-    return stmt.executeQuery("SELECT val FROM xmltest");
-  }
-
-  @Test
-  public void testUpdateRS() throws SQLException {
-    Statement stmt = con.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_UPDATABLE);
-    ResultSet rs = stmt.executeQuery("SELECT id, val FROM xmltest");
-    assertTrue(rs.next());
-    SQLXML xml = rs.getSQLXML(2);
-    rs.updateSQLXML(2, xml);
-    rs.updateRow();
-  }
-
-  @Test
-  public void testDOMParse() throws SQLException {
-    ResultSet rs = getRS();
-
-    assertTrue(rs.next());
-    SQLXML xml = rs.getSQLXML(1);
-    DOMSource source = xml.getSource(DOMSource.class);
-    Node doc = source.getNode();
-    Node root = doc.getFirstChild();
-    assertEquals("a", root.getNodeName());
-    Node first = root.getFirstChild();
-    assertEquals("b", first.getNodeName());
-    assertEquals("1", first.getTextContent());
-    Node last = root.getLastChild();
-    assertEquals("b", last.getNodeName());
-    assertEquals("2", last.getTextContent());
-
-    assertTrue(rs.next());
-    try {
-      xml = rs.getSQLXML(1);
-      source = xml.getSource(DOMSource.class);
-      fail("Can't retrieve a fragment.");
-    } catch (SQLException sqle) {
-    }
-  }
-
-  private void transform(Source source) throws Exception {
-    StringWriter writer = new StringWriter();
-    StreamResult result = new StreamResult(writer);
-    xslTransformer.transform(source, result);
-    assertEquals("B1B2", writer.toString());
-  }
-
-  private <T extends Source> void testRead(Class<T> sourceClass) throws Exception {
-    ResultSet rs = getRS();
-
-    assertTrue(rs.next());
-    SQLXML xml = rs.getSQLXML(1);
-    Source source = xml.getSource(sourceClass);
-    transform(source);
-
-    assertTrue(rs.next());
-    xml = rs.getSQLXML(1);
-    try {
-      source = xml.getSource(sourceClass);
-      transform(source);
-      fail("Can't transform a fragment.");
-    } catch (Exception sqle) {
-    }
-  }
-
-  @Test
-  public void testDOMRead() throws Exception {
-    testRead(DOMSource.class);
-  }
-
-  @Test
-  public void testSAXRead() throws Exception {
-    testRead(SAXSource.class);
-  }
-
-  @Test
-  public void testStAXRead() throws Exception {
-    testRead(StAXSource.class);
-  }
-
-  @Test
-  public void testStreamRead() throws Exception {
-    testRead(StreamSource.class);
-  }
-
-  private <T extends Result> void testWrite(Class<T> resultClass) throws Exception {
-    Statement stmt = con.createStatement();
-    stmt.execute("DELETE FROM xmltest");
-    stmt.close();
-
-    PreparedStatement ps = con.prepareStatement("INSERT INTO xmltest VALUES (?,?)");
-    SQLXML xml = con.createSQLXML();
-    Result result = xml.setResult(resultClass);
-
-    Source source = new StreamSource(new StringReader(_xmlDocument));
-    identityTransformer.transform(source, result);
-
-    ps.setInt(1, 1);
-    ps.setSQLXML(2, xml);
-    ps.executeUpdate();
-    ps.close();
-
-    ResultSet rs = getRS();
-    assertTrue(rs.next());
-
-    // DOMResults tack on the additional <?xml ...?> header.
-    //
-    String header = "";
-    if (DOMResult.class.equals(resultClass)) {
-      header = "<?xml version=\"1.0\" standalone=\"no\"?>";
+    public XmlTest() throws Exception {
+        TransformerFactory factory = TransformerFactory.newInstance();
+        xslTransformer = factory.newTransformer(new StreamSource(new StringReader(_xsl)));
+        xslTransformer.setErrorListener(new Ignorer());
+        identityTransformer = factory.newTransformer();
     }
 
-    assertEquals(header + _xmlDocument, rs.getString(1));
-    xml = rs.getSQLXML(1);
-    assertEquals(header + _xmlDocument, xml.getString());
-
-    assertTrue(!rs.next());
-  }
-
-  @Test
-  public void testDomWrite() throws Exception {
-    testWrite(DOMResult.class);
-  }
-
-  @Test
-  public void testStAXWrite() throws Exception {
-    testWrite(StAXResult.class);
-  }
-
-  @Test
-  public void testStreamWrite() throws Exception {
-    testWrite(StreamResult.class);
-  }
-
-  @Test
-  public void testSAXWrite() throws Exception {
-    testWrite(SAXResult.class);
-  }
-
-  @Test
-  public void testFree() throws SQLException {
-    ResultSet rs = getRS();
-    assertTrue(rs.next());
-    SQLXML xml = rs.getSQLXML(1);
-    xml.free();
-    xml.free();
-    try {
-      xml.getString();
-      fail("Not freed.");
-    } catch (SQLException sqle) {
-    }
-  }
-
-  @Test
-  public void testGetObject() throws SQLException {
-    ResultSet rs = getRS();
-    assertTrue(rs.next());
-    SQLXML xml = (SQLXML) rs.getObject(1);
-  }
-
-  private SQLXML newConsumableSQLXML(String content) throws Exception {
-    SQLXML xml = (SQLXML) Proxy.newProxyInstance(getClass().getClassLoader(), new Class[]{SQLXML.class}, new InvocationHandler() {
-      SQLXML xml = con.createSQLXML();
-      boolean consumed = false;
-      Set<Method> consumingMethods = new HashSet<>(Arrays.asList(
-          SQLXML.class.getMethod("getBinaryStream"),
-          SQLXML.class.getMethod("getCharacterStream"),
-          SQLXML.class.getMethod("getString")
-      ));
-
-      @Override
-      public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
-        if (consumingMethods.contains(method)) {
-          if (consumed) {
-            fail("SQLXML-object already consumed");
-          } else {
-            consumed = true;
-          }
+    private static boolean isXmlEnabled(Connection conn) {
+        try {
+            Statement stmt = conn.createStatement();
+            ResultSet rs = stmt.executeQuery("SELECT '<a>b</a>'::xml");
+            rs.close();
+            stmt.close();
+            return true;
+        } catch (SQLException sqle) {
+            return false;
         }
-        return method.invoke(xml, args);
-      }
-    });
-    xml.setString(content);
-    return xml;
-  }
-
-  @Test
-  public void testSet() throws Exception {
-    Statement stmt = con.createStatement();
-    stmt.execute("DELETE FROM xmltest");
-    stmt.close();
-
-    PreparedStatement ps = con.prepareStatement("INSERT INTO xmltest VALUES (?,?)");
-    ps.setInt(1, 1);
-    ps.setSQLXML(2, newConsumableSQLXML(_xmlDocument));
-    assertEquals(1, ps.executeUpdate());
-    ps.setInt(1, 2);
-    ps.setObject(2, newConsumableSQLXML(_xmlDocument));
-    assertEquals(1, ps.executeUpdate());
-    ResultSet rs = getRS();
-    assertTrue(rs.next());
-    Object o = rs.getObject(1);
-    assertTrue(o instanceof SQLXML);
-    assertEquals(_xmlDocument, ((SQLXML) o).getString());
-    assertTrue(rs.next());
-    assertEquals(_xmlDocument, rs.getSQLXML(1).getString());
-    assertTrue(!rs.next());
-  }
-
-  @Test
-  public void testSetNull() throws SQLException {
-    Statement stmt = con.createStatement();
-    stmt.execute("DELETE FROM xmltest");
-    stmt.close();
-
-    PreparedStatement ps = con.prepareStatement("INSERT INTO xmltest VALUES (?,?)");
-    ps.setInt(1, 1);
-    ps.setNull(2, Types.SQLXML);
-    ps.executeUpdate();
-    ps.setInt(1, 2);
-    ps.setObject(2, null, Types.SQLXML);
-    ps.executeUpdate();
-    SQLXML xml = con.createSQLXML();
-    xml.setString(null);
-    ps.setInt(1, 3);
-    ps.setObject(2, xml);
-    ps.executeUpdate();
-    ps.close();
-
-    ResultSet rs = getRS();
-    assertTrue(rs.next());
-    assertNull(rs.getObject(1));
-    assertTrue(rs.next());
-    assertNull(rs.getSQLXML(1));
-    assertTrue(rs.next());
-    assertNull(rs.getSQLXML("val"));
-    assertTrue(!rs.next());
-  }
-
-  @Test
-  public void testEmpty() throws SQLException, IOException {
-    SQLXML xml = con.createSQLXML();
-
-    try {
-      xml.getString();
-      fail("Cannot retrieve data from an uninitialized object.");
-    } catch (SQLException sqle) {
-    }
-
-    try {
-      xml.getSource(null);
-      fail("Cannot retrieve data from an uninitialized object.");
-    } catch (SQLException sqle) {
-    }
-  }
-
-  @Test
-  public void testDoubleSet() throws SQLException {
-    SQLXML xml = con.createSQLXML();
-
-    xml.setString("");
-
-    try {
-      xml.setString("");
-      fail("Can't set a value after its been initialized.");
-    } catch (SQLException sqle) {
-    }
-
-    ResultSet rs = getRS();
-    assertTrue(rs.next());
-    xml = rs.getSQLXML(1);
-    try {
-      xml.setString("");
-      fail("Can't set a value after its been initialized.");
-    } catch (SQLException sqle) {
-    }
-  }
-
-  // Don't print warning and errors to System.err, it just
-  // clutters the display.
-  static class Ignorer implements ErrorListener {
-    @Override
-    public void error(TransformerException t) {
     }
 
     @Override
-    public void fatalError(TransformerException t) {
+    public void setUp() throws Exception {
+        super.setUp();
+        assumeMinimumServerVersion(ServerVersion.v8_3);
+        assumeTrue("Server has been compiled --with-libxml", isXmlEnabled(con));
+
+        Statement stmt = con.createStatement();
+        stmt.execute("CREATE TEMP TABLE xmltest(id int primary key, val xml)");
+        stmt.execute("INSERT INTO xmltest VALUES (1, '" + _xmlDocument + "')");
+        stmt.execute("INSERT INTO xmltest VALUES (2, '" + _xmlFragment + "')");
+        stmt.close();
     }
 
     @Override
-    public void warning(TransformerException t) {
+    public void tearDown() throws SQLException {
+        Statement stmt = con.createStatement();
+        stmt.execute("DROP TABLE IF EXISTS xmltest");
+        stmt.close();
+        super.tearDown();
+    }
+
+    private ResultSet getRS() throws SQLException {
+        Statement stmt = con.createStatement();
+        return stmt.executeQuery("SELECT val FROM xmltest");
+    }
+
+    @Test
+    public void testUpdateRS() throws SQLException {
+        Statement stmt = con.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_UPDATABLE);
+        ResultSet rs = stmt.executeQuery("SELECT id, val FROM xmltest");
+        assertTrue(rs.next());
+        SQLXML xml = rs.getSQLXML(2);
+        rs.updateSQLXML(2, xml);
+        rs.updateRow();
+    }
+
+    @Test
+    public void testDOMParse() throws SQLException {
+        ResultSet rs = getRS();
+
+        assertTrue(rs.next());
+        SQLXML xml = rs.getSQLXML(1);
+        DOMSource source = xml.getSource(DOMSource.class);
+        Node doc = source.getNode();
+        Node root = doc.getFirstChild();
+        assertEquals("a", root.getNodeName());
+        Node first = root.getFirstChild();
+        assertEquals("b", first.getNodeName());
+        assertEquals("1", first.getTextContent());
+        Node last = root.getLastChild();
+        assertEquals("b", last.getNodeName());
+        assertEquals("2", last.getTextContent());
+
+        assertTrue(rs.next());
+        try {
+            xml = rs.getSQLXML(1);
+            source = xml.getSource(DOMSource.class);
+            fail("Can't retrieve a fragment.");
+        } catch (SQLException sqle) {
+        }
+    }
+
+    private void transform(Source source) throws Exception {
+        StringWriter writer = new StringWriter();
+        StreamResult result = new StreamResult(writer);
+        xslTransformer.transform(source, result);
+        assertEquals("B1B2", writer.toString());
+    }
+
+    private <T extends Source> void testRead(Class<T> sourceClass) throws Exception {
+        ResultSet rs = getRS();
+
+        assertTrue(rs.next());
+        SQLXML xml = rs.getSQLXML(1);
+        Source source = xml.getSource(sourceClass);
+        transform(source);
+
+        assertTrue(rs.next());
+        xml = rs.getSQLXML(1);
+        try {
+            source = xml.getSource(sourceClass);
+            transform(source);
+            fail("Can't transform a fragment.");
+        } catch (Exception sqle) {
+        }
+    }
+
+    @Test
+    public void testDOMRead() throws Exception {
+        testRead(DOMSource.class);
+    }
+
+    @Test
+    public void testSAXRead() throws Exception {
+        testRead(SAXSource.class);
+    }
+
+    @Test
+    public void testStAXRead() throws Exception {
+        testRead(StAXSource.class);
+    }
+
+    @Test
+    public void testStreamRead() throws Exception {
+        testRead(StreamSource.class);
+    }
+
+    private <T extends Result> void testWrite(Class<T> resultClass) throws Exception {
+        Statement stmt = con.createStatement();
+        stmt.execute("DELETE FROM xmltest");
+        stmt.close();
+
+        PreparedStatement ps = con.prepareStatement("INSERT INTO xmltest VALUES (?,?)");
+        SQLXML xml = con.createSQLXML();
+        Result result = xml.setResult(resultClass);
+
+        Source source = new StreamSource(new StringReader(_xmlDocument));
+        identityTransformer.transform(source, result);
+
+        ps.setInt(1, 1);
+        ps.setSQLXML(2, xml);
+        ps.executeUpdate();
+        ps.close();
+
+        ResultSet rs = getRS();
+        assertTrue(rs.next());
+
+        // DOMResults tack on the additional <?xml ...?> header.
+        //
+        String header = "";
+        if (DOMResult.class.equals(resultClass)) {
+            header = "<?xml version=\"1.0\" standalone=\"no\"?>";
+        }
+
+        assertEquals(header + _xmlDocument, rs.getString(1));
+        xml = rs.getSQLXML(1);
+        assertEquals(header + _xmlDocument, xml.getString());
+
+        assertTrue(!rs.next());
+    }
+
+    @Test
+    public void testDomWrite() throws Exception {
+        testWrite(DOMResult.class);
+    }
+
+    @Test
+    public void testStAXWrite() throws Exception {
+        testWrite(StAXResult.class);
+    }
+
+    @Test
+    public void testStreamWrite() throws Exception {
+        testWrite(StreamResult.class);
+    }
+
+    @Test
+    public void testSAXWrite() throws Exception {
+        testWrite(SAXResult.class);
+    }
+
+    @Test
+    public void testFree() throws SQLException {
+        ResultSet rs = getRS();
+        assertTrue(rs.next());
+        SQLXML xml = rs.getSQLXML(1);
+        xml.free();
+        xml.free();
+        try {
+            xml.getString();
+            fail("Not freed.");
+        } catch (SQLException sqle) {
+        }
+    }
+
+    @Test
+    public void testGetObject() throws SQLException {
+        ResultSet rs = getRS();
+        assertTrue(rs.next());
+        SQLXML xml = (SQLXML) rs.getObject(1);
+    }
+
+    private SQLXML newConsumableSQLXML(String content) throws Exception {
+        SQLXML xml = (SQLXML) Proxy.newProxyInstance(getClass().getClassLoader(), new Class[]{SQLXML.class}, new InvocationHandler() {
+            SQLXML xml = con.createSQLXML();
+            boolean consumed = false;
+            Set<Method> consumingMethods = new HashSet<>(Arrays.asList(
+                    SQLXML.class.getMethod("getBinaryStream"),
+                    SQLXML.class.getMethod("getCharacterStream"),
+                    SQLXML.class.getMethod("getString")
+            ));
+
+            @Override
+            public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
+                if (consumingMethods.contains(method)) {
+                    if (consumed) {
+                        fail("SQLXML-object already consumed");
+                    } else {
+                        consumed = true;
+                    }
+                }
+                return method.invoke(xml, args);
+            }
+        });
+        xml.setString(content);
+        return xml;
+    }
+
+    @Test
+    public void testSet() throws Exception {
+        Statement stmt = con.createStatement();
+        stmt.execute("DELETE FROM xmltest");
+        stmt.close();
+
+        PreparedStatement ps = con.prepareStatement("INSERT INTO xmltest VALUES (?,?)");
+        ps.setInt(1, 1);
+        ps.setSQLXML(2, newConsumableSQLXML(_xmlDocument));
+        assertEquals(1, ps.executeUpdate());
+        ps.setInt(1, 2);
+        ps.setObject(2, newConsumableSQLXML(_xmlDocument));
+        assertEquals(1, ps.executeUpdate());
+        ResultSet rs = getRS();
+        assertTrue(rs.next());
+        Object o = rs.getObject(1);
+        assertTrue(o instanceof SQLXML);
+        assertEquals(_xmlDocument, ((SQLXML) o).getString());
+        assertTrue(rs.next());
+        assertEquals(_xmlDocument, rs.getSQLXML(1).getString());
+        assertTrue(!rs.next());
+    }
+
+    @Test
+    public void testSetNull() throws SQLException {
+        Statement stmt = con.createStatement();
+        stmt.execute("DELETE FROM xmltest");
+        stmt.close();
+
+        PreparedStatement ps = con.prepareStatement("INSERT INTO xmltest VALUES (?,?)");
+        ps.setInt(1, 1);
+        ps.setNull(2, Types.SQLXML);
+        ps.executeUpdate();
+        ps.setInt(1, 2);
+        ps.setObject(2, null, Types.SQLXML);
+        ps.executeUpdate();
+        SQLXML xml = con.createSQLXML();
+        xml.setString(null);
+        ps.setInt(1, 3);
+        ps.setObject(2, xml);
+        ps.executeUpdate();
+        ps.close();
+
+        ResultSet rs = getRS();
+        assertTrue(rs.next());
+        assertNull(rs.getObject(1));
+        assertTrue(rs.next());
+        assertNull(rs.getSQLXML(1));
+        assertTrue(rs.next());
+        assertNull(rs.getSQLXML("val"));
+        assertTrue(!rs.next());
+    }
+
+    @Test
+    public void testEmpty() throws SQLException, IOException {
+        SQLXML xml = con.createSQLXML();
+
+        try {
+            xml.getString();
+            fail("Cannot retrieve data from an uninitialized object.");
+        } catch (SQLException sqle) {
+        }
+
+        try {
+            xml.getSource(null);
+            fail("Cannot retrieve data from an uninitialized object.");
+        } catch (SQLException sqle) {
+        }
+    }
+
+    @Test
+    public void testDoubleSet() throws SQLException {
+        SQLXML xml = con.createSQLXML();
+
+        xml.setString("");
+
+        try {
+            xml.setString("");
+            fail("Can't set a value after its been initialized.");
+        } catch (SQLException sqle) {
+        }
+
+        ResultSet rs = getRS();
+        assertTrue(rs.next());
+        xml = rs.getSQLXML(1);
+        try {
+            xml.setString("");
+            fail("Can't set a value after its been initialized.");
+        } catch (SQLException sqle) {
+        }
+    }
+
+    // Don't print warnings and errors to System.err, it just
+    // clutters the display.
+    static class Ignorer implements ErrorListener {
+        @Override
+        public void error(TransformerException t) {
+        }
+
+        @Override
+        public void fatalError(TransformerException t) {
+        }
+
+        @Override
+        public void warning(TransformerException t) {
+        }
     }
-  }
 
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc4/jdbc41/AbortTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/jdbc41/AbortTest.java
index e619cd9..19da0ba 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc4/jdbc41/AbortTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/jdbc41/AbortTest.java
@@ -23,74 +23,74 @@ import java.util.concurrent.TimeUnit;
 
 public class AbortTest extends BaseTest4 {
 
-  private static final int SLEEP_SECONDS = 30;
-  private static final int SLEEP_MILLISECONDS = SLEEP_SECONDS * 1000;
+    private static final int SLEEP_SECONDS = 30;
+    private static final int SLEEP_MILLISECONDS = SLEEP_SECONDS * 1000;
 
-  @Test
-  public void testAbort() throws SQLException, InterruptedException, ExecutionException {
-    final ExecutorService executor = Executors.newFixedThreadPool(2);
-    long startTime = System.currentTimeMillis();
-    Future<SQLException> workerFuture = executor.submit(new Callable<SQLException>() {
-      public SQLException call() {
+    @Test
+    public void testAbort() throws SQLException, InterruptedException, ExecutionException {
+        final ExecutorService executor = Executors.newFixedThreadPool(2);
+        long startTime = System.currentTimeMillis();
+        Future<SQLException> workerFuture = executor.submit(new Callable<SQLException>() {
+            public SQLException call() {
+                try {
+                    Statement stmt = con.createStatement();
+                    stmt.execute("SELECT pg_sleep(" + SLEEP_SECONDS + ")");
+                } catch (SQLException e) {
+                    return e;
+                }
+                return null;
+            }
+        });
+        Future<SQLException> abortFuture = executor.submit(new Callable<SQLException>() {
+            public SQLException call() {
+                ExecutorService abortExecutor = Executors.newSingleThreadExecutor();
+                try {
+                    con.abort(abortExecutor);
+                } catch (SQLException e) {
+                    return e;
+                }
+                abortExecutor.shutdown();
+                try {
+                    abortExecutor.awaitTermination(SLEEP_SECONDS, TimeUnit.SECONDS);
+                } catch (InterruptedException e) {
+                }
+                return null;
+            }
+        });
+        SQLException workerException = workerFuture.get();
+        long endTime = System.currentTimeMillis();
+        SQLException abortException = abortFuture.get();
+        if (abortException != null) {
+            throw abortException;
+        }
+        if (workerException == null) {
+            fail("Statement execution should have been aborted, thus throwing an exception");
+        }
+        // If the run consumed at least 95% of the sleep time, assume the abort failed and we
+        // simply waited out the full sleep.
+        assertTrue(endTime - startTime < SLEEP_MILLISECONDS * 95 / 100);
+        assertTrue(con.isClosed());
+    }
+
+    /**
+     * According to the javadoc, calling abort on a closed connection is a no-op.
+     */
+    @Test
+    public void testAbortOnClosedConnection() throws SQLException {
+        con.close();
         try {
-          Statement stmt = con.createStatement();
-          stmt.execute("SELECT pg_sleep(" + SLEEP_SECONDS + ")");
+            con.abort(Executors.newSingleThreadExecutor());
         } catch (SQLException e) {
-          return e;
+            fail(e.getMessage());
         }
-        return null;
-      }
-    });
-    Future<SQLException> abortFuture = executor.submit(new Callable<SQLException>() {
-      public SQLException call() {
-        ExecutorService abortExecutor = Executors.newSingleThreadExecutor();
-        try {
-          con.abort(abortExecutor);
-        } catch (SQLException e) {
-          return e;
-        }
-        abortExecutor.shutdown();
-        try {
-          abortExecutor.awaitTermination(SLEEP_SECONDS, TimeUnit.SECONDS);
-        } catch (InterruptedException e) {
-        }
-        return null;
-      }
-    });
-    SQLException workerException = workerFuture.get();
-    long endTime = System.currentTimeMillis();
-    SQLException abortException = abortFuture.get();
-    if (abortException != null) {
-      throw abortException;
     }
-    if (workerException == null) {
-      fail("Statement execution should have been aborted, thus throwing an exception");
-    }
-    // suppose that if it took at least 95% of sleep time, aborting has failed and we've waited the
-    // full time
-    assertTrue(endTime - startTime < SLEEP_MILLISECONDS * 95 / 100);
-    assertTrue(con.isClosed());
-  }
 
-  /**
-   * According to the javadoc, calling abort on a closed connection is a no-op.
-   */
-  @Test
-  public void testAbortOnClosedConnection() throws SQLException {
-    con.close();
-    try {
-      con.abort(Executors.newSingleThreadExecutor());
-    } catch (SQLException e) {
-      fail(e.getMessage());
+    /**
+     * According to the javadoc, calling abort when the {@code executor} is {@code null}
+     * results in an {@code SQLException}.
+     */
+    @Test(expected = SQLException.class)
+    public void abortWithNullExecutor() throws SQLException {
+        con.abort(null);
     }
-  }
-
-  /**
-   * According to the javadoc, calling abort when the {@code executor} is {@code null}
-   * results in SQLException
-   */
-  @Test(expected = SQLException.class)
-  public void abortWithNullExecutor() throws SQLException {
-    con.abort(null);
-  }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc4/jdbc41/CloseOnCompletionTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/jdbc41/CloseOnCompletionTest.java
index b6ce2aa..5ecab94 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc4/jdbc41/CloseOnCompletionTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/jdbc41/CloseOnCompletionTest.java
@@ -23,101 +23,101 @@ import java.sql.SQLException;
 import java.sql.Statement;
 
 class CloseOnCompletionTest {
-  private Connection conn;
+    private Connection conn;
 
-  @BeforeEach
-  void setUp() throws Exception {
-    conn = TestUtil.openDB();
-    TestUtil.createTable(conn, "table1", "id integer");
-  }
+    @BeforeEach
+    void setUp() throws Exception {
+        conn = TestUtil.openDB();
+        TestUtil.createTable(conn, "table1", "id integer");
+    }
 
-  @AfterEach
-  void tearDown() throws SQLException {
-    TestUtil.dropTable(conn, "table1");
-    TestUtil.closeDB(conn);
-  }
+    @AfterEach
+    void tearDown() throws SQLException {
+        TestUtil.dropTable(conn, "table1");
+        TestUtil.closeDB(conn);
+    }
 
-  /**
-   * Test that the statement is not automatically closed if we do not ask for it.
-   */
-  @Test
-  void withoutCloseOnCompletion() throws SQLException {
-    Statement stmt = conn.createStatement();
+    /**
+     * Test that the statement is not automatically closed if we do not ask for it.
+     */
+    @Test
+    void withoutCloseOnCompletion() throws SQLException {
+        Statement stmt = conn.createStatement();
 
-    ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "*"));
-    rs.close();
-    assertFalse(stmt.isClosed());
-  }
+        ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "*"));
+        rs.close();
+        assertFalse(stmt.isClosed());
+    }
 
-  /**
-   * Test the behavior of closeOnCompletion with a single result set.
-   */
-  @Test
-  void singleResultSet() throws SQLException {
-    Statement stmt = conn.createStatement();
-    stmt.closeOnCompletion();
+    /**
+     * Test the behavior of closeOnCompletion with a single result set.
+     */
+    @Test
+    void singleResultSet() throws SQLException {
+        Statement stmt = conn.createStatement();
+        stmt.closeOnCompletion();
 
-    ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "*"));
-    rs.close();
-    assertTrue(stmt.isClosed());
-  }
+        ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "*"));
+        rs.close();
+        assertTrue(stmt.isClosed());
+    }
 
-  /**
-   * Test the behavior of closeOnCompletion with a multiple result sets.
-   */
-  @Test
-  void multipleResultSet() throws SQLException {
-    Statement stmt = conn.createStatement();
-    stmt.closeOnCompletion();
+    /**
+     * Test the behavior of closeOnCompletion with a multiple result sets.
+     */
+    @Test
+    void multipleResultSet() throws SQLException {
+        Statement stmt = conn.createStatement();
+        stmt.closeOnCompletion();
 
-    stmt.execute(TestUtil.selectSQL("table1", "*") + ";" + TestUtil.selectSQL("table1", "*") + ";");
-    ResultSet rs = stmt.getResultSet();
-    rs.close();
-    assertFalse(stmt.isClosed());
-    stmt.getMoreResults();
-    rs = stmt.getResultSet();
-    rs.close();
-    assertTrue(stmt.isClosed());
-  }
+        stmt.execute(TestUtil.selectSQL("table1", "*") + ";" + TestUtil.selectSQL("table1", "*") + ";");
+        ResultSet rs = stmt.getResultSet();
+        rs.close();
+        assertFalse(stmt.isClosed());
+        stmt.getMoreResults();
+        rs = stmt.getResultSet();
+        rs.close();
+        assertTrue(stmt.isClosed());
+    }
 
-  /**
-   * Test that when execution does not produce any result sets, closeOnCompletion has no effect
-   * (spec).
-   */
-  @Test
-  void noResultSet() throws SQLException {
-    Statement stmt = conn.createStatement();
-    stmt.closeOnCompletion();
+    /**
+     * Test that when execution does not produce any result sets, closeOnCompletion has no effect
+     * (spec).
+     */
+    @Test
+    void noResultSet() throws SQLException {
+        Statement stmt = conn.createStatement();
+        stmt.closeOnCompletion();
 
-    stmt.executeUpdate(TestUtil.insertSQL("table1", "1"));
-    assertFalse(stmt.isClosed());
-  }
+        stmt.executeUpdate(TestUtil.insertSQL("table1", "1"));
+        assertFalse(stmt.isClosed());
+    }
 
-  @Test
-  void executeTwice() throws SQLException {
-    PreparedStatement s = conn.prepareStatement("SELECT 1");
+    @Test
+    void executeTwice() throws SQLException {
+        PreparedStatement s = conn.prepareStatement("SELECT 1");
 
-    s.executeQuery();
-    s.executeQuery();
+        s.executeQuery();
+        s.executeQuery();
 
-  }
+    }
 
-  @Test
-  void closeOnCompletionExecuteTwice() throws SQLException {
-    PreparedStatement s = conn.prepareStatement("SELECT 1");
+    @Test
+    void closeOnCompletionExecuteTwice() throws SQLException {
+        PreparedStatement s = conn.prepareStatement("SELECT 1");
 
     /*
      once we set close on completion we should only be able to execute one as the second execution
      will close the resultsets from the first one which will close the statement.
      */
 
-    s.closeOnCompletion();
-    s.executeQuery();
-    try {
-      s.executeQuery();
-    } catch (SQLException ex) {
-      assertEquals(PSQLState.OBJECT_NOT_IN_STATE.getState(), ex.getSQLState(), "Expecting <<This statement has been closed>>");
-    }
+        s.closeOnCompletion();
+        s.executeQuery();
+        try {
+            s.executeQuery();
+        } catch (SQLException ex) {
+            assertEquals(PSQLState.OBJECT_NOT_IN_STATE.getState(), ex.getSQLState(), "Expecting <<This statement has been closed>>");
+        }
 
-  }
+    }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc4/jdbc41/DriverSupportsClassUnloadingTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/jdbc41/DriverSupportsClassUnloadingTest.java
index b1ac622..e4de899 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc4/jdbc41/DriverSupportsClassUnloadingTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/jdbc41/DriverSupportsClassUnloadingTest.java
@@ -31,97 +31,97 @@ import java.sql.Types;
 @RunWith(JUnitClassloaderRunner.class)
 @LeakPreventor(DriverSupportsClassUnloadingTest.LeakPreventor.class)
 @PackagesLoadedOutsideClassLoader(
-    packages = {"java.", "javax.", "jdk.", "com.sun.", "sun.", "org.w3c", "org.junit.", "junit.",
-        "se.jiderhamn."}
+        packages = {"java.", "javax.", "jdk.", "com.sun.", "sun.", "org.w3c", "org.junit.", "junit.",
+                "se.jiderhamn."}
 )
 class DriverSupportsClassUnloadingTest {
-  // See https://github.com/mjiderhamn/classloader-leak-prevention/tree/master/classloader-leak-test-framework#verifying-prevention-measures
-  public static class LeakPreventor implements Runnable {
-    @Override
-    public void run() {
-      try {
-        if (Driver.isRegistered()) {
-          Driver.deregister();
+    @BeforeAll
+    static void setSmallCleanupThreadTtl() {
+        // Make the tests faster
+        System.setProperty("pgjdbc.config.cleanup.thread.ttl", "100");
+    }
+
+    @AfterAll
+    static void resetCleanupThreadTtl() {
+        System.clearProperty("pgjdbc.config.cleanup.thread.ttl");
+    }
+
+    @Test
+    @Leaks(dumpHeapOnError = true)
+    void driverUnloadsWhenConnectionLeaks() throws SQLException, InterruptedException {
+        if (!Driver.isRegistered()) {
+            Driver.register();
         }
-        for (int i = 0; i < 3; i++) {
-          // Allow cleanup thread to detect and close the leaked connection
-          JUnitClassloaderRunner.forceGc();
-          // JUnitClassloaderRunner uses finalizers
-          System.runFinalization();
-        }
-        // Allow for the cleanup thread to terminate
-        Thread.sleep(2000);
-      } catch (Throwable e) {
-        throw new RuntimeException(e);
-      }
-    }
-  }
-
-  @BeforeAll
-  static void setSmallCleanupThreadTtl() {
-    // Make the tests faster
-    System.setProperty("pgjdbc.config.cleanup.thread.ttl", "100");
-  }
-
-  @AfterAll
-  static void resetCleanupThreadTtl() {
-    System.clearProperty("pgjdbc.config.cleanup.thread.ttl");
-  }
-
-  @Test
-  @Leaks(dumpHeapOnError = true)
-  void driverUnloadsWhenConnectionLeaks() throws SQLException, InterruptedException {
-    if (!Driver.isRegistered()) {
-      Driver.register();
-    }
-    // This code intentionally leaks connection, prepared statement to verify if the classes
-    // will still be able to unload
-    Connection con = TestUtil.openDB();
-    PreparedStatement ps = con.prepareStatement("select 1 c1, 'hello' c2");
-    // TODO: getMetaData throws AssertionError, however, it should probably not
-    if (con.unwrap(PgConnection.class).getPreferQueryMode() != PreferQueryMode.SIMPLE) {
-      ResultSetMetaData md = ps.getMetaData();
-      assertEquals(
-          Types.INTEGER,
-          md.getColumnType(1),
-          ".getColumnType for column 1 c1 should be INTEGER"
-      );
-    }
-
-    // This is to trigger "query timeout" code to increase the chances for memory leaks
-    ps.setQueryTimeout(1000);
-    ResultSet rs = ps.executeQuery();
-    rs.next();
-    assertEquals(1, rs.getInt(1), ".getInt for column c1");
-  }
-
-  @Test
-  @Leaks(dumpHeapOnError = true)
-  void driverUnloadsWhenConnectionClosedExplicitly() throws SQLException {
-    if (!Driver.isRegistered()) {
-      Driver.register();
-    }
-    // This code intentionally leaks connection, prepared statement to verify if the classes
-    // will still be able to unload
-    try (Connection con = TestUtil.openDB()) {
-      try (PreparedStatement ps = con.prepareStatement("select 1 c1, 'hello' c2")) {
+        // This code intentionally leaks connection, prepared statement to verify if the classes
+        // will still be able to unload
+        Connection con = TestUtil.openDB();
+        PreparedStatement ps = con.prepareStatement("select 1 c1, 'hello' c2");
         // TODO: getMetaData throws AssertionError, however, it should probably not
         if (con.unwrap(PgConnection.class).getPreferQueryMode() != PreferQueryMode.SIMPLE) {
-          ResultSetMetaData md = ps.getMetaData();
-          assertEquals(
-              Types.INTEGER,
-              md.getColumnType(1),
-              ".getColumnType for column 1 c1 should be INTEGER"
-          );
+            ResultSetMetaData md = ps.getMetaData();
+            assertEquals(
+                    Types.INTEGER,
+                    md.getColumnType(1),
+                    ".getColumnType for column 1 c1 should be INTEGER"
+            );
         }
 
         // This is to trigger "query timeout" code to increase the chances for memory leaks
         ps.setQueryTimeout(1000);
-        try (ResultSet rs = ps.executeQuery()) {
-          rs.next();
-          assertEquals(1, rs.getInt(1), ".getInt for column c1");
-        }
-      }
+        ResultSet rs = ps.executeQuery();
+        rs.next();
+        assertEquals(1, rs.getInt(1), ".getInt for column c1");
+    }
+
+    @Test
+    @Leaks(dumpHeapOnError = true)
+    void driverUnloadsWhenConnectionClosedExplicitly() throws SQLException {
+        if (!Driver.isRegistered()) {
+            Driver.register();
+        }
+        // This code intentionally leaks connection, prepared statement to verify if the classes
+        // will still be able to unload
+        try (Connection con = TestUtil.openDB()) {
+            try (PreparedStatement ps = con.prepareStatement("select 1 c1, 'hello' c2")) {
+                // TODO: getMetaData throws AssertionError, however, it should probably not
+                if (con.unwrap(PgConnection.class).getPreferQueryMode() != PreferQueryMode.SIMPLE) {
+                    ResultSetMetaData md = ps.getMetaData();
+                    assertEquals(
+                            Types.INTEGER,
+                            md.getColumnType(1),
+                            ".getColumnType for column 1 c1 should be INTEGER"
+                    );
+                }
+
+                // This is to trigger "query timeout" code to increase the chances for memory leaks
+                ps.setQueryTimeout(1000);
+                try (ResultSet rs = ps.executeQuery()) {
+                    rs.next();
+                    assertEquals(1, rs.getInt(1), ".getInt for column c1");
+                }
+            }
+        }
+    }
+
+    // See https://github.com/mjiderhamn/classloader-leak-prevention/tree/master/classloader-leak-test-framework#verifying-prevention-measures
+    public static class LeakPreventor implements Runnable {
+        @Override
+        public void run() {
+            try {
+                if (Driver.isRegistered()) {
+                    Driver.deregister();
+                }
+                for (int i = 0; i < 3; i++) {
+                    // Allow cleanup thread to detect and close the leaked connection
+                    JUnitClassloaderRunner.forceGc();
+                    // JUnitClassloaderRunner uses finalizers
+                    System.runFinalization();
+                }
+                // Allow for the cleanup thread to terminate
+                Thread.sleep(2000);
+            } catch (Throwable e) {
+                throw new RuntimeException(e);
+            }
+        }
     }
-  }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc4/jdbc41/GetObjectTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/jdbc41/GetObjectTest.java
index e3ad502..adb0b25 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc4/jdbc41/GetObjectTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/jdbc41/GetObjectTest.java
@@ -55,962 +55,962 @@ import javax.sql.rowset.serial.SerialBlob;
 import javax.sql.rowset.serial.SerialClob;
 
 class GetObjectTest {
-  private static final TimeZone UTC = TimeZone.getTimeZone("UTC"); // +0000 always
-  private static final TimeZone GMT03 = TimeZone.getTimeZone("GMT+03"); // +0300 always
-  private static final TimeZone GMT05 = TimeZone.getTimeZone("GMT-05"); // -0500 always
-  private static final TimeZone GMT13 = TimeZone.getTimeZone("GMT+13"); // +1300 always
+    private static final TimeZone UTC = TimeZone.getTimeZone("UTC"); // +0000 always
+    private static final TimeZone GMT03 = TimeZone.getTimeZone("GMT+03"); // +0300 always
+    private static final TimeZone GMT05 = TimeZone.getTimeZone("GMT-05"); // -0500 always
+    private static final TimeZone GMT13 = TimeZone.getTimeZone("GMT+13"); // +1300 always
 
-  private Connection conn;
+    private Connection conn;
 
-  @BeforeEach
-  void setUp() throws Exception {
-    conn = TestUtil.openDB();
-    TestUtil.createTable(conn, "table1", "varchar_column varchar(16), "
-            + "char_column char(10), "
-            + "boolean_column boolean,"
-            + "smallint_column smallint,"
-            + "integer_column integer,"
-            + "bigint_column bigint,"
-            + "decimal_column decimal,"
-            + "numeric_column numeric,"
+    @BeforeEach
+    void setUp() throws Exception {
+        conn = TestUtil.openDB();
+        TestUtil.createTable(conn, "table1", "varchar_column varchar(16), "
+                + "char_column char(10), "
+                + "boolean_column boolean,"
+                + "smallint_column smallint,"
+                + "integer_column integer,"
+                + "bigint_column bigint,"
+                + "decimal_column decimal,"
+                + "numeric_column numeric,"
+                // smallserial requires 9.2 or later
+                + (((BaseConnection) conn).haveMinimumServerVersion(ServerVersion.v9_2) ? "smallserial_column smallserial," : "")
+                + "serial_column serial,"
+                + "bigserial_column bigserial,"
+                + "real_column real,"
+                + "double_column double precision,"
+                + "timestamp_without_time_zone_column timestamp without time zone,"
+                + "timestamp_with_time_zone_column timestamp with time zone,"
+                + "date_column date,"
+                + "time_without_time_zone_column time without time zone,"
+                + "time_with_time_zone_column time with time zone,"
+                + "blob_column bytea,"
+                + "lob_column oid,"
+                + "array_column text[],"
+                + "point_column point,"
+                + "line_column line,"
+                + "lseg_column lseg,"
+                + "box_column box,"
+                + "path_column path,"
+                + "polygon_column polygon,"
+                + "circle_column circle,"
+                + "money_column money,"
+                + "interval_column interval,"
+                + (TestUtil.haveMinimumServerVersion(conn, ServerVersion.v8_3) ? "uuid_column uuid," : "")
+                + "inet_column inet,"
+                + "cidr_column cidr,"
+                + "macaddr_column macaddr"
+                + (TestUtil.haveMinimumServerVersion(conn, ServerVersion.v8_3) ? ",xml_column xml" : "")
+        );
+    }
+
+    @AfterEach
+    void tearDown() throws SQLException {
+        TestUtil.dropTable(conn, "table1");
+        TestUtil.closeDB(conn);
+    }
+
+    /**
+     * Test the behavior getObject for string columns.
+     */
+    @Test
+    void getString() throws SQLException {
+        Statement stmt = conn.createStatement();
+        stmt.executeUpdate(TestUtil.insertSQL("table1", "varchar_column,char_column", "'varchar_value','char_value'"));
+
+        ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "varchar_column, char_column"));
+        try {
+            assertTrue(rs.next());
+            assertEquals("varchar_value", rs.getObject("varchar_column", String.class));
+            assertEquals("varchar_value", rs.getObject(1, String.class));
+            assertEquals("char_value", rs.getObject("char_column", String.class));
+            assertEquals("char_value", rs.getObject(2, String.class));
+        } finally {
+            rs.close();
+        }
+    }
+
+    /**
+     * Test the behavior getObject for string columns.
+     */
+    @Test
+    void getClob() throws SQLException {
+        Statement stmt = conn.createStatement();
+        conn.setAutoCommit(false);
+        try {
+            char[] data = new char[]{'d', 'e', 'a', 'd', 'b', 'e', 'e', 'f'};
+            PreparedStatement insertPS = conn.prepareStatement(TestUtil.insertSQL("table1", "lob_column", "?"));
+            try {
+                insertPS.setObject(1, new SerialClob(data), Types.CLOB);
+                insertPS.executeUpdate();
+            } finally {
+                insertPS.close();
+            }
+
+            ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "lob_column"));
+            try {
+                assertTrue(rs.next());
+                Clob blob = rs.getObject("lob_column", Clob.class);
+                assertEquals(data.length, blob.length());
+                assertEquals(new String(data), blob.getSubString(1, data.length));
+                blob.free();
+
+                blob = rs.getObject(1, Clob.class);
+                assertEquals(data.length, blob.length());
+                assertEquals(new String(data), blob.getSubString(1, data.length));
+                blob.free();
+            } finally {
+                rs.close();
+            }
+        } finally {
+            conn.setAutoCommit(true);
+        }
+    }
+
+    /**
+     * Test the behavior getObject for big decimal columns.
+     */
+    @Test
+    void getBigDecimal() throws SQLException {
+        Statement stmt = conn.createStatement();
+        stmt.executeUpdate(TestUtil.insertSQL("table1", "decimal_column,numeric_column", "0.1,0.1"));
+
+        ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "decimal_column, numeric_column"));
+        try {
+            assertTrue(rs.next());
+            assertEquals(new BigDecimal("0.1"), rs.getObject("decimal_column", BigDecimal.class));
+            assertEquals(new BigDecimal("0.1"), rs.getObject(1, BigDecimal.class));
+            assertEquals(new BigDecimal("0.1"), rs.getObject("numeric_column", BigDecimal.class));
+            assertEquals(new BigDecimal("0.1"), rs.getObject(2, BigDecimal.class));
+        } finally {
+            rs.close();
+        }
+    }
+
+    /**
+     * Test the behavior getObject for timestamp columns.
+     */
+    @Test
+    void getTimestamp() throws SQLException {
+        Statement stmt = conn.createStatement();
+        stmt.executeUpdate(TestUtil.insertSQL("table1", "timestamp_without_time_zone_column", "TIMESTAMP '2004-10-19 10:23:54'"));
+
+        ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "timestamp_without_time_zone_column"));
+        try {
+            assertTrue(rs.next());
+            Calendar calendar = GregorianCalendar.getInstance();
+            calendar.clear();
+            calendar.set(Calendar.YEAR, 2004);
+            calendar.set(Calendar.MONTH, Calendar.OCTOBER);
+            calendar.set(Calendar.DAY_OF_MONTH, 19);
+            calendar.set(Calendar.HOUR_OF_DAY, 10);
+            calendar.set(Calendar.MINUTE, 23);
+            calendar.set(Calendar.SECOND, 54);
+            Timestamp expectedNoZone = new Timestamp(calendar.getTimeInMillis());
+            assertEquals(expectedNoZone, rs.getObject("timestamp_without_time_zone_column", Timestamp.class));
+            assertEquals(expectedNoZone, rs.getObject(1, Timestamp.class));
+        } finally {
+            rs.close();
+        }
+    }
+
+    /**
+     * Test the behavior getObject for timestamp columns.
+     */
+    @Test
+    void getJavaUtilDate() throws SQLException {
+        Statement stmt = conn.createStatement();
+        ResultSet rs = stmt.executeQuery("select TIMESTAMP '2004-10-19 10:23:54'::timestamp as timestamp_without_time_zone_column"
+                + ", null::timestamp as null_timestamp");
+        try {
+            assertTrue(rs.next());
+            Calendar calendar = GregorianCalendar.getInstance();
+            calendar.clear();
+            calendar.set(Calendar.YEAR, 2004);
+            calendar.set(Calendar.MONTH, Calendar.OCTOBER);
+            calendar.set(Calendar.DAY_OF_MONTH, 19);
+            calendar.set(Calendar.HOUR_OF_DAY, 10);
+            calendar.set(Calendar.MINUTE, 23);
+            calendar.set(Calendar.SECOND, 54);
+            java.util.Date expected = new java.util.Date(calendar.getTimeInMillis());
+            assertEquals(expected, rs.getObject("timestamp_without_time_zone_column", java.util.Date.class));
+            assertEquals(expected, rs.getObject(1, java.util.Date.class));
+            assertNull(rs.getObject(2, java.util.Date.class));
+        } finally {
+            rs.close();
+        }
+    }
+
+    /**
+     * Test the behavior getObject for timestamp columns.
+     */
+    @Test
+    void getTimestampWithTimeZone() throws SQLException {
+        runGetTimestampWithTimeZone(UTC, "Z");
+        runGetTimestampWithTimeZone(GMT03, "+03:00");
+        runGetTimestampWithTimeZone(GMT05, "-05:00");
+        runGetTimestampWithTimeZone(GMT13, "+13:00");
+    }
+
+    private void runGetTimestampWithTimeZone(TimeZone timeZone, String zoneString) throws SQLException {
+        Statement stmt = conn.createStatement();
+        try {
+            stmt.executeUpdate(TestUtil.insertSQL("table1", "timestamp_with_time_zone_column", "TIMESTAMP WITH TIME ZONE '2004-10-19 10:23:54" + zoneString + "'"));
+
+            ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "timestamp_with_time_zone_column"));
+            try {
+                assertTrue(rs.next());
+
+                Calendar calendar = GregorianCalendar.getInstance(timeZone);
+                calendar.clear();
+                calendar.set(Calendar.YEAR, 2004);
+                calendar.set(Calendar.MONTH, Calendar.OCTOBER);
+                calendar.set(Calendar.DAY_OF_MONTH, 19);
+                calendar.set(Calendar.HOUR_OF_DAY, 10);
+                calendar.set(Calendar.MINUTE, 23);
+                calendar.set(Calendar.SECOND, 54);
+                Timestamp expectedWithZone = new Timestamp(calendar.getTimeInMillis());
+                assertEquals(expectedWithZone, rs.getObject("timestamp_with_time_zone_column", Timestamp.class));
+                assertEquals(expectedWithZone, rs.getObject(1, Timestamp.class));
+            } finally {
+                rs.close();
+            }
+            stmt.executeUpdate("DELETE FROM table1");
+        } finally {
+            stmt.close();
+        }
+    }
+
+    /**
+     * Test the behavior getObject for timestamp columns.
+     */
+    @Test
+    void getCalendar() throws SQLException {
+        Statement stmt = conn.createStatement();
+
+        ResultSet rs = stmt.executeQuery("select TIMESTAMP '2004-10-19 10:23:54'::timestamp as timestamp_without_time_zone_column"
+                + ", TIMESTAMP '2004-10-19 10:23:54+02'::timestamp as timestamp_with_time_zone_column, null::timestamp as null_timestamp");
+        try {
+            assertTrue(rs.next());
+            Calendar calendar = GregorianCalendar.getInstance();
+            calendar.clear();
+            calendar.set(Calendar.YEAR, 2004);
+            calendar.set(Calendar.MONTH, Calendar.OCTOBER);
+            calendar.set(Calendar.DAY_OF_MONTH, 19);
+            calendar.set(Calendar.HOUR_OF_DAY, 10);
+            calendar.set(Calendar.MINUTE, 23);
+            calendar.set(Calendar.SECOND, 54);
+            long expected = calendar.getTimeInMillis();
+            assertEquals(expected, rs.getObject("timestamp_without_time_zone_column", Calendar.class).getTimeInMillis());
+            assertEquals(expected, rs.getObject(1, Calendar.class).getTimeInMillis());
+            assertNull(rs.getObject(3, Calendar.class));
+            calendar.setTimeZone(TimeZone.getTimeZone("GMT+2:00"));
+            expected = calendar.getTimeInMillis();
+            assertEquals(expected, rs.getObject("timestamp_with_time_zone_column", Calendar.class).getTimeInMillis());
+            assertEquals(expected, rs.getObject(2, Calendar.class).getTimeInMillis());
+            assertNull(rs.getObject(3, Calendar.class));
+        } finally {
+            rs.close();
+        }
+    }
+
+    /**
+     * Test the behavior getObject for date columns.
+     */
+    @Test
+    void getDate() throws SQLException {
+        Statement stmt = conn.createStatement();
+        stmt.executeUpdate(TestUtil.insertSQL("table1", "date_column", "DATE '1999-01-08'"));
+
+        ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "date_column"));
+        try {
+            assertTrue(rs.next());
+            Calendar calendar = GregorianCalendar.getInstance();
+            calendar.clear();
+            calendar.set(Calendar.YEAR, 1999);
+            calendar.set(Calendar.MONTH, Calendar.JANUARY);
+            calendar.set(Calendar.DAY_OF_MONTH, 8);
+            Date expectedNoZone = new Date(calendar.getTimeInMillis());
+            assertEquals(expectedNoZone, rs.getObject("date_column", Date.class));
+            assertEquals(expectedNoZone, rs.getObject(1, Date.class));
+        } finally {
+            rs.close();
+        }
+    }
+
+    @Test
+    void getNullDate() throws SQLException {
+        Statement stmt = conn.createStatement();
+        stmt.executeUpdate(TestUtil.insertSQL("table1", "date_column", "NULL"));
+
+        ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "date_column"));
+        try {
+            assertTrue(rs.next());
+            Date date = rs.getObject(1, Date.class);
+            assertTrue(rs.wasNull());
+        } finally {
+            rs.close();
+        }
+    }
+
+    @Test
+    void getNullTimestamp() throws SQLException {
+        Statement stmt = conn.createStatement();
+        stmt.executeUpdate(TestUtil.insertSQL("table1", "timestamp_without_time_zone_column", "NULL"));
+
+        ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "timestamp_without_time_zone_column"));
+        try {
+            assertTrue(rs.next());
+            java.util.Date ts = rs.getObject(1, java.util.Date.class);
+            assertTrue(rs.wasNull());
+        } finally {
+            rs.close();
+        }
+    }
+
+    /**
+     * Test the behavior getObject for time columns.
+     */
+    @Test
+    void getTime() throws SQLException {
+        Statement stmt = conn.createStatement();
+        stmt.executeUpdate(TestUtil.insertSQL("table1", "time_without_time_zone_column", "TIME '04:05:06'"));
+
+        ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "time_without_time_zone_column"));
+        try {
+            assertTrue(rs.next());
+            Calendar calendar = GregorianCalendar.getInstance();
+            calendar.clear();
+            calendar.set(Calendar.YEAR, 1970);
+            calendar.set(Calendar.MONTH, Calendar.JANUARY);
+            calendar.set(Calendar.DAY_OF_MONTH, 1);
+            calendar.set(Calendar.HOUR, 4);
+            calendar.set(Calendar.MINUTE, 5);
+            calendar.set(Calendar.SECOND, 6);
+            Time expectedNoZone = new Time(calendar.getTimeInMillis());
+            assertEquals(expectedNoZone, rs.getObject("time_without_time_zone_column", Time.class));
+            assertEquals(expectedNoZone, rs.getObject(1, Time.class));
+        } finally {
+            rs.close();
+        }
+    }
+
+    /**
+     * Test the behavior getObject for small integer columns.
+     */
+    @Test
+    void getShort() throws SQLException {
+        Statement stmt = conn.createStatement();
+        stmt.executeUpdate(TestUtil.insertSQL("table1", "smallint_column", "1"));
+
+        ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "smallint_column"));
+        try {
+            assertTrue(rs.next());
+            assertEquals(Short.valueOf((short) 1), rs.getObject("smallint_column", Short.class));
+            assertEquals(Short.valueOf((short) 1), rs.getObject(1, Short.class));
+        } finally {
+            rs.close();
+        }
+    }
+
+    /**
+     * Test the behavior getObject for small integer columns.
+     */
+    @Test
+    void getShortNull() throws SQLException {
+        Statement stmt = conn.createStatement();
+        stmt.executeUpdate(TestUtil.insertSQL("table1", "smallint_column", "NULL"));
+
+        ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "smallint_column"));
+        try {
+            assertTrue(rs.next());
+            assertNull(rs.getObject("smallint_column", Short.class));
+            assertNull(rs.getObject(1, Short.class));
+        } finally {
+            rs.close();
+        }
+    }
+
+    /**
+     * Test the behavior getObject for integer columns.
+     */
+    @Test
+    void getInteger() throws SQLException {
+        Statement stmt = conn.createStatement();
+        stmt.executeUpdate(TestUtil.insertSQL("table1", "smallint_column, integer_column", "1, 2"));
+
+        ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "smallint_column, integer_column"));
+        try {
+            assertTrue(rs.next());
+            assertEquals(Integer.valueOf(1), rs.getObject("smallint_column", Integer.class));
+            assertEquals(Integer.valueOf(1), rs.getObject(1, Integer.class));
+            assertEquals(Integer.valueOf(2), rs.getObject("integer_column", Integer.class));
+            assertEquals(Integer.valueOf(2), rs.getObject(2, Integer.class));
+        } finally {
+            rs.close();
+        }
+    }
+
+    /**
+     * Test the behavior getObject for integer columns.
+     */
+    @Test
+    void getIntegerNull() throws SQLException {
+        Statement stmt = conn.createStatement();
+        stmt.executeUpdate(TestUtil.insertSQL("table1", "smallint_column, integer_column", "NULL, NULL"));
+
+        ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "smallint_column, integer_column"));
+        try {
+            assertTrue(rs.next());
+            assertNull(rs.getObject("smallint_column", Integer.class));
+            assertNull(rs.getObject(1, Integer.class));
+            assertNull(rs.getObject("integer_column", Integer.class));
+            assertNull(rs.getObject(2, Integer.class));
+        } finally {
+            rs.close();
+        }
+    }
+
+    /**
+     * Test the behavior getObject for long columns.
+     */
+    @Test
+    void getBigInteger() throws SQLException {
+        Statement stmt = conn.createStatement();
+        stmt.executeUpdate(TestUtil.insertSQL("table1", "bigint_column", "2147483648"));
+
+        ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "bigint_column"));
+        try {
+            assertTrue(rs.next());
+            assertEquals(BigInteger.valueOf(2147483648L), rs.getObject("bigint_column", BigInteger.class));
+            assertEquals(BigInteger.valueOf(2147483648L), rs.getObject(1, BigInteger.class));
+        } finally {
+            rs.close();
+        }
+    }
+
+    /**
+     * Test the behavior getObject for long columns.
+     */
+    @Test
+    void getLong() throws SQLException {
+        Statement stmt = conn.createStatement();
+        stmt.executeUpdate(TestUtil.insertSQL("table1", "bigint_column", "2147483648"));
+
+        ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "bigint_column"));
+        try {
+            assertTrue(rs.next());
+            assertEquals(Long.valueOf(2147483648L), rs.getObject("bigint_column", Long.class));
+            assertEquals(Long.valueOf(2147483648L), rs.getObject(1, Long.class));
+        } finally {
+            rs.close();
+        }
+    }
+
+    /**
+     * Test the behavior getObject for long columns.
+     */
+    @Test
+    void getLongNull() throws SQLException {
+        Statement stmt = conn.createStatement();
+        stmt.executeUpdate(TestUtil.insertSQL("table1", "bigint_column", "NULL"));
+
+        ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "bigint_column"));
+        try {
+            assertTrue(rs.next());
+            assertNull(rs.getObject("bigint_column", Long.class));
+            assertNull(rs.getObject(1, Long.class));
+        } finally {
+            rs.close();
+        }
+    }
+
+    /**
+     * Test the behavior getObject for double columns.
+     */
+    @Test
+    void getDouble() throws SQLException {
+        Statement stmt = conn.createStatement();
+        stmt.executeUpdate(TestUtil.insertSQL("table1", "double_column", "1.0"));
+
+        ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "double_column"));
+        try {
+            assertTrue(rs.next());
+            assertEquals(Double.valueOf(1.0d), rs.getObject("double_column", Double.class));
+            assertEquals(Double.valueOf(1.0d), rs.getObject(1, Double.class));
+        } finally {
+            rs.close();
+        }
+    }
+
+    /**
+     * Test the behavior getObject for double columns.
+     */
+    @Test
+    void getDoubleNull() throws SQLException {
+        Statement stmt = conn.createStatement();
+        stmt.executeUpdate(TestUtil.insertSQL("table1", "double_column", "NULL"));
+
+        ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "double_column"));
+        try {
+            assertTrue(rs.next());
+            assertNull(rs.getObject("double_column", Double.class));
+            assertNull(rs.getObject(1, Double.class));
+        } finally {
+            rs.close();
+        }
+    }
+
+    /**
+     * Test the behavior getObject for float columns.
+     */
+    @Test
+    void getFloat() throws SQLException {
+        Statement stmt = conn.createStatement();
+        stmt.executeUpdate(TestUtil.insertSQL("table1", "real_column", "1.0"));
+
+        ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "real_column"));
+        try {
+            assertTrue(rs.next());
+            assertEquals(Float.valueOf(1.0f), rs.getObject("real_column", Float.class));
+            assertEquals(Float.valueOf(1.0f), rs.getObject(1, Float.class));
+        } finally {
+            rs.close();
+        }
+    }
+
+    /**
+     * Test the behavior getObject for float columns.
+     */
+    @Test
+    void getFloatNull() throws SQLException {
+        Statement stmt = conn.createStatement();
+        stmt.executeUpdate(TestUtil.insertSQL("table1", "real_column", "NULL"));
+
+        ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "real_column"));
+        try {
+            assertTrue(rs.next());
+            assertNull(rs.getObject("real_column", Float.class));
+            assertNull(rs.getObject(1, Float.class));
+        } finally {
+            rs.close();
+        }
+    }
+
+    /**
+     * Test the behavior getObject for serial columns.
+     */
+    @Test
+    void getSerial() throws SQLException {
+        if (!((BaseConnection) conn).haveMinimumServerVersion(ServerVersion.v9_2)) {
             // smallserial requires 9.2 or later
-            + (((BaseConnection) conn).haveMinimumServerVersion(ServerVersion.v9_2) ? "smallserial_column smallserial," : "")
-            + "serial_column serial,"
-            + "bigserial_column bigserial,"
-            + "real_column real,"
-            + "double_column double precision,"
-            + "timestamp_without_time_zone_column timestamp without time zone,"
-            + "timestamp_with_time_zone_column timestamp with time zone,"
-            + "date_column date,"
-            + "time_without_time_zone_column time without time zone,"
-            + "time_with_time_zone_column time with time zone,"
-            + "blob_column bytea,"
-            + "lob_column oid,"
-            + "array_column text[],"
-            + "point_column point,"
-            + "line_column line,"
-            + "lseg_column lseg,"
-            + "box_column box,"
-            + "path_column path,"
-            + "polygon_column polygon,"
-            + "circle_column circle,"
-            + "money_column money,"
-            + "interval_column interval,"
-            + (TestUtil.haveMinimumServerVersion(conn, ServerVersion.v8_3) ? "uuid_column uuid," : "")
-            + "inet_column inet,"
-            + "cidr_column cidr,"
-            + "macaddr_column macaddr"
-            + (TestUtil.haveMinimumServerVersion(conn, ServerVersion.v8_3) ? ",xml_column xml" : "")
-    );
-  }
-
-  @AfterEach
-  void tearDown() throws SQLException {
-    TestUtil.dropTable(conn, "table1");
-    TestUtil.closeDB(conn);
-  }
-
-  /**
-   * Test the behavior getObject for string columns.
-   */
-  @Test
-  void getString() throws SQLException {
-    Statement stmt = conn.createStatement();
-    stmt.executeUpdate(TestUtil.insertSQL("table1", "varchar_column,char_column", "'varchar_value','char_value'"));
-
-    ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "varchar_column, char_column"));
-    try {
-      assertTrue(rs.next());
-      assertEquals("varchar_value", rs.getObject("varchar_column", String.class));
-      assertEquals("varchar_value", rs.getObject(1, String.class));
-      assertEquals("char_value", rs.getObject("char_column", String.class));
-      assertEquals("char_value", rs.getObject(2, String.class));
-    } finally {
-      rs.close();
-    }
-  }
-
-  /**
-   * Test the behavior getObject for string columns.
-   */
-  @Test
-  void getClob() throws SQLException {
-    Statement stmt = conn.createStatement();
-    conn.setAutoCommit(false);
-    try {
-      char[] data = new char[]{'d', 'e', 'a', 'd', 'b', 'e', 'e', 'f'};
-      PreparedStatement insertPS = conn.prepareStatement(TestUtil.insertSQL("table1", "lob_column", "?"));
-      try {
-        insertPS.setObject(1, new SerialClob(data), Types.CLOB);
-        insertPS.executeUpdate();
-      } finally {
-        insertPS.close();
-      }
-
-      ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "lob_column"));
-      try {
-        assertTrue(rs.next());
-        Clob blob = rs.getObject("lob_column", Clob.class);
-        assertEquals(data.length, blob.length());
-        assertEquals(new String(data), blob.getSubString(1, data.length));
-        blob.free();
-
-        blob = rs.getObject(1, Clob.class);
-        assertEquals(data.length, blob.length());
-        assertEquals(new String(data), blob.getSubString(1, data.length));
-        blob.free();
-      } finally {
-        rs.close();
-      }
-    } finally {
-      conn.setAutoCommit(true);
-    }
-  }
-
-  /**
-   * Test the behavior getObject for big decimal columns.
-   */
-  @Test
-  void getBigDecimal() throws SQLException {
-    Statement stmt = conn.createStatement();
-    stmt.executeUpdate(TestUtil.insertSQL("table1", "decimal_column,numeric_column", "0.1,0.1"));
-
-    ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "decimal_column, numeric_column"));
-    try {
-      assertTrue(rs.next());
-      assertEquals(new BigDecimal("0.1"), rs.getObject("decimal_column", BigDecimal.class));
-      assertEquals(new BigDecimal("0.1"), rs.getObject(1, BigDecimal.class));
-      assertEquals(new BigDecimal("0.1"), rs.getObject("numeric_column", BigDecimal.class));
-      assertEquals(new BigDecimal("0.1"), rs.getObject(2, BigDecimal.class));
-    } finally {
-      rs.close();
-    }
-  }
-
-  /**
-   * Test the behavior getObject for timestamp columns.
-   */
-  @Test
-  void getTimestamp() throws SQLException {
-    Statement stmt = conn.createStatement();
-    stmt.executeUpdate(TestUtil.insertSQL("table1", "timestamp_without_time_zone_column", "TIMESTAMP '2004-10-19 10:23:54'"));
-
-    ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "timestamp_without_time_zone_column"));
-    try {
-      assertTrue(rs.next());
-      Calendar calendar = GregorianCalendar.getInstance();
-      calendar.clear();
-      calendar.set(Calendar.YEAR, 2004);
-      calendar.set(Calendar.MONTH, Calendar.OCTOBER);
-      calendar.set(Calendar.DAY_OF_MONTH, 19);
-      calendar.set(Calendar.HOUR_OF_DAY, 10);
-      calendar.set(Calendar.MINUTE, 23);
-      calendar.set(Calendar.SECOND, 54);
-      Timestamp expectedNoZone = new Timestamp(calendar.getTimeInMillis());
-      assertEquals(expectedNoZone, rs.getObject("timestamp_without_time_zone_column", Timestamp.class));
-      assertEquals(expectedNoZone, rs.getObject(1, Timestamp.class));
-    } finally {
-      rs.close();
-    }
-  }
-
-  /**
-   * Test the behavior getObject for timestamp columns.
-   */
-  @Test
-  void getJavaUtilDate() throws SQLException {
-    Statement stmt = conn.createStatement();
-    ResultSet rs = stmt.executeQuery("select TIMESTAMP '2004-10-19 10:23:54'::timestamp as timestamp_without_time_zone_column"
-        + ", null::timestamp as null_timestamp");
-    try {
-      assertTrue(rs.next());
-      Calendar calendar = GregorianCalendar.getInstance();
-      calendar.clear();
-      calendar.set(Calendar.YEAR, 2004);
-      calendar.set(Calendar.MONTH, Calendar.OCTOBER);
-      calendar.set(Calendar.DAY_OF_MONTH, 19);
-      calendar.set(Calendar.HOUR_OF_DAY, 10);
-      calendar.set(Calendar.MINUTE, 23);
-      calendar.set(Calendar.SECOND, 54);
-      java.util.Date expected = new java.util.Date(calendar.getTimeInMillis());
-      assertEquals(expected, rs.getObject("timestamp_without_time_zone_column", java.util.Date.class));
-      assertEquals(expected, rs.getObject(1, java.util.Date.class));
-      assertNull(rs.getObject(2, java.util.Date.class));
-    } finally {
-      rs.close();
-    }
-  }
-
-  /**
-   * Test the behavior getObject for timestamp columns.
-   */
-  @Test
-  void getTimestampWithTimeZone() throws SQLException {
-    runGetTimestampWithTimeZone(UTC, "Z");
-    runGetTimestampWithTimeZone(GMT03, "+03:00");
-    runGetTimestampWithTimeZone(GMT05, "-05:00");
-    runGetTimestampWithTimeZone(GMT13, "+13:00");
-  }
-
-  private void runGetTimestampWithTimeZone(TimeZone timeZone, String zoneString) throws SQLException {
-    Statement stmt = conn.createStatement();
-    try {
-      stmt.executeUpdate(TestUtil.insertSQL("table1", "timestamp_with_time_zone_column", "TIMESTAMP WITH TIME ZONE '2004-10-19 10:23:54" + zoneString + "'"));
-
-      ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "timestamp_with_time_zone_column"));
-      try {
-        assertTrue(rs.next());
-
-        Calendar calendar = GregorianCalendar.getInstance(timeZone);
-        calendar.clear();
-        calendar.set(Calendar.YEAR, 2004);
-        calendar.set(Calendar.MONTH, Calendar.OCTOBER);
-        calendar.set(Calendar.DAY_OF_MONTH, 19);
-        calendar.set(Calendar.HOUR_OF_DAY, 10);
-        calendar.set(Calendar.MINUTE, 23);
-        calendar.set(Calendar.SECOND, 54);
-        Timestamp expectedWithZone = new Timestamp(calendar.getTimeInMillis());
-        assertEquals(expectedWithZone, rs.getObject("timestamp_with_time_zone_column", Timestamp.class));
-        assertEquals(expectedWithZone, rs.getObject(1, Timestamp.class));
-      } finally {
-        rs.close();
-      }
-      stmt.executeUpdate("DELETE FROM table1");
-    } finally {
-      stmt.close();
-    }
-  }
-
-  /**
-   * Test the behavior getObject for timestamp columns.
-   */
-  @Test
-  void getCalendar() throws SQLException {
-    Statement stmt = conn.createStatement();
-
-    ResultSet rs = stmt.executeQuery("select TIMESTAMP '2004-10-19 10:23:54'::timestamp as timestamp_without_time_zone_column"
-        + ", TIMESTAMP '2004-10-19 10:23:54+02'::timestamp as timestamp_with_time_zone_column, null::timestamp as null_timestamp");
-    try {
-      assertTrue(rs.next());
-      Calendar calendar = GregorianCalendar.getInstance();
-      calendar.clear();
-      calendar.set(Calendar.YEAR, 2004);
-      calendar.set(Calendar.MONTH, Calendar.OCTOBER);
-      calendar.set(Calendar.DAY_OF_MONTH, 19);
-      calendar.set(Calendar.HOUR_OF_DAY, 10);
-      calendar.set(Calendar.MINUTE, 23);
-      calendar.set(Calendar.SECOND, 54);
-      long expected = calendar.getTimeInMillis();
-      assertEquals(expected, rs.getObject("timestamp_without_time_zone_column", Calendar.class).getTimeInMillis());
-      assertEquals(expected, rs.getObject(1, Calendar.class).getTimeInMillis());
-      assertNull(rs.getObject(3, Calendar.class));
-      calendar.setTimeZone(TimeZone.getTimeZone("GMT+2:00"));
-      expected = calendar.getTimeInMillis();
-      assertEquals(expected, rs.getObject("timestamp_with_time_zone_column", Calendar.class).getTimeInMillis());
-      assertEquals(expected, rs.getObject(2, Calendar.class).getTimeInMillis());
-      assertNull(rs.getObject(3, Calendar.class));
-    } finally {
-      rs.close();
-    }
-  }
-
-  /**
-   * Test the behavior getObject for date columns.
-   */
-  @Test
-  void getDate() throws SQLException {
-    Statement stmt = conn.createStatement();
-    stmt.executeUpdate(TestUtil.insertSQL("table1", "date_column", "DATE '1999-01-08'"));
-
-    ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "date_column"));
-    try {
-      assertTrue(rs.next());
-      Calendar calendar = GregorianCalendar.getInstance();
-      calendar.clear();
-      calendar.set(Calendar.YEAR, 1999);
-      calendar.set(Calendar.MONTH, Calendar.JANUARY);
-      calendar.set(Calendar.DAY_OF_MONTH, 8);
-      Date expectedNoZone = new Date(calendar.getTimeInMillis());
-      assertEquals(expectedNoZone, rs.getObject("date_column", Date.class));
-      assertEquals(expectedNoZone, rs.getObject(1, Date.class));
-    } finally {
-      rs.close();
-    }
-  }
-
-  @Test
-  void getNullDate() throws SQLException {
-    Statement stmt = conn.createStatement();
-    stmt.executeUpdate(TestUtil.insertSQL("table1", "date_column", "NULL"));
-
-    ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "date_column"));
-    try {
-      assertTrue(rs.next());
-      Date date = rs.getObject(1, Date.class);
-      assertTrue(rs.wasNull());
-    } finally {
-      rs.close();
-    }
-  }
-
-  @Test
-  void getNullTimestamp() throws SQLException {
-    Statement stmt = conn.createStatement();
-    stmt.executeUpdate(TestUtil.insertSQL("table1", "timestamp_without_time_zone_column", "NULL"));
-
-    ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "timestamp_without_time_zone_column"));
-    try {
-      assertTrue(rs.next());
-      java.util.Date ts = rs.getObject(1, java.util.Date.class);
-      assertTrue(rs.wasNull());
-    } finally {
-      rs.close();
-    }
-  }
-
-  /**
-   * Test the behavior getObject for time columns.
-   */
-  @Test
-  void getTime() throws SQLException {
-    Statement stmt = conn.createStatement();
-    stmt.executeUpdate(TestUtil.insertSQL("table1", "time_without_time_zone_column", "TIME '04:05:06'"));
-
-    ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "time_without_time_zone_column"));
-    try {
-      assertTrue(rs.next());
-      Calendar calendar = GregorianCalendar.getInstance();
-      calendar.clear();
-      calendar.set(Calendar.YEAR, 1970);
-      calendar.set(Calendar.MONTH, Calendar.JANUARY);
-      calendar.set(Calendar.DAY_OF_MONTH, 1);
-      calendar.set(Calendar.HOUR, 4);
-      calendar.set(Calendar.MINUTE, 5);
-      calendar.set(Calendar.SECOND, 6);
-      Time expectedNoZone = new Time(calendar.getTimeInMillis());
-      assertEquals(expectedNoZone, rs.getObject("time_without_time_zone_column", Time.class));
-      assertEquals(expectedNoZone, rs.getObject(1, Time.class));
-    } finally {
-      rs.close();
-    }
-  }
-
-  /**
-   * Test the behavior getObject for small integer columns.
-   */
-  @Test
-  void getShort() throws SQLException {
-    Statement stmt = conn.createStatement();
-    stmt.executeUpdate(TestUtil.insertSQL("table1", "smallint_column", "1"));
-
-    ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "smallint_column"));
-    try {
-      assertTrue(rs.next());
-      assertEquals(Short.valueOf((short) 1), rs.getObject("smallint_column", Short.class));
-      assertEquals(Short.valueOf((short) 1), rs.getObject(1, Short.class));
-    } finally {
-      rs.close();
-    }
-  }
-
-  /**
-   * Test the behavior getObject for small integer columns.
-   */
-  @Test
-  void getShortNull() throws SQLException {
-    Statement stmt = conn.createStatement();
-    stmt.executeUpdate(TestUtil.insertSQL("table1", "smallint_column", "NULL"));
-
-    ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "smallint_column"));
-    try {
-      assertTrue(rs.next());
-      assertNull(rs.getObject("smallint_column", Short.class));
-      assertNull(rs.getObject(1, Short.class));
-    } finally {
-      rs.close();
-    }
-  }
-
-  /**
-   * Test the behavior getObject for integer columns.
-   */
-  @Test
-  void getInteger() throws SQLException {
-    Statement stmt = conn.createStatement();
-    stmt.executeUpdate(TestUtil.insertSQL("table1", "smallint_column, integer_column", "1, 2"));
-
-    ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "smallint_column, integer_column"));
-    try {
-      assertTrue(rs.next());
-      assertEquals(Integer.valueOf(1), rs.getObject("smallint_column", Integer.class));
-      assertEquals(Integer.valueOf(1), rs.getObject(1, Integer.class));
-      assertEquals(Integer.valueOf(2), rs.getObject("integer_column", Integer.class));
-      assertEquals(Integer.valueOf(2), rs.getObject(2, Integer.class));
-    } finally {
-      rs.close();
-    }
-  }
-
-  /**
-   * Test the behavior getObject for integer columns.
-   */
-  @Test
-  void getIntegerNull() throws SQLException {
-    Statement stmt = conn.createStatement();
-    stmt.executeUpdate(TestUtil.insertSQL("table1", "smallint_column, integer_column", "NULL, NULL"));
-
-    ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "smallint_column, integer_column"));
-    try {
-      assertTrue(rs.next());
-      assertNull(rs.getObject("smallint_column", Integer.class));
-      assertNull(rs.getObject(1, Integer.class));
-      assertNull(rs.getObject("integer_column", Integer.class));
-      assertNull(rs.getObject(2, Integer.class));
-    } finally {
-      rs.close();
-    }
-  }
-
-  /**
-   * Test the behavior getObject for long columns.
-   */
-  @Test
-  void getBigInteger() throws SQLException {
-    Statement stmt = conn.createStatement();
-    stmt.executeUpdate(TestUtil.insertSQL("table1", "bigint_column", "2147483648"));
-
-    ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "bigint_column"));
-    try {
-      assertTrue(rs.next());
-      assertEquals(BigInteger.valueOf(2147483648L), rs.getObject("bigint_column", BigInteger.class));
-      assertEquals(BigInteger.valueOf(2147483648L), rs.getObject(1, BigInteger.class));
-    } finally {
-      rs.close();
-    }
-  }
-
-  /**
-   * Test the behavior getObject for long columns.
-   */
-  @Test
-  void getLong() throws SQLException {
-    Statement stmt = conn.createStatement();
-    stmt.executeUpdate(TestUtil.insertSQL("table1", "bigint_column", "2147483648"));
-
-    ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "bigint_column"));
-    try {
-      assertTrue(rs.next());
-      assertEquals(Long.valueOf(2147483648L), rs.getObject("bigint_column", Long.class));
-      assertEquals(Long.valueOf(2147483648L), rs.getObject(1, Long.class));
-    } finally {
-      rs.close();
-    }
-  }
-
-  /**
-   * Test the behavior getObject for long columns.
-   */
-  @Test
-  void getLongNull() throws SQLException {
-    Statement stmt = conn.createStatement();
-    stmt.executeUpdate(TestUtil.insertSQL("table1", "bigint_column", "NULL"));
-
-    ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "bigint_column"));
-    try {
-      assertTrue(rs.next());
-      assertNull(rs.getObject("bigint_column", Long.class));
-      assertNull(rs.getObject(1, Long.class));
-    } finally {
-      rs.close();
-    }
-  }
-
-  /**
-   * Test the behavior getObject for double columns.
-   */
-  @Test
-  void getDouble() throws SQLException {
-    Statement stmt = conn.createStatement();
-    stmt.executeUpdate(TestUtil.insertSQL("table1", "double_column", "1.0"));
-
-    ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "double_column"));
-    try {
-      assertTrue(rs.next());
-      assertEquals(Double.valueOf(1.0d), rs.getObject("double_column", Double.class));
-      assertEquals(Double.valueOf(1.0d), rs.getObject(1, Double.class));
-    } finally {
-      rs.close();
-    }
-  }
-
-  /**
-   * Test the behavior getObject for double columns.
-   */
-  @Test
-  void getDoubleNull() throws SQLException {
-    Statement stmt = conn.createStatement();
-    stmt.executeUpdate(TestUtil.insertSQL("table1", "double_column", "NULL"));
-
-    ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "double_column"));
-    try {
-      assertTrue(rs.next());
-      assertNull(rs.getObject("double_column", Double.class));
-      assertNull(rs.getObject(1, Double.class));
-    } finally {
-      rs.close();
-    }
-  }
-
-  /**
-   * Test the behavior getObject for float columns.
-   */
-  @Test
-  void getFloat() throws SQLException {
-    Statement stmt = conn.createStatement();
-    stmt.executeUpdate(TestUtil.insertSQL("table1", "real_column", "1.0"));
-
-    ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "real_column"));
-    try {
-      assertTrue(rs.next());
-      assertEquals(Float.valueOf(1.0f), rs.getObject("real_column", Float.class));
-      assertEquals(Float.valueOf(1.0f), rs.getObject(1, Float.class));
-    } finally {
-      rs.close();
-    }
-  }
-
-  /**
-   * Test the behavior getObject for float columns.
-   */
-  @Test
-  void getFloatNull() throws SQLException {
-    Statement stmt = conn.createStatement();
-    stmt.executeUpdate(TestUtil.insertSQL("table1", "real_column", "NULL"));
-
-    ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "real_column"));
-    try {
-      assertTrue(rs.next());
-      assertNull(rs.getObject("real_column", Float.class));
-      assertNull(rs.getObject(1, Float.class));
-    } finally {
-      rs.close();
-    }
-  }
-
-  /**
-   * Test the behavior getObject for serial columns.
-   */
-  @Test
-  void getSerial() throws SQLException {
-    if (!((BaseConnection) conn).haveMinimumServerVersion(ServerVersion.v9_2)) {
-      // smallserial requires 9.2 or later
-      return;
-    }
-    Statement stmt = conn.createStatement();
-    stmt.executeUpdate(TestUtil.insertSQL("table1", "smallserial_column, serial_column", "1, 2"));
-
-    ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "smallserial_column, serial_column"));
-    try {
-      assertTrue(rs.next());
-      assertEquals(Integer.valueOf(1), rs.getObject("smallserial_column", Integer.class));
-      assertEquals(Integer.valueOf(1), rs.getObject(1, Integer.class));
-      assertEquals(Integer.valueOf(2), rs.getObject("serial_column", Integer.class));
-      assertEquals(Integer.valueOf(2), rs.getObject(2, Integer.class));
-    } finally {
-      rs.close();
-    }
-  }
-
-  /**
-   * Test the behavior getObject for boolean columns.
-   */
-  @Test
-  void getBoolean() throws SQLException {
-    Statement stmt = conn.createStatement();
-    stmt.executeUpdate(TestUtil.insertSQL("table1", "boolean_column", "TRUE"));
-
-    ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "boolean_column"));
-    try {
-      assertTrue(rs.next());
-      assertTrue(rs.getObject("boolean_column", Boolean.class));
-      assertTrue(rs.getObject(1, Boolean.class));
-    } finally {
-      rs.close();
-    }
-  }
-
-  /**
-   * Test the behavior getObject for boolean columns.
-   */
-  @Test
-  void getBooleanNull() throws SQLException {
-    Statement stmt = conn.createStatement();
-    stmt.executeUpdate(TestUtil.insertSQL("table1", "boolean_column", "NULL"));
-
-    ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "boolean_column"));
-    try {
-      assertTrue(rs.next());
-      assertNull(rs.getObject("boolean_column", Boolean.class));
-      assertNull(rs.getObject(1, Boolean.class));
-    } finally {
-      rs.close();
-    }
-  }
-
-  /**
-   * Test the behavior getObject for xml columns.
-   */
-  @Test
-  void getBlob() throws SQLException {
-    Statement stmt = conn.createStatement();
-    conn.setAutoCommit(false);
-    try {
-      byte[] data = new byte[]{(byte) 0xDE, (byte) 0xAD, (byte) 0xBE, (byte) 0xEF};
-      PreparedStatement insertPS = conn.prepareStatement(TestUtil.insertSQL("table1", "lob_column", "?"));
-      try {
-        insertPS.setObject(1, new SerialBlob(data), Types.BLOB);
-        insertPS.executeUpdate();
-      } finally {
-        insertPS.close();
-      }
-
-      ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "lob_column"));
-      try {
-        assertTrue(rs.next());
-        Blob blob = rs.getObject("lob_column", Blob.class);
-        assertEquals(data.length, blob.length());
-        assertArrayEquals(data, blob.getBytes(1, data.length));
-        blob.free();
-
-        blob = rs.getObject(1, Blob.class);
-        assertEquals(data.length, blob.length());
-        assertArrayEquals(data, blob.getBytes(1, data.length));
-        blob.free();
-      } finally {
-        rs.close();
-      }
-    } finally {
-      conn.setAutoCommit(true);
-    }
-  }
-
-  /**
-   * Test the behavior getObject for array columns.
-   */
-  @Test
-  void getArray() throws SQLException {
-    Statement stmt = conn.createStatement();
-    String[] data = new String[]{"java", "jdbc"};
-    stmt.executeUpdate(TestUtil.insertSQL("table1", "array_column", "'{\"java\", \"jdbc\"}'"));
-
-    ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "array_column"));
-    try {
-      assertTrue(rs.next());
-      Array array = rs.getObject("array_column", Array.class);
-      assertArrayEquals(data, (String[]) array.getArray());
-      array.free();
-
-      array = rs.getObject(1, Array.class);
-      assertArrayEquals(data, (String[]) array.getArray());
-      array.free();
-    } finally {
-      rs.close();
-    }
-  }
-
-  /**
-   * Test the behavior getObject for xml columns.
-   */
-  @Test
-  void getXml() throws SQLException {
-    if (!TestUtil.haveMinimumServerVersion(conn, ServerVersion.v8_3)) {
-      // XML column requires PostgreSQL 8.3+
-      return;
-    }
-    Statement stmt = conn.createStatement();
-    String content = "<book><title>Manual</title></book>";
-    stmt.executeUpdate(TestUtil.insertSQL("table1", "xml_column", "XMLPARSE (DOCUMENT '<?xml version=\"1.0\"?><book><title>Manual</title></book>')"));
-
-    ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "xml_column"));
-    try {
-      assertTrue(rs.next());
-      SQLXML sqlXml = rs.getObject("xml_column", SQLXML.class);
-      assertEquals(content, sqlXml.getString());
-      sqlXml.free();
-
-      sqlXml = rs.getObject(1, SQLXML.class);
-      assertEquals(content, sqlXml.getString());
-      sqlXml.free();
-    } finally {
-      rs.close();
-    }
-  }
-
-  /**
-   * <p>Test the behavior getObject for money columns.</p>
-   *
-   * <p>The test is ignored as it is locale-dependent.</p>
-   */
-  @Disabled
-  @Test
-  void getMoney() throws SQLException {
-    Statement stmt = conn.createStatement();
-    String expected = "12.34";
-    stmt.executeUpdate(TestUtil.insertSQL("table1", "money_column", "'12.34'::float8::numeric::money"));
-
-    ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "money_column"));
-    try {
-      assertTrue(rs.next());
-      PGmoney money = rs.getObject("money_column", PGmoney.class);
-      assertTrue(money.getValue().endsWith(expected));
-
-      money = rs.getObject(1, PGmoney.class);
-      assertTrue(money.getValue().endsWith(expected));
-    } finally {
-      rs.close();
-    }
-  }
-
-  /**
-   * Test the behavior getObject for point columns.
-   */
-  @Test
-  void getPoint() throws SQLException {
-    Statement stmt = conn.createStatement();
-    PGpoint expected = new PGpoint(1.0d, 2.0d);
-    stmt.executeUpdate(TestUtil.insertSQL("table1", "point_column", "point '(1, 2)'"));
-
-    ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "point_column"));
-    try {
-      assertTrue(rs.next());
-      assertEquals(expected, rs.getObject("point_column", PGpoint.class));
-      assertEquals(expected, rs.getObject(1, PGpoint.class));
-    } finally {
-      rs.close();
-    }
-  }
-
-  /**
-   * Test the behavior getObject for line columns.
-   */
-  @Test
-  void getLine() throws SQLException {
-    if (!((BaseConnection) conn).haveMinimumServerVersion(ServerVersion.v9_4)) {
-      // only 9.4 and later ship with full line support by default
-      return;
+            return;
+        }
+        Statement stmt = conn.createStatement();
+        stmt.executeUpdate(TestUtil.insertSQL("table1", "smallserial_column, serial_column", "1, 2"));
+
+        ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "smallserial_column, serial_column"));
+        try {
+            assertTrue(rs.next());
+            assertEquals(Integer.valueOf(1), rs.getObject("smallserial_column", Integer.class));
+            assertEquals(Integer.valueOf(1), rs.getObject(1, Integer.class));
+            assertEquals(Integer.valueOf(2), rs.getObject("serial_column", Integer.class));
+            assertEquals(Integer.valueOf(2), rs.getObject(2, Integer.class));
+        } finally {
+            rs.close();
+        }
     }
 
-    Statement stmt = conn.createStatement();
-    PGline expected = new PGline(1.0d, 2.0d, 3.0d);
-    stmt.executeUpdate(TestUtil.insertSQL("table1", "line_column", "line '{1, 2, 3}'"));
+    /**
+     * Test the behavior getObject for boolean columns.
+     */
+    @Test
+    void getBoolean() throws SQLException {
+        Statement stmt = conn.createStatement();
+        stmt.executeUpdate(TestUtil.insertSQL("table1", "boolean_column", "TRUE"));
 
-    ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "line_column"));
-    try {
-      assertTrue(rs.next());
-      assertEquals(expected, rs.getObject("line_column", PGline.class));
-      assertEquals(expected, rs.getObject(1, PGline.class));
-    } finally {
-      rs.close();
+        ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "boolean_column"));
+        try {
+            assertTrue(rs.next());
+            assertTrue(rs.getObject("boolean_column", Boolean.class));
+            assertTrue(rs.getObject(1, Boolean.class));
+        } finally {
+            rs.close();
+        }
     }
-  }
 
-  /**
-   * Test the behavior getObject for lseg columns.
-   */
-  @Test
-  void getLineseg() throws SQLException {
-    Statement stmt = conn.createStatement();
-    PGlseg expected = new PGlseg(1.0d, 2.0d, 3.0d, 4.0d);
-    stmt.executeUpdate(TestUtil.insertSQL("table1", "lseg_column", "lseg '[(1, 2), (3, 4)]'"));
+    /**
+     * Test the behavior getObject for boolean columns.
+     */
+    @Test
+    void getBooleanNull() throws SQLException {
+        Statement stmt = conn.createStatement();
+        stmt.executeUpdate(TestUtil.insertSQL("table1", "boolean_column", "NULL"));
 
-    ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "lseg_column"));
-    try {
-      assertTrue(rs.next());
-      assertEquals(expected, rs.getObject("lseg_column", PGlseg.class));
-      assertEquals(expected, rs.getObject(1, PGlseg.class));
-    } finally {
-      rs.close();
+        ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "boolean_column"));
+        try {
+            assertTrue(rs.next());
+            assertNull(rs.getObject("boolean_column", Boolean.class));
+            assertNull(rs.getObject(1, Boolean.class));
+        } finally {
+            rs.close();
+        }
     }
-  }
 
-  /**
-   * Test the behavior getObject for box columns.
-   */
-  @Test
-  void getBox() throws SQLException {
-    Statement stmt = conn.createStatement();
-    PGbox expected = new PGbox(1.0d, 2.0d, 3.0d, 4.0d);
-    stmt.executeUpdate(TestUtil.insertSQL("table1", "box_column", "box '((1, 2), (3, 4))'"));
+    /**
+     * Test the behavior getObject for blob columns.
+     */
+    @Test
+    void getBlob() throws SQLException {
+        Statement stmt = conn.createStatement();
+        conn.setAutoCommit(false);
+        try {
+            byte[] data = new byte[]{(byte) 0xDE, (byte) 0xAD, (byte) 0xBE, (byte) 0xEF};
+            PreparedStatement insertPS = conn.prepareStatement(TestUtil.insertSQL("table1", "lob_column", "?"));
+            try {
+                insertPS.setObject(1, new SerialBlob(data), Types.BLOB);
+                insertPS.executeUpdate();
+            } finally {
+                insertPS.close();
+            }
 
-    ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "box_column"));
-    try {
-      assertTrue(rs.next());
-      assertEquals(expected, rs.getObject("box_column", PGbox.class));
-      assertEquals(expected, rs.getObject(1, PGbox.class));
-    } finally {
-      rs.close();
+            ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "lob_column"));
+            try {
+                assertTrue(rs.next());
+                Blob blob = rs.getObject("lob_column", Blob.class);
+                assertEquals(data.length, blob.length());
+                assertArrayEquals(data, blob.getBytes(1, data.length));
+                blob.free();
+
+                blob = rs.getObject(1, Blob.class);
+                assertEquals(data.length, blob.length());
+                assertArrayEquals(data, blob.getBytes(1, data.length));
+                blob.free();
+            } finally {
+                rs.close();
+            }
+        } finally {
+            conn.setAutoCommit(true);
+        }
     }
-  }
 
-  /**
-   * Test the behavior getObject for path columns.
-   */
-  @Test
-  void getPath() throws SQLException {
-    Statement stmt = conn.createStatement();
-    PGpath expected = new PGpath(new PGpoint[]{new PGpoint(1.0d, 2.0d), new PGpoint(3.0d, 4.0d)}, true);
-    stmt.executeUpdate(TestUtil.insertSQL("table1", "path_column", "path '[(1, 2), (3, 4)]'"));
+    /**
+     * Test the behavior getObject for array columns.
+     */
+    @Test
+    void getArray() throws SQLException {
+        Statement stmt = conn.createStatement();
+        String[] data = new String[]{"java", "jdbc"};
+        stmt.executeUpdate(TestUtil.insertSQL("table1", "array_column", "'{\"java\", \"jdbc\"}'"));
 
-    ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "path_column"));
-    try {
-      assertTrue(rs.next());
-      assertEquals(expected, rs.getObject("path_column", PGpath.class));
-      assertEquals(expected, rs.getObject(1, PGpath.class));
-    } finally {
-      rs.close();
+        ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "array_column"));
+        try {
+            assertTrue(rs.next());
+            Array array = rs.getObject("array_column", Array.class);
+            assertArrayEquals(data, (String[]) array.getArray());
+            array.free();
+
+            array = rs.getObject(1, Array.class);
+            assertArrayEquals(data, (String[]) array.getArray());
+            array.free();
+        } finally {
+            rs.close();
+        }
     }
-  }
 
-  /**
-   * Test the behavior getObject for polygon columns.
-   */
-  @Test
-  void getPolygon() throws SQLException {
-    Statement stmt = conn.createStatement();
-    PGpolygon expected = new PGpolygon(new PGpoint[]{new PGpoint(1.0d, 2.0d), new PGpoint(3.0d, 4.0d)});
-    stmt.executeUpdate(TestUtil.insertSQL("table1", "polygon_column", "polygon '((1, 2), (3, 4))'"));
+    /**
+     * Test the behavior getObject for xml columns.
+     */
+    @Test
+    void getXml() throws SQLException {
+        if (!TestUtil.haveMinimumServerVersion(conn, ServerVersion.v8_3)) {
+            // XML column requires PostgreSQL 8.3+
+            return;
+        }
+        Statement stmt = conn.createStatement();
+        String content = "<book><title>Manual</title></book>";
+        stmt.executeUpdate(TestUtil.insertSQL("table1", "xml_column", "XMLPARSE (DOCUMENT '<?xml version=\"1.0\"?><book><title>Manual</title></book>')"));
 
-    ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "polygon_column"));
-    try {
-      assertTrue(rs.next());
-      assertEquals(expected, rs.getObject("polygon_column", PGpolygon.class));
-      assertEquals(expected, rs.getObject(1, PGpolygon.class));
-    } finally {
-      rs.close();
+        ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "xml_column"));
+        try {
+            assertTrue(rs.next());
+            SQLXML sqlXml = rs.getObject("xml_column", SQLXML.class);
+            assertEquals(content, sqlXml.getString());
+            sqlXml.free();
+
+            sqlXml = rs.getObject(1, SQLXML.class);
+            assertEquals(content, sqlXml.getString());
+            sqlXml.free();
+        } finally {
+            rs.close();
+        }
     }
-  }
 
-  /**
-   * Test the behavior getObject for circle columns.
-   */
-  @Test
-  void getCircle() throws SQLException {
-    Statement stmt = conn.createStatement();
-    PGcircle expected = new PGcircle(1.0d, 2.0d, 3.0d);
-    stmt.executeUpdate(TestUtil.insertSQL("table1", "circle_column", "circle '<(1, 2), 3>'"));
+    /**
+     * <p>Test the behavior getObject for money columns.</p>
+     *
+     * <p>The test is ignored as it is locale-dependent.</p>
+     */
+    @Disabled
+    @Test
+    void getMoney() throws SQLException {
+        Statement stmt = conn.createStatement();
+        String expected = "12.34";
+        stmt.executeUpdate(TestUtil.insertSQL("table1", "money_column", "'12.34'::float8::numeric::money"));
 
-    ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "circle_column"));
-    try {
-      assertTrue(rs.next());
-      assertEquals(expected, rs.getObject("circle_column", PGcircle.class));
-      assertEquals(expected, rs.getObject(1, PGcircle.class));
-    } finally {
-      rs.close();
+        ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "money_column"));
+        try {
+            assertTrue(rs.next());
+            PGmoney money = rs.getObject("money_column", PGmoney.class);
+            assertTrue(money.getValue().endsWith(expected));
+
+            money = rs.getObject(1, PGmoney.class);
+            assertTrue(money.getValue().endsWith(expected));
+        } finally {
+            rs.close();
+        }
     }
-  }
 
-  /**
-   * Test the behavior getObject for interval columns.
-   */
-  @Test
-  void getInterval() throws SQLException {
-    Statement stmt = conn.createStatement();
-    PGInterval expected = new PGInterval(0, 0, 3, 4, 5, 6.0d);
-    stmt.executeUpdate(TestUtil.insertSQL("table1", "interval_column", "interval '3 4:05:06'"));
+    /**
+     * Test the behavior getObject for point columns.
+     */
+    @Test
+    void getPoint() throws SQLException {
+        Statement stmt = conn.createStatement();
+        PGpoint expected = new PGpoint(1.0d, 2.0d);
+        stmt.executeUpdate(TestUtil.insertSQL("table1", "point_column", "point '(1, 2)'"));
 
-    ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "interval_column"));
-    try {
-      assertTrue(rs.next());
-      assertEquals(expected, rs.getObject("interval_column", PGInterval.class));
-      assertEquals(expected, rs.getObject(1, PGInterval.class));
-    } finally {
-      rs.close();
+        ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "point_column"));
+        try {
+            assertTrue(rs.next());
+            assertEquals(expected, rs.getObject("point_column", PGpoint.class));
+            assertEquals(expected, rs.getObject(1, PGpoint.class));
+        } finally {
+            rs.close();
+        }
     }
-  }
 
-  /**
-   * Test the behavior getObject for uuid columns.
-   */
-  @Test
-  void getUuid() throws SQLException {
-    if (!TestUtil.haveMinimumServerVersion(conn, ServerVersion.v8_3)) {
-      // UUID requires PostgreSQL 8.3+
-      return;
+    /**
+     * Test the behavior getObject for line columns.
+     */
+    @Test
+    void getLine() throws SQLException {
+        if (!((BaseConnection) conn).haveMinimumServerVersion(ServerVersion.v9_4)) {
+            // only 9.4 and later ship with full line support by default
+            return;
+        }
+
+        Statement stmt = conn.createStatement();
+        PGline expected = new PGline(1.0d, 2.0d, 3.0d);
+        stmt.executeUpdate(TestUtil.insertSQL("table1", "line_column", "line '{1, 2, 3}'"));
+
+        ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "line_column"));
+        try {
+            assertTrue(rs.next());
+            assertEquals(expected, rs.getObject("line_column", PGline.class));
+            assertEquals(expected, rs.getObject(1, PGline.class));
+        } finally {
+            rs.close();
+        }
     }
-    Statement stmt = conn.createStatement();
-    String expected = "a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11";
-    stmt.executeUpdate(TestUtil.insertSQL("table1", "uuid_column", "'" + expected + "'"));
 
-    ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "uuid_column"));
-    try {
-      assertTrue(rs.next());
-      assertEquals(UUID.fromString(expected), rs.getObject("uuid_column", UUID.class));
-      assertEquals(UUID.fromString(expected), rs.getObject(1, UUID.class));
-    } finally {
-      rs.close();
+    /**
+     * Test the behavior getObject for lseg columns.
+     */
+    @Test
+    void getLineseg() throws SQLException {
+        Statement stmt = conn.createStatement();
+        PGlseg expected = new PGlseg(1.0d, 2.0d, 3.0d, 4.0d);
+        stmt.executeUpdate(TestUtil.insertSQL("table1", "lseg_column", "lseg '[(1, 2), (3, 4)]'"));
+
+        ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "lseg_column"));
+        try {
+            assertTrue(rs.next());
+            assertEquals(expected, rs.getObject("lseg_column", PGlseg.class));
+            assertEquals(expected, rs.getObject(1, PGlseg.class));
+        } finally {
+            rs.close();
+        }
     }
-  }
 
-  /**
-   * Test the behavior getObject for inet columns.
-   */
-  @Test
-  void getInetAddressNull() throws SQLException, UnknownHostException {
-    Statement stmt = conn.createStatement();
-    stmt.executeUpdate(TestUtil.insertSQL("table1", "inet_column", "NULL"));
+    /**
+     * Test the behavior getObject for box columns.
+     */
+    @Test
+    void getBox() throws SQLException {
+        Statement stmt = conn.createStatement();
+        PGbox expected = new PGbox(1.0d, 2.0d, 3.0d, 4.0d);
+        stmt.executeUpdate(TestUtil.insertSQL("table1", "box_column", "box '((1, 2), (3, 4))'"));
 
-    ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "inet_column"));
-    try {
-      assertTrue(rs.next());
-      assertNull(rs.getObject("inet_column", InetAddress.class));
-      assertNull(rs.getObject(1, InetAddress.class));
-    } finally {
-      rs.close();
+        ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "box_column"));
+        try {
+            assertTrue(rs.next());
+            assertEquals(expected, rs.getObject("box_column", PGbox.class));
+            assertEquals(expected, rs.getObject(1, PGbox.class));
+        } finally {
+            rs.close();
+        }
     }
-  }
 
-  private void testInet(String inet, InetAddress expectedAddr, String expectedText) throws SQLException, UnknownHostException {
-    PGobject expectedObj = new PGobject();
-    expectedObj.setType("inet");
-    expectedObj.setValue(expectedText);
-    Statement stmt = conn.createStatement();
-    ResultSet rs = stmt.executeQuery("SELECT '" + inet + "'::inet AS inet_column");
-    try {
-      assertTrue(rs.next());
-      assertEquals(expectedText, rs.getString(1), "The string value of the inet should match when fetched via getString(...)");
-      assertEquals(expectedText, rs.getString("inet_column"), "The string value of the inet should match when fetched via getString(...)");
-      assertEquals(expectedObj, rs.getObject(1), "The object value of the inet should match when fetched via getObject(...)");
-      assertEquals(expectedObj, rs.getObject("inet_column"), "The object value of the inet should match when fetched via getObject(...)");
-      assertEquals(expectedAddr, rs.getObject("inet_column", InetAddress.class), "The InetAddress value should match when fetched via getObject(..., InetAddress.class)");
-      assertEquals(expectedAddr, rs.getObject(1, InetAddress.class), "The InetAddress value should match when fetched via getObject(..., InetAddress.class)");
-    } finally {
-      rs.close();
-      stmt.close();
+    /**
+     * Test the behavior getObject for path columns.
+     */
+    @Test
+    void getPath() throws SQLException {
+        Statement stmt = conn.createStatement();
+        PGpath expected = new PGpath(new PGpoint[]{new PGpoint(1.0d, 2.0d), new PGpoint(3.0d, 4.0d)}, true);
+        stmt.executeUpdate(TestUtil.insertSQL("table1", "path_column", "path '[(1, 2), (3, 4)]'"));
+
+        ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "path_column"));
+        try {
+            assertTrue(rs.next());
+            assertEquals(expected, rs.getObject("path_column", PGpath.class));
+            assertEquals(expected, rs.getObject(1, PGpath.class));
+        } finally {
+            rs.close();
+        }
     }
-  }
 
-  /**
-   * Test the behavior getObject for ipv4 inet columns.
-   */
-  @Test
-  void getInet4Address() throws SQLException, UnknownHostException {
-    String inet = "192.168.100.128";
-    InetAddress addr = InetAddress.getByName(inet);
-    testInet(inet, addr, inet);
-    testInet(inet + "/16", addr, inet + "/16");
-    testInet(inet + "/32", addr, inet);
-  }
+    /**
+     * Test the behavior getObject for polygon columns.
+     */
+    @Test
+    void getPolygon() throws SQLException {
+        Statement stmt = conn.createStatement();
+        PGpolygon expected = new PGpolygon(new PGpoint[]{new PGpoint(1.0d, 2.0d), new PGpoint(3.0d, 4.0d)});
+        stmt.executeUpdate(TestUtil.insertSQL("table1", "polygon_column", "polygon '((1, 2), (3, 4))'"));
 
-  /**
-   * Test the behavior getObject for ipv6 inet columns.
-   */
-  @Test
-  void getInet6Address() throws SQLException, UnknownHostException {
-    String inet = "2001:4f8:3:ba:2e0:81ff:fe22:d1f1";
-    InetAddress addr = InetAddress.getByName(inet);
-    testInet(inet, addr, inet);
-    testInet(inet + "/16", addr, inet + "/16");
-    testInet(inet + "/128", addr, inet);
-  }
+        ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "polygon_column"));
+        try {
+            assertTrue(rs.next());
+            assertEquals(expected, rs.getObject("polygon_column", PGpolygon.class));
+            assertEquals(expected, rs.getObject(1, PGpolygon.class));
+        } finally {
+            rs.close();
+        }
+    }
+
+    /**
+     * Test the behavior getObject for circle columns.
+     */
+    @Test
+    void getCircle() throws SQLException {
+        Statement stmt = conn.createStatement();
+        PGcircle expected = new PGcircle(1.0d, 2.0d, 3.0d);
+        stmt.executeUpdate(TestUtil.insertSQL("table1", "circle_column", "circle '<(1, 2), 3>'"));
+
+        ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "circle_column"));
+        try {
+            assertTrue(rs.next());
+            assertEquals(expected, rs.getObject("circle_column", PGcircle.class));
+            assertEquals(expected, rs.getObject(1, PGcircle.class));
+        } finally {
+            rs.close();
+        }
+    }
+
+    /**
+     * Test the behavior getObject for interval columns.
+     */
+    @Test
+    void getInterval() throws SQLException {
+        Statement stmt = conn.createStatement();
+        PGInterval expected = new PGInterval(0, 0, 3, 4, 5, 6.0d);
+        stmt.executeUpdate(TestUtil.insertSQL("table1", "interval_column", "interval '3 4:05:06'"));
+
+        ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "interval_column"));
+        try {
+            assertTrue(rs.next());
+            assertEquals(expected, rs.getObject("interval_column", PGInterval.class));
+            assertEquals(expected, rs.getObject(1, PGInterval.class));
+        } finally {
+            rs.close();
+        }
+    }
+
+    /**
+     * Test the behavior getObject for uuid columns.
+     */
+    @Test
+    void getUuid() throws SQLException {
+        if (!TestUtil.haveMinimumServerVersion(conn, ServerVersion.v8_3)) {
+            // UUID requires PostgreSQL 8.3+
+            return;
+        }
+        Statement stmt = conn.createStatement();
+        String expected = "a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11";
+        stmt.executeUpdate(TestUtil.insertSQL("table1", "uuid_column", "'" + expected + "'"));
+
+        ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "uuid_column"));
+        try {
+            assertTrue(rs.next());
+            assertEquals(UUID.fromString(expected), rs.getObject("uuid_column", UUID.class));
+            assertEquals(UUID.fromString(expected), rs.getObject(1, UUID.class));
+        } finally {
+            rs.close();
+        }
+    }
+
+    /**
+     * Test the behavior getObject for inet columns.
+     */
+    @Test
+    void getInetAddressNull() throws SQLException, UnknownHostException {
+        Statement stmt = conn.createStatement();
+        stmt.executeUpdate(TestUtil.insertSQL("table1", "inet_column", "NULL"));
+
+        ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "inet_column"));
+        try {
+            assertTrue(rs.next());
+            assertNull(rs.getObject("inet_column", InetAddress.class));
+            assertNull(rs.getObject(1, InetAddress.class));
+        } finally {
+            rs.close();
+        }
+    }
+
+    private void testInet(String inet, InetAddress expectedAddr, String expectedText) throws SQLException, UnknownHostException {
+        PGobject expectedObj = new PGobject();
+        expectedObj.setType("inet");
+        expectedObj.setValue(expectedText);
+        Statement stmt = conn.createStatement();
+        ResultSet rs = stmt.executeQuery("SELECT '" + inet + "'::inet AS inet_column");
+        try {
+            assertTrue(rs.next());
+            assertEquals(expectedText, rs.getString(1), "The string value of the inet should match when fetched via getString(...)");
+            assertEquals(expectedText, rs.getString("inet_column"), "The string value of the inet should match when fetched via getString(...)");
+            assertEquals(expectedObj, rs.getObject(1), "The object value of the inet should match when fetched via getObject(...)");
+            assertEquals(expectedObj, rs.getObject("inet_column"), "The object value of the inet should match when fetched via getObject(...)");
+            assertEquals(expectedAddr, rs.getObject("inet_column", InetAddress.class), "The InetAddress value should match when fetched via getObject(..., InetAddress.class)");
+            assertEquals(expectedAddr, rs.getObject(1, InetAddress.class), "The InetAddress value should match when fetched via getObject(..., InetAddress.class)");
+        } finally {
+            rs.close();
+            stmt.close();
+        }
+    }
+
+    /**
+     * Test the behavior getObject for ipv4 inet columns.
+     */
+    @Test
+    void getInet4Address() throws SQLException, UnknownHostException {
+        String inet = "192.168.100.128";
+        InetAddress addr = InetAddress.getByName(inet);
+        testInet(inet, addr, inet);
+        testInet(inet + "/16", addr, inet + "/16");
+        testInet(inet + "/32", addr, inet);
+    }
+
+    /**
+     * Test the behavior getObject for ipv6 inet columns.
+     */
+    @Test
+    void getInet6Address() throws SQLException, UnknownHostException {
+        String inet = "2001:4f8:3:ba:2e0:81ff:fe22:d1f1";
+        InetAddress addr = InetAddress.getByName(inet);
+        testInet(inet, addr, inet);
+        testInet(inet + "/16", addr, inet + "/16");
+        testInet(inet + "/128", addr, inet);
+    }
 
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc4/jdbc41/Jdbc41TestSuite.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/jdbc41/Jdbc41TestSuite.java
index 921e5a4..2b82097 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc4/jdbc41/Jdbc41TestSuite.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/jdbc41/Jdbc41TestSuite.java
@@ -13,11 +13,11 @@ import org.junit.runners.Suite;
  */
 @RunWith(Suite.class)
 @Suite.SuiteClasses({
-    AbortTest.class,
-    CloseOnCompletionTest.class,
-    GetObjectTest.class,
-    NetworkTimeoutTest.class,
-    SchemaTest.class,
+        AbortTest.class,
+        CloseOnCompletionTest.class,
+        GetObjectTest.class,
+        NetworkTimeoutTest.class,
+        SchemaTest.class,
 })
 public class Jdbc41TestSuite {
 
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc4/jdbc41/NetworkTimeoutTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/jdbc41/NetworkTimeoutTest.java
index 24fed8d..65b15a8 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc4/jdbc41/NetworkTimeoutTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/jdbc41/NetworkTimeoutTest.java
@@ -20,51 +20,51 @@ import java.sql.Statement;
 import java.util.concurrent.TimeUnit;
 
 class NetworkTimeoutTest {
-  @Test
-  void setNetworkTimeout() throws Exception {
-    Connection conn = TestUtil.openDB();
-    assertDoesNotThrow(() -> {
-      conn.setNetworkTimeout(null, 0);
-    }, "Connection.setNetworkTimeout() throw exception");
-  }
-
-  @Test
-  void setNetworkTimeoutInvalid() throws Exception {
-    Connection conn = TestUtil.openDB();
-    try {
-      conn.setNetworkTimeout(null, -1);
-      fail("Connection.setNetworkTimeout() did not throw expected exception");
-    } catch (SQLException e) {
-      // Passed
-    } finally {
-      TestUtil.closeDB(conn);
+    @Test
+    void setNetworkTimeout() throws Exception {
+        Connection conn = TestUtil.openDB();
+        assertDoesNotThrow(() -> {
+            conn.setNetworkTimeout(null, 0);
+        }, "Connection.setNetworkTimeout() throw exception");
     }
-  }
 
-  @Test
-  void setNetworkTimeoutValid() throws Exception {
-    Connection conn = TestUtil.openDB();
-    assertDoesNotThrow(() -> {
-      conn.setNetworkTimeout(null, (int) TimeUnit.SECONDS.toMillis(5));
-      assertEquals(TimeUnit.SECONDS.toMillis(5), conn.getNetworkTimeout());
-    }, "Connection.setNetworkTimeout() throw exception");
-  }
-
-  @Test
-  void setNetworkTimeoutEnforcement() throws Exception {
-    Connection conn = TestUtil.openDB();
-    Statement stmt = null;
-    try {
-      conn.setNetworkTimeout(null, (int) TimeUnit.SECONDS.toMillis(1));
-      stmt = conn.createStatement();
-      stmt.execute("SELECT pg_sleep(2)");
-      fail("Connection.setNetworkTimeout() did not throw expected exception");
-    } catch (SQLException e) {
-      // assertTrue(stmt.isClosed());
-      assertTrue(conn.isClosed());
-    } finally {
-      TestUtil.closeQuietly(stmt);
-      TestUtil.closeDB(conn);
+    @Test
+    void setNetworkTimeoutInvalid() throws Exception {
+        Connection conn = TestUtil.openDB();
+        try {
+            conn.setNetworkTimeout(null, -1);
+            fail("Connection.setNetworkTimeout() did not throw expected exception");
+        } catch (SQLException e) {
+            // Passed
+        } finally {
+            TestUtil.closeDB(conn);
+        }
+    }
+
+    @Test
+    void setNetworkTimeoutValid() throws Exception {
+        Connection conn = TestUtil.openDB();
+        assertDoesNotThrow(() -> {
+            conn.setNetworkTimeout(null, (int) TimeUnit.SECONDS.toMillis(5));
+            assertEquals(TimeUnit.SECONDS.toMillis(5), conn.getNetworkTimeout());
+        }, "Connection.setNetworkTimeout() throw exception");
+    }
+
+    @Test
+    void setNetworkTimeoutEnforcement() throws Exception {
+        Connection conn = TestUtil.openDB();
+        Statement stmt = null;
+        try {
+            conn.setNetworkTimeout(null, (int) TimeUnit.SECONDS.toMillis(1));
+            stmt = conn.createStatement();
+            stmt.execute("SELECT pg_sleep(2)");
+            fail("Connection.setNetworkTimeout() did not throw expected exception");
+        } catch (SQLException e) {
+            // assertTrue(stmt.isClosed());
+            assertTrue(conn.isClosed());
+        } finally {
+            TestUtil.closeQuietly(stmt);
+            TestUtil.closeDB(conn);
+        }
     }
-  }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc4/jdbc41/SchemaTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/jdbc41/SchemaTest.java
index be64b47..ea81463 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc4/jdbc41/SchemaTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc4/jdbc41/SchemaTest.java
@@ -31,287 +31,287 @@ import java.sql.Types;
 import java.util.Properties;
 
 class SchemaTest {
-  private Connection conn;
-  private boolean dropUserSchema;
+    private Connection conn;
+    private boolean dropUserSchema;
 
-  @BeforeEach
-  void setUp() throws Exception {
-    conn = TestUtil.openDB();
-    Statement stmt = conn.createStatement();
-    try {
-      stmt.execute("CREATE SCHEMA " + TestUtil.getUser());
-      dropUserSchema = true;
-    } catch (SQLException e) {
-      /* assume schema existed */
+    @BeforeEach
+    void setUp() throws Exception {
+        conn = TestUtil.openDB();
+        Statement stmt = conn.createStatement();
+        try {
+            stmt.execute("CREATE SCHEMA " + TestUtil.getUser());
+            dropUserSchema = true;
+        } catch (SQLException e) {
+            /* assume schema existed */
+        }
+        stmt.execute("CREATE SCHEMA schema1");
+        stmt.execute("CREATE SCHEMA schema2");
+        stmt.execute("CREATE SCHEMA \"schema 3\"");
+        stmt.execute("CREATE SCHEMA \"schema \"\"4\"");
+        stmt.execute("CREATE SCHEMA \"schema '5\"");
+        stmt.execute("CREATE SCHEMA \"schema ,6\"");
+        stmt.execute("CREATE SCHEMA \"UpperCase\"");
+        TestUtil.createTable(conn, "schema1.table1", "id integer");
+        TestUtil.createTable(conn, "schema2.table2", "id integer");
+        TestUtil.createTable(conn, "\"UpperCase\".table3", "id integer");
+        TestUtil.createTable(conn, "schema1.sptest", "id integer");
+        TestUtil.createTable(conn, "schema2.sptest", "id varchar");
     }
-    stmt.execute("CREATE SCHEMA schema1");
-    stmt.execute("CREATE SCHEMA schema2");
-    stmt.execute("CREATE SCHEMA \"schema 3\"");
-    stmt.execute("CREATE SCHEMA \"schema \"\"4\"");
-    stmt.execute("CREATE SCHEMA \"schema '5\"");
-    stmt.execute("CREATE SCHEMA \"schema ,6\"");
-    stmt.execute("CREATE SCHEMA \"UpperCase\"");
-    TestUtil.createTable(conn, "schema1.table1", "id integer");
-    TestUtil.createTable(conn, "schema2.table2", "id integer");
-    TestUtil.createTable(conn, "\"UpperCase\".table3", "id integer");
-    TestUtil.createTable(conn, "schema1.sptest", "id integer");
-    TestUtil.createTable(conn, "schema2.sptest", "id varchar");
-  }
 
-  @AfterEach
-  void tearDown() throws SQLException {
-    conn.setAutoCommit(true);
-    conn.setSchema(null);
-    Statement stmt = conn.createStatement();
-    if (dropUserSchema) {
-      stmt.execute("DROP SCHEMA " + TestUtil.getUser() + " CASCADE");
+    @AfterEach
+    void tearDown() throws SQLException {
+        conn.setAutoCommit(true);
+        conn.setSchema(null);
+        Statement stmt = conn.createStatement();
+        if (dropUserSchema) {
+            stmt.execute("DROP SCHEMA " + TestUtil.getUser() + " CASCADE");
+        }
+        stmt.execute("DROP SCHEMA schema1 CASCADE");
+        stmt.execute("DROP SCHEMA schema2 CASCADE");
+        stmt.execute("DROP SCHEMA \"schema 3\" CASCADE");
+        stmt.execute("DROP SCHEMA \"schema \"\"4\" CASCADE");
+        stmt.execute("DROP SCHEMA \"schema '5\" CASCADE");
+        stmt.execute("DROP SCHEMA \"schema ,6\"");
+        stmt.execute("DROP SCHEMA \"UpperCase\" CASCADE");
+        TestUtil.closeDB(conn);
     }
-    stmt.execute("DROP SCHEMA schema1 CASCADE");
-    stmt.execute("DROP SCHEMA schema2 CASCADE");
-    stmt.execute("DROP SCHEMA \"schema 3\" CASCADE");
-    stmt.execute("DROP SCHEMA \"schema \"\"4\" CASCADE");
-    stmt.execute("DROP SCHEMA \"schema '5\" CASCADE");
-    stmt.execute("DROP SCHEMA \"schema ,6\"");
-    stmt.execute("DROP SCHEMA \"UpperCase\" CASCADE");
-    TestUtil.closeDB(conn);
-  }
 
-  /**
-   * Test that what you set is what you get.
-   */
-  @Test
-  void getSetSchema() throws SQLException {
-    conn.setSchema("schema1");
-    assertEquals("schema1", conn.getSchema());
-    conn.setSchema("schema2");
-    assertEquals("schema2", conn.getSchema());
-    conn.setSchema("schema 3");
-    assertEquals("schema 3", conn.getSchema());
-    conn.setSchema("schema \"4");
-    assertEquals("schema \"4", conn.getSchema());
-    conn.setSchema("schema '5");
-    assertEquals("schema '5", conn.getSchema());
-    conn.setSchema("UpperCase");
-    assertEquals("UpperCase", conn.getSchema());
-  }
-
-  /**
-   * Test that setting the schema allows to access objects of this schema without prefix, hide
-   * objects from other schemas but doesn't prevent to prefix-access to them.
-   */
-  @Test
-  void usingSchema() throws SQLException {
-    Statement stmt = conn.createStatement();
-    try {
-      assertDoesNotThrow(() -> {
+    /**
+     * Test that what you set is what you get.
+     */
+    @Test
+    void getSetSchema() throws SQLException {
         conn.setSchema("schema1");
-        stmt.executeQuery(TestUtil.selectSQL("table1", "*"));
-        stmt.executeQuery(TestUtil.selectSQL("schema2.table2", "*"));
-        try {
-          stmt.executeQuery(TestUtil.selectSQL("table2", "*"));
-          fail("Objects of schema2 should not be visible without prefix");
-        } catch (SQLException e) {
-          // expected
-        }
-
+        assertEquals("schema1", conn.getSchema());
         conn.setSchema("schema2");
-        stmt.executeQuery(TestUtil.selectSQL("table2", "*"));
-        stmt.executeQuery(TestUtil.selectSQL("schema1.table1", "*"));
-        try {
-          stmt.executeQuery(TestUtil.selectSQL("table1", "*"));
-          fail("Objects of schema1 should not be visible without prefix");
-        } catch (SQLException e) {
-          // expected
-        }
-
+        assertEquals("schema2", conn.getSchema());
+        conn.setSchema("schema 3");
+        assertEquals("schema 3", conn.getSchema());
+        conn.setSchema("schema \"4");
+        assertEquals("schema \"4", conn.getSchema());
+        conn.setSchema("schema '5");
+        assertEquals("schema '5", conn.getSchema());
         conn.setSchema("UpperCase");
-        stmt.executeQuery(TestUtil.selectSQL("table3", "*"));
-        stmt.executeQuery(TestUtil.selectSQL("schema1.table1", "*"));
+        assertEquals("UpperCase", conn.getSchema());
+    }
+
+    /**
+     * Test that setting the schema allows to access objects of this schema without prefix, hide
+     * objects from other schemas but doesn't prevent to prefix-access to them.
+     */
+    @Test
+    void usingSchema() throws SQLException {
+        Statement stmt = conn.createStatement();
         try {
-          stmt.executeQuery(TestUtil.selectSQL("table1", "*"));
-          fail("Objects of schema1 should not be visible without prefix");
-        } catch (SQLException e) {
-          // expected
+            assertDoesNotThrow(() -> {
+                conn.setSchema("schema1");
+                stmt.executeQuery(TestUtil.selectSQL("table1", "*"));
+                stmt.executeQuery(TestUtil.selectSQL("schema2.table2", "*"));
+                try {
+                    stmt.executeQuery(TestUtil.selectSQL("table2", "*"));
+                    fail("Objects of schema2 should not be visible without prefix");
+                } catch (SQLException e) {
+                    // expected
+                }
+
+                conn.setSchema("schema2");
+                stmt.executeQuery(TestUtil.selectSQL("table2", "*"));
+                stmt.executeQuery(TestUtil.selectSQL("schema1.table1", "*"));
+                try {
+                    stmt.executeQuery(TestUtil.selectSQL("table1", "*"));
+                    fail("Objects of schema1 should not be visible without prefix");
+                } catch (SQLException e) {
+                    // expected
+                }
+
+                conn.setSchema("UpperCase");
+                stmt.executeQuery(TestUtil.selectSQL("table3", "*"));
+                stmt.executeQuery(TestUtil.selectSQL("schema1.table1", "*"));
+                try {
+                    stmt.executeQuery(TestUtil.selectSQL("table1", "*"));
+                    fail("Objects of schema1 should not be visible without prefix");
+                } catch (SQLException e) {
+                    // expected
+                }
+            }, "Could not find expected schema elements: ");
+        } finally {
+            try {
+                stmt.close();
+            } catch (SQLException e) {
+            }
         }
-      }, "Could not find expected schema elements: ");
-    } finally {
-      try {
-        stmt.close();
-      } catch (SQLException e) {
-      }
     }
-  }
 
-  /**
-   * Test that get schema returns the schema with the highest priority in the search path.
-   */
-  @Test
-  void multipleSearchPath() throws SQLException {
-    execute("SET search_path TO schema1,schema2");
-    assertEquals("schema1", conn.getSchema());
+    /**
+     * Test that get schema returns the schema with the highest priority in the search path.
+     */
+    @Test
+    void multipleSearchPath() throws SQLException {
+        execute("SET search_path TO schema1,schema2");
+        assertEquals("schema1", conn.getSchema());
 
-    execute("SET search_path TO \"schema ,6\",schema2");
-    assertEquals("schema ,6", conn.getSchema());
-  }
-
-  @Test
-  void schemaInProperties() throws Exception {
-    Properties properties = new Properties();
-    properties.setProperty("currentSchema", "schema1");
-    Connection conn = TestUtil.openDB(properties);
-    try {
-      assertEquals("schema1", conn.getSchema());
-
-      Statement stmt = conn.createStatement();
-      stmt.executeQuery(TestUtil.selectSQL("table1", "*"));
-      stmt.executeQuery(TestUtil.selectSQL("schema2.table2", "*"));
-      try {
-        stmt.executeQuery(TestUtil.selectSQL("table2", "*"));
-        fail("Objects of schema2 should not be visible without prefix");
-      } catch (SQLException e) {
-        // expected
-      }
-    } finally {
-      TestUtil.closeDB(conn);
+        execute("SET search_path TO \"schema ,6\",schema2");
+        assertEquals("schema ,6", conn.getSchema());
     }
-  }
 
-  @Test
-  public void schemaPath$User() throws Exception {
-    execute("SET search_path TO \"$user\",public,schema2");
-    assertEquals(TestUtil.getUser(), conn.getSchema());
-  }
+    @Test
+    void schemaInProperties() throws Exception {
+        Properties properties = new Properties();
+        properties.setProperty("currentSchema", "schema1");
+        Connection conn = TestUtil.openDB(properties);
+        try {
+            assertEquals("schema1", conn.getSchema());
 
-  private void execute(String sql) throws SQLException {
-    Statement stmt = conn.createStatement();
-    try {
-      stmt.execute(sql);
-    } finally {
-      try {
-        stmt.close();
-      } catch (SQLException e) {
-      }
+            Statement stmt = conn.createStatement();
+            stmt.executeQuery(TestUtil.selectSQL("table1", "*"));
+            stmt.executeQuery(TestUtil.selectSQL("schema2.table2", "*"));
+            try {
+                stmt.executeQuery(TestUtil.selectSQL("table2", "*"));
+                fail("Objects of schema2 should not be visible without prefix");
+            } catch (SQLException e) {
+                // expected
+            }
+        } finally {
+            TestUtil.closeDB(conn);
+        }
     }
-  }
 
-  @Test
-  void searchPathPreparedStatementAutoCommitFalse() throws SQLException {
-    conn.setAutoCommit(false);
-    searchPathPreparedStatementAutoCommitTrue();
-  }
-
-  @Test
-  void searchPathPreparedStatementAutoCommitTrue() throws SQLException {
-    searchPathPreparedStatement();
-  }
-
-  @Test
-  void searchPathPreparedStatement() throws SQLException {
-    execute("set search_path to schema1,public");
-    PreparedStatement ps = conn.prepareStatement("select * from sptest");
-    for (int i = 0; i < 10; i++) {
-      ps.execute();
+    @Test
+    public void schemaPath$User() throws Exception {
+        execute("SET search_path TO \"$user\",public,schema2");
+        assertEquals(TestUtil.getUser(), conn.getSchema());
     }
-    assertColType(ps, "sptest should point to schema1.sptest, thus column type should be INT",
-        Types.INTEGER);
-    ps.close();
-    execute("set search_path to schema2,public");
-    ps = conn.prepareStatement("select * from sptest");
-    assertColType(ps, "sptest should point to schema2.sptest, thus column type should be VARCHAR",
-        Types.VARCHAR);
-    ps.close();
-  }
 
-  @Test
-  void currentSchemaPropertyVisibilityTableDuringFunctionCreation() throws SQLException {
-    Properties properties = new Properties();
-    properties.setProperty(PGProperty.CURRENT_SCHEMA.getName(), "public,schema1,schema2");
-    Connection connection = TestUtil.openDB(properties);
-
-    TestUtil.execute(connection, "create table schema1.check_table (test_col text)");
-    TestUtil.execute(connection, "insert into schema1.check_table (test_col) values ('test_value')");
-    TestUtil.execute(connection, "create or replace function schema2.check_fun () returns text as $$"
-        + " select test_col from check_table"
-        + "$$ language sql stable");
-    connection.close();
-  }
-
-  @Test
-  void currentSchemaPropertyNotVisibilityTableDuringFunctionCreation() throws SQLException {
-    Properties properties = new Properties();
-    properties.setProperty(PGProperty.CURRENT_SCHEMA.getName(), "public,schema2");
-
-    try (Connection connection = TestUtil.openDB(properties)) {
-      TestUtil.execute(connection, "create table schema1.check_table (test_col text)");
-      TestUtil.execute(connection, "insert into schema1.check_table (test_col) values ('test_value')");
-      TestUtil.execute(connection, "create or replace function schema2.check_fun (txt text) returns text as $$"
-          + " select test_col from check_table"
-          + "$$ language sql immutable");
-    } catch (PSQLException e) {
-      String sqlState = e.getSQLState();
-      String message = e.getMessage();
-      assertThat("Test creates function in schema 'schema2' and this function try use table \"check_table\" "
-            + "from schema 'schema1'. We expect here sql error code - "
-            + PSQLState.UNDEFINED_TABLE + ", because search_path does not contains schema 'schema1' and "
-            + "postgres does not see table \"check_table\"",
-            sqlState,
-            equalTo(PSQLState.UNDEFINED_TABLE.getState())
-      );
-      assertThat(
-          "Test creates function in schema 'schema2' and this function try use table \"check_table\" "
-              + "from schema 'schema1'. We expect here that sql error message will be contains \"check_table\", "
-              + "because search_path does not contains schema 'schema1' and postgres does not see "
-              + "table \"check_table\"",
-            message,
-            containsString("\"check_table\"")
-      );
+    private void execute(String sql) throws SQLException {
+        Statement stmt = conn.createStatement();
+        try {
+            stmt.execute(sql);
+        } finally {
+            try {
+                stmt.close();
+            } catch (SQLException e) {
+            }
+        }
     }
-  }
 
-  @Test
-  void currentSchemaPropertyVisibilityFunction() throws SQLException {
-    currentSchemaPropertyVisibilityTableDuringFunctionCreation();
-    Properties properties = new Properties();
-    properties.setProperty(PGProperty.CURRENT_SCHEMA.getName(), "public,schema1,schema2");
-    Connection connection = TestUtil.openDB(properties);
-
-    TestUtil.execute(connection, "select check_fun()");
-    connection.close();
-  }
-
-  @Test
-  void currentSchemaPropertyNotVisibilityTableInsideFunction() throws SQLException {
-    currentSchemaPropertyVisibilityTableDuringFunctionCreation();
-    Properties properties = new Properties();
-    properties.setProperty(PGProperty.CURRENT_SCHEMA.getName(), "public,schema2");
-
-    try (Connection connection = TestUtil.openDB(properties)) {
-      TestUtil.execute(connection, "select check_fun()");
-    } catch (PSQLException e) {
-      String sqlState = e.getSQLState();
-      String message = e.getMessage();
-      assertThat("Test call function in schema 'schema2' and this function uses table \"check_table\" "
-            + "from schema 'schema1'. We expect here sql error code - " + PSQLState.UNDEFINED_TABLE + ", "
-            + "because search_path does not contains schema 'schema1' and postgres does not see table \"check_table\".",
-            sqlState,
-            equalTo(PSQLState.UNDEFINED_TABLE.getState())
-      );
-      assertThat(
-          "Test call function in schema 'schema2' and this function uses table \"check_table\" "
-              + "from schema 'schema1'. We expect here that sql error message will be contains \"check_table\", because "
-              + " search_path does not contains schema 'schema1' and postgres does not see table \"check_table\"",
-          message,
-          containsString("\"check_table\"")
-      );
+    @Test
+    void searchPathPreparedStatementAutoCommitFalse() throws SQLException {
+        conn.setAutoCommit(false);
+        searchPathPreparedStatementAutoCommitTrue();
     }
-  }
 
-  private void assertColType(PreparedStatement ps, String message, int expected) throws SQLException {
-    ResultSet rs = ps.executeQuery();
-    ResultSetMetaData md = rs.getMetaData();
-    int columnType = md.getColumnType(1);
-    assertEquals(expected, columnType, message);
-    rs.close();
-  }
+    @Test
+    void searchPathPreparedStatementAutoCommitTrue() throws SQLException {
+        searchPathPreparedStatement();
+    }
+
+    @Test
+    void searchPathPreparedStatement() throws SQLException {
+        execute("set search_path to schema1,public");
+        PreparedStatement ps = conn.prepareStatement("select * from sptest");
+        for (int i = 0; i < 10; i++) {
+            ps.execute();
+        }
+        assertColType(ps, "sptest should point to schema1.sptest, thus column type should be INT",
+                Types.INTEGER);
+        ps.close();
+        execute("set search_path to schema2,public");
+        ps = conn.prepareStatement("select * from sptest");
+        assertColType(ps, "sptest should point to schema2.sptest, thus column type should be VARCHAR",
+                Types.VARCHAR);
+        ps.close();
+    }
+
+    @Test
+    void currentSchemaPropertyVisibilityTableDuringFunctionCreation() throws SQLException {
+        Properties properties = new Properties();
+        properties.setProperty(PGProperty.CURRENT_SCHEMA.getName(), "public,schema1,schema2");
+        Connection connection = TestUtil.openDB(properties);
+
+        TestUtil.execute(connection, "create table schema1.check_table (test_col text)");
+        TestUtil.execute(connection, "insert into schema1.check_table (test_col) values ('test_value')");
+        TestUtil.execute(connection, "create or replace function schema2.check_fun () returns text as $$"
+                + " select test_col from check_table"
+                + "$$ language sql stable");
+        connection.close();
+    }
+
+    @Test
+    void currentSchemaPropertyNotVisibilityTableDuringFunctionCreation() throws SQLException {
+        Properties properties = new Properties();
+        properties.setProperty(PGProperty.CURRENT_SCHEMA.getName(), "public,schema2");
+
+        try (Connection connection = TestUtil.openDB(properties)) {
+            TestUtil.execute(connection, "create table schema1.check_table (test_col text)");
+            TestUtil.execute(connection, "insert into schema1.check_table (test_col) values ('test_value')");
+            TestUtil.execute(connection, "create or replace function schema2.check_fun (txt text) returns text as $$"
+                    + " select test_col from check_table"
+                    + "$$ language sql immutable");
+        } catch (PSQLException e) {
+            String sqlState = e.getSQLState();
+            String message = e.getMessage();
+            assertThat("Test creates function in schema 'schema2' and this function try use table \"check_table\" "
+                            + "from schema 'schema1'. We expect here sql error code - "
+                            + PSQLState.UNDEFINED_TABLE + ", because search_path does not contains schema 'schema1' and "
+                            + "postgres does not see table \"check_table\"",
+                    sqlState,
+                    equalTo(PSQLState.UNDEFINED_TABLE.getState())
+            );
+            assertThat(
+                    "Test creates function in schema 'schema2' and this function try use table \"check_table\" "
+                            + "from schema 'schema1'. We expect here that sql error message will be contains \"check_table\", "
+                            + "because search_path does not contains schema 'schema1' and postgres does not see "
+                            + "table \"check_table\"",
+                    message,
+                    containsString("\"check_table\"")
+            );
+        }
+    }
+
+    @Test
+    void currentSchemaPropertyVisibilityFunction() throws SQLException {
+        currentSchemaPropertyVisibilityTableDuringFunctionCreation();
+        Properties properties = new Properties();
+        properties.setProperty(PGProperty.CURRENT_SCHEMA.getName(), "public,schema1,schema2");
+        Connection connection = TestUtil.openDB(properties);
+
+        TestUtil.execute(connection, "select check_fun()");
+        connection.close();
+    }
+
+    @Test
+    void currentSchemaPropertyNotVisibilityTableInsideFunction() throws SQLException {
+        currentSchemaPropertyVisibilityTableDuringFunctionCreation();
+        Properties properties = new Properties();
+        properties.setProperty(PGProperty.CURRENT_SCHEMA.getName(), "public,schema2");
+
+        try (Connection connection = TestUtil.openDB(properties)) {
+            TestUtil.execute(connection, "select check_fun()");
+        } catch (PSQLException e) {
+            String sqlState = e.getSQLState();
+            String message = e.getMessage();
+            assertThat("Test call function in schema 'schema2' and this function uses table \"check_table\" "
+                            + "from schema 'schema1'. We expect here sql error code - " + PSQLState.UNDEFINED_TABLE + ", "
+                            + "because search_path does not contains schema 'schema1' and postgres does not see table \"check_table\".",
+                    sqlState,
+                    equalTo(PSQLState.UNDEFINED_TABLE.getState())
+            );
+            assertThat(
+                    "Test call function in schema 'schema2' and this function uses table \"check_table\" "
+                            + "from schema 'schema1'. We expect here that sql error message will be contains \"check_table\", because "
+                            + " search_path does not contains schema 'schema1' and postgres does not see table \"check_table\"",
+                    message,
+                    containsString("\"check_table\"")
+            );
+        }
+    }
+
+    private void assertColType(PreparedStatement ps, String message, int expected) throws SQLException {
+        ResultSet rs = ps.executeQuery();
+        ResultSetMetaData md = rs.getMetaData();
+        int columnType = md.getColumnType(1);
+        assertEquals(expected, columnType, message);
+        rs.close();
+    }
 
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc42/AdaptiveFetchSizeTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc42/AdaptiveFetchSizeTest.java
index ae19db7..c2cbc31 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc42/AdaptiveFetchSizeTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc42/AdaptiveFetchSizeTest.java
@@ -28,281 +28,280 @@ import java.util.Properties;
  */
 class AdaptiveFetchSizeTest {
 
-  private Connection connection;
-  private PreparedStatement statement;
-  private ResultSet resultSet;
+    private final String table = "test_adaptive_fetch";
+    private final String columns = "value VARCHAR";
+    private Connection connection;
+    private PreparedStatement statement;
+    private ResultSet resultSet;
 
-  private final String table = "test_adaptive_fetch";
-  private final String columns = "value VARCHAR";
-
-  /**
-   * Drop table and close connection.
-   */
-  @AfterEach
-  void tearDown() throws SQLException {
-    if (connection != null && !connection.isClosed()) {
-      connection.setAutoCommit(true);
-      if (resultSet != null) {
-        resultSet.close();
-      }
-      if (statement != null) {
-        statement.close();
-      }
-      TestUtil.dropTable(connection, table);
-      TestUtil.closeDB(connection);
-    }
-  }
-
-  /**
-   * Simple integration test. At start created is table with rows sizes like 4 x 35B, 1 x 40B, 45 x
-   * 30B. Next fetching is happening. Starting fetch is using default fetch size, so it returns 4
-   * rows. After reading 4 rows, new fetch size is computed. As biggest rows size so far was 35B,
-   * then 300/35B = 8 rows. Next fetch is done with 8 rows. First row in this fetch has size 40B,
-   * which gonna change fetch size to 7 rows (300/40B = 7), next fetch reads won't change size and 7
-   * will be used to the end.
-   * To check if this works correctly checked is:
-   * - if starting 4 rows from result set have fetch size as 4;
-   * - if next 8 rows from result set have fetch size as 8;
-   * - if next 38 rows from result set have fetch size as 7;
-   * - check if all 50 rows were read.
-   */
-  @Test
-  void adaptiveFetching() throws SQLException {
-    int startFetchSize = 4;
-    int expectedFirstSize = 8;
-    int expectedSecondSize = 7;
-    int expectedCounter = 50;
-    int resultCounter = 0;
-
-    Properties properties = new Properties();
-    PGProperty.DEFAULT_ROW_FETCH_SIZE.set(properties, startFetchSize);
-    PGProperty.MAX_RESULT_BUFFER.set(properties, "300");
-    PGProperty.ADAPTIVE_FETCH.set(properties, true);
-
-    openConnectionAndCreateTable(properties);
-
-    for (int i = 0; i < expectedCounter; i++) {
-      if (i == 4) {
-        addStringWithSize(40);
-      } else {
-        addStringWithSize(35);
-      }
+    /**
+     * Drop table and close connection.
+     */
+    @AfterEach
+    void tearDown() throws SQLException {
+        if (connection != null && !connection.isClosed()) {
+            connection.setAutoCommit(true);
+            if (resultSet != null) {
+                resultSet.close();
+            }
+            if (statement != null) {
+                statement.close();
+            }
+            TestUtil.dropTable(connection, table);
+            TestUtil.closeDB(connection);
+        }
     }
 
-    executeFetchingQuery();
+    /**
+     * Simple integration test. A table is created with rows of sizes 4 x 35B, 1 x 40B, 45 x
+     * 30B, and then fetching starts. The first fetch uses the default fetch size, so it returns 4
+     * rows. After reading 4 rows, a new fetch size is computed. As the biggest row so far was 35B,
+     * the size becomes 300/35B = 8 rows. The next fetch reads 8 rows; its first row is 40B,
+     * which changes the fetch size to 7 rows (300/40B = 7). Subsequent fetches keep size 7
+     * until the end.
+     * The test verifies that:
+     * - the first 4 rows of the result set report a fetch size of 4;
+     * - the next 8 rows report a fetch size of 8;
+     * - the next 38 rows report a fetch size of 7;
+     * - all 50 rows were read.
+     */
+    @Test
+    void adaptiveFetching() throws SQLException {
+        int startFetchSize = 4;
+        int expectedFirstSize = 8;
+        int expectedSecondSize = 7;
+        int expectedCounter = 50;
+        int resultCounter = 0;
 
-    for (int i = 0; i < 4; i++) {
-      resultSet.next();
-      resultCounter++;
-      assertEquals(startFetchSize, resultSet.getFetchSize());
-    }
-    for (int i = 0; i < 8; i++) {
-      resultSet.next();
-      resultCounter++;
-      assertEquals(expectedFirstSize, resultSet.getFetchSize());
-    }
-    while (resultSet.next()) {
-      resultCounter++;
-      assertEquals(expectedSecondSize, resultSet.getFetchSize());
+        Properties properties = new Properties();
+        PGProperty.DEFAULT_ROW_FETCH_SIZE.set(properties, startFetchSize);
+        PGProperty.MAX_RESULT_BUFFER.set(properties, "300");
+        PGProperty.ADAPTIVE_FETCH.set(properties, true);
+
+        openConnectionAndCreateTable(properties);
+
+        for (int i = 0; i < expectedCounter; i++) {
+            if (i == 4) {
+                addStringWithSize(40);
+            } else {
+                addStringWithSize(35);
+            }
+        }
+
+        executeFetchingQuery();
+
+        for (int i = 0; i < 4; i++) {
+            resultSet.next();
+            resultCounter++;
+            assertEquals(startFetchSize, resultSet.getFetchSize());
+        }
+        for (int i = 0; i < 8; i++) {
+            resultSet.next();
+            resultCounter++;
+            assertEquals(expectedFirstSize, resultSet.getFetchSize());
+        }
+        while (resultSet.next()) {
+            resultCounter++;
+            assertEquals(expectedSecondSize, resultSet.getFetchSize());
+        }
+
+        assertEquals(expectedCounter, resultCounter);
     }
 
-    assertEquals(expectedCounter, resultCounter);
-  }
+    /**
+     * Checks that the minimum size is honored during adaptive fetching. The table
+     * is filled with 50 rows of sizes 1x270B and 49x10B. The first fetch uses the default size
+     * 4. As the first row of the result set is 270B, the computed size would be 1 (300/270 = 1),
+     * but the minimum size of 10 forces the next fetch to be done with size 10. After
+     * that the fetch size should not change until the end.
+     * The test verifies that:
+     * - the first 4 rows of the result set report a fetch size of 4;
+     * - the next 46 rows report a fetch size of 10;
+     * - all 50 rows were read.
+     */
+    @Test
+    void adaptiveFetchingWithMinimumSize() throws SQLException {
+        int startFetchSize = 4;
+        int expectedSize = 10;
+        int expectedCounter = 50;
+        int resultCounter = 0;
 
-  /**
-   * The main purpose of this set is to check if minimum size was used during adaptive fetching. To
-   * a table are added 50 rows with sizes: 1x270B, 49x10B. Starting fetch is done with default size
-   * 4. As first row from result set have size 270B, then computed size should be 1 (300/270 = 1),
-   * however minimum size set to 10 should make that next fetch should be done with size 10. After
-   * this fetch size shouldn't change to the end.
-   * To check if this works correctly checked is:
-   * - if starting 4 rows from result set have fetch size as 4;
-   * - if next 46 rows from result set have fetch size as 10;
-   * - check if all 50 rows were read.
-   */
-  @Test
-  void adaptiveFetchingWithMinimumSize() throws SQLException {
-    int startFetchSize = 4;
-    int expectedSize = 10;
-    int expectedCounter = 50;
-    int resultCounter = 0;
+        Properties properties = new Properties();
+        PGProperty.DEFAULT_ROW_FETCH_SIZE.set(properties, startFetchSize);
+        PGProperty.MAX_RESULT_BUFFER.set(properties, "300");
+        PGProperty.ADAPTIVE_FETCH.set(properties, true);
+        PGProperty.ADAPTIVE_FETCH_MINIMUM.set(properties, expectedSize);
 
-    Properties properties = new Properties();
-    PGProperty.DEFAULT_ROW_FETCH_SIZE.set(properties, startFetchSize);
-    PGProperty.MAX_RESULT_BUFFER.set(properties, "300");
-    PGProperty.ADAPTIVE_FETCH.set(properties, true);
-    PGProperty.ADAPTIVE_FETCH_MINIMUM.set(properties, expectedSize);
+        openConnectionAndCreateTable(properties);
 
-    openConnectionAndCreateTable(properties);
+        for (int i = 0; i < expectedCounter; i++) {
+            if (i == 0) {
+                addStringWithSize(270);
+            } else {
+                addStringWithSize(10);
+            }
+        }
 
-    for (int i = 0; i < expectedCounter; i++) {
-      if (i == 0) {
-        addStringWithSize(270);
-      } else {
-        addStringWithSize(10);
-      }
+        executeFetchingQuery();
+
+        for (int i = 0; i < 4; i++) {
+            resultSet.next();
+            resultCounter++;
+            assertEquals(startFetchSize, resultSet.getFetchSize());
+        }
+        while (resultSet.next()) {
+            resultCounter++;
+            assertEquals(expectedSize, resultSet.getFetchSize());
+        }
+
+        assertEquals(expectedCounter, resultCounter);
     }
 
-    executeFetchingQuery();
+    /**
+     * Checks that the maximum size is honored during adaptive fetching. The table
+     * is filled with 50 rows of sizes 4x10B and 46x30B. The first fetch uses the default size
+     * 4. As that fetch sees only 10B rows, the computed fetch size would be 30 (300/10 =
+     * 30), but the maximum size of 10 forces the next fetch to be done with size 10 (otherwise
+     * the following rows would exceed the size of maxResultBuffer). After that the fetch size
+     * should not change until the end.
+     * The test verifies that:
+     * - the first 4 rows of the result set report a fetch size of 4;
+     * - the next 46 rows report a fetch size of 10;
+     * - all 50 rows were read.
+     */
+    @Test
+    void adaptiveFetchingWithMaximumSize() throws SQLException {
+        int startFetchSize = 4;
+        int expectedSize = 10;
+        int expectedCounter = 50;
+        int resultCounter = 0;
 
-    for (int i = 0; i < 4; i++) {
-      resultSet.next();
-      resultCounter++;
-      assertEquals(startFetchSize, resultSet.getFetchSize());
-    }
-    while (resultSet.next()) {
-      resultCounter++;
-      assertEquals(expectedSize, resultSet.getFetchSize());
+        Properties properties = new Properties();
+        PGProperty.DEFAULT_ROW_FETCH_SIZE.set(properties, startFetchSize);
+        PGProperty.MAX_RESULT_BUFFER.set(properties, "300");
+        PGProperty.ADAPTIVE_FETCH.set(properties, true);
+        PGProperty.ADAPTIVE_FETCH_MAXIMUM.set(properties, expectedSize);
+
+        openConnectionAndCreateTable(properties);
+
+        for (int i = 0; i < expectedCounter; i++) {
+            if (i < 4) {
+                addStringWithSize(10);
+            } else {
+                addStringWithSize(30);
+            }
+        }
+
+        executeFetchingQuery();
+
+        for (int i = 0; i < 4; i++) {
+            resultSet.next();
+            resultCounter++;
+            assertEquals(startFetchSize, resultSet.getFetchSize());
+        }
+        while (resultSet.next()) {
+            resultCounter++;
+            assertEquals(expectedSize, resultSet.getFetchSize());
+        }
+
+        assertEquals(expectedCounter, resultCounter);
     }
 
-    assertEquals(expectedCounter, resultCounter);
-  }
+    /**
+     * Performs fetching with the largest possible buffer. The table is filled with
+     * 1000 rows of 10B each. The first fetch uses the default size 4; the next
+     * fetch size is then computed from maxResultBuffer, so the second fetch is most likely
+     * the last one.
+     * The test verifies that:
+     * - the first 4 rows of the result set report a fetch size of 4;
+     * - the next 996 rows report a fetch size computed from the maximum value of
+     * maxResultBuffer;
+     * - all 1000 rows were read.
+     */
+    @Test
+    void adaptiveFetchingWithMoreData() throws SQLException {
+        int startFetchSize = 4;
+        int expectedCounter = 1000;
+        int resultCounter = 0;
+        int expectedSize = (int) (
+                (long) (0.90 * ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax()) / 10);
 
-  /**
-   * The main purpose of this set is to check if maximum size was used during adaptive fetching. To
-   * a table are added 50 rows with sizes: 4x10B, 46x30B. Starting fetch is done with default size
-   * 4. As first fetch have only rows with size 10B, then computed fetch size should be 30 (300/10 =
-   * 30), however maximum size set to 10 should make that next fetch should be done with size 10 (in
-   * other situation next rows will exceed size of maxResultBuffer). After this fetch size shouldn't
-   * change to the end.
-   * To check if this works correctly checked is:
-   * - if starting 4 rows from result set have fetch size as 4;
-   * - if next 46 rows from result set have fetch size as 10;
-   * - check if all 50 rows were read.
-   */
-  @Test
-  void adaptiveFetchingWithMaximumSize() throws SQLException {
-    int startFetchSize = 4;
-    int expectedSize = 10;
-    int expectedCounter = 50;
-    int resultCounter = 0;
+        Properties properties = new Properties();
+        PGProperty.DEFAULT_ROW_FETCH_SIZE.set(properties, startFetchSize);
+        PGProperty.MAX_RESULT_BUFFER.set(properties, "90p");
+        PGProperty.ADAPTIVE_FETCH.set(properties, true);
 
-    Properties properties = new Properties();
-    PGProperty.DEFAULT_ROW_FETCH_SIZE.set(properties, startFetchSize);
-    PGProperty.MAX_RESULT_BUFFER.set(properties, "300");
-    PGProperty.ADAPTIVE_FETCH.set(properties, true);
-    PGProperty.ADAPTIVE_FETCH_MAXIMUM.set(properties, expectedSize);
+        openConnectionAndCreateTable(properties);
 
-    openConnectionAndCreateTable(properties);
+        for (int i = 0; i < expectedCounter; i++) {
+            addStringWithSize(10);
+        }
 
-    for (int i = 0; i < expectedCounter; i++) {
-      if (i < 4) {
-        addStringWithSize(10);
-      } else {
-        addStringWithSize(30);
-      }
+        executeFetchingQuery();
+
+        for (int i = 0; i < 4; i++) {
+            resultSet.next();
+            resultCounter++;
+            assertEquals(startFetchSize, resultSet.getFetchSize());
+        }
+        while (resultSet.next()) {
+            resultCounter++;
+            assertEquals(expectedSize, resultSet.getFetchSize());
+        }
+
+        assertEquals(expectedCounter, resultCounter);
     }
 
-    executeFetchingQuery();
+    /**
+     * Executes the query whose results are to be fetched incrementally. Auto commit is
+     * set to false so that cursor-based fetching can take place.
+     */
+    private void executeFetchingQuery() throws SQLException {
+        connection.setAutoCommit(false);
 
-    for (int i = 0; i < 4; i++) {
-      resultSet.next();
-      resultCounter++;
-      assertEquals(startFetchSize, resultSet.getFetchSize());
-    }
-    while (resultSet.next()) {
-      resultCounter++;
-      assertEquals(expectedSize, resultSet.getFetchSize());
+        statement = connection.prepareStatement("SELECT * FROM " + table);
+        resultSet = statement.executeQuery();
     }
 
-    assertEquals(expectedCounter, resultCounter);
-  }
-
-  /**
-   * The main purpose of this set is to do fetching with maximum possible buffer. To a table are
-   * added 1000 rows with sizes 10B each. Starting fetch is done with default size 4, then next
-   * fetch should have size computed on maxResultBuffer, most probably that the next fetch would be
-   * the last.
-   * To check if this works correctly checked is:
-   * - if starting 4 rows from result set have fetch size as 4;
-   * - if next 996 rows from result set have fetch size computed with using max size of
-   * maxResultBuffer;
-   * - check if all 1000 rows were read.
-   */
-  @Test
-  void adaptiveFetchingWithMoreData() throws SQLException {
-    int startFetchSize = 4;
-    int expectedCounter = 1000;
-    int resultCounter = 0;
-    int expectedSize = (int) (
-        (long) (0.90 * ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax()) / 10);
-
-    Properties properties = new Properties();
-    PGProperty.DEFAULT_ROW_FETCH_SIZE.set(properties, startFetchSize);
-    PGProperty.MAX_RESULT_BUFFER.set(properties, "90p");
-    PGProperty.ADAPTIVE_FETCH.set(properties, true);
-
-    openConnectionAndCreateTable(properties);
-
-    for (int i = 0; i < expectedCounter; i++) {
-      addStringWithSize(10);
+    /**
+     * Insert string with given size to a table.
+     *
+     * @param size desired size of a string to be inserted in the table
+     */
+    private void addStringWithSize(int size) throws SQLException {
+        StringBuilder sb = new StringBuilder(size + 2);
+        sb.append("'");
+        for (int i = 0; i < size; i++) {
+            sb.append('H');
+        }
+        sb.append("'");
+        String insert = TestUtil.insertSQL(table, "value", sb.toString());
+        TestUtil.execute(connection, insert);
     }
 
-    executeFetchingQuery();
-
-    for (int i = 0; i < 4; i++) {
-      resultSet.next();
-      resultCounter++;
-      assertEquals(startFetchSize, resultSet.getFetchSize());
-    }
-    while (resultSet.next()) {
-      resultCounter++;
-      assertEquals(expectedSize, resultSet.getFetchSize());
+    /**
+     * Open connection, check if fetch can be performed and create table.
+     *
+     * @param properties Properties to be used during opening connection.
+     */
+    private void openConnectionAndCreateTable(Properties properties) throws SQLException {
+        connection = TestUtil.openDB(properties);
+        //After opening connection we should check if will be possible to do a fetch
+        checkIfFetchTestCanBePerformed(connection);
+        TestUtil.createTable(connection, table, columns);
     }
 
-    assertEquals(expectedCounter, resultCounter);
-  }
-
-  /**
-   * Execute query, which gonna be fetched. Sets auto commit to false to make fetching
-   * happen.
-   */
-  private void executeFetchingQuery() throws SQLException {
-    connection.setAutoCommit(false);
-
-    statement = connection.prepareStatement("SELECT * FROM " + table);
-    resultSet = statement.executeQuery();
-  }
-
-  /**
-   * Insert string with given size to a table.
-   *
-   * @param size desired size of a string to be inserted in the table
-   */
-  private void addStringWithSize(int size) throws SQLException {
-    StringBuilder sb = new StringBuilder(size + 2);
-    sb.append("'");
-    for (int i = 0; i < size; i++) {
-      sb.append('H');
+    /**
+     * Check if a fetch can be performed - PreferQueryMode is not set to Simple.
+     *
+     * @param connection Connection to be checked.
+     */
+    private void checkIfFetchTestCanBePerformed(Connection connection) throws SQLException {
+        PGConnection pgConnection = connection.unwrap(PGConnection.class);
+        PreferQueryMode preferQueryMode =
+                pgConnection == null ? PreferQueryMode.EXTENDED : pgConnection.getPreferQueryMode();
+        Assumptions.assumeTrue(preferQueryMode != PreferQueryMode.SIMPLE,
+                "Fetching tests can't be performed in simple mode");
     }
-    sb.append("'");
-    String insert = TestUtil.insertSQL(table, "value", sb.toString());
-    TestUtil.execute(connection, insert);
-  }
-
-  /**
-   * Open connection, check if fetch can be performed and create table.
-   *
-   * @param properties Properties to be used during opening connection.
-   */
-  private void openConnectionAndCreateTable(Properties properties) throws SQLException {
-    connection = TestUtil.openDB(properties);
-    //After opening connection we should check if will be possible to do a fetch
-    checkIfFetchTestCanBePerformed(connection);
-    TestUtil.createTable(connection, table, columns);
-  }
-
-  /**
-   * Check if a fetch can be performed - PreferQueryMode is not set to Simple.
-   *
-   * @param connection Connection to be checked.
-   */
-  private void checkIfFetchTestCanBePerformed(Connection connection) throws SQLException {
-    PGConnection pgConnection = connection.unwrap(PGConnection.class);
-    PreferQueryMode preferQueryMode =
-        pgConnection == null ? PreferQueryMode.EXTENDED : pgConnection.getPreferQueryMode();
-    Assumptions.assumeTrue(preferQueryMode != PreferQueryMode.SIMPLE,
-        "Fetching tests can't be performed in simple mode");
-  }
 
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc42/CustomizeDefaultFetchSizeTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc42/CustomizeDefaultFetchSizeTest.java
index 66ae16e..8607297 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc42/CustomizeDefaultFetchSizeTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc42/CustomizeDefaultFetchSizeTest.java
@@ -24,63 +24,63 @@ import java.util.Properties;
 
 class CustomizeDefaultFetchSizeTest {
 
-  private Connection connection;
+    private Connection connection;
 
-  @AfterEach
-  void tearDown() throws Exception {
-    if (connection != null) {
-      TestUtil.closeDB(connection);
+    @AfterEach
+    void tearDown() throws Exception {
+        if (connection != null) {
+            TestUtil.closeDB(connection);
+        }
     }
-  }
 
-  @Test
-  void setPredefineDefaultFetchSizeOnStatement() throws Exception {
-    final int waitFetchSize = 13;
-    Properties properties = new Properties();
-    PGProperty.DEFAULT_ROW_FETCH_SIZE.set(properties, waitFetchSize);
+    @Test
+    void setPredefineDefaultFetchSizeOnStatement() throws Exception {
+        final int waitFetchSize = 13;
+        Properties properties = new Properties();
+        PGProperty.DEFAULT_ROW_FETCH_SIZE.set(properties, waitFetchSize);
 
-    connection = TestUtil.openDB(properties);
+        connection = TestUtil.openDB(properties);
 
-    Statement statement = connection.createStatement();
-    int resultFetchSize = statement.getFetchSize();
+        Statement statement = connection.createStatement();
+        int resultFetchSize = statement.getFetchSize();
 
-    statement.close();
+        statement.close();
 
-    assertThat(
-        "PGProperty.DEFAULT_ROW_FETCH_SIZE should be propagate to Statement that was create from connection "
-            + "on that define it parameter",
-        resultFetchSize, CoreMatchers.equalTo(waitFetchSize));
-  }
+        assertThat(
+                "PGProperty.DEFAULT_ROW_FETCH_SIZE should be propagate to Statement that was create from connection "
+                        + "on that define it parameter",
+                resultFetchSize, CoreMatchers.equalTo(waitFetchSize));
+    }
 
-  @Test
-  void setPredefineDefaultFetchSizeOnPreparedStatement() throws Exception {
-    final int waitFetchSize = 14;
+    @Test
+    void setPredefineDefaultFetchSizeOnPreparedStatement() throws Exception {
+        final int waitFetchSize = 14;
 
-    Properties properties = new Properties();
-    PGProperty.DEFAULT_ROW_FETCH_SIZE.set(properties, waitFetchSize);
+        Properties properties = new Properties();
+        PGProperty.DEFAULT_ROW_FETCH_SIZE.set(properties, waitFetchSize);
 
-    connection = TestUtil.openDB(properties);
+        connection = TestUtil.openDB(properties);
 
-    CallableStatement statement = connection.prepareCall("{ call unnest(array[1, 2, 3, 5])}");
-    int resultFetchSize = statement.getFetchSize();
+        CallableStatement statement = connection.prepareCall("{ call unnest(array[1, 2, 3, 5])}");
+        int resultFetchSize = statement.getFetchSize();
 
-    assertThat(
-        "PGProperty.DEFAULT_ROW_FETCH_SIZE should be propagate to CallableStatement that was create from connection "
-            + "on that define it parameter",
-        resultFetchSize, CoreMatchers.equalTo(waitFetchSize));
-  }
+        assertThat(
+                "PGProperty.DEFAULT_ROW_FETCH_SIZE should be propagate to CallableStatement that was create from connection "
+                        + "on that define it parameter",
+                resultFetchSize, CoreMatchers.equalTo(waitFetchSize));
+    }
 
-  @Test
-  void notAvailableSpecifyNegativeFetchSize() throws Exception {
-    assertThrows(SQLException.class, () -> {
-      Properties properties = new Properties();
-      PGProperty.DEFAULT_ROW_FETCH_SIZE.set(properties, Integer.MIN_VALUE);
+    @Test
+    void notAvailableSpecifyNegativeFetchSize() throws Exception {
+        assertThrows(SQLException.class, () -> {
+            Properties properties = new Properties();
+            PGProperty.DEFAULT_ROW_FETCH_SIZE.set(properties, Integer.MIN_VALUE);
 
-      connection = TestUtil.openDB(properties);
+            connection = TestUtil.openDB(properties);
 
-      fail(
-          "On step initialize connection we know about not valid parameter PGProperty.DEFAULT_ROW_FETCH_SIZE they can't be negative, "
-              + "so we should throw correspond exception about it rather than fall with exception in runtime for example during create statement");
-    });
-  }
+            fail(
+                    "On step initialize connection we know about not valid parameter PGProperty.DEFAULT_ROW_FETCH_SIZE they can't be negative, "
+                            + "so we should throw correspond exception about it rather than fall with exception in runtime for example during create statement");
+        });
+    }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc42/DatabaseMetaDataTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc42/DatabaseMetaDataTest.java
index 9878b91..7dbe829 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc42/DatabaseMetaDataTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc42/DatabaseMetaDataTest.java
@@ -29,139 +29,139 @@ import java.sql.Types;
 
 class DatabaseMetaDataTest {
 
-  private Connection conn;
+    private Connection conn;
 
-  @BeforeEach
-  void setUp() throws Exception {
-    conn = TestUtil.openDB();
-    TestUtil.createSchema(conn, "test_schema");
-    TestUtil.createEnumType(conn, "test_schema.test_enum", "'val'");
-    TestUtil.createTable(conn, "test_schema.off_path_table", "var test_schema.test_enum[]");
-    TestUtil.createEnumType(conn, "_test_enum", "'evil'");
-    TestUtil.createEnumType(conn, "test_enum", "'other'");
-    TestUtil.createTable(conn, "on_path_table", "a test_schema.test_enum[], b _test_enum, c test_enum[]");
-    TestUtil.createTable(conn, "decimaltest", "a decimal, b decimal(10, 5)");
-  }
-
-  @AfterEach
-  void tearDown() throws Exception {
-    TestUtil.dropTable(conn, "decimaltest");
-    TestUtil.dropTable(conn, "on_path_table");
-    TestUtil.dropType(conn, "test_enum");
-    TestUtil.dropType(conn, "_test_enum");
-    TestUtil.dropSchema(conn, "test_schema");
-    TestUtil.closeDB(conn);
-  }
-
-  @Test
-  void getColumnsForNullScale() throws Exception {
-    DatabaseMetaData dbmd = conn.getMetaData();
-
-    ResultSet rs = dbmd.getColumns("%", "%", "decimaltest", "%");
-    assertTrue(rs.next());
-    assertEquals("a", rs.getString("COLUMN_NAME"));
-    assertEquals(0, rs.getInt("DECIMAL_DIGITS"));
-    assertTrue(rs.wasNull());
-
-    assertTrue(rs.next());
-    assertEquals("b", rs.getString("COLUMN_NAME"));
-    assertEquals(5, rs.getInt("DECIMAL_DIGITS"));
-    assertFalse(rs.wasNull());
-
-    assertFalse(rs.next());
-  }
-
-  @Test
-  void getCorrectSQLTypeForOffPathTypes() throws Exception {
-    DatabaseMetaData dbmd = conn.getMetaData();
-
-    ResultSet rs = dbmd.getColumns("%", "%", "off_path_table", "%");
-    assertTrue(rs.next());
-    assertEquals("var", rs.getString("COLUMN_NAME"));
-    assertEquals("\"test_schema\".\"_test_enum\"", rs.getString("TYPE_NAME"), "Detects correct off-path type name");
-    assertEquals(Types.ARRAY, rs.getInt("DATA_TYPE"), "Detects correct SQL type for off-path types");
-
-    assertFalse(rs.next());
-  }
-
-  @Test
-  void getCorrectSQLTypeForShadowedTypes() throws Exception {
-    DatabaseMetaData dbmd = conn.getMetaData();
-
-    ResultSet rs = dbmd.getColumns("%", "%", "on_path_table", "%");
-
-    assertTrue(rs.next());
-    assertEquals("a", rs.getString("COLUMN_NAME"));
-    assertEquals("\"test_schema\".\"_test_enum\"", rs.getString("TYPE_NAME"), "Correctly maps types from other schemas");
-    assertEquals(Types.ARRAY, rs.getInt("DATA_TYPE"));
-
-    assertTrue(rs.next());
-    assertEquals("b", rs.getString("COLUMN_NAME"));
-    // = TYPE _test_enum AS ENUM ('evil')
-    assertEquals("_test_enum", rs.getString("TYPE_NAME"));
-    assertEquals(Types.VARCHAR, rs.getInt("DATA_TYPE"));
-
-    assertTrue(rs.next());
-    assertEquals("c", rs.getString("COLUMN_NAME"));
-    // = array of TYPE test_enum AS ENUM ('value')
-    if (TestUtil.haveMinimumServerVersion(conn, ServerVersion.v16)) {
-      assertEquals("_test_enum_1", rs.getString("TYPE_NAME"), "Correctly detects shadowed array type name");
-    } else {
-      assertEquals("___test_enum", rs.getString("TYPE_NAME"), "Correctly detects shadowed array type name");
+    @BeforeEach
+    void setUp() throws Exception {
+        conn = TestUtil.openDB();
+        TestUtil.createSchema(conn, "test_schema");
+        TestUtil.createEnumType(conn, "test_schema.test_enum", "'val'");
+        TestUtil.createTable(conn, "test_schema.off_path_table", "var test_schema.test_enum[]");
+        TestUtil.createEnumType(conn, "_test_enum", "'evil'");
+        TestUtil.createEnumType(conn, "test_enum", "'other'");
+        TestUtil.createTable(conn, "on_path_table", "a test_schema.test_enum[], b _test_enum, c test_enum[]");
+        TestUtil.createTable(conn, "decimaltest", "a decimal, b decimal(10, 5)");
     }
-    assertEquals(Types.ARRAY, rs.getInt("DATA_TYPE"), "Correctly detects type of shadowed name");
 
-    assertFalse(rs.next());
-  }
-
-  @Test
-  void largeOidIsHandledCorrectly() throws SQLException {
-    TypeInfo ti = conn.unwrap(PgConnection.class).getTypeInfo();
-
-    try {
-      ti.getSQLType((int) 4294967295L); // (presumably) unused OID 4294967295, which is 2**32 - 1
-    } catch (PSQLException ex) {
-      assertEquals(ex.getSQLState(), PSQLState.NO_DATA.getState());
+    @AfterEach
+    void tearDown() throws Exception {
+        TestUtil.dropTable(conn, "decimaltest");
+        TestUtil.dropTable(conn, "on_path_table");
+        TestUtil.dropType(conn, "test_enum");
+        TestUtil.dropType(conn, "_test_enum");
+        TestUtil.dropSchema(conn, "test_schema");
+        TestUtil.closeDB(conn);
     }
-  }
 
-  @Test
-  void oidConversion() throws SQLException {
-    TypeInfo ti = conn.unwrap(PgConnection.class).getTypeInfo();
-    int oid = 0;
-    long loid = 0;
-    assertEquals(oid, ti.longOidToInt(loid));
-    assertEquals(loid, ti.intOidToLong(oid));
+    @Test
+    void getColumnsForNullScale() throws Exception {
+        DatabaseMetaData dbmd = conn.getMetaData();
 
-    oid = Integer.MAX_VALUE;
-    loid = Integer.MAX_VALUE;
-    assertEquals(oid, ti.longOidToInt(loid));
-    assertEquals(loid, ti.intOidToLong(oid));
+        ResultSet rs = dbmd.getColumns("%", "%", "decimaltest", "%");
+        assertTrue(rs.next());
+        assertEquals("a", rs.getString("COLUMN_NAME"));
+        assertEquals(0, rs.getInt("DECIMAL_DIGITS"));
+        assertTrue(rs.wasNull());
 
-    oid = Integer.MIN_VALUE;
-    loid = 1L << 31;
-    assertEquals(oid, ti.longOidToInt(loid));
-    assertEquals(loid, ti.intOidToLong(oid));
+        assertTrue(rs.next());
+        assertEquals("b", rs.getString("COLUMN_NAME"));
+        assertEquals(5, rs.getInt("DECIMAL_DIGITS"));
+        assertFalse(rs.wasNull());
 
-    oid = -1;
-    loid = 0xFFFFFFFFL;
-    assertEquals(oid, ti.longOidToInt(loid));
-    assertEquals(loid, ti.intOidToLong(oid));
-  }
+        assertFalse(rs.next());
+    }
 
-  @Test
-  void oidConversionThrowsForNegativeLongValues() throws SQLException {
-    assertThrows(PSQLException.class, () -> {
-      TypeInfo ti = conn.unwrap(PgConnection.class).getTypeInfo();
-      ti.longOidToInt(-1);
-    });
-  }
+    @Test
+    void getCorrectSQLTypeForOffPathTypes() throws Exception {
+        DatabaseMetaData dbmd = conn.getMetaData();
 
-  @Test
-  void oidConversionThrowsForTooLargeLongValues() throws SQLException {
-    assertThrows(PSQLException.class, () -> {
-      TypeInfo ti = conn.unwrap(PgConnection.class).getTypeInfo();
-      ti.longOidToInt(1L << 32);
-    });
-  }
+        ResultSet rs = dbmd.getColumns("%", "%", "off_path_table", "%");
+        assertTrue(rs.next());
+        assertEquals("var", rs.getString("COLUMN_NAME"));
+        assertEquals("\"test_schema\".\"_test_enum\"", rs.getString("TYPE_NAME"), "Detects correct off-path type name");
+        assertEquals(Types.ARRAY, rs.getInt("DATA_TYPE"), "Detects correct SQL type for off-path types");
+
+        assertFalse(rs.next());
+    }
+
+    @Test
+    void getCorrectSQLTypeForShadowedTypes() throws Exception {
+        DatabaseMetaData dbmd = conn.getMetaData();
+
+        ResultSet rs = dbmd.getColumns("%", "%", "on_path_table", "%");
+
+        assertTrue(rs.next());
+        assertEquals("a", rs.getString("COLUMN_NAME"));
+        assertEquals("\"test_schema\".\"_test_enum\"", rs.getString("TYPE_NAME"), "Correctly maps types from other schemas");
+        assertEquals(Types.ARRAY, rs.getInt("DATA_TYPE"));
+
+        assertTrue(rs.next());
+        assertEquals("b", rs.getString("COLUMN_NAME"));
+        // = TYPE _test_enum AS ENUM ('evil')
+        assertEquals("_test_enum", rs.getString("TYPE_NAME"));
+        assertEquals(Types.VARCHAR, rs.getInt("DATA_TYPE"));
+
+        assertTrue(rs.next());
+        assertEquals("c", rs.getString("COLUMN_NAME"));
+        // = array of TYPE test_enum AS ENUM ('value')
+        if (TestUtil.haveMinimumServerVersion(conn, ServerVersion.v16)) {
+            assertEquals("_test_enum_1", rs.getString("TYPE_NAME"), "Correctly detects shadowed array type name");
+        } else {
+            assertEquals("___test_enum", rs.getString("TYPE_NAME"), "Correctly detects shadowed array type name");
+        }
+        assertEquals(Types.ARRAY, rs.getInt("DATA_TYPE"), "Correctly detects type of shadowed name");
+
+        assertFalse(rs.next());
+    }
+
+    @Test
+    void largeOidIsHandledCorrectly() throws SQLException {
+        TypeInfo ti = conn.unwrap(PgConnection.class).getTypeInfo();
+
+        try {
+            ti.getSQLType((int) 4294967295L); // (presumably) unused OID 4294967295, which is 2**32 - 1
+        } catch (PSQLException ex) {
+            assertEquals(ex.getSQLState(), PSQLState.NO_DATA.getState());
+        }
+    }
+
+    @Test
+    void oidConversion() throws SQLException {
+        TypeInfo ti = conn.unwrap(PgConnection.class).getTypeInfo();
+        int oid = 0;
+        long loid = 0;
+        assertEquals(oid, ti.longOidToInt(loid));
+        assertEquals(loid, ti.intOidToLong(oid));
+
+        oid = Integer.MAX_VALUE;
+        loid = Integer.MAX_VALUE;
+        assertEquals(oid, ti.longOidToInt(loid));
+        assertEquals(loid, ti.intOidToLong(oid));
+
+        oid = Integer.MIN_VALUE;
+        loid = 1L << 31;
+        assertEquals(oid, ti.longOidToInt(loid));
+        assertEquals(loid, ti.intOidToLong(oid));
+
+        oid = -1;
+        loid = 0xFFFFFFFFL;
+        assertEquals(oid, ti.longOidToInt(loid));
+        assertEquals(loid, ti.intOidToLong(oid));
+    }
+
+    @Test
+    void oidConversionThrowsForNegativeLongValues() throws SQLException {
+        assertThrows(PSQLException.class, () -> {
+            TypeInfo ti = conn.unwrap(PgConnection.class).getTypeInfo();
+            ti.longOidToInt(-1);
+        });
+    }
+
+    @Test
+    void oidConversionThrowsForTooLargeLongValues() throws SQLException {
+        assertThrows(PSQLException.class, () -> {
+            TypeInfo ti = conn.unwrap(PgConnection.class).getTypeInfo();
+            ti.longOidToInt(1L << 32);
+        });
+    }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc42/GetObject310InfinityTests.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc42/GetObject310InfinityTests.java
index 87dd142..2d2afcc 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc42/GetObject310InfinityTests.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc42/GetObject310InfinityTests.java
@@ -28,70 +28,70 @@ import java.util.Collection;
 
 @RunWith(Parameterized.class)
 public class GetObject310InfinityTests extends BaseTest4 {
-  private final String expression;
-  private final String pgType;
-  private final Class<?> klass;
-  private final Object expectedValue;
+    private final String expression;
+    private final String pgType;
+    private final Class<?> klass;
+    private final Object expectedValue;
 
-  public GetObject310InfinityTests(BinaryMode binaryMode, String expression,
-      String pgType, Class<?> klass, Object expectedValue) {
-    setBinaryMode(binaryMode);
-    this.expression = expression;
-    this.pgType = pgType;
-    this.klass = klass;
-    this.expectedValue = expectedValue;
-  }
-
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-    Assume.assumeTrue("PostgreSQL 8.3 does not support 'infinity' for 'date'",
-        !"date".equals(pgType) || TestUtil.haveMinimumServerVersion(con, ServerVersion.v8_4));
-  }
-
-  @Parameterized.Parameters(name = "binary = {0}, expr = {1}, pgType = {2}, klass = {3}")
-  public static Iterable<Object[]> data() throws IllegalAccessException {
-    Collection<Object[]> ids = new ArrayList<>();
-    for (BinaryMode binaryMode : BinaryMode.values()) {
-      for (String expression : Arrays.asList("-infinity", "infinity")) {
-        for (String pgType : Arrays.asList("date", "timestamp",
-            "timestamp with time zone")) {
-          for (Class<?> klass : Arrays.asList(LocalDate.class, LocalDateTime.class,
-              OffsetDateTime.class)) {
-            if (klass.equals(LocalDate.class) && !"date".equals(pgType)) {
-              continue;
-            }
-            if (klass.equals(LocalDateTime.class) && !pgType.startsWith("timestamp")) {
-              continue;
-            }
-            if (klass.equals(OffsetDateTime.class) && !pgType.startsWith("timestamp")) {
-              continue;
-            }
-            if (klass.equals(LocalDateTime.class) && "timestamp with time zone".equals(pgType)) {
-              // org.postgresql.util.PSQLException: Cannot convert the column of type TIMESTAMPTZ to requested type timestamp.
-              continue;
-            }
-            Field field = null;
-            try {
-              field = klass.getField(expression.startsWith("-") ? "MIN" : "MAX");
-            } catch (NoSuchFieldException e) {
-              throw new IllegalStateException("No min/max field in " + klass, e);
-            }
-            Object expected = field.get(null);
-            ids.add(new Object[]{binaryMode, expression, pgType, klass, expected});
-          }
-        }
-      }
+    public GetObject310InfinityTests(BinaryMode binaryMode, String expression,
+                                     String pgType, Class<?> klass, Object expectedValue) {
+        setBinaryMode(binaryMode);
+        this.expression = expression;
+        this.pgType = pgType;
+        this.klass = klass;
+        this.expectedValue = expectedValue;
     }
-    return ids;
-  }
 
-  @Test
-  public void test() throws SQLException {
-    PreparedStatement stmt = con.prepareStatement("select '" + expression + "'::" + pgType);
-    ResultSet rs = stmt.executeQuery();
-    rs.next();
-    Object res = rs.getObject(1, klass);
-    Assert.assertEquals(expectedValue, res);
-  }
+    @Parameterized.Parameters(name = "binary = {0}, expr = {1}, pgType = {2}, klass = {3}")
+    public static Iterable<Object[]> data() throws IllegalAccessException {
+        Collection<Object[]> ids = new ArrayList<>();
+        for (BinaryMode binaryMode : BinaryMode.values()) {
+            for (String expression : Arrays.asList("-infinity", "infinity")) {
+                for (String pgType : Arrays.asList("date", "timestamp",
+                        "timestamp with time zone")) {
+                    for (Class<?> klass : Arrays.asList(LocalDate.class, LocalDateTime.class,
+                            OffsetDateTime.class)) {
+                        if (klass.equals(LocalDate.class) && !"date".equals(pgType)) {
+                            continue;
+                        }
+                        if (klass.equals(LocalDateTime.class) && !pgType.startsWith("timestamp")) {
+                            continue;
+                        }
+                        if (klass.equals(OffsetDateTime.class) && !pgType.startsWith("timestamp")) {
+                            continue;
+                        }
+                        if (klass.equals(LocalDateTime.class) && "timestamp with time zone".equals(pgType)) {
+                            // org.postgresql.util.PSQLException: Cannot convert the column of type TIMESTAMPTZ to requested type timestamp.
+                            continue;
+                        }
+                        Field field = null;
+                        try {
+                            field = klass.getField(expression.startsWith("-") ? "MIN" : "MAX");
+                        } catch (NoSuchFieldException e) {
+                            throw new IllegalStateException("No min/max field in " + klass, e);
+                        }
+                        Object expected = field.get(null);
+                        ids.add(new Object[]{binaryMode, expression, pgType, klass, expected});
+                    }
+                }
+            }
+        }
+        return ids;
+    }
+
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        Assume.assumeTrue("PostgreSQL 8.3 does not support 'infinity' for 'date'",
+                !"date".equals(pgType) || TestUtil.haveMinimumServerVersion(con, ServerVersion.v8_4));
+    }
+
+    @Test
+    public void test() throws SQLException {
+        PreparedStatement stmt = con.prepareStatement("select '" + expression + "'::" + pgType);
+        ResultSet rs = stmt.executeQuery();
+        rs.next();
+        Object res = rs.getObject(1, klass);
+        Assert.assertEquals(expectedValue, res);
+    }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc42/GetObject310Test.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc42/GetObject310Test.java
index 5ab253a..adabebd 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc42/GetObject310Test.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc42/GetObject310Test.java
@@ -48,395 +48,397 @@ import java.util.stream.Stream;
 @RunWith(Parameterized.class)
 public class GetObject310Test extends BaseTest4 {
 
-  private static final TimeZone saveTZ = TimeZone.getDefault();
+    private static final TimeZone saveTZ = TimeZone.getDefault();
 
-  private static final ZoneOffset UTC = ZoneOffset.UTC; // +0000 always
-  private static final ZoneOffset GMT03 = ZoneOffset.of("+03:00"); // +0300 always
-  private static final ZoneOffset GMT05 = ZoneOffset.of("-05:00"); // -0500 always
-  private static final ZoneOffset GMT13 = ZoneOffset.of("+13:00"); // +1300 always
+    private static final ZoneOffset UTC = ZoneOffset.UTC; // +0000 always
+    private static final ZoneOffset GMT03 = ZoneOffset.of("+03:00"); // +0300 always
+    private static final ZoneOffset GMT05 = ZoneOffset.of("-05:00"); // -0500 always
+    private static final ZoneOffset GMT13 = ZoneOffset.of("+13:00"); // +1300 always
 
-  private static final IsoChronology ISO = IsoChronology.INSTANCE;
+    private static final IsoChronology ISO = IsoChronology.INSTANCE;
 
-  public GetObject310Test(BinaryMode binaryMode) {
-    setBinaryMode(binaryMode);
-  }
-
-  @Parameterized.Parameters(name = "binary = {0}")
-  public static Iterable<Object[]> data() {
-    Collection<Object[]> ids = new ArrayList<>();
-    for (BinaryMode binaryMode : BinaryMode.values()) {
-      ids.add(new Object[]{binaryMode});
-    }
-    return ids;
-  }
-
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-    TestUtil.createTable(con, "table1", "timestamp_without_time_zone_column timestamp without time zone,"
-            + "timestamp_with_time_zone_column timestamp with time zone,"
-            + "date_column date,"
-            + "time_without_time_zone_column time without time zone,"
-            + "time_with_time_zone_column time with time zone"
-    );
-  }
-
-  @Override
-  public void tearDown() throws SQLException {
-    TimeZone.setDefault(saveTZ);
-    TestUtil.dropTable(con, "table1");
-    super.tearDown();
-  }
-
-  /**
-   * Test the behavior getObject for date columns.
-   */
-  @Test
-  public void testGetLocalDate() throws SQLException {
-    assumeTrue(TestUtil.haveIntegerDateTimes(con));
-
-    List<String> zoneIdsToTest = new ArrayList<>();
-    zoneIdsToTest.add("Africa/Casablanca"); // It is something like GMT+0..GMT+1
-    zoneIdsToTest.add("America/Adak"); // It is something like GMT-10..GMT-9
-    zoneIdsToTest.add("Atlantic/Azores"); // It is something like GMT-1..GMT+0
-    zoneIdsToTest.add("Europe/Berlin"); // It is something like GMT+1..GMT+2
-    zoneIdsToTest.add("Europe/Moscow"); // It is something like GMT+3..GMT+4 for 2000s
-    zoneIdsToTest.add("Pacific/Apia"); // It is something like GMT+13..GMT+14
-    zoneIdsToTest.add("Pacific/Niue"); // It is something like GMT-11..GMT-11
-    for (int i = -12; i <= 13; i++) {
-      zoneIdsToTest.add(String.format("GMT%+02d", i));
+    public GetObject310Test(BinaryMode binaryMode) {
+        setBinaryMode(binaryMode);
     }
 
-    List<String> datesToTest = Arrays.asList("1998-01-08",
-            // Some random dates
-            "1981-12-11", "2022-02-22",
-            "2015-09-03", "2015-06-30",
-            "1997-06-30", "1997-07-01", "2012-06-30", "2012-07-01",
-            "2015-06-30", "2015-07-01", "2005-12-31", "2006-01-01",
-            "2008-12-31", "2009-01-01", "2015-06-30", "2015-07-31",
-            "2015-07-31",
-
-            // On 2000-03-26 02:00:00 Moscow went to DST, thus local time became 03:00:00
-            "2003-03-25", "2000-03-26", "2000-03-27",
-
-            // This is a pre-1970 date, so check if it is rounded properly
-            "1950-07-20",
-
-            // Ensure the calendar is proleptic
-            "1582-01-01", "1582-12-31",
-            "1582-09-30", "1582-10-16",
-
-            // https://github.com/pgjdbc/pgjdbc/issues/2221
-            "0001-01-01",
-            "1000-01-01", "1000-06-01", "0999-12-31",
-
-            // On 2000-10-29 03:00:00 Moscow went to regular time, thus local time became 02:00:00
-            "2000-10-28", "2000-10-29", "2000-10-30");
-
-    for (String zoneId : zoneIdsToTest) {
-      ZoneId zone = ZoneId.of(zoneId);
-      for (String date : datesToTest) {
-        localDate(zone, date);
-      }
-    }
-  }
-
-  public void localDate(ZoneId zoneId, String date) throws SQLException {
-    TimeZone.setDefault(TimeZone.getTimeZone(zoneId));
-    try (Statement stmt = con.createStatement() ) {
-      stmt.executeUpdate(TestUtil.insertSQL("table1", "date_column", "DATE '" + date + "'"));
-
-      try (ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "date_column")) ) {
-        assertTrue(rs.next());
-        LocalDate localDate = LocalDate.parse(date);
-        assertEquals(localDate, rs.getObject("date_column", LocalDate.class));
-        assertEquals(localDate, rs.getObject(1, LocalDate.class));
-      }
-      stmt.executeUpdate("DELETE FROM table1");
-    }
-  }
-
-  /**
-   * Test the behavior getObject for timetz columns.
-   */
-  @Test
-  public void testGetOffsetTime() throws SQLException {
-    List<String> timesToTest = Arrays.asList("00:00:00+00:00", "00:00:00+00:30",
-        "01:02:03.333444+02:00", "23:59:59.999999-12:00",
-        "11:22:59.4711-08:00", "23:59:59.0-12:00",
-        "11:22:59.4711+15:59:12", "23:59:59.0-15:59:12"
-    );
-
-    for (String time : timesToTest) {
-      try (Statement stmt = con.createStatement() ) {
-        stmt.executeUpdate(TestUtil.insertSQL("table1", "time_with_time_zone_column", "time with time zone '" + time + "'"));
-
-        try (ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "time_with_time_zone_column")) ) {
-          assertTrue(rs.next());
-          OffsetTime offsetTime = OffsetTime.parse(time);
-          assertEquals(offsetTime, rs.getObject("time_with_time_zone_column", OffsetTime.class));
-          assertEquals(offsetTime, rs.getObject(1, OffsetTime.class));
-
-          //Also test that we get the correct values when retrieving the data as OffsetDateTime objects on EPOCH (required by JDBC)
-          OffsetDateTime offsetDT = offsetTime.atDate(LocalDate.of(1970, 1, 1));
-          assertEquals(offsetDT, rs.getObject("time_with_time_zone_column", OffsetDateTime.class));
-          assertEquals(offsetDT, rs.getObject(1, OffsetDateTime.class));
-
-          assertDataTypeMismatch(rs, "time_with_time_zone_column", LocalDate.class);
-          assertDataTypeMismatch(rs, "time_with_time_zone_column", LocalTime.class);
-          assertDataTypeMismatch(rs, "time_with_time_zone_column", LocalDateTime.class);
+    @Parameterized.Parameters(name = "binary = {0}")
+    public static Iterable<Object[]> data() {
+        Collection<Object[]> ids = new ArrayList<>();
+        for (BinaryMode binaryMode : BinaryMode.values()) {
+            ids.add(new Object[]{binaryMode});
         }
-        stmt.executeUpdate("DELETE FROM table1");
-      }
-    }
-  }
-
-  /**
-   * Test the behavior getObject for time columns.
-   */
-  @Test
-  public void testGetLocalTime() throws SQLException {
-    try (Statement stmt = con.createStatement() ) {
-      stmt.executeUpdate(TestUtil.insertSQL("table1", "time_without_time_zone_column", "TIME '04:05:06.123456'"));
-
-      try (ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "time_without_time_zone_column"))) {
-        assertTrue(rs.next());
-        LocalTime localTime = LocalTime.of(4, 5, 6, 123456000);
-        assertEquals(localTime, rs.getObject("time_without_time_zone_column", LocalTime.class));
-        assertEquals(localTime, rs.getObject(1, LocalTime.class));
-
-        assertDataTypeMismatch(rs, "time_without_time_zone_column", OffsetTime.class);
-        assertDataTypeMismatch(rs, "time_without_time_zone_column", OffsetDateTime.class);
-        assertDataTypeMismatch(rs, "time_without_time_zone_column", LocalDate.class);
-        assertDataTypeMismatch(rs, "time_without_time_zone_column", LocalDateTime.class);
-      }
-      stmt.executeUpdate("DELETE FROM table1");
-    }
-  }
-
-  /**
-   * Test the behavior getObject for time columns with null.
-   */
-  @Test
-  public void testGetLocalTimeNull() throws SQLException {
-    try (Statement stmt = con.createStatement() ) {
-      stmt.executeUpdate(TestUtil.insertSQL("table1", "time_without_time_zone_column", "NULL"));
-
-      try (ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "time_without_time_zone_column"))) {
-        assertTrue(rs.next());
-        assertNull(rs.getObject("time_without_time_zone_column", LocalTime.class));
-        assertNull(rs.getObject(1, LocalTime.class));
-      }
-      stmt.executeUpdate("DELETE FROM table1");
-    }
-  }
-
-  /**
-   * Test the behavior getObject for time columns with invalid type.
-   */
-  @Test
-  public void testGetLocalTimeInvalidType() throws SQLException {
-    try (Statement stmt = con.createStatement() ) {
-      stmt.executeUpdate(TestUtil.insertSQL("table1", "time_with_time_zone_column", "TIME '04:05:06.123456-08:00'"));
-
-      try (ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "time_with_time_zone_column"))) {
-        assertTrue(rs.next());
-        assertDataTypeMismatch(rs, "time_with_time_zone_column", LocalTime.class);
-        assertDataTypeMismatch(rs, "time_with_time_zone_column", LocalDateTime.class);
-        assertDataTypeMismatch(rs, "time_with_time_zone_column", LocalDate.class);
-      }
-      stmt.executeUpdate("DELETE FROM table1");
-    }
-  }
-
-  /**
-   * Test the behavior getObject for timestamp columns.
-   */
-  @Test
-  public void testGetLocalDateTime() throws SQLException {
-    assumeTrue(TestUtil.haveIntegerDateTimes(con));
-
-    List<String> zoneIdsToTest = new ArrayList<>();
-    zoneIdsToTest.add("Africa/Casablanca"); // It is something like GMT+0..GMT+1
-    zoneIdsToTest.add("America/Adak"); // It is something like GMT-10..GMT-9
-    zoneIdsToTest.add("Atlantic/Azores"); // It is something like GMT-1..GMT+0
-    zoneIdsToTest.add("Europe/Moscow"); // It is something like GMT+3..GMT+4 for 2000s
-    zoneIdsToTest.add("Pacific/Apia"); // It is something like GMT+13..GMT+14
-    zoneIdsToTest.add("Pacific/Niue"); // It is something like GMT-11..GMT-11
-    for (int i = -12; i <= 13; i++) {
-      zoneIdsToTest.add(String.format("GMT%+02d", i));
+        return ids;
     }
 
-    List<String> datesToTest = Arrays.asList("2015-09-03T12:00:00", "2015-06-30T23:59:58",
-            "1997-06-30T23:59:59", "1997-07-01T00:00:00", "2012-06-30T23:59:59", "2012-07-01T00:00:00",
-            "2015-06-30T23:59:59", "2015-07-01T00:00:00", "2005-12-31T23:59:59", "2006-01-01T00:00:00",
-            "2008-12-31T23:59:59", "2009-01-01T00:00:00", /* "2015-06-30T23:59:60", */ "2015-07-31T00:00:00",
-            "2015-07-31T00:00:01", "2015-07-31T00:00:00.000001",
+    /**
+     * checks if getObject with given column name or index 1 throws an exception with DATA_TYPE_MISMATCH as SQLState
+     */
+    private static void assertDataTypeMismatch(ResultSet rs, String columnName, Class<?> typeToGet) {
+        PSQLException ex = assertThrows(PSQLException.class, () -> rs.getObject(columnName, typeToGet));
+        assertEquals(PSQLState.DATA_TYPE_MISMATCH.getState(), ex.getSQLState());
 
-            // On 2000-03-26 02:00:00 Moscow went to DST, thus local time became 03:00:00
-            "2000-03-26T01:59:59", "2000-03-26T02:00:00", "2000-03-26T02:00:01", "2000-03-26T02:59:59",
-            "2000-03-26T03:00:00", "2000-03-26T03:00:01", "2000-03-26T03:59:59", "2000-03-26T04:00:00",
-            "2000-03-26T04:00:01", "2000-03-26T04:00:00.000001",
-
-            // This is a pre-1970 date, so check if it is rounded properly
-            "1950-07-20T02:00:00",
-
-            // Ensure the calendar is proleptic
-            "1582-09-30T00:00:00", "1582-10-16T00:00:00",
-
-            // https://github.com/pgjdbc/pgjdbc/issues/2221
-            "0001-01-01T00:00:00",
-            "1000-01-01T00:00:00",
-            "1000-01-01T23:59:59", "1000-06-01T01:00:00", "0999-12-31T23:59:59",
-
-            // On 2000-10-29 03:00:00 Moscow went to regular time, thus local time became 02:00:00
-            "2000-10-29T01:59:59", "2000-10-29T02:00:00", "2000-10-29T02:00:01", "2000-10-29T02:59:59",
-            "2000-10-29T03:00:00", "2000-10-29T03:00:01", "2000-10-29T03:59:59", "2000-10-29T04:00:00",
-            "2000-10-29T04:00:01", "2000-10-29T04:00:00.000001");
-
-    for (String zoneId : zoneIdsToTest) {
-      ZoneId zone = ZoneId.of(zoneId);
-      for (String date : datesToTest) {
-        localTimestamps(zone, date);
-      }
+        ex = assertThrows(PSQLException.class, () -> rs.getObject(1, typeToGet));
+        assertEquals(PSQLState.DATA_TYPE_MISMATCH.getState(), ex.getSQLState());
     }
-  }
 
-  public void localTimestamps(ZoneId zoneId, String timestamp) throws SQLException {
-    TimeZone.setDefault(TimeZone.getTimeZone(zoneId));
-    try (Statement stmt = con.createStatement()) {
-      stmt.executeUpdate(TestUtil.insertSQL("table1", "timestamp_without_time_zone_column", "TIMESTAMP '" + timestamp + "'"));
-
-      try (ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "timestamp_without_time_zone_column"))) {
-        assertTrue(rs.next());
-        LocalDateTime localDateTime = LocalDateTime.parse(timestamp);
-        assertEquals(localDateTime, rs.getObject("timestamp_without_time_zone_column", LocalDateTime.class));
-        assertEquals(localDateTime, rs.getObject(1, LocalDateTime.class));
-
-        //Also test that we get the correct values when retrieving the data as LocalDate objects
-        assertEquals(localDateTime.toLocalDate(), rs.getObject("timestamp_without_time_zone_column", LocalDate.class));
-        assertEquals(localDateTime.toLocalDate(), rs.getObject(1, LocalDate.class));
-
-        assertDataTypeMismatch(rs, "timestamp_without_time_zone_column", OffsetTime.class);
-        // TODO: this should also not work, but that's an open discussion (see https://github.com/pgjdbc/pgjdbc/pull/2467):
-        // assertDataTypeMismatch(rs, "timestamp_without_time_zone_column", OffsetDateTime.class);
-      }
-      stmt.executeUpdate("DELETE FROM table1");
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        TestUtil.createTable(con, "table1", "timestamp_without_time_zone_column timestamp without time zone,"
+                + "timestamp_with_time_zone_column timestamp with time zone,"
+                + "date_column date,"
+                + "time_without_time_zone_column time without time zone,"
+                + "time_with_time_zone_column time with time zone"
+        );
     }
-  }
 
-  /**
-   * Test the behavior getObject for timestamp with time zone columns.
-   */
-  @Test
-  public void testGetTimestampWithTimeZone() throws SQLException {
-    runGetOffsetDateTime(UTC);
-    runGetOffsetDateTime(GMT03);
-    runGetOffsetDateTime(GMT05);
-    runGetOffsetDateTime(GMT13);
-  }
-
-  private void runGetOffsetDateTime(ZoneOffset offset) throws SQLException {
-    try (Statement stmt = con.createStatement()) {
-      stmt.executeUpdate(TestUtil.insertSQL("table1", "timestamp_with_time_zone_column", "TIMESTAMP WITH TIME ZONE '2004-10-19 10:23:54.123456" + offset.toString() + "'"));
-
-      try (ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "timestamp_with_time_zone_column"))) {
-        assertTrue(rs.next());
-        LocalDateTime localDateTime = LocalDateTime.of(2004, 10, 19, 10, 23, 54, 123456000);
-
-        OffsetDateTime offsetDateTime = localDateTime.atOffset(offset).withOffsetSameInstant(ZoneOffset.UTC);
-        assertEquals(offsetDateTime, rs.getObject("timestamp_with_time_zone_column", OffsetDateTime.class));
-        assertEquals(offsetDateTime, rs.getObject(1, OffsetDateTime.class));
-
-        assertDataTypeMismatch(rs, "timestamp_with_time_zone_column", LocalTime.class);
-        assertDataTypeMismatch(rs, "timestamp_with_time_zone_column", LocalDateTime.class);
-      }
-      stmt.executeUpdate("DELETE FROM table1");
+    @Override
+    public void tearDown() throws SQLException {
+        TimeZone.setDefault(saveTZ);
+        TestUtil.dropTable(con, "table1");
+        super.tearDown();
     }
-  }
 
-  @Test
-  public void testBcDate() throws SQLException {
-    try (Statement stmt = con.createStatement(); ResultSet rs = stmt.executeQuery("SELECT '1582-09-30 BC'::date")) {
-      assertTrue(rs.next());
-      LocalDate expected = ISO.date(IsoEra.BCE, 1582, 9, 30);
-      LocalDate actual = rs.getObject(1, LocalDate.class);
-      assertEquals(expected, actual);
-      assertFalse(rs.next());
+    /**
+     * Test the behavior of getObject for date columns.
+     */
+    @Test
+    public void testGetLocalDate() throws SQLException {
+        assumeTrue(TestUtil.haveIntegerDateTimes(con));
+
+        List<String> zoneIdsToTest = new ArrayList<>();
+        zoneIdsToTest.add("Africa/Casablanca"); // It is something like GMT+0..GMT+1
+        zoneIdsToTest.add("America/Adak"); // It is something like GMT-10..GMT-9
+        zoneIdsToTest.add("Atlantic/Azores"); // It is something like GMT-1..GMT+0
+        zoneIdsToTest.add("Europe/Berlin"); // It is something like GMT+1..GMT+2
+        zoneIdsToTest.add("Europe/Moscow"); // It is something like GMT+3..GMT+4 for 2000s
+        zoneIdsToTest.add("Pacific/Apia"); // It is something like GMT+13..GMT+14
+        zoneIdsToTest.add("Pacific/Niue"); // It is something like GMT-11..GMT-11
+        for (int i = -12; i <= 13; i++) {
+            zoneIdsToTest.add(String.format("GMT%+02d", i));
+        }
+
+        List<String> datesToTest = Arrays.asList("1998-01-08",
+                // Some random dates
+                "1981-12-11", "2022-02-22",
+                "2015-09-03", "2015-06-30",
+                "1997-06-30", "1997-07-01", "2012-06-30", "2012-07-01",
+                "2015-06-30", "2015-07-01", "2005-12-31", "2006-01-01",
+                "2008-12-31", "2009-01-01", "2015-06-30", "2015-07-31",
+                "2015-07-31",
+
+                // On 2000-03-26 02:00:00 Moscow went to DST, thus local time became 03:00:00
+                "2000-03-25", "2000-03-26", "2000-03-27",
+
+                // This is a pre-1970 date, so check if it is rounded properly
+                "1950-07-20",
+
+                // Ensure the calendar is proleptic
+                "1582-01-01", "1582-12-31",
+                "1582-09-30", "1582-10-16",
+
+                // https://github.com/pgjdbc/pgjdbc/issues/2221
+                "0001-01-01",
+                "1000-01-01", "1000-06-01", "0999-12-31",
+
+                // On 2000-10-29 03:00:00 Moscow went to regular time, thus local time became 02:00:00
+                "2000-10-28", "2000-10-29", "2000-10-30");
+
+        for (String zoneId : zoneIdsToTest) {
+            ZoneId zone = ZoneId.of(zoneId);
+            for (String date : datesToTest) {
+                localDate(zone, date);
+            }
+        }
     }
-  }
 
-  @Test
-  public void testBcTimestamp() throws SQLException {
-    try (Statement stmt = con.createStatement(); ResultSet rs = stmt.executeQuery("SELECT '1582-09-30 12:34:56 BC'::timestamp")) {
-      assertTrue(rs.next());
-      LocalDateTime expected = ISO.date(IsoEra.BCE, 1582, 9, 30).atTime(12, 34, 56);
-      LocalDateTime actual = rs.getObject(1, LocalDateTime.class);
-      assertEquals(expected, actual);
-      assertFalse(rs.next());
+    public void localDate(ZoneId zoneId, String date) throws SQLException {
+        TimeZone.setDefault(TimeZone.getTimeZone(zoneId));
+        try (Statement stmt = con.createStatement()) {
+            stmt.executeUpdate(TestUtil.insertSQL("table1", "date_column", "DATE '" + date + "'"));
+
+            try (ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "date_column"))) {
+                assertTrue(rs.next());
+                LocalDate localDate = LocalDate.parse(date);
+                assertEquals(localDate, rs.getObject("date_column", LocalDate.class));
+                assertEquals(localDate, rs.getObject(1, LocalDate.class));
+            }
+            stmt.executeUpdate("DELETE FROM table1");
+        }
     }
-  }
 
-  @Test
-  public void testBcTimestamptz() throws SQLException {
-    try (Statement stmt = con.createStatement();
-        ResultSet rs = stmt.executeQuery("SELECT '1582-09-30 12:34:56Z BC'::timestamp")) {
-      assertTrue(rs.next());
-      OffsetDateTime expected = ISO.date(IsoEra.BCE, 1582, 9, 30).atTime(OffsetTime.of(12, 34, 56, 0, UTC));
-      OffsetDateTime actual = rs.getObject(1, OffsetDateTime.class);
-      assertEquals(expected, actual);
-      assertFalse(rs.next());
+    /**
+     * Test the behavior of getObject for timetz columns.
+     */
+    @Test
+    public void testGetOffsetTime() throws SQLException {
+        List<String> timesToTest = Arrays.asList("00:00:00+00:00", "00:00:00+00:30",
+                "01:02:03.333444+02:00", "23:59:59.999999-12:00",
+                "11:22:59.4711-08:00", "23:59:59.0-12:00",
+                "11:22:59.4711+15:59:12", "23:59:59.0-15:59:12"
+        );
+
+        for (String time : timesToTest) {
+            try (Statement stmt = con.createStatement()) {
+                stmt.executeUpdate(TestUtil.insertSQL("table1", "time_with_time_zone_column", "time with time zone '" + time + "'"));
+
+                try (ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "time_with_time_zone_column"))) {
+                    assertTrue(rs.next());
+                    OffsetTime offsetTime = OffsetTime.parse(time);
+                    assertEquals(offsetTime, rs.getObject("time_with_time_zone_column", OffsetTime.class));
+                    assertEquals(offsetTime, rs.getObject(1, OffsetTime.class));
+
+                    //Also test that we get the correct values when retrieving the data as OffsetDateTime objects on EPOCH (required by JDBC)
+                    OffsetDateTime offsetDT = offsetTime.atDate(LocalDate.of(1970, 1, 1));
+                    assertEquals(offsetDT, rs.getObject("time_with_time_zone_column", OffsetDateTime.class));
+                    assertEquals(offsetDT, rs.getObject(1, OffsetDateTime.class));
+
+                    assertDataTypeMismatch(rs, "time_with_time_zone_column", LocalDate.class);
+                    assertDataTypeMismatch(rs, "time_with_time_zone_column", LocalTime.class);
+                    assertDataTypeMismatch(rs, "time_with_time_zone_column", LocalDateTime.class);
+                }
+                stmt.executeUpdate("DELETE FROM table1");
+            }
+        }
     }
-  }
 
-  @Test
-  public void testProlepticCalendarTimestamp() throws SQLException {
-    // date time ranges and CTEs are both new with 8.4
-    assumeMinimumServerVersion(ServerVersion.v8_4);
-    LocalDateTime start = LocalDate.of(1582, 9, 30).atStartOfDay();
-    LocalDateTime end = LocalDate.of(1582, 10, 16).atStartOfDay();
-    long numberOfDays = Duration.between(start, end).toDays() + 1L;
-    List<LocalDateTime> range = Stream.iterate(start, x -> x.plusDays(1))
-        .limit(numberOfDays)
-        .collect(Collectors.toList());
+    /**
+     * Test the behavior of getObject for time columns.
+     */
+    @Test
+    public void testGetLocalTime() throws SQLException {
+        try (Statement stmt = con.createStatement()) {
+            stmt.executeUpdate(TestUtil.insertSQL("table1", "time_without_time_zone_column", "TIME '04:05:06.123456'"));
 
-    runProlepticTests(LocalDateTime.class, "'1582-09-30 00:00'::timestamp, '1582-10-16 00:00'::timestamp", range);
-  }
+            try (ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "time_without_time_zone_column"))) {
+                assertTrue(rs.next());
+                LocalTime localTime = LocalTime.of(4, 5, 6, 123456000);
+                assertEquals(localTime, rs.getObject("time_without_time_zone_column", LocalTime.class));
+                assertEquals(localTime, rs.getObject(1, LocalTime.class));
 
-  @Test
-  public void testProlepticCalendarTimestamptz() throws SQLException {
-    // date time ranges and CTEs are both new with 8.4
-    assumeMinimumServerVersion(ServerVersion.v8_4);
-    OffsetDateTime start = LocalDate.of(1582, 9, 30).atStartOfDay().atOffset(UTC);
-    OffsetDateTime end = LocalDate.of(1582, 10, 16).atStartOfDay().atOffset(UTC);
-    long numberOfDays = Duration.between(start, end).toDays() + 1L;
-    List<OffsetDateTime> range = Stream.iterate(start, x -> x.plusDays(1))
-        .limit(numberOfDays)
-        .collect(Collectors.toList());
-
-    runProlepticTests(OffsetDateTime.class, "'1582-09-30 00:00:00 Z'::timestamptz, '1582-10-16 00:00:00 Z'::timestamptz", range);
-  }
-
-  private <T extends Temporal> void runProlepticTests(Class<T> clazz, String selectRange, List<T> range) throws SQLException {
-    List<T> temporals = new ArrayList<>(range.size());
-
-    try (PreparedStatement stmt = con.prepareStatement("SELECT * FROM generate_series(" + selectRange + ", '1 day');");
-        ResultSet rs = stmt.executeQuery()) {
-      while (rs.next()) {
-        T temporal = rs.getObject(1, clazz);
-        temporals.add(temporal);
-      }
-      assertEquals(range, temporals);
+                assertDataTypeMismatch(rs, "time_without_time_zone_column", OffsetTime.class);
+                assertDataTypeMismatch(rs, "time_without_time_zone_column", OffsetDateTime.class);
+                assertDataTypeMismatch(rs, "time_without_time_zone_column", LocalDate.class);
+                assertDataTypeMismatch(rs, "time_without_time_zone_column", LocalDateTime.class);
+            }
+            stmt.executeUpdate("DELETE FROM table1");
+        }
     }
-  }
 
-  /** checks if getObject with given column name or index 1 throws an exception with DATA_TYPE_MISMATCH as SQLState */
-  private static void assertDataTypeMismatch(ResultSet rs, String columnName, Class<?> typeToGet) {
-    PSQLException ex = assertThrows(PSQLException.class, () -> rs.getObject(columnName, typeToGet));
-    assertEquals(PSQLState.DATA_TYPE_MISMATCH.getState(), ex.getSQLState());
+    /**
+     * Test the behavior of getObject for time columns with null.
+     */
+    @Test
+    public void testGetLocalTimeNull() throws SQLException {
+        try (Statement stmt = con.createStatement()) {
+            stmt.executeUpdate(TestUtil.insertSQL("table1", "time_without_time_zone_column", "NULL"));
 
-    ex = assertThrows(PSQLException.class, () -> rs.getObject(1, typeToGet));
-    assertEquals(PSQLState.DATA_TYPE_MISMATCH.getState(), ex.getSQLState());
-  }
+            try (ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "time_without_time_zone_column"))) {
+                assertTrue(rs.next());
+                assertNull(rs.getObject("time_without_time_zone_column", LocalTime.class));
+                assertNull(rs.getObject(1, LocalTime.class));
+            }
+            stmt.executeUpdate("DELETE FROM table1");
+        }
+    }
+
+    /**
+     * Test the behavior of getObject for time columns with invalid type.
+     */
+    @Test
+    public void testGetLocalTimeInvalidType() throws SQLException {
+        try (Statement stmt = con.createStatement()) {
+            stmt.executeUpdate(TestUtil.insertSQL("table1", "time_with_time_zone_column", "TIME '04:05:06.123456-08:00'"));
+
+            try (ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "time_with_time_zone_column"))) {
+                assertTrue(rs.next());
+                assertDataTypeMismatch(rs, "time_with_time_zone_column", LocalTime.class);
+                assertDataTypeMismatch(rs, "time_with_time_zone_column", LocalDateTime.class);
+                assertDataTypeMismatch(rs, "time_with_time_zone_column", LocalDate.class);
+            }
+            stmt.executeUpdate("DELETE FROM table1");
+        }
+    }
+
+    /**
+     * Test the behavior of getObject for timestamp columns.
+     */
+    @Test
+    public void testGetLocalDateTime() throws SQLException {
+        assumeTrue(TestUtil.haveIntegerDateTimes(con));
+
+        List<String> zoneIdsToTest = new ArrayList<>();
+        zoneIdsToTest.add("Africa/Casablanca"); // It is something like GMT+0..GMT+1
+        zoneIdsToTest.add("America/Adak"); // It is something like GMT-10..GMT-9
+        zoneIdsToTest.add("Atlantic/Azores"); // It is something like GMT-1..GMT+0
+        zoneIdsToTest.add("Europe/Moscow"); // It is something like GMT+3..GMT+4 for 2000s
+        zoneIdsToTest.add("Pacific/Apia"); // It is something like GMT+13..GMT+14
+        zoneIdsToTest.add("Pacific/Niue"); // It is something like GMT-11..GMT-11
+        for (int i = -12; i <= 13; i++) {
+            zoneIdsToTest.add(String.format("GMT%+02d", i));
+        }
+
+        List<String> datesToTest = Arrays.asList("2015-09-03T12:00:00", "2015-06-30T23:59:58",
+                "1997-06-30T23:59:59", "1997-07-01T00:00:00", "2012-06-30T23:59:59", "2012-07-01T00:00:00",
+                "2015-06-30T23:59:59", "2015-07-01T00:00:00", "2005-12-31T23:59:59", "2006-01-01T00:00:00",
+                "2008-12-31T23:59:59", "2009-01-01T00:00:00", /* "2015-06-30T23:59:60", */ "2015-07-31T00:00:00",
+                "2015-07-31T00:00:01", "2015-07-31T00:00:00.000001",
+
+                // On 2000-03-26 02:00:00 Moscow went to DST, thus local time became 03:00:00
+                "2000-03-26T01:59:59", "2000-03-26T02:00:00", "2000-03-26T02:00:01", "2000-03-26T02:59:59",
+                "2000-03-26T03:00:00", "2000-03-26T03:00:01", "2000-03-26T03:59:59", "2000-03-26T04:00:00",
+                "2000-03-26T04:00:01", "2000-03-26T04:00:00.000001",
+
+                // This is a pre-1970 date, so check if it is rounded properly
+                "1950-07-20T02:00:00",
+
+                // Ensure the calendar is proleptic
+                "1582-09-30T00:00:00", "1582-10-16T00:00:00",
+
+                // https://github.com/pgjdbc/pgjdbc/issues/2221
+                "0001-01-01T00:00:00",
+                "1000-01-01T00:00:00",
+                "1000-01-01T23:59:59", "1000-06-01T01:00:00", "0999-12-31T23:59:59",
+
+                // On 2000-10-29 03:00:00 Moscow went to regular time, thus local time became 02:00:00
+                "2000-10-29T01:59:59", "2000-10-29T02:00:00", "2000-10-29T02:00:01", "2000-10-29T02:59:59",
+                "2000-10-29T03:00:00", "2000-10-29T03:00:01", "2000-10-29T03:59:59", "2000-10-29T04:00:00",
+                "2000-10-29T04:00:01", "2000-10-29T04:00:00.000001");
+
+        for (String zoneId : zoneIdsToTest) {
+            ZoneId zone = ZoneId.of(zoneId);
+            for (String date : datesToTest) {
+                localTimestamps(zone, date);
+            }
+        }
+    }
+
+    public void localTimestamps(ZoneId zoneId, String timestamp) throws SQLException {
+        TimeZone.setDefault(TimeZone.getTimeZone(zoneId));
+        try (Statement stmt = con.createStatement()) {
+            stmt.executeUpdate(TestUtil.insertSQL("table1", "timestamp_without_time_zone_column", "TIMESTAMP '" + timestamp + "'"));
+
+            try (ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "timestamp_without_time_zone_column"))) {
+                assertTrue(rs.next());
+                LocalDateTime localDateTime = LocalDateTime.parse(timestamp);
+                assertEquals(localDateTime, rs.getObject("timestamp_without_time_zone_column", LocalDateTime.class));
+                assertEquals(localDateTime, rs.getObject(1, LocalDateTime.class));
+
+                //Also test that we get the correct values when retrieving the data as LocalDate objects
+                assertEquals(localDateTime.toLocalDate(), rs.getObject("timestamp_without_time_zone_column", LocalDate.class));
+                assertEquals(localDateTime.toLocalDate(), rs.getObject(1, LocalDate.class));
+
+                assertDataTypeMismatch(rs, "timestamp_without_time_zone_column", OffsetTime.class);
+                // TODO: this should also not work, but that's an open discussion (see https://github.com/pgjdbc/pgjdbc/pull/2467):
+                // assertDataTypeMismatch(rs, "timestamp_without_time_zone_column", OffsetDateTime.class);
+            }
+            stmt.executeUpdate("DELETE FROM table1");
+        }
+    }
+
+    /**
+     * Test the behavior of getObject for timestamp with time zone columns.
+     */
+    @Test
+    public void testGetTimestampWithTimeZone() throws SQLException {
+        runGetOffsetDateTime(UTC);
+        runGetOffsetDateTime(GMT03);
+        runGetOffsetDateTime(GMT05);
+        runGetOffsetDateTime(GMT13);
+    }
+
+    private void runGetOffsetDateTime(ZoneOffset offset) throws SQLException {
+        try (Statement stmt = con.createStatement()) {
+            stmt.executeUpdate(TestUtil.insertSQL("table1", "timestamp_with_time_zone_column", "TIMESTAMP WITH TIME ZONE '2004-10-19 10:23:54.123456" + offset.toString() + "'"));
+
+            try (ResultSet rs = stmt.executeQuery(TestUtil.selectSQL("table1", "timestamp_with_time_zone_column"))) {
+                assertTrue(rs.next());
+                LocalDateTime localDateTime = LocalDateTime.of(2004, 10, 19, 10, 23, 54, 123456000);
+
+                OffsetDateTime offsetDateTime = localDateTime.atOffset(offset).withOffsetSameInstant(ZoneOffset.UTC);
+                assertEquals(offsetDateTime, rs.getObject("timestamp_with_time_zone_column", OffsetDateTime.class));
+                assertEquals(offsetDateTime, rs.getObject(1, OffsetDateTime.class));
+
+                assertDataTypeMismatch(rs, "timestamp_with_time_zone_column", LocalTime.class);
+                assertDataTypeMismatch(rs, "timestamp_with_time_zone_column", LocalDateTime.class);
+            }
+            stmt.executeUpdate("DELETE FROM table1");
+        }
+    }
+
+    @Test
+    public void testBcDate() throws SQLException {
+        try (Statement stmt = con.createStatement(); ResultSet rs = stmt.executeQuery("SELECT '1582-09-30 BC'::date")) {
+            assertTrue(rs.next());
+            LocalDate expected = ISO.date(IsoEra.BCE, 1582, 9, 30);
+            LocalDate actual = rs.getObject(1, LocalDate.class);
+            assertEquals(expected, actual);
+            assertFalse(rs.next());
+        }
+    }
+
+    @Test
+    public void testBcTimestamp() throws SQLException {
+        try (Statement stmt = con.createStatement(); ResultSet rs = stmt.executeQuery("SELECT '1582-09-30 12:34:56 BC'::timestamp")) {
+            assertTrue(rs.next());
+            LocalDateTime expected = ISO.date(IsoEra.BCE, 1582, 9, 30).atTime(12, 34, 56);
+            LocalDateTime actual = rs.getObject(1, LocalDateTime.class);
+            assertEquals(expected, actual);
+            assertFalse(rs.next());
+        }
+    }
+
+    @Test
+    public void testBcTimestamptz() throws SQLException {
+        try (Statement stmt = con.createStatement();
+             ResultSet rs = stmt.executeQuery("SELECT '1582-09-30 12:34:56Z BC'::timestamp")) {
+            assertTrue(rs.next());
+            OffsetDateTime expected = ISO.date(IsoEra.BCE, 1582, 9, 30).atTime(OffsetTime.of(12, 34, 56, 0, UTC));
+            OffsetDateTime actual = rs.getObject(1, OffsetDateTime.class);
+            assertEquals(expected, actual);
+            assertFalse(rs.next());
+        }
+    }
+
+    @Test
+    public void testProlepticCalendarTimestamp() throws SQLException {
+        // date time ranges and CTEs are both new with 8.4
+        assumeMinimumServerVersion(ServerVersion.v8_4);
+        LocalDateTime start = LocalDate.of(1582, 9, 30).atStartOfDay();
+        LocalDateTime end = LocalDate.of(1582, 10, 16).atStartOfDay();
+        long numberOfDays = Duration.between(start, end).toDays() + 1L;
+        List<LocalDateTime> range = Stream.iterate(start, x -> x.plusDays(1))
+                .limit(numberOfDays)
+                .collect(Collectors.toList());
+
+        runProlepticTests(LocalDateTime.class, "'1582-09-30 00:00'::timestamp, '1582-10-16 00:00'::timestamp", range);
+    }
+
+    @Test
+    public void testProlepticCalendarTimestamptz() throws SQLException {
+        // date time ranges and CTEs are both new with 8.4
+        assumeMinimumServerVersion(ServerVersion.v8_4);
+        OffsetDateTime start = LocalDate.of(1582, 9, 30).atStartOfDay().atOffset(UTC);
+        OffsetDateTime end = LocalDate.of(1582, 10, 16).atStartOfDay().atOffset(UTC);
+        long numberOfDays = Duration.between(start, end).toDays() + 1L;
+        List<OffsetDateTime> range = Stream.iterate(start, x -> x.plusDays(1))
+                .limit(numberOfDays)
+                .collect(Collectors.toList());
+
+        runProlepticTests(OffsetDateTime.class, "'1582-09-30 00:00:00 Z'::timestamptz, '1582-10-16 00:00:00 Z'::timestamptz", range);
+    }
+
+    private <T extends Temporal> void runProlepticTests(Class<T> clazz, String selectRange, List<T> range) throws SQLException {
+        List<T> temporals = new ArrayList<>(range.size());
+
+        try (PreparedStatement stmt = con.prepareStatement("SELECT * FROM generate_series(" + selectRange + ", '1 day');");
+             ResultSet rs = stmt.executeQuery()) {
+            while (rs.next()) {
+                T temporal = rs.getObject(1, clazz);
+                temporals.add(temporal);
+            }
+            assertEquals(range, temporals);
+        }
+    }
 
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc42/Jdbc42CallableStatementTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc42/Jdbc42CallableStatementTest.java
index 6a55c75..00b0e3c 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc42/Jdbc42CallableStatementTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc42/Jdbc42CallableStatementTest.java
@@ -27,89 +27,89 @@ import java.util.List;
  */
 public class Jdbc42CallableStatementTest extends BaseTest4 {
 
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
+    final String func = "{ ? = call ";
+    final String pkgName = "testspg__";
 
-    try (Statement stmt = con.createStatement()) {
-      stmt.execute(
-              "CREATE OR REPLACE FUNCTION testspg__getResultSetWithoutArg() "
-                      + "RETURNS refcursor AS '  "
-                      + "declare ref refcursor;"
-                      + "begin OPEN ref FOR SELECT 1; RETURN ref; end; ' LANGUAGE plpgsql;");
-    }
-  }
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
 
-  final String func = "{ ? = call ";
-  final String pkgName = "testspg__";
-
-  @Override
-  public void tearDown() throws SQLException {
-    try (Statement stmt = con.createStatement()) {
-      stmt.execute("drop FUNCTION testspg__getResultSetWithoutArg ();");
-    }
-    super.tearDown();
-  }
-
-  @Test
-  public void testGetResultSetWithoutArg() throws SQLException {
-    assumeCallableStatementsSupported();
-    try (CallableStatement call = con.prepareCall(func + pkgName + "getResultSetWithoutArg () }")) {
-      con.setAutoCommit(false); // ref cursors only work if auto commit is off
-      call.registerOutParameter(1, Types.REF_CURSOR);
-      call.execute();
-      List<Integer> values = new ArrayList<>(1);
-      try (ResultSet rs = call.getObject(1, ResultSet.class)) {
-        while (rs.next()) {
-          values.add(rs.getInt(1));
+        try (Statement stmt = con.createStatement()) {
+            stmt.execute(
+                    "CREATE OR REPLACE FUNCTION testspg__getResultSetWithoutArg() "
+                            + "RETURNS refcursor AS '  "
+                            + "declare ref refcursor;"
+                            + "begin OPEN ref FOR SELECT 1; RETURN ref; end; ' LANGUAGE plpgsql;");
         }
-      }
-      assertEquals(Collections.singletonList(1), values);
-    } finally {
-      con.setAutoCommit(true);
     }
-  }
 
-  @Test
-  public void testGetResultSetWithoutArgUnsupportedConversion() throws SQLException {
-    assumeCallableStatementsSupported();
-    try (CallableStatement call = con.prepareCall(func + pkgName + "getResultSetWithoutArg () }")) {
-      con.setAutoCommit(false); // ref cursors only work if auto commit is off
-      call.registerOutParameter(1, Types.REF_CURSOR);
-      call.execute();
-      try {
-        // this should never be allowed even if more types will be implemented in the future
-        call.getObject(1, ResultSetMetaData.class);
-        fail("conversion from ResultSet to ResultSetMetaData should not be supported");
-      } catch (SQLException e) {
-        // should reach
-      }
-    } finally {
-      con.setAutoCommit(true);
+    @Override
+    public void tearDown() throws SQLException {
+        try (Statement stmt = con.createStatement()) {
+            stmt.execute("drop FUNCTION testspg__getResultSetWithoutArg ();");
+        }
+        super.tearDown();
     }
-  }
 
-  @Test
-  public void testRegisterOutParameter() throws SQLException {
+    @Test
+    public void testGetResultSetWithoutArg() throws SQLException {
+        assumeCallableStatementsSupported();
+        try (CallableStatement call = con.prepareCall(func + pkgName + "getResultSetWithoutArg () }")) {
+            con.setAutoCommit(false); // ref cursors only work if auto commit is off
+            call.registerOutParameter(1, Types.REF_CURSOR);
+            call.execute();
+            List<Integer> values = new ArrayList<>(1);
+            try (ResultSet rs = call.getObject(1, ResultSet.class)) {
+                while (rs.next()) {
+                    values.add(rs.getInt(1));
+                }
+            }
+            assertEquals(Collections.singletonList(1), values);
+        } finally {
+            con.setAutoCommit(true);
+        }
+    }
 
-    CallableStatement cs = null;
+    @Test
+    public void testGetResultSetWithoutArgUnsupportedConversion() throws SQLException {
+        assumeCallableStatementsSupported();
+        try (CallableStatement call = con.prepareCall(func + pkgName + "getResultSetWithoutArg () }")) {
+            con.setAutoCommit(false); // ref cursors only work if auto commit is off
+            call.registerOutParameter(1, Types.REF_CURSOR);
+            call.execute();
+            try {
+                // this should never be allowed even if more types will be implemented in the future
+                call.getObject(1, ResultSetMetaData.class);
+                fail("conversion from ResultSet to ResultSetMetaData should not be supported");
+            } catch (SQLException e) {
+                // should reach
+            }
+        } finally {
+            con.setAutoCommit(true);
+        }
+    }
 
-    cs = con.prepareCall("{ ? = call xxxx.yyyy (?,?,?,?)}");
-    cs.registerOutParameter(1, Types.REF_CURSOR);
+    @Test
+    public void testRegisterOutParameter() throws SQLException {
 
-    cs.setLong(2, 1000L);
-    cs.setLong(3, 500);
-    cs.setLong(4, 3000);
-    cs.setNull(5, Types.NUMERIC);
-  }
+        CallableStatement cs = null;
 
-  @Test
-  public void testRegisterInoutParameter() throws SQLException {
+        cs = con.prepareCall("{ ? = call xxxx.yyyy (?,?,?,?)}");
+        cs.registerOutParameter(1, Types.REF_CURSOR);
 
-    CallableStatement cs = null;
+        cs.setLong(2, 1000L);
+        cs.setLong(3, 500);
+        cs.setLong(4, 3000);
+        cs.setNull(5, Types.NUMERIC);
+    }
 
-    cs = con.prepareCall("{call xxxx.yyyy (?)}");
-    cs.setNull(1, Types.REF_CURSOR);
-    cs.registerOutParameter(1, Types.REF_CURSOR);
-  }
+    @Test
+    public void testRegisterInoutParameter() throws SQLException {
+
+        CallableStatement cs = null;
+
+        cs = con.prepareCall("{call xxxx.yyyy (?)}");
+        cs.setNull(1, Types.REF_CURSOR);
+        cs.registerOutParameter(1, Types.REF_CURSOR);
+    }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc42/Jdbc42TestSuite.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc42/Jdbc42TestSuite.java
index 6b471c3..5f599d3 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc42/Jdbc42TestSuite.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc42/Jdbc42TestSuite.java
@@ -11,15 +11,15 @@ import org.junit.runners.Suite.SuiteClasses;
 
 @RunWith(Suite.class)
 @SuiteClasses({
-    AdaptiveFetchSizeTest.class,
-    CustomizeDefaultFetchSizeTest.class,
-    GetObject310InfinityTests.class,
-    GetObject310Test.class,
-    Jdbc42CallableStatementTest.class,
-    LargeCountJdbc42Test.class,
-    PreparedStatementTest.class,
-    SetObject310Test.class,
-    SimpleJdbc42Test.class,
+        AdaptiveFetchSizeTest.class,
+        CustomizeDefaultFetchSizeTest.class,
+        GetObject310InfinityTests.class,
+        GetObject310Test.class,
+        Jdbc42CallableStatementTest.class,
+        LargeCountJdbc42Test.class,
+        PreparedStatementTest.class,
+        SetObject310Test.class,
+        SimpleJdbc42Test.class,
 })
 public class Jdbc42TestSuite {
 
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc42/LargeCountJdbc42Test.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc42/LargeCountJdbc42Test.java
index fef3362..a1c1766 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc42/LargeCountJdbc42Test.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc42/LargeCountJdbc42Test.java
@@ -30,383 +30,383 @@ import java.util.Properties;
 @RunWith(Parameterized.class)
 public class LargeCountJdbc42Test extends BaseTest4 {
 
-  private final boolean insertRewrite;
+    private final boolean insertRewrite;
 
-  public LargeCountJdbc42Test(BinaryMode binaryMode, boolean insertRewrite) {
-    this.insertRewrite = insertRewrite;
-    setBinaryMode(binaryMode);
-  }
-
-  @Parameterized.Parameters(name = "binary = {0}, insertRewrite = {1}")
-  public static Iterable<Object[]> data() {
-    Collection<Object[]> ids = new ArrayList<>();
-    for (BinaryMode binaryMode : BinaryMode.values()) {
-      for (boolean insertRewrite : new boolean[]{false, true}) {
-        ids.add(new Object[]{binaryMode, insertRewrite});
-      }
+    public LargeCountJdbc42Test(BinaryMode binaryMode, boolean insertRewrite) {
+        this.insertRewrite = insertRewrite;
+        setBinaryMode(binaryMode);
     }
-    return ids;
-  }
 
-  @Override
-  protected void updateProperties(Properties props) {
-    super.updateProperties(props);
-    PGProperty.REWRITE_BATCHED_INSERTS.set(props, insertRewrite);
-  }
-
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-    TestUtil.createUnloggedTable(con, "largetable", "a boolean");
-  }
-
-  @Override
-  public void tearDown() throws SQLException {
-    TestUtil.dropTable(con, "largetable");
-    TestUtil.closeDB(con);
-  }
-
-  // ********************* EXECUTE LARGE UPDATES *********************
-  //    FINEST: simple execute, handler=org.postgresql.jdbc.PgStatement$StatementResultHandler@38cccef, maxRows=0, fetchSize=0, flags=21
-  //    FINEST: FE=> Parse(stmt=null,query="insert into largetable select true from generate_series($1, $2)",oids={20,20})
-  //    FINEST: FE=> Bind(stmt=null,portal=null,$1=<1>,$2=<2147483757>)
-  //    FINEST: FE=> Describe(portal=null)
-  //    FINEST: FE=> Execute(portal=null,limit=1)
-  //    FINEST: FE=> Sync
-  //    FINEST: <=BE ParseComplete [null]
-  //    FINEST: <=BE BindComplete [unnamed]
-  //    FINEST: <=BE NoData
-  //    FINEST: <=BE CommandStatus(INSERT 0 2147483757)
-  //    FINEST: <=BE ReadyForQuery(I)
-  //    FINEST: simple execute, handler=org.postgresql.jdbc.PgStatement$StatementResultHandler@5679c6c6, maxRows=0, fetchSize=0, flags=21
-  //    FINEST: FE=> Parse(stmt=null,query="delete from largetable",oids={})
-  //    FINEST: FE=> Bind(stmt=null,portal=null)
-  //    FINEST: FE=> Describe(portal=null)
-  //    FINEST: FE=> Execute(portal=null,limit=1)
-  //    FINEST: FE=> Sync
-  //    FINEST: <=BE ParseComplete [null]
-  //    FINEST: <=BE BindComplete [unnamed]
-  //    FINEST: <=BE NoData
-  //    FINEST: <=BE CommandStatus(DELETE 2147483757)
-
-  /*
-   * Test PreparedStatement.executeLargeUpdate() and Statement.executeLargeUpdate(String sql)
-   */
-  @Ignore("This is the big and SLOW test")
-  @Test
-  public void testExecuteLargeUpdateBIG() throws Exception {
-    long expected = Integer.MAX_VALUE + 110L;
-    con.setAutoCommit(false);
-    // Test PreparedStatement.executeLargeUpdate()
-    try (PreparedStatement stmt = con.prepareStatement("insert into largetable "
-        + "select true from generate_series(?, ?)")) {
-      stmt.setLong(1, 1);
-      stmt.setLong(2, 2_147_483_757L); // Integer.MAX_VALUE + 110L
-      long count = stmt.executeLargeUpdate();
-      Assert.assertEquals("PreparedStatement 110 rows more than Integer.MAX_VALUE", expected, count);
-    }
-    // Test Statement.executeLargeUpdate(String sql)
-    try (Statement stmt = con.createStatement()) {
-      long count = stmt.executeLargeUpdate("delete from largetable");
-      Assert.assertEquals("Statement 110 rows more than Integer.MAX_VALUE", expected, count);
-    }
-    con.setAutoCommit(true);
-  }
-
-  /*
-   * Test Statement.executeLargeUpdate(String sql)
-   */
-  @Test
-  public void testExecuteLargeUpdateStatementSMALL() throws Exception {
-    try (Statement stmt = con.createStatement()) {
-      long count = stmt.executeLargeUpdate("insert into largetable "
-          + "select true from generate_series(1, 1010)");
-      long expected = 1010L;
-      Assert.assertEquals("Small long return 1010L", expected, count);
-    }
-  }
-
-  /*
-   * Test PreparedStatement.executeLargeUpdate();
-   */
-  @Test
-  public void testExecuteLargeUpdatePreparedStatementSMALL() throws Exception {
-    try (PreparedStatement stmt = con.prepareStatement("insert into largetable "
-        + "select true from generate_series(?, ?)")) {
-      stmt.setLong(1, 1);
-      stmt.setLong(2, 1010L);
-      long count = stmt.executeLargeUpdate();
-      long expected = 1010L;
-      Assert.assertEquals("Small long return 1010L", expected, count);
-    }
-  }
-
-  /*
-   * Test Statement.getLargeUpdateCount();
-   */
-  @Test
-  public void testGetLargeUpdateCountStatementSMALL() throws Exception {
-    try (Statement stmt = con.createStatement()) {
-      boolean isResult = stmt.execute("insert into largetable "
-          + "select true from generate_series(1, 1010)");
-      Assert.assertFalse("False if it is an update count or there are no results", isResult);
-      long count = stmt.getLargeUpdateCount();
-      long expected = 1010L;
-      Assert.assertEquals("Small long return 1010L", expected, count);
-    }
-  }
-
-  /*
-   * Test PreparedStatement.getLargeUpdateCount();
-   */
-  @Test
-  public void testGetLargeUpdateCountPreparedStatementSMALL() throws Exception {
-    try (PreparedStatement stmt = con.prepareStatement("insert into largetable "
-        + "select true from generate_series(?, ?)")) {
-      stmt.setInt(1, 1);
-      stmt.setInt(2, 1010);
-      boolean isResult = stmt.execute();
-      Assert.assertFalse("False if it is an update count or there are no results", isResult);
-      long count = stmt.getLargeUpdateCount();
-      long expected = 1010L;
-      Assert.assertEquals("Small long return 1010L", expected, count);
-    }
-  }
-
-  /*
-   * Test fail SELECT Statement.executeLargeUpdate(String sql)
-   */
-  @Test
-  public void testExecuteLargeUpdateStatementSELECT() throws Exception {
-    try (Statement stmt = con.createStatement()) {
-      long count = stmt.executeLargeUpdate("select true from generate_series(1, 5)");
-      Assert.fail("A result was returned when none was expected. Returned: " + count);
-    } catch (SQLException e) {
-      Assert.assertEquals(PSQLState.TOO_MANY_RESULTS.getState(), e.getSQLState());
-    }
-  }
-
-  /*
-   * Test fail SELECT PreparedStatement.executeLargeUpdate();
-   */
-  @Test
-  public void testExecuteLargeUpdatePreparedStatementSELECT() throws Exception {
-    try (PreparedStatement stmt = con.prepareStatement("select true from generate_series(?, ?)")) {
-      stmt.setLong(1, 1);
-      stmt.setLong(2, 5L);
-      long count = stmt.executeLargeUpdate();
-      Assert.fail("A result was returned when none was expected. Returned: " + count);
-    } catch (SQLException e) {
-      Assert.assertEquals(PSQLState.TOO_MANY_RESULTS.getState(), e.getSQLState());
-    }
-  }
-
-  /*
-   * Test Statement.getLargeUpdateCount();
-   */
-  @Test
-  public void testGetLargeUpdateCountStatementSELECT() throws Exception {
-    try (Statement stmt = con.createStatement()) {
-      boolean isResult = stmt.execute("select true from generate_series(1, 5)");
-      Assert.assertTrue("True since this is a SELECT", isResult);
-      long count = stmt.getLargeUpdateCount();
-      long expected = -1L;
-      Assert.assertEquals("-1 if the current result is a ResultSet object", expected, count);
-    }
-  }
-
-  /*
-   * Test PreparedStatement.getLargeUpdateCount();
-   */
-  @Test
-  public void testGetLargeUpdateCountPreparedStatementSELECT() throws Exception {
-    try (PreparedStatement stmt = con.prepareStatement("select true from generate_series(?, ?)")) {
-      stmt.setLong(1, 1);
-      stmt.setLong(2, 5L);
-      boolean isResult = stmt.execute();
-      Assert.assertTrue("True since this is a SELECT", isResult);
-      long count = stmt.getLargeUpdateCount();
-      long expected = -1L;
-      Assert.assertEquals("-1 if the current result is a ResultSet object", expected, count);
-    }
-  }
-
-  // ********************* BATCH LARGE UPDATES *********************
-  //    FINEST: batch execute 3 queries, handler=org.postgresql.jdbc.BatchResultHandler@3d04a311, maxRows=0, fetchSize=0, flags=21
-  //    FINEST: FE=> Parse(stmt=null,query="insert into largetable select true from generate_series($1, $2)",oids={23,23})
-  //    FINEST: FE=> Bind(stmt=null,portal=null,$1=<1>,$2=<200>)
-  //    FINEST: FE=> Describe(portal=null)
-  //    FINEST: FE=> Execute(portal=null,limit=1)
-  //    FINEST: FE=> Parse(stmt=null,query="insert into largetable select true from generate_series($1, $2)",oids={23,20})
-  //    FINEST: FE=> Bind(stmt=null,portal=null,$1=<1>,$2=<3000000000>)
-  //    FINEST: FE=> Describe(portal=null)
-  //    FINEST: FE=> Execute(portal=null,limit=1)
-  //    FINEST: FE=> Parse(stmt=null,query="insert into largetable select true from generate_series($1, $2)",oids={23,23})
-  //    FINEST: FE=> Bind(stmt=null,portal=null,$1=<1>,$2=<50>)
-  //    FINEST: FE=> Describe(portal=null)
-  //    FINEST: FE=> Execute(portal=null,limit=1)
-  //    FINEST: FE=> Sync
-  //    FINEST: <=BE ParseComplete [null]
-  //    FINEST: <=BE BindComplete [unnamed]
-  //    FINEST: <=BE NoData
-  //    FINEST: <=BE CommandStatus(INSERT 0 200)
-  //    FINEST: <=BE ParseComplete [null]
-  //    FINEST: <=BE BindComplete [unnamed]
-  //    FINEST: <=BE NoData
-  //    FINEST: <=BE CommandStatus(INSERT 0 3000000000)
-  //    FINEST: <=BE ParseComplete [null]
-  //    FINEST: <=BE BindComplete [unnamed]
-  //    FINEST: <=BE NoData
-  //    FINEST: <=BE CommandStatus(INSERT 0 50)
-
-  /*
-   * Test simple PreparedStatement.executeLargeBatch();
-   */
-  @Ignore("This is the big and SLOW test")
-  @Test
-  public void testExecuteLargeBatchStatementBIG() throws Exception {
-    con.setAutoCommit(false);
-    try (PreparedStatement stmt = con.prepareStatement("insert into largetable "
-        + "select true from generate_series(?, ?)")) {
-      stmt.setInt(1, 1);
-      stmt.setInt(2, 200);
-      stmt.addBatch(); // statement one
-      stmt.setInt(1, 1);
-      stmt.setLong(2, 3_000_000_000L);
-      stmt.addBatch(); // statement two
-      stmt.setInt(1, 1);
-      stmt.setInt(2, 50);
-      stmt.addBatch(); // statement three
-      long[] actual = stmt.executeLargeBatch();
-      Assert.assertArrayEquals("Large rows inserted via 3 batch", new long[]{200L, 3_000_000_000L, 50L}, actual);
-    }
-    con.setAutoCommit(true);
-  }
-
-  /*
-   * Test simple Statement.executeLargeBatch();
-   */
-  @Test
-  public void testExecuteLargeBatchStatementSMALL() throws Exception {
-    try (Statement stmt = con.createStatement()) {
-      stmt.addBatch("insert into largetable(a) select true"); // statement one
-      stmt.addBatch("insert into largetable select false"); // statement two
-      stmt.addBatch("insert into largetable(a) values(true)"); // statement three
-      stmt.addBatch("insert into largetable values(false)"); // statement four
-      long[] actual = stmt.executeLargeBatch();
-      Assert.assertArrayEquals("Rows inserted via 4 batch", new long[]{1L, 1L, 1L, 1L}, actual);
-    }
-  }
-
-  /*
-   * Test simple PreparedStatement.executeLargeBatch();
-   */
-  @Test
-  public void testExecuteLargePreparedStatementStatementSMALL() throws Exception {
-    try (PreparedStatement stmt = con.prepareStatement("insert into largetable "
-        + "select true from generate_series(?, ?)")) {
-      stmt.setInt(1, 1);
-      stmt.setInt(2, 200);
-      stmt.addBatch(); // statement one
-      stmt.setInt(1, 1);
-      stmt.setInt(2, 100);
-      stmt.addBatch(); // statement two
-      stmt.setInt(1, 1);
-      stmt.setInt(2, 50);
-      stmt.addBatch(); // statement three
-      stmt.addBatch(); // statement four, same parms as three
-      long[] actual = stmt.executeLargeBatch();
-      Assert.assertArrayEquals("Rows inserted via 4 batch", new long[]{200L, 100L, 50L, 50L}, actual);
-    }
-  }
-
-  /*
-   * Test loop PreparedStatement.executeLargeBatch();
-   */
-  @Test
-  public void testExecuteLargePreparedStatementStatementLoopSMALL() throws Exception {
-    long[] loop = {200, 100, 50, 300, 20, 60, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096};
-    try (PreparedStatement stmt = con.prepareStatement("insert into largetable "
-        + "select true from generate_series(?, ?)")) {
-      for (long i : loop) {
-        stmt.setInt(1, 1);
-        stmt.setLong(2, i);
-        stmt.addBatch();
-      }
-      long[] actual = stmt.executeLargeBatch();
-      Assert.assertArrayEquals("Rows inserted via batch", loop, actual);
-    }
-  }
-
-  /*
-   * Test loop PreparedStatement.executeLargeBatch();
-   */
-  @Test
-  public void testExecuteLargeBatchValuesInsertSMALL() throws Exception {
-    boolean[] loop = {true, false, true, false, false, false, true, true, true, true, false, true};
-    try (PreparedStatement stmt = con.prepareStatement("insert into largetable values(?)")) {
-      for (boolean i : loop) {
-        stmt.setBoolean(1, i);
-        stmt.addBatch();
-      }
-      long[] actual = stmt.executeLargeBatch();
-      Assert.assertEquals("Rows inserted via batch", loop.length, actual.length);
-      for (long i : actual) {
-        if (insertRewrite) {
-          Assert.assertEquals(Statement.SUCCESS_NO_INFO, i);
-        } else {
-          Assert.assertEquals(1, i);
+    @Parameterized.Parameters(name = "binary = {0}, insertRewrite = {1}")
+    public static Iterable<Object[]> data() {
+        Collection<Object[]> ids = new ArrayList<>();
+        for (BinaryMode binaryMode : BinaryMode.values()) {
+            for (boolean insertRewrite : new boolean[]{false, true}) {
+                ids.add(new Object[]{binaryMode, insertRewrite});
+            }
         }
-      }
+        return ids;
     }
-  }
 
-  /*
-   * Test null PreparedStatement.executeLargeBatch();
-   */
-  @Test
-  public void testNullExecuteLargeBatchStatement() throws Exception {
-    try (Statement stmt = con.createStatement()) {
-      long[] actual = stmt.executeLargeBatch();
-      Assert.assertArrayEquals("addBatch() not called batchStatements is null", new long[0], actual);
+    @Override
+    protected void updateProperties(Properties props) {
+        super.updateProperties(props);
+        PGProperty.REWRITE_BATCHED_INSERTS.set(props, insertRewrite);
     }
-  }
 
-  /*
-   * Test empty PreparedStatement.executeLargeBatch();
-   */
-  @Test
-  public void testEmptyExecuteLargeBatchStatement() throws Exception {
-    try (Statement stmt = con.createStatement()) {
-      stmt.addBatch("");
-      stmt.clearBatch();
-      long[] actual = stmt.executeLargeBatch();
-      Assert.assertArrayEquals("clearBatch() called, batchStatements.isEmpty()", new long[0], actual);
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        TestUtil.createUnloggedTable(con, "largetable", "a boolean");
     }
-  }
 
-  /*
-   * Test null PreparedStatement.executeLargeBatch();
-   */
-  @Test
-  public void testNullExecuteLargeBatchPreparedStatement() throws Exception {
-    try (PreparedStatement stmt = con.prepareStatement("")) {
-      long[] actual = stmt.executeLargeBatch();
-      Assert.assertArrayEquals("addBatch() not called batchStatements is null", new long[0], actual);
+    @Override
+    public void tearDown() throws SQLException {
+        TestUtil.dropTable(con, "largetable");
+        TestUtil.closeDB(con);
     }
-  }
 
-  /*
-   * Test empty PreparedStatement.executeLargeBatch();
-   */
-  @Test
-  public void testEmptyExecuteLargeBatchPreparedStatement() throws Exception {
-    try (PreparedStatement stmt = con.prepareStatement("")) {
-      stmt.addBatch();
-      stmt.clearBatch();
-      long[] actual = stmt.executeLargeBatch();
-      Assert.assertArrayEquals("clearBatch() called, batchStatements.isEmpty()", new long[0], actual);
+    // ********************* EXECUTE LARGE UPDATES *********************
+    //    FINEST: simple execute, handler=org.postgresql.jdbc.PgStatement$StatementResultHandler@38cccef, maxRows=0, fetchSize=0, flags=21
+    //    FINEST: FE=> Parse(stmt=null,query="insert into largetable select true from generate_series($1, $2)",oids={20,20})
+    //    FINEST: FE=> Bind(stmt=null,portal=null,$1=<1>,$2=<2147483757>)
+    //    FINEST: FE=> Describe(portal=null)
+    //    FINEST: FE=> Execute(portal=null,limit=1)
+    //    FINEST: FE=> Sync
+    //    FINEST: <=BE ParseComplete [null]
+    //    FINEST: <=BE BindComplete [unnamed]
+    //    FINEST: <=BE NoData
+    //    FINEST: <=BE CommandStatus(INSERT 0 2147483757)
+    //    FINEST: <=BE ReadyForQuery(I)
+    //    FINEST: simple execute, handler=org.postgresql.jdbc.PgStatement$StatementResultHandler@5679c6c6, maxRows=0, fetchSize=0, flags=21
+    //    FINEST: FE=> Parse(stmt=null,query="delete from largetable",oids={})
+    //    FINEST: FE=> Bind(stmt=null,portal=null)
+    //    FINEST: FE=> Describe(portal=null)
+    //    FINEST: FE=> Execute(portal=null,limit=1)
+    //    FINEST: FE=> Sync
+    //    FINEST: <=BE ParseComplete [null]
+    //    FINEST: <=BE BindComplete [unnamed]
+    //    FINEST: <=BE NoData
+    //    FINEST: <=BE CommandStatus(DELETE 2147483757)
+
+    /*
+     * Test PreparedStatement.executeLargeUpdate() and Statement.executeLargeUpdate(String sql)
+     */
+    @Ignore("This is the big and SLOW test")
+    @Test
+    public void testExecuteLargeUpdateBIG() throws Exception {
+        long expected = Integer.MAX_VALUE + 110L;
+        con.setAutoCommit(false);
+        // Test PreparedStatement.executeLargeUpdate()
+        try (PreparedStatement stmt = con.prepareStatement("insert into largetable "
+                + "select true from generate_series(?, ?)")) {
+            stmt.setLong(1, 1);
+            stmt.setLong(2, 2_147_483_757L); // Integer.MAX_VALUE + 110L
+            long count = stmt.executeLargeUpdate();
+            Assert.assertEquals("PreparedStatement 110 rows more than Integer.MAX_VALUE", expected, count);
+        }
+        // Test Statement.executeLargeUpdate(String sql)
+        try (Statement stmt = con.createStatement()) {
+            long count = stmt.executeLargeUpdate("delete from largetable");
+            Assert.assertEquals("Statement 110 rows more than Integer.MAX_VALUE", expected, count);
+        }
+        con.setAutoCommit(true);
+    }
+
+    /*
+     * Test Statement.executeLargeUpdate(String sql)
+     */
+    @Test
+    public void testExecuteLargeUpdateStatementSMALL() throws Exception {
+        try (Statement stmt = con.createStatement()) {
+            long count = stmt.executeLargeUpdate("insert into largetable "
+                    + "select true from generate_series(1, 1010)");
+            long expected = 1010L;
+            Assert.assertEquals("Small long return 1010L", expected, count);
+        }
+    }
+
+    /*
+     * Test PreparedStatement.executeLargeUpdate();
+     */
+    @Test
+    public void testExecuteLargeUpdatePreparedStatementSMALL() throws Exception {
+        try (PreparedStatement stmt = con.prepareStatement("insert into largetable "
+                + "select true from generate_series(?, ?)")) {
+            stmt.setLong(1, 1);
+            stmt.setLong(2, 1010L);
+            long count = stmt.executeLargeUpdate();
+            long expected = 1010L;
+            Assert.assertEquals("Small long return 1010L", expected, count);
+        }
+    }
+
+    /*
+     * Test Statement.getLargeUpdateCount();
+     */
+    @Test
+    public void testGetLargeUpdateCountStatementSMALL() throws Exception {
+        try (Statement stmt = con.createStatement()) {
+            boolean isResult = stmt.execute("insert into largetable "
+                    + "select true from generate_series(1, 1010)");
+            Assert.assertFalse("False if it is an update count or there are no results", isResult);
+            long count = stmt.getLargeUpdateCount();
+            long expected = 1010L;
+            Assert.assertEquals("Small long return 1010L", expected, count);
+        }
+    }
+
+    /*
+     * Test PreparedStatement.getLargeUpdateCount();
+     */
+    @Test
+    public void testGetLargeUpdateCountPreparedStatementSMALL() throws Exception {
+        try (PreparedStatement stmt = con.prepareStatement("insert into largetable "
+                + "select true from generate_series(?, ?)")) {
+            stmt.setInt(1, 1);
+            stmt.setInt(2, 1010);
+            boolean isResult = stmt.execute();
+            Assert.assertFalse("False if it is an update count or there are no results", isResult);
+            long count = stmt.getLargeUpdateCount();
+            long expected = 1010L;
+            Assert.assertEquals("Small long return 1010L", expected, count);
+        }
+    }
+
+    /*
+     * Test fail SELECT Statement.executeLargeUpdate(String sql)
+     */
+    @Test
+    public void testExecuteLargeUpdateStatementSELECT() throws Exception {
+        try (Statement stmt = con.createStatement()) {
+            long count = stmt.executeLargeUpdate("select true from generate_series(1, 5)");
+            Assert.fail("A result was returned when none was expected. Returned: " + count);
+        } catch (SQLException e) {
+            Assert.assertEquals(PSQLState.TOO_MANY_RESULTS.getState(), e.getSQLState());
+        }
+    }
+
+    /*
+     * Test fail SELECT PreparedStatement.executeLargeUpdate();
+     */
+    @Test
+    public void testExecuteLargeUpdatePreparedStatementSELECT() throws Exception {
+        try (PreparedStatement stmt = con.prepareStatement("select true from generate_series(?, ?)")) {
+            stmt.setLong(1, 1);
+            stmt.setLong(2, 5L);
+            long count = stmt.executeLargeUpdate();
+            Assert.fail("A result was returned when none was expected. Returned: " + count);
+        } catch (SQLException e) {
+            Assert.assertEquals(PSQLState.TOO_MANY_RESULTS.getState(), e.getSQLState());
+        }
+    }
+
+    /*
+     * Test Statement.getLargeUpdateCount();
+     */
+    @Test
+    public void testGetLargeUpdateCountStatementSELECT() throws Exception {
+        try (Statement stmt = con.createStatement()) {
+            boolean isResult = stmt.execute("select true from generate_series(1, 5)");
+            Assert.assertTrue("True since this is a SELECT", isResult);
+            long count = stmt.getLargeUpdateCount();
+            long expected = -1L;
+            Assert.assertEquals("-1 if the current result is a ResultSet object", expected, count);
+        }
+    }
+
+    /*
+     * Test PreparedStatement.getLargeUpdateCount();
+     */
+    @Test
+    public void testGetLargeUpdateCountPreparedStatementSELECT() throws Exception {
+        try (PreparedStatement stmt = con.prepareStatement("select true from generate_series(?, ?)")) {
+            stmt.setLong(1, 1);
+            stmt.setLong(2, 5L);
+            boolean isResult = stmt.execute();
+            Assert.assertTrue("True since this is a SELECT", isResult);
+            long count = stmt.getLargeUpdateCount();
+            long expected = -1L;
+            Assert.assertEquals("-1 if the current result is a ResultSet object", expected, count);
+        }
+    }
+
+    // ********************* BATCH LARGE UPDATES *********************
+    //    FINEST: batch execute 3 queries, handler=org.postgresql.jdbc.BatchResultHandler@3d04a311, maxRows=0, fetchSize=0, flags=21
+    //    FINEST: FE=> Parse(stmt=null,query="insert into largetable select true from generate_series($1, $2)",oids={23,23})
+    //    FINEST: FE=> Bind(stmt=null,portal=null,$1=<1>,$2=<200>)
+    //    FINEST: FE=> Describe(portal=null)
+    //    FINEST: FE=> Execute(portal=null,limit=1)
+    //    FINEST: FE=> Parse(stmt=null,query="insert into largetable select true from generate_series($1, $2)",oids={23,20})
+    //    FINEST: FE=> Bind(stmt=null,portal=null,$1=<1>,$2=<3000000000>)
+    //    FINEST: FE=> Describe(portal=null)
+    //    FINEST: FE=> Execute(portal=null,limit=1)
+    //    FINEST: FE=> Parse(stmt=null,query="insert into largetable select true from generate_series($1, $2)",oids={23,23})
+    //    FINEST: FE=> Bind(stmt=null,portal=null,$1=<1>,$2=<50>)
+    //    FINEST: FE=> Describe(portal=null)
+    //    FINEST: FE=> Execute(portal=null,limit=1)
+    //    FINEST: FE=> Sync
+    //    FINEST: <=BE ParseComplete [null]
+    //    FINEST: <=BE BindComplete [unnamed]
+    //    FINEST: <=BE NoData
+    //    FINEST: <=BE CommandStatus(INSERT 0 200)
+    //    FINEST: <=BE ParseComplete [null]
+    //    FINEST: <=BE BindComplete [unnamed]
+    //    FINEST: <=BE NoData
+    //    FINEST: <=BE CommandStatus(INSERT 0 3000000000)
+    //    FINEST: <=BE ParseComplete [null]
+    //    FINEST: <=BE BindComplete [unnamed]
+    //    FINEST: <=BE NoData
+    //    FINEST: <=BE CommandStatus(INSERT 0 50)
+
+    /*
+     * Test simple PreparedStatement.executeLargeBatch();
+     */
+    @Ignore("This is the big and SLOW test")
+    @Test
+    public void testExecuteLargeBatchStatementBIG() throws Exception {
+        con.setAutoCommit(false);
+        try (PreparedStatement stmt = con.prepareStatement("insert into largetable "
+                + "select true from generate_series(?, ?)")) {
+            stmt.setInt(1, 1);
+            stmt.setInt(2, 200);
+            stmt.addBatch(); // statement one
+            stmt.setInt(1, 1);
+            stmt.setLong(2, 3_000_000_000L);
+            stmt.addBatch(); // statement two
+            stmt.setInt(1, 1);
+            stmt.setInt(2, 50);
+            stmt.addBatch(); // statement three
+            long[] actual = stmt.executeLargeBatch();
+            Assert.assertArrayEquals("Large rows inserted via 3 batch", new long[]{200L, 3_000_000_000L, 50L}, actual);
+        }
+        con.setAutoCommit(true);
+    }
+
+    /*
+     * Test simple Statement.executeLargeBatch();
+     */
+    @Test
+    public void testExecuteLargeBatchStatementSMALL() throws Exception {
+        try (Statement stmt = con.createStatement()) {
+            stmt.addBatch("insert into largetable(a) select true"); // statement one
+            stmt.addBatch("insert into largetable select false"); // statement two
+            stmt.addBatch("insert into largetable(a) values(true)"); // statement three
+            stmt.addBatch("insert into largetable values(false)"); // statement four
+            long[] actual = stmt.executeLargeBatch();
+            Assert.assertArrayEquals("Rows inserted via 4 batch", new long[]{1L, 1L, 1L, 1L}, actual);
+        }
+    }
+
+    /*
+     * Test simple PreparedStatement.executeLargeBatch();
+     */
+    @Test
+    public void testExecuteLargePreparedStatementStatementSMALL() throws Exception {
+        try (PreparedStatement stmt = con.prepareStatement("insert into largetable "
+                + "select true from generate_series(?, ?)")) {
+            stmt.setInt(1, 1);
+            stmt.setInt(2, 200);
+            stmt.addBatch(); // statement one
+            stmt.setInt(1, 1);
+            stmt.setInt(2, 100);
+            stmt.addBatch(); // statement two
+            stmt.setInt(1, 1);
+            stmt.setInt(2, 50);
+            stmt.addBatch(); // statement three
+            stmt.addBatch(); // statement four, same params as three
+            long[] actual = stmt.executeLargeBatch();
+            Assert.assertArrayEquals("Rows inserted via 4 batch", new long[]{200L, 100L, 50L, 50L}, actual);
+        }
+    }
+
+    /*
+     * Test loop PreparedStatement.executeLargeBatch();
+     */
+    @Test
+    public void testExecuteLargePreparedStatementStatementLoopSMALL() throws Exception {
+        long[] loop = {200, 100, 50, 300, 20, 60, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096};
+        try (PreparedStatement stmt = con.prepareStatement("insert into largetable "
+                + "select true from generate_series(?, ?)")) {
+            for (long i : loop) {
+                stmt.setInt(1, 1);
+                stmt.setLong(2, i);
+                stmt.addBatch();
+            }
+            long[] actual = stmt.executeLargeBatch();
+            Assert.assertArrayEquals("Rows inserted via batch", loop, actual);
+        }
+    }
+
+    /*
+     * Test loop PreparedStatement.executeLargeBatch();
+     */
+    @Test
+    public void testExecuteLargeBatchValuesInsertSMALL() throws Exception {
+        boolean[] loop = {true, false, true, false, false, false, true, true, true, true, false, true};
+        try (PreparedStatement stmt = con.prepareStatement("insert into largetable values(?)")) {
+            for (boolean i : loop) {
+                stmt.setBoolean(1, i);
+                stmt.addBatch();
+            }
+            long[] actual = stmt.executeLargeBatch();
+            Assert.assertEquals("Rows inserted via batch", loop.length, actual.length);
+            for (long i : actual) {
+                if (insertRewrite) {
+                    Assert.assertEquals(Statement.SUCCESS_NO_INFO, i);
+                } else {
+                    Assert.assertEquals(1, i);
+                }
+            }
+        }
+    }
+
+    /*
+     * Test null PreparedStatement.executeLargeBatch();
+     */
+    @Test
+    public void testNullExecuteLargeBatchStatement() throws Exception {
+        try (Statement stmt = con.createStatement()) {
+            long[] actual = stmt.executeLargeBatch();
+            Assert.assertArrayEquals("addBatch() not called batchStatements is null", new long[0], actual);
+        }
+    }
+
+    /*
+     * Test empty PreparedStatement.executeLargeBatch();
+     */
+    @Test
+    public void testEmptyExecuteLargeBatchStatement() throws Exception {
+        try (Statement stmt = con.createStatement()) {
+            stmt.addBatch("");
+            stmt.clearBatch();
+            long[] actual = stmt.executeLargeBatch();
+            Assert.assertArrayEquals("clearBatch() called, batchStatements.isEmpty()", new long[0], actual);
+        }
+    }
+
+    /*
+     * Test null PreparedStatement.executeLargeBatch();
+     */
+    @Test
+    public void testNullExecuteLargeBatchPreparedStatement() throws Exception {
+        try (PreparedStatement stmt = con.prepareStatement("")) {
+            long[] actual = stmt.executeLargeBatch();
+            Assert.assertArrayEquals("addBatch() not called batchStatements is null", new long[0], actual);
+        }
+    }
+
+    /*
+     * Test empty PreparedStatement.executeLargeBatch();
+     */
+    @Test
+    public void testEmptyExecuteLargeBatchPreparedStatement() throws Exception {
+        try (PreparedStatement stmt = con.prepareStatement("")) {
+            stmt.addBatch();
+            stmt.clearBatch();
+            long[] actual = stmt.executeLargeBatch();
+            Assert.assertArrayEquals("clearBatch() called, batchStatements.isEmpty()", new long[0], actual);
+        }
     }
-  }
 
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc42/PreparedStatement64KBindsTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc42/PreparedStatement64KBindsTest.java
index 8f3fb3a..d6c732c 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc42/PreparedStatement64KBindsTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc42/PreparedStatement64KBindsTest.java
@@ -29,73 +29,73 @@ import java.util.stream.IntStream;
 
 @RunWith(Parameterized.class)
 public class PreparedStatement64KBindsTest extends BaseTest4 {
-  private final int numBinds;
-  private final PreferQueryMode preferQueryMode;
-  private final BinaryMode binaryMode;
+    private final int numBinds;
+    private final PreferQueryMode preferQueryMode;
+    private final BinaryMode binaryMode;
 
-  public PreparedStatement64KBindsTest(int numBinds, PreferQueryMode preferQueryMode,
-      BinaryMode binaryMode) {
-    this.numBinds = numBinds;
-    this.preferQueryMode = preferQueryMode;
-    this.binaryMode = binaryMode;
-  }
-
-  @Parameterized.Parameters(name = "numBinds={0}, preferQueryMode={1}, binaryMode={2}}")
-  public static Iterable<Object[]> data() {
-    Collection<Object[]> ids = new ArrayList<>();
-    for (PreferQueryMode preferQueryMode : PreferQueryMode.values()) {
-      for (BinaryMode binaryMode : BinaryMode.values()) {
-        for (int numBinds : new int[]{32766, 32767, 32768, 65534, 65535, 65536}) {
-          ids.add(new Object[]{numBinds, preferQueryMode, binaryMode});
-        }
-      }
+    public PreparedStatement64KBindsTest(int numBinds, PreferQueryMode preferQueryMode,
+                                         BinaryMode binaryMode) {
+        this.numBinds = numBinds;
+        this.preferQueryMode = preferQueryMode;
+        this.binaryMode = binaryMode;
     }
-    return ids;
-  }
 
-  @Override
-  protected void updateProperties(Properties props) {
-    super.updateProperties(props);
-    PGProperty.PREFER_QUERY_MODE.set(props, preferQueryMode.value());
-    setBinaryMode(binaryMode);
-  }
-
-  @Test
-  public void executeWith65535BindsWorks() throws SQLException {
-    String sql = Collections.nCopies(numBinds, "?").stream()
-        .collect(Collectors.joining(",", "select ARRAY[", "]"));
-
-    try (PreparedStatement ps = con.prepareStatement(sql)) {
-      for (int i = 1; i <= numBinds; i++) {
-        ps.setString(i, "v" + i);
-      }
-      String expected = Arrays.toString(
-          IntStream.rangeClosed(1, numBinds)
-              .mapToObj(i -> "v" + i).toArray()
-      );
-
-      try (ResultSet rs = ps.executeQuery()) {
-        rs.next();
-        Array res = rs.getArray(1);
-        Object[] elements = (Object[]) res.getArray();
-        String actual = Arrays.toString(elements);
-
-        if (preferQueryMode == PreferQueryMode.SIMPLE || numBinds <= 65535) {
-          Assert.assertEquals("SELECT query with " + numBinds + " should work", actual, expected);
-        } else {
-          Assert.fail("con.prepareStatement(..." + numBinds + " binds) should fail since the wire protocol allows only 65535 parameters");
+    @Parameterized.Parameters(name = "numBinds={0}, preferQueryMode={1}, binaryMode={2}}")
+    public static Iterable<Object[]> data() {
+        Collection<Object[]> ids = new ArrayList<>();
+        for (PreferQueryMode preferQueryMode : PreferQueryMode.values()) {
+            for (BinaryMode binaryMode : BinaryMode.values()) {
+                for (int numBinds : new int[]{32766, 32767, 32768, 65534, 65535, 65536}) {
+                    ids.add(new Object[]{numBinds, preferQueryMode, binaryMode});
+                }
+            }
+        }
+        return ids;
+    }
+
+    @Override
+    protected void updateProperties(Properties props) {
+        super.updateProperties(props);
+        PGProperty.PREFER_QUERY_MODE.set(props, preferQueryMode.value());
+        setBinaryMode(binaryMode);
+    }
+
+    @Test
+    public void executeWith65535BindsWorks() throws SQLException {
+        String sql = Collections.nCopies(numBinds, "?").stream()
+                .collect(Collectors.joining(",", "select ARRAY[", "]"));
+
+        try (PreparedStatement ps = con.prepareStatement(sql)) {
+            for (int i = 1; i <= numBinds; i++) {
+                ps.setString(i, "v" + i);
+            }
+            String expected = Arrays.toString(
+                    IntStream.rangeClosed(1, numBinds)
+                            .mapToObj(i -> "v" + i).toArray()
+            );
+
+            try (ResultSet rs = ps.executeQuery()) {
+                rs.next();
+                Array res = rs.getArray(1);
+                Object[] elements = (Object[]) res.getArray();
+                String actual = Arrays.toString(elements);
+
+                if (preferQueryMode == PreferQueryMode.SIMPLE || numBinds <= 65535) {
+                    Assert.assertEquals("SELECT query with " + numBinds + " should work", actual, expected);
+                } else {
+                    Assert.fail("con.prepareStatement(..." + numBinds + " binds) should fail since the wire protocol allows only 65535 parameters");
+                }
+            }
+        } catch (SQLException e) {
+            if (preferQueryMode != PreferQueryMode.SIMPLE && numBinds > 65535) {
+                Assert.assertEquals(
+                        "con.prepareStatement(..." + numBinds + " binds) should fail since the wire protocol allows only 65535 parameters. SQL State is ",
+                        PSQLState.INVALID_PARAMETER_VALUE.getState(),
+                        e.getSQLState()
+                );
+            } else {
+                throw e;
+            }
         }
-      }
-    } catch (SQLException e) {
-      if (preferQueryMode != PreferQueryMode.SIMPLE && numBinds > 65535) {
-        Assert.assertEquals(
-            "con.prepareStatement(..." + numBinds + " binds) should fail since the wire protocol allows only 65535 parameters. SQL State is ",
-            PSQLState.INVALID_PARAMETER_VALUE.getState(),
-            e.getSQLState()
-        );
-      } else {
-        throw e;
-      }
     }
-  }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc42/PreparedStatementTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc42/PreparedStatementTest.java
index d31e439..f0321ad 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc42/PreparedStatementTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc42/PreparedStatementTest.java
@@ -21,98 +21,98 @@ import java.time.LocalTime;
 import java.util.Properties;
 
 public class PreparedStatementTest extends BaseTest4 {
-  protected void updateProperties(Properties props) {
-    PGProperty.PREFER_QUERY_MODE.set(props, "simple");
-  }
-
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-    TestUtil.createTable(con, "timestamptztable", "tstz timestamptz");
-    TestUtil.createTable(con, "timetztable", "ttz timetz");
-    TestUtil.createTable(con, "timetable", "id serial, tt time");
-  }
-
-  @Override
-  public void tearDown() throws SQLException {
-    TestUtil.dropTable(con, "timestamptztable");
-    TestUtil.dropTable(con, "timetztable");
-    TestUtil.dropTable(con, "timetable");
-    super.tearDown();
-  }
-
-  @Test
-  public void testSetNumber() throws SQLException {
-    PreparedStatement pstmt = con.prepareStatement("SELECT ? * 2");
-
-    pstmt.setBigDecimal(1, new BigDecimal("1.6"));
-    ResultSet rs = pstmt.executeQuery();
-    rs.next();
-    BigDecimal d = rs.getBigDecimal(1);
-    pstmt.close();
-
-    Assert.assertEquals(new BigDecimal("3.2"), d);
-  }
-
-  @Test
-  public void testSetBoolean() throws SQLException {
-    try (PreparedStatement ps = con.prepareStatement("select false union select (select ?)")) {
-      ps.setBoolean(1, true);
-
-      try (ResultSet rs = ps.executeQuery()) {
-        assert (rs.next());
-        rs.getBoolean(1);
-      }
+    protected void updateProperties(Properties props) {
+        PGProperty.PREFER_QUERY_MODE.set(props, "simple");
     }
-  }
 
-  @Test
-  public void testTimestampTzSetNull() throws SQLException {
-    PreparedStatement pstmt = con.prepareStatement("INSERT INTO timestamptztable (tstz) VALUES (?)");
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        TestUtil.createTable(con, "timestamptztable", "tstz timestamptz");
+        TestUtil.createTable(con, "timetztable", "ttz timetz");
+        TestUtil.createTable(con, "timetable", "id serial, tt time");
+    }
 
-    // valid: fully qualified type to setNull()
-    pstmt.setNull(1, Types.TIMESTAMP_WITH_TIMEZONE);
-    pstmt.executeUpdate();
+    @Override
+    public void tearDown() throws SQLException {
+        TestUtil.dropTable(con, "timestamptztable");
+        TestUtil.dropTable(con, "timetztable");
+        TestUtil.dropTable(con, "timetable");
+        super.tearDown();
+    }
 
-    // valid: fully qualified type to setObject()
-    pstmt.setObject(1, null, Types.TIMESTAMP_WITH_TIMEZONE);
-    pstmt.executeUpdate();
+    @Test
+    public void testSetNumber() throws SQLException {
+        PreparedStatement pstmt = con.prepareStatement("SELECT ? * 2");
 
-    pstmt.close();
-  }
+        pstmt.setBigDecimal(1, new BigDecimal("1.6"));
+        ResultSet rs = pstmt.executeQuery();
+        rs.next();
+        BigDecimal d = rs.getBigDecimal(1);
+        pstmt.close();
 
-  @Test
-  public void testTimeTzSetNull() throws SQLException {
-    PreparedStatement pstmt = con.prepareStatement("INSERT INTO timetztable (ttz) VALUES (?)");
+        Assert.assertEquals(new BigDecimal("3.2"), d);
+    }
 
-    // valid: fully qualified type to setNull()
-    pstmt.setNull(1, Types.TIME_WITH_TIMEZONE);
-    pstmt.executeUpdate();
+    @Test
+    public void testSetBoolean() throws SQLException {
+        try (PreparedStatement ps = con.prepareStatement("select false union select (select ?)")) {
+            ps.setBoolean(1, true);
 
-    // valid: fully qualified type to setObject()
-    pstmt.setObject(1, null, Types.TIME_WITH_TIMEZONE);
-    pstmt.executeUpdate();
+            try (ResultSet rs = ps.executeQuery()) {
+                assert (rs.next());
+                rs.getBoolean(1);
+            }
+        }
+    }
 
-    pstmt.close();
-  }
+    @Test
+    public void testTimestampTzSetNull() throws SQLException {
+        PreparedStatement pstmt = con.prepareStatement("INSERT INTO timestamptztable (tstz) VALUES (?)");
 
-  @Test
-  public void testLocalTimeMax() throws SQLException {
-    PreparedStatement pstmt = con.prepareStatement("INSERT INTO timetable (tt) VALUES (?)");
+        // valid: fully qualified type to setNull()
+        pstmt.setNull(1, Types.TIMESTAMP_WITH_TIMEZONE);
+        pstmt.executeUpdate();
 
-    pstmt.setObject(1, LocalTime.MAX);
-    pstmt.executeUpdate();
+        // valid: fully qualified type to setObject()
+        pstmt.setObject(1, null, Types.TIMESTAMP_WITH_TIMEZONE);
+        pstmt.executeUpdate();
 
-    pstmt.setObject(1, LocalTime.MIN);
-    pstmt.executeUpdate();
+        pstmt.close();
+    }
 
-    ResultSet rs = con.createStatement().executeQuery("select tt from timetable order by id asc");
-    Assert.assertTrue(rs.next());
-    LocalTime localTime = (LocalTime) rs.getObject(1, LocalTime.class);
-    Assert.assertEquals(LocalTime.MAX, localTime);
+    @Test
+    public void testTimeTzSetNull() throws SQLException {
+        PreparedStatement pstmt = con.prepareStatement("INSERT INTO timetztable (ttz) VALUES (?)");
 
-    Assert.assertTrue(rs.next());
-    localTime = (LocalTime) rs.getObject(1, LocalTime.class);
-    Assert.assertEquals(LocalTime.MIN, localTime);
-  }
+        // valid: fully qualified type to setNull()
+        pstmt.setNull(1, Types.TIME_WITH_TIMEZONE);
+        pstmt.executeUpdate();
+
+        // valid: fully qualified type to setObject()
+        pstmt.setObject(1, null, Types.TIME_WITH_TIMEZONE);
+        pstmt.executeUpdate();
+
+        pstmt.close();
+    }
+
+    @Test
+    public void testLocalTimeMax() throws SQLException {
+        PreparedStatement pstmt = con.prepareStatement("INSERT INTO timetable (tt) VALUES (?)");
+
+        pstmt.setObject(1, LocalTime.MAX);
+        pstmt.executeUpdate();
+
+        pstmt.setObject(1, LocalTime.MIN);
+        pstmt.executeUpdate();
+
+        ResultSet rs = con.createStatement().executeQuery("select tt from timetable order by id asc");
+        Assert.assertTrue(rs.next());
+        LocalTime localTime = (LocalTime) rs.getObject(1, LocalTime.class);
+        Assert.assertEquals(LocalTime.MAX, localTime);
+
+        Assert.assertTrue(rs.next());
+        localTime = (LocalTime) rs.getObject(1, LocalTime.class);
+        Assert.assertEquals(LocalTime.MIN, localTime);
+    }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc42/SetObject310InfinityTests.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc42/SetObject310InfinityTests.java
index 86457aa..09e2868 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc42/SetObject310InfinityTests.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc42/SetObject310InfinityTests.java
@@ -32,101 +32,101 @@ import java.util.Collection;
 @RunWith(Parameterized.class)
 public class SetObject310InfinityTests extends BaseTest4 {
 
-  public SetObject310InfinityTests(BinaryMode binaryMode) {
-    setBinaryMode(binaryMode);
-  }
-
-  @Parameterized.Parameters(name = "binary = {0}")
-  public static Iterable<Object[]> data() {
-    Collection<Object[]> ids = new ArrayList<>(2);
-    for (BaseTest4.BinaryMode binaryMode : BaseTest4.BinaryMode.values()) {
-      ids.add(new Object[]{binaryMode});
+    public SetObject310InfinityTests(BinaryMode binaryMode) {
+        setBinaryMode(binaryMode);
     }
-    return ids;
-  }
 
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-    Assume.assumeTrue("PostgreSQL 8.3 does not support 'infinity' for 'date'",
-        TestUtil.haveMinimumServerVersion(con, ServerVersion.v8_4));
-    super.setUp();
-    TestUtil.createTable(con, "table1", "timestamp_without_time_zone_column timestamp without time zone,"
-            + "timestamp_with_time_zone_column timestamp with time zone,"
-            + "date_column date"
-    );
-  }
-
-  @After
-  public void tearDown() throws SQLException {
-    TestUtil.dropTable(con, "table1");
-    super.tearDown();
-  }
-
-  @Test
-  public void testTimestamptz() throws SQLException {
-    runTestforType(OffsetDateTime.MAX, OffsetDateTime.MIN, "timestamp_without_time_zone_column", null);
-  }
-
-  @Test
-  public void testTimestamp() throws SQLException {
-    runTestforType(LocalDateTime.MAX, LocalDateTime.MIN, "timestamp_without_time_zone_column", null);
-  }
-
-  @Test
-  public void testDate() throws SQLException {
-    runTestforType(LocalDate.MAX, LocalDate.MIN, "date_column", null);
-  }
-
-  private void runTestforType(Object max, Object min, String columnName, Integer type) throws SQLException {
-    insert(max, columnName, type);
-    String readback = readString(columnName);
-    assertEquals("infinity", readback);
-    delete();
-
-    insert(min, columnName, type);
-    readback = readString(columnName);
-    assertEquals("-infinity", readback);
-    delete();
-  }
-
-  private void insert(Object data, String columnName, Integer type) throws SQLException {
-    PreparedStatement ps = con.prepareStatement(TestUtil.insertSQL("table1", columnName, "?"));
-    try {
-      if (type != null) {
-        ps.setObject(1, data, type);
-      } else {
-        ps.setObject(1, data);
-      }
-      assertEquals(1, ps.executeUpdate());
-    } finally {
-      ps.close();
+    @Parameterized.Parameters(name = "binary = {0}")
+    public static Iterable<Object[]> data() {
+        Collection<Object[]> ids = new ArrayList<>(2);
+        for (BaseTest4.BinaryMode binaryMode : BaseTest4.BinaryMode.values()) {
+            ids.add(new Object[]{binaryMode});
+        }
+        return ids;
     }
-  }
 
-  private String readString(String columnName) throws SQLException {
-    Statement st = con.createStatement();
-    try {
-      ResultSet rs = st.executeQuery(TestUtil.selectSQL("table1", columnName));
-      try {
-        assertNotNull(rs);
-        assertTrue(rs.next());
-        return rs.getString(1);
-      } finally {
-        rs.close();
-      }
-    } finally {
-      st.close();
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        Assume.assumeTrue("PostgreSQL 8.3 does not support 'infinity' for 'date'",
+                TestUtil.haveMinimumServerVersion(con, ServerVersion.v8_4));
+        super.setUp();
+        TestUtil.createTable(con, "table1", "timestamp_without_time_zone_column timestamp without time zone,"
+                + "timestamp_with_time_zone_column timestamp with time zone,"
+                + "date_column date"
+        );
     }
-  }
 
-  private void delete() throws SQLException {
-    Statement st = con.createStatement();
-    try {
-      st.execute("DELETE FROM table1");
-    } finally {
-      st.close();
+    @After
+    public void tearDown() throws SQLException {
+        TestUtil.dropTable(con, "table1");
+        super.tearDown();
+    }
+
+    @Test
+    public void testTimestamptz() throws SQLException {
+        runTestforType(OffsetDateTime.MAX, OffsetDateTime.MIN, "timestamp_without_time_zone_column", null);
+    }
+
+    @Test
+    public void testTimestamp() throws SQLException {
+        runTestforType(LocalDateTime.MAX, LocalDateTime.MIN, "timestamp_without_time_zone_column", null);
+    }
+
+    @Test
+    public void testDate() throws SQLException {
+        runTestforType(LocalDate.MAX, LocalDate.MIN, "date_column", null);
+    }
+
+    private void runTestforType(Object max, Object min, String columnName, Integer type) throws SQLException {
+        insert(max, columnName, type);
+        String readback = readString(columnName);
+        assertEquals("infinity", readback);
+        delete();
+
+        insert(min, columnName, type);
+        readback = readString(columnName);
+        assertEquals("-infinity", readback);
+        delete();
+    }
+
+    private void insert(Object data, String columnName, Integer type) throws SQLException {
+        PreparedStatement ps = con.prepareStatement(TestUtil.insertSQL("table1", columnName, "?"));
+        try {
+            if (type != null) {
+                ps.setObject(1, data, type);
+            } else {
+                ps.setObject(1, data);
+            }
+            assertEquals(1, ps.executeUpdate());
+        } finally {
+            ps.close();
+        }
+    }
+
+    private String readString(String columnName) throws SQLException {
+        Statement st = con.createStatement();
+        try {
+            ResultSet rs = st.executeQuery(TestUtil.selectSQL("table1", columnName));
+            try {
+                assertNotNull(rs);
+                assertTrue(rs.next());
+                return rs.getString(1);
+            } finally {
+                rs.close();
+            }
+        } finally {
+            st.close();
+        }
+    }
+
+    private void delete() throws SQLException {
+        Statement st = con.createStatement();
+        try {
+            st.execute("DELETE FROM table1");
+        } finally {
+            st.close();
+        }
     }
-  }
 
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc42/SetObject310Test.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc42/SetObject310Test.java
index a39057f..f1cb157 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc42/SetObject310Test.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc42/SetObject310Test.java
@@ -49,444 +49,443 @@ import java.util.TimeZone;
 
 @RunWith(Parameterized.class)
 public class SetObject310Test extends BaseTest4 {
-  private static final TimeZone saveTZ = TimeZone.getDefault();
+    public static final DateTimeFormatter LOCAL_TIME_FORMATTER =
+            new DateTimeFormatterBuilder()
+                    .parseCaseInsensitive()
+                    .appendValue(ChronoField.YEAR_OF_ERA, 4, 10, SignStyle.EXCEEDS_PAD)
+                    .appendLiteral('-')
+                    .appendValue(ChronoField.MONTH_OF_YEAR, 2)
+                    .appendLiteral('-')
+                    .appendValue(ChronoField.DAY_OF_MONTH, 2)
+                    .appendLiteral(' ')
+                    .append(DateTimeFormatter.ISO_LOCAL_TIME)
+                    .optionalStart()
+                    .appendOffset("+HH:mm", "+00")
+                    .optionalEnd()
+                    .optionalStart()
+                    .appendLiteral(' ')
+                    .appendPattern("GG")
+                    .toFormatter(Locale.ROOT)
+                    .withResolverStyle(ResolverStyle.LENIENT)
+                    .withChronology(IsoChronology.INSTANCE);
+    private static final TimeZone saveTZ = TimeZone.getDefault();
 
-  public static final DateTimeFormatter LOCAL_TIME_FORMATTER =
-      new DateTimeFormatterBuilder()
-          .parseCaseInsensitive()
-          .appendValue(ChronoField.YEAR_OF_ERA, 4, 10, SignStyle.EXCEEDS_PAD)
-          .appendLiteral('-')
-          .appendValue(ChronoField.MONTH_OF_YEAR, 2)
-          .appendLiteral('-')
-          .appendValue(ChronoField.DAY_OF_MONTH, 2)
-          .appendLiteral(' ')
-          .append(DateTimeFormatter.ISO_LOCAL_TIME)
-          .optionalStart()
-          .appendOffset("+HH:mm", "+00")
-          .optionalEnd()
-          .optionalStart()
-          .appendLiteral(' ')
-          .appendPattern("GG")
-          .toFormatter(Locale.ROOT)
-          .withResolverStyle(ResolverStyle.LENIENT)
-          .withChronology(IsoChronology.INSTANCE);
-
-  public SetObject310Test(BaseTest4.BinaryMode binaryMode) {
-    setBinaryMode(binaryMode);
-  }
-
-  @Parameterized.Parameters(name = "binary = {0}")
-  public static Iterable<Object[]> data() {
-    Collection<Object[]> ids = new ArrayList<>();
-    for (BaseTest4.BinaryMode binaryMode : BaseTest4.BinaryMode.values()) {
-      ids.add(new Object[]{binaryMode});
-    }
-    return ids;
-  }
-
-  @BeforeClass
-  public static void createTables() throws Exception {
-    try (Connection con = TestUtil.openDB()) {
-      TestUtil.createTable(con, "table1", "timestamp_without_time_zone_column timestamp without time zone,"
-              + "timestamp_with_time_zone_column timestamp with time zone,"
-              + "date_column date,"
-              + "time_without_time_zone_column time without time zone,"
-              + "time_with_time_zone_column time with time zone"
-      );
-    }
-  }
-
-  @AfterClass
-  public static void dropTables() throws Exception {
-    try (Connection con = TestUtil.openDB()) {
-      TestUtil.dropTable(con, "table1");
-    }
-    TimeZone.setDefault(saveTZ);
-  }
-
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-    TestUtil.execute(con, "delete from table1");
-  }
-
-  private void insert(Object data, String columnName, Integer type) throws SQLException {
-    PreparedStatement ps = con.prepareStatement(TestUtil.insertSQL("table1", columnName, "?"));
-    try {
-      if (type != null) {
-        ps.setObject(1, data, type);
-      } else {
-        ps.setObject(1, data);
-      }
-      assertEquals(1, ps.executeUpdate());
-    } finally {
-      ps.close();
-    }
-  }
-
-  private String readString(String columnName) throws SQLException {
-    Statement st = con.createStatement();
-    try {
-      ResultSet rs = st.executeQuery(TestUtil.selectSQL("table1", columnName));
-      try {
-        assertNotNull(rs);
-        assertTrue(rs.next());
-        return rs.getString(1);
-      } finally {
-        rs.close();
-      }
-    } finally {
-      st.close();
-    }
-  }
-
-  private String insertThenReadStringWithoutType(LocalDateTime data, String columnName) throws SQLException {
-    insert(data, columnName, null);
-    return readString(columnName);
-  }
-
-  private String insertThenReadStringWithType(LocalDateTime data, String columnName) throws SQLException {
-    insert(data, columnName, Types.TIMESTAMP);
-    return readString(columnName);
-  }
-
-  private void insertWithoutType(Object data, String columnName) throws SQLException {
-    insert(data, columnName, null);
-  }
-
-  private <T> T insertThenReadWithoutType(Object data, String columnName, Class<T> expectedType) throws SQLException {
-    return insertThenReadWithoutType(data, columnName, expectedType, true);
-  }
-
-  private <T> T insertThenReadWithoutType(Object data, String columnName, Class<T> expectedType, boolean checkRoundtrip) throws SQLException {
-    PreparedStatement ps = con.prepareStatement(TestUtil.insertSQL("table1", columnName, "?"));
-    try {
-      ps.setObject(1, data);
-      assertEquals(1, ps.executeUpdate());
-    } finally {
-      ps.close();
+    public SetObject310Test(BaseTest4.BinaryMode binaryMode) {
+        setBinaryMode(binaryMode);
     }
 
-    Statement st = con.createStatement();
-    try {
-      ResultSet rs = st.executeQuery(TestUtil.selectSQL("table1", columnName));
-      try {
-        assertNotNull(rs);
-
-        assertTrue(rs.next());
-        if (checkRoundtrip) {
-          assertEquals("Roundtrip set/getObject with type should return same result",
-              data, rs.getObject(1, data.getClass()));
+    @Parameterized.Parameters(name = "binary = {0}")
+    public static Iterable<Object[]> data() {
+        Collection<Object[]> ids = new ArrayList<>();
+        for (BaseTest4.BinaryMode binaryMode : BaseTest4.BinaryMode.values()) {
+            ids.add(new Object[]{binaryMode});
         }
-        return expectedType.cast(rs.getObject(1));
-      } finally {
-        rs.close();
-      }
-    } finally {
-      st.close();
-    }
-  }
-
-  private <T> T insertThenReadWithType(Object data, int sqlType, String columnName, Class<T> expectedType) throws SQLException {
-    return insertThenReadWithType(data, sqlType, columnName, expectedType, true);
-  }
-
-  private <T> T insertThenReadWithType(Object data, int sqlType, String columnName, Class<T> expectedType, boolean checkRoundtrip) throws SQLException {
-    PreparedStatement ps = con.prepareStatement(TestUtil.insertSQL("table1", columnName, "?"));
-    try {
-      ps.setObject(1, data, sqlType);
-      assertEquals(1, ps.executeUpdate());
-    } finally {
-      ps.close();
+        return ids;
     }
 
-    Statement st = con.createStatement();
-    try {
-      ResultSet rs = st.executeQuery(TestUtil.selectSQL("table1", columnName));
-      try {
-        assertNotNull(rs);
-
-        assertTrue(rs.next());
-        if (checkRoundtrip) {
-          assertEquals("Roundtrip set/getObject with type should return same result",
-              data, rs.getObject(1, data.getClass()));
+    @BeforeClass
+    public static void createTables() throws Exception {
+        try (Connection con = TestUtil.openDB()) {
+            TestUtil.createTable(con, "table1", "timestamp_without_time_zone_column timestamp without time zone,"
+                    + "timestamp_with_time_zone_column timestamp with time zone,"
+                    + "date_column date,"
+                    + "time_without_time_zone_column time without time zone,"
+                    + "time_with_time_zone_column time with time zone"
+            );
         }
-        return expectedType.cast(rs.getObject(1));
-      } finally {
-        rs.close();
-      }
-    } finally {
-      st.close();
     }
-  }
 
-  private void deleteRows() throws SQLException {
-    Statement st = con.createStatement();
-    try {
-      st.executeUpdate("DELETE FROM table1");
-    } finally {
-      st.close();
-    }
-  }
-
-  /**
-   * Test the behavior of setObject for timestamp columns.
-   */
-  @Test
-  public void testSetLocalDateTime() throws SQLException {
-    List<String> zoneIdsToTest = getZoneIdsToTest();
-    List<String> datesToTest = getDatesToTest();
-
-    for (String zoneId : zoneIdsToTest) {
-      ZoneId zone = ZoneId.of(zoneId);
-      for (String date : datesToTest) {
-        LocalDateTime localDateTime = LocalDateTime.parse(date);
-        String expected = date.replace('T', ' ');
-        localTimestamps(zone, localDateTime, expected);
-      }
-    }
-  }
-
-  /**
-   * Test the behavior of setObject for timestamp columns.
-   */
-  @Test
-  public void testSetOffsetDateTime() throws SQLException {
-    List<String> zoneIdsToTest = getZoneIdsToTest();
-    List<TimeZone> storeZones = new ArrayList<>();
-    for (String zoneId : zoneIdsToTest) {
-      storeZones.add(TimeZone.getTimeZone(zoneId));
-    }
-    List<String> datesToTest = getDatesToTest();
-
-    for (TimeZone timeZone : storeZones) {
-      ZoneId zoneId = timeZone.toZoneId();
-      for (String date : datesToTest) {
-        LocalDateTime localDateTime = LocalDateTime.parse(date);
-        String expected = date.replace('T', ' ');
-        offsetTimestamps(zoneId, localDateTime, expected, storeZones);
-      }
-    }
-  }
-
-  private List<String> getDatesToTest() {
-    return Arrays.asList("2015-09-03T12:00:00", "2015-06-30T23:59:58",
-            "1997-06-30T23:59:59", "1997-07-01T00:00:00", "2012-06-30T23:59:59", "2012-07-01T00:00:00",
-            "2015-06-30T23:59:59", "2015-07-01T00:00:00", "2005-12-31T23:59:59", "2006-01-01T00:00:00",
-            "2008-12-31T23:59:59", "2009-01-01T00:00:00", /* "2015-06-30T23:59:60", */ "2015-07-31T00:00:00",
-            "2015-07-31T00:00:01", "2015-07-31T00:00:00.000001",
-
-            // On 2000-03-26 02:00:00 Moscow went to DST, thus local time became 03:00:00
-            "2000-03-26T01:59:59", "2000-03-26T02:00:00", "2000-03-26T02:00:01", "2000-03-26T02:59:59",
-            "2000-03-26T03:00:00", "2000-03-26T03:00:01", "2000-03-26T03:59:59", "2000-03-26T04:00:00",
-            "2000-03-26T04:00:01", "2000-03-26T04:00:00.000001",
-
-            // This is a pre-1970 date, so check if it is rounded properly
-            "1950-07-20T02:00:00",
-
-            // Ensure the calendar is proleptic
-            "1582-09-30T00:00:00", "1582-10-16T00:00:00",
-
-            // On 2000-10-29 03:00:00 Moscow went to regular time, thus local time became 02:00:00
-            "2000-10-29T01:59:59", "2000-10-29T02:00:00", "2000-10-29T02:00:01", "2000-10-29T02:59:59",
-            "2000-10-29T03:00:00", "2000-10-29T03:00:01", "2000-10-29T03:59:59", "2000-10-29T04:00:00",
-            "2000-10-29T04:00:01", "2000-10-29T04:00:00.000001");
-  }
-
-  private List<String> getZoneIdsToTest() {
-    List<String> zoneIdsToTest = new ArrayList<>();
-    zoneIdsToTest.add("Africa/Casablanca"); // It is something like GMT+0..GMT+1
-    zoneIdsToTest.add("America/Adak"); // It is something like GMT-10..GMT-9
-    zoneIdsToTest.add("Atlantic/Azores"); // It is something like GMT-1..GMT+0
-    zoneIdsToTest.add("Europe/Moscow"); // It is something like GMT+3..GMT+4 for 2000s
-    zoneIdsToTest.add("Pacific/Apia"); // It is something like GMT+13..GMT+14
-    zoneIdsToTest.add("Pacific/Niue"); // It is something like GMT-11..GMT-11
-    for (int i = -12; i <= 13; i++) {
-      zoneIdsToTest.add(String.format("GMT%+02d", i));
-    }
-    return zoneIdsToTest;
-  }
-
-  private void localTimestamps(ZoneId zoneId, LocalDateTime localDateTime, String expected) throws SQLException {
-    TimeZone.setDefault(TimeZone.getTimeZone(zoneId));
-    String readBack = insertThenReadStringWithoutType(localDateTime, "timestamp_without_time_zone_column");
-    assertEquals(
-        "LocalDateTime=" + localDateTime + ", with TimeZone.default=" + zoneId + ", setObject(int, Object)",
-        expected, readBack);
-    deleteRows();
-
-    readBack = insertThenReadStringWithType(localDateTime, "timestamp_without_time_zone_column");
-    assertEquals(
-        "LocalDateTime=" + localDateTime + ", with TimeZone.default=" + zoneId + ", setObject(int, Object, TIMESTAMP)",
-        expected, readBack);
-    deleteRows();
-  }
-
-  private void offsetTimestamps(ZoneId dataZone, LocalDateTime localDateTime, String expected, List<TimeZone> storeZones) throws SQLException {
-    OffsetDateTime data = localDateTime.atZone(dataZone).toOffsetDateTime();
-    try (PreparedStatement ps = con.prepareStatement(
-        "select ?::timestamp with time zone, ?::timestamp with time zone")) {
-      for (TimeZone storeZone : storeZones) {
-        TimeZone.setDefault(storeZone);
-        ps.setObject(1, data);
-        ps.setObject(2, data, Types.TIMESTAMP_WITH_TIMEZONE);
-        try (ResultSet rs = ps.executeQuery()) {
-          rs.next();
-          String noType = rs.getString(1);
-          OffsetDateTime noTypeRes = parseBackendTimestamp(noType);
-          assertEquals(
-              "OffsetDateTime=" + data + " (with ZoneId=" + dataZone + "), with TimeZone.default="
-                  + storeZone + ", setObject(int, Object)", data.toInstant(),
-              noTypeRes.toInstant());
-          String withType = rs.getString(2);
-          OffsetDateTime withTypeRes = parseBackendTimestamp(withType);
-          assertEquals(
-              "OffsetDateTime=" + data + " (with ZoneId=" + dataZone + "), with TimeZone.default="
-                  + storeZone + ", setObject(int, Object, TIMESTAMP_WITH_TIMEZONE)",
-              data.toInstant(), withTypeRes.toInstant());
+    @AfterClass
+    public static void dropTables() throws Exception {
+        try (Connection con = TestUtil.openDB()) {
+            TestUtil.dropTable(con, "table1");
         }
-      }
+        TimeZone.setDefault(saveTZ);
     }
-  }
 
-  /**
-   * Sometimes backend responds like {@code 1950-07-20 16:20:00+03} and sometimes it responds like
-   * {@code 1582-09-30 13:49:57+02:30:17}, so we need to handle cases when "offset minutes" is missing.
-   */
-  private static OffsetDateTime parseBackendTimestamp(String backendTimestamp) {
-    String isoTimestamp = backendTimestamp.replace(' ', 'T');
-    // If the pattern already has trailing :XX we are fine
-    // Otherwise add :00 for timezone offset minutes
-    if (isoTimestamp.charAt(isoTimestamp.length() - 3) != ':') {
-      isoTimestamp += ":00";
+    /**
+     * Sometimes backend responds like {@code 1950-07-20 16:20:00+03} and sometimes it responds like
+     * {@code 1582-09-30 13:49:57+02:30:17}, so we need to handle cases when "offset minutes" is missing.
+     */
+    private static OffsetDateTime parseBackendTimestamp(String backendTimestamp) {
+        String isoTimestamp = backendTimestamp.replace(' ', 'T');
+        // If the pattern already has trailing :XX we are fine
+        // Otherwise add :00 for timezone offset minutes
+        if (isoTimestamp.charAt(isoTimestamp.length() - 3) != ':') {
+            isoTimestamp += ":00";
+        }
+        return OffsetDateTime.parse(isoTimestamp);
     }
-    return OffsetDateTime.parse(isoTimestamp);
-  }
 
-  @Test
-  public void testLocalDateTimeRounding() throws SQLException {
-    LocalDateTime dateTime = LocalDateTime.parse("2018-12-31T23:59:59.999999500");
-    localTimestamps(ZoneOffset.UTC, dateTime, "2019-01-01 00:00:00");
-  }
-
-  @Test
-  public void testTimeStampRounding() throws SQLException {
-    // TODO: fix for binary
-    assumeBinaryModeRegular();
-    LocalTime time = LocalTime.parse("23:59:59.999999500");
-    Time actual = insertThenReadWithoutType(time, "time_without_time_zone_column", Time.class, false/*no roundtrip*/);
-    assertEquals(Time.valueOf("24:00:00"), actual);
-  }
-
-  @Test
-  public void testTimeStampRoundingWithType() throws SQLException {
-    // TODO: fix for binary
-    assumeBinaryModeRegular();
-    LocalTime time = LocalTime.parse("23:59:59.999999500");
-    Time actual =
-        insertThenReadWithType(time, Types.TIME, "time_without_time_zone_column", Time.class, false/*no roundtrip*/);
-    assertEquals(Time.valueOf("24:00:00"), actual);
-  }
-
-  /**
-   * Test the behavior of setObject for timestamp columns.
-   */
-  @Test
-  public void testSetLocalDateTimeBc() throws SQLException {
-    assumeTrue(TestUtil.haveIntegerDateTimes(con));
-
-    // use BC for funsies
-    List<LocalDateTime> bcDates = new ArrayList<>();
-    bcDates.add(LocalDateTime.parse("1997-06-30T23:59:59.999999").with(ChronoField.ERA, IsoEra.BCE.getValue()));
-    bcDates.add(LocalDateTime.parse("0997-06-30T23:59:59.999999").with(ChronoField.ERA, IsoEra.BCE.getValue()));
-
-    for (LocalDateTime bcDate : bcDates) {
-      String expected = LOCAL_TIME_FORMATTER.format(bcDate);
-      if (expected.endsWith(" BCE")) {
-        // Java 22.ea.25-open prints "BCE" even though previous releases printed "BC"
-        // See https://bugs.openjdk.org/browse/JDK-8320747
-        expected = expected.substring(0, expected.length() - 1);
-      }
-      localTimestamps(ZoneOffset.UTC, bcDate, expected);
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        TestUtil.execute(con, "delete from table1");
     }
-  }
 
-  /**
-   * Test the behavior setObject for date columns.
-   */
-  @Test
-  public void testSetLocalDateWithType() throws SQLException {
-    LocalDate data = LocalDate.parse("1971-12-15");
-    java.sql.Date actual = insertThenReadWithType(data, Types.DATE, "date_column", java.sql.Date.class);
-    java.sql.Date expected = java.sql.Date.valueOf("1971-12-15");
-    assertEquals(expected, actual);
-  }
+    private void insert(Object data, String columnName, Integer type) throws SQLException {
+        PreparedStatement ps = con.prepareStatement(TestUtil.insertSQL("table1", columnName, "?"));
+        try {
+            if (type != null) {
+                ps.setObject(1, data, type);
+            } else {
+                ps.setObject(1, data);
+            }
+            assertEquals(1, ps.executeUpdate());
+        } finally {
+            ps.close();
+        }
+    }
 
-  /**
-   * Test the behavior setObject for date columns.
-   */
-  @Test
-  public void testSetLocalDateWithoutType() throws SQLException {
-    LocalDate data = LocalDate.parse("1971-12-15");
-    java.sql.Date actual = insertThenReadWithoutType(data, "date_column", java.sql.Date.class);
-    java.sql.Date expected = java.sql.Date.valueOf("1971-12-15");
-    assertEquals(expected, actual);
-  }
+    private String readString(String columnName) throws SQLException {
+        Statement st = con.createStatement();
+        try {
+            ResultSet rs = st.executeQuery(TestUtil.selectSQL("table1", columnName));
+            try {
+                assertNotNull(rs);
+                assertTrue(rs.next());
+                return rs.getString(1);
+            } finally {
+                rs.close();
+            }
+        } finally {
+            st.close();
+        }
+    }
 
-  /**
-   * Test the behavior setObject for time columns.
-   */
-  @Test
-  public void testSetLocalTimeAndReadBack() throws SQLException {
-    // TODO: fix for binary mode.
-    //  Avoid micros truncation in org.postgresql.jdbc.PgResultSet#internalGetObject
-    assumeBinaryModeRegular();
-    LocalTime data = LocalTime.parse("16:21:51.123456");
+    private String insertThenReadStringWithoutType(LocalDateTime data, String columnName) throws SQLException {
+        insert(data, columnName, null);
+        return readString(columnName);
+    }
 
-    insertWithoutType(data, "time_without_time_zone_column");
+    private String insertThenReadStringWithType(LocalDateTime data, String columnName) throws SQLException {
+        insert(data, columnName, Types.TIMESTAMP);
+        return readString(columnName);
+    }
 
-    String readBack = readString("time_without_time_zone_column");
-    assertEquals("16:21:51.123456", readBack);
-  }
+    private void insertWithoutType(Object data, String columnName) throws SQLException {
+        insert(data, columnName, null);
+    }
 
-  /**
-   * Test the behavior setObject for time columns.
-   */
-  @Test
-  public void testSetLocalTimeWithType() throws SQLException {
-    LocalTime data = LocalTime.parse("16:21:51");
-    Time actual = insertThenReadWithType(data, Types.TIME, "time_without_time_zone_column", Time.class);
-    Time expected = Time.valueOf("16:21:51");
-    assertEquals(expected, actual);
-  }
+    private <T> T insertThenReadWithoutType(Object data, String columnName, Class<T> expectedType) throws SQLException {
+        return insertThenReadWithoutType(data, columnName, expectedType, true);
+    }
 
-  /**
-   * Test the behavior setObject for time columns.
-   */
-  @Test
-  public void testSetLocalTimeWithoutType() throws SQLException {
-    LocalTime data = LocalTime.parse("16:21:51");
-    Time actual = insertThenReadWithoutType(data, "time_without_time_zone_column", Time.class);
-    Time expected = Time.valueOf("16:21:51");
-    assertEquals(expected, actual);
-  }
+    private <T> T insertThenReadWithoutType(Object data, String columnName, Class<T> expectedType, boolean checkRoundtrip) throws SQLException {
+        PreparedStatement ps = con.prepareStatement(TestUtil.insertSQL("table1", columnName, "?"));
+        try {
+            ps.setObject(1, data);
+            assertEquals(1, ps.executeUpdate());
+        } finally {
+            ps.close();
+        }
 
-  /**
-   * Test the behavior setObject for time columns.
-   */
-  @Test
-  public void testSetOffsetTimeWithType() throws SQLException {
-    OffsetTime data = OffsetTime.parse("16:21:51+12:34");
-    insertThenReadWithType(data, Types.TIME, "time_with_time_zone_column", Time.class);
-  }
+        Statement st = con.createStatement();
+        try {
+            ResultSet rs = st.executeQuery(TestUtil.selectSQL("table1", columnName));
+            try {
+                assertNotNull(rs);
 
-  /**
-   * Test the behavior setObject for time columns.
-   */
-  @Test
-  public void testSetOffsetTimeWithoutType() throws SQLException {
-    OffsetTime data = OffsetTime.parse("16:21:51+12:34");
-    insertThenReadWithoutType(data, "time_with_time_zone_column", Time.class);
-  }
+                assertTrue(rs.next());
+                if (checkRoundtrip) {
+                    assertEquals("Roundtrip set/getObject with type should return same result",
+                            data, rs.getObject(1, data.getClass()));
+                }
+                return expectedType.cast(rs.getObject(1));
+            } finally {
+                rs.close();
+            }
+        } finally {
+            st.close();
+        }
+    }
+
+    private <T> T insertThenReadWithType(Object data, int sqlType, String columnName, Class<T> expectedType) throws SQLException {
+        return insertThenReadWithType(data, sqlType, columnName, expectedType, true);
+    }
+
+    private <T> T insertThenReadWithType(Object data, int sqlType, String columnName, Class<T> expectedType, boolean checkRoundtrip) throws SQLException {
+        PreparedStatement ps = con.prepareStatement(TestUtil.insertSQL("table1", columnName, "?"));
+        try {
+            ps.setObject(1, data, sqlType);
+            assertEquals(1, ps.executeUpdate());
+        } finally {
+            ps.close();
+        }
+
+        Statement st = con.createStatement();
+        try {
+            ResultSet rs = st.executeQuery(TestUtil.selectSQL("table1", columnName));
+            try {
+                assertNotNull(rs);
+
+                assertTrue(rs.next());
+                if (checkRoundtrip) {
+                    assertEquals("Roundtrip set/getObject with type should return same result",
+                            data, rs.getObject(1, data.getClass()));
+                }
+                return expectedType.cast(rs.getObject(1));
+            } finally {
+                rs.close();
+            }
+        } finally {
+            st.close();
+        }
+    }
+
+    private void deleteRows() throws SQLException {
+        Statement st = con.createStatement();
+        try {
+            st.executeUpdate("DELETE FROM table1");
+        } finally {
+            st.close();
+        }
+    }
+
+    /**
+     * Test the behavior of setObject for timestamp columns.
+     */
+    @Test
+    public void testSetLocalDateTime() throws SQLException {
+        List<String> zoneIdsToTest = getZoneIdsToTest();
+        List<String> datesToTest = getDatesToTest();
+
+        for (String zoneId : zoneIdsToTest) {
+            ZoneId zone = ZoneId.of(zoneId);
+            for (String date : datesToTest) {
+                LocalDateTime localDateTime = LocalDateTime.parse(date);
+                String expected = date.replace('T', ' ');
+                localTimestamps(zone, localDateTime, expected);
+            }
+        }
+    }
+
+    /**
+     * Test the behavior of setObject for timestamp columns.
+     */
+    @Test
+    public void testSetOffsetDateTime() throws SQLException {
+        List<String> zoneIdsToTest = getZoneIdsToTest();
+        List<TimeZone> storeZones = new ArrayList<>();
+        for (String zoneId : zoneIdsToTest) {
+            storeZones.add(TimeZone.getTimeZone(zoneId));
+        }
+        List<String> datesToTest = getDatesToTest();
+
+        for (TimeZone timeZone : storeZones) {
+            ZoneId zoneId = timeZone.toZoneId();
+            for (String date : datesToTest) {
+                LocalDateTime localDateTime = LocalDateTime.parse(date);
+                String expected = date.replace('T', ' ');
+                offsetTimestamps(zoneId, localDateTime, expected, storeZones);
+            }
+        }
+    }
+
+    private List<String> getDatesToTest() {
+        return Arrays.asList("2015-09-03T12:00:00", "2015-06-30T23:59:58",
+                "1997-06-30T23:59:59", "1997-07-01T00:00:00", "2012-06-30T23:59:59", "2012-07-01T00:00:00",
+                "2015-06-30T23:59:59", "2015-07-01T00:00:00", "2005-12-31T23:59:59", "2006-01-01T00:00:00",
+                "2008-12-31T23:59:59", "2009-01-01T00:00:00", /* "2015-06-30T23:59:60", */ "2015-07-31T00:00:00",
+                "2015-07-31T00:00:01", "2015-07-31T00:00:00.000001",
+
+                // On 2000-03-26 02:00:00 Moscow went to DST, thus local time became 03:00:00
+                "2000-03-26T01:59:59", "2000-03-26T02:00:00", "2000-03-26T02:00:01", "2000-03-26T02:59:59",
+                "2000-03-26T03:00:00", "2000-03-26T03:00:01", "2000-03-26T03:59:59", "2000-03-26T04:00:00",
+                "2000-03-26T04:00:01", "2000-03-26T04:00:00.000001",
+
+                // This is a pre-1970 date, so check if it is rounded properly
+                "1950-07-20T02:00:00",
+
+                // Ensure the calendar is proleptic
+                "1582-09-30T00:00:00", "1582-10-16T00:00:00",
+
+                // On 2000-10-29 03:00:00 Moscow went to regular time, thus local time became 02:00:00
+                "2000-10-29T01:59:59", "2000-10-29T02:00:00", "2000-10-29T02:00:01", "2000-10-29T02:59:59",
+                "2000-10-29T03:00:00", "2000-10-29T03:00:01", "2000-10-29T03:59:59", "2000-10-29T04:00:00",
+                "2000-10-29T04:00:01", "2000-10-29T04:00:00.000001");
+    }
+
+    private List<String> getZoneIdsToTest() {
+        List<String> zoneIdsToTest = new ArrayList<>();
+        zoneIdsToTest.add("Africa/Casablanca"); // It is something like GMT+0..GMT+1
+        zoneIdsToTest.add("America/Adak"); // It is something like GMT-10..GMT-9
+        zoneIdsToTest.add("Atlantic/Azores"); // It is something like GMT-1..GMT+0
+        zoneIdsToTest.add("Europe/Moscow"); // It is something like GMT+3..GMT+4 for 2000s
+        zoneIdsToTest.add("Pacific/Apia"); // It is something like GMT+13..GMT+14
+        zoneIdsToTest.add("Pacific/Niue"); // It is something like GMT-11..GMT-11
+        for (int i = -12; i <= 13; i++) {
+            zoneIdsToTest.add(String.format("GMT%+02d", i));
+        }
+        return zoneIdsToTest;
+    }
+
+    private void localTimestamps(ZoneId zoneId, LocalDateTime localDateTime, String expected) throws SQLException {
+        TimeZone.setDefault(TimeZone.getTimeZone(zoneId));
+        String readBack = insertThenReadStringWithoutType(localDateTime, "timestamp_without_time_zone_column");
+        assertEquals(
+                "LocalDateTime=" + localDateTime + ", with TimeZone.default=" + zoneId + ", setObject(int, Object)",
+                expected, readBack);
+        deleteRows();
+
+        readBack = insertThenReadStringWithType(localDateTime, "timestamp_without_time_zone_column");
+        assertEquals(
+                "LocalDateTime=" + localDateTime + ", with TimeZone.default=" + zoneId + ", setObject(int, Object, TIMESTAMP)",
+                expected, readBack);
+        deleteRows();
+    }
+
+    private void offsetTimestamps(ZoneId dataZone, LocalDateTime localDateTime, String expected, List<TimeZone> storeZones) throws SQLException {
+        OffsetDateTime data = localDateTime.atZone(dataZone).toOffsetDateTime();
+        try (PreparedStatement ps = con.prepareStatement(
+                "select ?::timestamp with time zone, ?::timestamp with time zone")) {
+            for (TimeZone storeZone : storeZones) {
+                TimeZone.setDefault(storeZone);
+                ps.setObject(1, data);
+                ps.setObject(2, data, Types.TIMESTAMP_WITH_TIMEZONE);
+                try (ResultSet rs = ps.executeQuery()) {
+                    rs.next();
+                    String noType = rs.getString(1);
+                    OffsetDateTime noTypeRes = parseBackendTimestamp(noType);
+                    assertEquals(
+                            "OffsetDateTime=" + data + " (with ZoneId=" + dataZone + "), with TimeZone.default="
+                                    + storeZone + ", setObject(int, Object)", data.toInstant(),
+                            noTypeRes.toInstant());
+                    String withType = rs.getString(2);
+                    OffsetDateTime withTypeRes = parseBackendTimestamp(withType);
+                    assertEquals(
+                            "OffsetDateTime=" + data + " (with ZoneId=" + dataZone + "), with TimeZone.default="
+                                    + storeZone + ", setObject(int, Object, TIMESTAMP_WITH_TIMEZONE)",
+                            data.toInstant(), withTypeRes.toInstant());
+                }
+            }
+        }
+    }
+
+    @Test
+    public void testLocalDateTimeRounding() throws SQLException {
+        LocalDateTime dateTime = LocalDateTime.parse("2018-12-31T23:59:59.999999500");
+        localTimestamps(ZoneOffset.UTC, dateTime, "2019-01-01 00:00:00");
+    }
+
+    @Test
+    public void testTimeStampRounding() throws SQLException {
+        // TODO: fix for binary
+        assumeBinaryModeRegular();
+        LocalTime time = LocalTime.parse("23:59:59.999999500");
+        Time actual = insertThenReadWithoutType(time, "time_without_time_zone_column", Time.class, false/*no roundtrip*/);
+        assertEquals(Time.valueOf("24:00:00"), actual);
+    }
+
+    @Test
+    public void testTimeStampRoundingWithType() throws SQLException {
+        // TODO: fix for binary
+        assumeBinaryModeRegular();
+        LocalTime time = LocalTime.parse("23:59:59.999999500");
+        Time actual =
+                insertThenReadWithType(time, Types.TIME, "time_without_time_zone_column", Time.class, false/*no roundtrip*/);
+        assertEquals(Time.valueOf("24:00:00"), actual);
+    }
+
+    /**
+     * Test the behavior of setObject for timestamp columns.
+     */
+    @Test
+    public void testSetLocalDateTimeBc() throws SQLException {
+        assumeTrue(TestUtil.haveIntegerDateTimes(con));
+
+        // use BC for funsies
+        List<LocalDateTime> bcDates = new ArrayList<>();
+        bcDates.add(LocalDateTime.parse("1997-06-30T23:59:59.999999").with(ChronoField.ERA, IsoEra.BCE.getValue()));
+        bcDates.add(LocalDateTime.parse("0997-06-30T23:59:59.999999").with(ChronoField.ERA, IsoEra.BCE.getValue()));
+
+        for (LocalDateTime bcDate : bcDates) {
+            String expected = LOCAL_TIME_FORMATTER.format(bcDate);
+            if (expected.endsWith(" BCE")) {
+                // Java 22.ea.25-open prints "BCE" even though previous releases printed "BC"
+                // See https://bugs.openjdk.org/browse/JDK-8320747
+                expected = expected.substring(0, expected.length() - 1);
+            }
+            localTimestamps(ZoneOffset.UTC, bcDate, expected);
+        }
+    }
+
+    /**
+     * Test the behavior of setObject for date columns.
+     */
+    @Test
+    public void testSetLocalDateWithType() throws SQLException {
+        LocalDate data = LocalDate.parse("1971-12-15");
+        java.sql.Date actual = insertThenReadWithType(data, Types.DATE, "date_column", java.sql.Date.class);
+        java.sql.Date expected = java.sql.Date.valueOf("1971-12-15");
+        assertEquals(expected, actual);
+    }
+
+    /**
+     * Test the behavior of setObject for date columns.
+     */
+    @Test
+    public void testSetLocalDateWithoutType() throws SQLException {
+        LocalDate data = LocalDate.parse("1971-12-15");
+        java.sql.Date actual = insertThenReadWithoutType(data, "date_column", java.sql.Date.class);
+        java.sql.Date expected = java.sql.Date.valueOf("1971-12-15");
+        assertEquals(expected, actual);
+    }
+
+    /**
+     * Test the behavior of setObject for time columns.
+     */
+    @Test
+    public void testSetLocalTimeAndReadBack() throws SQLException {
+        // TODO: fix for binary mode.
+        //  Avoid micros truncation in org.postgresql.jdbc.PgResultSet#internalGetObject
+        assumeBinaryModeRegular();
+        LocalTime data = LocalTime.parse("16:21:51.123456");
+
+        insertWithoutType(data, "time_without_time_zone_column");
+
+        String readBack = readString("time_without_time_zone_column");
+        assertEquals("16:21:51.123456", readBack);
+    }
+
+    /**
+     * Test the behavior of setObject for time columns.
+     */
+    @Test
+    public void testSetLocalTimeWithType() throws SQLException {
+        LocalTime data = LocalTime.parse("16:21:51");
+        Time actual = insertThenReadWithType(data, Types.TIME, "time_without_time_zone_column", Time.class);
+        Time expected = Time.valueOf("16:21:51");
+        assertEquals(expected, actual);
+    }
+
+    /**
+     * Test the behavior of setObject for time columns.
+     */
+    @Test
+    public void testSetLocalTimeWithoutType() throws SQLException {
+        LocalTime data = LocalTime.parse("16:21:51");
+        Time actual = insertThenReadWithoutType(data, "time_without_time_zone_column", Time.class);
+        Time expected = Time.valueOf("16:21:51");
+        assertEquals(expected, actual);
+    }
+
+    /**
+     * Test the behavior of setObject for time columns.
+     */
+    @Test
+    public void testSetOffsetTimeWithType() throws SQLException {
+        OffsetTime data = OffsetTime.parse("16:21:51+12:34");
+        insertThenReadWithType(data, Types.TIME, "time_with_time_zone_column", Time.class);
+    }
+
+    /**
+     * Test the behavior of setObject for time columns.
+     */
+    @Test
+    public void testSetOffsetTimeWithoutType() throws SQLException {
+        OffsetTime data = OffsetTime.parse("16:21:51+12:34");
+        insertThenReadWithoutType(data, "time_with_time_zone_column", Time.class);
+    }
 
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc42/SimpleJdbc42Test.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc42/SimpleJdbc42Test.java
index d765b6f..eb88596 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc42/SimpleJdbc42Test.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc42/SimpleJdbc42Test.java
@@ -16,11 +16,11 @@ import org.junit.Test;
  */
 public class SimpleJdbc42Test extends BaseTest4 {
 
-  /**
-   * Test presence of JDBC 4.2 specific methods.
-   */
-  @Test
-  public void testSupportsRefCursors() throws Exception {
-    assertTrue(con.getMetaData().supportsRefCursors());
-  }
+    /**
+     * Test presence of JDBC 4.2 specific methods.
+     */
+    @Test
+    public void testSupportsRefCursors() throws Exception {
+        assertTrue(con.getMetaData().supportsRefCursors());
+    }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbc42/TimestampUtilsTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbc42/TimestampUtilsTest.java
index 14fb14f..6be6181 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jdbc42/TimestampUtilsTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbc42/TimestampUtilsTest.java
@@ -18,125 +18,125 @@ import java.time.OffsetTime;
 import java.util.TimeZone;
 
 class TimestampUtilsTest {
-  private TimestampUtils timestampUtils;
+    private TimestampUtils timestampUtils;
 
-  @BeforeEach
-  void setUp() {
-    timestampUtils = new TimestampUtils(true, TimeZone::getDefault);
-  }
+    @BeforeEach
+    void setUp() {
+        timestampUtils = new TimestampUtils(true, TimeZone::getDefault);
+    }
 
-  @Test
-  void toStringOfLocalTime() {
-    assertToStringOfLocalTime("00:00:00");
-    assertToStringOfLocalTime("00:00:00.1");
-    assertToStringOfLocalTime("00:00:00.12");
-    assertToStringOfLocalTime("00:00:00.123");
-    assertToStringOfLocalTime("00:00:00.1234");
-    assertToStringOfLocalTime("00:00:00.12345");
-    assertToStringOfLocalTime("00:00:00.123456");
+    @Test
+    void toStringOfLocalTime() {
+        assertToStringOfLocalTime("00:00:00");
+        assertToStringOfLocalTime("00:00:00.1");
+        assertToStringOfLocalTime("00:00:00.12");
+        assertToStringOfLocalTime("00:00:00.123");
+        assertToStringOfLocalTime("00:00:00.1234");
+        assertToStringOfLocalTime("00:00:00.12345");
+        assertToStringOfLocalTime("00:00:00.123456");
 
-    assertToStringOfLocalTime("00:00:00.999999");
-    assertToStringOfLocalTime("00:00:00.999999", "00:00:00.999999499", "499 NanoSeconds round down");
-    assertToStringOfLocalTime("00:00:01", "00:00:00.999999500", "500 NanoSeconds round up");
+        assertToStringOfLocalTime("00:00:00.999999");
+        assertToStringOfLocalTime("00:00:00.999999", "00:00:00.999999499", "499 NanoSeconds round down");
+        assertToStringOfLocalTime("00:00:01", "00:00:00.999999500", "500 NanoSeconds round up");
 
-    assertToStringOfLocalTime("23:59:59");
+        assertToStringOfLocalTime("23:59:59");
 
-    assertToStringOfLocalTime("23:59:59.999999");
-    assertToStringOfLocalTime("23:59:59.999999", "23:59:59.999999499", "499 NanoSeconds round down");
-    assertToStringOfLocalTime("24:00:00", "23:59:59.999999500", "500 NanoSeconds round up");
-    assertToStringOfLocalTime("24:00:00", "23:59:59.999999999", "999 NanoSeconds round up");
-  }
+        assertToStringOfLocalTime("23:59:59.999999");
+        assertToStringOfLocalTime("23:59:59.999999", "23:59:59.999999499", "499 NanoSeconds round down");
+        assertToStringOfLocalTime("24:00:00", "23:59:59.999999500", "500 NanoSeconds round up");
+        assertToStringOfLocalTime("24:00:00", "23:59:59.999999999", "999 NanoSeconds round up");
+    }
 
-  private void assertToStringOfLocalTime(String inputTime) {
-    assertToStringOfLocalTime(inputTime, inputTime, null);
-  }
+    private void assertToStringOfLocalTime(String inputTime) {
+        assertToStringOfLocalTime(inputTime, inputTime, null);
+    }
 
-  private void assertToStringOfLocalTime(String expectedOutput, String inputTime, String message) {
-    assertEquals(
-        expectedOutput,
-        timestampUtils.toString(LocalTime.parse(inputTime)),
-        "timestampUtils.toString(LocalTime.parse(" + inputTime + "))"
-            + (message == null ? ": " + message : ""));
-  }
+    private void assertToStringOfLocalTime(String expectedOutput, String inputTime, String message) {
+        assertEquals(
+                expectedOutput,
+                timestampUtils.toString(LocalTime.parse(inputTime)),
+                "timestampUtils.toString(LocalTime.parse(" + inputTime + "))"
+                        + (message == null ? ": " + message : ""));
+    }
 
-  @Test
-  void toLocalTime() throws SQLException {
-    assertToLocalTime("00:00:00");
+    @Test
+    void toLocalTime() throws SQLException {
+        assertToLocalTime("00:00:00");
 
-    assertToLocalTime("00:00:00.1");
-    assertToLocalTime("00:00:00.12");
-    assertToLocalTime("00:00:00.123");
-    assertToLocalTime("00:00:00.1234");
-    assertToLocalTime("00:00:00.12345");
-    assertToLocalTime("00:00:00.123456");
-    assertToLocalTime("00:00:00.999999");
+        assertToLocalTime("00:00:00.1");
+        assertToLocalTime("00:00:00.12");
+        assertToLocalTime("00:00:00.123");
+        assertToLocalTime("00:00:00.1234");
+        assertToLocalTime("00:00:00.12345");
+        assertToLocalTime("00:00:00.123456");
+        assertToLocalTime("00:00:00.999999");
 
-    assertToLocalTime("23:59:59");
-    assertToLocalTime("23:59:59.999999"); // 0 NanoSeconds
-    assertToLocalTime("23:59:59.9999999"); // 900 NanoSeconds
-    assertToLocalTime("23:59:59.99999999"); // 990 NanoSeconds
-    assertToLocalTime("23:59:59.999999998"); // 998 NanoSeconds
-    assertToLocalTime(LocalTime.MAX.toString(), "24:00:00", "LocalTime can't represent 24:00:00");
-  }
+        assertToLocalTime("23:59:59");
+        assertToLocalTime("23:59:59.999999"); // 0 NanoSeconds
+        assertToLocalTime("23:59:59.9999999"); // 900 NanoSeconds
+        assertToLocalTime("23:59:59.99999999"); // 990 NanoSeconds
+        assertToLocalTime("23:59:59.999999998"); // 998 NanoSeconds
+        assertToLocalTime(LocalTime.MAX.toString(), "24:00:00", "LocalTime can't represent 24:00:00");
+    }
 
-  private void assertToLocalTime(String inputTime) throws SQLException {
-    assertToLocalTime(inputTime, inputTime, null);
-  }
+    private void assertToLocalTime(String inputTime) throws SQLException {
+        assertToLocalTime(inputTime, inputTime, null);
+    }
 
-  private void assertToLocalTime(String expectedOutput, String inputTime, String message) throws SQLException {
-    assertEquals(
-        LocalTime.parse(expectedOutput),
-        timestampUtils.toLocalTime(inputTime),
-        "timestampUtils.toLocalTime(" + inputTime + ")"
-            + (message == null ? ": " + message : ""));
-  }
+    private void assertToLocalTime(String expectedOutput, String inputTime, String message) throws SQLException {
+        assertEquals(
+                LocalTime.parse(expectedOutput),
+                timestampUtils.toLocalTime(inputTime),
+                "timestampUtils.toLocalTime(" + inputTime + ")"
+                        + (message == null ? ": " + message : ""));
+    }
 
-  @Test
-  void toStringOfOffsetTime() {
-    assertToStringOfOffsetTime("00:00:00+00", "00:00:00+00:00");
-    assertToStringOfOffsetTime("00:00:00.1+01", "00:00:00.1+01:00");
-    assertToStringOfOffsetTime("00:00:00.12+12", "00:00:00.12+12:00");
-    assertToStringOfOffsetTime("00:00:00.123-01", "00:00:00.123-01:00");
-    assertToStringOfOffsetTime("00:00:00.1234-02", "00:00:00.1234-02:00");
-    assertToStringOfOffsetTime("00:00:00.12345-12", "00:00:00.12345-12:00");
-    assertToStringOfOffsetTime("00:00:00.123456+01:30", "00:00:00.123456+01:30");
-    assertToStringOfOffsetTime("00:00:00.123456-12:34", "00:00:00.123456-12:34");
+    @Test
+    void toStringOfOffsetTime() {
+        assertToStringOfOffsetTime("00:00:00+00", "00:00:00+00:00");
+        assertToStringOfOffsetTime("00:00:00.1+01", "00:00:00.1+01:00");
+        assertToStringOfOffsetTime("00:00:00.12+12", "00:00:00.12+12:00");
+        assertToStringOfOffsetTime("00:00:00.123-01", "00:00:00.123-01:00");
+        assertToStringOfOffsetTime("00:00:00.1234-02", "00:00:00.1234-02:00");
+        assertToStringOfOffsetTime("00:00:00.12345-12", "00:00:00.12345-12:00");
+        assertToStringOfOffsetTime("00:00:00.123456+01:30", "00:00:00.123456+01:30");
+        assertToStringOfOffsetTime("00:00:00.123456-12:34", "00:00:00.123456-12:34");
 
-    assertToStringOfOffsetTime("23:59:59+01", "23:59:59+01:00");
+        assertToStringOfOffsetTime("23:59:59+01", "23:59:59+01:00");
 
-    assertToStringOfOffsetTime("23:59:59.999999+01", "23:59:59.999999+01:00");
-    assertToStringOfOffsetTime("23:59:59.999999+01", "23:59:59.999999499+01:00"); // 499 NanoSeconds
-    assertToStringOfOffsetTime("24:00:00+01", "23:59:59.999999500+01:00"); // 500 NanoSeconds
-    assertToStringOfOffsetTime("24:00:00+01", "23:59:59.999999999+01:00"); // 999 NanoSeconds
-  }
+        assertToStringOfOffsetTime("23:59:59.999999+01", "23:59:59.999999+01:00");
+        assertToStringOfOffsetTime("23:59:59.999999+01", "23:59:59.999999499+01:00"); // 499 NanoSeconds
+        assertToStringOfOffsetTime("24:00:00+01", "23:59:59.999999500+01:00"); // 500 NanoSeconds
+        assertToStringOfOffsetTime("24:00:00+01", "23:59:59.999999999+01:00"); // 999 NanoSeconds
+    }
 
-  private void assertToStringOfOffsetTime(String expectedOutput, String inputTime) {
-    assertEquals(expectedOutput,
-        timestampUtils.toString(OffsetTime.parse(inputTime)),
-        "timestampUtils.toString(OffsetTime.parse(" + inputTime + "))");
-  }
+    private void assertToStringOfOffsetTime(String expectedOutput, String inputTime) {
+        assertEquals(expectedOutput,
+                timestampUtils.toString(OffsetTime.parse(inputTime)),
+                "timestampUtils.toString(OffsetTime.parse(" + inputTime + "))");
+    }
 
-  @Test
-  void toOffsetTime() throws SQLException {
-    assertToOffsetTime("00:00:00+00:00", "00:00:00+00");
-    assertToOffsetTime("00:00:00.1+01:00", "00:00:00.1+01");
-    assertToOffsetTime("00:00:00.12+12:00", "00:00:00.12+12");
-    assertToOffsetTime("00:00:00.123-01:00", "00:00:00.123-01");
-    assertToOffsetTime("00:00:00.1234-02:00", "00:00:00.1234-02");
-    assertToOffsetTime("00:00:00.12345-12:00", "00:00:00.12345-12");
-    assertToOffsetTime("00:00:00.123456+01:30", "00:00:00.123456+01:30");
-    assertToOffsetTime("00:00:00.123456-12:34", "00:00:00.123456-12:34");
+    @Test
+    void toOffsetTime() throws SQLException {
+        assertToOffsetTime("00:00:00+00:00", "00:00:00+00");
+        assertToOffsetTime("00:00:00.1+01:00", "00:00:00.1+01");
+        assertToOffsetTime("00:00:00.12+12:00", "00:00:00.12+12");
+        assertToOffsetTime("00:00:00.123-01:00", "00:00:00.123-01");
+        assertToOffsetTime("00:00:00.1234-02:00", "00:00:00.1234-02");
+        assertToOffsetTime("00:00:00.12345-12:00", "00:00:00.12345-12");
+        assertToOffsetTime("00:00:00.123456+01:30", "00:00:00.123456+01:30");
+        assertToOffsetTime("00:00:00.123456-12:34", "00:00:00.123456-12:34");
 
-    assertToOffsetTime("23:59:59.999999+01:00", "23:59:59.999999+01"); // 0 NanoSeconds
-    assertToOffsetTime("23:59:59.9999999+01:00", "23:59:59.9999999+01"); // 900 NanoSeconds
-    assertToOffsetTime("23:59:59.99999999+01:00", "23:59:59.99999999+01"); // 990 NanoSeconds
-    assertToOffsetTime("23:59:59.999999998+01:00", "23:59:59.999999998+01"); // 998 NanoSeconds
-    assertToOffsetTime(OffsetTime.MAX.toString(), "24:00:00+01");
-  }
+        assertToOffsetTime("23:59:59.999999+01:00", "23:59:59.999999+01"); // 0 NanoSeconds
+        assertToOffsetTime("23:59:59.9999999+01:00", "23:59:59.9999999+01"); // 900 NanoSeconds
+        assertToOffsetTime("23:59:59.99999999+01:00", "23:59:59.99999999+01"); // 990 NanoSeconds
+        assertToOffsetTime("23:59:59.999999998+01:00", "23:59:59.999999998+01"); // 998 NanoSeconds
+        assertToOffsetTime(OffsetTime.MAX.toString(), "24:00:00+01");
+    }
 
-  private void assertToOffsetTime(String expectedOutput, String inputTime) throws SQLException {
-    assertEquals(OffsetTime.parse(expectedOutput),
-        timestampUtils.toOffsetTime(inputTime),
-        "timestampUtils.toOffsetTime(" + inputTime + ")");
-  }
+    private void assertToOffsetTime(String expectedOutput, String inputTime) throws SQLException {
+        assertEquals(OffsetTime.parse(expectedOutput),
+                timestampUtils.toOffsetTime(inputTime),
+                "timestampUtils.toOffsetTime(" + inputTime + ")");
+    }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbcurlresolver/PgPassParserTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbcurlresolver/PgPassParserTest.java
new file mode 100644
index 0000000..77c460c
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbcurlresolver/PgPassParserTest.java
@@ -0,0 +1,184 @@
+/*
+ * Copyright (c) 2021, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.test.jdbcurlresolver;
+
+import java.net.URL;
+import org.junit.jupiter.api.Test;
+import org.postgresql.PGEnvironment;
+import org.postgresql.jdbcurlresolver.PgPassParser;
+import org.postgresql.test.util.StubEnvironmentAndProperties;
+import org.postgresql.test.util.systemstubs.EnvironmentVariables;
+import org.postgresql.test.util.systemstubs.properties.SystemProperties;
+import org.postgresql.test.util.systemstubs.resource.Resources;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+
+/**
+ * Password resource location used is decided based on availability of different environment
+ * variables and file existence in user home directory. Tests verify selection of proper resource.
+ * Also, resource content (* matching, escape character handling, comments, etc.) can be written
+ * creatively. Tests verify several cases.
+ *
+ * @author Marek Läll
+ */
+@StubEnvironmentAndProperties
+class PgPassParserTest {
+
+    // "org.postgresql.pgpassfile" : missing
+    // "PGPASSFILE"                : missing
+    // ".pgpass"                   : missing
+    @Test
+    void getPassword11() throws Exception {
+        Resources.with(
+                new EnvironmentVariables(PGEnvironment.PGPASSFILE.getName(), "", "APPDATA", "/tmp/dir-nonexistent"),
+                new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGPASSFILE.getName(), "", "user.home", "/tmp/dir-nonexistent")
+        ).execute(() -> {
+            String result = PgPassParser.getPassword("localhost", "5432", "postgres", "postgres");
+            assertNull(result);
+        });
+    }
+
+    // "org.postgresql.pgpassfile" : missing
+    // "PGPASSFILE"                : missing
+    // ".pgpass"                   : exist
+    // <password line>             : exist
+    @Test
+    void getPassword22() throws Exception {
+        URL urlPath = getClass().getResource("/pg_service");
+        assertNotNull(urlPath);
+        Resources.with(
+                new EnvironmentVariables(PGEnvironment.PGPASSFILE.getName(), "", "APPDATA", urlPath.getPath()),
+                new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGPASSFILE.getName(), "", "user.home", urlPath.getPath())
+        ).execute(() -> {
+            String result = PgPassParser.getPassword("localhost", "5432", "postgres",
+                    "postgres");
+            assertEquals("postgres1", result);
+            result = PgPassParser.getPassword("localhost2", "5432", "postgres", "postgres");
+            assertEquals("postgres\\", result);
+            result = PgPassParser.getPassword("localhost3", "5432", "postgres", "postgres");
+            assertEquals("postgres:", result);
+            result = PgPassParser.getPassword("localhost4", "5432", "postgres", "postgres");
+            assertEquals("postgres1:", result);
+            result = PgPassParser.getPassword("localhost5", "5432", "postgres", "postgres");
+            assertEquals("postgres5", result);
+            result = PgPassParser.getPassword("localhost6", "5432", "postgres", "postgres");
+            assertEquals("post\\gres\\", result);
+            result = PgPassParser.getPassword("localhost7", "5432", "postgres", "postgres");
+            assertEquals(" ab cd", result);
+            result = PgPassParser.getPassword("localhost8", "5432", "postgres", "postgres");
+            assertEquals("", result);
+            //
+            result = PgPassParser.getPassword("::1", "1234", "colon:db", "colon:user");
+            assertEquals("pass:pass", result);
+            result = PgPassParser.getPassword("::1", "12345", "colon:db", "colon:user");
+            assertEquals("pass:pass1", result);
+            result = PgPassParser.getPassword("::1", "1234", "slash\\db", "slash\\user");
+            assertEquals("pass\\pass", result);
+            result = PgPassParser.getPassword("::1", "12345", "slash\\db", "slash\\user");
+            assertEquals("pass\\pass1", result);
+            //
+            result = PgPassParser.getPassword("any", "5432", "postgres", "postgres");
+            assertEquals("anyhost5", result);
+            result = PgPassParser.getPassword("localhost11", "9999", "postgres", "postgres");
+            assertEquals("anyport5", result);
+            result = PgPassParser.getPassword("localhost12", "5432", "anydb", "postgres");
+            assertEquals("anydb5", result);
+            result = PgPassParser.getPassword("localhost13", "5432", "postgres", "anyuser");
+            assertEquals("anyuser5", result);
+            //
+            result = PgPassParser.getPassword("anyhost", "6544", "anydb", "anyuser");
+            assertEquals("absolute-any", result);
+        });
+    }
+
+    // "org.postgresql.pgpassfile" : missing
+    // "PGPASSFILE"                : exist
+    // ".pgpass"                   : exist
+    // <password line>             : missing
+    @Test
+    void getPassword31() throws Exception {
+        URL urlPath = getClass().getResource("/pg_service");
+        assertNotNull(urlPath);
+        URL urlFileEnv = getClass().getResource("/pg_service/pgpassfileEnv.conf");
+        assertNotNull(urlFileEnv);
+        Resources.with(
+                new EnvironmentVariables(PGEnvironment.PGPASSFILE.getName(), urlFileEnv.getFile(), "APPDATA", urlPath.getPath()),
+                new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGPASSFILE.getName(), "", "user.home", urlPath.getPath())
+        ).execute(() -> {
+            String result = PgPassParser.getPassword("localhost-missing", "5432", "postgres1", "postgres2");
+            assertNull(result);
+        });
+    }
+
+    // "org.postgresql.pgpassfile" : missing
+    // "PGPASSFILE"                : exist
+    // ".pgpass"                   : exist
+    // <password line>             : exist
+    @Test
+    void getPassword32() throws Exception {
+        URL urlPath = getClass().getResource("/pg_service");
+        assertNotNull(urlPath);
+        URL urlFileEnv = getClass().getResource("/pg_service/pgpassfileEnv.conf");
+        assertNotNull(urlFileEnv);
+        Resources.with(
+                new EnvironmentVariables(PGEnvironment.PGPASSFILE.getName(), urlFileEnv.getPath(), "APPDATA", urlPath.getPath()),
+                new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGPASSFILE.getName(), "", "user.home", urlPath.getPath())
+        ).execute(() -> {
+            String result = PgPassParser.getPassword("localhost", "5432", "postgres1",
+                    "postgres2");
+            assertEquals("postgres3", result);
+        });
+    }
+
+
+    // "org.postgresql.pgpassfile" : exist
+    // "PGPASSFILE"                : exist
+    // ".pgpass"                   : exist
+    // <password line>             : missing
+    @Test
+    void getPassword41() throws Exception {
+        URL urlPath = getClass().getResource("/pg_service");
+        assertNotNull(urlPath);
+        URL urlFileEnv = getClass().getResource("/pg_service/pgpassfileEnv.conf");
+        assertNotNull(urlFileEnv);
+        URL urlFileProps = getClass().getResource("/pg_service/pgpassfileProps.conf");
+        assertNotNull(urlFileProps);
+        Resources.with(
+                new EnvironmentVariables(PGEnvironment.PGPASSFILE.getName(), urlFileEnv.getFile(), "APPDATA", urlPath.getPath()),
+                new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGPASSFILE.getName(), "", "user.home", urlPath.getPath())
+        ).execute(() -> {
+            String result = PgPassParser.getPassword("localhost-missing", "5432", "postgres1", "postgres2");
+            assertNull(result);
+        });
+    }
+
+    // "org.postgresql.pgpassfile" : exist
+    // "PGPASSFILE"                : exist
+    // ".pgpass"                   : exist
+    // <password line>             : exist
+    @Test
+    void getPassword42() throws Exception {
+        URL urlPath = getClass().getResource("/pg_service");
+        assertNotNull(urlPath);
+        URL urlFileEnv = getClass().getResource("/pg_service/pgpassfileEnv.conf");
+        assertNotNull(urlFileEnv);
+        URL urlFileProps = getClass().getResource("/pg_service/pgpassfileProps.conf");
+        assertNotNull(urlFileProps);
+        Resources.with(
+                new EnvironmentVariables(PGEnvironment.PGPASSFILE.getName(), urlFileEnv.getPath(), "APPDATA", urlPath.getPath()),
+                new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGPASSFILE.getName(), urlFileProps.getFile(), "user.home", urlPath.getPath())
+        ).execute(() -> {
+            String result = PgPassParser.getPassword("localhost77", "5432", "any", "postgres11");
+            assertEquals("postgres22", result);
+            result = PgPassParser.getPassword("localhost888", "5432", "any", "postgres11");
+            assertNull(result);
+            result = PgPassParser.getPassword("localhost999", "5432", "any", "postgres11");
+            assertNull(result);
+        });
+    }
+
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jdbcurlresolver/PgServiceConfParserTest.java b/pgjdbc/src/test/java/org/postgresql/test/jdbcurlresolver/PgServiceConfParserTest.java
new file mode 100644
index 0000000..c9a5a0f
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/jdbcurlresolver/PgServiceConfParserTest.java
@@ -0,0 +1,333 @@
+/*
+ * Copyright (c) 2021, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.test.jdbcurlresolver;
+
+import java.net.URL;
+import java.util.Properties;
+import org.junit.jupiter.api.Test;
+import org.postgresql.PGEnvironment;
+import org.postgresql.jdbcurlresolver.PgServiceConfParser;
+import org.postgresql.test.util.StubEnvironmentAndProperties;
+import org.postgresql.test.util.systemstubs.EnvironmentVariables;
+import org.postgresql.test.util.systemstubs.properties.SystemProperties;
+import org.postgresql.test.util.systemstubs.resource.Resources;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+/**
+ * Service resource location used is decided based on availability of different environment
+ * variables and file existence in user home directory. Tests verify selection of proper resource.
+ * Also, resource content (section headers, comments, key-value pairs, etc.) can be written
+ * creatively. Tests verify several cases.
+ *
+ * @author Marek Läll
+ */
+@StubEnvironmentAndProperties
+class PgServiceConfParserTest {
+
+    // "org.postgresql.pgservicefile" : missing
+    // "PGSERVICEFILE"                : missing
+    // ".pg_service.conf"             : missing
+    // "PGSYSCONFDIR"                 : missing
+    @Test
+    void pgService11() throws Exception {
+        Resources.with(
+                new EnvironmentVariables(PGEnvironment.PGSERVICEFILE.getName(), "", PGEnvironment.PGSYSCONFDIR.getName(), ""),
+                new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGSERVICEFILE.getName(), "", "user.home", "/tmp/dir-nonexistent")
+        ).execute(() -> {
+            Properties result = PgServiceConfParser.getServiceProperties("service-nonexistent");
+            assertNull(result);
+        });
+    }
+
+    // "org.postgresql.pgservicefile" : missing
+    // "PGSERVICEFILE"                : missing
+    // ".pg_service.conf"             : missing
+    // "PGSYSCONFDIR"                 : exist
+    // <service>                      : missing
+    @Test
+    void pgService21() throws Exception {
+        URL urlPath = getClass().getResource("/pg_service");
+        assertNotNull(urlPath);
+        Resources.with(
+                new EnvironmentVariables(PGEnvironment.PGSERVICEFILE.getName(), "", PGEnvironment.PGSYSCONFDIR.getName(), urlPath.getPath()),
+                new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGSERVICEFILE.getName(), "", "user.home", "/tmp/dir-nonexistent")
+        ).execute(() -> {
+            Properties result = PgServiceConfParser.getServiceProperties("service-nonexistent");
+            assertNull(result);
+            result = PgServiceConfParser.getServiceProperties("empty-service1");
+            assertNotNull(result);
+            assertTrue(result.isEmpty());
+        });
+    }
+
+    // "org.postgresql.pgservicefile" : missing
+    // "PGSERVICEFILE"                : missing
+    // ".pg_service.conf"             : missing
+    // "PGSYSCONFDIR"                 : exist
+    // <service>                      : exist
+    @Test
+    void pgService22() throws Exception {
+        URL urlPath = getClass().getResource("/pg_service");
+        assertNotNull(urlPath);
+        Resources.with(
+                new EnvironmentVariables(PGEnvironment.PGSERVICEFILE.getName(), "", PGEnvironment.PGSYSCONFDIR.getName(), urlPath.getPath()),
+                new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGSERVICEFILE.getName(), "", "user.home", "/tmp/dir-nonexistent")
+        ).execute(() -> {
+            Properties result = PgServiceConfParser.getServiceProperties("test-service1");
+            assertNotNull(result);
+            assertEquals("test_dbname", result.get("PGDBNAME"));
+            assertEquals("global-test-host.test.net", result.get("PGHOST"));
+            assertEquals("5433", result.get("PGPORT"));
+            assertEquals("admin", result.get("user"));
+            assertEquals(4, result.size());
+        });
+    }
+
+    // "org.postgresql.pgservicefile" : missing
+    // "PGSERVICEFILE"                : missing
+    // ".pg_service.conf"             : missing
+    // "PGSYSCONFDIR"                 : exist - but file itself is missing
+    // <service>                      : exist
+    @Test
+    void pgService23() throws Exception {
+        String nonExistingDir = "non-existing-dir";
+        Resources.with(
+                new EnvironmentVariables(PGEnvironment.PGSERVICEFILE.getName(), "", PGEnvironment.PGSYSCONFDIR.getName(), nonExistingDir),
+                new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGSERVICEFILE.getName(), "", "user.home", "/tmp/dir-nonexistent")
+        ).execute(() -> {
+            Properties result = PgServiceConfParser.getServiceProperties("test-service1");
+            assertNull(result);
+        });
+    }
+
+
+    // "org.postgresql.pgservicefile" : missing
+    // "PGSERVICEFILE"                : missing
+    // ".pg_service.conf"             : exist
+    // "PGSYSCONFDIR"                 : exist
+    // <service>                      : missing
+    @Test
+    void pgService31() throws Exception {
+        URL urlPath = getClass().getResource("/pg_service");
+        assertNotNull(urlPath);
+        Resources.with(
+                new EnvironmentVariables(PGEnvironment.PGSERVICEFILE.getName(), "", PGEnvironment.PGSYSCONFDIR.getName(), urlPath.getPath()),
+                new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGSERVICEFILE.getName(), "", "user.home", urlPath.getPath())
+        ).execute(() -> {
+            Properties result = PgServiceConfParser.getServiceProperties("service-nonexistent");
+            assertNull(result);
+            result = PgServiceConfParser.getServiceProperties("empty-service1");
+            assertNotNull(result);
+            assertTrue(result.isEmpty());
+        });
+    }
+
+    // "org.postgresql.pgservicefile" : missing
+    // "PGSERVICEFILE"                : missing
+    // ".pg_service.conf"             : exist
+    // "PGSYSCONFDIR"                 : exist
+    // <service>                      : exist
+    @Test
+    void pgService32() throws Exception {
+        URL urlPath = getClass().getResource("/pg_service");
+        assertNotNull(urlPath);
+        Resources.with(
+                new EnvironmentVariables(PGEnvironment.PGSERVICEFILE.getName(), "", "APPDATA", urlPath.getPath(), PGEnvironment.PGSYSCONFDIR.getName(), urlPath.getPath()),
+                new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGSERVICEFILE.getName(), "", "user.home", urlPath.getPath())
+        ).execute(() -> {
+            Properties result = PgServiceConfParser.getServiceProperties("test-service1");
+            assertNotNull(result);
+            assertEquals(" test_dbname", result.get("PGDBNAME"));
+            assertEquals("local-test-host.test.net", result.get("PGHOST"));
+            assertEquals("5433", result.get("PGPORT"));
+            assertEquals("admin", result.get("user"));
+            assertEquals(4, result.size());
+        });
+    }
+
+
+    // "org.postgresql.pgservicefile" : missing
+    // "PGSERVICEFILE"                : exist
+    // ".pg_service.conf"             : exist
+    // "PGSYSCONFDIR"                 : exist
+    // <service>                      : missing
+    @Test
+    void pgService41() throws Exception {
+        URL urlPath = getClass().getResource("/pg_service");
+        assertNotNull(urlPath);
+        URL urlFileEnv = getClass().getResource("/pg_service/pgservicefileEnv.conf");
+        assertNotNull(urlFileEnv);
+        Resources.with(
+                new EnvironmentVariables(PGEnvironment.PGSERVICEFILE.getName(), urlFileEnv.getFile(), PGEnvironment.PGSYSCONFDIR.getName(), urlPath.getPath()),
+                new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGSERVICEFILE.getName(), "", "user.home", urlPath.getPath())
+        ).execute(() -> {
+            Properties result = PgServiceConfParser.getServiceProperties("service-nonexistent");
+            assertNull(result);
+            result = PgServiceConfParser.getServiceProperties("empty-service1");
+            assertNotNull(result);
+            assertTrue(result.isEmpty());
+        });
+    }
+
+    // "org.postgresql.pgservicefile" : missing
+    // "PGSERVICEFILE"                : exist
+    // ".pg_service.conf"             : exist
+    // "PGSYSCONFDIR"                 : exist
+    // <service>                      : exist
+    @Test
+    void pgService42() throws Exception {
+        URL urlPath = getClass().getResource("/pg_service");
+        assertNotNull(urlPath);
+        URL urlFileEnv = getClass().getResource("/pg_service/pgservicefileEnv.conf");
+        assertNotNull(urlFileEnv);
+        Resources.with(
+                new EnvironmentVariables(PGEnvironment.PGSERVICEFILE.getName(), urlFileEnv.getFile(), PGEnvironment.PGSYSCONFDIR.getName(), urlPath.getPath()),
+                new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGSERVICEFILE.getName(), "", "user.home", urlPath.getPath())
+        ).execute(() -> {
+            Properties result = PgServiceConfParser.getServiceProperties("test-service1");
+            assertNotNull(result);
+            assertEquals("test_dbname", result.get("PGDBNAME"));
+            assertEquals("pgservicefileEnv-test-host.test.net", result.get("PGHOST"));
+            assertEquals("5433", result.get("PGPORT"));
+            assertEquals("admin", result.get("user"));
+            assertEquals("disable", result.get("sslmode"));
+            assertEquals(5, result.size());
+        });
+    }
+
+    // "org.postgresql.pgservicefile" : missing
+    // "PGSERVICEFILE"                : exist - but file itself is missing
+    // ".pg_service.conf"             : exist
+    // "PGSYSCONFDIR"                 : exist
+    // <service>                      : exist
+    @Test
+    void pgService43() throws Exception {
+        URL urlPath = getClass().getResource("/pg_service");
+        assertNotNull(urlPath);
+        String nonExistingFile = "non-existing-file.conf";
+        Resources.with(
+                new EnvironmentVariables(PGEnvironment.PGSERVICEFILE.getName(), nonExistingFile, PGEnvironment.PGSYSCONFDIR.getName(), urlPath.getPath()),
+                new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGSERVICEFILE.getName(), "", "user.home", urlPath.getPath())
+        ).execute(() -> {
+            Properties result = PgServiceConfParser.getServiceProperties("test-service1");
+            assertNull(result);
+        });
+    }
+
+
+    // "org.postgresql.pgservicefile" : exist
+    // "PGSERVICEFILE"                : exist
+    // ".pg_service.conf"             : exist
+    // "PGSYSCONFDIR"                 : exist
+    // <service>                      : missing
+    @Test
+    void pgService51() throws Exception {
+        URL urlPath = getClass().getResource("/pg_service");
+        assertNotNull(urlPath);
+        URL urlFileEnv = getClass().getResource("/pg_service/pgservicefileEnv.conf");
+        assertNotNull(urlFileEnv);
+        URL urlFileProps = getClass().getResource("/pg_service/pgservicefileProps.conf");
+        assertNotNull(urlFileProps);
+        Resources.with(
+                new EnvironmentVariables(PGEnvironment.PGSERVICEFILE.getName(), urlFileEnv.getFile(), PGEnvironment.PGSYSCONFDIR.getName(), urlPath.getPath()),
+                new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGSERVICEFILE.getName(), urlFileProps.getFile(), "user.home", urlPath.getPath())
+        ).execute(() -> {
+            Properties result = PgServiceConfParser.getServiceProperties("service-nonexistent");
+            assertNull(result);
+            result = PgServiceConfParser.getServiceProperties("empty-service1");
+            assertNotNull(result);
+            assertTrue(result.isEmpty());
+        });
+    }
+
+    // "org.postgresql.pgservicefile" : exist
+    // "PGSERVICEFILE"                : exist
+    // ".pg_service.conf"             : exist
+    // "PGSYSCONFDIR"                 : exist
+    // <service>                      : exist
+    @Test
+    void pgService52() throws Exception {
+        URL urlPath = getClass().getResource("/pg_service");
+        assertNotNull(urlPath);
+        URL urlFileEnv = getClass().getResource("/pg_service/pgservicefileEnv.conf");
+        assertNotNull(urlFileEnv);
+        URL urlFileProps = getClass().getResource("/pg_service/pgservicefileProps.conf");
+        assertNotNull(urlFileProps);
+        Resources.with(
+                new EnvironmentVariables(PGEnvironment.PGSERVICEFILE.getName(), urlFileEnv.getFile(), PGEnvironment.PGSYSCONFDIR.getName(), urlPath.getPath()),
+                new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGSERVICEFILE.getName(), urlFileProps.getFile(), "user.home", urlPath.getPath())
+        ).execute(() -> {
+            Properties result = PgServiceConfParser.getServiceProperties("test-service1");
+            assertNotNull(result);
+            assertEquals("test_dbname", result.get("PGDBNAME"));
+            assertEquals("pgservicefileProps-test-host.test.net", result.get("PGHOST"));
+            assertEquals("5433", result.get("PGPORT"));
+            assertEquals("admin", result.get("user"));
+            assertEquals(4, result.size());
+        });
+    }
+
+    // "org.postgresql.pgservicefile" : exist - but file itself is missing
+    // "PGSERVICEFILE"                : exist
+    // ".pg_service.conf"             : exist
+    // "PGSYSCONFDIR"                 : exist
+    // <service>                      : exist
+    @Test
+    void pgService53() throws Exception {
+        URL urlPath = getClass().getResource("/pg_service");
+        assertNotNull(urlPath);
+        URL urlFileEnv = getClass().getResource("/pg_service/pgservicefileEnv.conf");
+        assertNotNull(urlFileEnv);
+        String nonExistingFile = "non-existing-file.conf";
+        Resources.with(
+                new EnvironmentVariables(PGEnvironment.PGSERVICEFILE.getName(), urlFileEnv.getFile(), PGEnvironment.PGSYSCONFDIR.getName(), urlPath.getPath()),
+                new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGSERVICEFILE.getName(), nonExistingFile, "user.home", urlPath.getPath())
+        ).execute(() -> {
+            Properties result = PgServiceConfParser.getServiceProperties("test-service1");
+            assertNull(result);
+        });
+    }
+
+
+    // resource content read tests
+    @Test
+    void pgService61() throws Exception {
+        URL urlPath = getClass().getResource("/pg_service");
+        assertNotNull(urlPath);
+        Resources.with(
+                new EnvironmentVariables(PGEnvironment.PGSERVICEFILE.getName(), "", "APPDATA", urlPath.getPath(), PGEnvironment.PGSYSCONFDIR.getName(), ""),
+                new SystemProperties(PGEnvironment.ORG_POSTGRESQL_PGSERVICEFILE.getName(), "", "user.home", urlPath.getPath())
+        ).execute(() -> {
+            Properties result;
+            // fail if there is space between key and equal sign
+            result = PgServiceConfParser.getServiceProperties("fail-case-1");
+            assertNull(result);
+            // service name is case-sensitive
+            result = PgServiceConfParser.getServiceProperties("fail-case-2");
+            assertNull(result);
+            // service name is case-sensitive
+            result = PgServiceConfParser.getServiceProperties("fail-case-2");
+            assertNull(result);
+            // invalid line in the section
+            result = PgServiceConfParser.getServiceProperties("fail-case-3");
+            assertNull(result);
+            // service name: space before and after name becomes part of name
+            result = PgServiceConfParser.getServiceProperties(" success-case-3 ");
+            assertNotNull(result);
+            assertEquals("local-somehost3", result.get("PGHOST"));
+            assertEquals(1, result.size());
+            // service name: space inside name is part of name
+            result = PgServiceConfParser.getServiceProperties("success case 4");
+            assertNotNull(result);
+            assertEquals("local-somehost4", result.get("PGHOST"));
+            assertEquals(1, result.size());
+        });
+    }
+
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jre8/core/Jre8TestSuite.java b/pgjdbc/src/test/java/org/postgresql/test/jre8/core/Jre8TestSuite.java
index 6c971a0..fd892b5 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jre8/core/Jre8TestSuite.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jre8/core/Jre8TestSuite.java
@@ -10,11 +10,11 @@ import org.junit.runners.Suite;
 
 /**
  * @author Joe Kutner on 10/24/17.
- *         Twitter: @codefinger
+ * Twitter: @codefinger
  */
 @RunWith(Suite.class)
 @Suite.SuiteClasses({
-    SocksProxyTest.class,
+        SocksProxyTest.class,
 })
 public class Jre8TestSuite {
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/jre8/core/SocksProxyTest.java b/pgjdbc/src/test/java/org/postgresql/test/jre8/core/SocksProxyTest.java
index ec8e368..1edb760 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/jre8/core/SocksProxyTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/jre8/core/SocksProxyTest.java
@@ -5,44 +5,41 @@
 
 package org.postgresql.test.jre8.core;
 
-import static org.junit.jupiter.api.Assertions.assertNotNull;
-
-import org.postgresql.test.TestUtil;
-
-import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.Test;
-
 import java.sql.Connection;
 import java.sql.DriverManager;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Test;
+import org.postgresql.test.TestUtil;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
 
 /**
  * @author Joe Kutner on 10/9/17.
- *         Twitter: @codefinger
+ * Twitter: @codefinger
  */
 class SocksProxyTest {
 
-  @AfterEach
-  void cleanup() {
-    System.clearProperty("socksProxyHost");
-    System.clearProperty("socksProxyPort");
-    System.clearProperty("socksNonProxyHosts");
-  }
+    @AfterEach
+    void cleanup() {
+        System.clearProperty("socksProxyHost");
+        System.clearProperty("socksProxyPort");
+        System.clearProperty("socksNonProxyHosts");
+    }
 
-  /**
-   * Tests the connect method by connecting to the test database.
-   */
-  @Test
-  void connectWithSocksNonProxyHost() throws Exception {
-    System.setProperty("socksProxyHost", "fake-socks-proxy");
-    System.setProperty("socksProxyPort", "9999");
-    System.setProperty("socksNonProxyHosts", TestUtil.getServer());
+    /**
+     * Tests the connect method by connecting to the test database.
+     */
+    @Test
+    void connectWithSocksNonProxyHost() throws Exception {
+        System.setProperty("socksProxyHost", "fake-socks-proxy");
+        System.setProperty("socksProxyPort", "9999");
+        System.setProperty("socksNonProxyHosts", TestUtil.getServer());
 
-    TestUtil.initDriver(); // Set up log levels, etc.
+        TestUtil.initDriver(); // Set up log levels, etc.
 
-    Connection con =
-        DriverManager.getConnection(TestUtil.getURL(), TestUtil.getUser(), TestUtil.getPassword());
+        Connection con =
+                DriverManager.getConnection(TestUtil.getURL(), TestUtil.getUser(), TestUtil.getPassword());
 
-    assertNotNull(con);
-    con.close();
-  }
+        assertNotNull(con);
+        con.close();
+    }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/osgi/OsgiTestSuite.java b/pgjdbc/src/test/java/org/postgresql/test/osgi/OsgiTestSuite.java
deleted file mode 100644
index 7402247..0000000
--- a/pgjdbc/src/test/java/org/postgresql/test/osgi/OsgiTestSuite.java
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * Copyright (c) 2003, PostgreSQL Global Development Group
- * See the LICENSE file in the project root for more information.
- */
-
-package org.postgresql.test.osgi;
-
-import org.junit.runner.RunWith;
-import org.junit.runners.Suite;
-import org.junit.runners.Suite.SuiteClasses;
-
-@RunWith(Suite.class)
-@SuiteClasses({
-    PGDataSourceFactoryTest.class,
-})
-public class OsgiTestSuite {
-
-}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/osgi/PGDataSourceFactoryTest.java b/pgjdbc/src/test/java/org/postgresql/test/osgi/PGDataSourceFactoryTest.java
deleted file mode 100644
index d1d00f1..0000000
--- a/pgjdbc/src/test/java/org/postgresql/test/osgi/PGDataSourceFactoryTest.java
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * Copyright (c) 2003, PostgreSQL Global Development Group
- * See the LICENSE file in the project root for more information.
- */
-
-package org.postgresql.test.osgi;
-
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertNotNull;
-import static org.junit.jupiter.api.Assertions.assertTrue;
-
-import org.postgresql.jdbc2.optional.ConnectionPool;
-import org.postgresql.jdbc2.optional.PoolingDataSource;
-import org.postgresql.jdbc2.optional.SimpleDataSource;
-import org.postgresql.osgi.PGDataSourceFactory;
-import org.postgresql.xa.PGXADataSource;
-
-import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.Test;
-import org.osgi.service.jdbc.DataSourceFactory;
-
-import java.sql.Driver;
-import java.util.Properties;
-
-import javax.sql.ConnectionPoolDataSource;
-import javax.sql.DataSource;
-import javax.sql.XADataSource;
-
-class PGDataSourceFactoryTest {
-
-  private DataSourceFactory dataSourceFactory;
-
-  @BeforeEach
-  void createFactory() {
-    dataSourceFactory = new PGDataSourceFactory();
-  }
-
-  @Test
-  void createDriverDefault() throws Exception {
-    Driver driver = dataSourceFactory.createDriver(null);
-    assertTrue(driver instanceof org.postgresql.Driver);
-  }
-
-  @Test
-  void createDataSourceDefault() throws Exception {
-    DataSource dataSource = dataSourceFactory.createDataSource(null);
-    assertNotNull(dataSource);
-  }
-
-  @Test
-  void createDataSourceSimple() throws Exception {
-    Properties properties = new Properties();
-    properties.put(DataSourceFactory.JDBC_DATABASE_NAME, "db");
-    properties.put("currentSchema", "schema");
-    DataSource dataSource = dataSourceFactory.createDataSource(properties);
-    assertNotNull(dataSource);
-    assertTrue(dataSource instanceof SimpleDataSource);
-    SimpleDataSource simpleDataSource = (SimpleDataSource) dataSource;
-    assertEquals("db", simpleDataSource.getDatabaseName());
-    assertEquals("schema", simpleDataSource.getCurrentSchema());
-  }
-
-  @Test
-  void createDataSourcePooling() throws Exception {
-    Properties properties = new Properties();
-    properties.put(DataSourceFactory.JDBC_DATABASE_NAME, "db");
-    properties.put(DataSourceFactory.JDBC_INITIAL_POOL_SIZE, "5");
-    properties.put(DataSourceFactory.JDBC_MAX_POOL_SIZE, "10");
-    DataSource dataSource = dataSourceFactory.createDataSource(properties);
-    assertNotNull(dataSource);
-    assertTrue(dataSource instanceof PoolingDataSource);
-    PoolingDataSource poolingDataSource = (PoolingDataSource) dataSource;
-    assertEquals("db", poolingDataSource.getDatabaseName());
-    assertEquals(5, poolingDataSource.getInitialConnections());
-    assertEquals(10, poolingDataSource.getMaxConnections());
-  }
-
-  @Test
-  void createConnectionPoolDataSourceDefault() throws Exception {
-    ConnectionPoolDataSource dataSource = dataSourceFactory.createConnectionPoolDataSource(null);
-    assertNotNull(dataSource);
-  }
-
-  @Test
-  void createConnectionPoolDataSourceConfigured() throws Exception {
-    Properties properties = new Properties();
-    properties.put(DataSourceFactory.JDBC_DATABASE_NAME, "db");
-    ConnectionPoolDataSource dataSource =
-        dataSourceFactory.createConnectionPoolDataSource(properties);
-    assertNotNull(dataSource);
-    assertTrue(dataSource instanceof ConnectionPool);
-    ConnectionPool connectionPoolDataSource = (ConnectionPool) dataSource;
-    assertEquals("db", connectionPoolDataSource.getDatabaseName());
-  }
-
-  @Test
-  void createXADataSourceDefault() throws Exception {
-    XADataSource dataSource = dataSourceFactory.createXADataSource(null);
-    assertNotNull(dataSource);
-  }
-
-  @Test
-  void createXADataSourceConfigured() throws Exception {
-    Properties properties = new Properties();
-    properties.put(DataSourceFactory.JDBC_DATABASE_NAME, "db");
-    XADataSource dataSource = dataSourceFactory.createXADataSource(properties);
-    assertNotNull(dataSource);
-    assertTrue(dataSource instanceof PGXADataSource);
-    PGXADataSource xaDataSource = (PGXADataSource) dataSource;
-    assertEquals("db", xaDataSource.getDatabaseName());
-  }
-}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/plugin/AuthenticationPluginTest.java b/pgjdbc/src/test/java/org/postgresql/test/plugin/AuthenticationPluginTest.java
index a25a9e4..8d3b25f 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/plugin/AuthenticationPluginTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/plugin/AuthenticationPluginTest.java
@@ -24,64 +24,64 @@ import java.util.Properties;
 import java.util.function.Consumer;
 
 class AuthenticationPluginTest {
-  @BeforeAll
-  static void setUp() throws SQLException {
-    TestUtil.assumeHaveMinimumServerVersion(ServerVersion.v10);
-  }
-
-  public static class DummyAuthenticationPlugin implements AuthenticationPlugin {
-    private static Consumer<AuthenticationRequestType> onGetPassword;
-
-    @Override
-    public char[] getPassword(AuthenticationRequestType type) throws PSQLException {
-      onGetPassword.accept(type);
-
-      // Ex: "MD5" => "DUMMY-MD5"
-      return ("DUMMY-" + type.toString()).toCharArray();
+    @BeforeAll
+    static void setUp() throws SQLException {
+        TestUtil.assumeHaveMinimumServerVersion(ServerVersion.v10);
     }
-  }
 
-  private void testAuthPlugin(String username, String passwordEncryption, AuthenticationRequestType expectedType) throws SQLException {
-    createRole(username, passwordEncryption, "DUMMY-" + expectedType.toString());
-    try {
-      Properties props = new Properties();
-      props.setProperty(PGProperty.AUTHENTICATION_PLUGIN_CLASS_NAME.getName(), DummyAuthenticationPlugin.class.getName());
-      PGProperty.USER.set(props, username);
-
-      boolean[] wasCalled = {false};
-      DummyAuthenticationPlugin.onGetPassword = type -> {
-        wasCalled[0] = true;
-        assertEquals(expectedType, type, "The authentication type should match");
-      };
-      try (Connection conn = TestUtil.openDB(props)) {
-        assertTrue(wasCalled[0], "The custom authentication plugin should be invoked");
-      }
-    } finally {
-      dropRole(username);
+    private static void createRole(String username, String passwordEncryption, String password) throws SQLException {
+        try (Connection conn = TestUtil.openPrivilegedDB()) {
+            TestUtil.execute(conn, "SET password_encryption='" + passwordEncryption + "'");
+            TestUtil.execute(conn, "DROP ROLE IF EXISTS " + username);
+            TestUtil.execute(conn, "CREATE USER " + username + " WITH PASSWORD '" + password + "'");
+        }
     }
-  }
 
-  @Test
-  void authPluginMD5() throws Exception {
-    testAuthPlugin("auth_plugin_test_md5", "md5", AuthenticationRequestType.MD5_PASSWORD);
-  }
-
-  @Test
-  void authPluginSASL() throws Exception {
-    testAuthPlugin("auth_plugin_test_sasl", "scram-sha-256", AuthenticationRequestType.SASL);
-  }
-
-  private static void createRole(String username, String passwordEncryption, String password) throws SQLException {
-    try (Connection conn = TestUtil.openPrivilegedDB()) {
-      TestUtil.execute(conn, "SET password_encryption='" + passwordEncryption + "'");
-      TestUtil.execute(conn, "DROP ROLE IF EXISTS " + username);
-      TestUtil.execute(conn, "CREATE USER " + username + " WITH PASSWORD '" + password + "'");
+    private static void dropRole(String username) throws SQLException {
+        try (Connection conn = TestUtil.openPrivilegedDB()) {
+            TestUtil.execute(conn, "DROP ROLE IF EXISTS " + username);
+        }
     }
-  }
 
-  private static void dropRole(String username) throws SQLException {
-    try (Connection conn = TestUtil.openPrivilegedDB()) {
-      TestUtil.execute(conn, "DROP ROLE IF EXISTS " + username);
+    private void testAuthPlugin(String username, String passwordEncryption, AuthenticationRequestType expectedType) throws SQLException {
+        createRole(username, passwordEncryption, "DUMMY-" + expectedType.toString());
+        try {
+            Properties props = new Properties();
+            props.setProperty(PGProperty.AUTHENTICATION_PLUGIN_CLASS_NAME.getName(), DummyAuthenticationPlugin.class.getName());
+            PGProperty.USER.set(props, username);
+
+            boolean[] wasCalled = {false};
+            DummyAuthenticationPlugin.onGetPassword = type -> {
+                wasCalled[0] = true;
+                assertEquals(expectedType, type, "The authentication type should match");
+            };
+            try (Connection conn = TestUtil.openDB(props)) {
+                assertTrue(wasCalled[0], "The custom authentication plugin should be invoked");
+            }
+        } finally {
+            dropRole(username);
+        }
+    }
+
+    @Test
+    void authPluginMD5() throws Exception {
+        testAuthPlugin("auth_plugin_test_md5", "md5", AuthenticationRequestType.MD5_PASSWORD);
+    }
+
+    @Test
+    void authPluginSASL() throws Exception {
+        testAuthPlugin("auth_plugin_test_sasl", "scram-sha-256", AuthenticationRequestType.SASL);
+    }
+
+    public static class DummyAuthenticationPlugin implements AuthenticationPlugin {
+        private static Consumer<AuthenticationRequestType> onGetPassword;
+
+        @Override
+        public char[] getPassword(AuthenticationRequestType type) throws PSQLException {
+            onGetPassword.accept(type);
+
+            // Ex: "MD5" => "DUMMY-MD5"
+            return ("DUMMY-" + type.toString()).toCharArray();
+        }
     }
-  }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/plugin/PluginTestSuite.java b/pgjdbc/src/test/java/org/postgresql/test/plugin/PluginTestSuite.java
index 49c4ba9..d695476 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/plugin/PluginTestSuite.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/plugin/PluginTestSuite.java
@@ -11,7 +11,7 @@ import org.junit.runners.Suite.SuiteClasses;
 
 @RunWith(Suite.class)
 @SuiteClasses({
-    AuthenticationPluginTest.class,
+        AuthenticationPluginTest.class,
 })
 public class PluginTestSuite {
 
diff --git a/pgjdbc/src/test/java/org/postgresql/test/replication/CopyBothResponseTest.java b/pgjdbc/src/test/java/org/postgresql/test/replication/CopyBothResponseTest.java
new file mode 100644
index 0000000..137ca91
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/replication/CopyBothResponseTest.java
@@ -0,0 +1,192 @@
+/*
+ * Copyright (c) 2004, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.test.replication;
+
+import java.nio.ByteBuffer;
+import java.sql.Connection;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.concurrent.TimeUnit;
+import org.hamcrest.CoreMatchers;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.postgresql.PGConnection;
+import org.postgresql.copy.CopyDual;
+import org.postgresql.copy.CopyManager;
+import org.postgresql.core.BaseConnection;
+import org.postgresql.core.ServerVersion;
+import org.postgresql.replication.LogSequenceNumber;
+import org.postgresql.test.TestUtil;
+import org.postgresql.test.annotations.DisabledIfServerVersionBelow;
+import org.postgresql.test.annotations.tags.Replication;
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.MatcherAssert.assertThat;
+
+/**
+ * CopyBothResponse use since 9.1 PostgreSQL version for replication protocol.
+ */
+@Replication
+@DisabledIfServerVersionBelow("9.4")
+class CopyBothResponseTest {
+    private Connection sqlConnection;
+    private Connection replConnection;
+
+    @BeforeAll
+    static void beforeClass() throws Exception {
+        Connection con = TestUtil.openDB();
+        TestUtil.createTable(con, "testreplication", "pk serial primary key, name varchar(100)");
+        con.close();
+    }
+
+    @AfterAll
+    static void testAfterClass() throws Exception {
+        Connection con = TestUtil.openDB();
+        TestUtil.dropTable(con, "testreplication");
+        con.close();
+    }
+
+    @BeforeEach
+    void setUp() throws Exception {
+        sqlConnection = TestUtil.openDB();
+        replConnection = TestUtil.openReplicationConnection();
+        replConnection.setAutoCommit(true);
+    }
+
+    @AfterEach
+    void tearDown() throws Exception {
+        sqlConnection.close();
+        replConnection.close();
+    }
+
+    @Test
+    void openConnectByReplicationProtocol() throws Exception {
+        CopyManager cm = ((PGConnection) replConnection).getCopyAPI();
+
+        LogSequenceNumber logSequenceNumber = getCurrentLSN();
+        CopyDual copyDual = cm.copyDual(
+                "START_REPLICATION " + logSequenceNumber.asString());
+        try {
+            assertThat(
+                    "Replication protocol work via copy protocol and initialize as CopyBothResponse, "
+                            + "we want that first initialize will work",
+                    copyDual, CoreMatchers.notNullValue()
+            );
+        } finally {
+            copyDual.endCopy();
+        }
+    }
+
+    @Test
+    void receiveKeepAliveMessage() throws Exception {
+        CopyManager cm = ((PGConnection) replConnection).getCopyAPI();
+
+        LogSequenceNumber logSequenceNumber = getCurrentLSN();
+        CopyDual copyDual = cm.copyDual(
+                "START_REPLICATION " + logSequenceNumber.asString());
+
+        sendStandByUpdate(copyDual, logSequenceNumber, logSequenceNumber, logSequenceNumber, true);
+        ByteBuffer buf = ByteBuffer.wrap(copyDual.readFromCopy());
+
+        int code = buf.get();
+        copyDual.endCopy();
+
+        assertThat(
+                "Streaming replication start with swap keep alive message, we want that first get package will be keep alive",
+                code, equalTo((int) 'k')
+        );
+    }
+
+    @Test
+    void keedAliveContainsCorrectLSN() throws Exception {
+        CopyManager cm = ((PGConnection) replConnection).getCopyAPI();
+
+        LogSequenceNumber startLsn = getCurrentLSN();
+        CopyDual copyDual =
+                cm.copyDual("START_REPLICATION " + startLsn.asString());
+        sendStandByUpdate(copyDual, startLsn, startLsn, startLsn, true);
+
+        ByteBuffer buf = ByteBuffer.wrap(copyDual.readFromCopy());
+
+        int code = buf.get();
+        LogSequenceNumber lastLSN = LogSequenceNumber.valueOf(buf.getLong());
+        copyDual.endCopy();
+
+        assertThat(
+                "Keep alive message contain last lsn on server, we want that before start replication "
+                        + "and get keep alive message not occurs wal modifications",
+                lastLSN, CoreMatchers.equalTo(startLsn)
+        );
+    }
+
+    @Test
+    void receiveXLogData() throws Exception {
+        CopyManager cm = ((PGConnection) replConnection).getCopyAPI();
+
+        LogSequenceNumber startLsn = getCurrentLSN();
+
+        Statement st = sqlConnection.createStatement();
+        st.execute("insert into testreplication(name) values('testing get changes')");
+        st.close();
+
+        CopyDual copyDual =
+                cm.copyDual("START_REPLICATION " + startLsn.asString());
+        sendStandByUpdate(copyDual, startLsn, startLsn, startLsn, false);
+
+        ByteBuffer buf = ByteBuffer.wrap(copyDual.readFromCopy());
+
+        char code = (char) buf.get();
+        copyDual.endCopy();
+
+        assertThat(
+                "When replication starts via slot and specify LSN that lower than last LSN on server, "
+                        + "we should get all changes that occurs between two LSN",
+                code, equalTo('w')
+        );
+    }
+
+    private void sendStandByUpdate(CopyDual copyDual, LogSequenceNumber received,
+                                   LogSequenceNumber flushed, LogSequenceNumber applied, boolean replyRequired)
+            throws SQLException {
+        ByteBuffer response = ByteBuffer.allocate(1 + 8 + 8 + 8 + 8 + 1);
+        response.put((byte) 'r');
+        response.putLong(received.asLong()); //received
+        response.putLong(flushed.asLong()); //flushed
+        response.putLong(applied.asLong()); //applied
+        response.putLong(TimeUnit.MICROSECONDS.convert((System.currentTimeMillis() - 946674000000L),
+                TimeUnit.MICROSECONDS));
+        response.put(replyRequired ? (byte) 1 : (byte) 0); //reply soon as possible
+
+        byte[] standbyUpdate = response.array();
+        copyDual.writeToCopy(standbyUpdate, 0, standbyUpdate.length);
+        copyDual.flushCopy();
+    }
+
+    private LogSequenceNumber getCurrentLSN() throws SQLException {
+        Statement st = sqlConnection.createStatement();
+        ResultSet rs = null;
+        try {
+            rs = st.executeQuery("select "
+                    + (((BaseConnection) sqlConnection).haveMinimumServerVersion(ServerVersion.v10)
+                    ? "pg_current_wal_lsn()" : "pg_current_xlog_location()"));
+
+            if (rs.next()) {
+                String lsn = rs.getString(1);
+                return LogSequenceNumber.valueOf(lsn);
+            } else {
+                return LogSequenceNumber.INVALID_LSN;
+            }
+        } finally {
+            if (rs != null) {
+                rs.close();
+            }
+            st.close();
+        }
+    }
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/replication/LogSequenceNumberTest.java b/pgjdbc/src/test/java/org/postgresql/test/replication/LogSequenceNumberTest.java
new file mode 100644
index 0000000..ff8e0b6
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/replication/LogSequenceNumberTest.java
@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2016, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.test.replication;
+
+import org.junit.jupiter.api.Test;
+import org.postgresql.replication.LogSequenceNumber;
+import org.postgresql.test.annotations.tags.Replication;
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.CoreMatchers.not;
+import static org.hamcrest.CoreMatchers.notNullValue;
+import static org.hamcrest.MatcherAssert.assertThat;
+
+@Replication
+class LogSequenceNumberTest {
+    @Test
+    void notNullWhenCreateFromStr() throws Exception {
+        LogSequenceNumber result = LogSequenceNumber.valueOf("0/15D68C50");
+        assertThat(result, notNullValue());
+    }
+
+    @Test
+    void parseNotValidLSNStr() throws Exception {
+        LogSequenceNumber result = LogSequenceNumber.valueOf("15D68C55");
+        assertThat(result, equalTo(LogSequenceNumber.INVALID_LSN));
+    }
+
+    @Test
+    void parseLSNFromStringAndConvertToLong() throws Exception {
+        LogSequenceNumber result = LogSequenceNumber.valueOf("16/3002D50");
+        assertThat("64-bit number use in replication protocol, "
+                        + "that why we should can convert string represent LSN to long",
+                result.asLong(), equalTo(94539623760L)
+        );
+    }
+
+    @Test
+    void convertNumericLSNToString() throws Exception {
+        LogSequenceNumber result = LogSequenceNumber.valueOf(94539623760L);
+
+        assertThat("64-bit number use in replication protocol, "
+                        + "but more readable standard format use in logs where each 8-bit print in hex form via slash",
+                result.asString(), equalTo("16/3002D50")
+        );
+    }
+
+    @Test
+    void convertNumericLSNToString_2() throws Exception {
+        LogSequenceNumber result = LogSequenceNumber.valueOf(366383352L);
+
+        assertThat("64-bit number use in replication protocol, "
+                        + "but more readable standard format use in logs where each 8-bit print in hex form via slash",
+                result.asString(), equalTo("0/15D690F8")
+        );
+    }
+
+    @Test
+    void equalLSN() throws Exception {
+        LogSequenceNumber first = LogSequenceNumber.valueOf("0/15D690F8");
+        LogSequenceNumber second = LogSequenceNumber.valueOf("0/15D690F8");
+
+        assertThat(first, equalTo(second));
+    }
+
+    @Test
+    void equalLSNCreateByDifferentWay() throws Exception {
+        LogSequenceNumber first = LogSequenceNumber.valueOf("0/15D690F8");
+        LogSequenceNumber second = LogSequenceNumber.valueOf(366383352L);
+
+        assertThat("LSN creates as 64-bit number and as string where each 8-bit print in hex form "
+                        + "via slash represent same position in WAL should be equals",
+                first, equalTo(second)
+        );
+    }
+
+    @Test
+    void notEqualLSN() throws Exception {
+        LogSequenceNumber first = LogSequenceNumber.valueOf("0/15D690F8");
+        LogSequenceNumber second = LogSequenceNumber.valueOf("0/15D68C50");
+
+        assertThat(first, not(equalTo(second)));
+    }
+
+    @Test
+    void differentLSNHaveDifferentHash() throws Exception {
+        LogSequenceNumber first = LogSequenceNumber.valueOf("0/15D690F8");
+        LogSequenceNumber second = LogSequenceNumber.valueOf("0/15D68C50");
+
+        assertThat(first.hashCode(), not(equalTo(second.hashCode())));
+    }
+
+    @Test
+    void sameLSNHaveSameHash() throws Exception {
+        LogSequenceNumber first = LogSequenceNumber.valueOf("0/15D690F8");
+        LogSequenceNumber second = LogSequenceNumber.valueOf("0/15D690F8");
+
+        assertThat(first.hashCode(), equalTo(second.hashCode()));
+    }
+
+    @Test
+    void compareToSameValue() throws Exception {
+        LogSequenceNumber first = LogSequenceNumber.valueOf("0/15D690F8");
+        LogSequenceNumber second = LogSequenceNumber.valueOf("0/15D690F8");
+
+        assertThat(first.compareTo(second), equalTo(0));
+        assertThat(second.compareTo(first), equalTo(0));
+    }
+
+    @Test
+    void compareToPositiveValues() throws Exception {
+        LogSequenceNumber first = LogSequenceNumber.valueOf(1234);
+        LogSequenceNumber second = LogSequenceNumber.valueOf(4321);
+
+        assertThat(first.compareTo(second), equalTo(-1));
+        assertThat(second.compareTo(first), equalTo(1));
+    }
+
+    @Test
+    void compareToNegativeValues() throws Exception {
+        LogSequenceNumber first = LogSequenceNumber.valueOf(0x8000000000000000L);
+        LogSequenceNumber second = LogSequenceNumber.valueOf(0x8000000000000001L);
+
+        assertThat(first.compareTo(second), equalTo(-1));
+        assertThat(second.compareTo(first), equalTo(1));
+    }
+
+    @Test
+    void compareToMixedSign() throws Exception {
+        LogSequenceNumber first = LogSequenceNumber.valueOf(1);
+        LogSequenceNumber second = LogSequenceNumber.valueOf(0x8000000000000001L);
+
+        assertThat(first.compareTo(second), equalTo(-1));
+        assertThat(second.compareTo(first), equalTo(1));
+    }
+
+    @Test
+    void compareToWithInvalid() throws Exception {
+        LogSequenceNumber first = LogSequenceNumber.INVALID_LSN;
+        LogSequenceNumber second = LogSequenceNumber.valueOf(1);
+
+        assertThat(first.compareTo(second), equalTo(-1));
+        assertThat(second.compareTo(first), equalTo(1));
+    }
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/replication/LogicalReplicationStatusTest.java b/pgjdbc/src/test/java/org/postgresql/test/replication/LogicalReplicationStatusTest.java
new file mode 100644
index 0000000..2b3ede7
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/replication/LogicalReplicationStatusTest.java
@@ -0,0 +1,548 @@
+/*
+ * Copyright (c) 2016, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.test.replication;
+
+import java.nio.ByteBuffer;
+import java.sql.Connection;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.postgresql.PGConnection;
+import org.postgresql.core.BaseConnection;
+import org.postgresql.core.ServerVersion;
+import org.postgresql.replication.LogSequenceNumber;
+import org.postgresql.replication.PGReplicationStream;
+import org.postgresql.test.TestUtil;
+import org.postgresql.test.annotations.DisabledIfServerVersionBelow;
+import org.postgresql.test.annotations.tags.Replication;
+import static org.hamcrest.CoreMatchers.not;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.core.IsEqual.equalTo;
+
+@Replication
+@DisabledIfServerVersionBelow("9.4")
+class LogicalReplicationStatusTest {
+    private static final String SLOT_NAME = "pgjdbc_logical_replication_slot";
+
+    private Connection replicationConnection;
+    private Connection sqlConnection;
+    private Connection secondSqlConnection;
+
+    @BeforeEach
+    void setUp() throws Exception {
+        //statistics are available only to privileged users
+        sqlConnection = TestUtil.openPrivilegedDB();
+        secondSqlConnection = TestUtil.openPrivilegedDB("test_2");
+        //DriverManager.setLogWriter(new PrintWriter(System.out));
+        replicationConnection = TestUtil.openReplicationConnection();
+        TestUtil.createTable(sqlConnection, "test_logic_table",
+                "pk serial primary key, name varchar(100)");
+        TestUtil.createTable(secondSqlConnection, "test_logic_table",
+                "pk serial primary key, name varchar(100)");
+
+        TestUtil.recreateLogicalReplicationSlot(sqlConnection, SLOT_NAME, "test_decoding");
+    }
+
+    @AfterEach
+    void tearDown() throws Exception {
+        replicationConnection.close();
+        TestUtil.dropTable(sqlConnection, "test_logic_table");
+        TestUtil.dropTable(secondSqlConnection, "test_logic_table");
+        TestUtil.dropReplicationSlot(sqlConnection, SLOT_NAME);
+        secondSqlConnection.close();
+        sqlConnection.close();
+    }
+
+    @Test
+    void sentLocationEqualToLastReceiveLSN() throws Exception {
+        PGConnection pgConnection = (PGConnection) replicationConnection;
+
+        LogSequenceNumber startLSN = getCurrentLSN();
+
+        insertPreviousChanges(sqlConnection);
+
+        PGReplicationStream stream =
+                pgConnection
+                        .getReplicationAPI()
+                        .replicationStream()
+                        .logical()
+                        .withSlotName(SLOT_NAME)
+                        .withStartPosition(startLSN)
+                        .start();
+
+        final int countMessage = 3;
+
+        List<String> received = receiveMessageWithoutBlock(stream, countMessage);
+        LogSequenceNumber lastReceivedLSN = stream.getLastReceiveLSN();
+        stream.forceUpdateStatus();
+
+        LogSequenceNumber sentByServer = getSentLocationOnView();
+
+        assertThat("When changes absent on server last receive by stream LSN "
+                        + "should be equal to last sent by server LSN",
+                sentByServer, equalTo(lastReceivedLSN)
+        );
+    }
+
+    /**
+     * Test fails on PostgreSQL 9.4.5 because of a server-side bug.
+     */
+    @Test
+    @DisabledIfServerVersionBelow("9.4.8")
+    void receivedLSNDependentOnProcessMessage() throws Exception {
+        PGConnection pgConnection = (PGConnection) replicationConnection;
+
+        LogSequenceNumber startLSN = getCurrentLSN();
+
+        insertPreviousChanges(sqlConnection);
+
+        PGReplicationStream stream =
+                pgConnection
+                        .getReplicationAPI()
+                        .replicationStream()
+                        .logical()
+                        .withSlotName(SLOT_NAME)
+                        .withStartPosition(startLSN)
+                        .start();
+
+        receiveMessageWithoutBlock(stream, 1);
+        LogSequenceNumber firstLSN = stream.getLastReceiveLSN();
+
+        receiveMessageWithoutBlock(stream, 1);
+        LogSequenceNumber secondLSN = stream.getLastReceiveLSN();
+
+        assertThat("After receive each new message current LSN updates in stream",
+                firstLSN, not(equalTo(secondLSN))
+        );
+    }
+
+    @Test
+    void lastReceiveLSNCorrectOnView() throws Exception {
+        PGConnection pgConnection = (PGConnection) replicationConnection;
+
+        LogSequenceNumber startLSN = getCurrentLSN();
+
+        insertPreviousChanges(sqlConnection);
+
+        PGReplicationStream stream =
+                pgConnection
+                        .getReplicationAPI()
+                        .replicationStream()
+                        .logical()
+                        .withSlotName(SLOT_NAME)
+                        .withStartPosition(startLSN)
+                        .start();
+
+        receiveMessageWithoutBlock(stream, 2);
+        LogSequenceNumber lastReceivedLSN = stream.getLastReceiveLSN();
+        stream.forceUpdateStatus();
+
+        assertThat(
+                "Replication stream by execute forceUpdateStatus should send to view actual received position "
+                        + "that allow monitoring lag",
+                lastReceivedLSN, equalTo(getWriteLocationOnView())
+        );
+    }
+
+    @Test
+    void writeLocationCanBeLessThanSendLocation() throws Exception {
+        PGConnection pgConnection = (PGConnection) replicationConnection;
+
+        LogSequenceNumber startLSN = getCurrentLSN();
+
+        insertPreviousChanges(sqlConnection);
+
+        PGReplicationStream stream =
+                pgConnection
+                        .getReplicationAPI()
+                        .replicationStream()
+                        .logical()
+                        .withSlotName(SLOT_NAME)
+                        .withStartPosition(startLSN)
+                        .start();
+
+        receiveMessageWithoutBlock(stream, 2);
+        stream.forceUpdateStatus();
+
+        LogSequenceNumber writeLocation = getWriteLocationOnView();
+        LogSequenceNumber sentLocation = getSentLocationOnView();
+
+        assertThat(
+                "In view pg_stat_replication column write_location define which position consume client "
+                        + "but sent_location define which position was sent to client, so in current test we have 1 pending message, "
+                        + "so write and sent can't be equals",
+                writeLocation, not(equalTo(sentLocation))
+        );
+    }
+
+    @Test
+    void flushLocationEqualToSetLocation() throws Exception {
+        PGConnection pgConnection = (PGConnection) replicationConnection;
+
+        LogSequenceNumber startLSN = getCurrentLSN();
+
+        insertPreviousChanges(sqlConnection);
+
+        PGReplicationStream stream =
+                pgConnection
+                        .getReplicationAPI()
+                        .replicationStream()
+                        .logical()
+                        .withSlotName(SLOT_NAME)
+                        .withStartPosition(startLSN)
+                        .start();
+
+        receiveMessageWithoutBlock(stream, 1);
+
+        LogSequenceNumber flushLSN = stream.getLastReceiveLSN();
+        stream.setFlushedLSN(flushLSN);
+
+        //consume more messages
+        receiveMessageWithoutBlock(stream, 2);
+
+        stream.forceUpdateStatus();
+
+        LogSequenceNumber result = getFlushLocationOnView();
+
+        assertThat("Flush LSN use for define which wal can be recycled and it parameter should be "
+                        + "specify manually on replication stream, because only client "
+                        + "of replication stream now which wal not necessary. We wait that it status correct "
+                        + "send to backend and available via view, because if status will "
+                        + "not send it lead to problem when WALs never recycled",
+                result, equalTo(flushLSN)
+        );
+    }
+
+    @Test
+    void flushLocationDoNotChangeDuringReceiveMessage() throws Exception {
+        PGConnection pgConnection = (PGConnection) replicationConnection;
+
+        LogSequenceNumber startLSN = getCurrentLSN();
+
+        insertPreviousChanges(sqlConnection);
+
+        PGReplicationStream stream =
+                pgConnection
+                        .getReplicationAPI()
+                        .replicationStream()
+                        .logical()
+                        .withSlotName(SLOT_NAME)
+                        .withStartPosition(startLSN)
+                        .start();
+
+        receiveMessageWithoutBlock(stream, 1);
+        final LogSequenceNumber flushLSN = stream.getLastReceiveLSN();
+        stream.setFlushedLSN(flushLSN);
+        receiveMessageWithoutBlock(stream, 2);
+
+        assertThat(
+                "Flush LSN it parameter that specify manually on stream and they can not automatically "
+                        + "change during receive another messages, "
+                        + "because auto update can lead to problem when WAL recycled on postgres "
+                        + "because we send feedback that current position successfully flush, but in real they not flush yet",
+                stream.getLastFlushedLSN(), equalTo(flushLSN)
+        );
+    }
+
+    @Test
+    void applyLocationEqualToSetLocation() throws Exception {
+        PGConnection pgConnection = (PGConnection) replicationConnection;
+
+        LogSequenceNumber startLSN = getCurrentLSN();
+
+        insertPreviousChanges(sqlConnection);
+
+        PGReplicationStream stream =
+                pgConnection
+                        .getReplicationAPI()
+                        .replicationStream()
+                        .logical()
+                        .withSlotName(SLOT_NAME)
+                        .withStartPosition(startLSN)
+                        .start();
+
+        receiveMessageWithoutBlock(stream, 1);
+        final LogSequenceNumber applyLSN = stream.getLastReceiveLSN();
+
+        stream.setAppliedLSN(applyLSN);
+        stream.setFlushedLSN(applyLSN);
+
+        receiveMessageWithoutBlock(stream, 2);
+        stream.forceUpdateStatus();
+
+        LogSequenceNumber result = getReplayLocationOnView();
+
+        assertThat(
+                "During receive message from replication stream all feedback parameter "
+                        + "that we set to stream should be sent to backend"
+                        + "because it allow monitoring replication status and also recycle old WALs",
+                result, equalTo(applyLSN)
+        );
+    }
+
+    /**
+     * Test fails on PostgreSQL 9.4.5 because of a server-side bug.
+     */
+    @Test
+    @DisabledIfServerVersionBelow("9.4.8")
+    void applyLocationDoNotDependOnFlushLocation() throws Exception {
+        PGConnection pgConnection = (PGConnection) replicationConnection;
+
+        LogSequenceNumber startLSN = getCurrentLSN();
+
+        insertPreviousChanges(sqlConnection);
+
+        PGReplicationStream stream =
+                pgConnection
+                        .getReplicationAPI()
+                        .replicationStream()
+                        .logical()
+                        .withSlotName(SLOT_NAME)
+                        .withStartPosition(startLSN)
+                        .start();
+
+        receiveMessageWithoutBlock(stream, 1);
+        stream.setAppliedLSN(stream.getLastReceiveLSN());
+        stream.setFlushedLSN(stream.getLastReceiveLSN());
+
+        receiveMessageWithoutBlock(stream, 1);
+        stream.setFlushedLSN(stream.getLastReceiveLSN());
+
+        receiveMessageWithoutBlock(stream, 1);
+        stream.forceUpdateStatus();
+
+        LogSequenceNumber flushed = getFlushLocationOnView();
+        LogSequenceNumber applied = getReplayLocationOnView();
+
+        assertThat(
+                "Last applied LSN and last flushed LSN it two not depends parameters and they can be not equal between",
+                applied, not(equalTo(flushed))
+        );
+    }
+
+    @Test
+    void applyLocationDoNotChangeDuringReceiveMessage() throws Exception {
+        PGConnection pgConnection = (PGConnection) replicationConnection;
+
+        LogSequenceNumber startLSN = getCurrentLSN();
+
+        insertPreviousChanges(sqlConnection);
+
+        PGReplicationStream stream =
+                pgConnection
+                        .getReplicationAPI()
+                        .replicationStream()
+                        .logical()
+                        .withSlotName(SLOT_NAME)
+                        .withStartPosition(startLSN)
+                        .start();
+
+        receiveMessageWithoutBlock(stream, 1);
+        final LogSequenceNumber applyLSN = stream.getLastReceiveLSN();
+        stream.setAppliedLSN(applyLSN);
+        receiveMessageWithoutBlock(stream, 2);
+
+        assertThat(
+                "Apply LSN it parameter that specify manually on stream and they can not automatically "
+                        + "change during receive another messages, "
+                        + "because auto update can lead to problem when WAL recycled on postgres "
+                        + "because we send feedback that current position successfully flush, but in real they not flush yet",
+                stream.getLastAppliedLSN(), equalTo(applyLSN)
+        );
+    }
+
+    @Test
+    void statusCanBeSentToBackendAsynchronously() throws Exception {
+        PGConnection pgConnection = (PGConnection) replicationConnection;
+
+        final int intervalTime = 100;
+        final TimeUnit timeFormat = TimeUnit.MILLISECONDS;
+
+        LogSequenceNumber startLSN = getCurrentLSN();
+
+        insertPreviousChanges(sqlConnection);
+
+        PGReplicationStream stream =
+                pgConnection
+                        .getReplicationAPI()
+                        .replicationStream()
+                        .logical()
+                        .withSlotName(SLOT_NAME)
+                        .withStartPosition(startLSN)
+                        .withStatusInterval(intervalTime, timeFormat)
+                        .start();
+
+        receiveMessageWithoutBlock(stream, 3);
+
+        LogSequenceNumber waitLSN = stream.getLastReceiveLSN();
+
+        stream.setAppliedLSN(waitLSN);
+        stream.setFlushedLSN(waitLSN);
+
+        timeFormat.sleep(intervalTime + 1);
+
+        //get pending messages and trigger a status update on timeout
+        stream.readPending();
+
+        LogSequenceNumber flushLSN = getFlushLocationOnView();
+
+        assertThat("Status can be sent to backend by some time interval, "
+                        + "by default it parameter equals to 10 second, but in current test we change it on few millisecond "
+                        + "and wait that set status on stream will be auto send to backend",
+                flushLSN, equalTo(waitLSN)
+        );
+    }
+
+    private void insertPreviousChanges(Connection sqlConnection) throws SQLException {
+        try (Statement st = sqlConnection.createStatement()) {
+            st.execute("insert into test_logic_table(name) values('previous changes')");
+        }
+    }
+
+    @Test
+    void keepAliveServerLSNCanBeUsedToAdvanceFlushLSN() throws Exception {
+        PGConnection pgConnection = (PGConnection) replicationConnection;
+
+        LogSequenceNumber startLSN = getCurrentLSN();
+
+        PGReplicationStream stream =
+                pgConnection
+                        .getReplicationAPI()
+                        .replicationStream()
+                        .logical()
+                        .withSlotName(SLOT_NAME)
+                        .withStartPosition(startLSN)
+                        .withStatusInterval(1, TimeUnit.SECONDS)
+                        .start();
+
+        // create replication changes and poll for messages
+        insertPreviousChanges(sqlConnection);
+
+        receiveMessageWithoutBlock(stream, 3);
+
+        // client confirms flush of these changes. At this point we're in sync with server
+        LogSequenceNumber confirmedClientFlushLSN = stream.getLastReceiveLSN();
+        stream.setFlushedLSN(confirmedClientFlushLSN);
+        stream.forceUpdateStatus();
+
+        // now insert something into other DB (without replication) to generate WAL
+        insertPreviousChanges(secondSqlConnection);
+
+        TimeUnit.SECONDS.sleep(1);
+
+        // read KeepAlive messages - lastServerLSN will have advanced and we can safely confirm it
+        stream.readPending();
+
+        LogSequenceNumber lastFlushedLSN = stream.getLastFlushedLSN();
+        LogSequenceNumber lastReceivedLSN = stream.getLastReceiveLSN();
+
+        assertThat("Activity in other database will generate WAL but no XLogData "
+                        + " messages. Received LSN will begin to advance beyond of confirmed flushLSN",
+                confirmedClientFlushLSN, not(equalTo(lastReceivedLSN))
+        );
+
+        assertThat("When all XLogData messages have been processed, we can confirm "
+                        + " flush of Server LSNs in the KeepAlive messages",
+                lastFlushedLSN, equalTo(lastReceivedLSN)
+        );
+    }
+
+    private LogSequenceNumber getSentLocationOnView() throws Exception {
+        return getLSNFromView((((BaseConnection) sqlConnection).haveMinimumServerVersion(ServerVersion.v10)
+                ? "sent_lsn" : "sent_location"));
+    }
+
+    private LogSequenceNumber getWriteLocationOnView() throws Exception {
+        return getLSNFromView((((BaseConnection) sqlConnection).haveMinimumServerVersion(ServerVersion.v10)
+                ? "write_lsn" : "write_location"));
+    }
+
+    private LogSequenceNumber getFlushLocationOnView() throws Exception {
+        return getLSNFromView((((BaseConnection) sqlConnection).haveMinimumServerVersion(ServerVersion.v10)
+                ? "flush_lsn" : "flush_location"));
+    }
+
+    private LogSequenceNumber getReplayLocationOnView() throws Exception {
+        return getLSNFromView((((BaseConnection) sqlConnection).haveMinimumServerVersion(ServerVersion.v10)
+                ? "replay_lsn" : "replay_location"));
+    }
+
+    private List<String> receiveMessageWithoutBlock(PGReplicationStream stream, int count)
+            throws Exception {
+        List<String> result = new ArrayList<>(3);
+        for (int index = 0; index < count; index++) {
+            ByteBuffer message;
+            do {
+                message = stream.readPending();
+
+                if (message == null) {
+                    TimeUnit.MILLISECONDS.sleep(2);
+                }
+            } while (message == null);
+
+            result.add(toString(message));
+        }
+
+        return result;
+    }
+
+    private String toString(ByteBuffer buffer) {
+        int offset = buffer.arrayOffset();
+        byte[] source = buffer.array();
+        int length = source.length - offset;
+
+        return new String(source, offset, length);
+    }
+
+    private LogSequenceNumber getLSNFromView(String columnName) throws Exception {
+        int pid = ((PGConnection) replicationConnection).getBackendPID();
+
+        int repeatCount = 0;
+        while (true) {
+            try (
+                    Statement st = sqlConnection.createStatement();
+                    ResultSet rs = st.executeQuery("select * from pg_stat_replication where pid = " + pid)
+            ) {
+                String result = null;
+                if (rs.next()) {
+                    result = rs.getString(columnName);
+                }
+
+                if (result == null || result.isEmpty()) {
+                    //replication monitoring view updates with some delay, wait some time and try again
+                    TimeUnit.MILLISECONDS.sleep(100L);
+                    repeatCount++;
+                    if (repeatCount == 10) {
+                        return null;
+                    }
+                } else {
+                    return LogSequenceNumber.valueOf(result);
+                }
+            }
+        }
+    }
+
+    private LogSequenceNumber getCurrentLSN() throws SQLException {
+        try (Statement st = sqlConnection.createStatement();
+             ResultSet rs = st.executeQuery("select "
+                     + (((BaseConnection) sqlConnection).haveMinimumServerVersion(ServerVersion.v10)
+                     ? "pg_current_wal_lsn()" : "pg_current_xlog_location()"))
+        ) {
+            if (rs.next()) {
+                String lsn = rs.getString(1);
+                return LogSequenceNumber.valueOf(lsn);
+            } else {
+                return LogSequenceNumber.INVALID_LSN;
+            }
+        }
+    }
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/replication/LogicalReplicationTest.java b/pgjdbc/src/test/java/org/postgresql/test/replication/LogicalReplicationTest.java
new file mode 100644
index 0000000..4e9daeb
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/replication/LogicalReplicationTest.java
@@ -0,0 +1,958 @@
+/*
+ * Copyright (c) 2016, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.test.replication;
+
+import java.nio.ByteBuffer;
+import java.sql.Connection;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import org.hamcrest.CoreMatchers;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
+import org.postgresql.PGConnection;
+import org.postgresql.core.BaseConnection;
+import org.postgresql.core.ServerVersion;
+import org.postgresql.replication.LogSequenceNumber;
+import org.postgresql.replication.PGReplicationStream;
+import org.postgresql.test.TestUtil;
+import org.postgresql.test.annotations.DisabledIfServerVersionBelow;
+import org.postgresql.test.annotations.tags.Replication;
+import org.postgresql.util.PSQLException;
+import org.postgresql.util.PSQLState;
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.Assume.assumeThat;
+import static org.junit.jupiter.api.Assertions.fail;
+
+@Replication
+@DisabledIfServerVersionBelow("9.4")
+class LogicalReplicationTest {
+    private static final String SLOT_NAME = "pgjdbc_logical_replication_slot";
+
+    private Connection replConnection;
+    private Connection sqlConnection;
+
+    private static String toString(ByteBuffer buffer) {
+        int offset = buffer.arrayOffset();
+        byte[] source = buffer.array();
+        int length = source.length - offset;
+
+        return new String(source, offset, length);
+    }
+
+    @BeforeEach
+    void setUp() throws Exception {
+        sqlConnection = TestUtil.openPrivilegedDB();
+        //DriverManager.setLogWriter(new PrintWriter(System.out));
+        replConnection = TestUtil.openReplicationConnection();
+        TestUtil.createTable(sqlConnection, "test_logic_table",
+                "pk serial primary key, name varchar(100)");
+
+        TestUtil.recreateLogicalReplicationSlot(sqlConnection, SLOT_NAME, "test_decoding");
+    }
+
+    @AfterEach
+    void tearDown() throws Exception {
+        replConnection.close();
+        TestUtil.dropTable(sqlConnection, "test_logic_table");
+        TestUtil.dropReplicationSlot(sqlConnection, SLOT_NAME);
+        sqlConnection.close();
+    }
+
+    @Test
+    @Timeout(1)
+    void notAvailableStartNotExistReplicationSlot() throws Exception {
+        PGConnection pgConnection = (PGConnection) replConnection;
+
+        LogSequenceNumber lsn = getCurrentLSN();
+
+        try {
+            PGReplicationStream stream =
+                    pgConnection
+                            .getReplicationAPI()
+                            .replicationStream()
+                            .logical()
+                            .withSlotName("notExistSlotName")
+                            .withStartPosition(lsn)
+                            .start();
+
+            fail("For logical decoding replication slot name it required parameter "
+                    + "that should be create on server before start replication");
+
+        } catch (PSQLException e) {
+            String state = e.getSQLState();
+
+            assertThat("When replication slot doesn't exists, server can't start replication "
+                            + "and should throw exception about it",
+                    state, equalTo(PSQLState.UNDEFINED_OBJECT.getState())
+            );
+        }
+    }
+
+    @Test
+    @Timeout(1)
+    void receiveChangesOccursBeforeStartReplication() throws Exception {
+        PGConnection pgConnection = (PGConnection) replConnection;
+
+        LogSequenceNumber lsn = getCurrentLSN();
+
+        Statement st = sqlConnection.createStatement();
+        st.execute("insert into test_logic_table(name) values('previous value')");
+        st.close();
+
+        PGReplicationStream stream =
+                pgConnection
+                        .getReplicationAPI()
+                        .replicationStream()
+                        .logical()
+                        .withSlotName(SLOT_NAME)
+                        .withStartPosition(lsn)
+                        .withSlotOption("include-xids", false)
+                        .start();
+
+        String result = group(receiveMessage(stream, 3));
+
+        String wait = group(
+                Arrays.asList(
+                        "BEGIN",
+                        "table public.test_logic_table: INSERT: pk[integer]:1 name[character varying]:'previous value'",
+                        "COMMIT"
+                )
+        );
+
+        assertThat("Logical replication can be start from some LSN position and all changes that "
+                        + "occurs between last server LSN and specified LSN position should be available to read "
+                        + "via stream in correct order",
+                result, equalTo(wait)
+        );
+    }
+
+    @Test
+    @Timeout(1)
+    void receiveChangesAfterStartReplication() throws Exception {
+        PGConnection pgConnection = (PGConnection) replConnection;
+
+        LogSequenceNumber lsn = getCurrentLSN();
+
+        PGReplicationStream stream =
+                pgConnection
+                        .getReplicationAPI()
+                        .replicationStream()
+                        .logical()
+                        .withSlotName(SLOT_NAME)
+                        .withStartPosition(lsn)
+                        .withSlotOption("include-xids", false)
+                        .withSlotOption("skip-empty-xacts", true)
+                        .start();
+
+        List<String> result = new ArrayList<>();
+
+        Statement st = sqlConnection.createStatement();
+        st.execute(
+                "insert into test_logic_table(name) values('first message after start replication')");
+        st.close();
+
+        result.addAll(receiveMessage(stream, 3));
+
+        st = sqlConnection.createStatement();
+        st.execute(
+                "insert into test_logic_table(name) values('second message after start replication')");
+        st.close();
+
+        result.addAll(receiveMessage(stream, 3));
+
+        String groupedResult = group(result);
+
+        String wait = group(Arrays.asList(
+                "BEGIN",
+                "table public.test_logic_table: INSERT: pk[integer]:1 name[character varying]:'first message after start replication'",
+                "COMMIT",
+                "BEGIN",
+                "table public.test_logic_table: INSERT: pk[integer]:2 name[character varying]:'second message after start replication'",
+                "COMMIT"
+        ));
+
+        assertThat(
+                "After starting replication, from stream should be available also new changes that occurs after start replication",
+                groupedResult, equalTo(wait)
+        );
+    }
+
+    @Test
+    @Timeout(1)
+    void startFromCurrentServerLSNWithoutSpecifyLSNExplicitly() throws Exception {
+        PGConnection pgConnection = (PGConnection) replConnection;
+
+        PGReplicationStream stream =
+                pgConnection
+                        .getReplicationAPI()
+                        .replicationStream()
+                        .logical()
+                        .withSlotName(SLOT_NAME)
+                        .withSlotOption("include-xids", false)
+                        .withSlotOption("skip-empty-xacts", true)
+                        .start();
+
+        Statement st = sqlConnection.createStatement();
+        st.execute("insert into test_logic_table(name) values('last server message')");
+        st.close();
+
+        String result = group(receiveMessage(stream, 3));
+
+        String wait = group(Arrays.asList(
+                "BEGIN",
+                "table public.test_logic_table: INSERT: pk[integer]:1 name[character varying]:'last server message'",
+                "COMMIT"
+        ));
+
+        assertThat(
+                "When start LSN position not specify explicitly, wal should be stream from actual server position",
+                result, equalTo(wait));
+    }
+
+    @Test
+    @Timeout(1)
+    void afterStartStreamingDBSlotStatusActive() throws Exception {
+        PGConnection pgConnection = (PGConnection) replConnection;
+
+        PGReplicationStream stream =
+                pgConnection
+                        .getReplicationAPI()
+                        .replicationStream()
+                        .logical()
+                        .withSlotName(SLOT_NAME)
+                        .withSlotOption("include-xids", false)
+                        .withSlotOption("skip-empty-xacts", true)
+                        .start();
+
+        boolean isActive = isActiveOnView();
+
+        assertThat(
+                "After start streaming, database status should be update on view pg_replication_slots to active",
+                isActive, equalTo(true)
+        );
+    }
+
+    /**
+     * <p>Bug in PostgreSQL that should be fixed in version 10 after the code review patch <a
+     * href="http://www.postgresql.org/message-id/CAFgjRd3hdYOa33m69TbeOfNNer2BZbwa8FFjt2V5VFzTBvUU3w@mail.gmail.com">
+     * Stopping logical replication protocol</a>.</p>
+     *
+     * <p>If you run this test on a version before 10 it fails with a timeout, because PostgreSQL
+     * keeps waiting for new changes and ignores messages from the client while waiting.</p>
+     */
+    @Test
+    @Timeout(1)
+    @DisabledIfServerVersionBelow("11.1")
+    void afterCloseReplicationStreamDBSlotStatusNotActive() throws Exception {
+        PGConnection pgConnection = (PGConnection) replConnection;
+
+        PGReplicationStream stream =
+                pgConnection
+                        .getReplicationAPI()
+                        .replicationStream()
+                        .logical()
+                        .withSlotName(SLOT_NAME)
+                        .withSlotOption("include-xids", false)
+                        .withSlotOption("skip-empty-xacts", true)
+                        .start();
+
+        boolean isActive = isActiveOnView();
+        assumeThat(isActive, equalTo(true));
+
+        stream.close();
+
+        isActive = isActiveOnView();
+        assertThat("Execute close method on PGREplicationStream should lead to stop replication, "
+                        + "as result we wait that on view pg_replication_slots status for slot will change to no active",
+                isActive, equalTo(false)
+        );
+    }
+
+    /**
+     * Closing the replication {@link java.sql.Connection} itself (rather than the
+     * stream) should also release the slot, flipping it to inactive in
+     * {@code pg_replication_slots}.
+     */
+    @Test
+    @Timeout(1)
+    void afterCloseConnectionDBSLotStatusNotActive() throws Exception {
+        PGConnection pgConnection = (PGConnection) replConnection;
+
+        LogSequenceNumber lsn = getCurrentLSN();
+
+        PGReplicationStream stream =
+                pgConnection
+                        .getReplicationAPI()
+                        .replicationStream()
+                        .logical()
+                        .withSlotName(SLOT_NAME)
+                        .withStartPosition(lsn)
+                        .withSlotOption("include-xids", false)
+                        .withSlotOption("skip-empty-xacts", true)
+                        .start();
+
+        boolean isActive = isActiveOnView();
+        assumeThat(isActive, equalTo(true));
+
+        replConnection.close();
+
+        isActive = isActiveOnView();
+        // We do not wait for a server reply about the closed connection, so the
+        // pg_replication_slots view may be updated with some delay; retry once
+        // after a short pause before failing the check.
+        if (isActive) {
+            TimeUnit.MILLISECONDS.sleep(200L);
+            isActive = isActiveOnView();
+        }
+
+        assertThat(
+                "Execute close method on Connection should lead to stop replication as fast as possible, "
+                        + "as result we wait that on view pg_replication_slots status for slot will change to no active",
+                isActive, equalTo(false)
+        );
+    }
+
+    /**
+     * <p>Exercises a PostgreSQL bug that was fixed for version 10 by the patch reviewed in <a
+     * href="http://www.postgresql.org/message-id/CAFgjRd3hdYOa33m69TbeOfNNer2BZbwa8FFjt2V5VFzTBvUU3w@mail.gmail.com">
+     * Stopping logical replication protocol</a>.</p>
+     *
+     * <p>On server versions before 10 this test fails with a timeout, because the server keeps
+     * waiting for new changes and ignores messages from the client while it waits.</p>
+     */
+    @Test
+    @Timeout(10)
+    @DisabledIfServerVersionBelow("12.1")
+    void duringSendBigTransactionConnectionCloseSlotStatusNotActive() throws Exception {
+        PGConnection pgConnection = (PGConnection) replConnection;
+
+        LogSequenceNumber lsn = getCurrentLSN();
+
+        // Produce a large transaction so the server is mid-delivery when the
+        // connection is closed below.
+        Statement st = sqlConnection.createStatement();
+        st.execute("insert into test_logic_table\n"
+                + "  select id, md5(random()::text) as name from generate_series(1, 200000) as id;");
+        st.close();
+
+        PGReplicationStream stream =
+                pgConnection
+                        .getReplicationAPI()
+                        .replicationStream()
+                        .logical()
+                        .withStartPosition(lsn)
+                        .withSlotName(SLOT_NAME)
+                        .withSlotOption("include-xids", false)
+                        .withSlotOption("skip-empty-xacts", true)
+                        .start();
+
+        // Block until the first decoded message arrives, then drop the connection.
+        stream.read();
+
+        replConnection.close();
+
+        boolean isActive = isActiveOnView();
+
+        /*
+         * we don't wait for replay from server about stop connection that's why some
+         * delay exists on update view and should wait some time before check view
+         */
+        if (isActive) {
+            TimeUnit.SECONDS.sleep(2L);
+            isActive = isActiveOnView();
+        }
+
+        assertThat(
+                "Execute close method on Connection should lead to stop replication as fast as possible, "
+                        + "as result we wait that on view pg_replication_slots status for slot will change to no active",
+                isActive, equalTo(false)
+        );
+    }
+
+    /**
+     * <p>Exercises a PostgreSQL bug that was fixed for version 10 by the patch reviewed in <a
+     * href="http://www.postgresql.org/message-id/CAFgjRd3hdYOa33m69TbeOfNNer2BZbwa8FFjt2V5VFzTBvUU3w@mail.gmail.com">
+     * Stopping logical replication protocol</a>.</p>
+     *
+     * <p>On server versions before 10 this test fails with a timeout, because the server keeps
+     * waiting for new changes and ignores messages from the client while it waits.</p>
+     */
+    @Test
+    @Timeout(60)
+    @DisabledIfServerVersionBelow("11.1")
+    void duringSendBigTransactionReplicationStreamCloseNotActive() throws Exception {
+        PGConnection pgConnection = (PGConnection) replConnection;
+
+        LogSequenceNumber startPosition = getCurrentLSN();
+
+        // Produce a large transaction so the stream is mid-delivery when closed.
+        Statement statement = sqlConnection.createStatement();
+        statement.execute("insert into test_logic_table\n"
+                + "  select id, md5(random()::text) as name from generate_series(1, 200000) as id;");
+        statement.close();
+
+        PGReplicationStream stream =
+                pgConnection
+                        .getReplicationAPI()
+                        .replicationStream()
+                        .logical()
+                        .withStartPosition(startPosition)
+                        .withSlotName(SLOT_NAME)
+                        .withSlotOption("include-xids", false)
+                        .withSlotOption("skip-empty-xacts", true)
+                        .start();
+
+        // Block until the first decoded message arrives, then close mid-stream.
+        stream.read();
+        stream.close();
+
+        // Once the server has acknowledged the stopped stream, the view is
+        // already expected to be up to date — no retry needed here.
+        boolean active = isActiveOnView();
+        assertThat("Execute close method on PGREplicationStream should lead to stop replication, "
+                        + "as result we wait that on view pg_replication_slots status for slot will change to no active",
+                active, equalTo(false)
+        );
+    }
+
+    // TODO(review): reported to fail because the backend for logical decoding does
+    // not reply with CommandComplete & ReadyForQuery — confirm on current servers.
+    /**
+     * Closing a stream and reopening it at the same start LSN should replay the
+     * same WAL records, so the single inserted row is expected to arrive twice.
+     */
+    @Test
+    @Timeout(5)
+    void repeatWalPositionTwice() throws Exception {
+        PGConnection pgConnection = (PGConnection) replConnection;
+
+        LogSequenceNumber startLSN = getCurrentLSN();
+
+        Statement st = sqlConnection.createStatement();
+        st.execute("insert into test_logic_table(name) values('message to repeat')");
+        st.close();
+
+        PGReplicationStream stream =
+                pgConnection
+                        .getReplicationAPI()
+                        .replicationStream()
+                        .logical()
+                        .withSlotName(SLOT_NAME)
+                        .withStartPosition(startLSN)
+                        .withSlotOption("include-xids", false)
+                        .withSlotOption("skip-empty-xacts", true)
+                        .start();
+
+        List<String> result = new ArrayList<>();
+        result.addAll(receiveMessage(stream, 3));
+
+        // Drop the connection and wait for the server to mark the slot inactive
+        // before reconnecting at the very same start position.
+        replConnection.close();
+        waitStopReplicationSlot();
+
+        replConnection = TestUtil.openReplicationConnection();
+        pgConnection = (PGConnection) replConnection;
+
+        stream =
+                pgConnection
+                        .getReplicationAPI()
+                        .replicationStream()
+                        .logical()
+                        .withSlotName(SLOT_NAME)
+                        .withStartPosition(startLSN)
+                        .withSlotOption("include-xids", false)
+                        .withSlotOption("skip-empty-xacts", true)
+                        .start();
+
+        result.addAll(receiveMessage(stream, 3));
+
+        String groupedResult = group(result);
+        String wait = group(Arrays.asList(
+                "BEGIN",
+                "table public.test_logic_table: INSERT: pk[integer]:1 name[character varying]:'message to repeat'",
+                "COMMIT",
+                "BEGIN",
+                "table public.test_logic_table: INSERT: pk[integer]:1 name[character varying]:'message to repeat'",
+                "COMMIT"
+        ));
+
+        assertThat("Logical replication stream after start streaming can be close and "
+                        + "reopen on previous LSN, that allow reply wal logs, if they was not recycled yet",
+                groupedResult, equalTo(wait)
+        );
+    }
+
+    @Test
+    @Timeout(3)
+    void doesNotHavePendingMessageWhenStartFromLastLSN() throws Exception {
+        PGConnection pgConnection = (PGConnection) replConnection;
+
+        // Start streaming from the server's current position, so no earlier
+        // changes can possibly be queued for delivery.
+        PGReplicationStream stream =
+                pgConnection
+                        .getReplicationAPI()
+                        .replicationStream()
+                        .logical()
+                        .withSlotName(SLOT_NAME)
+                        .withStartPosition(getCurrentLSN())
+                        .start();
+
+        ByteBuffer pending = stream.readPending();
+
+        assertThat("Read pending message allow without lock on socket read message, "
+                        + "and if message absent return null. In current test we start replication from last LSN on server, "
+                        + "so changes absent on server and readPending message will always lead to null ByteBuffer",
+                pending, equalTo(null)
+        );
+    }
+
+    @Test
+    @Timeout(3)
+    void readPreviousChangesWithoutBlock() throws Exception {
+        PGConnection pgConnection = (PGConnection) replConnection;
+
+        LogSequenceNumber startPosition = getCurrentLSN();
+
+        // Commit a change before the stream is opened, so the data is already
+        // available on the server when reading starts.
+        Statement statement = sqlConnection.createStatement();
+        statement.execute("insert into test_logic_table(name) values('previous changes')");
+        statement.close();
+
+        PGReplicationStream stream =
+                pgConnection
+                        .getReplicationAPI()
+                        .replicationStream()
+                        .logical()
+                        .withSlotName(SLOT_NAME)
+                        .withStartPosition(startPosition)
+                        .withSlotOption("include-xids", false)
+                        .withSlotOption("skip-empty-xacts", true)
+                        .start();
+
+        String actual = group(receiveMessageWithoutBlock(stream, 3));
+        String expected = group(Arrays.asList(
+                "BEGIN",
+                "table public.test_logic_table: INSERT: pk[integer]:1 name[character varying]:'previous changes'",
+                "COMMIT"
+        ));
+
+        assertThat(
+                "Messages from stream can be read by readPending method for avoid long block on Socket, "
+                        + "in current test we wait that behavior will be same as for read message with block",
+                actual, equalTo(expected)
+        );
+    }
+
+    @Test
+    @Timeout(3)
+    void readActualChangesWithoutBlock() throws Exception {
+        PGConnection pgConnection = (PGConnection) replConnection;
+
+        PGReplicationStream stream =
+                pgConnection
+                        .getReplicationAPI()
+                        .replicationStream()
+                        .logical()
+                        .withSlotName(SLOT_NAME)
+                        .withStartPosition(getCurrentLSN())
+                        .withSlotOption("include-xids", false)
+                        .withSlotOption("skip-empty-xacts", true)
+                        .start();
+
+        // Commit the change only after the stream is open, so it is delivered
+        // as a fresh message rather than replayed history.
+        Statement statement = sqlConnection.createStatement();
+        statement.execute("insert into test_logic_table(name) values('actual changes')");
+        statement.close();
+
+        String actual = group(receiveMessageWithoutBlock(stream, 3));
+        String expected = group(Arrays.asList(
+                "BEGIN",
+                "table public.test_logic_table: INSERT: pk[integer]:1 name[character varying]:'actual changes'",
+                "COMMIT"
+        ));
+
+        assertThat(
+                "Messages from stream can be read by readPending method for avoid long block on Socket, "
+                        + "in current test we wait that behavior will be same as for read message with block",
+                actual, equalTo(expected)
+        );
+    }
+
+    /**
+     * Verifies the stream keeps the connection alive: a reader task started with a
+     * status interval shorter than {@code wal_sender_timeout} must still be running
+     * (not done, not failed) after 5 seconds.
+     */
+    @Test
+    @Timeout(10)
+    void avoidTimeoutDisconnectWithDefaultStatusInterval() throws Exception {
+        final int statusInterval = getKeepAliveTimeout();
+
+        ExecutorService executor = Executors.newSingleThreadExecutor();
+        // Parameterized type (the original used a raw Future).
+        Future<Object> future = null;
+        boolean done;
+        try {
+            // Lambda in place of an anonymous Callable<Object>; checked
+            // exceptions from stream.read() keep it a Callable, not a Runnable.
+            future = executor.submit(() -> {
+                PGConnection pgConnection = (PGConnection) replConnection;
+
+                PGReplicationStream stream =
+                        pgConnection
+                                .getReplicationAPI()
+                                .replicationStream()
+                                .logical()
+                                .withSlotName(SLOT_NAME)
+                                .withStartPosition(getCurrentLSN())
+                                // Report status three times per keep-alive period;
+                                // Math.round was redundant on an int division.
+                                .withStatusInterval(statusInterval / 3, TimeUnit.MILLISECONDS)
+                                .start();
+
+                while (!Thread.interrupted()) {
+                    stream.read();
+                }
+
+                return null;
+            });
+
+            future.get(5, TimeUnit.SECONDS);
+            done = future.isDone();
+        } catch (TimeoutException timeout) {
+            // Timing out here is the expected, healthy outcome: the reader is
+            // still alive because keep-alives prevented a server disconnect.
+            done = future.isDone();
+        } finally {
+            executor.shutdownNow();
+        }
+
+        assertThat(
+                "ReplicationStream should periodically send keep alive message to postgresql to avoid disconnect from server",
+                done, CoreMatchers.equalTo(false)
+        );
+    }
+
+    /**
+     * Without flush/apply feedback, reconnecting with {@code INVALID_LSN} restarts
+     * streaming from the slot's restart LSN, so the first transaction is expected
+     * to be delivered twice (see the expected list below).
+     */
+    @Test
+    void restartReplicationFromRestartSlotLSNWhenFeedbackAbsent() throws Exception {
+        PGConnection pgConnection = (PGConnection) replConnection;
+
+        LogSequenceNumber startLSN = getCurrentLSN();
+
+        PGReplicationStream stream =
+                pgConnection
+                        .getReplicationAPI()
+                        .replicationStream()
+                        .logical()
+                        .withSlotName(SLOT_NAME)
+                        .withStartPosition(startLSN)
+                        .withSlotOption("include-xids", false)
+                        .withSlotOption("skip-empty-xacts", true)
+                        .start();
+
+        Statement st = sqlConnection.createStatement();
+        st.execute("insert into test_logic_table(name) values('first tx changes')");
+        st.close();
+
+        st = sqlConnection.createStatement();
+        st.execute("insert into test_logic_table(name) values('second tx change')");
+        st.close();
+
+        // Consume the first transaction but deliberately send no feedback.
+        List<String> consumedData = new ArrayList<>();
+        consumedData.addAll(receiveMessageWithoutBlock(stream, 3));
+
+        //emulate replication break
+        replConnection.close();
+        waitStopReplicationSlot();
+
+        replConnection = TestUtil.openReplicationConnection();
+        pgConnection = (PGConnection) replConnection;
+        stream =
+                pgConnection
+                        .getReplicationAPI()
+                        .replicationStream()
+                        .logical()
+                        .withSlotName(SLOT_NAME)
+                        .withStartPosition(LogSequenceNumber.INVALID_LSN) /* Invalid LSN indicate for start from restart lsn */
+                        .withSlotOption("include-xids", false)
+                        .withSlotOption("skip-empty-xacts", true)
+                        .start();
+
+        consumedData.addAll(receiveMessageWithoutBlock(stream, 3));
+        String result = group(consumedData);
+
+        // The first transaction appears twice: once before and once after the break.
+        String wait = group(Arrays.asList(
+                "BEGIN",
+                "table public.test_logic_table: INSERT: pk[integer]:1 name[character varying]:'first tx changes'",
+                "COMMIT",
+                "BEGIN",
+                "table public.test_logic_table: INSERT: pk[integer]:1 name[character varying]:'first tx changes'",
+                "COMMIT"
+        ));
+
+        assertThat(
+                "If was consume message via logical replication stream but wasn't send feedback about apply and flush "
+                        + "consumed LSN, if replication crash, server should restart from last success applied lsn, "
+                        + "in this case it lsn of start replication slot, so we should consume first 3 message twice",
+                result, equalTo(wait)
+        );
+    }
+
+    /**
+     * With flush/apply feedback sent (via {@code forceUpdateStatus()}) before the
+     * break, reconnecting with {@code INVALID_LSN} resumes after the first
+     * transaction, so each transaction is expected to be consumed exactly once.
+     */
+    @Test
+    void replicationRestartFromLastFeedbackPosition() throws Exception {
+        PGConnection pgConnection = (PGConnection) replConnection;
+
+        LogSequenceNumber startLSN = getCurrentLSN();
+
+        PGReplicationStream stream =
+                pgConnection
+                        .getReplicationAPI()
+                        .replicationStream()
+                        .logical()
+                        .withSlotName(SLOT_NAME)
+                        .withStartPosition(startLSN)
+                        .withSlotOption("include-xids", false)
+                        .withSlotOption("skip-empty-xacts", true)
+                        .start();
+
+        Statement st = sqlConnection.createStatement();
+        st.execute("insert into test_logic_table(name) values('first tx changes')");
+        st.close();
+
+        st = sqlConnection.createStatement();
+        st.execute("insert into test_logic_table(name) values('second tx change')");
+        st.close();
+
+        // Consume the first transaction and report it as flushed and applied.
+        List<String> consumedData = new ArrayList<>();
+        consumedData.addAll(receiveMessageWithoutBlock(stream, 3));
+        stream.setFlushedLSN(stream.getLastReceiveLSN());
+        stream.setAppliedLSN(stream.getLastReceiveLSN());
+        stream.forceUpdateStatus();
+
+        //emulate replication break
+        replConnection.close();
+        waitStopReplicationSlot();
+
+        replConnection = TestUtil.openReplicationConnection();
+        pgConnection = (PGConnection) replConnection;
+        stream =
+                pgConnection
+                        .getReplicationAPI()
+                        .replicationStream()
+                        .logical()
+                        .withSlotName(SLOT_NAME)
+                        .withStartPosition(LogSequenceNumber.INVALID_LSN) /* Invalid LSN indicate for start from restart lsn */
+                        .withSlotOption("include-xids", false)
+                        .withSlotOption("skip-empty-xacts", true)
+                        .start();
+
+        consumedData.addAll(receiveMessageWithoutBlock(stream, 3));
+        String result = group(consumedData);
+
+        // No duplicates expected: second transaction follows the first exactly once.
+        String wait = group(Arrays.asList(
+                "BEGIN",
+                "table public.test_logic_table: INSERT: pk[integer]:1 name[character varying]:'first tx changes'",
+                "COMMIT",
+                "BEGIN",
+                "table public.test_logic_table: INSERT: pk[integer]:2 name[character varying]:'second tx change'",
+                "COMMIT"
+        ));
+
+        assertThat(
+                "When we add feedback about applied lsn to replication stream(in this case it's force update status)"
+                        + "after restart consume changes via this slot should be started from last success lsn that "
+                        + "we send before via force status update, that why we wait consume both transaction without duplicates",
+                result, equalTo(wait));
+    }
+
+    /**
+     * Same as {@code replicationRestartFromLastFeedbackPosition}, but the two
+     * transactions are committed from two concurrently open connections, so their
+     * WAL records interleave; feedback must still prevent duplicate delivery.
+     */
+    @Test
+    void replicationRestartFromLastFeedbackPositionParallelTransaction() throws Exception {
+        PGConnection pgConnection = (PGConnection) replConnection;
+
+        LogSequenceNumber startLSN = getCurrentLSN();
+
+        PGReplicationStream stream =
+                pgConnection
+                        .getReplicationAPI()
+                        .replicationStream()
+                        .logical()
+                        .withSlotName(SLOT_NAME)
+                        .withStartPosition(startLSN)
+                        .withSlotOption("include-xids", false)
+                        .withSlotOption("skip-empty-xacts", true)
+                        .start();
+
+        // Two parallel transactions, both open before either commits.
+        Connection tx1Connection = TestUtil.openPrivilegedDB();
+        tx1Connection.setAutoCommit(false);
+
+        Connection tx2Connection = TestUtil.openPrivilegedDB();
+        tx2Connection.setAutoCommit(false);
+
+        Statement stTx1 = tx1Connection.createStatement();
+        Statement stTx2 = tx2Connection.createStatement();
+
+        stTx1.execute("BEGIN");
+        stTx2.execute("BEGIN");
+
+        stTx1.execute("insert into test_logic_table(name) values('first tx changes')");
+        stTx2.execute("insert into test_logic_table(name) values('second tx changes')");
+
+        tx1Connection.commit();
+        tx2Connection.commit();
+
+        tx1Connection.close();
+        tx2Connection.close();
+
+        // Consume the first transaction and report it as flushed and applied.
+        List<String> consumedData = new ArrayList<>();
+        consumedData.addAll(receiveMessageWithoutBlock(stream, 3));
+        stream.setFlushedLSN(stream.getLastReceiveLSN());
+        stream.setAppliedLSN(stream.getLastReceiveLSN());
+
+        stream.forceUpdateStatus();
+
+        //emulate replication break
+        replConnection.close();
+        waitStopReplicationSlot();
+
+        replConnection = TestUtil.openReplicationConnection();
+        pgConnection = (PGConnection) replConnection;
+        stream =
+                pgConnection
+                        .getReplicationAPI()
+                        .replicationStream()
+                        .logical()
+                        .withSlotName(SLOT_NAME)
+                        .withStartPosition(LogSequenceNumber.INVALID_LSN) /* Invalid LSN indicate for start from restart lsn */
+                        .withSlotOption("include-xids", false)
+                        .withSlotOption("skip-empty-xacts", true)
+                        .start();
+
+        // A third transaction keeps the stream busy after reconnect.
+        Statement st = sqlConnection.createStatement();
+        st.execute("insert into test_logic_table(name) values('third tx changes')");
+        st.close();
+
+        consumedData.addAll(receiveMessageWithoutBlock(stream, 3));
+        String result = group(consumedData);
+
+        String wait = group(Arrays.asList(
+                "BEGIN",
+                "table public.test_logic_table: INSERT: pk[integer]:1 name[character varying]:'first tx changes'",
+                "COMMIT",
+                "BEGIN",
+                "table public.test_logic_table: INSERT: pk[integer]:2 name[character varying]:'second tx changes'",
+                "COMMIT"
+        ));
+
+        assertThat(
+                "When we add feedback about applied lsn to replication stream(in this case it's force update status)"
+                        + "after restart consume changes via this slot should be started from last success lsn that "
+                        + "we send before via force status update, that why we wait consume both transaction without duplicates",
+                result, equalTo(wait));
+    }
+
+    /**
+     * Polls {@code pg_replication_slots} until the slot is reported inactive.
+     * The server may need a moment to notice a dropped walsender connection.
+     * Fix: the original leaked the PreparedStatement/ResultSet if
+     * {@code executeQuery} threw; try-with-resources closes them on all paths.
+     */
+    private void waitStopReplicationSlot() throws SQLException, InterruptedException {
+        while (true) {
+            try (PreparedStatement statement =
+                    sqlConnection.prepareStatement(
+                            "select 1 from pg_replication_slots where slot_name = ? and active = true"
+                    )) {
+                statement.setString(1, SLOT_NAME);
+                try (ResultSet rs = statement.executeQuery()) {
+                    if (!rs.next()) {
+                        return;
+                    }
+                }
+            }
+
+            TimeUnit.MILLISECONDS.sleep(10);
+        }
+    }
+
+    /**
+     * Reads {@code wal_sender_timeout} from {@code pg_settings} and normalizes it
+     * to milliseconds when the unit is reported as {@code sec}; returns 0 when the
+     * setting is absent.
+     * Fix: the original never closed its Statement/ResultSet (resource leak);
+     * try-with-resources guarantees closure on all paths.
+     */
+    private int getKeepAliveTimeout() throws SQLException {
+        try (Statement statement = sqlConnection.createStatement();
+             ResultSet resultSet = statement.executeQuery(
+                     "select setting, unit from pg_settings where name = 'wal_sender_timeout'")) {
+            int result = 0;
+            if (resultSet.next()) {
+                result = resultSet.getInt(1);
+                String unit = resultSet.getString(2);
+                if ("sec".equals(unit)) {
+                    result = (int) TimeUnit.SECONDS.toMillis(result);
+                }
+            }
+            return result;
+        }
+    }
+
+    /**
+     * Returns whether the test slot is shown as {@code active} in
+     * {@code pg_replication_slots}; {@code false} when the slot row is absent.
+     * Fixes: the original leaked the Statement/ResultSet on an exception path and
+     * built the SQL by string concatenation — a parameterized statement is used
+     * instead, consistent with {@code waitStopReplicationSlot}.
+     */
+    private boolean isActiveOnView() throws SQLException {
+        try (PreparedStatement st = sqlConnection.prepareStatement(
+                "select active from pg_replication_slots where slot_name = ?")) {
+            st.setString(1, SLOT_NAME);
+            try (ResultSet rs = st.executeQuery()) {
+                return rs.next() && rs.getBoolean("active");
+            }
+        }
+    }
+
+    /**
+     * Joins the decoded messages with newlines so lists can be compared
+     * order-sensitively as a single string (empty list yields "").
+     * Replaces the hand-rolled StringBuilder loop with {@link String#join},
+     * which has identical semantics.
+     */
+    private String group(List<String> messages) {
+        return String.join("\n", messages);
+    }
+
+    /** Blocking read of exactly {@code count} decoded messages from the stream. */
+    private List<String> receiveMessage(PGReplicationStream stream, int count) throws SQLException {
+        List<String> messages = new ArrayList<>(count);
+        while (messages.size() < count) {
+            messages.add(toString(stream.read()));
+        }
+        return messages;
+    }
+
+    /**
+     * Non-blocking variant of {@code receiveMessage}: polls {@code readPending()}
+     * and sleeps briefly between attempts until {@code count} messages arrive.
+     * Fix: the result list was presized with a hard-coded 3 instead of
+     * {@code count}.
+     */
+    private List<String> receiveMessageWithoutBlock(PGReplicationStream stream, int count)
+            throws Exception {
+        List<String> result = new ArrayList<>(count);
+        for (int index = 0; index < count; index++) {
+            ByteBuffer message;
+            do {
+                message = stream.readPending();
+
+                if (message == null) {
+                    TimeUnit.MILLISECONDS.sleep(2);
+                }
+            } while (message == null);
+
+            result.add(toString(message));
+        }
+
+        return result;
+    }
+
+    /**
+     * Returns the server's current WAL write position, or
+     * {@code INVALID_LSN} when the query yields no row.
+     * pg_current_xlog_location() was renamed to pg_current_wal_lsn() in
+     * PostgreSQL 10, hence the version switch.
+     * Modernized: manual try/finally with a nullable ResultSet replaced by
+     * try-with-resources, which closes both resources on all paths.
+     */
+    private LogSequenceNumber getCurrentLSN() throws SQLException {
+        String lsnFunction =
+                ((BaseConnection) sqlConnection).haveMinimumServerVersion(ServerVersion.v10)
+                        ? "pg_current_wal_lsn()" : "pg_current_xlog_location()";
+        try (Statement st = sqlConnection.createStatement();
+             ResultSet rs = st.executeQuery("select " + lsnFunction)) {
+            return rs.next()
+                    ? LogSequenceNumber.valueOf(rs.getString(1))
+                    : LogSequenceNumber.INVALID_LSN;
+        }
+    }
+}
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/replication/PhysicalReplicationTest.java b/pgjdbc/src/test/java/org/postgresql/test/replication/PhysicalReplicationTest.java
new file mode 100644
index 0000000..151ad73
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/replication/PhysicalReplicationTest.java
@@ -0,0 +1,290 @@
+/*
+ * Copyright (c) 2016, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.test.replication;
+
+import java.nio.ByteBuffer;
+import java.sql.Connection;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.Arrays;
+import org.hamcrest.CoreMatchers;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.postgresql.PGConnection;
+import org.postgresql.core.BaseConnection;
+import org.postgresql.core.ServerVersion;
+import org.postgresql.replication.LogSequenceNumber;
+import org.postgresql.replication.PGReplicationStream;
+import org.postgresql.test.TestUtil;
+import org.postgresql.test.annotations.DisabledIfServerVersionBelow;
+import org.postgresql.test.annotations.tags.Replication;
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.Assume.assumeThat;
+
+/**
+ * Tests for physical (WAL shipping) replication via {@link PGReplicationStream}.
+ */
+@Replication
+@DisabledIfServerVersionBelow("9.4")
+class PhysicalReplicationTest {
+
+    private static final String SLOT_NAME = "pgjdbc_physical_replication_slot";
+
+    private Connection replConnection;
+    private Connection sqlConnection;
+
+    /** Opens the SQL and replication connections; recreates the test table and slot. */
+    @BeforeEach
+    void setUp() throws Exception {
+        sqlConnection = TestUtil.openPrivilegedDB();
+        //DriverManager.setLogWriter(new PrintWriter(System.out));
+        replConnection = TestUtil.openReplicationConnection();
+        TestUtil.createTable(sqlConnection, "test_physic_table",
+                "pk serial primary key, name varchar(100)");
+        TestUtil.recreatePhysicalReplicationSlot(sqlConnection, SLOT_NAME);
+    }
+
+    /** Closes the replication connection first so the slot is inactive when dropped. */
+    @AfterEach
+    void tearDown() throws Exception {
+        replConnection.close();
+        TestUtil.dropTable(sqlConnection, "test_physic_table");
+        TestUtil.dropReplicationSlot(sqlConnection, SLOT_NAME);
+        sqlConnection.close();
+    }
+
+    /** Streaming without a slot name should still deliver WAL data. */
+    @Test
+    void receiveChangesWithoutReplicationSlot() throws Exception {
+        PGConnection pgConnection = (PGConnection) replConnection;
+
+        LogSequenceNumber lsn = getCurrentLSN();
+
+        Statement st = sqlConnection.createStatement();
+        st.execute("insert into test_physic_table(name) values('previous value')");
+        st.close();
+
+        PGReplicationStream stream =
+                pgConnection
+                        .getReplicationAPI()
+                        .replicationStream()
+                        .physical()
+                        .withStartPosition(lsn)
+                        .start();
+
+        ByteBuffer read = stream.read();
+
+        assertThat("Physical replication can be start without replication slot",
+                read, CoreMatchers.notNullValue()
+        );
+    }
+
+    /** Streaming from a named slot should deliver WAL data. */
+    @Test
+    void receiveChangesWithReplicationSlot() throws Exception {
+        PGConnection pgConnection = (PGConnection) replConnection;
+
+        LogSequenceNumber lsn = getCurrentLSN();
+
+        Statement st = sqlConnection.createStatement();
+        st.execute("insert into test_physic_table(name) values('previous value')");
+        st.close();
+
+        PGReplicationStream stream =
+                pgConnection
+                        .getReplicationAPI()
+                        .replicationStream()
+                        .physical()
+                        .withSlotName(SLOT_NAME)
+                        .withStartPosition(lsn)
+                        .start();
+
+        ByteBuffer read = stream.read();
+
+        assertThat(read, CoreMatchers.notNullValue());
+    }
+
+    /** Starting a stream should mark the slot active in pg_replication_slots. */
+    @Test
+    void afterStartStreamingDBSlotStatusActive() throws Exception {
+        PGConnection pgConnection = (PGConnection) replConnection;
+
+        LogSequenceNumber lsn = getCurrentLSN();
+
+        PGReplicationStream stream =
+                pgConnection
+                        .getReplicationAPI()
+                        .replicationStream()
+                        .physical()
+                        .withSlotName(SLOT_NAME)
+                        .withStartPosition(lsn)
+                        .start();
+
+        boolean isActive = isActiveOnView();
+        stream.close();
+
+        assertThat(
+                "After start streaming, database status should be update on view pg_replication_slots to active",
+                isActive, equalTo(true)
+        );
+    }
+
+    /** Closing the stream should mark the slot inactive again. */
+    @Test
+    void afterCloseReplicationStreamDBSlotStatusNotActive() throws Exception {
+        PGConnection pgConnection = (PGConnection) replConnection;
+
+        LogSequenceNumber lsn = getCurrentLSN();
+
+        PGReplicationStream stream =
+                pgConnection
+                        .getReplicationAPI()
+                        .replicationStream()
+                        .physical()
+                        .withSlotName(SLOT_NAME)
+                        .withStartPosition(lsn)
+                        .start();
+
+        boolean isActive = isActiveOnView();
+        // Use a Jupiter assumption: org.junit.Assume (JUnit 4) is not honored by the
+        // Jupiter engine, so the old assumeThat() would not actually skip the test.
+        org.junit.jupiter.api.Assumptions.assumeTrue(isActive);
+
+        stream.close();
+
+        isActive = isActiveOnView();
+        assertThat(
+                "Execute close method on PGReplicationStream should lead to stop replication, "
+                        + "as result we wait that on view pg_replication_slots status for slot will change to no active",
+                isActive, equalTo(false)
+        );
+    }
+
+    /** Restarting from the same LSN on one connection should replay the same record. */
+    @Test
+    void walRecordCanBeRepeatBeRestartReplication() throws Exception {
+        PGConnection pgConnection = (PGConnection) replConnection;
+
+        LogSequenceNumber lsn = getCurrentLSN();
+
+        Statement st = sqlConnection.createStatement();
+        st.execute("insert into test_physic_table(name) values('previous value')");
+        st.close();
+
+        PGReplicationStream stream =
+                pgConnection
+                        .getReplicationAPI()
+                        .replicationStream()
+                        .physical()
+                        .withSlotName(SLOT_NAME)
+                        .withStartPosition(lsn)
+                        .start();
+
+        byte[] first = toByteArray(stream.read());
+        stream.close();
+
+        //reopen stream
+        stream =
+                pgConnection
+                        .getReplicationAPI()
+                        .replicationStream()
+                        .physical()
+                        .withSlotName(SLOT_NAME)
+                        .withStartPosition(lsn)
+                        .start();
+
+        byte[] second = toByteArray(stream.read());
+        stream.close();
+
+        boolean arrayEquals = Arrays.equals(first, second);
+        assertThat("On same replication connection we can restart replication from already "
+                        + "received LSN if they not recycled yet on backend",
+                arrayEquals, CoreMatchers.equalTo(true)
+        );
+    }
+
+    /** Restarting from lastReceiveLSN should continue without repeating or skipping. */
+    @Test
+    void restartPhysicalReplicationWithoutRepeatMessage() throws Exception {
+        PGConnection pgConnection = (PGConnection) replConnection;
+
+        LogSequenceNumber lsn = getCurrentLSN();
+
+        Statement st = sqlConnection.createStatement();
+        st.execute("insert into test_physic_table(name) values('first value')");
+        st.close();
+
+        PGReplicationStream stream =
+                pgConnection
+                        .getReplicationAPI()
+                        .replicationStream()
+                        .physical()
+                        .withSlotName(SLOT_NAME)
+                        .withStartPosition(lsn)
+                        .start();
+
+        byte[] streamOneFirstPart = toByteArray(stream.read());
+        LogSequenceNumber restartLSN = stream.getLastReceiveLSN();
+
+        st = sqlConnection.createStatement();
+        st.execute("insert into test_physic_table(name) values('second value')");
+        st.close();
+
+        byte[] streamOneSecondPart = toByteArray(stream.read());
+        stream.close();
+
+        //reopen stream
+        stream =
+                pgConnection
+                        .getReplicationAPI()
+                        .replicationStream()
+                        .physical()
+                        .withSlotName(SLOT_NAME)
+                        .withStartPosition(restartLSN)
+                        .start();
+
+        byte[] streamTwoFirstPart = toByteArray(stream.read());
+        stream.close();
+
+        boolean arrayEquals = Arrays.equals(streamOneSecondPart, streamTwoFirstPart);
+        assertThat("Interrupt physical replication and restart from lastReceiveLSN should not "
+                        + "lead to repeat messages skip part of them",
+                arrayEquals, CoreMatchers.equalTo(true)
+        );
+    }
+
+    /** Reports whether the test slot is marked active in pg_replication_slots. */
+    private boolean isActiveOnView() throws SQLException {
+        // try-with-resources closes the ResultSet and Statement even on error.
+        try (Statement st = sqlConnection.createStatement();
+             ResultSet rs = st.executeQuery(
+                     "select * from pg_replication_slots where slot_name = '" + SLOT_NAME + "'")) {
+            return rs.next() && rs.getBoolean("active");
+        }
+    }
+
+    /**
+     * Copies the buffer's backing array from its array offset to the end.
+     * NOTE(review): this ignores buffer.position()/limit(); assumes stream.read()
+     * returns a buffer whose payload spans the rest of the backing array — TODO confirm.
+     */
+    private byte[] toByteArray(ByteBuffer buffer) {
+        int offset = buffer.arrayOffset();
+        byte[] source = buffer.array();
+        return Arrays.copyOfRange(source, offset, source.length);
+    }
+
+    /**
+     * Queries the server for its current WAL write position.
+     *
+     * @return the current LSN, or {@link LogSequenceNumber#INVALID_LSN} if none is reported
+     */
+    private LogSequenceNumber getCurrentLSN() throws SQLException {
+        // pg_current_xlog_location() was renamed to pg_current_wal_lsn() in PostgreSQL 10.
+        String lsnFunction =
+                ((BaseConnection) sqlConnection).haveMinimumServerVersion(ServerVersion.v10)
+                        ? "pg_current_wal_lsn()" : "pg_current_xlog_location()";
+        try (Statement st = sqlConnection.createStatement();
+             ResultSet rs = st.executeQuery("select " + lsnFunction)) {
+            if (rs.next()) {
+                return LogSequenceNumber.valueOf(rs.getString(1));
+            }
+            return LogSequenceNumber.INVALID_LSN;
+        }
+    }
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/replication/ReplicationConnectionTest.java b/pgjdbc/src/test/java/org/postgresql/test/replication/ReplicationConnectionTest.java
new file mode 100644
index 0000000..b94a962
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/replication/ReplicationConnectionTest.java
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2016, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.test.replication;
+
+import java.sql.Connection;
+import java.sql.ResultSet;
+import java.sql.Statement;
+import org.hamcrest.CoreMatchers;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.postgresql.PGConnection;
+import org.postgresql.test.TestUtil;
+import org.postgresql.test.annotations.DisabledIfServerVersionBelow;
+import org.postgresql.test.annotations.tags.Replication;
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.MatcherAssert.assertThat;
+
+/**
+ * Tests for generic JDBC behavior of a connection opened in replication mode.
+ */
+@Replication
+@DisabledIfServerVersionBelow("9.4")
+class ReplicationConnectionTest {
+    private Connection replConnection;
+
+    /** Opens a fresh replication-mode connection for each test. */
+    @BeforeEach
+    void setUp() throws Exception {
+        replConnection = TestUtil.openReplicationConnection();
+    }
+
+    @AfterEach
+    void tearDown() throws Exception {
+        replConnection.close();
+    }
+
+    /** A replication connection should answer isValid() like a regular connection. */
+    @Test
+    void isValid() throws Exception {
+        boolean valid = replConnection.isValid(3);
+
+        PGConnection pgConnection = (PGConnection) replConnection;
+        pgConnection.getBackendPID();
+
+        assertThat("Replication connection as Simple connection can be check on valid",
+                valid, equalTo(true)
+        );
+    }
+
+    /** After the backend is terminated server-side, isValid() should report false. */
+    @Test
+    void connectionNotValidWhenSessionTerminated() throws Exception {
+        TestUtil.terminateBackend(replConnection);
+
+        boolean stillValid = replConnection.isValid(3);
+
+        assertThat("When postgresql terminate session with replication connection, "
+                        + "isValid() should return false, because next query on this connection will fail",
+                stillValid, equalTo(false)
+        );
+    }
+
+    /** IDENTIFY_SYSTEM result columns should be readable by index. */
+    @Test
+    void replicationCommandResultSetAccessByIndex() throws Exception {
+        String xlogpos = null;
+
+        Statement st = replConnection.createStatement();
+        ResultSet rs = st.executeQuery("IDENTIFY_SYSTEM");
+        if (rs.next()) {
+            // Column 3 of IDENTIFY_SYSTEM is the current WAL position.
+            xlogpos = rs.getString(3);
+        }
+        rs.close();
+        st.close();
+
+        assertThat("Replication protocol supports a limited number of commands, "
+                        + "and it command can be execute via Statement(simple query protocol), "
+                        + "and result fetch via ResultSet",
+                xlogpos, CoreMatchers.notNullValue()
+        );
+    }
+
+    /** IDENTIFY_SYSTEM result columns should be readable by column name. */
+    @Test
+    void replicationCommandResultSetAccessByName() throws Exception {
+        String xlogpos = null;
+
+        Statement st = replConnection.createStatement();
+        ResultSet rs = st.executeQuery("IDENTIFY_SYSTEM");
+        if (rs.next()) {
+            xlogpos = rs.getString("xlogpos");
+        }
+        rs.close();
+        st.close();
+
+        assertThat("Replication protocol supports a limited number of commands, "
+                        + "and it command can be execute via Statement(simple query protocol), "
+                        + "and result fetch via ResultSet",
+                xlogpos, CoreMatchers.notNullValue()
+        );
+    }
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/replication/ReplicationSlotTest.java b/pgjdbc/src/test/java/org/postgresql/test/replication/ReplicationSlotTest.java
new file mode 100644
index 0000000..e8339d1
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/replication/ReplicationSlotTest.java
@@ -0,0 +1,395 @@
+/*
+ * Copyright (c) 2016, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.test.replication;
+
+import java.sql.Connection;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.SQLFeatureNotSupportedException;
+import java.sql.Statement;
+import org.hamcrest.CoreMatchers;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.postgresql.PGConnection;
+import org.postgresql.core.BaseConnection;
+import org.postgresql.core.ServerVersion;
+import org.postgresql.replication.ReplicationSlotInfo;
+import org.postgresql.replication.ReplicationType;
+import org.postgresql.test.TestUtil;
+import org.postgresql.test.annotations.DisabledIfServerVersionBelow;
+import org.postgresql.test.annotations.tags.Replication;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.fail;
+import static org.junit.jupiter.api.Assumptions.assumeFalse;
+import static org.junit.jupiter.api.Assumptions.assumeTrue;
+
+/**
+ * Tests for creating and dropping physical and logical replication slots.
+ */
+@Replication
+@DisabledIfServerVersionBelow("9.4")
+class ReplicationSlotTest {
+    private Connection sqlConnection;
+    private Connection replConnection;
+
+    // Name of the slot created by the current test; tearDown drops it when non-null.
+    private String slotName;
+
+    @BeforeEach
+    void setUp() throws Exception {
+        sqlConnection = TestUtil.openPrivilegedDB();
+        replConnection = TestUtil.openReplicationConnection();
+        //DriverManager.setLogWriter(new PrintWriter(System.out));
+    }
+
+    @AfterEach
+    void tearDown() throws Exception {
+        replConnection.close();
+        dropReplicationSlot();
+        slotName = null;
+        sqlConnection.close();
+    }
+
+    @Test
+    void notAvailableCreatePhysicalSlotWithoutSlotName() throws Exception {
+        PGConnection pgConnection = (PGConnection) replConnection;
+
+        // Slot name is required; assertThrows itself fails if nothing is thrown,
+        // so no extra fail() inside the lambda is needed (it would be reported as
+        // an unexpected exception type rather than "nothing was thrown").
+        assertThrows(IllegalArgumentException.class, () ->
+                pgConnection
+                        .getReplicationAPI()
+                        .createReplicationSlot()
+                        .physical()
+                        .make());
+    }
+
+    @Test
+    void createPhysicalSlot() throws Exception {
+        PGConnection pgConnection = (PGConnection) replConnection;
+
+        slotName = "pgjdbc_test_create_physical_replication_slot";
+
+        pgConnection
+                .getReplicationAPI()
+                .createReplicationSlot()
+                .physical()
+                .withSlotName(slotName)
+                .make();
+
+        boolean result = isPhysicalSlotExists(slotName);
+
+        assertThat("Slot should exist", result, CoreMatchers.equalTo(true));
+
+        result = isSlotTemporary(slotName);
+
+        assertThat("Slot should not be temporary by default", result, CoreMatchers.equalTo(false));
+    }
+
+    @Test
+    void createTemporaryPhysicalSlotPg10AndHigher()
+            throws SQLException {
+        assumeTrue(TestUtil.haveMinimumServerVersion(replConnection, ServerVersion.v10));
+
+        BaseConnection baseConnection = (BaseConnection) replConnection;
+
+        String slotName = "pgjdbc_test_create_temporary_physical_replication_slot_pg_10_or_higher";
+
+        assertDoesNotThrow(() -> {
+
+            baseConnection
+                    .getReplicationAPI()
+                    .createReplicationSlot()
+                    .physical()
+                    .withSlotName(slotName)
+                    .withTemporaryOption()
+                    .make();
+
+        }, "PostgreSQL >= 10 should support temporary replication slots");
+
+        boolean result = isSlotTemporary(slotName);
+
+        assertThat("Slot is not temporary", result, CoreMatchers.equalTo(true));
+    }
+
+    @Test
+    void createTemporaryPhysicalSlotPgLowerThan10()
+            throws SQLException {
+        assumeFalse(TestUtil.haveMinimumServerVersion(replConnection, ServerVersion.v10));
+
+        BaseConnection baseConnection = (BaseConnection) replConnection;
+
+        String slotName = "pgjdbc_test_create_temporary_physical_replication_slot_pg_lower_than_10";
+
+        // assertThrows for consistency with the other expected-exception tests in this class.
+        assertThrows(SQLFeatureNotSupportedException.class, () ->
+                baseConnection
+                        .getReplicationAPI()
+                        .createReplicationSlot()
+                        .physical()
+                        .withSlotName(slotName)
+                        .withTemporaryOption()
+                        .make(),
+                "PostgreSQL < 10 does not support temporary replication slots");
+    }
+
+    @Test
+    void dropPhysicalSlot() throws Exception {
+        PGConnection pgConnection = (PGConnection) replConnection;
+
+        slotName = "pgjdbc_test_create_physical_replication_slot";
+
+        pgConnection
+                .getReplicationAPI()
+                .createReplicationSlot()
+                .physical()
+                .withSlotName(slotName)
+                .make();
+
+        pgConnection
+                .getReplicationAPI()
+                .dropReplicationSlot(slotName);
+
+        boolean result = isPhysicalSlotExists(slotName);
+
+        // Slot is already gone; prevent tearDown from dropping it again.
+        slotName = null;
+
+        assertThat(result, CoreMatchers.equalTo(false));
+    }
+
+    @Test
+    void notAvailableCreateLogicalSlotWithoutSlotName() throws Exception {
+        PGConnection pgConnection = (PGConnection) replConnection;
+
+        // Slot name is required for logical slots as well.
+        assertThrows(IllegalArgumentException.class, () ->
+                pgConnection
+                        .getReplicationAPI()
+                        .createReplicationSlot()
+                        .logical()
+                        .withOutputPlugin("test_decoding")
+                        .make());
+    }
+
+    @Test
+    void notAvailableCreateLogicalSlotWithoutOutputPlugin() throws Exception {
+        PGConnection pgConnection = (PGConnection) replConnection;
+
+        // An output plugin is required for logical slots.
+        assertThrows(IllegalArgumentException.class, () ->
+                pgConnection
+                        .getReplicationAPI()
+                        .createReplicationSlot()
+                        .logical()
+                        .withSlotName("pgjdbc_test_create_logical_replication_slot")
+                        .make());
+    }
+
+    @Test
+    void createLogicalSlot() throws Exception {
+        PGConnection pgConnection = (PGConnection) replConnection;
+
+        slotName = "pgjdbc_test_create_logical_replication_slot";
+
+        pgConnection
+                .getReplicationAPI()
+                .createReplicationSlot()
+                .logical()
+                .withSlotName(slotName)
+                .withOutputPlugin("test_decoding")
+                .make();
+
+        boolean result = isLogicalSlotExists(slotName);
+
+        assertThat("Slot should exist", result, CoreMatchers.equalTo(true));
+
+        result = isSlotTemporary(slotName);
+
+        assertThat("Slot should not be temporary by default", result, CoreMatchers.equalTo(false));
+    }
+
+    @Test
+    void createLogicalSlotReturnedInfo() throws Exception {
+        PGConnection pgConnection = (PGConnection) replConnection;
+
+        slotName = "pgjdbc_test_create_logical_replication_slot_info";
+
+        ReplicationSlotInfo info = pgConnection
+                .getReplicationAPI()
+                .createReplicationSlot()
+                .logical()
+                .withSlotName(slotName)
+                .withOutputPlugin("test_decoding")
+                .make();
+
+        assertEquals(slotName, info.getSlotName());
+        // statically imported assertEquals, consistent with the rest of this class
+        assertEquals(ReplicationType.LOGICAL, info.getReplicationType());
+        assertNotNull(info.getConsistentPoint());
+        assertNotNull(info.getSnapshotName());
+        assertEquals("test_decoding", info.getOutputPlugin());
+    }
+
+    @Test
+    void createPhysicalSlotReturnedInfo() throws Exception {
+        PGConnection pgConnection = (PGConnection) replConnection;
+
+        slotName = "pgjdbc_test_create_physical_replication_slot_info";
+
+        ReplicationSlotInfo info = pgConnection
+                .getReplicationAPI()
+                .createReplicationSlot()
+                .physical()
+                .withSlotName(slotName)
+                .make();
+
+        assertEquals(slotName, info.getSlotName());
+        assertEquals(ReplicationType.PHYSICAL, info.getReplicationType());
+        assertNotNull(info.getConsistentPoint());
+        assertNull(info.getSnapshotName());
+        assertNull(info.getOutputPlugin());
+    }
+
+    @Test
+    void createTemporaryLogicalSlotPg10AndHigher()
+            throws SQLException {
+        assumeTrue(TestUtil.haveMinimumServerVersion(replConnection, ServerVersion.v10));
+
+        BaseConnection baseConnection = (BaseConnection) replConnection;
+
+        String slotName = "pgjdbc_test_create_temporary_logical_replication_slot_pg_10_or_higher";
+
+        assertDoesNotThrow(() -> {
+
+            baseConnection
+                    .getReplicationAPI()
+                    .createReplicationSlot()
+                    .logical()
+                    .withSlotName(slotName)
+                    .withOutputPlugin("test_decoding")
+                    .withTemporaryOption()
+                    .make();
+
+        }, "PostgreSQL >= 10 should support temporary replication slots");
+
+        boolean result = isSlotTemporary(slotName);
+
+        assertThat("Slot is not temporary", result, CoreMatchers.equalTo(true));
+    }
+
+    @Test
+    void createTemporaryLogicalSlotPgLowerThan10()
+            throws SQLException {
+        assumeFalse(TestUtil.haveMinimumServerVersion(replConnection, ServerVersion.v10));
+
+        BaseConnection baseConnection = (BaseConnection) replConnection;
+
+        String slotName = "pgjdbc_test_create_temporary_logical_replication_slot_pg_lower_than_10";
+
+        // assertThrows for consistency with the other expected-exception tests in this class.
+        assertThrows(SQLFeatureNotSupportedException.class, () ->
+                baseConnection
+                        .getReplicationAPI()
+                        .createReplicationSlot()
+                        .logical()
+                        .withSlotName(slotName)
+                        .withOutputPlugin("test_decoding")
+                        .withTemporaryOption()
+                        .make(),
+                "PostgreSQL < 10 does not support temporary replication slots");
+    }
+
+    @Test
+    void dropLogicalSlot() throws Exception {
+        PGConnection pgConnection = (PGConnection) replConnection;
+
+        slotName = "pgjdbc_test_create_logical_replication_slot";
+
+        pgConnection
+                .getReplicationAPI()
+                .createReplicationSlot()
+                .logical()
+                .withSlotName(slotName)
+                .withOutputPlugin("test_decoding")
+                .make();
+
+        pgConnection
+                .getReplicationAPI()
+                .dropReplicationSlot(slotName);
+
+        boolean result = isLogicalSlotExists(slotName);
+
+        // Slot is already gone; prevent tearDown from dropping it again.
+        slotName = null;
+
+        assertThat(result, CoreMatchers.equalTo(false));
+    }
+
+    /** Reports whether a physical slot with the given name exists. */
+    private boolean isPhysicalSlotExists(String slotName) throws SQLException {
+        // "select 1" for consistency with isLogicalSlotExists; only existence matters.
+        try (Statement st = sqlConnection.createStatement();
+             ResultSet resultSet = st.executeQuery(
+                     "select 1 from pg_replication_slots where slot_name = '" + slotName
+                             + "' and slot_type = 'physical'")) {
+            return resultSet.next();
+        }
+    }
+
+    /** Reports whether a logical slot with the given name exists. */
+    private boolean isLogicalSlotExists(String slotName) throws SQLException {
+        try (Statement st = sqlConnection.createStatement();
+             ResultSet resultSet = st.executeQuery(
+                     "select 1 from pg_replication_slots where slot_name = '" + slotName
+                             + "' and slot_type = 'logical'")) {
+            return resultSet.next();
+        }
+    }
+
+    /** Reports whether the slot is temporary; always false before PostgreSQL 10. */
+    private boolean isSlotTemporary(String slotName) throws SQLException {
+        // The "temporary" column only exists in PostgreSQL 10 and later.
+        if (!TestUtil.haveMinimumServerVersion(sqlConnection, ServerVersion.v10)) {
+            return false;
+        }
+
+        try (Statement st = sqlConnection.createStatement();
+             ResultSet resultSet = st.executeQuery(
+                     "select 1 from pg_replication_slots where slot_name = '" + slotName
+                             + "' and temporary = true")) {
+            return resultSet.next();
+        }
+    }
+
+    /** Drops the per-test slot, if one was recorded in {@link #slotName}. */
+    private void dropReplicationSlot() throws Exception {
+        if (slotName != null) {
+            TestUtil.dropReplicationSlot(sqlConnection, slotName);
+        }
+    }
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/replication/ReplicationTestSuite.java b/pgjdbc/src/test/java/org/postgresql/test/replication/ReplicationTestSuite.java
new file mode 100644
index 0000000..4115ef8
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/replication/ReplicationTestSuite.java
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2016, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.test.replication;
+
+import java.sql.Connection;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import org.junit.AssumptionViolatedException;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.runner.RunWith;
+import org.junit.runners.Suite;
+import org.postgresql.core.ServerVersion;
+import org.postgresql.test.TestUtil;
+
+/**
+ * Aggregates the replication test classes into one suite and checks that the
+ * server is configured for replication before any of them run.
+ *
+ * NOTE(review): this class mixes JUnit 4 and JUnit 5 constructs —
+ * {@code @RunWith(Suite.class)} is a JUnit 4 runner, while {@code @BeforeAll}
+ * is a JUnit 5 annotation that the JUnit 4 runner never invokes, so setUp()
+ * likely does not run at all; likewise {@code org.junit.AssumptionViolatedException}
+ * (JUnit 4) is not honored by the Jupiter engine. TODO confirm which JUnit
+ * version is intended and align the suite mechanism accordingly.
+ */
+@RunWith(Suite.class)
+@Suite.SuiteClasses({
+        CopyBothResponseTest.class,
+        LogicalReplicationStatusTest.class,
+        LogicalReplicationTest.class,
+        LogSequenceNumberTest.class,
+        PhysicalReplicationTest.class,
+        ReplicationConnectionTest.class,
+        ReplicationSlotTest.class,
+})
+class ReplicationTestSuite {
+
+    /** Skips the whole suite when the server lacks the replication API or configuration. */
+    @BeforeAll
+    static void setUp() throws Exception {
+        Connection connection = TestUtil.openDB();
+        try {
+            if (TestUtil.haveMinimumServerVersion(connection, ServerVersion.v9_0)) {
+                assumeWalSenderEnabled(connection);
+                assumeReplicationRole(connection);
+            } else {
+                throw new AssumptionViolatedException(
+                        "Skip replication test because current database version "
+                                + "too old and don't contain replication API"
+                );
+            }
+        } finally {
+            connection.close();
+        }
+    }
+
+    /** Skips the suite unless the server allows WAL sender processes (max_wal_senders > 0). */
+    private static void assumeWalSenderEnabled(Connection connection) throws SQLException {
+        Statement stmt = connection.createStatement();
+        ResultSet rs = stmt.executeQuery("SHOW max_wal_senders");
+        rs.next();
+        int maxWalSenders = rs.getInt(1);
+        rs.close();
+        stmt.close();
+
+        if (maxWalSenders == 0) {
+            throw new AssumptionViolatedException(
+                    "Skip replication test because max_wal_senders = 0");
+        }
+    }
+
+    /** Skips the suite unless the current user has the REPLICATION role attribute. */
+    private static void assumeReplicationRole(Connection connection) throws SQLException {
+        Statement stmt = connection.createStatement();
+        ResultSet rs =
+                stmt.executeQuery("SELECT usename, userepl FROM pg_user WHERE usename = current_user");
+        rs.next();
+        String userName = rs.getString(1);
+        boolean replicationGrant = rs.getBoolean(2);
+        rs.close();
+        stmt.close();
+
+        if (!replicationGrant) {
+            throw new AssumptionViolatedException(
+                    "Skip replication test because user '" + userName + "' doesn't have replication role");
+        }
+    }
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/socketfactory/CustomSocketFactory.java b/pgjdbc/src/test/java/org/postgresql/test/socketfactory/CustomSocketFactory.java
index 8b6c2b9..659c88c 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/socketfactory/CustomSocketFactory.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/socketfactory/CustomSocketFactory.java
@@ -9,62 +9,61 @@ import java.io.IOException;
 import java.net.InetAddress;
 import java.net.Socket;
 import java.net.UnknownHostException;
-
 import javax.net.SocketFactory;
 
 public class CustomSocketFactory extends SocketFactory {
 
-  private static CustomSocketFactory instance;
+    private static CustomSocketFactory instance;
 
-  private final String argument;
-  private int socketCreated;
+    private final String argument;
+    private int socketCreated;
 
-  public CustomSocketFactory(String argument) {
-    if (instance != null) {
-      throw new IllegalStateException("Test failed, multiple custom socket factory instantiation");
+    public CustomSocketFactory(String argument) {
+        if (instance != null) {
+            throw new IllegalStateException("Test failed, multiple custom socket factory instantiation");
+        }
+        instance = this;
+        this.argument = argument;
     }
-    instance = this;
-    this.argument = argument;
-  }
 
-  @Override
-  public Socket createSocket(String arg0, int arg1) throws IOException, UnknownHostException {
-    throw new UnsupportedOperationException();
-  }
+    public static CustomSocketFactory getInstance() {
+        return instance;
+    }
 
-  @Override
-  public Socket createSocket(InetAddress arg0, int arg1) throws IOException {
-    throw new UnsupportedOperationException();
-  }
+    @Override
+    public Socket createSocket(String arg0, int arg1) throws IOException, UnknownHostException {
+        throw new UnsupportedOperationException();
+    }
 
-  @Override
-  public Socket createSocket(String arg0, int arg1, InetAddress arg2, int arg3)
-      throws IOException, UnknownHostException {
-    throw new UnsupportedOperationException();
-  }
+    @Override
+    public Socket createSocket(InetAddress arg0, int arg1) throws IOException {
+        throw new UnsupportedOperationException();
+    }
 
-  @Override
-  public Socket createSocket(InetAddress arg0, int arg1, InetAddress arg2, int arg3)
-      throws IOException {
-    throw new UnsupportedOperationException();
-  }
+    @Override
+    public Socket createSocket(String arg0, int arg1, InetAddress arg2, int arg3)
+            throws IOException, UnknownHostException {
+        throw new UnsupportedOperationException();
+    }
 
-  @Override
-  public Socket createSocket() throws IOException {
-    socketCreated++;
-    return new Socket();
-  }
+    @Override
+    public Socket createSocket(InetAddress arg0, int arg1, InetAddress arg2, int arg3)
+            throws IOException {
+        throw new UnsupportedOperationException();
+    }
 
-  public String getArgument() {
-    return argument;
-  }
+    @Override
+    public Socket createSocket() throws IOException {
+        socketCreated++;
+        return new Socket();
+    }
 
-  public int getSocketCreated() {
-    return socketCreated;
-  }
+    public String getArgument() {
+        return argument;
+    }
 
-  public static CustomSocketFactory getInstance() {
-    return instance;
-  }
+    public int getSocketCreated() {
+        return socketCreated;
+    }
 
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/socketfactory/SocketFactoryTestSuite.java b/pgjdbc/src/test/java/org/postgresql/test/socketfactory/SocketFactoryTestSuite.java
index 1706fce..7b999b1 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/socketfactory/SocketFactoryTestSuite.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/socketfactory/SocketFactoryTestSuite.java
@@ -5,46 +5,43 @@
 
 package org.postgresql.test.socketfactory;
 
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertNotNull;
-
-import org.postgresql.PGProperty;
-import org.postgresql.test.TestUtil;
-
+import java.sql.Connection;
+import java.util.Properties;
 import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
-
-import java.sql.Connection;
-import java.util.Properties;
+import org.postgresql.PGProperty;
+import org.postgresql.test.TestUtil;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
 
 class SocketFactoryTestSuite {
 
-  private static final String STRING_ARGUMENT = "name of a socket";
+    private static final String STRING_ARGUMENT = "name of a socket";
 
-  private Connection conn;
+    private Connection conn;
 
-  @BeforeEach
-  void setUp() throws Exception {
-    Properties properties = new Properties();
-    properties.put(PGProperty.SOCKET_FACTORY.getName(), CustomSocketFactory.class.getName());
-    properties.put(PGProperty.SOCKET_FACTORY_ARG.getName(), STRING_ARGUMENT);
-    conn = TestUtil.openDB(properties);
-  }
+    @BeforeEach
+    void setUp() throws Exception {
+        Properties properties = new Properties();
+        properties.put(PGProperty.SOCKET_FACTORY.getName(), CustomSocketFactory.class.getName());
+        properties.put(PGProperty.SOCKET_FACTORY_ARG.getName(), STRING_ARGUMENT);
+        conn = TestUtil.openDB(properties);
+    }
 
-  @AfterEach
-  void tearDown() throws Exception {
-    TestUtil.closeDB(conn);
-  }
+    @AfterEach
+    void tearDown() throws Exception {
+        TestUtil.closeDB(conn);
+    }
 
-  /**
-   * Test custom socket factory.
-   */
-  @Test
-  void databaseMetaData() throws Exception {
-    assertNotNull(CustomSocketFactory.getInstance(), "Custom socket factory not null");
-    assertEquals(STRING_ARGUMENT, CustomSocketFactory.getInstance().getArgument());
-    assertEquals(1, CustomSocketFactory.getInstance().getSocketCreated());
-  }
+    /**
+     * Test custom socket factory.
+     */
+    @Test
+    void databaseMetaData() throws Exception {
+        assertNotNull(CustomSocketFactory.getInstance(), "Custom socket factory not null");
+        assertEquals(STRING_ARGUMENT, CustomSocketFactory.getInstance().getArgument());
+        assertEquals(1, CustomSocketFactory.getInstance().getSocketCreated());
+    }
 
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/ssl/CommonNameVerifierTest.java b/pgjdbc/src/test/java/org/postgresql/test/ssl/CommonNameVerifierTest.java
index 7d28b21..b86133e 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/ssl/CommonNameVerifierTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/ssl/CommonNameVerifierTest.java
@@ -5,33 +5,30 @@
 
 package org.postgresql.test.ssl;
 
-import static org.junit.jupiter.api.Assertions.assertEquals;
-
-import org.postgresql.ssl.PGjdbcHostnameVerifier;
-
+import java.util.Arrays;
 import org.junit.jupiter.params.ParameterizedTest;
 import org.junit.jupiter.params.provider.MethodSource;
-
-import java.util.Arrays;
+import org.postgresql.ssl.PGjdbcHostnameVerifier;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 public class CommonNameVerifierTest {
-  public static Iterable<Object[]> data() {
-    return Arrays.asList(new Object[][]{
-        {"com", "host.com", -1},
-        {"*.com", "host.com", -1},
-        {"*.com", "*.*.com", -1},
-        {"**.com", "*.com", -1},
-        {"a.com", "*.host.com", -1},
-        {"host.com", "subhost.host.com", -1},
-        {"host.com", "host.com", 0}
-    });
-  }
+    public static Iterable<Object[]> data() {
+        return Arrays.asList(new Object[][]{
+                {"com", "host.com", -1},
+                {"*.com", "host.com", -1},
+                {"*.com", "*.*.com", -1},
+                {"**.com", "*.com", -1},
+                {"a.com", "*.host.com", -1},
+                {"host.com", "subhost.host.com", -1},
+                {"host.com", "host.com", 0}
+        });
+    }
 
-  @MethodSource("data")
-  @ParameterizedTest(name = "a={0}, b={1}")
-  void comparePatterns(String a, String b, int expected) throws Exception {
-    assertEquals(expected, PGjdbcHostnameVerifier.HOSTNAME_PATTERN_COMPARATOR.compare(a, b), a + " vs " + b);
+    @MethodSource("data")
+    @ParameterizedTest(name = "a={0}, b={1}")
+    void comparePatterns(String a, String b, int expected) throws Exception {
+        assertEquals(expected, PGjdbcHostnameVerifier.HOSTNAME_PATTERN_COMPARATOR.compare(a, b), a + " vs " + b);
 
-    assertEquals(-expected, PGjdbcHostnameVerifier.HOSTNAME_PATTERN_COMPARATOR.compare(b, a), b + " vs " + a);
-  }
+        assertEquals(-expected, PGjdbcHostnameVerifier.HOSTNAME_PATTERN_COMPARATOR.compare(b, a), b + " vs " + a);
+    }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/ssl/LazyKeyManagerTest.java b/pgjdbc/src/test/java/org/postgresql/test/ssl/LazyKeyManagerTest.java
index 121ed30..8b59884 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/ssl/LazyKeyManagerTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/ssl/LazyKeyManagerTest.java
@@ -5,99 +5,95 @@
 
 package org.postgresql.test.ssl;
 
-import static org.junit.jupiter.api.Assertions.assertNotNull;
-import static org.junit.jupiter.api.Assertions.assertNull;
-
-import org.postgresql.ssl.LazyKeyManager;
-import org.postgresql.ssl.PKCS12KeyManager;
-import org.postgresql.test.TestUtil;
-
-import org.junit.jupiter.api.Test;
-
 import java.io.IOException;
 import java.security.PrivateKey;
 import java.security.cert.X509Certificate;
-
 import javax.security.auth.callback.Callback;
 import javax.security.auth.callback.CallbackHandler;
 import javax.security.auth.callback.PasswordCallback;
 import javax.security.auth.callback.UnsupportedCallbackException;
 import javax.security.auth.x500.X500Principal;
+import org.junit.jupiter.api.Test;
+import org.postgresql.ssl.LazyKeyManager;
+import org.postgresql.ssl.PKCS12KeyManager;
+import org.postgresql.test.TestUtil;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
 
 class LazyKeyManagerTest {
 
-  @Test
-  void loadP12Key() throws Exception {
-    PKCS12KeyManager pkcs12KeyManager = new PKCS12KeyManager(
-        TestUtil.getSslTestCertPath("goodclient.p12"),
-        new TestCallbackHandler("sslpwd"));
-    PrivateKey pk = pkcs12KeyManager.getPrivateKey("user");
-    assertNotNull(pk);
-    X509Certificate[] chain = pkcs12KeyManager.getCertificateChain("user");
-    assertNotNull(chain);
-  }
-
-  @Test
-  void loadKey() throws Exception {
-    LazyKeyManager lazyKeyManager = new LazyKeyManager(
-        TestUtil.getSslTestCertPath("goodclient.crt"),
-        TestUtil.getSslTestCertPath("goodclient.pk8"),
-        new TestCallbackHandler("sslpwd"),
-        true);
-    PrivateKey pk = lazyKeyManager.getPrivateKey("user");
-    assertNotNull(pk);
-  }
-
-  @Test
-  void chooseClientAlias() throws Exception {
-    LazyKeyManager lazyKeyManager = new LazyKeyManager(
-        TestUtil.getSslTestCertPath("goodclient.crt"),
-        TestUtil.getSslTestCertPath("goodclient.pk8"),
-        new TestCallbackHandler("sslpwd"),
-        true);
-    X500Principal testPrincipal = new X500Principal("CN=root certificate, O=PgJdbc test, ST=CA, C=US");
-    X500Principal[] issuers = new X500Principal[]{testPrincipal};
-
-    String validKeyType = lazyKeyManager.chooseClientAlias(new String[]{"RSA"}, issuers, null);
-    assertNotNull(validKeyType);
-
-    String ignoresCase = lazyKeyManager.chooseClientAlias(new String[]{"rsa"}, issuers, null);
-    assertNotNull(ignoresCase);
-
-    String invalidKeyType = lazyKeyManager.chooseClientAlias(new String[]{"EC"}, issuers, null);
-    assertNull(invalidKeyType);
-
-    String containsValidKeyType = lazyKeyManager.chooseClientAlias(new String[]{"EC", "RSA"}, issuers, null);
-    assertNotNull(containsValidKeyType);
-
-    String ignoresBlank = lazyKeyManager.chooseClientAlias(new String[]{}, issuers, null);
-    assertNotNull(ignoresBlank);
-  }
-
-  public static class TestCallbackHandler implements CallbackHandler {
-    char [] password;
-
-    TestCallbackHandler(String password) {
-      if (password != null) {
-        this.password = password.toCharArray();
-      }
+    @Test
+    void loadP12Key() throws Exception {
+        PKCS12KeyManager pkcs12KeyManager = new PKCS12KeyManager(
+                TestUtil.getSslTestCertPath("goodclient.p12"),
+                new TestCallbackHandler("sslpwd"));
+        PrivateKey pk = pkcs12KeyManager.getPrivateKey("user");
+        assertNotNull(pk);
+        X509Certificate[] chain = pkcs12KeyManager.getCertificateChain("user");
+        assertNotNull(chain);
     }
 
-    @Override
-    public void handle(Callback[] callbacks) throws IOException, UnsupportedCallbackException {
-      for (Callback callback : callbacks) {
-        if (!(callback instanceof PasswordCallback)) {
-          throw new UnsupportedCallbackException(callback);
-        }
-        PasswordCallback pwdCallback = (PasswordCallback) callback;
-        if (password != null) {
-          pwdCallback.setPassword(password);
-          continue;
-        }
-        // It is used instead of cons.readPassword(prompt), because the prompt may contain '%'
-        // characters
-        //pwdCallback.setPassword(cons.readPassword("%s", pwdCallback.getPrompt()));
-      }
+    @Test
+    void loadKey() throws Exception {
+        LazyKeyManager lazyKeyManager = new LazyKeyManager(
+                TestUtil.getSslTestCertPath("goodclient.crt"),
+                TestUtil.getSslTestCertPath("goodclient.pk8"),
+                new TestCallbackHandler("sslpwd"),
+                true);
+        PrivateKey pk = lazyKeyManager.getPrivateKey("user");
+        assertNotNull(pk);
+    }
+
+    @Test
+    void chooseClientAlias() throws Exception {
+        LazyKeyManager lazyKeyManager = new LazyKeyManager(
+                TestUtil.getSslTestCertPath("goodclient.crt"),
+                TestUtil.getSslTestCertPath("goodclient.pk8"),
+                new TestCallbackHandler("sslpwd"),
+                true);
+        X500Principal testPrincipal = new X500Principal("CN=root certificate, O=PgJdbc test, ST=CA, C=US");
+        X500Principal[] issuers = new X500Principal[]{testPrincipal};
+
+        String validKeyType = lazyKeyManager.chooseClientAlias(new String[]{"RSA"}, issuers, null);
+        assertNotNull(validKeyType);
+
+        String ignoresCase = lazyKeyManager.chooseClientAlias(new String[]{"rsa"}, issuers, null);
+        assertNotNull(ignoresCase);
+
+        String invalidKeyType = lazyKeyManager.chooseClientAlias(new String[]{"EC"}, issuers, null);
+        assertNull(invalidKeyType);
+
+        String containsValidKeyType = lazyKeyManager.chooseClientAlias(new String[]{"EC", "RSA"}, issuers, null);
+        assertNotNull(containsValidKeyType);
+
+        String ignoresBlank = lazyKeyManager.chooseClientAlias(new String[]{}, issuers, null);
+        assertNotNull(ignoresBlank);
+    }
+
+    public static class TestCallbackHandler implements CallbackHandler {
+        char[] password;
+
+        TestCallbackHandler(String password) {
+            if (password != null) {
+                this.password = password.toCharArray();
+            }
+        }
+
+        @Override
+        public void handle(Callback[] callbacks) throws IOException, UnsupportedCallbackException {
+            for (Callback callback : callbacks) {
+                if (!(callback instanceof PasswordCallback)) {
+                    throw new UnsupportedCallbackException(callback);
+                }
+                PasswordCallback pwdCallback = (PasswordCallback) callback;
+                if (password != null) {
+                    pwdCallback.setPassword(password);
+                    continue;
+                }
+                // It is used instead of cons.readPassword(prompt), because the prompt may contain '%'
+                // characters
+                //pwdCallback.setPassword(cons.readPassword("%s", pwdCallback.getPrompt()));
+            }
+        }
     }
-  }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/ssl/LibPQFactoryHostNameTest.java b/pgjdbc/src/test/java/org/postgresql/test/ssl/LibPQFactoryHostNameTest.java
index 45cabc0..c68816b 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/ssl/LibPQFactoryHostNameTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/ssl/LibPQFactoryHostNameTest.java
@@ -5,45 +5,42 @@
 
 package org.postgresql.test.ssl;
 
-import static org.junit.jupiter.api.Assertions.assertEquals;
-
-import org.postgresql.ssl.PGjdbcHostnameVerifier;
-import org.postgresql.ssl.jdbc4.LibPQFactory;
-
+import java.util.Arrays;
 import org.junit.jupiter.params.ParameterizedTest;
 import org.junit.jupiter.params.provider.MethodSource;
-
-import java.util.Arrays;
+import org.postgresql.ssl.PGjdbcHostnameVerifier;
+import org.postgresql.ssl.jdbc4.LibPQFactory;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 public class LibPQFactoryHostNameTest {
-  public static Iterable<Object[]> data() {
-    return Arrays.asList(new Object[][]{
-        {"host.com", "pattern.com", false},
-        {"host.com", ".pattern.com", false},
-        {"host.com", "*.pattern.com", false},
-        {"host.com", "*.host.com", false},
-        {"a.com", "*.host.com", false},
-        {".a.com", "*.host.com", false},
-        {"longhostname.com", "*.com", true},
-        {"longhostname.ru", "*.com", false},
-        {"host.com", "host.com", true},
-        {"sub.host.com", "host.com", false},
-        {"sub.host.com", "sub.host.com", true},
-        {"sub.host.com", "*.host.com", true},
-        {"Sub.host.com", "sub.host.com", true},
-        {"sub.host.com", "Sub.host.com", true},
-        {"sub.host.com", "*.hoSt.com", true},
-        {"*.host.com", "host.com", false},
-        {"sub.sub.host.com", "*.host.com", false}, // Wildcard should cover just one level
-        {"com", "*", false}, // Wildcard should have al least one dot
-    });
-  }
+    public static Iterable<Object[]> data() {
+        return Arrays.asList(new Object[][]{
+                {"host.com", "pattern.com", false},
+                {"host.com", ".pattern.com", false},
+                {"host.com", "*.pattern.com", false},
+                {"host.com", "*.host.com", false},
+                {"a.com", "*.host.com", false},
+                {".a.com", "*.host.com", false},
+                {"longhostname.com", "*.com", true},
+                {"longhostname.ru", "*.com", false},
+                {"host.com", "host.com", true},
+                {"sub.host.com", "host.com", false},
+                {"sub.host.com", "sub.host.com", true},
+                {"sub.host.com", "*.host.com", true},
+                {"Sub.host.com", "sub.host.com", true},
+                {"sub.host.com", "Sub.host.com", true},
+                {"sub.host.com", "*.hoSt.com", true},
+                {"*.host.com", "host.com", false},
+                {"sub.sub.host.com", "*.host.com", false}, // Wildcard should cover just one level
+                {"com", "*", false}, // Wildcard should have at least one dot
+        });
+    }
 
-  @MethodSource("data")
-  @ParameterizedTest(name = "host={0}, pattern={1}")
-  void checkPattern(String hostname, String pattern, boolean expected) throws Exception {
-    assertEquals(expected, LibPQFactory.verifyHostName(hostname, pattern), hostname + ", pattern: " + pattern);
+    @MethodSource("data")
+    @ParameterizedTest(name = "host={0}, pattern={1}")
+    void checkPattern(String hostname, String pattern, boolean expected) throws Exception {
+        assertEquals(expected, LibPQFactory.verifyHostName(hostname, pattern), hostname + ", pattern: " + pattern);
 
-    assertEquals(expected, PGjdbcHostnameVerifier.INSTANCE.verifyHostName(hostname, pattern), hostname + ", pattern: " + pattern);
-  }
+        assertEquals(expected, PGjdbcHostnameVerifier.INSTANCE.verifyHostName(hostname, pattern), hostname + ", pattern: " + pattern);
+    }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/ssl/PKCS12KeyTest.java b/pgjdbc/src/test/java/org/postgresql/test/ssl/PKCS12KeyTest.java
index ca6abbb..fbbeab0 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/ssl/PKCS12KeyTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/ssl/PKCS12KeyTest.java
@@ -5,85 +5,81 @@
 
 package org.postgresql.test.ssl;
 
-import static org.junit.jupiter.api.Assertions.assertNotNull;
-import static org.junit.jupiter.api.Assertions.assertNull;
-import static org.junit.jupiter.api.Assertions.assertTrue;
-
-import org.postgresql.PGProperty;
-import org.postgresql.ssl.PKCS12KeyManager;
-import org.postgresql.test.TestUtil;
-
-import org.junit.jupiter.api.Test;
-
 import java.io.IOException;
 import java.sql.Connection;
 import java.util.Properties;
-
 import javax.security.auth.callback.Callback;
 import javax.security.auth.callback.CallbackHandler;
 import javax.security.auth.callback.PasswordCallback;
 import javax.security.auth.callback.UnsupportedCallbackException;
 import javax.security.auth.x500.X500Principal;
+import org.junit.jupiter.api.Test;
+import org.postgresql.PGProperty;
+import org.postgresql.ssl.PKCS12KeyManager;
+import org.postgresql.test.TestUtil;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 class PKCS12KeyTest {
-  @Test
-  void TestGoodClientP12() throws Exception {
-    TestUtil.assumeSslTestsEnabled();
+    @Test
+    void TestGoodClientP12() throws Exception {
+        TestUtil.assumeSslTestsEnabled();
 
-    Properties props = new Properties();
-    props.put(TestUtil.DATABASE_PROP, "hostssldb");
-    PGProperty.SSL_MODE.set(props, "prefer");
-    PGProperty.SSL_KEY.set(props, TestUtil.getSslTestCertPath("goodclient.p12"));
+        Properties props = new Properties();
+        props.put(TestUtil.DATABASE_PROP, "hostssldb");
+        PGProperty.SSL_MODE.set(props, "prefer");
+        PGProperty.SSL_KEY.set(props, TestUtil.getSslTestCertPath("goodclient.p12"));
 
-    try (Connection conn = TestUtil.openDB(props)) {
-      boolean sslUsed = TestUtil.queryForBoolean(conn, "SELECT ssl_is_used()");
-      assertTrue(sslUsed, "SSL should be in use");
-    }
-  }
-
-  @Test
-  void TestChooseClientAlias() throws Exception {
-    PKCS12KeyManager pkcs12KeyManager = new PKCS12KeyManager(TestUtil.getSslTestCertPath("goodclient.p12"), new TestCallbackHandler("sslpwd"));
-    X500Principal testPrincipal = new X500Principal("CN=root certificate, O=PgJdbc test, ST=CA, C=US");
-    X500Principal[] issuers = new X500Principal[]{testPrincipal};
-
-    String validKeyType = pkcs12KeyManager.chooseClientAlias(new String[]{"RSA"}, issuers, null);
-    assertNotNull(validKeyType);
-
-    String ignoresCase = pkcs12KeyManager.chooseClientAlias(new String[]{"rsa"}, issuers, null);
-    assertNotNull(ignoresCase);
-
-    String invalidKeyType = pkcs12KeyManager.chooseClientAlias(new String[]{"EC"}, issuers, null);
-    assertNull(invalidKeyType);
-
-    String containsValidKeyType = pkcs12KeyManager.chooseClientAlias(new String[]{"EC", "RSA"}, issuers, null);
-    assertNotNull(containsValidKeyType);
-
-    String ignoresBlank = pkcs12KeyManager.chooseClientAlias(new String[]{}, issuers, null);
-    assertNotNull(ignoresBlank);
-  }
-
-  public static class TestCallbackHandler implements CallbackHandler {
-    char [] password;
-
-    TestCallbackHandler(String password) {
-      if (password != null) {
-        this.password = password.toCharArray();
-      }
-    }
-
-    @Override
-    public void handle(Callback[] callbacks) throws IOException, UnsupportedCallbackException {
-      for (Callback callback : callbacks) {
-        if (!(callback instanceof PasswordCallback)) {
-          throw new UnsupportedCallbackException(callback);
+        try (Connection conn = TestUtil.openDB(props)) {
+            boolean sslUsed = TestUtil.queryForBoolean(conn, "SELECT ssl_is_used()");
+            assertTrue(sslUsed, "SSL should be in use");
+        }
+    }
+
+    @Test
+    void TestChooseClientAlias() throws Exception {
+        PKCS12KeyManager pkcs12KeyManager = new PKCS12KeyManager(TestUtil.getSslTestCertPath("goodclient.p12"), new TestCallbackHandler("sslpwd"));
+        X500Principal testPrincipal = new X500Principal("CN=root certificate, O=PgJdbc test, ST=CA, C=US");
+        X500Principal[] issuers = new X500Principal[]{testPrincipal};
+
+        String validKeyType = pkcs12KeyManager.chooseClientAlias(new String[]{"RSA"}, issuers, null);
+        assertNotNull(validKeyType);
+
+        String ignoresCase = pkcs12KeyManager.chooseClientAlias(new String[]{"rsa"}, issuers, null);
+        assertNotNull(ignoresCase);
+
+        String invalidKeyType = pkcs12KeyManager.chooseClientAlias(new String[]{"EC"}, issuers, null);
+        assertNull(invalidKeyType);
+
+        String containsValidKeyType = pkcs12KeyManager.chooseClientAlias(new String[]{"EC", "RSA"}, issuers, null);
+        assertNotNull(containsValidKeyType);
+
+        String ignoresBlank = pkcs12KeyManager.chooseClientAlias(new String[]{}, issuers, null);
+        assertNotNull(ignoresBlank);
+    }
+
+    public static class TestCallbackHandler implements CallbackHandler {
+        char[] password;
+
+        TestCallbackHandler(String password) {
+            if (password != null) {
+                this.password = password.toCharArray();
+            }
+        }
+
+        @Override
+        public void handle(Callback[] callbacks) throws IOException, UnsupportedCallbackException {
+            for (Callback callback : callbacks) {
+                if (!(callback instanceof PasswordCallback)) {
+                    throw new UnsupportedCallbackException(callback);
+                }
+                PasswordCallback pwdCallback = (PasswordCallback) callback;
+                if (password != null) {
+                    pwdCallback.setPassword(password);
+                    continue;
+                }
+            }
         }
-        PasswordCallback pwdCallback = (PasswordCallback) callback;
-        if (password != null) {
-          pwdCallback.setPassword(password);
-          continue;
-        }
-      }
     }
-  }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/ssl/SingleCertValidatingFactoryTest.java b/pgjdbc/src/test/java/org/postgresql/test/ssl/SingleCertValidatingFactoryTest.java
index 97baadf..2563aa0 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/ssl/SingleCertValidatingFactoryTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/ssl/SingleCertValidatingFactoryTest.java
@@ -5,14 +5,6 @@
 
 package org.postgresql.test.ssl;
 
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.fail;
-
-import org.postgresql.test.TestUtil;
-
-import org.junit.jupiter.api.BeforeAll;
-import org.junit.jupiter.api.Test;
-
 import java.io.BufferedReader;
 import java.io.FileInputStream;
 import java.io.FileNotFoundException;
@@ -24,288 +16,292 @@ import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.sql.Statement;
 import java.util.Properties;
-
 import javax.net.ssl.SSLHandshakeException;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
+import org.postgresql.test.TestUtil;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.fail;
 
 public class SingleCertValidatingFactoryTest {
-  @BeforeAll
-  static void setUp() {
-    TestUtil.assumeSslTestsEnabled();
-  }
+    // The valid and invalid server SSL certificates:
+    private static final String goodServerCertPath = "../certdir/goodroot.crt";
+    private static final String badServerCertPath = "../certdir/badroot.crt";
 
-  // The valid and invalid server SSL certificates:
-  private static final String goodServerCertPath = "../certdir/goodroot.crt";
-  private static final String badServerCertPath = "../certdir/badroot.crt";
-
-  private String getGoodServerCert() {
-    return loadFile(goodServerCertPath);
-  }
-
-  private String getBadServerCert() {
-    return loadFile(badServerCertPath);
-  }
-
-  protected String getUsername() {
-    return System.getProperty("username");
-  }
-
-  protected String getPassword() {
-    return System.getProperty("password");
-  }
-
-  /**
-   * Tests whether a given throwable or one of it's root causes matches of a given class.
-   */
-  private boolean matchesExpected(Throwable t,
-      Class<? extends Throwable> expectedThrowable)
-      throws SQLException {
-    if (t == null || expectedThrowable == null) {
-      return false;
-    }
-    if (expectedThrowable.isAssignableFrom(t.getClass())) {
-      return true;
-    }
-    return matchesExpected(t.getCause(), expectedThrowable);
-  }
-
-  protected void testConnect(Properties info, boolean sslExpected) throws SQLException {
-    testConnect(info, sslExpected, null);
-  }
-
-  /**
-   * Connects to the database with the given connection properties and then verifies that connection
-   * is using SSL.
-   */
-  protected void testConnect(Properties info, boolean sslExpected, Class<? extends Throwable> expectedThrowable) throws SQLException {
-    info.setProperty(TestUtil.DATABASE_PROP, "hostdb");
-    try (Connection conn = TestUtil.openDB(info)) {
-      Statement stmt = conn.createStatement();
-      // Basic SELECT test:
-      ResultSet rs = stmt.executeQuery("SELECT 1");
-      rs.next();
-      assertEquals(1, rs.getInt(1));
-      rs.close();
-      // Verify SSL usage is as expected:
-      rs = stmt.executeQuery("SELECT ssl_is_used()");
-      rs.next();
-      boolean sslActual = rs.getBoolean(1);
-      assertEquals(sslExpected, sslActual);
-      stmt.close();
-    } catch (Exception e) {
-      if (matchesExpected(e, expectedThrowable)) {
-        // do nothing and just suppress the exception
-        return;
-      } else {
-        if (e instanceof RuntimeException) {
-          throw (RuntimeException) e;
-        } else if (e instanceof SQLException) {
-          throw (SQLException) e;
-        } else {
-          throw new RuntimeException(e);
-        }
-      }
+    @BeforeAll
+    static void setUp() {
+        TestUtil.assumeSslTestsEnabled();
     }
 
-    if (expectedThrowable != null) {
-      fail("Expected exception " + expectedThrowable.getName() + " but it did not occur.");
-    }
-  }
-
-  /**
-   * Connect using SSL and attempt to validate the server's certificate but don't actually provide
-   * it. This connection attempt should *fail* as the client should reject the server.
-   */
-  @Test
-  void connectSSLWithValidationNoCert() throws SQLException {
-    Properties info = new Properties();
-    info.setProperty("ssl", "true");
-    info.setProperty("sslfactory", "org.postgresql.ssl.DefaultJavaSSLFactory");
-    testConnect(info, true, SSLHandshakeException.class);
-  }
-
-  /**
-   * <p>Connect using SSL and attempt to validate the server's certificate against the wrong pre shared
-   * certificate. This test uses a pre generated certificate that will *not* match the test
-   * PostgreSQL server (the certificate is for properssl.example.com).</p>
-   *
-   * <p>This connection uses a custom SSLSocketFactory using a custom trust manager that validates the
-   * remote server's certificate against the pre shared certificate.</p>
-   *
-   * <p>This test should throw an exception as the client should reject the server since the
-   * certificate does not match.</p>
-   */
-  @Test
-  void connectSSLWithValidationWrongCert() throws SQLException, IOException {
-    Properties info = new Properties();
-    info.setProperty("ssl", "true");
-    info.setProperty("sslfactory", "org.postgresql.ssl.SingleCertValidatingFactory");
-    info.setProperty("sslfactoryarg", "file:" + badServerCertPath);
-    testConnect(info, true, SSLHandshakeException.class);
-  }
-
-  @Test
-  void fileCertInvalid() throws SQLException, IOException {
-    Properties info = new Properties();
-    info.setProperty("ssl", "true");
-    info.setProperty("sslfactory", "org.postgresql.ssl.SingleCertValidatingFactory");
-    info.setProperty("sslfactoryarg", "file:foo/bar/baz");
-    testConnect(info, true, FileNotFoundException.class);
-  }
-
-  @Test
-  void stringCertInvalid() throws SQLException, IOException {
-    Properties info = new Properties();
-    info.setProperty("ssl", "true");
-    info.setProperty("sslfactory", "org.postgresql.ssl.SingleCertValidatingFactory");
-    info.setProperty("sslfactoryarg", "foobar!");
-    testConnect(info, true, GeneralSecurityException.class);
-  }
-
-  /**
-   * Connect using SSL and attempt to validate the server's certificate against the proper pre
-   * shared certificate. The certificate is specified as a String. Note that the test read's the
-   * certificate from a local file.
-   */
-  @Test
-  void connectSSLWithValidationProperCertFile() throws SQLException, IOException {
-    Properties info = new Properties();
-    info.setProperty("ssl", "true");
-    info.setProperty("sslfactory", "org.postgresql.ssl.SingleCertValidatingFactory");
-    info.setProperty("sslfactoryarg", "file:" + goodServerCertPath);
-    testConnect(info, true);
-  }
-
-  /**
-   * Connect using SSL and attempt to validate the server's certificate against the proper pre
-   * shared certificate. The certificate is specified as a String (eg. the "----- BEGIN CERTIFICATE
-   * ----- ... etc").
-   */
-  @Test
-  void connectSSLWithValidationProperCertString() throws SQLException, IOException {
-    Properties info = new Properties();
-    info.setProperty("ssl", "true");
-    info.setProperty("sslfactory", "org.postgresql.ssl.SingleCertValidatingFactory");
-    info.setProperty("sslfactoryarg", getGoodServerCert());
-    testConnect(info, true);
-  }
-
-  /**
-   * Connect using SSL and attempt to validate the server's certificate against the proper pre
-   * shared certificate. The certificate is specified as a system property.
-   */
-  @Test
-  void connectSSLWithValidationProperCertSysProp() throws SQLException, IOException {
-    // System property name we're using for the SSL cert. This can be anything.
-    String sysPropName = "org.postgresql.jdbc.test.sslcert";
-
-    try {
-      System.setProperty(sysPropName, getGoodServerCert());
-
-      Properties info = new Properties();
-      info.setProperty("ssl", "true");
-      info.setProperty("sslfactory", "org.postgresql.ssl.SingleCertValidatingFactory");
-      info.setProperty("sslfactoryarg", "sys:" + sysPropName);
-      testConnect(info, true);
-    } finally {
-      // Clear it out when we're done:
-      System.setProperty(sysPropName, "");
-    }
-  }
-
-  /**
-   * <p>Connect using SSL and attempt to validate the server's certificate against the proper pre
-   * shared certificate. The certificate is specified as an environment variable.</p>
-   *
-   * <p>Note: To execute this test successfully you need to set the value of the environment variable
-   * DATASOURCE_SSL_CERT prior to running the test.</p>
-   *
-   * <p>Here's one way to do it: $ DATASOURCE_SSL_CERT=$(cat certdir/goodroot.crt) ant clean test</p>
-   */
-  @Test
-  void connectSSLWithValidationProperCertEnvVar() throws SQLException, IOException {
-    String envVarName = "DATASOURCE_SSL_CERT";
-    if (System.getenv(envVarName) == null) {
-      System.out.println(
-          "Skipping test connectSSLWithValidationProperCertEnvVar (env variable is not defined)");
-      return;
-    }
-
-    Properties info = new Properties();
-    info.setProperty("ssl", "true");
-    info.setProperty("sslfactory", "org.postgresql.ssl.SingleCertValidatingFactory");
-    info.setProperty("sslfactoryarg", "env:" + envVarName);
-    testConnect(info, true);
-  }
-
-  /**
-   * Connect using SSL using a system property to specify the SSL certificate but not actually
-   * having it set. This tests whether the proper exception is thrown.
-   */
-  @Test
-  void connectSSLWithValidationMissingSysProp() throws SQLException, IOException {
-    // System property name we're using for the SSL cert. This can be anything.
-    String sysPropName = "org.postgresql.jdbc.test.sslcert";
-
-    try {
-      System.setProperty(sysPropName, "");
-
-      Properties info = new Properties();
-      info.setProperty("ssl", "true");
-      info.setProperty("sslfactory", "org.postgresql.ssl.SingleCertValidatingFactory");
-      info.setProperty("sslfactoryarg", "sys:" + sysPropName);
-      testConnect(info, true, GeneralSecurityException.class);
-    } finally {
-      // Clear it out when we're done:
-      System.setProperty(sysPropName, "");
-    }
-  }
-
-  /**
-   * Connect using SSL using an environment var to specify the SSL certificate but not actually
-   * having it set. This tests whether the proper exception is thrown.
-   */
-  @Test
-  void connectSSLWithValidationMissingEnvVar() throws SQLException, IOException {
-    // Use an environment variable that does *not* exist:
-    String envVarName = "MISSING_DATASOURCE_SSL_CERT";
-    if (System.getenv(envVarName) != null) {
-      System.out
-          .println("Skipping test connectSSLWithValidationMissingEnvVar (env variable is defined)");
-      return;
-    }
-
-    Properties info = new Properties();
-    info.setProperty("ssl", "true");
-    info.setProperty("sslfactory", "org.postgresql.ssl.SingleCertValidatingFactory");
-    info.setProperty("sslfactoryarg", "env:" + envVarName);
-    testConnect(info, true, GeneralSecurityException.class);
-  }
-
-  ///////////////////////////////////////////////////////////////////
-
-  /**
-   * Utility function to load a file as a string.
-   */
-  public static String loadFile(String path) {
-    BufferedReader br = null;
-    try {
-      br = new BufferedReader(new InputStreamReader(new FileInputStream(path)));
-      StringBuilder sb = new StringBuilder();
-      String line;
-      while ((line = br.readLine()) != null) {
-        sb.append(line);
-        sb.append("\n");
-      }
-      return sb.toString();
-    } catch (IOException e) {
-      throw new RuntimeException(e);
-    } finally {
-      if (br != null) {
+    /**
+     * Utility function to load a file as a string.
+     */
+    public static String loadFile(String path) {
+        BufferedReader br = null;
         try {
-          br.close();
-        } catch (Exception e) {
+            br = new BufferedReader(new InputStreamReader(new FileInputStream(path)));
+            StringBuilder sb = new StringBuilder();
+            String line;
+            while ((line = br.readLine()) != null) {
+                sb.append(line);
+                sb.append("\n");
+            }
+            return sb.toString();
+        } catch (IOException e) {
+            throw new RuntimeException(e);
+        } finally {
+            if (br != null) {
+                try {
+                    br.close();
+                } catch (Exception e) {
+                }
+            }
         }
-      }
     }
-  }
+
+    private String getGoodServerCert() {
+        return loadFile(goodServerCertPath);
+    }
+
+    private String getBadServerCert() {
+        return loadFile(badServerCertPath);
+    }
+
+    protected String getUsername() {
+        return System.getProperty("username");
+    }
+
+    protected String getPassword() {
+        return System.getProperty("password");
+    }
+
+    /**
+     * Tests whether a given throwable or one of its root causes matches a given class.
+     */
+    private boolean matchesExpected(Throwable t,
+                                    Class<? extends Throwable> expectedThrowable)
+            throws SQLException {
+        if (t == null || expectedThrowable == null) {
+            return false;
+        }
+        if (expectedThrowable.isAssignableFrom(t.getClass())) {
+            return true;
+        }
+        return matchesExpected(t.getCause(), expectedThrowable);
+    }
+
+    protected void testConnect(Properties info, boolean sslExpected) throws SQLException {
+        testConnect(info, sslExpected, null);
+    }
+
+    /**
+     * Connects to the database with the given connection properties and then verifies that connection
+     * is using SSL.
+     */
+    protected void testConnect(Properties info, boolean sslExpected, Class<? extends Throwable> expectedThrowable) throws SQLException {
+        info.setProperty(TestUtil.DATABASE_PROP, "hostdb");
+        try (Connection conn = TestUtil.openDB(info)) {
+            Statement stmt = conn.createStatement();
+            // Basic SELECT test:
+            ResultSet rs = stmt.executeQuery("SELECT 1");
+            rs.next();
+            assertEquals(1, rs.getInt(1));
+            rs.close();
+            // Verify SSL usage is as expected:
+            rs = stmt.executeQuery("SELECT ssl_is_used()");
+            rs.next();
+            boolean sslActual = rs.getBoolean(1);
+            assertEquals(sslExpected, sslActual);
+            stmt.close();
+        } catch (Exception e) {
+            if (matchesExpected(e, expectedThrowable)) {
+                // do nothing and just suppress the exception
+                return;
+            } else {
+                if (e instanceof RuntimeException) {
+                    throw (RuntimeException) e;
+                } else if (e instanceof SQLException) {
+                    throw (SQLException) e;
+                } else {
+                    throw new RuntimeException(e);
+                }
+            }
+        }
+
+        if (expectedThrowable != null) {
+            fail("Expected exception " + expectedThrowable.getName() + " but it did not occur.");
+        }
+    }
+
+    /**
+     * Connect using SSL and attempt to validate the server's certificate but don't actually provide
+     * it. This connection attempt should *fail* as the client should reject the server.
+     */
+    @Test
+    void connectSSLWithValidationNoCert() throws SQLException {
+        Properties info = new Properties();
+        info.setProperty("ssl", "true");
+        info.setProperty("sslfactory", "org.postgresql.ssl.DefaultJavaSSLFactory");
+        testConnect(info, true, SSLHandshakeException.class);
+    }
+
+    /**
+     * <p>Connect using SSL and attempt to validate the server's certificate against the wrong pre shared
+     * certificate. This test uses a pre generated certificate that will *not* match the test
+     * PostgreSQL server (the certificate is for properssl.example.com).</p>
+     *
+     * <p>This connection uses a custom SSLSocketFactory using a custom trust manager that validates the
+     * remote server's certificate against the pre shared certificate.</p>
+     *
+     * <p>This test should throw an exception as the client should reject the server since the
+     * certificate does not match.</p>
+     */
+    @Test
+    void connectSSLWithValidationWrongCert() throws SQLException, IOException {
+        Properties info = new Properties();
+        info.setProperty("ssl", "true");
+        info.setProperty("sslfactory", "org.postgresql.ssl.SingleCertValidatingFactory");
+        info.setProperty("sslfactoryarg", "file:" + badServerCertPath);
+        testConnect(info, true, SSLHandshakeException.class);
+    }
+
+    @Test
+    void fileCertInvalid() throws SQLException, IOException {
+        Properties info = new Properties();
+        info.setProperty("ssl", "true");
+        info.setProperty("sslfactory", "org.postgresql.ssl.SingleCertValidatingFactory");
+        info.setProperty("sslfactoryarg", "file:foo/bar/baz");
+        testConnect(info, true, FileNotFoundException.class);
+    }
+
+    @Test
+    void stringCertInvalid() throws SQLException, IOException {
+        Properties info = new Properties();
+        info.setProperty("ssl", "true");
+        info.setProperty("sslfactory", "org.postgresql.ssl.SingleCertValidatingFactory");
+        info.setProperty("sslfactoryarg", "foobar!");
+        testConnect(info, true, GeneralSecurityException.class);
+    }
+
+    /**
+     * Connect using SSL and attempt to validate the server's certificate against the proper pre
+     * shared certificate. The certificate is specified as a String. Note that the test reads the
+     * certificate from a local file.
+     */
+    @Test
+    void connectSSLWithValidationProperCertFile() throws SQLException, IOException {
+        Properties info = new Properties();
+        info.setProperty("ssl", "true");
+        info.setProperty("sslfactory", "org.postgresql.ssl.SingleCertValidatingFactory");
+        info.setProperty("sslfactoryarg", "file:" + goodServerCertPath);
+        testConnect(info, true);
+    }
+
+    /**
+     * Connect using SSL and attempt to validate the server's certificate against the proper pre
+     * shared certificate. The certificate is specified as a String (e.g. the "----- BEGIN CERTIFICATE
+     * ----- ... etc").
+     */
+    @Test
+    void connectSSLWithValidationProperCertString() throws SQLException, IOException {
+        Properties info = new Properties();
+        info.setProperty("ssl", "true");
+        info.setProperty("sslfactory", "org.postgresql.ssl.SingleCertValidatingFactory");
+        info.setProperty("sslfactoryarg", getGoodServerCert());
+        testConnect(info, true);
+    }
+
+    /**
+     * Connect using SSL and attempt to validate the server's certificate against the proper pre
+     * shared certificate. The certificate is specified as a system property.
+     */
+    @Test
+    void connectSSLWithValidationProperCertSysProp() throws SQLException, IOException {
+        // System property name we're using for the SSL cert. This can be anything.
+        String sysPropName = "org.postgresql.jdbc.test.sslcert";
+
+        try {
+            System.setProperty(sysPropName, getGoodServerCert());
+
+            Properties info = new Properties();
+            info.setProperty("ssl", "true");
+            info.setProperty("sslfactory", "org.postgresql.ssl.SingleCertValidatingFactory");
+            info.setProperty("sslfactoryarg", "sys:" + sysPropName);
+            testConnect(info, true);
+        } finally {
+            // Clear it out when we're done:
+            System.setProperty(sysPropName, "");
+        }
+    }
+
+    /**
+     * <p>Connect using SSL and attempt to validate the server's certificate against the proper pre
+     * shared certificate. The certificate is specified as an environment variable.</p>
+     *
+     * <p>Note: To execute this test successfully you need to set the value of the environment variable
+     * DATASOURCE_SSL_CERT prior to running the test.</p>
+     *
+     * <p>Here's one way to do it: $ DATASOURCE_SSL_CERT=$(cat certdir/goodroot.crt) ant clean test</p>
+     */
+    @Test
+    void connectSSLWithValidationProperCertEnvVar() throws SQLException, IOException {
+        String envVarName = "DATASOURCE_SSL_CERT";
+        if (System.getenv(envVarName) == null) {
+            System.out.println(
+                    "Skipping test connectSSLWithValidationProperCertEnvVar (env variable is not defined)");
+            return;
+        }
+
+        Properties info = new Properties();
+        info.setProperty("ssl", "true");
+        info.setProperty("sslfactory", "org.postgresql.ssl.SingleCertValidatingFactory");
+        info.setProperty("sslfactoryarg", "env:" + envVarName);
+        testConnect(info, true);
+    }
+
+    /**
+     * Connect using SSL using a system property to specify the SSL certificate but not actually
+     * having it set. This tests whether the proper exception is thrown.
+     */
+    @Test
+    void connectSSLWithValidationMissingSysProp() throws SQLException, IOException {
+        // System property name we're using for the SSL cert. This can be anything.
+        String sysPropName = "org.postgresql.jdbc.test.sslcert";
+
+        try {
+            System.setProperty(sysPropName, "");
+
+            Properties info = new Properties();
+            info.setProperty("ssl", "true");
+            info.setProperty("sslfactory", "org.postgresql.ssl.SingleCertValidatingFactory");
+            info.setProperty("sslfactoryarg", "sys:" + sysPropName);
+            testConnect(info, true, GeneralSecurityException.class);
+        } finally {
+            // Clear it out when we're done:
+            System.setProperty(sysPropName, "");
+        }
+    }
+
+    ///////////////////////////////////////////////////////////////////
+
+    /**
+     * Connect using SSL using an environment var to specify the SSL certificate but not actually
+     * having it set. This tests whether the proper exception is thrown.
+     */
+    @Test
+    void connectSSLWithValidationMissingEnvVar() throws SQLException, IOException {
+        // Use an environment variable that does *not* exist:
+        String envVarName = "MISSING_DATASOURCE_SSL_CERT";
+        if (System.getenv(envVarName) != null) {
+            System.out
+                    .println("Skipping test connectSSLWithValidationMissingEnvVar (env variable is defined)");
+            return;
+        }
+
+        Properties info = new Properties();
+        info.setProperty("ssl", "true");
+        info.setProperty("sslfactory", "org.postgresql.ssl.SingleCertValidatingFactory");
+        info.setProperty("sslfactoryarg", "env:" + envVarName);
+        testConnect(info, true, GeneralSecurityException.class);
+    }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/ssl/SslTest.java b/pgjdbc/src/test/java/org/postgresql/test/ssl/SslTest.java
index c5c1536..f7a5b8c 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/ssl/SslTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/ssl/SslTest.java
@@ -5,18 +5,6 @@
 
 package org.postgresql.test.ssl;
 
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.fail;
-
-import org.postgresql.PGProperty;
-import org.postgresql.jdbc.GSSEncMode;
-import org.postgresql.jdbc.SslMode;
-import org.postgresql.test.TestUtil;
-import org.postgresql.util.PSQLState;
-
-import org.junit.jupiter.params.ParameterizedTest;
-import org.junit.jupiter.params.provider.MethodSource;
-
 import java.io.EOFException;
 import java.io.FileNotFoundException;
 import java.net.SocketException;
@@ -27,455 +15,463 @@ import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
 import java.util.Properties;
-
 import javax.net.ssl.SSLHandshakeException;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.MethodSource;
+import org.postgresql.PGProperty;
+import org.postgresql.jdbc.GSSEncMode;
+import org.postgresql.jdbc.SslMode;
+import org.postgresql.test.TestUtil;
+import org.postgresql.util.PSQLState;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.fail;
 
 public class SslTest {
-  enum Hostname {
-    GOOD("localhost"),
-    BAD("127.0.0.1"),
-    ;
+    public Hostname host;
+    public TestDatabase db;
+    public SslMode sslmode;
+    public ClientCertificate clientCertificate;
+    public ClientRootCertificate clientRootCertificate;
+    public GSSEncMode gssEncMode;
 
-    final String value;
+    public static Iterable<Object[]> data() {
+        TestUtil.assumeSslTestsEnabled();
 
-    Hostname(String value) {
-      this.value = value;
-    }
-  }
+        Collection<Object[]> tests = new ArrayList<>();
 
-  enum TestDatabase {
-    hostdb,
-    hostnossldb,
-    hostssldb,
-    hostsslcertdb,
-    certdb,
-    ;
-
-    public static final TestDatabase[] VALUES = values();
-
-    public boolean requiresClientCert() {
-      return this == certdb || this == hostsslcertdb;
-    }
-
-    public boolean requiresSsl() {
-      return this == certdb || this == hostssldb || this == hostsslcertdb;
-    }
-
-    public boolean rejectsSsl() {
-      return this == hostnossldb;
-    }
-  }
-
-  enum ClientCertificate {
-    EMPTY(""),
-    GOOD("goodclient"),
-    BAD("badclient"),
-    ;
-
-    public static final ClientCertificate[] VALUES = values();
-    public final String fileName;
-
-    ClientCertificate(String fileName) {
-      this.fileName = fileName;
-    }
-  }
-
-  enum ClientRootCertificate {
-    EMPTY(""),
-    GOOD("goodroot"),
-    BAD("badroot"),
-    ;
-
-    public static final ClientRootCertificate[] VALUES = values();
-    public final String fileName;
-
-    ClientRootCertificate(String fileName) {
-      this.fileName = fileName;
-    }
-  }
-
-  public Hostname host;
-  public TestDatabase db;
-  public SslMode sslmode;
-  public ClientCertificate clientCertificate;
-  public ClientRootCertificate clientRootCertificate;
-  public GSSEncMode gssEncMode;
-
-  public static Iterable<Object[]> data() {
-    TestUtil.assumeSslTestsEnabled();
-
-    Collection<Object[]> tests = new ArrayList<>();
-
-    for (SslMode sslMode : SslMode.VALUES) {
-      for (Hostname hostname : Hostname.values()) {
-        for (TestDatabase database : TestDatabase.VALUES) {
-          for (ClientCertificate clientCertificate : ClientCertificate.VALUES) {
-            for (ClientRootCertificate rootCertificate : ClientRootCertificate.VALUES) {
-              if ((sslMode == SslMode.DISABLE
-                  || database.rejectsSsl())
-                  && (clientCertificate != ClientCertificate.GOOD
-                  || rootCertificate != ClientRootCertificate.GOOD)) {
-                // When SSL is disabled, it does not make sense to verify "bad certificates"
-                // since certificates are NOT used in plaintext connections
-                continue;
-              }
-              if (database.rejectsSsl()
-                  && (sslMode.verifyCertificate()
-                      || hostname == Hostname.BAD)
-              ) {
-                // DB would reject SSL connection, so it makes no sense to test cases like verify-full
-                continue;
-              }
-              for (GSSEncMode gssEncMode : GSSEncMode.values()) {
-                if (gssEncMode == GSSEncMode.REQUIRE) {
-                  // TODO: support gss tests in /certdir/pg_hba.conf
-                  continue;
+        for (SslMode sslMode : SslMode.VALUES) {
+            for (Hostname hostname : Hostname.values()) {
+                for (TestDatabase database : TestDatabase.VALUES) {
+                    for (ClientCertificate clientCertificate : ClientCertificate.VALUES) {
+                        for (ClientRootCertificate rootCertificate : ClientRootCertificate.VALUES) {
+                            if ((sslMode == SslMode.DISABLE
+                                    || database.rejectsSsl())
+                                    && (clientCertificate != ClientCertificate.GOOD
+                                    || rootCertificate != ClientRootCertificate.GOOD)) {
+                                // When SSL is disabled, it does not make sense to verify "bad certificates"
+                                // since certificates are NOT used in plaintext connections
+                                continue;
+                            }
+                            if (database.rejectsSsl()
+                                    && (sslMode.verifyCertificate()
+                                    || hostname == Hostname.BAD)
+                            ) {
+                                // DB would reject SSL connection, so it makes no sense to test cases like verify-full
+                                continue;
+                            }
+                            for (GSSEncMode gssEncMode : GSSEncMode.values()) {
+                                if (gssEncMode == GSSEncMode.REQUIRE) {
+                                    // TODO: support gss tests in /certdir/pg_hba.conf
+                                    continue;
+                                }
+                                tests.add(new Object[]{hostname, database, sslMode, clientCertificate, rootCertificate, gssEncMode});
+                            }
+                        }
+                    }
                 }
-                tests.add(new Object[]{hostname, database, sslMode, clientCertificate, rootCertificate, gssEncMode});
-              }
             }
-          }
         }
-      }
+
+        return tests;
     }
 
-    return tests;
-  }
-
-  private static boolean contains(String value, String substring) {
-    return value != null && value.contains(substring);
-  }
-
-  private void assertClientCertRequired(SQLException e, String caseName) {
-    if (e == null) {
-      fail(caseName + " should result in failure of client validation");
-    }
-    assertEquals(PSQLState.INVALID_AUTHORIZATION_SPECIFICATION.getState(), e.getSQLState(), caseName + " ==> CONNECTION_FAILURE is expected");
-  }
-
-  private void checkErrorCodes(SQLException e) {
-    if (e != null && e.getCause() instanceof FileNotFoundException
-        && clientRootCertificate != ClientRootCertificate.EMPTY) {
-      fail("FileNotFoundException => it looks like a configuration failure");
+    private static boolean contains(String value, String substring) {
+        return value != null && value.contains(substring);
     }
 
-    if (e == null && sslmode == SslMode.ALLOW && !db.requiresSsl()) {
-      // allowed to connect with plain connection
-      return;
-    }
-
-    if (clientRootCertificate == ClientRootCertificate.EMPTY
-        && (sslmode == SslMode.VERIFY_CA || sslmode == SslMode.VERIFY_FULL)) {
-      String caseName = "rootCertificate is missing and sslmode=" + sslmode;
-      if (e == null) {
-        fail(caseName + " should result in FileNotFound exception for root certificate");
-      }
-      assertEquals(PSQLState.CONNECTION_FAILURE.getState(), e.getSQLState(), caseName + " ==> CONNECTION_FAILURE is expected");
-      FileNotFoundException fnf = findCause(e, FileNotFoundException.class);
-      if (fnf == null) {
-        fail(caseName + " ==> FileNotFoundException should be present in getCause chain");
-      }
-      return;
-    }
-
-    if (db.requiresSsl() && sslmode == SslMode.DISABLE) {
-      String caseName = "sslmode=DISABLE and database " + db + " requires SSL";
-      if (e == null) {
-        fail(caseName + " should result in connection failure");
-      }
-      assertEquals(PSQLState.INVALID_AUTHORIZATION_SPECIFICATION.getState(), e.getSQLState(), caseName + " ==> INVALID_AUTHORIZATION_SPECIFICATION is expected");
-      return;
-    }
-
-    if (db.rejectsSsl() && sslmode.requireEncryption()) {
-      String caseName =
-          "database " + db + " rejects SSL, and sslmode " + sslmode + " requires encryption";
-      if (e == null) {
-        fail(caseName + " should result in connection failure");
-      }
-      assertEquals(PSQLState.INVALID_AUTHORIZATION_SPECIFICATION.getState(), e.getSQLState(), caseName + " ==> INVALID_AUTHORIZATION_SPECIFICATION is expected");
-      return;
-    }
-
-    // Server certificate, server hostname, and client certificate can be validated in any order
-    // So we have three validators and expect at least one of them to match
-    List<AssertionError> errors = null;
-    try {
-      if (assertServerCertificate(e)) {
-        return;
-      }
-    } catch (AssertionError ae) {
-      errors = addError(errors, ae);
-    }
-
-    try {
-      if (assertServerHostname(e)) {
-        return;
-      }
-    } catch (AssertionError ae) {
-      errors = addError(errors, ae);
-    }
-
-    try {
-      if (assertClientCertificate(e)) {
-        return;
-      }
-    } catch (AssertionError ae) {
-      errors = addError(errors, ae);
-    }
-
-    if (sslmode == SslMode.ALLOW && db.requiresSsl()) {
-      // Allow tries to connect with non-ssl first, and it always throws the first error even after try SSL.
-      // "If SSL was expected to fail" (e.g. invalid certificate), and db requiresSsl, then ALLOW
-      // should fail as well
-      String caseName =
-          "sslmode=ALLOW and db " + db + " requires SSL, and there are expected SSL failures";
-      if (errors == null) {
-        if (e != null) {
-          fail(caseName + " ==> connection should be upgraded to SSL with no failures");
+    private static <T extends Throwable> T findCause(Throwable t,
+                                                     Class<T> cause) {
+        while (t != null) {
+            if (cause.isInstance(t)) {
+                return (T) t;
+            }
+            t = t.getCause();
         }
-      } else {
+        return null;
+    }
+
+    private void assertClientCertRequired(SQLException e, String caseName) {
+        if (e == null) {
+            fail(caseName + " should result in failure of client validation");
+        }
+        assertEquals(PSQLState.INVALID_AUTHORIZATION_SPECIFICATION.getState(), e.getSQLState(), caseName + " ==> CONNECTION_FAILURE is expected");
+    }
+
+    private void checkErrorCodes(SQLException e) {
+        if (e != null && e.getCause() instanceof FileNotFoundException
+                && clientRootCertificate != ClientRootCertificate.EMPTY) {
+            fail("FileNotFoundException => it looks like a configuration failure");
+        }
+
+        if (e == null && sslmode == SslMode.ALLOW && !db.requiresSsl()) {
+            // allowed to connect with plain connection
+            return;
+        }
+
+        if (clientRootCertificate == ClientRootCertificate.EMPTY
+                && (sslmode == SslMode.VERIFY_CA || sslmode == SslMode.VERIFY_FULL)) {
+            String caseName = "rootCertificate is missing and sslmode=" + sslmode;
+            if (e == null) {
+                fail(caseName + " should result in FileNotFound exception for root certificate");
+            }
+            assertEquals(PSQLState.CONNECTION_FAILURE.getState(), e.getSQLState(), caseName + " ==> CONNECTION_FAILURE is expected");
+            FileNotFoundException fnf = findCause(e, FileNotFoundException.class);
+            if (fnf == null) {
+                fail(caseName + " ==> FileNotFoundException should be present in getCause chain");
+            }
+            return;
+        }
+
+        if (db.requiresSsl() && sslmode == SslMode.DISABLE) {
+            String caseName = "sslmode=DISABLE and database " + db + " requires SSL";
+            if (e == null) {
+                fail(caseName + " should result in connection failure");
+            }
+            assertEquals(PSQLState.INVALID_AUTHORIZATION_SPECIFICATION.getState(), e.getSQLState(), caseName + " ==> INVALID_AUTHORIZATION_SPECIFICATION is expected");
+            return;
+        }
+
+        if (db.rejectsSsl() && sslmode.requireEncryption()) {
+            String caseName =
+                    "database " + db + " rejects SSL, and sslmode " + sslmode + " requires encryption";
+            if (e == null) {
+                fail(caseName + " should result in connection failure");
+            }
+            assertEquals(PSQLState.INVALID_AUTHORIZATION_SPECIFICATION.getState(), e.getSQLState(), caseName + " ==> INVALID_AUTHORIZATION_SPECIFICATION is expected");
+            return;
+        }
+
+        // Server certificate, server hostname, and client certificate can be validated in any order
+        // So we have three validators and expect at least one of them to match
+        List<AssertionError> errors = null;
         try {
-          if (e == null) {
-            fail(caseName + " ==> connection should fail");
-          }
-          assertEquals(PSQLState.INVALID_AUTHORIZATION_SPECIFICATION.getState(), e.getSQLState(), caseName + " ==> INVALID_AUTHORIZATION_SPECIFICATION is expected");
-        } catch (AssertionError er) {
-          for (AssertionError error : errors) {
-            er.addSuppressed(error);
-          }
-          throw er;
+            if (assertServerCertificate(e)) {
+                return;
+            }
+        } catch (AssertionError ae) {
+            errors = addError(errors, ae);
         }
-      }
-      // ALLOW is ok
-      return;
+
+        try {
+            if (assertServerHostname(e)) {
+                return;
+            }
+        } catch (AssertionError ae) {
+            errors = addError(errors, ae);
+        }
+
+        try {
+            if (assertClientCertificate(e)) {
+                return;
+            }
+        } catch (AssertionError ae) {
+            errors = addError(errors, ae);
+        }
+
+        if (sslmode == SslMode.ALLOW && db.requiresSsl()) {
+            // Allow tries to connect with non-ssl first, and it always throws the first error even after try SSL.
+            // "If SSL was expected to fail" (e.g. invalid certificate), and db requiresSsl, then ALLOW
+            // should fail as well
+            String caseName =
+                    "sslmode=ALLOW and db " + db + " requires SSL, and there are expected SSL failures";
+            if (errors == null) {
+                if (e != null) {
+                    fail(caseName + " ==> connection should be upgraded to SSL with no failures");
+                }
+            } else {
+                try {
+                    if (e == null) {
+                        fail(caseName + " ==> connection should fail");
+                    }
+                    assertEquals(PSQLState.INVALID_AUTHORIZATION_SPECIFICATION.getState(), e.getSQLState(), caseName + " ==> INVALID_AUTHORIZATION_SPECIFICATION is expected");
+                } catch (AssertionError er) {
+                    for (AssertionError error : errors) {
+                        er.addSuppressed(error);
+                    }
+                    throw er;
+                }
+            }
+            // ALLOW is ok
+            return;
+        }
+
+        if (errors == null) {
+            if (e == null) {
+                // Assume "no exception" was expected.
+                // The cases like "successfully connected in sslmode=DISABLE to SSLONLY db"
+                // should be handled with assertions above
+                return;
+            }
+            fail("SQLException present when it was not expected");
+        }
+
+        AssertionError firstError = errors.get(0);
+        if (errors.size() == 1) {
+            throw firstError;
+        }
+
+        for (int i = 1; i < errors.size(); i++) {
+            AssertionError error = errors.get(i);
+            firstError.addSuppressed(error);
+        }
+
+        throw firstError;
     }
 
-    if (errors == null) {
-      if (e == null) {
-        // Assume "no exception" was expected.
-        // The cases like "successfully connected in sslmode=DISABLE to SSLONLY db"
-        // should be handled with assertions above
-        return;
-      }
-      fail("SQLException present when it was not expected");
+    private List<AssertionError> addError(List<AssertionError> errors, AssertionError ae) {
+        if (errors == null) {
+            errors = new ArrayList<>();
+        }
+        errors.add(ae);
+        return errors;
     }
 
-    AssertionError firstError = errors.get(0);
-    if (errors.size() == 1) {
-      throw firstError;
+    /**
+     * Checks server certificate validation error.
+     *
+     * @param e connection exception or null if no exception
+     * @return true when validation pass, false when the case is not applicable
+     * @throws AssertionError when exception does not match expectations
+     */
+    private boolean assertServerCertificate(SQLException e) {
+        if (clientRootCertificate == ClientRootCertificate.GOOD
+                || (sslmode != SslMode.VERIFY_CA && sslmode != SslMode.VERIFY_FULL)) {
+            return false;
+        }
+
+        String caseName = "Server certificate is " + clientRootCertificate + " + sslmode=" + sslmode;
+        if (e == null) {
+            fail(caseName + " should result in failure of server validation");
+        }
+
+        assertEquals(PSQLState.CONNECTION_FAILURE.getState(), e.getSQLState(), caseName + " ==> CONNECTION_FAILURE is expected");
+        CertPathValidatorException validatorEx = findCause(e, CertPathValidatorException.class);
+        if (validatorEx == null) {
+            fail(caseName + " ==> exception should be caused by CertPathValidatorException,"
+                    + " but no CertPathValidatorException is present in the getCause chain");
+        }
+        assertEquals("NO_TRUST_ANCHOR", validatorEx.getReason().toString(), caseName + " ==> CertPathValidatorException.getReason");
+        return true;
     }
 
-    for (int i = 1; i < errors.size(); i++) {
-      AssertionError error = errors.get(i);
-      firstError.addSuppressed(error);
+    /**
+     * Checks hostname validation error.
+     *
+     * @param e connection exception or null if no exception
+     * @return true when validation pass, false when the case is not applicable
+     * @throws AssertionError when exception does not match expectations
+     */
+    private boolean assertServerHostname(SQLException e) {
+        if (sslmode != SslMode.VERIFY_FULL || host != Hostname.BAD) {
+            return false;
+        }
+
+        String caseName = "VERIFY_FULL + hostname that does not match server certificate";
+        if (e == null) {
+            fail(caseName + " ==> CONNECTION_FAILURE expected");
+        }
+        assertEquals(PSQLState.CONNECTION_FAILURE.getState(), e.getSQLState(), caseName + " ==> CONNECTION_FAILURE is expected");
+        String message = e.getMessage();
+        if (message == null || !message.contains("PgjdbcHostnameVerifier")) {
+            fail(caseName + " ==> message should contain"
+                    + " 'PgjdbcHostnameVerifier'. Actual message is " + message);
+        }
+        return true;
     }
 
-    throw firstError;
-  }
+    /**
+     * Checks client certificate validation error.
+     *
+     * @param e connection exception or null if no exception
+     * @return true when validation pass, false when the case is not applicable
+     * @throws AssertionError when exception does not match expectations
+     */
+    private boolean assertClientCertificate(SQLException e) {
+        if (db.requiresClientCert() && clientCertificate == ClientCertificate.EMPTY) {
+            String caseName =
+                    "client certificate was not sent and database " + db + " requires client certificate";
+            assertClientCertRequired(e, caseName);
+            return true;
+        }
 
-  private List<AssertionError> addError(List<AssertionError> errors, AssertionError ae) {
-    if (errors == null) {
-      errors = new ArrayList<>();
-    }
-    errors.add(ae);
-    return errors;
-  }
+        if (clientCertificate != ClientCertificate.BAD) {
+            return false;
+        }
+        // Server verifies certificate no matter how it is configured, so sending BAD one
+        // is doomed to fail
+        String caseName = "BAD client certificate, and database " + db + " requires one";
+        if (e == null) {
+            fail(caseName + " should result in failure of client validation");
+        }
+        // Note: Java's SSLSocket handshake does NOT process alert messages
+        // even if they are present on the wire. This looks like a perfectly valid
+        // handshake, however, the subsequent read from the stream (e.g. during startup
+        // message) discovers the alert message (e.g. "Received fatal alert: decrypt_error")
+        // and converts that to exception.
+        // That is why "CONNECTION_UNABLE_TO_CONNECT" is listed here for BAD client cert.
+        // Ideally, handshake failure should be detected during the handshake, not after sending the startup
+        // message
+        if (!PSQLState.CONNECTION_FAILURE.getState().equals(e.getSQLState())
+                && !(clientCertificate == ClientCertificate.BAD
+                && PSQLState.CONNECTION_UNABLE_TO_CONNECT.getState().equals(e.getSQLState()))
+        ) {
+            fail(caseName + " ==> CONNECTION_FAILURE(08006)"
+                    + " or CONNECTION_UNABLE_TO_CONNECT(08001) is expected"
+                    + ", got " + e.getSQLState());
+        }
 
-  /**
-   * Checks server certificate validation error.
-   *
-   * @param e connection exception or null if no exception
-   * @return true when validation pass, false when the case is not applicable
-   * @throws AssertionError when exception does not match expectations
-   */
-  private boolean assertServerCertificate(SQLException e) {
-    if (clientRootCertificate == ClientRootCertificate.GOOD
-        || (sslmode != SslMode.VERIFY_CA && sslmode != SslMode.VERIFY_FULL)) {
-      return false;
+        // Three exceptions are possible
+        // SSLHandshakeException: Received fatal alert: unknown_ca
+        // EOFException
+        // SocketException: broken pipe (write failed)
+
+        // decrypt_error does not look to be a valid case, however, we allow it for now
+        // SSLHandshakeException: Received fatal alert: decrypt_error
+
+        SocketException brokenPipe = findCause(e, SocketException.class);
+        if (brokenPipe != null) {
+            if (!contains(brokenPipe.getMessage(), "Broken pipe")) {
+                fail(
+                        caseName + " ==> server should have terminated the connection (broken pipe expected)"
+                                + ", actual exception was " + brokenPipe.getMessage());
+            }
+            return true;
+        }
+
+        EOFException eofException = findCause(e, EOFException.class);
+        if (eofException != null) {
+            return true;
+        }
+
+        SSLHandshakeException handshakeException = findCause(e, SSLHandshakeException.class);
+        if (handshakeException != null) {
+            final String handshakeMessage = handshakeException.getMessage();
+            if (!contains(handshakeMessage, "unknown_ca")
+                    && !contains(handshakeMessage, "decrypt_error")) {
+                fail(
+                        caseName
+                                + " ==> server should have terminated the connection (expected 'unknown_ca' or 'decrypt_error')"
+                                + ", actual exception was " + handshakeMessage);
+            }
+            return true;
+        }
+
+        fail(caseName + " ==> exception should be caused by SocketException(broken pipe)"
+                + " or EOFException,"
+                + " or SSLHandshakeException. No exceptions of such kind are present in the getCause chain");
+        return false;
     }
 
-    String caseName = "Server certificate is " + clientRootCertificate + " + sslmode=" + sslmode;
-    if (e == null) {
-      fail(caseName + " should result in failure of server validation");
+    @MethodSource("data")
+    @ParameterizedTest(name = "host={0}, db={1} sslMode={2}, cCert={3}, cRootCert={4}, gssEncMode={5}")
+    void run(Hostname host, TestDatabase db, SslMode sslmode, ClientCertificate clientCertificate, ClientRootCertificate clientRootCertificate, GSSEncMode gssEncMode) throws SQLException {
+        initSslTest(host, db, sslmode, clientCertificate, clientRootCertificate, gssEncMode);
+        Properties props = new Properties();
+        props.put(TestUtil.SERVER_HOST_PORT_PROP, host.value + ":" + TestUtil.getPort());
+        props.put(TestUtil.DATABASE_PROP, db.toString());
+        PGProperty.SSL_MODE.set(props, sslmode.value);
+        PGProperty.GSS_ENC_MODE.set(props, gssEncMode.value);
+        if (clientCertificate == ClientCertificate.EMPTY) {
+            PGProperty.SSL_CERT.set(props, "");
+            PGProperty.SSL_KEY.set(props, "");
+        } else {
+            PGProperty.SSL_CERT.set(props, TestUtil.getSslTestCertPath(clientCertificate.fileName + ".crt"));
+            PGProperty.SSL_KEY.set(props, TestUtil.getSslTestCertPath(clientCertificate.fileName + ".pk8"));
+        }
+        if (clientRootCertificate == ClientRootCertificate.EMPTY) {
+            PGProperty.SSL_ROOT_CERT.set(props, "");
+        } else {
+            PGProperty.SSL_ROOT_CERT.set(props, TestUtil.getSslTestCertPath(clientRootCertificate.fileName + ".crt"));
+        }
+
+        try (Connection conn = TestUtil.openDB(props)) {
+            boolean sslUsed = TestUtil.queryForBoolean(conn, "SELECT ssl_is_used()");
+            if (sslmode == SslMode.ALLOW) {
+                assertEquals(db.requiresSsl(), sslUsed, "SSL should be used if the DB requires SSL");
+            } else {
+                assertEquals(sslmode != SslMode.DISABLE && !db.rejectsSsl(), sslUsed, "SSL should be used unless it is disabled or the DB rejects it");
+            }
+        } catch (SQLException e) {
+            try {
+                // Note that checkErrorCodes throws AssertionError for unexpected cases
+                checkErrorCodes(e);
+            } catch (AssertionError ae) {
+                // Make sure original SQLException is printed as well even in case of AssertionError
+                ae.initCause(e);
+                throw ae;
+            }
+        }
     }
 
-    assertEquals(PSQLState.CONNECTION_FAILURE.getState(), e.getSQLState(), caseName + " ==> CONNECTION_FAILURE is expected");
-    CertPathValidatorException validatorEx = findCause(e, CertPathValidatorException.class);
-    if (validatorEx == null) {
-      fail(caseName + " ==> exception should be caused by CertPathValidatorException,"
-          + " but no CertPathValidatorException is present in the getCause chain");
-    }
-    assertEquals("NO_TRUST_ANCHOR", validatorEx.getReason().toString(), caseName + " ==> CertPathValidatorException.getReason");
-    return true;
-  }
-
-  /**
-   * Checks hostname validation error.
-   *
-   * @param e connection exception or null if no exception
-   * @return true when validation pass, false when the case is not applicable
-   * @throws AssertionError when exception does not match expectations
-   */
-  private boolean assertServerHostname(SQLException e) {
-    if (sslmode != SslMode.VERIFY_FULL || host != Hostname.BAD) {
-      return false;
+    public void initSslTest(Hostname host, TestDatabase db, SslMode sslmode, ClientCertificate clientCertificate, ClientRootCertificate clientRootCertificate, GSSEncMode gssEncMode) {
+        this.host = host;
+        this.db = db;
+        this.sslmode = sslmode;
+        this.clientCertificate = clientCertificate;
+        this.clientRootCertificate = clientRootCertificate;
+        this.gssEncMode = gssEncMode;
     }
 
-    String caseName = "VERIFY_FULL + hostname that does not match server certificate";
-    if (e == null) {
-      fail(caseName + " ==> CONNECTION_FAILURE expected");
-    }
-    assertEquals(PSQLState.CONNECTION_FAILURE.getState(), e.getSQLState(), caseName + " ==> CONNECTION_FAILURE is expected");
-    String message = e.getMessage();
-    if (message == null || !message.contains("PgjdbcHostnameVerifier")) {
-      fail(caseName + " ==> message should contain"
-          + " 'PgjdbcHostnameVerifier'. Actual message is " + message);
-    }
-    return true;
-  }
+    enum Hostname {
+        GOOD("localhost"),
+        BAD("127.0.0.1"),
+        ;
 
-  /**
-   * Checks client certificate validation error.
-   *
-   * @param e connection exception or null if no exception
-   * @return true when validation pass, false when the case is not applicable
-   * @throws AssertionError when exception does not match expectations
-   */
-  private boolean assertClientCertificate(SQLException e) {
-    if (db.requiresClientCert() && clientCertificate == ClientCertificate.EMPTY) {
-      String caseName =
-          "client certificate was not sent and database " + db + " requires client certificate";
-      assertClientCertRequired(e, caseName);
-      return true;
+        final String value;
+
+        Hostname(String value) {
+            this.value = value;
+        }
     }
 
-    if (clientCertificate != ClientCertificate.BAD) {
-      return false;
-    }
-    // Server verifies certificate no matter how it is configured, so sending BAD one
-    // is doomed to fail
-    String caseName = "BAD client certificate, and database " + db + " requires one";
-    if (e == null) {
-      fail(caseName + " should result in failure of client validation");
-    }
-    // Note: Java's SSLSocket handshake does NOT process alert messages
-    // even if they are present on the wire. This looks like a perfectly valid
-    // handshake, however, the subsequent read from the stream (e.g. during startup
-    // message) discovers the alert message (e.g. "Received fatal alert: decrypt_error")
-    // and converts that to exception.
-    // That is why "CONNECTION_UNABLE_TO_CONNECT" is listed here for BAD client cert.
-    // Ideally, handshake failure should be detected during the handshake, not after sending the startup
-    // message
-    if (!PSQLState.CONNECTION_FAILURE.getState().equals(e.getSQLState())
-        && !(clientCertificate == ClientCertificate.BAD
-        && PSQLState.CONNECTION_UNABLE_TO_CONNECT.getState().equals(e.getSQLState()))
-    ) {
-      fail(caseName + " ==> CONNECTION_FAILURE(08006)"
-              + " or CONNECTION_UNABLE_TO_CONNECT(08001) is expected"
-              + ", got " + e.getSQLState());
+    enum TestDatabase {
+        hostdb,
+        hostnossldb,
+        hostssldb,
+        hostsslcertdb,
+        certdb,
+        ;
+
+        public static final TestDatabase[] VALUES = values();
+
+        public boolean requiresClientCert() {
+            return this == certdb || this == hostsslcertdb;
+        }
+
+        public boolean requiresSsl() {
+            return this == certdb || this == hostssldb || this == hostsslcertdb;
+        }
+
+        public boolean rejectsSsl() {
+            return this == hostnossldb;
+        }
     }
 
-    // Three exceptions are possible
-    // SSLHandshakeException: Received fatal alert: unknown_ca
-    // EOFException
-    // SocketException: broken pipe (write failed)
+    enum ClientCertificate {
+        EMPTY(""),
+        GOOD("goodclient"),
+        BAD("badclient"),
+        ;
 
-    // decrypt_error does not look to be a valid case, however, we allow it for now
-    // SSLHandshakeException: Received fatal alert: decrypt_error
+        public static final ClientCertificate[] VALUES = values();
+        public final String fileName;
 
-    SocketException brokenPipe = findCause(e, SocketException.class);
-    if (brokenPipe != null) {
-      if (!contains(brokenPipe.getMessage(), "Broken pipe")) {
-        fail(
-            caseName + " ==> server should have terminated the connection (broken pipe expected)"
-                + ", actual exception was " + brokenPipe.getMessage());
-      }
-      return true;
+        ClientCertificate(String fileName) {
+            this.fileName = fileName;
+        }
     }
 
-    EOFException eofException = findCause(e, EOFException.class);
-    if (eofException != null) {
-      return true;
-    }
+    enum ClientRootCertificate {
+        EMPTY(""),
+        GOOD("goodroot"),
+        BAD("badroot"),
+        ;
 
-    SSLHandshakeException handshakeException = findCause(e, SSLHandshakeException.class);
-    if (handshakeException != null) {
-      final String handshakeMessage = handshakeException.getMessage();
-      if (!contains(handshakeMessage, "unknown_ca")
-          && !contains(handshakeMessage, "decrypt_error")) {
-        fail(
-            caseName
-                + " ==> server should have terminated the connection (expected 'unknown_ca' or 'decrypt_error')"
-                + ", actual exception was " + handshakeMessage);
-      }
-      return true;
-    }
+        public static final ClientRootCertificate[] VALUES = values();
+        public final String fileName;
 
-    fail(caseName + " ==> exception should be caused by SocketException(broken pipe)"
-        + " or EOFException,"
-        + " or SSLHandshakeException. No exceptions of such kind are present in the getCause chain");
-    return false;
-  }
-
-  private static <T extends Throwable> T findCause(Throwable t,
-      Class<T> cause) {
-    while (t != null) {
-      if (cause.isInstance(t)) {
-        return (T) t;
-      }
-      t = t.getCause();
+        ClientRootCertificate(String fileName) {
+            this.fileName = fileName;
+        }
     }
-    return null;
-  }
-
-  @MethodSource("data")
-  @ParameterizedTest(name = "host={0}, db={1} sslMode={2}, cCert={3}, cRootCert={4}, gssEncMode={5}")
-  void run(Hostname host, TestDatabase db, SslMode sslmode, ClientCertificate clientCertificate, ClientRootCertificate clientRootCertificate, GSSEncMode gssEncMode) throws SQLException {
-    initSslTest(host, db, sslmode, clientCertificate, clientRootCertificate, gssEncMode);
-    Properties props = new Properties();
-    props.put(TestUtil.SERVER_HOST_PORT_PROP, host.value + ":" + TestUtil.getPort());
-    props.put(TestUtil.DATABASE_PROP, db.toString());
-    PGProperty.SSL_MODE.set(props, sslmode.value);
-    PGProperty.GSS_ENC_MODE.set(props, gssEncMode.value);
-    if (clientCertificate == ClientCertificate.EMPTY) {
-      PGProperty.SSL_CERT.set(props, "");
-      PGProperty.SSL_KEY.set(props, "");
-    } else {
-      PGProperty.SSL_CERT.set(props, TestUtil.getSslTestCertPath(clientCertificate.fileName + ".crt"));
-      PGProperty.SSL_KEY.set(props, TestUtil.getSslTestCertPath(clientCertificate.fileName + ".pk8"));
-    }
-    if (clientRootCertificate == ClientRootCertificate.EMPTY) {
-      PGProperty.SSL_ROOT_CERT.set(props, "");
-    } else {
-      PGProperty.SSL_ROOT_CERT.set(props, TestUtil.getSslTestCertPath(clientRootCertificate.fileName + ".crt"));
-    }
-
-    try (Connection conn = TestUtil.openDB(props)) {
-      boolean sslUsed = TestUtil.queryForBoolean(conn, "SELECT ssl_is_used()");
-      if (sslmode == SslMode.ALLOW) {
-        assertEquals(db.requiresSsl(), sslUsed, "SSL should be used if the DB requires SSL");
-      } else {
-        assertEquals(sslmode != SslMode.DISABLE && !db.rejectsSsl(), sslUsed, "SSL should be used unless it is disabled or the DB rejects it");
-      }
-    } catch (SQLException e) {
-      try {
-        // Note that checkErrorCodes throws AssertionError for unexpected cases
-        checkErrorCodes(e);
-      } catch (AssertionError ae) {
-        // Make sure original SQLException is printed as well even in case of AssertionError
-        ae.initCause(e);
-        throw ae;
-      }
-    }
-  }
-
-  public void initSslTest(Hostname host, TestDatabase db, SslMode sslmode, ClientCertificate clientCertificate, ClientRootCertificate clientRootCertificate, GSSEncMode gssEncMode) {
-    this.host = host;
-    this.db = db;
-    this.sslmode = sslmode;
-    this.clientCertificate = clientCertificate;
-    this.clientRootCertificate = clientRootCertificate;
-    this.gssEncMode = gssEncMode;
-  }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/ssl/SslTestSuite.java b/pgjdbc/src/test/java/org/postgresql/test/ssl/SslTestSuite.java
index 95e439c..5499bcc 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/ssl/SslTestSuite.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/ssl/SslTestSuite.java
@@ -10,10 +10,10 @@ import org.junit.runners.Suite;
 
 @RunWith(Suite.class)
 @Suite.SuiteClasses({
-    CommonNameVerifierTest.class,
-    LazyKeyManagerTest.class,
-    LibPQFactoryHostNameTest.class,
-    SslTest.class,
+        CommonNameVerifierTest.class,
+        LazyKeyManagerTest.class,
+        LibPQFactoryHostNameTest.class,
+        SslTest.class,
 })
 public class SslTestSuite {
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/sspi/SSPITest.java b/pgjdbc/src/test/java/org/postgresql/test/sspi/SSPITest.java
index ac4acb3..85c968c 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/sspi/SSPITest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/sspi/SSPITest.java
@@ -5,77 +5,74 @@
 
 package org.postgresql.test.sspi;
 
-import static org.hamcrest.CoreMatchers.containsString;
-import static org.hamcrest.CoreMatchers.is;
-import static org.hamcrest.MatcherAssert.assertThat;
-import static org.hamcrest.junit.MatcherAssume.assumeThat;
-import static org.junit.jupiter.api.Assertions.fail;
-
-import org.postgresql.PGProperty;
-import org.postgresql.test.TestUtil;
-import org.postgresql.util.PSQLException;
-import org.postgresql.util.PSQLState;
-
-import org.junit.jupiter.api.BeforeAll;
-import org.junit.jupiter.api.Disabled;
-import org.junit.jupiter.api.Test;
-
 import java.sql.Connection;
 import java.sql.Statement;
 import java.util.Locale;
 import java.util.Properties;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Disabled;
+import org.junit.jupiter.api.Test;
+import org.postgresql.PGProperty;
+import org.postgresql.test.TestUtil;
+import org.postgresql.util.PSQLException;
+import org.postgresql.util.PSQLState;
+import static org.hamcrest.CoreMatchers.containsString;
+import static org.hamcrest.CoreMatchers.is;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.Assume.assumeThat;
+import static org.junit.jupiter.api.Assertions.fail;
 
 /*
-* These tests require a working SSPI authentication setup
-* in the database server that allows the executing user
-* to authenticate as the "sspiusername" in the build
-* configuration.
-*/
+ * These tests require a working SSPI authentication setup
+ * in the database server that allows the executing user
+ * to authenticate as the "sspiusername" in the build
+ * configuration.
+ */
 class SSPITest {
 
-  /*
-   * SSPI only exists on Windows.
-   */
-  @BeforeAll
-  static void checkPlatform() {
-    assumeThat("SSPI not supported on this platform",
-               System.getProperty("os.name").toLowerCase(Locale.ROOT),
-               containsString("windows"));
-  }
-
-  /*
-   * Tests that SSPI login succeeds and a query can be run.
-   */
-  @Test
-  @Disabled
-  void authorized() throws Exception {
-    Properties props = new Properties();
-    PGProperty.USER.set(props, TestUtil.getSSPIUser());
-
-    Connection con = TestUtil.openDB(props);
-
-    Statement stmt = con.createStatement();
-    stmt.executeQuery("SELECT 1");
-
-    TestUtil.closeDB(con);
-  }
-
-  /*
-   * Tests that SSPI login fails with an unknown/unauthorized
-   * user name.
-   */
-  @Test
-  void unauthorized() throws Exception {
-    Properties props = new Properties();
-    PGProperty.USER.set(props, "invalid" + TestUtil.getSSPIUser());
-
-    try {
-      Connection con = TestUtil.openDB(props);
-      TestUtil.closeDB(con);
-      fail("Expected a PSQLException");
-    } catch (PSQLException e) {
-      assertThat(e.getSQLState(), is(PSQLState.INVALID_PASSWORD.getState()));
+    /*
+     * SSPI only exists on Windows.
+     */
+    @BeforeAll
+    static void checkPlatform() {
+        assumeThat("SSPI not supported on this platform",
+                System.getProperty("os.name").toLowerCase(Locale.ROOT),
+                containsString("windows"));
+    }
+
+    /*
+     * Tests that SSPI login succeeds and a query can be run.
+     */
+    @Test
+    @Disabled
+    void authorized() throws Exception {
+        Properties props = new Properties();
+        PGProperty.USER.set(props, TestUtil.getSSPIUser());
+
+        Connection con = TestUtil.openDB(props);
+
+        Statement stmt = con.createStatement();
+        stmt.executeQuery("SELECT 1");
+
+        TestUtil.closeDB(con);
+    }
+
+    /*
+     * Tests that SSPI login fails with an unknown/unauthorized
+     * user name.
+     */
+    @Test
+    void unauthorized() throws Exception {
+        Properties props = new Properties();
+        PGProperty.USER.set(props, "invalid" + TestUtil.getSSPIUser());
+
+        try {
+            Connection con = TestUtil.openDB(props);
+            TestUtil.closeDB(con);
+            fail("Expected a PSQLException");
+        } catch (PSQLException e) {
+            assertThat(e.getSQLState(), is(PSQLState.INVALID_PASSWORD.getState()));
+        }
     }
-  }
 
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/sspi/SSPITestSuite.java b/pgjdbc/src/test/java/org/postgresql/test/sspi/SSPITestSuite.java
index 6e1ae33..4aba92f 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/sspi/SSPITestSuite.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/sspi/SSPITestSuite.java
@@ -11,5 +11,5 @@ package org.postgresql.test.sspi;
 //@RunWith(Suite.class)
 //@Suite.SuiteClasses({ SSPITest.class })
 public class SSPITestSuite {
-  // Empty.
+    // Empty.
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/util/Await.java b/pgjdbc/src/test/java/org/postgresql/test/util/Await.java
similarity index 61%
rename from pgjdbc/src/test/java/org/postgresql/util/Await.java
rename to pgjdbc/src/test/java/org/postgresql/test/util/Await.java
index c40c44c..b9051f4 100644
--- a/pgjdbc/src/test/java/org/postgresql/util/Await.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/util/Await.java
@@ -20,22 +20,22 @@
  * limitations under the License.
  */
 
-package org.postgresql.util;
+package org.postgresql.test.util;
 
 import java.time.Duration;
 
 public class Await {
-  public static void until(String message, Duration timeout, Condition condition) throws InterruptedException {
-    long deadline = System.currentTimeMillis() + timeout.toMillis();
-    while (!condition.get()) {
-      if (System.currentTimeMillis() > deadline) {
-        throw new AssertionError("Condition not met within " + timeout + ": " + message);
-      }
-      Thread.sleep(100);
+    public static void until(String message, Duration timeout, Condition condition) throws InterruptedException {
+        long deadline = System.currentTimeMillis() + timeout.toMillis();
+        while (!condition.get()) {
+            if (System.currentTimeMillis() > deadline) {
+                throw new AssertionError("Condition not met within " + timeout + ": " + message);
+            }
+            Thread.sleep(100);
+        }
     }
-  }
 
-  public interface Condition {
-    boolean get();
-  }
+    public interface Condition {
+        boolean get();
+    }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/util/BigDecimalByteConverterTest.java b/pgjdbc/src/test/java/org/postgresql/test/util/BigDecimalByteConverterTest.java
new file mode 100644
index 0000000..593de4f
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/util/BigDecimalByteConverterTest.java
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2020, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.test.util;
+
+import java.math.BigDecimal;
+import java.math.BigInteger;
+import java.util.ArrayList;
+import java.util.Collection;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.MethodSource;
+import org.postgresql.util.ByteConverter;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+/**
+ * @author Brett Okken
+ */
+public class BigDecimalByteConverterTest {
+    public static Iterable<Object[]> data() {
+        final Collection<Object[]> numbers = new ArrayList<>();
+        numbers.add(new Object[]{new BigDecimal("0.1")});
+        numbers.add(new Object[]{new BigDecimal("0.10")});
+        numbers.add(new Object[]{new BigDecimal("0.01")});
+        numbers.add(new Object[]{new BigDecimal("0.001")});
+        numbers.add(new Object[]{new BigDecimal("0.0001")});
+        numbers.add(new Object[]{new BigDecimal("0.00001")});
+        numbers.add(new Object[]{new BigDecimal("1.0")});
+        numbers.add(new Object[]{new BigDecimal("0.000000000000000000000000000000000000000000000000000")});
+        numbers.add(new Object[]{new BigDecimal("0.100000000000000000000000000000000000000000000009900")});
+        numbers.add(new Object[]{new BigDecimal("-1.0")});
+        numbers.add(new Object[]{new BigDecimal("-1")});
+        numbers.add(new Object[]{new BigDecimal("1.2")});
+        numbers.add(new Object[]{new BigDecimal("-2.05")});
+        numbers.add(new Object[]{new BigDecimal("0.000000000000000000000000000990")});
+        numbers.add(new Object[]{new BigDecimal("-0.000000000000000000000000000990")});
+        numbers.add(new Object[]{new BigDecimal("10.0000000000099")});
+        numbers.add(new Object[]{new BigDecimal(".10000000000000")});
+        numbers.add(new Object[]{new BigDecimal("1.10000000000000")});
+        numbers.add(new Object[]{new BigDecimal("99999.2")});
+        numbers.add(new Object[]{new BigDecimal("99999")});
+        numbers.add(new Object[]{new BigDecimal("-99999.2")});
+        numbers.add(new Object[]{new BigDecimal("-99999")});
+        numbers.add(new Object[]{new BigDecimal("2147483647")});
+        numbers.add(new Object[]{new BigDecimal("-2147483648")});
+        numbers.add(new Object[]{new BigDecimal("2147483648")});
+        numbers.add(new Object[]{new BigDecimal("-2147483649")});
+        numbers.add(new Object[]{new BigDecimal("9223372036854775807")});
+        numbers.add(new Object[]{new BigDecimal("-9223372036854775808")});
+        numbers.add(new Object[]{new BigDecimal("9223372036854775808")});
+        numbers.add(new Object[]{new BigDecimal("-9223372036854775809")});
+        numbers.add(new Object[]{new BigDecimal("10223372036850000000")});
+        numbers.add(new Object[]{new BigDecimal("19223372036854775807")});
+        numbers.add(new Object[]{new BigDecimal("19223372036854775807.300")});
+        numbers.add(new Object[]{new BigDecimal("-19223372036854775807.300")});
+        numbers.add(new Object[]{new BigDecimal(BigInteger.valueOf(1234567890987654321L), -1)});
+        numbers.add(new Object[]{new BigDecimal(BigInteger.valueOf(1234567890987654321L), -5)});
+        numbers.add(new Object[]{new BigDecimal(BigInteger.valueOf(-1234567890987654321L), -3)});
+        numbers.add(new Object[]{new BigDecimal(BigInteger.valueOf(6), -8)});
+        numbers.add(new Object[]{new BigDecimal("30000")});
+        numbers.add(new Object[]{new BigDecimal("40000").setScale(15)});
+        numbers.add(new Object[]{new BigDecimal("20000.000000000000000000")});
+        numbers.add(new Object[]{new BigDecimal("9990000").setScale(8)});
+        numbers.add(new Object[]{new BigDecimal("1000000").setScale(31)});
+        numbers.add(new Object[]{new BigDecimal("10000000000000000000000000000000000000").setScale(14)});
+        numbers.add(new Object[]{new BigDecimal("90000000000000000000000000000000000000")});
+        return numbers;
+    }
+
+    static void testBinaryConversion(BigDecimal number) {
+        final byte[] bytes = ByteConverter.numeric(number);
+        final BigDecimal actual = (BigDecimal) ByteConverter.numeric(bytes);
+        if (number.scale() >= 0) {
+            assertEquals(number, actual);
+        } else {
+            assertEquals(number.toPlainString(), actual.toPlainString());
+        }
+    }
+
+    @MethodSource("data")
+    @ParameterizedTest(name = "number = {0,number,#,###.##################################################}")
+    void binary(BigDecimal number) {
+        testBinaryConversion(number);
+    }
+
+    @Test
+    void bigDecimal10_pow_131072_minus_1() {
+        testBinaryConversion(
+                new BigDecimal(BigInteger.TEN.pow(131072).subtract(BigInteger.ONE))
+        );
+    }
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/util/BrokenInputStream.java b/pgjdbc/src/test/java/org/postgresql/test/util/BrokenInputStream.java
index 9224c05..80c2f2a 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/util/BrokenInputStream.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/util/BrokenInputStream.java
@@ -10,22 +10,22 @@ import java.io.InputStream;
 
 public class BrokenInputStream extends InputStream {
 
-  private final InputStream is;
-  private final long breakOn;
-  private long numRead;
+    private final InputStream is;
+    private final long breakOn;
+    private long numRead;
 
-  public BrokenInputStream(InputStream is, long breakOn) {
-    this.is = is;
-    this.breakOn = breakOn;
-    this.numRead = 0;
-  }
-
-  @Override
-  public int read() throws IOException {
-    if (breakOn > numRead++) {
-      throw new IOException("I was told to break on " + breakOn);
+    public BrokenInputStream(InputStream is, long breakOn) {
+        this.is = is;
+        this.breakOn = breakOn;
+        this.numRead = 0;
     }
 
-    return is.read();
-  }
+    @Override
+    public int read() throws IOException {
+        if (breakOn > numRead++) {
+            throw new IOException("I was told to break on " + breakOn);
+        }
+
+        return is.read();
+    }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/util/BufferGenerator.java b/pgjdbc/src/test/java/org/postgresql/test/util/BufferGenerator.java
index 2ad2c47..a9e1352 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/util/BufferGenerator.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/util/BufferGenerator.java
@@ -16,30 +16,30 @@ import java.util.Random;
  * Created by amozhenin on 30.09.2015.
  */
 public class BufferGenerator {
-  public static final int ROW_COUNT = 100000;
+    public static final int ROW_COUNT = 100000;
 
-  public static void main(String[] args) throws Exception {
-    Random random = new Random();
-    random.setSeed(new Date().getTime());
-    OutputStream out = null;
-    try {
-      File outFile = new File("target", "buffer.txt");
-      outFile.getParentFile().mkdir();
-      out = new BufferedOutputStream(new FileOutputStream(outFile));
-      for (long i = 0; i < ROW_COUNT; i++) {
-        StringBuffer line = new StringBuffer();
-        line.append("VERY_LONG_LINE_TO_ASSIST_IN_DETECTION_OF_ISSUE_366_#_").append(i).append('\t');
-        int letter = random.nextInt(26); // don't really care about uniformity for a test
-        char character = (char) ((int) 'A' + letter); // black magic
-        line.append("VERY_LONG_STRING_TO_REPRODUCE_ISSUE_366_").append(character).append(character);
-        line.append(character).append('\t').append(random.nextDouble()).append('\n');
-        out.write(line.toString().getBytes("UTF-8"));
-      }
-    } finally {
-      if (out != null) {
-        out.close();
-      }
+    public static void main(String[] args) throws Exception {
+        Random random = new Random();
+        random.setSeed(new Date().getTime());
+        OutputStream out = null;
+        try {
+            File outFile = new File("target", "buffer.txt");
+            outFile.getParentFile().mkdir();
+            out = new BufferedOutputStream(new FileOutputStream(outFile));
+            for (long i = 0; i < ROW_COUNT; i++) {
+                StringBuffer line = new StringBuffer();
+                line.append("VERY_LONG_LINE_TO_ASSIST_IN_DETECTION_OF_ISSUE_366_#_").append(i).append('\t');
+                int letter = random.nextInt(26); // don't really care about uniformity for a test
+                char character = (char) ((int) 'A' + letter); // black magic
+                line.append("VERY_LONG_STRING_TO_REPRODUCE_ISSUE_366_").append(character).append(character);
+                line.append(character).append('\t').append(random.nextDouble()).append('\n');
+                out.write(line.toString().getBytes("UTF-8"));
+            }
+        } finally {
+            if (out != null) {
+                out.close();
+            }
+        }
     }
-  }
 
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/util/ByteBufferByteStreamWriterTest.java b/pgjdbc/src/test/java/org/postgresql/test/util/ByteBufferByteStreamWriterTest.java
index 940331f..33e6650 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/util/ByteBufferByteStreamWriterTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/util/ByteBufferByteStreamWriterTest.java
@@ -5,70 +5,67 @@
 
 package org.postgresql.test.util;
 
-import static org.junit.jupiter.api.Assertions.assertArrayEquals;
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.fail;
-
-import org.postgresql.util.ByteBufferByteStreamWriter;
-import org.postgresql.util.ByteStreamWriter;
-
-import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.Test;
-
 import java.io.ByteArrayOutputStream;
 import java.io.IOException;
 import java.io.OutputStream;
 import java.nio.ByteBuffer;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.postgresql.util.ByteBufferByteStreamWriter;
+import org.postgresql.util.ByteStreamWriter;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.fail;
 
 class ByteBufferByteStreamWriterTest {
 
-  private ByteArrayOutputStream targetStream;
-  private byte[] data;
-  private ByteBufferByteStreamWriter writer;
+    private ByteArrayOutputStream targetStream;
+    private byte[] data;
+    private ByteBufferByteStreamWriter writer;
 
-  @BeforeEach
-  void setUp() throws Exception {
-    targetStream = new ByteArrayOutputStream();
-    data = new byte[]{1, 2, 3, 4};
-    ByteBuffer buffer = ByteBuffer.wrap(data);
-    writer = new ByteBufferByteStreamWriter(buffer);
-  }
-
-  @Test
-  void reportsLengthCorrectly() {
-    assertEquals(4, writer.getLength(), "Incorrect length reported");
-  }
-
-  @Test
-  void copiesDataCorrectly() throws IOException {
-    writer.writeTo(target(targetStream));
-    byte[] written = targetStream.toByteArray();
-    assertArrayEquals(data, written, "Incorrect data written to target stream");
-  }
-
-  @Test
-  void propagatesException() throws IOException {
-    final IOException e = new IOException("oh no");
-    OutputStream errorStream = new OutputStream() {
-      @Override
-      public void write(int b) throws IOException {
-        throw e;
-      }
-    };
-    try {
-      writer.writeTo(target(errorStream));
-      fail("No exception thrown");
-    } catch (IOException caught) {
-      assertEquals(caught, e, "Exception was thrown that wasn't the expected one");
+    private static ByteStreamWriter.ByteStreamTarget target(final OutputStream stream) {
+        return new ByteStreamWriter.ByteStreamTarget() {
+            @Override
+            public OutputStream getOutputStream() {
+                return stream;
+            }
+        };
     }
-  }
 
-  private static ByteStreamWriter.ByteStreamTarget target(final OutputStream stream) {
-    return new ByteStreamWriter.ByteStreamTarget() {
-      @Override
-      public OutputStream getOutputStream() {
-        return stream;
-      }
-    };
-  }
+    @BeforeEach
+    void setUp() throws Exception {
+        targetStream = new ByteArrayOutputStream();
+        data = new byte[]{1, 2, 3, 4};
+        ByteBuffer buffer = ByteBuffer.wrap(data);
+        writer = new ByteBufferByteStreamWriter(buffer);
+    }
+
+    @Test
+    void reportsLengthCorrectly() {
+        assertEquals(4, writer.getLength(), "Incorrect length reported");
+    }
+
+    @Test
+    void copiesDataCorrectly() throws IOException {
+        writer.writeTo(target(targetStream));
+        byte[] written = targetStream.toByteArray();
+        assertArrayEquals(data, written, "Incorrect data written to target stream");
+    }
+
+    @Test
+    void propagatesException() throws IOException {
+        final IOException e = new IOException("oh no");
+        OutputStream errorStream = new OutputStream() {
+            @Override
+            public void write(int b) throws IOException {
+                throw e;
+            }
+        };
+        try {
+            writer.writeTo(target(errorStream));
+            fail("No exception thrown");
+        } catch (IOException caught) {
+            assertEquals(caught, e, "Exception was thrown that wasn't the expected one");
+        }
+    }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/util/ByteStreamWriterTest.java b/pgjdbc/src/test/java/org/postgresql/test/util/ByteStreamWriterTest.java
index 34069e3..f3e252a 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/util/ByteStreamWriterTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/util/ByteStreamWriterTest.java
@@ -5,259 +5,256 @@
 
 package org.postgresql.test.util;
 
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import org.postgresql.test.TestUtil;
-import org.postgresql.test.jdbc2.BaseTest4;
-import org.postgresql.util.ByteBufferByteStreamWriter;
-import org.postgresql.util.ByteStreamWriter;
-
-import org.junit.Test;
-
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.util.Random;
+import org.junit.Test;
+import org.postgresql.test.TestUtil;
+import org.postgresql.test.jdbc2.BaseTest4;
+import org.postgresql.util.ByteBufferByteStreamWriter;
+import org.postgresql.util.ByteStreamWriter;
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 public class ByteStreamWriterTest extends BaseTest4 {
 
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-    assumeByteaSupported();
-    TestUtil.createTempTable(con, "images", "img bytea");
-  }
-
-  private ByteBuffer testData(int size) {
-    ByteBuffer data = ByteBuffer.allocate(size);
-    Random random = new Random(31459);
-    while (data.remaining() > 8) {
-      data.putLong(random.nextLong());
-    }
-    while (data.remaining() > 0) {
-      data.put((byte) (random.nextInt() % 256));
-    }
-    data.rewind();
-    return data;
-  }
-
-  private void insertStream(ByteBuffer testData) throws Exception {
-    insertStream(testData, null);
-  }
-
-  private void insertStream(ByteBuffer testData, Integer lengthOverride) throws Exception {
-    insertStream(new TestByteBufferByteStreamWriter(testData, lengthOverride));
-  }
-
-  private void insertStream(ByteStreamWriter writer) throws Exception {
-    PreparedStatement updatePS = con.prepareStatement(TestUtil.insertSQL("images", "img", "?"));
-    try {
-      updatePS.setObject(1, writer);
-      updatePS.executeUpdate();
-    } finally {
-      updatePS.close();
-    }
-  }
-
-  private void validateContent(ByteBuffer data) throws Exception {
-    validateContent(data.array());
-  }
-
-  private void validateContent(byte [] data) throws Exception {
-    PreparedStatement selectPS = con.prepareStatement(TestUtil.selectSQL("images", "img"));
-    try {
-      ResultSet rs = selectPS.executeQuery();
-      try {
-        rs.next();
-        byte[] actualData = rs.getBytes(1);
-        assertArrayEquals("Sent and received data are not the same", data, actualData);
-      } finally {
-        rs.close();
-      }
-    } finally {
-      selectPS.close();
-    }
-  }
-
-  @Test
-  public void testEmpty() throws Exception {
-    ByteBuffer testData = testData(0);
-    insertStream(testData);
-    validateContent(testData);
-  }
-
-  @Test
-  public void testLength2Kb() throws Exception {
-    ByteBuffer testData = testData(2 * 1024);
-    insertStream(testData);
-    validateContent(testData);
-  }
-
-  @Test
-  public void testLength37b() throws Exception {
-    ByteBuffer testData = testData(37);
-    insertStream(testData);
-    validateContent(testData);
-  }
-
-  @Test
-  public void testLength2KbReadOnly() throws Exception {
-    ByteBuffer testData = testData(2 * 1024);
-    // Read-only buffer does not provide access to the array, so we test it separately
-    insertStream(testData.asReadOnlyBuffer());
-    validateContent(testData);
-  }
-
-  @Test
-  public void testTwoBuffers() throws Exception {
-    ByteBuffer testData = testData(20);
-    ByteBuffer part1 = testData.duplicate();
-    part1.position(0);
-    part1.limit(9);
-    ByteBuffer part2 = testData.duplicate();
-    part2.position(part1.limit());
-    part2.limit(testData.limit());
-    // Read-only buffer does not provide access to the array, so we test it separately
-    insertStream(ByteStreamWriter.of(part1, part2));
-    validateContent(testData);
-  }
-
-  @Test
-  public void testThreeBuffersWithReadonly() throws Exception {
-    ByteBuffer testData = testData(20);
-    ByteBuffer part1 = testData.duplicate();
-    part1.position(0);
-    part1.limit(9);
-    ByteBuffer part2 = testData.duplicate();
-    part2.position(part1.limit());
-    part2.limit(15);
-    ByteBuffer part3 = testData.duplicate();
-    part3.position(part2.limit());
-    part3.limit(testData.limit());
-    // Read-only buffer does not provide access to the array, so we test it separately
-    insertStream(ByteStreamWriter.of(part1, part2.asReadOnlyBuffer(), part3));
-    validateContent(testData);
-  }
-
-  @Test
-  public void testLength10Kb() throws Exception {
-    ByteBuffer testData = testData(10 * 1024);
-    insertStream(testData);
-    validateContent(testData);
-  }
-
-  @Test
-  public void testLength100Kb() throws Exception {
-    ByteBuffer testData = testData(100 * 1024);
-    insertStream(testData);
-    validateContent(testData);
-  }
-
-  @Test
-  public void testLength200Kb() throws Exception {
-    ByteBuffer testData = testData(200 * 1024);
-    insertStream(testData);
-    validateContent(testData);
-  }
-
-  @Test
-  public void testLengthGreaterThanContent() throws Exception {
-    ByteBuffer testData = testData(8);
-    insertStream(testData, 10);
-    byte[] expectedData = new byte[10];
-    testData.rewind();
-    testData.get(expectedData, 0, 8);
-    // other two bytes are zeroed out, which the jvm does for us automatically
-    validateContent(expectedData);
-  }
-
-  @Test
-  public void testLengthLessThanContent() throws Exception {
-    ByteBuffer testData = testData(8);
-    try {
-      insertStream(testData, 4);
-      fail("did not throw exception when too much content");
-    } catch (SQLException e) {
-      Throwable cause = e.getCause();
-      assertTrue("cause wan't an IOException", cause instanceof IOException);
-      assertEquals("Incorrect exception message",
-          cause.getMessage(), "Attempt to write more than the specified 4 bytes");
-    }
-  }
-
-  @Test
-  public void testIOExceptionPassedThroughAsCause() throws Exception {
-    IOException e = new IOException("oh no");
-    try {
-      insertStream(new ExceptionThrowingByteStreamWriter(e));
-      fail("did not throw exception when IOException thrown");
-    } catch (SQLException sqle) {
-      Throwable cause = sqle.getCause();
-      assertEquals("Incorrect exception cause", e, cause);
-    }
-  }
-
-  @Test
-  public void testRuntimeExceptionPassedThroughAsIOException() throws Exception {
-    RuntimeException e = new RuntimeException("oh no");
-    try {
-      insertStream(new ExceptionThrowingByteStreamWriter(e));
-      fail("did not throw exception when RuntimeException thrown");
-    } catch (SQLException sqle) {
-      Throwable cause = sqle.getCause();
-      assertTrue("cause wan't an IOException", cause instanceof IOException);
-      assertEquals("Incorrect exception message",
-          cause.getMessage(), "Error writing bytes to stream");
-      Throwable nestedCause = cause.getCause();
-      assertEquals("Incorrect exception cause", e, nestedCause);
-    }
-  }
-
-  /**
-   * Allows testing where reported length doesn't match what the stream writer attempts
-   */
-  private static class TestByteBufferByteStreamWriter extends ByteBufferByteStreamWriter {
-
-    private final Integer lengthOverride;
-
-    private TestByteBufferByteStreamWriter(ByteBuffer buf, Integer lengthOverride) {
-      super(buf);
-      this.lengthOverride = lengthOverride;
-    }
-
     @Override
-    public int getLength() {
-      return lengthOverride != null ? lengthOverride : super.getLength();
-    }
-  }
-
-  private static class ExceptionThrowingByteStreamWriter implements ByteStreamWriter {
-
-    private final Throwable cause;
-
-    private ExceptionThrowingByteStreamWriter(Throwable cause) {
-      assertTrue(cause instanceof RuntimeException || cause instanceof IOException);
-      this.cause = cause;
+    public void setUp() throws Exception {
+        super.setUp();
+        assumeByteaSupported();
+        TestUtil.createTempTable(con, "images", "img bytea");
     }
 
-    @Override
-    public int getLength() {
-      return 1;
+    private ByteBuffer testData(int size) {
+        ByteBuffer data = ByteBuffer.allocate(size);
+        Random random = new Random(31459);
+        while (data.remaining() > 8) {
+            data.putLong(random.nextLong());
+        }
+        while (data.remaining() > 0) {
+            data.put((byte) (random.nextInt() % 256));
+        }
+        data.rewind();
+        return data;
     }
 
-    @Override
-    public void writeTo(ByteStreamTarget target) throws IOException {
-      if (cause instanceof RuntimeException) {
-        throw (RuntimeException) cause;
-      } else if (cause instanceof IOException) {
-        throw (IOException) cause;
-      }
+    private void insertStream(ByteBuffer testData) throws Exception {
+        insertStream(testData, null);
+    }
+
+    private void insertStream(ByteBuffer testData, Integer lengthOverride) throws Exception {
+        insertStream(new TestByteBufferByteStreamWriter(testData, lengthOverride));
+    }
+
+    private void insertStream(ByteStreamWriter writer) throws Exception {
+        PreparedStatement updatePS = con.prepareStatement(TestUtil.insertSQL("images", "img", "?"));
+        try {
+            updatePS.setObject(1, writer);
+            updatePS.executeUpdate();
+        } finally {
+            updatePS.close();
+        }
+    }
+
+    private void validateContent(ByteBuffer data) throws Exception {
+        validateContent(data.array());
+    }
+
+    private void validateContent(byte[] data) throws Exception {
+        PreparedStatement selectPS = con.prepareStatement(TestUtil.selectSQL("images", "img"));
+        try {
+            ResultSet rs = selectPS.executeQuery();
+            try {
+                rs.next();
+                byte[] actualData = rs.getBytes(1);
+                assertArrayEquals("Sent and received data are not the same", data, actualData);
+            } finally {
+                rs.close();
+            }
+        } finally {
+            selectPS.close();
+        }
+    }
+
+    @Test
+    public void testEmpty() throws Exception {
+        ByteBuffer testData = testData(0);
+        insertStream(testData);
+        validateContent(testData);
+    }
+
+    @Test
+    public void testLength2Kb() throws Exception {
+        ByteBuffer testData = testData(2 * 1024);
+        insertStream(testData);
+        validateContent(testData);
+    }
+
+    @Test
+    public void testLength37b() throws Exception {
+        ByteBuffer testData = testData(37);
+        insertStream(testData);
+        validateContent(testData);
+    }
+
+    @Test
+    public void testLength2KbReadOnly() throws Exception {
+        ByteBuffer testData = testData(2 * 1024);
+        // Read-only buffer does not provide access to the array, so we test it separately
+        insertStream(testData.asReadOnlyBuffer());
+        validateContent(testData);
+    }
+
+    @Test
+    public void testTwoBuffers() throws Exception {
+        ByteBuffer testData = testData(20);
+        ByteBuffer part1 = testData.duplicate();
+        part1.position(0);
+        part1.limit(9);
+        ByteBuffer part2 = testData.duplicate();
+        part2.position(part1.limit());
+        part2.limit(testData.limit());
+        // Splits the test data into two consecutive slices and writes them as one stream
+        insertStream(ByteStreamWriter.of(part1, part2));
+        validateContent(testData);
+    }
+
+    @Test
+    public void testThreeBuffersWithReadonly() throws Exception {
+        ByteBuffer testData = testData(20);
+        ByteBuffer part1 = testData.duplicate();
+        part1.position(0);
+        part1.limit(9);
+        ByteBuffer part2 = testData.duplicate();
+        part2.position(part1.limit());
+        part2.limit(15);
+        ByteBuffer part3 = testData.duplicate();
+        part3.position(part2.limit());
+        part3.limit(testData.limit());
+        // part2 is read-only: such buffers expose no backing array, so this path is tested separately
+        insertStream(ByteStreamWriter.of(part1, part2.asReadOnlyBuffer(), part3));
+        validateContent(testData);
+    }
+
+    @Test
+    public void testLength10Kb() throws Exception {
+        ByteBuffer testData = testData(10 * 1024);
+        insertStream(testData);
+        validateContent(testData);
+    }
+
+    @Test
+    public void testLength100Kb() throws Exception {
+        ByteBuffer testData = testData(100 * 1024);
+        insertStream(testData);
+        validateContent(testData);
+    }
+
+    @Test
+    public void testLength200Kb() throws Exception {
+        ByteBuffer testData = testData(200 * 1024);
+        insertStream(testData);
+        validateContent(testData);
+    }
+
+    @Test
+    public void testLengthGreaterThanContent() throws Exception {
+        ByteBuffer testData = testData(8);
+        insertStream(testData, 10);
+        byte[] expectedData = new byte[10];
+        testData.rewind();
+        testData.get(expectedData, 0, 8);
+        // other two bytes are zeroed out, which the jvm does for us automatically
+        validateContent(expectedData);
+    }
+
+    @Test
+    public void testLengthLessThanContent() throws Exception {
+        ByteBuffer testData = testData(8);
+        try {
+            insertStream(testData, 4);
+            fail("did not throw exception when too much content");
+        } catch (SQLException e) {
+            Throwable cause = e.getCause();
+            assertTrue("cause wan't an IOException", cause instanceof IOException);
+            assertEquals("Incorrect exception message",
+                    cause.getMessage(), "Attempt to write more than the specified 4 bytes");
+        }
+    }
+
+    @Test
+    public void testIOExceptionPassedThroughAsCause() throws Exception {
+        IOException e = new IOException("oh no");
+        try {
+            insertStream(new ExceptionThrowingByteStreamWriter(e));
+            fail("did not throw exception when IOException thrown");
+        } catch (SQLException sqle) {
+            Throwable cause = sqle.getCause();
+            assertEquals("Incorrect exception cause", e, cause);
+        }
+    }
+
+    @Test
+    public void testRuntimeExceptionPassedThroughAsIOException() throws Exception {
+        RuntimeException e = new RuntimeException("oh no");
+        try {
+            insertStream(new ExceptionThrowingByteStreamWriter(e));
+            fail("did not throw exception when RuntimeException thrown");
+        } catch (SQLException sqle) {
+            Throwable cause = sqle.getCause();
+            assertTrue("cause wan't an IOException", cause instanceof IOException);
+            assertEquals("Incorrect exception message",
+                    cause.getMessage(), "Error writing bytes to stream");
+            Throwable nestedCause = cause.getCause();
+            assertEquals("Incorrect exception cause", e, nestedCause);
+        }
+    }
+
+    /**
+     * Allows testing cases where the reported length doesn't match what the stream writer attempts to write.
+     */
+    private static class TestByteBufferByteStreamWriter extends ByteBufferByteStreamWriter {
+
+        private final Integer lengthOverride;
+
+        private TestByteBufferByteStreamWriter(ByteBuffer buf, Integer lengthOverride) {
+            super(buf);
+            this.lengthOverride = lengthOverride;
+        }
+
+        @Override
+        public int getLength() {
+            return lengthOverride != null ? lengthOverride : super.getLength();
+        }
+    }
+
+    private static class ExceptionThrowingByteStreamWriter implements ByteStreamWriter {
+
+        private final Throwable cause;
+
+        private ExceptionThrowingByteStreamWriter(Throwable cause) {
+            assertTrue(cause instanceof RuntimeException || cause instanceof IOException);
+            this.cause = cause;
+        }
+
+        @Override
+        public int getLength() {
+            return 1;
+        }
+
+        @Override
+        public void writeTo(ByteStreamTarget target) throws IOException {
+            if (cause instanceof RuntimeException) {
+                throw (RuntimeException) cause;
+            } else if (cause instanceof IOException) {
+                throw (IOException) cause;
+            }
+        }
     }
-  }
 
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/util/ExpressionPropertiesTest.java b/pgjdbc/src/test/java/org/postgresql/test/util/ExpressionPropertiesTest.java
index b30d812..78f95b2 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/util/ExpressionPropertiesTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/util/ExpressionPropertiesTest.java
@@ -5,56 +5,53 @@
 
 package org.postgresql.test.util;
 
+import java.util.Properties;
+import org.junit.jupiter.api.Test;
+import org.postgresql.util.ExpressionProperties;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 
-import org.postgresql.util.ExpressionProperties;
-
-import org.junit.jupiter.api.Test;
-
-import java.util.Properties;
-
 class ExpressionPropertiesTest {
-  @Test
-  void simpleReplace() {
-    ExpressionProperties p = new ExpressionProperties();
-    p.put("server", "app1");
-    p.put("file", "pgjdbc_${server}.txt");
-    assertEquals("pgjdbc_app1.txt", p.getProperty("file"), "${server} should be replaced");
-  }
+    @Test
+    void simpleReplace() {
+        ExpressionProperties p = new ExpressionProperties();
+        p.put("server", "app1");
+        p.put("file", "pgjdbc_${server}.txt");
+        assertEquals("pgjdbc_app1.txt", p.getProperty("file"), "${server} should be replaced");
+    }
 
-  @Test
-  void replacementMissing() {
-    ExpressionProperties p = new ExpressionProperties();
-    p.put("file", "pgjdbc_${server}.txt");
-    assertEquals("pgjdbc_${server}.txt", p.getProperty("file"), "${server} should be kept as is as there is no replacement");
-  }
+    @Test
+    void replacementMissing() {
+        ExpressionProperties p = new ExpressionProperties();
+        p.put("file", "pgjdbc_${server}.txt");
+        assertEquals("pgjdbc_${server}.txt", p.getProperty("file"), "${server} should be kept as is as there is no replacement");
+    }
 
-  @Test
-  void multipleReplacements() {
-    ExpressionProperties p = new ExpressionProperties();
-    p.put("server", "app1");
-    p.put("file", "${server}${server}${server}${server}${server}");
-    assertEquals("app1app1app1app1app1", p.getProperty("file"), "All the ${server} entries should be replaced");
-  }
+    @Test
+    void multipleReplacements() {
+        ExpressionProperties p = new ExpressionProperties();
+        p.put("server", "app1");
+        p.put("file", "${server}${server}${server}${server}${server}");
+        assertEquals("app1app1app1app1app1", p.getProperty("file"), "All the ${server} entries should be replaced");
+    }
 
-  @Test
-  void multipleParentProperties() {
-    Properties p1 = new Properties();
-    p1.setProperty("server", "app1_${app.type}");
-    Properties p2 = new Properties();
-    p2.setProperty("app.type", "production");
+    @Test
+    void multipleParentProperties() {
+        Properties p1 = new Properties();
+        p1.setProperty("server", "app1_${app.type}");
+        Properties p2 = new Properties();
+        p2.setProperty("app.type", "production");
 
-    ExpressionProperties p = new ExpressionProperties(p1, p2);
-    p.put("file", "pgjdbc_${server}.txt");
+        ExpressionProperties p = new ExpressionProperties(p1, p2);
+        p.put("file", "pgjdbc_${server}.txt");
 
-    assertEquals("pgjdbc_app1_production.txt", p.getProperty("file"), "All the ${...} entries should be replaced");
-  }
+        assertEquals("pgjdbc_app1_production.txt", p.getProperty("file"), "All the ${...} entries should be replaced");
+    }
 
-  @Test
-  void rawValue() {
-    ExpressionProperties p = new ExpressionProperties();
-    p.put("server", "app1");
-    p.put("file", "${server}${server}${server}${server}${server}");
-    assertEquals("${server}${server}${server}${server}${server}", p.getRawPropertyValue("file"), "No replacements in raw value expected");
-  }
+    @Test
+    void rawValue() {
+        ExpressionProperties p = new ExpressionProperties();
+        p.put("server", "app1");
+        p.put("file", "${server}${server}${server}${server}${server}");
+        assertEquals("${server}${server}${server}${server}${server}", p.getRawPropertyValue("file"), "No replacements in raw value expected");
+    }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/util/HostSpecTest.java b/pgjdbc/src/test/java/org/postgresql/test/util/HostSpecTest.java
index 7aad5bc..6fc75b0 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/util/HostSpecTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/util/HostSpecTest.java
@@ -5,96 +5,94 @@
 
 package org.postgresql.test.util;
 
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Test;
+import org.postgresql.util.HostSpec;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertFalse;
 import static org.junit.jupiter.api.Assertions.assertNull;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 
-import org.postgresql.util.HostSpec;
-
-import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.Test;
-
 /**
  * @author Joe Kutner on 10/19/17.
- *         Twitter: @codefinger
+ * Twitter: @codefinger
  */
 class HostSpecTest {
 
-  @AfterEach
-  void cleanup() {
-    System.clearProperty("socksProxyHost");
-    System.clearProperty("socksProxyPort");
-    System.clearProperty("socksNonProxyHosts");
-  }
+    @AfterEach
+    void cleanup() {
+        System.clearProperty("socksProxyHost");
+        System.clearProperty("socksProxyPort");
+        System.clearProperty("socksNonProxyHosts");
+    }
 
-  @Test
-  void shouldResolve() throws Exception {
-    HostSpec hostSpec = new HostSpec("localhost", 5432);
-    assertTrue(hostSpec.shouldResolve());
-  }
+    @Test
+    void shouldResolve() throws Exception {
+        HostSpec hostSpec = new HostSpec("localhost", 5432);
+        assertTrue(hostSpec.shouldResolve());
+    }
 
-  @Test
-  void shouldResolveWithEmptySocksProxyHost() throws Exception {
-    System.setProperty("socksProxyHost", "");
-    HostSpec hostSpec = new HostSpec("localhost", 5432);
-    assertTrue(hostSpec.shouldResolve());
-  }
+    @Test
+    void shouldResolveWithEmptySocksProxyHost() throws Exception {
+        System.setProperty("socksProxyHost", "");
+        HostSpec hostSpec = new HostSpec("localhost", 5432);
+        assertTrue(hostSpec.shouldResolve());
+    }
 
-  @Test
-  void shouldResolveWithWhiteSpaceSocksProxyHost() throws Exception {
-    System.setProperty("socksProxyHost", " ");
-    HostSpec hostSpec = new HostSpec("localhost", 5432);
-    assertTrue(hostSpec.shouldResolve());
-  }
+    @Test
+    void shouldResolveWithWhiteSpaceSocksProxyHost() throws Exception {
+        System.setProperty("socksProxyHost", " ");
+        HostSpec hostSpec = new HostSpec("localhost", 5432);
+        assertTrue(hostSpec.shouldResolve());
+    }
 
-  @Test
-  void shouldResolveWithSocksProxyHost() throws Exception {
-    System.setProperty("socksProxyHost", "fake-socks-proxy");
-    HostSpec hostSpec = new HostSpec("example.com", 5432);
-    assertFalse(hostSpec.shouldResolve());
-  }
+    @Test
+    void shouldResolveWithSocksProxyHost() throws Exception {
+        System.setProperty("socksProxyHost", "fake-socks-proxy");
+        HostSpec hostSpec = new HostSpec("example.com", 5432);
+        assertFalse(hostSpec.shouldResolve());
+    }
 
-  @Test
-  void shouldResolveWithSocksProxyHostWithLocalhost() throws Exception {
-    System.setProperty("socksProxyHost", "fake-socks-proxy");
-    HostSpec hostSpec = new HostSpec("localhost", 5432);
-    assertTrue(hostSpec.shouldResolve());
-  }
+    @Test
+    void shouldResolveWithSocksProxyHostWithLocalhost() throws Exception {
+        System.setProperty("socksProxyHost", "fake-socks-proxy");
+        HostSpec hostSpec = new HostSpec("localhost", 5432);
+        assertTrue(hostSpec.shouldResolve());
+    }
 
-  @Test
-  void shouldResolveWithSocksNonProxyHost() throws Exception {
-    System.setProperty("socksProxyHost", "fake-socks-proxy");
-    System.setProperty("socksNonProxyHosts", "example.com");
-    HostSpec hostSpec = new HostSpec("example.com", 5432);
-    assertTrue(hostSpec.shouldResolve());
-  }
+    @Test
+    void shouldResolveWithSocksNonProxyHost() throws Exception {
+        System.setProperty("socksProxyHost", "fake-socks-proxy");
+        System.setProperty("socksNonProxyHosts", "example.com");
+        HostSpec hostSpec = new HostSpec("example.com", 5432);
+        assertTrue(hostSpec.shouldResolve());
+    }
 
-  @Test
-  void shouldResolveWithSocksNonProxyHosts() throws Exception {
-    System.setProperty("socksProxyHost", "fake-socks-proxy");
-    System.setProperty("socksNonProxyHosts", "example.com|localhost");
-    HostSpec hostSpec = new HostSpec("example.com", 5432);
-    assertTrue(hostSpec.shouldResolve());
-  }
+    @Test
+    void shouldResolveWithSocksNonProxyHosts() throws Exception {
+        System.setProperty("socksProxyHost", "fake-socks-proxy");
+        System.setProperty("socksNonProxyHosts", "example.com|localhost");
+        HostSpec hostSpec = new HostSpec("example.com", 5432);
+        assertTrue(hostSpec.shouldResolve());
+    }
 
-  @Test
-  void shouldResolveWithSocksNonProxyHostsNotMatching() throws Exception {
-    System.setProperty("socksProxyHost", "fake-socks-proxy");
-    System.setProperty("socksNonProxyHosts", "example.com|localhost");
-    HostSpec hostSpec = new HostSpec("example.org", 5432);
-    assertFalse(hostSpec.shouldResolve());
-  }
+    @Test
+    void shouldResolveWithSocksNonProxyHostsNotMatching() throws Exception {
+        System.setProperty("socksProxyHost", "fake-socks-proxy");
+        System.setProperty("socksNonProxyHosts", "example.com|localhost");
+        HostSpec hostSpec = new HostSpec("example.org", 5432);
+        assertFalse(hostSpec.shouldResolve());
+    }
 
-  @Test
-  void shouldReturnEmptyLocalAddressBind() throws Exception {
-    HostSpec hostSpec = new HostSpec("example.org", 5432);
-    assertNull(hostSpec.getLocalSocketAddress());
-  }
+    @Test
+    void shouldReturnEmptyLocalAddressBind() throws Exception {
+        HostSpec hostSpec = new HostSpec("example.org", 5432);
+        assertNull(hostSpec.getLocalSocketAddress());
+    }
 
-  @Test
-  void shouldReturnLocalAddressBind() throws Exception {
-    HostSpec hostSpec = new HostSpec("example.org", 5432, "foo");
-    assertEquals("foo", hostSpec.getLocalSocketAddress());
-  }
+    @Test
+    void shouldReturnLocalAddressBind() throws Exception {
+        HostSpec hostSpec = new HostSpec("example.org", 5432, "foo");
+        assertEquals("foo", hostSpec.getLocalSocketAddress());
+    }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/util/IntListTest.java b/pgjdbc/src/test/java/org/postgresql/test/util/IntListTest.java
new file mode 100644
index 0000000..fcb5188
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/util/IntListTest.java
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2023, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.test.util;
+
+import org.junit.jupiter.api.Test;
+import org.postgresql.util.IntList;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertSame;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+
+/**
+ * Tests {@link IntList}.
+ */
+class IntListTest {
+
+    @Test
+    void size() {
+        final IntList list = new IntList();
+        assertEquals(0, list.size());
+        list.add(3);
+        assertEquals(1, list.size());
+
+        for (int i = 0; i < 48; i++) {
+            list.add(i);
+        }
+        assertEquals(49, list.size());
+
+        list.clear();
+        assertEquals(0, list.size());
+    }
+
+    @Test
+    void get_empty() {
+        final IntList list = new IntList();
+        assertThrows(ArrayIndexOutOfBoundsException.class, () -> list.get(0));
+    }
+
+    @Test
+    void get_negative() {
+        final IntList list = new IntList();
+        list.add(3);
+        assertThrows(ArrayIndexOutOfBoundsException.class, () -> list.get(-1));
+    }
+
+    @Test
+    void get_tooLarge() {
+        final IntList list = new IntList();
+        list.add(3);
+        assertThrows(ArrayIndexOutOfBoundsException.class, () -> list.get(1));
+    }
+
+    @Test
+    void get() {
+        final IntList list = new IntList();
+        list.add(3);
+        assertEquals(3, list.get(0));
+
+        for (int i = 0; i < 1048; i++) {
+            list.add(i);
+        }
+
+        assertEquals(3, list.get(0));
+
+        for (int i = 0; i < 1048; i++) {
+            assertEquals(i, list.get(i + 1));
+        }
+
+        list.clear();
+        list.add(4);
+        assertEquals(4, list.get(0));
+    }
+
+    @Test
+    void toArray() {
+        int[] emptyArray = new IntList().toArray();
+        IntList list = new IntList();
+        assertSame(emptyArray, list.toArray(), "emptyList.toArray()");
+
+        list.add(45);
+        assertArrayEquals(new int[]{45}, list.toArray());
+
+        list.clear();
+        assertSame(emptyArray, list.toArray(), "emptyList.toArray() after clearing the list");
+
+        final int[] expected = new int[1048];
+        for (int i = 0; i < 1048; i++) {
+            list.add(i);
+            expected[i] = i;
+        }
+        assertArrayEquals(expected, list.toArray());
+    }
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/util/LargeObjectVacuum.java b/pgjdbc/src/test/java/org/postgresql/test/util/LargeObjectVacuum.java
new file mode 100644
index 0000000..9dc13b6
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/util/LargeObjectVacuum.java
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2023, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.test.util;
+
+import java.sql.Connection;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+
+/**
+ * Autovacuum does not always keep up with the generated bloat, so this class helps vacuum
+ * the pg_largeobject table when it grows too large.
+ */
+public class LargeObjectVacuum {
+    private final Connection connection;
+    private final long maxSize;
+
+    public LargeObjectVacuum(Connection connection) {
+        this(connection, 1024 * 1024 * 1024);
+    }
+
+    public LargeObjectVacuum(Connection connection, long maxSize) {
+        this.connection = connection;
+        this.maxSize = maxSize;
+    }
+
+    public void vacuum() throws SQLException {
+        if (getLargeObjectTableSize() > maxSize) {
+            vacuumLargeObjectTable();
+        }
+    }
+
+    private void vacuumLargeObjectTable() throws SQLException {
+        // Vacuum can't be executed in a transaction, so we go into autocommit mode
+        connection.setAutoCommit(true);
+        try (PreparedStatement vacuum =
+                     connection.prepareStatement("VACUUM FULL ANALYZE pg_largeobject")) {
+            vacuum.execute();
+        }
+        connection.setAutoCommit(false);
+    }
+
+    private long getLargeObjectTableSize() throws SQLException {
+        try (PreparedStatement ps =
+                     connection.prepareStatement("select pg_table_size('pg_largeobject')")) {
+            try (ResultSet rs = ps.executeQuery()) {
+                rs.next();
+                return rs.getLong(1);
+            }
+        }
+    }
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/util/LazyCleanerTest.java b/pgjdbc/src/test/java/org/postgresql/test/util/LazyCleanerTest.java
new file mode 100644
index 0000000..9d45854
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/util/LazyCleanerTest.java
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2023, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+/* changes were made to move it into the org.postgresql.util package
+ *
+ * Copyright 2022 Juan Lopes
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.postgresql.test.util;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.UUID;
+import org.junit.jupiter.api.Test;
+import org.postgresql.util.LazyCleaner;
+import static java.time.Duration.ofSeconds;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+public class LazyCleanerTest {
+    public static Thread getThreadByName(String threadName) {
+        for (Thread t : Thread.getAllStackTraces().keySet()) {
+            if (t.getName().equals(threadName)) {
+                return t;
+            }
+        }
+        throw new IllegalStateException("Cleanup thread  " + threadName + " not found");
+    }
+
+    @Test
+    void phantomCleaner() throws InterruptedException {
+        List<Object> list = new ArrayList<>(Arrays.asList(
+                new Object(), new Object(), new Object()));
+
+        LazyCleaner t = new LazyCleaner(ofSeconds(5), "Cleaner");
+
+        String[] collected = new String[list.size()];
+        List<LazyCleaner.Cleanable<RuntimeException>> cleaners = new ArrayList<>();
+        for (int i = 0; i < list.size(); i++) {
+            final int ii = i;
+            cleaners.add(
+                    t.register(
+                            list.get(i),
+                            leak -> {
+                                collected[ii] = leak ? "LEAK" : "NO LEAK";
+                                if (ii == 0) {
+                                    throw new RuntimeException(
+                                            "Exception from cleanup action to verify if the cleaner thread would survive"
+                                    );
+                                }
+                            }
+                    )
+            );
+        }
+        assertEquals(
+                list.size(),
+                t.getWatchedCount(),
+                "All objects are strongly-reachable, so getWatchedCount should reflect it"
+        );
+
+        assertTrue(t.isThreadRunning(),
+                "cleanup thread should be running, and it should wait for the leaks");
+
+        cleaners.get(1).clean();
+
+        assertEquals(
+                list.size() - 1,
+                t.getWatchedCount(),
+                "One object has been released properly, so getWatchedCount should reflect it"
+        );
+
+        list.set(0, null);
+        System.gc();
+        System.gc();
+
+        Await.until(
+                "One object was released, and another one has leaked, so getWatchedCount should reflect it",
+                ofSeconds(5),
+                () -> t.getWatchedCount() == list.size() - 2
+        );
+
+        list.clear();
+        System.gc();
+        System.gc();
+
+        Await.until(
+                "The cleanup thread should detect leaks and terminate within 5-10 seconds after GC",
+                ofSeconds(10),
+                () -> !t.isThreadRunning()
+        );
+
+        assertEquals(
+                Arrays.asList("LEAK", "NO LEAK", "LEAK").toString(),
+                Arrays.asList(collected).toString(),
+                "Second object has been released properly, so it should be reported as NO LEAK"
+        );
+    }
+
+    @Test
+    void getThread() throws InterruptedException {
+        String threadName = UUID.randomUUID().toString();
+        LazyCleaner t = new LazyCleaner(ofSeconds(5), threadName);
+        List<Object> list = new ArrayList<>();
+        list.add(new Object());
+        LazyCleaner.Cleanable<IllegalStateException> cleanable =
+                t.register(
+                        list.get(0),
+                        leak -> {
+                            throw new IllegalStateException("test exception from CleaningAction");
+                        }
+                );
+        assertTrue(t.isThreadRunning(),
+                "cleanup thread should be running, and it should wait for the leaks");
+        Thread thread = getThreadByName(threadName);
+        thread.interrupt();
+        Await.until(
+                "The cleanup thread should ignore the interrupt since there's one object to monitor",
+                ofSeconds(10),
+                () -> !thread.isInterrupted()
+        );
+        assertThrows(
+                IllegalStateException.class,
+                cleanable::clean,
+                "Exception from cleanable.clean() should be rethrown"
+        );
+        thread.interrupt();
+        Await.until(
+                "The cleanup thread should exit shortly after interrupt as there's no leaks to monitor",
+                ofSeconds(1),
+                () -> !t.isThreadRunning()
+        );
+    }
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/util/LruCacheTest.java b/pgjdbc/src/test/java/org/postgresql/test/util/LruCacheTest.java
index 59594eb..4d0896e 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/util/LruCacheTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/util/LruCacheTest.java
@@ -5,184 +5,181 @@
 
 package org.postgresql.test.util;
 
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.fail;
-
-import org.postgresql.util.CanEstimateSize;
-import org.postgresql.util.LruCache;
-
-import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.Test;
-
 import java.sql.SQLException;
 import java.util.ArrayDeque;
 import java.util.Arrays;
 import java.util.Deque;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.postgresql.util.CanEstimateSize;
+import org.postgresql.util.LruCache;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.fail;
 
 /**
  * Tests {@link org.postgresql.util.LruCache}.
  */
 class LruCacheTest {
 
-  private static class Entry implements CanEstimateSize {
-    private final int id;
+    private final Integer[] expectCreate = new Integer[1];
+    private final Deque<Entry> expectEvict = new ArrayDeque<>();
+    private final Entry dummy = new Entry(-999);
+    private LruCache<Integer, Entry> cache;
 
-    Entry(int id) {
-      this.id = id;
+    @BeforeEach
+    void setUp() throws Exception {
+        cache = new LruCache<>(4, 1000, false, new LruCache.CreateAction<Integer, Entry>() {
+            @Override
+            public Entry create(Integer key) throws SQLException {
+                assertEquals(expectCreate[0], key, "Unexpected create");
+                return new Entry(key);
+            }
+        }, new LruCache.EvictAction<Entry>() {
+            @Override
+            public void evict(Entry entry) throws SQLException {
+                if (expectEvict.isEmpty()) {
+                    fail("Unexpected entry was evicted: " + entry);
+                }
+                Entry expected = expectEvict.removeFirst();
+                assertEquals(expected, entry, "Unexpected evict");
+            }
+        });
     }
 
-    @Override
-    public long getSize() {
-      return id;
+    @Test
+    void evictsByNumberOfEntries() throws SQLException {
+        Entry a;
+        Entry b;
+        Entry c;
+        Entry d;
+        Entry e;
+
+        a = use(1);
+        b = use(2);
+        c = use(3);
+        d = use(4);
+        e = use(5, a);
     }
 
-    @Override
-    public String toString() {
-      return "Entry{" + "id=" + id + '}';
+    @Test
+    void evictsBySize() throws SQLException {
+        Entry a;
+        Entry b;
+        Entry c;
+
+        a = use(330);
+        b = use(331);
+        c = use(332);
+        use(400, a, b);
     }
-  }
 
-  private final Integer[] expectCreate = new Integer[1];
-  private final Deque<Entry> expectEvict = new ArrayDeque<>();
-  private final Entry dummy = new Entry(-999);
-  private LruCache<Integer, Entry> cache;
+    @Test
+    void evictsLeastRecentlyUsed() throws SQLException {
+        Entry a;
+        Entry b;
+        Entry c;
+        Entry d;
 
-  @BeforeEach
-  void setUp() throws Exception {
-    cache = new LruCache<>(4, 1000, false, new LruCache.CreateAction<Integer, Entry>() {
-      @Override
-      public Entry create(Integer key) throws SQLException {
-        assertEquals(expectCreate[0], key, "Unexpected create");
-        return new Entry(key);
-      }
-    }, new LruCache.EvictAction<Entry>() {
-      @Override
-      public void evict(Entry entry) throws SQLException {
-        if (expectEvict.isEmpty()) {
-          fail("Unexpected entry was evicted: " + entry);
+        a = use(1);
+        b = use(2);
+        c = use(3);
+        a = use(1); // reuse a
+        use(5);
+        d = use(4, b); // expect b to be evicted
+    }
+
+    @Test
+    void cyclicReplacement() throws SQLException {
+        Entry a;
+        Entry b;
+        Entry c;
+        Entry d;
+        Entry e;
+
+        a = use(1);
+        b = use(2);
+        c = use(3);
+        d = use(4);
+        e = use(5, a);
+
+        for (int i = 0; i < 1000; i++) {
+            a = use(1, b);
+            b = use(2, c);
+            c = use(3, d);
+            d = use(4, e);
+            e = use(5, a);
         }
-        Entry expected = expectEvict.removeFirst();
-        assertEquals(expected, entry, "Unexpected evict");
-      }
-    });
-  }
-
-  @Test
-  void evictsByNumberOfEntries() throws SQLException {
-    Entry a;
-    Entry b;
-    Entry c;
-    Entry d;
-    Entry e;
-
-    a = use(1);
-    b = use(2);
-    c = use(3);
-    d = use(4);
-    e = use(5, a);
-  }
-
-  @Test
-  void evictsBySize() throws SQLException {
-    Entry a;
-    Entry b;
-    Entry c;
-
-    a = use(330);
-    b = use(331);
-    c = use(332);
-    use(400, a, b);
-  }
-
-  @Test
-  void evictsLeastRecentlyUsed() throws SQLException {
-    Entry a;
-    Entry b;
-    Entry c;
-    Entry d;
-
-    a = use(1);
-    b = use(2);
-    c = use(3);
-    a = use(1); // reuse a
-    use(5);
-    d = use(4, b); // expect b to be evicted
-  }
-
-  @Test
-  void cyclicReplacement() throws SQLException {
-    Entry a;
-    Entry b;
-    Entry c;
-    Entry d;
-    Entry e;
-
-    a = use(1);
-    b = use(2);
-    c = use(3);
-    d = use(4);
-    e = use(5, a);
-
-    for (int i = 0; i < 1000; i++) {
-      a = use(1, b);
-      b = use(2, c);
-      c = use(3, d);
-      d = use(4, e);
-      e = use(5, a);
     }
-  }
 
-  @Test
-  void duplicateKey() throws SQLException {
-    Entry a;
+    @Test
+    void duplicateKey() throws SQLException {
+        Entry a;
 
-    a = use(1);
-    expectEvict.clear();
-    expectEvict.add(a);
-    // This overwrites the cache, evicting previous entry with exactly the same key
-    cache.put(1, new Entry(1));
-    assertEvict();
-  }
-
-  @Test
-  void caching() throws SQLException {
-    Entry a;
-    Entry b;
-    Entry c;
-    Entry d;
-    Entry e;
-
-    a = use(1);
-    b = use(2);
-    c = use(3);
-    d = use(4);
-
-    for (int i = 0; i < 10000; i++) {
-      c = use(-3);
-      b = use(-2);
-      a = use(-1);
-      e = use(5, d);
-      c = use(-3);
-      b = use(-2);
-      a = use(-1);
-      d = use(4, e);
+        a = use(1);
+        expectEvict.clear();
+        expectEvict.add(a);
+        // This overwrites the cache, evicting the previous entry with exactly the same key
+        cache.put(1, new Entry(1));
+        assertEvict();
     }
-  }
 
-  private Entry use(int expectCreate, Entry... expectEvict) throws SQLException {
-    this.expectCreate[0] = expectCreate <= 0 ? -1 : expectCreate;
-    this.expectEvict.clear();
-    this.expectEvict.addAll(Arrays.asList(expectEvict));
-    Entry a = cache.borrow(Math.abs(expectCreate));
-    cache.put(a.id, a); // a
-    assertEvict();
-    return a;
-  }
+    @Test
+    void caching() throws SQLException {
+        Entry a;
+        Entry b;
+        Entry c;
+        Entry d;
+        Entry e;
 
-  private void assertEvict() {
-    if (expectEvict.isEmpty()) {
-      return;
+        a = use(1);
+        b = use(2);
+        c = use(3);
+        d = use(4);
+
+        for (int i = 0; i < 10000; i++) {
+            c = use(-3);
+            b = use(-2);
+            a = use(-1);
+            e = use(5, d);
+            c = use(-3);
+            b = use(-2);
+            a = use(-1);
+            d = use(4, e);
+        }
+    }
+
+    private Entry use(int expectCreate, Entry... expectEvict) throws SQLException {
+        this.expectCreate[0] = expectCreate <= 0 ? -1 : expectCreate;
+        this.expectEvict.clear();
+        this.expectEvict.addAll(Arrays.asList(expectEvict));
+        Entry a = cache.borrow(Math.abs(expectCreate));
+        cache.put(a.id, a); // a
+        assertEvict();
+        return a;
+    }
+
+    private void assertEvict() {
+        if (expectEvict.isEmpty()) {
+            return;
+        }
+        fail("Some of the expected evictions not happened: " + expectEvict.toString());
+    }
+
+    private static class Entry implements CanEstimateSize {
+        private final int id;
+
+        Entry(int id) {
+            this.id = id;
+        }
+
+        @Override
+        public long getSize() {
+            return id;
+        }
+
+        @Override
+        public String toString() {
+            return "Entry{" + "id=" + id + '}';
+        }
     }
-    fail("Some of the expected evictions not happened: " + expectEvict.toString());
-  }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/util/MiniJndiContext.java b/pgjdbc/src/test/java/org/postgresql/test/util/MiniJndiContext.java
index cdb947a..dde89f5 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/util/MiniJndiContext.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/util/MiniJndiContext.java
@@ -11,7 +11,6 @@ import java.rmi.MarshalledObject;
 import java.util.HashMap;
 import java.util.Hashtable;
 import java.util.Map;
-
 import javax.naming.Binding;
 import javax.naming.Context;
 import javax.naming.Name;
@@ -30,186 +29,186 @@ import javax.naming.spi.ObjectFactory;
  * @author Aaron Mulder (ammulder@chariotsolutions.com)
  */
 public class MiniJndiContext implements Context {
-  private final Map<String, Object> map = new HashMap<>();
+    private final Map<String, Object> map = new HashMap<>();
 
-  public MiniJndiContext() {
-  }
-
-  @Override
-  public Object lookup(Name name) throws NamingException {
-    return lookup(name.get(0));
-  }
-
-  @Override
-  public Object lookup(String name) throws NamingException {
-    Object o = map.get(name);
-    if (o == null) {
-      return null;
+    public MiniJndiContext() {
     }
-    if (o instanceof Reference) {
-      Reference ref = (Reference) o;
-      try {
-        Class<?> factoryClass = Class.forName(ref.getFactoryClassName());
-        ObjectFactory fac = (ObjectFactory) factoryClass.newInstance();
-        return fac.getObjectInstance(ref, null, this, null);
-      } catch (Exception e) {
-        throw new NamingException("Unable to dereference to object: " + e);
-      }
-    } else if (o instanceof MarshalledObject) {
-      try {
-        return ((MarshalledObject<?>) o).get();
-      } catch (IOException e) {
-        throw new NamingException("Unable to deserialize object: " + e);
-      } catch (ClassNotFoundException e) {
-        throw new NamingException("Unable to deserialize object: " + e);
-      }
-    } else {
-      throw new NamingException("JNDI Object is neither Referenceable nor Serializable");
+
+    @Override
+    public Object lookup(Name name) throws NamingException {
+        return lookup(name.get(0));
     }
-  }
 
-  @Override
-  public void bind(Name name, Object obj) throws NamingException {
-    rebind(name.get(0), obj);
-  }
-
-  @Override
-  public void bind(String name, Object obj) throws NamingException {
-    rebind(name, obj);
-  }
-
-  @Override
-  public void rebind(Name name, Object obj) throws NamingException {
-    rebind(name.get(0), obj);
-  }
-
-  @Override
-  public void rebind(String name, Object obj) throws NamingException {
-    if (obj instanceof Referenceable) {
-      Reference ref = ((Referenceable) obj).getReference();
-      map.put(name, ref);
-    } else if (obj instanceof Serializable) {
-      try {
-        MarshalledObject<Object> mo = new MarshalledObject<>(obj);
-        map.put(name, mo);
-      } catch (IOException e) {
-        throw new NamingException("Unable to serialize object to JNDI: " + e);
-      }
-    } else {
-      throw new NamingException(
-          "Object to store in JNDI is neither Referenceable nor Serializable");
+    @Override
+    public Object lookup(String name) throws NamingException {
+        Object o = map.get(name);
+        if (o == null) {
+            return null;
+        }
+        if (o instanceof Reference) {
+            Reference ref = (Reference) o;
+            try {
+                Class<?> factoryClass = Class.forName(ref.getFactoryClassName());
+                ObjectFactory fac = (ObjectFactory) factoryClass.newInstance();
+                return fac.getObjectInstance(ref, null, this, null);
+            } catch (Exception e) {
+                throw new NamingException("Unable to dereference to object: " + e);
+            }
+        } else if (o instanceof MarshalledObject) {
+            try {
+                return ((MarshalledObject<?>) o).get();
+            } catch (IOException e) {
+                throw new NamingException("Unable to deserialize object: " + e);
+            } catch (ClassNotFoundException e) {
+                throw new NamingException("Unable to deserialize object: " + e);
+            }
+        } else {
+            throw new NamingException("JNDI Object is neither Referenceable nor Serializable");
+        }
     }
-  }
 
-  @Override
-  public void unbind(Name name) throws NamingException {
-    unbind(name.get(0));
-  }
+    @Override
+    public void bind(Name name, Object obj) throws NamingException {
+        rebind(name.get(0), obj);
+    }
 
-  @Override
-  public void unbind(String name) throws NamingException {
-    map.remove(name);
-  }
+    @Override
+    public void bind(String name, Object obj) throws NamingException {
+        rebind(name, obj);
+    }
 
-  @Override
-  public void rename(Name oldName, Name newName) throws NamingException {
-    rename(oldName.get(0), newName.get(0));
-  }
+    @Override
+    public void rebind(Name name, Object obj) throws NamingException {
+        rebind(name.get(0), obj);
+    }
 
-  @Override
-  public void rename(String oldName, String newName) throws NamingException {
-    map.put(newName, map.remove(oldName));
-  }
+    @Override
+    public void rebind(String name, Object obj) throws NamingException {
+        if (obj instanceof Referenceable) {
+            Reference ref = ((Referenceable) obj).getReference();
+            map.put(name, ref);
+        } else if (obj instanceof Serializable) {
+            try {
+                MarshalledObject<Object> mo = new MarshalledObject<>(obj);
+                map.put(name, mo);
+            } catch (IOException e) {
+                throw new NamingException("Unable to serialize object to JNDI: " + e);
+            }
+        } else {
+            throw new NamingException(
+                    "Object to store in JNDI is neither Referenceable nor Serializable");
+        }
+    }
 
-  @Override
-  public NamingEnumeration<NameClassPair> list(Name name) throws NamingException {
-    return null;
-  }
+    @Override
+    public void unbind(Name name) throws NamingException {
+        unbind(name.get(0));
+    }
 
-  @Override
-  public NamingEnumeration<NameClassPair> list(String name) throws NamingException {
-    return null;
-  }
+    @Override
+    public void unbind(String name) throws NamingException {
+        map.remove(name);
+    }
 
-  @Override
-  public NamingEnumeration<Binding> listBindings(Name name) throws NamingException {
-    return null;
-  }
+    @Override
+    public void rename(Name oldName, Name newName) throws NamingException {
+        rename(oldName.get(0), newName.get(0));
+    }
 
-  @Override
-  public NamingEnumeration<Binding> listBindings(String name) throws NamingException {
-    return null;
-  }
+    @Override
+    public void rename(String oldName, String newName) throws NamingException {
+        map.put(newName, map.remove(oldName));
+    }
 
-  @Override
-  public void destroySubcontext(Name name) throws NamingException {
-  }
+    @Override
+    public NamingEnumeration<NameClassPair> list(Name name) throws NamingException {
+        return null;
+    }
 
-  @Override
-  public void destroySubcontext(String name) throws NamingException {
-  }
+    @Override
+    public NamingEnumeration<NameClassPair> list(String name) throws NamingException {
+        return null;
+    }
 
-  @Override
-  public Context createSubcontext(Name name) throws NamingException {
-    return null;
-  }
+    @Override
+    public NamingEnumeration<Binding> listBindings(Name name) throws NamingException {
+        return null;
+    }
 
-  @Override
-  public Context createSubcontext(String name) throws NamingException {
-    return null;
-  }
+    @Override
+    public NamingEnumeration<Binding> listBindings(String name) throws NamingException {
+        return null;
+    }
 
-  @Override
-  public Object lookupLink(Name name) throws NamingException {
-    return null;
-  }
+    @Override
+    public void destroySubcontext(Name name) throws NamingException {
+    }
 
-  @Override
-  public Object lookupLink(String name) throws NamingException {
-    return null;
-  }
+    @Override
+    public void destroySubcontext(String name) throws NamingException {
+    }
 
-  @Override
-  public NameParser getNameParser(Name name) throws NamingException {
-    return null;
-  }
+    @Override
+    public Context createSubcontext(Name name) throws NamingException {
+        return null;
+    }
 
-  @Override
-  public NameParser getNameParser(String name) throws NamingException {
-    return null;
-  }
+    @Override
+    public Context createSubcontext(String name) throws NamingException {
+        return null;
+    }
 
-  @Override
-  public Name composeName(Name name, Name prefix) throws NamingException {
-    return null;
-  }
+    @Override
+    public Object lookupLink(Name name) throws NamingException {
+        return null;
+    }
 
-  @Override
-  public String composeName(String name, String prefix) throws NamingException {
-    return null;
-  }
+    @Override
+    public Object lookupLink(String name) throws NamingException {
+        return null;
+    }
 
-  @Override
-  public Object addToEnvironment(String propName, Object propVal) throws NamingException {
-    return null;
-  }
+    @Override
+    public NameParser getNameParser(Name name) throws NamingException {
+        return null;
+    }
 
-  @Override
-  public Object removeFromEnvironment(String propName) throws NamingException {
-    return null;
-  }
+    @Override
+    public NameParser getNameParser(String name) throws NamingException {
+        return null;
+    }
 
-  @Override
-  public Hashtable<?, ?> getEnvironment() throws NamingException {
-    return null;
-  }
+    @Override
+    public Name composeName(Name name, Name prefix) throws NamingException {
+        return null;
+    }
 
-  @Override
-  public void close() throws NamingException {
-  }
+    @Override
+    public String composeName(String name, String prefix) throws NamingException {
+        return null;
+    }
 
-  @Override
-  public String getNameInNamespace() throws NamingException {
-    return null;
-  }
+    @Override
+    public Object addToEnvironment(String propName, Object propVal) throws NamingException {
+        return null;
+    }
+
+    @Override
+    public Object removeFromEnvironment(String propName) throws NamingException {
+        return null;
+    }
+
+    @Override
+    public Hashtable<?, ?> getEnvironment() throws NamingException {
+        return null;
+    }
+
+    @Override
+    public void close() throws NamingException {
+    }
+
+    @Override
+    public String getNameInNamespace() throws NamingException {
+        return null;
+    }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/util/MiniJndiContextFactory.java b/pgjdbc/src/test/java/org/postgresql/test/util/MiniJndiContextFactory.java
index 1b82ab2..cb1e467 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/util/MiniJndiContextFactory.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/util/MiniJndiContextFactory.java
@@ -6,7 +6,6 @@
 package org.postgresql.test.util;
 
 import java.util.Hashtable;
-
 import javax.naming.Context;
 import javax.naming.NamingException;
 import javax.naming.spi.InitialContextFactory;
@@ -18,8 +17,8 @@ import javax.naming.spi.InitialContextFactory;
  * @author Aaron Mulder (ammulder@chariotsolutions.com)
  */
 public class MiniJndiContextFactory implements InitialContextFactory {
-  @Override
-  public Context getInitialContext(Hashtable<?, ?> environment) throws NamingException {
-    return new MiniJndiContext();
-  }
+    @Override
+    public Context getInitialContext(Hashtable<?, ?> environment) throws NamingException {
+        return new MiniJndiContext();
+    }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/util/NullOutputStream.java b/pgjdbc/src/test/java/org/postgresql/test/util/NullOutputStream.java
similarity index 55%
rename from pgjdbc/src/test/java/org/postgresql/util/NullOutputStream.java
rename to pgjdbc/src/test/java/org/postgresql/test/util/NullOutputStream.java
index 764dc1a..d2c7497 100644
--- a/pgjdbc/src/test/java/org/postgresql/util/NullOutputStream.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/util/NullOutputStream.java
@@ -3,7 +3,7 @@
  * See the LICENSE file in the project root for more information.
  */
 
-package org.postgresql.util;
+package org.postgresql.test.util;
 
 import java.io.OutputStream;
 import java.io.PrintStream;
@@ -13,17 +13,17 @@ import java.io.PrintStream;
  */
 public class NullOutputStream extends PrintStream {
 
-  public NullOutputStream(OutputStream out) {
-    super(out);
-  }
+    public NullOutputStream(OutputStream out) {
+        super(out);
+    }
 
-  @Override
-  public void write(int b) {
+    @Override
+    public void write(int b) {
 
-  }
+    }
 
-  @Override
-  public void write(byte[] buf, int off, int len) {
+    @Override
+    public void write(byte[] buf, int off, int len) {
 
-  }
+    }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/util/NumberParserTest.java b/pgjdbc/src/test/java/org/postgresql/test/util/NumberParserTest.java
new file mode 100644
index 0000000..89c2ed0
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/util/NumberParserTest.java
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2023, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.test.util;
+
+import java.util.ArrayList;
+import java.util.List;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
+import org.postgresql.util.NumberParser;
+import static org.junit.jupiter.api.Assertions.fail;
+
+class NumberParserTest {
+    @Test
+    void getFastLong_normalLongs() {
+        List<Long> tests = new ArrayList<>();
+        for (long base : new long[]{0, 42, 65536, -65536, Long.MAX_VALUE}) {
+            for (int diff = -10; diff <= 10; diff++) {
+                tests.add(base + diff);
+            }
+        }
+
+        for (Long test : tests) {
+            assertGetLongResult(Long.toString(test), test);
+        }
+    }
+
+    @Test
+    void getFastLong_discardsFractionalPart() {
+        assertGetLongResult("234.435", 234);
+        assertGetLongResult("-234234.", -234234);
+    }
+
+    @Test
+    void getFastLong_failOnIncorrectStrings() {
+        assertGetLongFail("");
+        assertGetLongFail("-234.12542.");
+        assertGetLongFail(".");
+        assertGetLongFail("-.");
+        assertGetLongFail(Long.toString(Long.MIN_VALUE).substring(1));
+    }
+
+    private void assertGetLongResult(String s, long expected) {
+        try {
+            Assertions.assertEquals(
+                    expected,
+                    NumberParser.getFastLong(s.getBytes(), Long.MIN_VALUE, Long.MAX_VALUE),
+                    "string \"" + s + "\" parsed well to number " + expected
+            );
+        } catch (NumberFormatException nfe) {
+            fail("failed to parse(NumberFormatException) string \"" + s + "\", expected result " + expected);
+        }
+    }
+
+    private void assertGetLongFail(String s) {
+        try {
+            long ret = NumberParser.getFastLong(s.getBytes(), Long.MIN_VALUE, Long.MAX_VALUE);
+            fail("Expected NumberFormatException on parsing \"" + s + "\", but result: " + ret);
+        } catch (NumberFormatException nfe) {
+            // ok
+        }
+    }
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/util/ObjectFactoryTest.java b/pgjdbc/src/test/java/org/postgresql/test/util/ObjectFactoryTest.java
index a04b8ea..5f3576c 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/util/ObjectFactoryTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/util/ObjectFactoryTest.java
@@ -5,102 +5,98 @@
 
 package org.postgresql.test.util;
 
-import static org.junit.jupiter.api.Assertions.assertAll;
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertFalse;
-import static org.junit.jupiter.api.Assertions.assertThrows;
-
+import java.sql.SQLException;
+import java.util.Properties;
+import javax.net.SocketFactory;
+import org.junit.jupiter.api.Test;
+import org.opentest4j.MultipleFailuresError;
 import org.postgresql.PGProperty;
 import org.postgresql.jdbc.SslMode;
 import org.postgresql.test.TestUtil;
 import org.postgresql.util.ObjectFactory;
 import org.postgresql.util.PSQLState;
-
-import org.junit.jupiter.api.Test;
-import org.opentest4j.MultipleFailuresError;
-
-import java.sql.SQLException;
-import java.util.Properties;
-
-import javax.net.SocketFactory;
+import static org.junit.jupiter.api.Assertions.assertAll;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertThrows;
 
 class ObjectFactoryTest {
-  Properties props = new Properties();
-
-  static class BadObject {
-    static boolean wasInstantiated;
-
-    BadObject() {
-      wasInstantiated = true;
-      throw new RuntimeException("I should not be instantiated");
-    }
-  }
-
-  private void testInvalidInstantiation(PGProperty prop, PSQLState expectedSqlState) {
-    prop.set(props, BadObject.class.getName());
-
-    BadObject.wasInstantiated = false;
-    SQLException ex = assertThrows(SQLException.class, () -> {
-      TestUtil.openDB(props);
-    });
-
-    try {
-      assertAll(
-          () -> assertFalse(BadObject.wasInstantiated, "ObjectFactory should not have "
-              + "instantiated bad object for " + prop),
-          () -> assertEquals(expectedSqlState.getState(), ex.getSQLState(), () -> "#getSQLState()"),
-          () -> {
-            assertThrows(
-                ClassCastException.class,
-                () -> {
-                  throw ex.getCause();
-                },
-                () -> "Wrong class specified for " + prop.name()
-                    + " => ClassCastException is expected in SQLException#getCause()"
-            );
-          }
-      );
-    } catch (MultipleFailuresError e) {
-      // Add the original exception so it is easier to understand the reason for the test to fail
-      e.addSuppressed(ex);
-      throw e;
-    }
-  }
-
-  @Test
-  void invalidSocketFactory() {
-    testInvalidInstantiation(PGProperty.SOCKET_FACTORY, PSQLState.CONNECTION_FAILURE);
-  }
-
-  @Test
-  void invalidSSLFactory() {
-    TestUtil.assumeSslTestsEnabled();
-    // We need at least "require" to trigger SslSockerFactory instantiation
-    PGProperty.SSL_MODE.set(props, SslMode.REQUIRE.value);
-    testInvalidInstantiation(PGProperty.SSL_FACTORY, PSQLState.CONNECTION_FAILURE);
-  }
-
-  @Test
-  void invalidAuthenticationPlugin() {
-    testInvalidInstantiation(PGProperty.AUTHENTICATION_PLUGIN_CLASS_NAME,
-        PSQLState.INVALID_PARAMETER_VALUE);
-  }
-
-  @Test
-  void invalidSslHostnameVerifier() {
-    TestUtil.assumeSslTestsEnabled();
-    // Hostname verification is done at verify-full level only
-    PGProperty.SSL_MODE.set(props, SslMode.VERIFY_FULL.value);
-    PGProperty.SSL_ROOT_CERT.set(props, TestUtil.getSslTestCertPath("goodroot.crt"));
-    testInvalidInstantiation(PGProperty.SSL_HOSTNAME_VERIFIER, PSQLState.CONNECTION_FAILURE);
-  }
-
-  @Test
-  void instantiateInvalidSocketFactory() {
     Properties props = new Properties();
-    assertThrows(ClassCastException.class, () -> {
-      ObjectFactory.instantiate(SocketFactory.class, BadObject.class.getName(), props,
-          false, null);
-    });
-  }
+
+    private void testInvalidInstantiation(PGProperty prop, PSQLState expectedSqlState) {
+        prop.set(props, BadObject.class.getName());
+
+        BadObject.wasInstantiated = false;
+        SQLException ex = assertThrows(SQLException.class, () -> {
+            TestUtil.openDB(props);
+        });
+
+        try {
+            assertAll(
+                    () -> assertFalse(BadObject.wasInstantiated, "ObjectFactory should not have "
+                            + "instantiated bad object for " + prop),
+                    () -> assertEquals(expectedSqlState.getState(), ex.getSQLState(), () -> "#getSQLState()"),
+                    () -> {
+                        assertThrows(
+                                ClassCastException.class,
+                                () -> {
+                                    throw ex.getCause();
+                                },
+                                () -> "Wrong class specified for " + prop.name()
+                                        + " => ClassCastException is expected in SQLException#getCause()"
+                        );
+                    }
+            );
+        } catch (MultipleFailuresError e) {
+            // Add the original exception so it is easier to understand the reason for the test to fail
+            e.addSuppressed(ex);
+            throw e;
+        }
+    }
+
+    @Test
+    void invalidSocketFactory() {
+        testInvalidInstantiation(PGProperty.SOCKET_FACTORY, PSQLState.CONNECTION_FAILURE);
+    }
+
+    @Test
+    void invalidSSLFactory() {
+        TestUtil.assumeSslTestsEnabled();
+        // We need at least "require" to trigger SslSockerFactory instantiation
+        PGProperty.SSL_MODE.set(props, SslMode.REQUIRE.value);
+        testInvalidInstantiation(PGProperty.SSL_FACTORY, PSQLState.CONNECTION_FAILURE);
+    }
+
+    @Test
+    void invalidAuthenticationPlugin() {
+        testInvalidInstantiation(PGProperty.AUTHENTICATION_PLUGIN_CLASS_NAME,
+                PSQLState.INVALID_PARAMETER_VALUE);
+    }
+
+    @Test
+    void invalidSslHostnameVerifier() {
+        TestUtil.assumeSslTestsEnabled();
+        // Hostname verification is done at verify-full level only
+        PGProperty.SSL_MODE.set(props, SslMode.VERIFY_FULL.value);
+        PGProperty.SSL_ROOT_CERT.set(props, TestUtil.getSslTestCertPath("goodroot.crt"));
+        testInvalidInstantiation(PGProperty.SSL_HOSTNAME_VERIFIER, PSQLState.CONNECTION_FAILURE);
+    }
+
+    @Test
+    void instantiateInvalidSocketFactory() {
+        Properties props = new Properties();
+        assertThrows(ClassCastException.class, () -> {
+            ObjectFactory.instantiate(SocketFactory.class, BadObject.class.getName(), props,
+                    false, null);
+        });
+    }
+
+    static class BadObject {
+        static boolean wasInstantiated;
+
+        BadObject() {
+            wasInstantiated = true;
+            throw new RuntimeException("I should not be instantiated");
+        }
+    }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/util/PGPropertyMaxResultBufferParserTest.java b/pgjdbc/src/test/java/org/postgresql/test/util/PGPropertyMaxResultBufferParserTest.java
index 48be46d..7055d01 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/util/PGPropertyMaxResultBufferParserTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/util/PGPropertyMaxResultBufferParserTest.java
@@ -5,55 +5,52 @@
 
 package org.postgresql.test.util;
 
+import java.lang.management.ManagementFactory;
+import java.util.Arrays;
+import java.util.Collection;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.MethodSource;
+import org.postgresql.util.PGPropertyMaxResultBufferParser;
+import org.postgresql.util.PSQLException;
 import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertThrows;
 
-import org.postgresql.util.PGPropertyMaxResultBufferParser;
-import org.postgresql.util.PSQLException;
-
-import org.junit.jupiter.api.Test;
-import org.junit.jupiter.params.ParameterizedTest;
-import org.junit.jupiter.params.provider.MethodSource;
-
-import java.lang.management.ManagementFactory;
-import java.util.Arrays;
-import java.util.Collection;
-
 public class PGPropertyMaxResultBufferParserTest {
-  public static Collection<Object[]> data() {
-    Object[][] data = new Object[][]{
-      {"100", 100L},
-      {"10K", 10L * 1000},
-      {"25M", 25L * 1000 * 1000},
-      //next two should be too big
-      {"35G", (long) (0.90 * ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax())},
-      {"1T", (long) (0.90 * ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax())},
-      //percent test
-      {"5p", (long) (0.05 * ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax())},
-      {"10pct", (long) (0.10 * ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax())},
-      {"15percent",
-        (long) (0.15 * ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax())},
-      //for testing empty property
-      {"", -1},
-      {null, -1}
-    };
-    return Arrays.asList(data);
-  }
+    public static Collection<Object[]> data() {
+        Object[][] data = new Object[][]{
+                {"100", 100L},
+                {"10K", 10L * 1000},
+                {"25M", 25L * 1000 * 1000},
+                //next two should be too big
+                {"35G", (long) (0.90 * ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax())},
+                {"1T", (long) (0.90 * ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax())},
+                //percent test
+                {"5p", (long) (0.05 * ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax())},
+                {"10pct", (long) (0.10 * ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax())},
+                {"15percent",
+                        (long) (0.15 * ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax())},
+                //for testing empty property
+                {"", -1},
+                {null, -1}
+        };
+        return Arrays.asList(data);
+    }
 
-  @MethodSource("data")
-  @ParameterizedTest(name = "{index}: Test with valueToParse={0}, expectedResult={1}")
-  void getMaxResultBufferValue(String valueToParse, long expectedResult) {
-    assertDoesNotThrow(() -> {
-      long result = PGPropertyMaxResultBufferParser.parseProperty(valueToParse);
-      assertEquals(expectedResult, result);
-    });
-  }
+    @MethodSource("data")
+    @ParameterizedTest(name = "{index}: Test with valueToParse={0}, expectedResult={1}")
+    void getMaxResultBufferValue(String valueToParse, long expectedResult) {
+        assertDoesNotThrow(() -> {
+            long result = PGPropertyMaxResultBufferParser.parseProperty(valueToParse);
+            assertEquals(expectedResult, result);
+        });
+    }
 
-  @Test
-  void getMaxResultBufferValueException() throws PSQLException {
-    assertThrows(PSQLException.class, () -> {
-      long ignore = PGPropertyMaxResultBufferParser.parseProperty("abc");
-    });
-  }
+    @Test
+    void getMaxResultBufferValueException() throws PSQLException {
+        assertThrows(PSQLException.class, () -> {
+            long ignore = PGPropertyMaxResultBufferParser.parseProperty("abc");
+        });
+    }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/util/PGPropertyUtilTest.java b/pgjdbc/src/test/java/org/postgresql/test/util/PGPropertyUtilTest.java
new file mode 100644
index 0000000..2ef30d8
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/util/PGPropertyUtilTest.java
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2021, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.test.util;
+
+import java.util.Properties;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
+import org.postgresql.PGProperty;
+import org.postgresql.util.PGPropertyUtil;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+class PGPropertyUtilTest {
+
+    // data for next two test methods
+    private static final String[][] TRANSLATION_TABLE = {
+            {"allowEncodingChanges", "allowEncodingChanges"},
+            {"port", "PGPORT"},
+            {"host", "PGHOST"},
+            {"dbname", "PGDBNAME"},
+    };
+
+    @Test
+    void propertiesConsistencyCheck() {
+        // PGPORT
+        Properties properties = new Properties();
+        PGProperty.PG_PORT.set(properties, "0");
+        Assertions.assertFalse(PGPropertyUtil.propertiesConsistencyCheck(properties));
+        PGProperty.PG_PORT.set(properties, "1");
+        assertTrue(PGPropertyUtil.propertiesConsistencyCheck(properties));
+        PGProperty.PG_PORT.set(properties, "5432");
+        assertTrue(PGPropertyUtil.propertiesConsistencyCheck(properties));
+        PGProperty.PG_PORT.set(properties, "65535");
+        assertTrue(PGPropertyUtil.propertiesConsistencyCheck(properties));
+        PGProperty.PG_PORT.set(properties, "65536");
+        assertFalse(PGPropertyUtil.propertiesConsistencyCheck(properties));
+        PGProperty.PG_PORT.set(properties, "abcdef");
+        assertFalse(PGPropertyUtil.propertiesConsistencyCheck(properties));
+        // any other not handled
+        properties = new Properties();
+        properties.setProperty("not-handled-key", "not-handled-value");
+        assertTrue(PGPropertyUtil.propertiesConsistencyCheck(properties));
+    }
+
+    @Test
+    void translatePGServiceToPGProperty() {
+        for (String[] row : TRANSLATION_TABLE) {
+            assertEquals(row[1], PGPropertyUtil.translatePGServiceToPGProperty(row[0]));
+        }
+    }
+
+    @Test
+    void translatePGPropertyToPGService() {
+        for (String[] row : TRANSLATION_TABLE) {
+            assertEquals(row[0], PGPropertyUtil.translatePGPropertyToPGService(row[1]));
+        }
+    }
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/util/PGbyteaTest.java b/pgjdbc/src/test/java/org/postgresql/test/util/PGbyteaTest.java
new file mode 100644
index 0000000..a1e6f02
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/util/PGbyteaTest.java
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2021, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.test.util;
+
+import java.sql.SQLException;
+import java.util.Random;
+import org.junit.jupiter.api.Test;
+import org.postgresql.util.PGbytea;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+
+class PGbyteaTest {
+
+    private static final byte[] HEX_DIGITS_U = new byte[]{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B',
+            'C', 'D', 'E', 'F'};
+    private static final byte[] HEX_DIGITS_L = new byte[]{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b',
+            'c', 'd', 'e', 'f'};
+
+    private static byte[] hexEncode(byte[] data, byte[] hexDigits) {
+
+        // the string created will have 2 characters for each byte.
+        // and 2 lead characters to indicate hex encoding
+        final byte[] encoded = new byte[2 + (data.length << 1)];
+        encoded[0] = '\\';
+        encoded[1] = 'x';
+        for (int i = 0; i < data.length; i++) {
+            final int idx = (i << 1) + 2;
+            final byte b = data[i];
+            encoded[idx] = hexDigits[(b & 0xF0) >>> 4];
+            encoded[idx + 1] = hexDigits[b & 0x0F];
+        }
+        return encoded;
+    }
+
+    @Test
+    void hexDecode_lower() throws SQLException {
+        final byte[] data = new byte[1023];
+        new Random(7).nextBytes(data);
+        final byte[] encoded = hexEncode(data, HEX_DIGITS_L);
+        final byte[] decoded = PGbytea.toBytes(encoded);
+        assertArrayEquals(data, decoded);
+    }
+
+    @Test
+    void hexDecode_upper() throws SQLException {
+        final byte[] data = new byte[9513];
+        new Random(-8).nextBytes(data);
+        final byte[] encoded = hexEncode(data, HEX_DIGITS_U);
+        final byte[] decoded = PGbytea.toBytes(encoded);
+        assertArrayEquals(data, decoded);
+    }
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/util/PGtokenizerTest.java b/pgjdbc/src/test/java/org/postgresql/test/util/PGtokenizerTest.java
new file mode 100644
index 0000000..95af803
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/util/PGtokenizerTest.java
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2020, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.test.util;
+
+import org.junit.jupiter.api.Test;
+import org.postgresql.util.PGtokenizer;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+class PGtokenizerTest {
+
+    // Trailing empty fields count as tokens: "1,2EC1830300027,1,," yields five.
+    @Test
+    void tokenize() {
+        PGtokenizer pGtokenizer = new PGtokenizer("1,2EC1830300027,1,,", ',');
+        assertEquals(5, pGtokenizer.getSize());
+    }
+
+    // Commas inside a quoted section must not split; a quoted value may also
+    // contain an unbalanced opening parenthesis, here "f(10".
+    @Test
+    void tokenize2() {
+        PGtokenizer pGtokenizer = new PGtokenizer(",,d,\"f(10\",\"(mime,pdf,pdf)\",test,2018-10-11,1010", ',');
+        assertEquals(8, pGtokenizer.getSize());
+    }
+
+    // Same, but with an unbalanced closing parenthesis inside quotes ("f)10").
+    @Test
+    void tokenize3() {
+        PGtokenizer pGtokenizer = new PGtokenizer(",,d,\"f)10\",\"(mime,pdf,pdf)\",test,2018-10-11,1010", ',');
+        assertEquals(8, pGtokenizer.getSize());
+    }
+
+    // Balanced parentheses inside a quoted token ("f()10") must not confuse
+    // the tokenizer either.
+    @Test
+    void tokenize4() {
+        PGtokenizer pGtokenizer = new PGtokenizer(",,d,\"f()10\",\"(mime,pdf,pdf)\",test,2018-10-11,1010", ',');
+        assertEquals(8, pGtokenizer.getSize());
+    }
+
+    // removePara strips exactly one pair of enclosing parentheses.
+    @Test
+    void removePara() {
+        String string = PGtokenizer.removePara("(1,2EC1830300027,1,,)");
+        assertEquals("1,2EC1830300027,1,,", string);
+    }
+
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/util/PasswordUtilTest.java b/pgjdbc/src/test/java/org/postgresql/test/util/PasswordUtilTest.java
index 82080cf..c705893 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/util/PasswordUtilTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/util/PasswordUtilTest.java
@@ -5,178 +5,178 @@
 
 package org.postgresql.test.util;
 
+import java.security.SecureRandom;
+import java.sql.Connection;
+import java.sql.SQLException;
+import java.util.Arrays;
+import java.util.Properties;
+import org.junit.jupiter.api.Test;
+import org.postgresql.PGConnection;
+import org.postgresql.core.Utils;
+import org.postgresql.test.TestUtil;
+import org.postgresql.test.annotations.DisabledIfServerVersionBelow;
+import org.postgresql.util.PasswordUtil;
 import static org.junit.jupiter.api.Assertions.assertArrayEquals;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertNotEquals;
 import static org.junit.jupiter.api.Assertions.assertThrows;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 
-import org.postgresql.PGConnection;
-import org.postgresql.core.Utils;
-import org.postgresql.test.TestUtil;
-import org.postgresql.util.PasswordUtil;
-
-import org.junit.jupiter.api.Test;
-
-import java.security.SecureRandom;
-import java.sql.Connection;
-import java.sql.SQLException;
-import java.util.Arrays;
-import java.util.Properties;
-
 class PasswordUtilTest {
-  private static final SecureRandom rng = new SecureRandom();
+    private static final SecureRandom rng = new SecureRandom();
 
-  private static String randomSuffix() {
-    return Long.toHexString(rng.nextLong());
-  }
-
-  private void assertValidUsernamePassword(String user, String password) {
-    Properties props = new Properties();
-    props.setProperty("user", user);
-    props.setProperty("password", password);
-    try (Connection conn = TestUtil.openDB(props)) {
-      String actualUser = TestUtil.queryForString(conn, "SELECT USER");
-      assertEquals(user, actualUser, "User should match");
-    } catch (SQLException e) {
-      throw new RuntimeException("Failed to authenticate using supplied user and password", e);
+    private static String randomSuffix() {
+        return Long.toHexString(rng.nextLong());
     }
-  }
 
-  private void assertInvalidUsernamePassword(String user, String password) {
-    Properties props = new Properties();
-    props.setProperty("user", user);
-    props.setProperty("password", password);
-    assertThrows(SQLException.class, () -> {
-      try (Connection conn = TestUtil.openDB(props)) {
-        conn.getSchema(); // Do something with conn to appease checkstyle
-      }
-    }, "User should not be able to authenticate");
-  }
-
-  private void assertWiped(char[] passwordChars) {
-    char[] expected = Arrays.copyOf(passwordChars, passwordChars.length);
-    Arrays.fill(passwordChars, (char) 0);
-    assertArrayEquals(expected, passwordChars, "password array should be all zeros after use");
-  }
-
-  private void testUserPassword(String encryptionType, String username, String password,
-      String encodedPassword) throws SQLException {
-    String escapedUsername = Utils.escapeIdentifier(null, username).toString();
-
-    try (Connection superConn = TestUtil.openPrivilegedDB()) {
-      TestUtil.execute(superConn, "CREATE USER " //
-          + escapedUsername //
-          + " WITH PASSWORD '" + encodedPassword + "'");
-
-      String shadowPass = TestUtil.queryForString(superConn, //
-          "SELECT passwd FROM pg_shadow WHERE usename = ?", username);
-      assertEquals(shadowPass, encodedPassword, "pg_shadow value of password must match encoded");
-
-      // We should be able to log in using our new user:
-      assertValidUsernamePassword(username, password);
-      // We also check that we cannot log in with the wrong password to ensure that
-      // the server is not simply trusting everything
-      assertInvalidUsernamePassword(username, "Bad Password:" + password);
-
-      String newPassword = "mySecretNewPassword" + randomSuffix();
-      PGConnection pgConn = superConn.unwrap(PGConnection.class);
-      char[] newPasswordChars = newPassword.toCharArray();
-      pgConn.alterUserPassword(username, newPasswordChars, encryptionType);
-      assertNotEquals(newPassword, String.valueOf(newPasswordChars), "newPassword char[] array should be wiped and not match original after encoding");
-      assertWiped(newPasswordChars);
-
-      // We should be able to log in using our new password
-      assertValidUsernamePassword(username, newPassword);
-      // We also check that we cannot log in with the wrong password to ensure that
-      // the server is not simply trusting everything
-      assertInvalidUsernamePassword(username, "Bad Password:" + newPassword);
-    } finally {
-      try (Connection superConn = TestUtil.openPrivilegedDB()) {
-        TestUtil.execute(superConn, "DROP USER " + escapedUsername);
-      } catch (Exception ignore) { }
+    private void assertValidUsernamePassword(String user, String password) {
+        Properties props = new Properties();
+        props.setProperty("user", user);
+        props.setProperty("password", password);
+        try (Connection conn = TestUtil.openDB(props)) {
+            String actualUser = TestUtil.queryForString(conn, "SELECT USER");
+            assertEquals(user, actualUser, "User should match");
+        } catch (SQLException e) {
+            throw new RuntimeException("Failed to authenticate using supplied user and password", e);
+        }
     }
-  }
 
-  private void testUserPassword(String encryptionType, String username, String password) throws SQLException {
-    char[] passwordChars = password.toCharArray();
-    String encodedPassword = PasswordUtil.encodePassword(
-        username, passwordChars,
-        encryptionType == null ? "md5" : encryptionType);
-    assertNotEquals(password, String.valueOf(passwordChars), "password char[] array should be wiped and not match original password after encoding");
-    assertWiped(passwordChars);
-    testUserPassword(encryptionType, username, password, encodedPassword);
-  }
-
-  private void testUserPassword(String encryptionType) throws SQLException {
-    String username = "test_password_" + randomSuffix();
-    String password = "t0pSecret" + randomSuffix();
-
-    testUserPassword(encryptionType, username, password);
-    testUserPassword(encryptionType, username, "password with spaces");
-    testUserPassword(encryptionType, username, "password with single ' quote'");
-    testUserPassword(encryptionType, username, "password with double \" quote'");
-    testUserPassword(encryptionType, username + " with spaces", password);
-    testUserPassword(encryptionType, username + " with single ' quote", password);
-    testUserPassword(encryptionType, username + " with single \" quote", password);
-  }
-
-  @Test
-  void encodePasswordWithServersPasswordEncryption() throws SQLException {
-    String encryptionType;
-    try (Connection conn = TestUtil.openPrivilegedDB()) {
-      encryptionType = TestUtil.queryForString(conn, "SHOW password_encryption");
+    private void assertInvalidUsernamePassword(String user, String password) {
+        Properties props = new Properties();
+        props.setProperty("user", user);
+        props.setProperty("password", password);
+        assertThrows(SQLException.class, () -> {
+            try (Connection conn = TestUtil.openDB(props)) {
+                conn.getSchema(); // Do something with conn to appease checkstyle
+            }
+        }, "User should not be able to authenticate");
     }
-    testUserPassword(encryptionType);
-  }
 
-  @Test
-  void alterUserPasswordSupportsNullEncoding() throws SQLException {
-    testUserPassword(null);
-  }
+    // Asserts the caller already zeroed the password array: snapshot the
+    // current contents, zero the array locally, then require the snapshot to
+    // equal the zeroed array — i.e. it must have been all '\0' on entry.
+    private void assertWiped(char[] passwordChars) {
+        char[] expected = Arrays.copyOf(passwordChars, passwordChars.length);
+        Arrays.fill(passwordChars, (char) 0);
+        assertArrayEquals(expected, passwordChars, "password array should be all zeros after use");
+    }
 
-  @Test
-  void mD5() throws SQLException {
-    testUserPassword("md5");
-  }
+    private void testUserPassword(String encryptionType, String username, String password,
+                                  String encodedPassword) throws SQLException {
+        String escapedUsername = Utils.escapeIdentifier(null, username).toString();
 
-  @Test
-  void encryptionTypeValueOfOn() throws SQLException {
-    testUserPassword("on");
-  }
+        try (Connection superConn = TestUtil.openPrivilegedDB()) {
+            TestUtil.execute(superConn, "CREATE USER " //
+                    + escapedUsername //
+                    + " WITH PASSWORD '" + encodedPassword + "'");
 
-  @Test
-  void encryptionTypeValueOfOff() throws SQLException {
-    testUserPassword("off");
-  }
+            String shadowPass = TestUtil.queryForString(superConn, //
+                    "SELECT passwd FROM pg_shadow WHERE usename = ?", username);
+            assertEquals(shadowPass, encodedPassword, "pg_shadow value of password must match encoded");
 
-  @Test
-  @DisabledIfServerVersionBelow("10.0")
-  void scramSha256() throws SQLException {
-    testUserPassword("scram-sha-256");
-  }
+            // We should be able to log in using our new user:
+            assertValidUsernamePassword(username, password);
+            // We also check that we cannot log in with the wrong password to ensure that
+            // the server is not simply trusting everything
+            assertInvalidUsernamePassword(username, "Bad Password:" + password);
 
-  @Test
-  @DisabledIfServerVersionBelow("10.0")
-  void customScramParams() throws SQLException {
-    String username = "test_password_" + randomSuffix();
-    String password = "t0pSecret" + randomSuffix();
-    byte[] salt = new byte[32];
-    rng.nextBytes(salt);
-    int iterations = 12345;
-    String encodedPassword = PasswordUtil.encodeScramSha256(password.toCharArray(), iterations, salt);
-    assertTrue(encodedPassword.startsWith("SCRAM-SHA-256$" + iterations + ":"), "encoded password should have custom iteration count");
-    testUserPassword("scram-sha-256", username, password, encodedPassword);
-  }
+            String newPassword = "mySecretNewPassword" + randomSuffix();
+            PGConnection pgConn = superConn.unwrap(PGConnection.class);
+            char[] newPasswordChars = newPassword.toCharArray();
+            pgConn.alterUserPassword(username, newPasswordChars, encryptionType);
+            assertNotEquals(newPassword, String.valueOf(newPasswordChars), "newPassword char[] array should be wiped and not match original after encoding");
+            assertWiped(newPasswordChars);
 
-  @Test
-  void unknownEncryptionType() throws SQLException {
-    String username = "test_password_" + randomSuffix();
-    String password = "t0pSecret" + randomSuffix();
-    char[] passwordChars = password.toCharArray();
-    assertThrows(SQLException.class, () -> {
-      PasswordUtil.encodePassword(username, passwordChars, "not-a-real-encryption-type");
-    });
-    assertWiped(passwordChars);
-  }
+            // We should be able to log in using our new password
+            assertValidUsernamePassword(username, newPassword);
+            // We also check that we cannot log in with the wrong password to ensure that
+            // the server is not simply trusting everything
+            assertInvalidUsernamePassword(username, "Bad Password:" + newPassword);
+        } finally {
+            try (Connection superConn = TestUtil.openPrivilegedDB()) {
+                TestUtil.execute(superConn, "DROP USER " + escapedUsername);
+            } catch (Exception ignore) {
+            }
+        }
+    }
+
+    private void testUserPassword(String encryptionType, String username, String password) throws SQLException {
+        char[] passwordChars = password.toCharArray();
+        String encodedPassword = PasswordUtil.encodePassword(
+                username, passwordChars,
+                encryptionType == null ? "md5" : encryptionType);
+        assertNotEquals(password, String.valueOf(passwordChars), "password char[] array should be wiped and not match original password after encoding");
+        assertWiped(passwordChars);
+        testUserPassword(encryptionType, username, password, encodedPassword);
+    }
+
+    private void testUserPassword(String encryptionType) throws SQLException {
+        String username = "test_password_" + randomSuffix();
+        String password = "t0pSecret" + randomSuffix();
+
+        testUserPassword(encryptionType, username, password);
+        testUserPassword(encryptionType, username, "password with spaces");
+        testUserPassword(encryptionType, username, "password with single ' quote'");
+        testUserPassword(encryptionType, username, "password with double \" quote'");
+        testUserPassword(encryptionType, username + " with spaces", password);
+        testUserPassword(encryptionType, username + " with single ' quote", password);
+        testUserPassword(encryptionType, username + " with single \" quote", password);
+    }
+
+    @Test
+    void encodePasswordWithServersPasswordEncryption() throws SQLException {
+        String encryptionType;
+        try (Connection conn = TestUtil.openPrivilegedDB()) {
+            encryptionType = TestUtil.queryForString(conn, "SHOW password_encryption");
+        }
+        testUserPassword(encryptionType);
+    }
+
+    @Test
+    void alterUserPasswordSupportsNullEncoding() throws SQLException {
+        testUserPassword(null);
+    }
+
+    @Test
+    void mD5() throws SQLException {
+        testUserPassword("md5");
+    }
+
+    @Test
+    void encryptionTypeValueOfOn() throws SQLException {
+        testUserPassword("on");
+    }
+
+    @Test
+    void encryptionTypeValueOfOff() throws SQLException {
+        testUserPassword("off");
+    }
+
+    @Test
+    @DisabledIfServerVersionBelow("10.0")
+    void scramSha256() throws SQLException {
+        testUserPassword("scram-sha-256");
+    }
+
+    @Test
+    @DisabledIfServerVersionBelow("10.0")
+    void customScramParams() throws SQLException {
+        String username = "test_password_" + randomSuffix();
+        String password = "t0pSecret" + randomSuffix();
+        byte[] salt = new byte[32];
+        rng.nextBytes(salt);
+        int iterations = 12345;
+        // Use the explicit SCRAM entry point so the custom iteration count and
+        // salt are actually honored. Going through encodePassword(...,
+        // "SCRAM-SHA-256") would apply default SCRAM parameters, leave 'salt'
+        // and 'iterations' unused, and the iteration-count assertion below
+        // would fail.
+        String encodedPassword = PasswordUtil.encodeScramSha256(password.toCharArray(), iterations, salt);
+        assertTrue(encodedPassword.startsWith("SCRAM-SHA-256$" + iterations + ":"), "encoded password should have custom iteration count");
+        testUserPassword("scram-sha-256", username, password, encodedPassword);
+    }
+
+    // A bogus encryption type must be rejected with SQLException, and the
+    // password array must still be wiped on the failure path.
+    @Test
+    void unknownEncryptionType() throws SQLException {
+        String username = "test_password_" + randomSuffix();
+        String password = "t0pSecret" + randomSuffix();
+        char[] passwordChars = password.toCharArray();
+        assertThrows(SQLException.class, () -> {
+            PasswordUtil.encodePassword(username, passwordChars, "not-a-real-encryption-type");
+        });
+        assertWiped(passwordChars);
+    }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/util/ReaderInputStreamTest.java b/pgjdbc/src/test/java/org/postgresql/test/util/ReaderInputStreamTest.java
new file mode 100644
index 0000000..96ccab2
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/util/ReaderInputStreamTest.java
@@ -0,0 +1,251 @@
+/*
+ * Copyright (c) 2016, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.test.util;
+
+import java.io.ByteArrayInputStream;
+import java.io.CharArrayReader;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.io.Reader;
+import java.io.StringReader;
+import java.nio.charset.MalformedInputException;
+import java.util.Arrays;
+import org.junit.jupiter.api.Test;
+import org.postgresql.util.ReaderInputStream;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+
+class ReaderInputStreamTest {
+    // 132878 = U+2070E - chosen because it is the first supplementary character
+    // in the International Ideographic Core (IICore)
+    // see http://www.i18nguy.com/unicode/supplementary-test.html for further explanation
+
+    // Character.highSurrogate(132878) = 0xd841
+    private static final char LEADING_SURROGATE = 0xd841;
+
+    // Character.lowSurrogate(132878) = 0xdf0e
+    private static final char TRAILING_SURROGATE = 0xdf0e;
+
+    /**
+     * Reads up to four bytes from {@code is} and compares them (zero-padded to
+     * four) against {@code expected}. When called with no expected bytes,
+     * asserts end-of-stream instead and closes the stream.
+     */
+    private static void read(InputStream is, int... expected) throws IOException {
+        byte[] actual = new byte[4];
+        Arrays.fill(actual, (byte) 0x00);
+        int nActual = is.read(actual);
+        int[] actualInts = new int[4];
+        for (int i = 0; i < actual.length; i++) {
+            // widen to unsigned int so failure messages print 0..255
+            actualInts[i] = actual[i] & 0xff;
+        }
+        if (expected.length > 0) {
+            // Ensure "expected" has 4 bytes
+            expected = Arrays.copyOf(expected, 4);
+            assertEquals(Arrays.toString(expected), Arrays.toString(actualInts));
+        } else {
+            assertEquals(-1, nActual, "should be end-of-stream");
+            is.close();
+        }
+    }
+
+    @Test
+    @SuppressWarnings("nullability")
+    void NullReaderTest() {
+        assertThrows(IllegalArgumentException.class, () -> {
+            new ReaderInputStream(null);
+        });
+    }
+
+    @Test
+    void cbufTooSmallReaderTest() {
+        assertThrows(IllegalArgumentException.class, () -> {
+            new ReaderInputStream(new StringReader("abc"), 1);
+        });
+    }
+
+    @Test
+    void SimpleTest() throws IOException {
+        char[] chars = {'a', 'b', 'c'};
+        Reader reader = new CharArrayReader(chars);
+        InputStream is = new ReaderInputStream(reader);
+        read(is, 0x61, 0x62, 0x63);
+        read(is);
+    }
+
+    @Test
+    void inputSmallerThanCbufsizeTest() throws IOException {
+        char[] chars = {'a'};
+        Reader reader = new CharArrayReader(chars);
+        InputStream is = new ReaderInputStream(reader, 2);
+        read(is, 0x61);
+        read(is);
+    }
+
+    @Test
+    void tooManyReadsTest() throws IOException {
+        char[] chars = {'a'};
+        Reader reader = new CharArrayReader(chars);
+        InputStream is = new ReaderInputStream(reader, 2);
+        read(is, 0x61);
+        assertEquals(-1, is.read(), "should be end-of-stream");
+        assertEquals(-1, is.read(), "should be end-of-stream");
+        assertEquals(-1, is.read(), "should be end-of-stream");
+        is.close();
+    }
+
+    @Test
+    void surrogatePairSpansCharBufBoundaryTest() throws IOException {
+        char[] chars = {'a', LEADING_SURROGATE, TRAILING_SURROGATE};
+        Reader reader = new CharArrayReader(chars);
+        InputStream is = new ReaderInputStream(reader, 2);
+        read(is, 0x61, 0xF0, 0xA0, 0x9C);
+        read(is, 0x8E);
+        read(is);
+    }
+
+    @Test
+    void invalidInputTest() throws IOException {
+        assertThrows(MalformedInputException.class, () -> {
+            char[] chars = {'a', LEADING_SURROGATE, LEADING_SURROGATE};
+            Reader reader = new CharArrayReader(chars);
+            InputStream is = new ReaderInputStream(reader, 2);
+            read(is);
+        });
+    }
+
+    @Test
+    void unmatchedLeadingSurrogateInputTest() throws IOException {
+        assertThrows(MalformedInputException.class, () -> {
+            char[] chars = {LEADING_SURROGATE};
+            Reader reader = new CharArrayReader(chars);
+            InputStream is = new ReaderInputStream(reader, 2);
+            read(is, 0x00);
+        });
+    }
+
+    @Test
+    void unmatchedTrailingSurrogateInputTest() throws IOException {
+        assertThrows(MalformedInputException.class, () -> {
+            char[] chars = {TRAILING_SURROGATE};
+            Reader reader = new CharArrayReader(chars);
+            InputStream is = new ReaderInputStream(reader, 2);
+            read(is);
+        });
+    }
+
+    @Test
+    @SuppressWarnings("nullness")
+    void nullArrayReadTest() throws IOException {
+        assertThrows(NullPointerException.class, () -> {
+            Reader reader = new StringReader("abc");
+            InputStream is = new ReaderInputStream(reader);
+            is.read(null, 0, 4);
+        });
+    }
+
+    @Test
+    void invalidOffsetArrayReadTest() throws IOException {
+        assertThrows(IndexOutOfBoundsException.class, () -> {
+            Reader reader = new StringReader("abc");
+            InputStream is = new ReaderInputStream(reader);
+            byte[] bytes = new byte[4];
+            is.read(bytes, 5, 4);
+        });
+    }
+
+    @Test
+    void negativeOffsetArrayReadTest() throws IOException {
+        assertThrows(IndexOutOfBoundsException.class, () -> {
+            Reader reader = new StringReader("abc");
+            InputStream is = new ReaderInputStream(reader);
+            byte[] bytes = new byte[4];
+            is.read(bytes, -1, 4);
+        });
+    }
+
+    @Test
+    void invalidLengthArrayReadTest() throws IOException {
+        assertThrows(IndexOutOfBoundsException.class, () -> {
+            Reader reader = new StringReader("abc");
+            InputStream is = new ReaderInputStream(reader);
+            byte[] bytes = new byte[4];
+            is.read(bytes, 1, 4);
+        });
+    }
+
+    @Test
+    void negativeLengthArrayReadTest() throws IOException {
+        assertThrows(IndexOutOfBoundsException.class, () -> {
+            Reader reader = new StringReader("abc");
+            InputStream is = new ReaderInputStream(reader);
+            byte[] bytes = new byte[4];
+            is.read(bytes, 1, -2);
+        });
+    }
+
+    @Test
+    void zeroLengthArrayReadTest() throws IOException {
+        Reader reader = new StringReader("abc");
+        InputStream is = new ReaderInputStream(reader);
+        byte[] bytes = new byte[4];
+        assertEquals(0, is.read(bytes, 1, 0), "requested 0 byte read");
+    }
+
+    @Test
+    void singleCharArrayReadTest() throws IOException {
+        Reader reader = new SingleCharPerReadReader(LEADING_SURROGATE, TRAILING_SURROGATE);
+        InputStream is = new ReaderInputStream(reader);
+        read(is, 0xF0, 0xA0, 0x9C, 0x8E);
+        read(is);
+    }
+
+    @Test
+    void malformedSingleCharArrayReadTest() throws IOException {
+        assertThrows(MalformedInputException.class, () -> {
+            Reader reader = new SingleCharPerReadReader(LEADING_SURROGATE, LEADING_SURROGATE);
+            InputStream is = new ReaderInputStream(reader);
+            read(is, 0xF0, 0xA0, 0x9C, 0x8E);
+        });
+    }
+
+    // Regression test: when a read request exactly equals the internal block
+    // size, the remaining tail bytes (here 57) must still be delivered by the
+    // following read rather than being lost.
+    @Test
+    void readsEqualToBlockSizeTest() throws Exception {
+        final int blockSize = 8 * 1024;
+        final int dataSize = blockSize + 57;
+        final byte[] data = new byte[dataSize];
+        final byte[] buffer = new byte[blockSize];
+
+        InputStreamReader isr = new InputStreamReader(new ByteArrayInputStream(data), "UTF-8");
+        ReaderInputStream r = new ReaderInputStream(isr, blockSize);
+
+        int total = 0;
+
+        total += r.read(buffer, 0, blockSize);
+        total += r.read(buffer, 0, blockSize);
+
+        assertEquals(dataSize, total, "Data not read completely: missing " + (dataSize - total) + " bytes");
+    }
+
+    /**
+     * Reader that returns at most one char per {@code read} call, used to
+     * exercise surrogate pairs that arrive split across separate reads.
+     */
+    private static class SingleCharPerReadReader extends Reader {
+        private final char[] data;
+        private int i;
+
+        private SingleCharPerReadReader(char... data) {
+            this.data = data;
+        }
+
+        @Override
+        public int read(char[] cbuf, int off, int len) throws IOException {
+            // Deliver exactly one char regardless of the requested len.
+            if (i < data.length) {
+                cbuf[off] = data[i++];
+                return 1;
+            }
+
+            return -1;
+        }
+
+        @Override
+        public void close() throws IOException {
+        }
+    }
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/util/RegexMatcher.java b/pgjdbc/src/test/java/org/postgresql/test/util/RegexMatcher.java
index 382eb4c..63fb8d9 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/util/RegexMatcher.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/util/RegexMatcher.java
@@ -5,45 +5,43 @@
 
 package org.postgresql.test.util;
 
+import java.util.regex.Pattern;
 import org.hamcrest.Description;
 import org.hamcrest.Matcher;
 import org.hamcrest.TypeSafeMatcher;
 
-import java.util.regex.Pattern;
-
 /**
  * Provides a matcher for String objects which does a regex comparison.
  */
 public final class RegexMatcher extends TypeSafeMatcher<String> {
 
-  private final Pattern pattern;
+    private final Pattern pattern;
 
-  /**
-   * @param pattern
-   *          The pattern to match items on.
-   */
-  private RegexMatcher(Pattern pattern) {
-    this.pattern = pattern;
-  }
+    /**
+     * @param pattern The pattern to match items on.
+     */
+    private RegexMatcher(Pattern pattern) {
+        this.pattern = pattern;
+    }
 
-  public static Matcher<String> matchesPattern(String pattern) {
-    return new RegexMatcher(Pattern.compile(pattern));
-  }
+    public static Matcher<String> matchesPattern(String pattern) {
+        return new RegexMatcher(Pattern.compile(pattern));
+    }
 
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public void describeTo(Description description) {
-    description.appendText("matches regex=" + pattern.toString());
-  }
+    /**
+     * {@inheritDoc}
+     */
+    @Override
+    public void describeTo(Description description) {
+        description.appendText("matches regex=" + pattern.toString());
+    }
 
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  protected boolean matchesSafely(String item) {
-    return pattern.matcher(item).matches();
-  }
+    /**
+     * Returns whether the entire {@code item} matches the configured pattern
+     * (full-string match via {@link java.util.regex.Matcher#matches}, not a
+     * substring find).
+     */
+    @Override
+    protected boolean matchesSafely(String item) {
+        return pattern.matcher(item).matches();
+    }
 
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/util/ServerVersionParseTest.java b/pgjdbc/src/test/java/org/postgresql/test/util/ServerVersionParseTest.java
index 5f11586..7d5c766 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/util/ServerVersionParseTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/util/ServerVersionParseTest.java
@@ -5,86 +5,83 @@
 
 package org.postgresql.test.util;
 
+import java.util.Arrays;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.MethodSource;
+import org.postgresql.core.ServerVersion;
+import org.postgresql.core.Version;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.fail;
 
-import org.postgresql.core.ServerVersion;
-import org.postgresql.core.Version;
-
-import org.junit.jupiter.params.ParameterizedTest;
-import org.junit.jupiter.params.provider.MethodSource;
-
-import java.util.Arrays;
-
 public class ServerVersionParseTest {
-  public static Iterable<Object[]> data() {
-    return Arrays.asList(new Object[][]{
-        /* 4 part version tests */
-        {"7.4.0.0", 70400, null},
-        {"9.0.0.0", 90000, null},
-        {"9.0.1.0", 90001, null},
-        {"9.2.1.0", 90201, null},
-        {"7.4.0", 70400, null},
-        {"9.0.0", 90000, null},
-        {"9.0.1", 90001, null},
-        {"9.2.1", 90201, null},
-        /* Major only */
-        {"7.4", 70400, null},
-        {"9.0", 90000, null},
-        {"9.2", 90200, null},
-        {"9.6", 90600, null},
-        {"10", 100000, null},
-        {"11", 110000, null},
-        {"12", 120000, null},
-        /* Multidigit */
-        {"9.4.10", 90410, null},
-        {"9.20.10", 92010, null},
-        /* After 10 */
-        {"10.1", 100001, null},
-        {"10.10", 100010, null},
-        {"11.1", 110001, null},
-        {"123.20", 1230020, null},
-        /* Fail cases */
-        {"9.20.100", -1, "Should've rejected three-digit minor version"},
-        {"9.100.10", -1, "Should've rejected three-digit second part of major version"},
-        {"10.100.10", -1, "10+ version should have 2 components only"},
-        {"12345.1", -1, "Too big version number"},
-        /* Preparsed */
-        {"90104", 90104, null},
-        {"090104", 90104, null},
-        {"070400", 70400, null},
-        {"100004", 100004, null},
-        {"10000", 10000, null},
-        /* --with-extra-version or beta/devel tags */
-        {"9.4devel", 90400, null},
-        {"9.4beta1", 90400, null},
-        {"10devel", 100000, null},
-        {"10beta1", 100000, null},
-        {"10.1devel", 100001, null},
-        {"10.1beta1", 100001, null},
-        {"9.4.1bobs", 90401, null},
-        {"9.4.1bobspatched9.4", 90401, null},
-        {"9.4.1-bobs-patched-postgres-v2.2", 90401, null},
+    public static Iterable<Object[]> data() {
+        return Arrays.asList(new Object[][]{
+                /* 4 part version tests */
+                {"7.4.0.0", 70400, null},
+                {"9.0.0.0", 90000, null},
+                {"9.0.1.0", 90001, null},
+                {"9.2.1.0", 90201, null},
+                {"7.4.0", 70400, null},
+                {"9.0.0", 90000, null},
+                {"9.0.1", 90001, null},
+                {"9.2.1", 90201, null},
+                /* Major only */
+                {"7.4", 70400, null},
+                {"9.0", 90000, null},
+                {"9.2", 90200, null},
+                {"9.6", 90600, null},
+                {"10", 100000, null},
+                {"11", 110000, null},
+                {"12", 120000, null},
+                /* Multidigit */
+                {"9.4.10", 90410, null},
+                {"9.20.10", 92010, null},
+                /* After 10 */
+                {"10.1", 100001, null},
+                {"10.10", 100010, null},
+                {"11.1", 110001, null},
+                {"123.20", 1230020, null},
+                /* Fail cases */
+                {"9.20.100", -1, "Should've rejected three-digit minor version"},
+                {"9.100.10", -1, "Should've rejected three-digit second part of major version"},
+                {"10.100.10", -1, "10+ version should have 2 components only"},
+                {"12345.1", -1, "Too big version number"},
+                /* Preparsed */
+                {"90104", 90104, null},
+                {"090104", 90104, null},
+                {"070400", 70400, null},
+                {"100004", 100004, null},
+                {"10000", 10000, null},
+                /* --with-extra-version or beta/devel tags */
+                {"9.4devel", 90400, null},
+                {"9.4beta1", 90400, null},
+                {"10devel", 100000, null},
+                {"10beta1", 100000, null},
+                {"10.1devel", 100001, null},
+                {"10.1beta1", 100001, null},
+                {"9.4.1bobs", 90401, null},
+                {"9.4.1bobspatched9.4", 90401, null},
+                {"9.4.1-bobs-patched-postgres-v2.2", 90401, null},
 
-    });
-  }
-
-  @MethodSource("data")
-  @ParameterizedTest(name = "str = {0}, expected = {1}")
-  void run(String versionString, int versionNum, String rejectReason) {
-    try {
-      Version version = ServerVersion.from(versionString);
-      if (rejectReason == null) {
-        assertEquals(versionNum, version.getVersionNum(), "Parsing " + versionString);
-      } else {
-        fail("Should fail to parse " + versionString + ", " + rejectReason);
-      }
-    } catch (NumberFormatException e) {
-      if (rejectReason != null) {
-        return;
-      }
-      throw e;
+        });
+    }
+
+    // Parses each version string from data(): when rejectReason is null the
+    // parsed version number must match; otherwise parsing is expected to fail
+    // with NumberFormatException (any other outcome fails the test).
+    @MethodSource("data")
+    @ParameterizedTest(name = "str = {0}, expected = {1}")
+    void run(String versionString, int versionNum, String rejectReason) {
+        try {
+            Version version = ServerVersion.from(versionString);
+            if (rejectReason == null) {
+                assertEquals(versionNum, version.getVersionNum(), "Parsing " + versionString);
+            } else {
+                fail("Should fail to parse " + versionString + ", " + rejectReason);
+            }
+        } catch (NumberFormatException e) {
+            if (rejectReason != null) {
+                return;
+            }
+            throw e;
+        }
+    }
     }
-  }
 
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/util/ServerVersionTest.java b/pgjdbc/src/test/java/org/postgresql/test/util/ServerVersionTest.java
index 99f106d..b6a4731 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/util/ServerVersionTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/util/ServerVersionTest.java
@@ -5,33 +5,31 @@
 
 package org.postgresql.test.util;
 
+import org.junit.jupiter.api.Test;
+import org.postgresql.core.ServerVersion;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 
-import org.postgresql.core.ServerVersion;
-
-import org.junit.jupiter.api.Test;
-
 class ServerVersionTest {
-  @Test
-  void versionIncreases() {
-    ServerVersion prev = null;
-    for (ServerVersion serverVersion : ServerVersion.values()) {
-      if (prev != null) {
-        assertTrue(prev.getVersionNum() < serverVersion.getVersionNum(),
-            prev + " should be less than " + serverVersion);
-      }
-      prev = serverVersion;
+    @Test
+    void versionIncreases() {
+        ServerVersion prev = null;
+        for (ServerVersion serverVersion : ServerVersion.values()) {
+            if (prev != null) {
+                assertTrue(prev.getVersionNum() < serverVersion.getVersionNum(),
+                        prev + " should be less than " + serverVersion);
+            }
+            prev = serverVersion;
+        }
     }
-  }
 
-  @Test
-  void versions() {
-    assertEquals(ServerVersion.v12.getVersionNum(), ServerVersion.from("12.0").getVersionNum());
-    assertEquals(120004, ServerVersion.from("12.4").getVersionNum());
-    assertEquals(ServerVersion.v11.getVersionNum(), ServerVersion.from("11.0").getVersionNum());
-    assertEquals(110006, ServerVersion.from("11.6").getVersionNum());
-    assertEquals(ServerVersion.v10.getVersionNum(), ServerVersion.from("10.0").getVersionNum());
-    assertTrue(ServerVersion.v9_6.getVersionNum() < ServerVersion.from("9.6.4").getVersionNum());
-  }
+    @Test
+    void versions() {
+        assertEquals(ServerVersion.v12.getVersionNum(), ServerVersion.from("12.0").getVersionNum());
+        assertEquals(120004, ServerVersion.from("12.4").getVersionNum());
+        assertEquals(ServerVersion.v11.getVersionNum(), ServerVersion.from("11.0").getVersionNum());
+        assertEquals(110006, ServerVersion.from("11.6").getVersionNum());
+        assertEquals(ServerVersion.v10.getVersionNum(), ServerVersion.from("10.0").getVersionNum());
+        assertTrue(ServerVersion.v9_6.getVersionNum() < ServerVersion.from("9.6.4").getVersionNum());
+    }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/util/StrangeInputStream.java b/pgjdbc/src/test/java/org/postgresql/test/util/StrangeInputStream.java
new file mode 100644
index 0000000..e806e6c
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/util/StrangeInputStream.java
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2004, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.test.util;
+
+import java.io.FileNotFoundException;
+import java.io.FilterInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.Random;
+
+/**
+ * {@link InputStream} implementation that reads less data than is provided in the destination
+ * array. This makes it possible to stress-test {@link org.postgresql.copy.CopyManager} or other consumers.
+ */
+public class StrangeInputStream extends FilterInputStream {
+    private final Random rand = new Random(); // generator of fun events
+
+    public StrangeInputStream(InputStream is, long seed) throws FileNotFoundException {
+        super(is);
+        rand.setSeed(seed);
+    }
+
+    @Override
+    public int read(byte[] b) throws IOException {
+        int maxRead = b.length == 0 ? 0 : rand.nextInt(b.length); // Random.nextInt(0) would throw
+        return super.read(b, 0, maxRead);
+    }
+
+    @Override
+    public int read(byte[] b, int off, int len) throws IOException {
+        if (len > 0 && rand.nextInt(10) > 7) {
+            int next = super.read();
+            if (next == -1) {
+                return -1;
+            }
+            b[off] = (byte) next;
+            return 1;
+        }
+        int maxRead = len == 0 ? 0 : rand.nextInt(len); // Random.nextInt(0) would throw
+        return super.read(b, off, maxRead);
+    }
+
+    @Override
+    public long skip(long n) throws IOException {
+        long maxSkip = n <= 0 ? 0 : Math.floorMod(rand.nextLong(), n + 1); // keep skip non-negative
+        return super.skip(maxSkip);
+    }
+
+    @Override
+    public int available() throws IOException {
+        int available = super.available();
+        return available == Integer.MAX_VALUE ? available : rand.nextInt(available + 1); // avoid overflow
+    }
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/util/StrangeOutputStream.java b/pgjdbc/src/test/java/org/postgresql/test/util/StrangeOutputStream.java
new file mode 100644
index 0000000..1213334
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/util/StrangeOutputStream.java
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2004, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.test.util;
+
+import java.io.FilterOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.util.Random;
+
+/**
+ * {@link OutputStream} implementation that breaks writes into several individual writes. It
+ * allows stress-testing other {@link OutputStream} implementations.
+ * For instance, it makes it possible to test non-zero offset writes from the source buffers,
+ * and it might convert buffered writes into individual byte-by-byte writes.
+ */
+public class StrangeOutputStream extends FilterOutputStream {
+    private final Random rand = new Random(); // generator of fun events
+    private final byte[] oneByte = new byte[1];
+    private final double flushProbability;
+
+    public StrangeOutputStream(OutputStream os, long seed, double flushProbability) {
+        super(os);
+        this.flushProbability = flushProbability;
+        rand.setSeed(seed);
+    }
+
+    @Override
+    public void write(int b) throws IOException {
+        oneByte[0] = (byte) b;
+        out.write(oneByte);
+        if (rand.nextDouble() < flushProbability) {
+            flush();
+        }
+    }
+
+    @Override
+    public void write(byte[] b, int off, int len) throws IOException {
+        while (len > 0) {
+            int maxWrite = rand.nextInt(len + 1);
+            if (maxWrite == 1 && rand.nextBoolean()) {
+                out.write(b[off]);
+            } else {
+                out.write(b, off, maxWrite);
+            }
+            off += maxWrite;
+            len -= maxWrite;
+            if (rand.nextDouble() < flushProbability) {
+                flush();
+            }
+        }
+    }
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/util/StrangeProxyServer.java b/pgjdbc/src/test/java/org/postgresql/test/util/StrangeProxyServer.java
index dc34fbe..827826c 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/util/StrangeProxyServer.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/util/StrangeProxyServer.java
@@ -5,8 +5,6 @@
 
 package org.postgresql.test.util;
 
-import org.postgresql.test.TestUtil;
-
 import java.io.Closeable;
 import java.io.IOException;
 import java.io.InputStream;
@@ -14,6 +12,7 @@ import java.io.OutputStream;
 import java.net.ServerSocket;
 import java.net.Socket;
 import java.net.SocketTimeoutException;
+import org.postgresql.test.TestUtil;
 
 /**
  * Proxy server that allows for pretending that traffic did not arrive at the
@@ -23,69 +22,69 @@ import java.net.SocketTimeoutException;
  * EOF then both sides are immediately closed.
  */
 public class StrangeProxyServer implements Closeable {
-  private final ServerSocket serverSock;
-  private volatile boolean keepRunning = true;
-  private volatile long minAcceptedAt;
+    private final ServerSocket serverSock;
+    private volatile boolean keepRunning = true;
+    private volatile long minAcceptedAt;
 
-  public StrangeProxyServer(String destHost, int destPort) throws IOException {
-    this.serverSock = new ServerSocket(0);
-    this.serverSock.setSoTimeout(100);
-    doAsync(() -> {
-      while (keepRunning) {
-        try {
-          Socket sourceSock = serverSock.accept();
-          final long acceptedAt = System.currentTimeMillis();
-          Socket destSock = new Socket(destHost, destPort);
-          doAsync(() -> transferOneByOne(acceptedAt, sourceSock, destSock));
-          doAsync(() -> transferOneByOne(acceptedAt, destSock, sourceSock));
-        } catch (SocketTimeoutException ignore) {
-        } catch (IOException e) {
-          throw new RuntimeException(e);
-        }
-      }
-      TestUtil.closeQuietly(serverSock);
-    });
-  }
-
-  public int getServerPort() {
-    return this.serverSock.getLocalPort();
-  }
-
-  @Override
-  public void close() {
-    this.keepRunning = false;
-  }
-
-  public void stopForwardingOlderClients() {
-    this.minAcceptedAt = System.currentTimeMillis();
-  }
-
-  public void stopForwardingAllClients() {
-    this.minAcceptedAt = Long.MAX_VALUE;
-  }
-
-  private void doAsync(Runnable task) {
-    Thread thread = new Thread(task);
-    thread.setDaemon(true);
-    thread.start();
-  }
-
-  private void transferOneByOne(long acceptedAt, Socket source, Socket dest) {
-    try {
-      InputStream in = source.getInputStream();
-      OutputStream out = dest.getOutputStream();
-      int b;
-      // As long as we're running try to read
-      while (keepRunning && (b = in.read()) >= 0) {
-        // But only write it if the client is newer than the last call to stopForwardingOlderClients()
-        if (acceptedAt >= minAcceptedAt) {
-          out.write(b);
-        }
-      }
-    } catch (IOException ignore) {
-    } finally {
-      TestUtil.closeQuietly(source);
-      TestUtil.closeQuietly(dest);
+    public StrangeProxyServer(String destHost, int destPort) throws IOException {
+        this.serverSock = new ServerSocket(0);
+        this.serverSock.setSoTimeout(100);
+        doAsync(() -> {
+            while (keepRunning) {
+                try {
+                    Socket sourceSock = serverSock.accept();
+                    final long acceptedAt = System.currentTimeMillis();
+                    Socket destSock = new Socket(destHost, destPort);
+                    doAsync(() -> transferOneByOne(acceptedAt, sourceSock, destSock));
+                    doAsync(() -> transferOneByOne(acceptedAt, destSock, sourceSock));
+                } catch (SocketTimeoutException ignore) {
+                } catch (IOException e) {
+                    throw new RuntimeException(e);
+                }
+            }
+            TestUtil.closeQuietly(serverSock);
+        });
+    }
+
+    public int getServerPort() {
+        return this.serverSock.getLocalPort();
+    }
+
+    @Override
+    public void close() {
+        this.keepRunning = false;
+    }
+
+    public void stopForwardingOlderClients() {
+        this.minAcceptedAt = System.currentTimeMillis();
+    }
+
+    public void stopForwardingAllClients() {
+        this.minAcceptedAt = Long.MAX_VALUE;
+    }
+
+    private void doAsync(Runnable task) {
+        Thread thread = new Thread(task);
+        thread.setDaemon(true);
+        thread.start();
+    }
+
+    private void transferOneByOne(long acceptedAt, Socket source, Socket dest) {
+        try {
+            InputStream in = source.getInputStream();
+            OutputStream out = dest.getOutputStream();
+            int b;
+            // As long as we're running try to read
+            while (keepRunning && (b = in.read()) >= 0) {
+                // But only write it if the client is newer than the last call to stopForwardingOlderClients()
+                if (acceptedAt >= minAcceptedAt) {
+                    out.write(b);
+                }
+            }
+        } catch (IOException ignore) {
+        } finally {
+            TestUtil.closeQuietly(source);
+            TestUtil.closeQuietly(dest);
+        }
     }
-  }
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/util/StubEnvironmentAndProperties.java b/pgjdbc/src/test/java/org/postgresql/test/util/StubEnvironmentAndProperties.java
similarity index 85%
rename from pgjdbc/src/test/java/org/postgresql/util/StubEnvironmentAndProperties.java
rename to pgjdbc/src/test/java/org/postgresql/test/util/StubEnvironmentAndProperties.java
index 23f780d..a6289e6 100644
--- a/pgjdbc/src/test/java/org/postgresql/util/StubEnvironmentAndProperties.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/util/StubEnvironmentAndProperties.java
@@ -3,16 +3,13 @@
  * See the LICENSE file in the project root for more information.
  */
 
-package org.postgresql.util;
-
-import org.junit.jupiter.api.extension.ExtendWith;
-import org.junit.jupiter.api.parallel.Isolated;
-import uk.org.webcompere.systemstubs.jupiter.SystemStubsExtension;
+package org.postgresql.test.util;
 
 import java.lang.annotation.ElementType;
 import java.lang.annotation.Retention;
 import java.lang.annotation.RetentionPolicy;
 import java.lang.annotation.Target;
+import org.junit.jupiter.api.parallel.Isolated;
 
 /**
  * This annotation is used to mark a test method as a test that should be run with stubbing system
@@ -25,7 +22,6 @@ import java.lang.annotation.Target;
  * static methods are not available in other threads</a> are resolved</p>
  */
 @Isolated
-@ExtendWith(SystemStubsExtension.class)
 @Retention(RetentionPolicy.RUNTIME)
 @Target({ElementType.METHOD, ElementType.TYPE})
 public @interface StubEnvironmentAndProperties {
diff --git a/pgjdbc/src/test/java/org/postgresql/test/util/TestLogHandler.java b/pgjdbc/src/test/java/org/postgresql/test/util/TestLogHandler.java
new file mode 100644
index 0000000..0389fd9
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/util/TestLogHandler.java
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2020, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.test.util;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Queue;
+import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.logging.Handler;
+import java.util.logging.LogRecord;
+import java.util.regex.Pattern;
+
+public class TestLogHandler extends Handler {
+    public Queue<LogRecord> records = new ConcurrentLinkedQueue<>();
+
+    @Override
+    public void publish(LogRecord record) {
+        records.add(record);
+    }
+
+    @Override
+    public void flush() {
+    }
+
+    @Override
+    public void close() throws SecurityException {
+    }
+
+    public List<LogRecord> getRecordsMatching(Pattern messagePattern) {
+        List<LogRecord> matches = new ArrayList<>();
+        for (LogRecord r : this.records) {
+            String message = r.getMessage();
+            if (message != null && messagePattern.matcher(message).find()) {
+                matches.add(r);
+            }
+        }
+        return matches;
+    }
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/util/UnusualBigDecimalByteConverterTest.java b/pgjdbc/src/test/java/org/postgresql/test/util/UnusualBigDecimalByteConverterTest.java
new file mode 100644
index 0000000..9f220ea
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/util/UnusualBigDecimalByteConverterTest.java
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2020, PostgreSQL Global Development Group
+ * See the LICENSE file in the project root for more information.
+ */
+
+package org.postgresql.test.util;
+
+import java.math.BigDecimal;
+import org.junit.jupiter.api.Test;
+import org.postgresql.util.ByteConverter;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+/**
+ * Tests unusual binary representations of numeric values.
+ *
+ * @author Brett Okken
+ */
+class UnusualBigDecimalByteConverterTest {
+
+    /**
+     * Typically a number < 1 would have sections of leading '0' values represented in weight
+     * rather than including as short values.
+     */
+    @Test
+    void test_4_leading_0() {
+        //len 2
+        //weight -1
+        //scale 5
+        final byte[] data = new byte[]{0, 2, -1, -1, 0, 0, 0, 5, 0, 0, 23, 112};
+        final BigDecimal actual = (BigDecimal) ByteConverter.numeric(data);
+        assertEquals(new BigDecimal("0.00006"), actual);
+    }
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/util/systemstubs/EnvironmentVariableMocker.java b/pgjdbc/src/test/java/org/postgresql/test/util/systemstubs/EnvironmentVariableMocker.java
new file mode 100644
index 0000000..2ec6128
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/util/systemstubs/EnvironmentVariableMocker.java
@@ -0,0 +1,124 @@
+package org.postgresql.test.util.systemstubs;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.lang.instrument.Instrumentation;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+import java.util.Stack;
+import java.util.jar.JarFile;
+import net.bytebuddy.ByteBuddy;
+import net.bytebuddy.agent.ByteBuddyAgent;
+import net.bytebuddy.dynamic.loading.ClassReloadingStrategy;
+import net.bytebuddy.implementation.MethodDelegation;
+import static net.bytebuddy.matcher.ElementMatchers.isStatic;
+import static net.bytebuddy.matcher.ElementMatchers.namedOneOf;
+
+/**
+ * This takes control of the environment variables using ByteBuddy. It captures the environment
+ * when first used, and defaults to that. When the {@link EnvironmentVariables} mock wishes to provide
+ * mocking, the alternative map of variables is put into a stack and set as the current variables used by
+ * the interceptor.
+ */
+public class EnvironmentVariableMocker {
+    private static final Stack<Map<String, String>> REPLACEMENT_ENV = new Stack<>();
+    private static final Map<String, String> ORIGINAL_ENV;
+
+    static {
+        ORIGINAL_ENV = new HashMap<>(System.getenv());
+        try {
+            Instrumentation instrumentation = ByteBuddyAgent.install();
+            installInterceptorIntoBootLoader(instrumentation);
+            var byteBuddy = new ByteBuddy();
+            byteBuddy.redefine(Class.forName("java.lang.ProcessEnvironment"))
+                    .method(isStatic().and(namedOneOf("getenv", "environment", "toEnvironmentBlock")))
+                    .intercept(MethodDelegation.to(ProcessEnvironmentInterceptor.class))
+                    .make()
+                    .load(
+                            EnvironmentVariableMocker.class.getClassLoader(),
+                            ClassReloadingStrategy.fromInstalledAgent());
+            ProcessEnvironmentInterceptor.setEnv(ORIGINAL_ENV);
+        } catch (Throwable e) {
+
+            throw new IllegalStateException("Cannot set up environment mocking: " + e.getMessage() +
+                    ".", e);
+        }
+    }
+
+    private static void installInterceptorIntoBootLoader(Instrumentation instrumentation) throws IOException {
+        File tempFile = File.createTempFile("interceptor", ".jar");
+        tempFile.deleteOnExit();
+        try (FileOutputStream file = new FileOutputStream(tempFile);
+             var resourceStream = EnvironmentVariableMocker.class.getClassLoader()
+                     .getResourceAsStream("system-stubs-interceptor.jar")) {
+            java.util.Objects.requireNonNull(resourceStream, "system-stubs-interceptor.jar not on classpath").transferTo(file);
+        }
+
+        instrumentation.appendToBootstrapClassLoaderSearch(new JarFile(tempFile));
+    }
+
+    @Deprecated(since = "2.1.5")
+    public static void connect(Map<String, String> newEnvironmentVariables) {
+        connect(newEnvironmentVariables, Collections.emptySet());
+    }
+
+    /**
+     * Attach a map as the mutable replacement environment variables for now. This can be done
+     * multiple times and each time the replacement will supersede the maps before. Then when {@link #pop()}
+     * is called, we'll rollback to the previous.
+     *
+     * @param newEnvironmentVariables the mutable map - note: this will be populated by the current
+     *                                environment
+     * @param variablesToRemove       a list of variables to take out of the resulting environment variables
+     */
+    public static void connect(Map<String, String> newEnvironmentVariables, Set<String> variablesToRemove) {
+        // add all entries not already present in the new environment variables
+        System.getenv().entrySet().stream()
+                .filter(entry -> !newEnvironmentVariables.containsKey(entry.getKey()))
+                .forEach(entry -> newEnvironmentVariables.put(entry.getKey(), entry.getValue()));
+        variablesToRemove.forEach(newEnvironmentVariables::remove);
+        REPLACEMENT_ENV.push(newEnvironmentVariables);
+        ProcessEnvironmentInterceptor.setEnv(newEnvironmentVariables);
+    }
+
+    /**
+     * Remove the latest set of mock environment variables. This will run all the way to empty, after which
+     * the original implementation of the getenv functions will be called directly again.
+     *
+     * @return true if mocking has now stopped
+     */
+    public static synchronized boolean pop() {
+        if (!REPLACEMENT_ENV.empty()) {
+            REPLACEMENT_ENV.pop();
+        }
+
+        if (!REPLACEMENT_ENV.empty()) {
+            ProcessEnvironmentInterceptor.setEnv(REPLACEMENT_ENV.peek());
+        } else {
+            ProcessEnvironmentInterceptor.setEnv(ORIGINAL_ENV);
+        }
+
+        return REPLACEMENT_ENV.empty();
+    }
+
+    /**
+     * A safer form - allows removal of the specific map that we want to detach
+     *
+     * @param theOneToPop the map to remove
+     * @return true if removed
+     */
+    public static synchronized boolean remove(Map<String, String> theOneToPop) {
+        var result = REPLACEMENT_ENV.remove(theOneToPop);
+
+        if (!REPLACEMENT_ENV.empty()) {
+            ProcessEnvironmentInterceptor.setEnv(REPLACEMENT_ENV.peek());
+        } else {
+            ProcessEnvironmentInterceptor.setEnv(ORIGINAL_ENV);
+        }
+
+        return result;
+    }
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/util/systemstubs/EnvironmentVariables.java b/pgjdbc/src/test/java/org/postgresql/test/util/systemstubs/EnvironmentVariables.java
new file mode 100644
index 0000000..3f61069
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/util/systemstubs/EnvironmentVariables.java
@@ -0,0 +1,161 @@
+package org.postgresql.test.util.systemstubs;
+
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Set;
+import org.postgresql.test.util.systemstubs.resource.NameValuePairSetter;
+import org.postgresql.test.util.systemstubs.resource.SingularTestResource;
+import static java.util.Collections.emptyMap;
+import static org.postgresql.test.util.systemstubs.properties.PropertiesUtils.toStringMap;
+
+/**
+ * A collection of values for environment variables. New values can be
+ * added by {@link #and(String, String)}. The {@code EnvironmentVariables}
+ * object is then used to execute an arbitrary piece of code with these
+ * environment variables being present. Use {@link #execute}
+ * to wrap around test code:
+ * <pre>
+ * &#064;Test
+ * void execute_code_with_environment_variables() throws Exception {
+ *   withEnvironmentVariable("first", "first value")
+ *     .and("second", "second value")
+ *     .and("third", null)
+ *     .execute((){@literal ->} {
+ *       assertEquals("first value", System.getenv("first"));
+ *       assertEquals("second value", System.getenv("second"));
+ *       assertNull(System.getenv("third"));
+ *     });
+ * }
+ * </pre>
+ *
+ * @since 1.0.0
+ */
+public class EnvironmentVariables extends SingularTestResource implements NameValuePairSetter<EnvironmentVariables> {
+    private final Map<String, String> variables;
+    private final Set<String> toRemove = new HashSet<>();
+
+    /**
+     * Default constructor with an empty set of environment variables. Use {@link #set(String, String)} to
+     * provide some, mutating this, or {@link #and(String, String)} to fork a fresh object with desired
+     * environment variable settings.
+     */
+    public EnvironmentVariables() {
+        this(emptyMap());
+    }
+
+    /**
+     * Construct with an initial set of variables as name value pairs.
+     *
+     * @param name   first environment variable's name
+     * @param value  first environment variable's value
+     * @param others must be of even-numbered length. Name/value pairs of the other values to
+     *               apply to the environment when this object is active
+     */
+    public EnvironmentVariables(String name, String value, String... others) {
+        if (others.length % 2 != 0) {
+            throw new IllegalArgumentException("Must provide even number of parameters");
+        }
+        variables = new HashMap<>();
+        variables.put(name, value);
+        for (int i = 0; i < others.length; i += 2) {
+            variables.put(others[i], others[i + 1]);
+        }
+    }
+
+    /**
+     * Construct with an initial map of variables as name value pairs
+     *
+     * @param properties name value pairs as {@link Properties} object
+     */
+    public EnvironmentVariables(Properties properties) {
+        this(toStringMap(properties));
+    }
+
+    /**
+     * Construct with an initial map of variables as name value pairs
+     *
+     * @param variables initial variables
+     */
+    public EnvironmentVariables(Map<String, String> variables) {
+        this.variables = new HashMap<>(variables);
+    }
+
+    /**
+     * <em>Immutable setter:</em> creates a new {@code WithEnvironmentVariables} object that
+     * additionally stores the value for an additional environment variable.
+     * You cannot specify the value of an environment variable twice. An
+     * {@code IllegalArgumentException} when you try.
+     *
+     * @param name  the name of the environment variable.
+     * @param value the value of the environment variable.
+     * @return a new {@code WithEnvironmentVariables} object.
+     * @throws IllegalArgumentException when a value for the environment
+     *                                  variable {@code name} is already specified.
+     * @see #execute(ThrowingRunnable)
+     */
+    public EnvironmentVariables and(String name, String value) {
+        validateNotSet(name, value);
+        return new EnvironmentVariables(variables).set(name, value);
+    }
+
+    /**
+     * <em>Mutable setter:</em> applies the change to the stored environment variables
+     * and applies to the environment too if currently active.
+     *
+     * @param name  name of variable to set
+     * @param value value to set
+     * @return this for fluent calling
+     */
+    @Override
+    public EnvironmentVariables set(String name, String value) {
+        variables.put(name, value);
+        return this;
+    }
+
+    @Override
+    public EnvironmentVariables remove(String name) {
+        toRemove.add(name);
+        variables.remove(name);
+
+        return this;
+    }
+
+    /**
+     * Return a copy of all the variables set for testing
+     *
+     * @return a copy of the map
+     */
+    public Map<String, String> getVariables() {
+        return new HashMap<>(variables);
+    }
+
+    private void validateNotSet(String name, String value) {
+        if (variables.containsKey(name)) {
+            String currentValue = variables.get(name);
+            throw new IllegalArgumentException("The environment variable '" + name +
+                    "' cannot be set to " + format(value) + " because it was already set to " +
+                    format(currentValue) + "."
+            );
+        }
+    }
+
+    private String format(String text) {
+        if (text == null) {
+            return "null";
+        } else {
+            return "'" + text + "'";
+        }
+    }
+
+    @Override
+    protected void doSetup() {
+        EnvironmentVariableMocker.connect(variables, toRemove);
+    }
+
+    @Override
+    protected void doTeardown() {
+        EnvironmentVariableMocker.remove(variables);
+    }
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/util/systemstubs/ProcessEnvironmentInterceptor.java b/pgjdbc/src/test/java/org/postgresql/test/util/systemstubs/ProcessEnvironmentInterceptor.java
new file mode 100644
index 0000000..893da9a
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/util/systemstubs/ProcessEnvironmentInterceptor.java
@@ -0,0 +1,195 @@
+package org.postgresql.test.util.systemstubs;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import static java.util.stream.Collectors.toSet;
+
+/**
+ * Plugs into the boot loader to provide an alternative implementation to ProcessEnvironment
+ * controllable at test time.
+ */
+public class ProcessEnvironmentInterceptor {
+    private static Map<String, String> CURRENT_ENVIRONMENT_VARIABLES = new HashMap<>();
+
+    private static Map<String, String> theEnvironment;
+
+    /**
+     * For use by the EnvironmentMocker - this overwrites the effective environment variables that the system
+     * appears to have.
+     *
+     * @param env the environment variable map to use - this is kept by reference and so is mutable
+     */
+    public static void setEnv(Map<String, String> env) {
+        CURRENT_ENVIRONMENT_VARIABLES = env;
+
+        // this copy exposes process environment to tools looking to mock it
+        theEnvironment = Collections.unmodifiableMap(CURRENT_ENVIRONMENT_VARIABLES);
+    }
+
+    /**
+     * The equivalent of <code>getenv</code> in the original ProcessEnvironment, assuming that
+     * mocking is "turned on"
+     *
+     * @return the current effective environment
+     */
+    public static Map<String, String> getenv() {
+        return Collections.unmodifiableMap(filterNulls(CURRENT_ENVIRONMENT_VARIABLES));
+    }
+
+    /**
+     * Get a single environment variable
+     *
+     * @param name name of the variable
+     * @return the value or null
+     */
+    public static String getenv(String name) {
+        return getenv().get(name);
+    }
+
+    /**
+     * Reads the environment variables as does getenv - a different part of
+     * ProcessEnvironment that we're stubbing
+     *
+     * @return the environment map
+     */
+    public static Map<String, String> environment() {
+        return new HashMap<>(getenv());
+    }
+
+    /**
+     * Ripped from the JDK implementation
+     *
+     * @param m the map to convert
+     * @return string representation
+     */
+    public static String toEnvironmentBlock(Map<String, String> m) {
+        // Sort Unicode-case-insensitively by name
+        List<Map.Entry<String, String>> list = m != null ?
+                new ArrayList<>(m.entrySet()) :
+                new ArrayList<>(getenv().entrySet());
+        Collections.sort(list, (e1, e2) -> NameComparator.compareNames(e1.getKey(), e2.getKey()));
+
+        StringBuilder sb = new StringBuilder(list.size() * 30);
+        int cmp = -1;
+
+        // Some versions of MSVCRT.DLL require SystemRoot to be set.
+        // So, we make sure that it is always set, even if not provided
+        // by the caller.
+        final String systemRoot = "SystemRoot";
+
+        for (Map.Entry<String, String> e : list) {
+            String key = e.getKey();
+            String value = e.getValue();
+            if (cmp < 0 && (cmp = NameComparator.compareNames(key, systemRoot)) > 0) {
+                // Not set, so add it here
+                addToEnvIfSet(sb, systemRoot);
+            }
+            addToEnv(sb, key, value);
+        }
+        if (cmp < 0) {
+            // Got to end of list and still not found
+            addToEnvIfSet(sb, systemRoot);
+        }
+        if (sb.length() == 0) {
+            // Environment was empty and SystemRoot not set in parent
+            sb.append('\u0000');
+        }
+        // Block is double NUL terminated
+        sb.append('\u0000');
+        return sb.toString();
+    }
+
+    /**
+     * Convert the requested environment variables to a Nix format
+     *
+     * @param m    the map of variables
+     * @param envc the target array to receive the size
+     * @return the byte array of environment variables
+     */
+    // code taken from the original in ProcessEnvironment
+    public static byte[] toEnvironmentBlock(Map<String, String> m, int[] envc) {
+        if (m == null) {
+            return null;
+        }
+        int count = m.size() * 2; // For added '=' and NUL
+        for (Map.Entry<String, String> entry : m.entrySet()) {
+            count += entry.getKey().getBytes().length;
+            count += entry.getValue().getBytes().length;
+        }
+
+        byte[] block = new byte[count];
+
+        int i = 0;
+        for (Map.Entry<String, String> entry : m.entrySet()) {
+            final byte[] key = entry.getKey().getBytes();
+            final byte[] value = entry.getValue().getBytes();
+            System.arraycopy(key, 0, block, i, key.length);
+            i += key.length;
+            block[i++] = (byte) '=';
+            System.arraycopy(value, 0, block, i, value.length);
+            i += value.length + 1;
+            // No need to write NUL byte explicitly
+            //block[i++] = (byte) '\u0000';
+        }
+        envc[0] = m.size();
+        return block;
+    }
+
+    private static Map<String, String> filterNulls(Map<String, String> currentMockedEnvironment) {
+        // Copy the non-null entries instead of mutating the caller's map in place:
+        // the map is held by reference (see setEnv), so in-place removal would
+        // permanently delete the null-markers representing "removed" variables.
+        Map<String, String> result = new HashMap<>();
+        currentMockedEnvironment.entrySet()
+                .stream()
+                .filter(entry -> entry.getValue() != null)
+                .forEach(entry -> result.put(entry.getKey(), entry.getValue()));
+        return result;
+    }
+
+    // add the environment variable to the child, if it exists in parent
+    private static void addToEnvIfSet(StringBuilder sb, String name) {
+        String s = getenv().get(name);
+        if (s != null) {
+            addToEnv(sb, name, s);
+        }
+    }
+
+    private static void addToEnv(StringBuilder sb, String name, String val) {
+        sb.append(name).append('=').append(val).append('\u0000');
+    }
+
+    private static final class NameComparator
+            implements Comparator<String> {
+
+        public static int compareNames(String s1, String s2) {
+            // We can't use String.compareToIgnoreCase since it
+            // canonicalizes to lower case, while Windows
+            // canonicalizes to upper case!  For example, "_" should
+            // sort *after* "Z", not before.
+            int n1 = s1.length();
+            int n2 = s2.length();
+            int min = Math.min(n1, n2);
+            for (int i = 0; i < min; i++) {
+                char c1 = s1.charAt(i);
+                char c2 = s2.charAt(i);
+                if (c1 != c2) {
+                    c1 = Character.toUpperCase(c1);
+                    c2 = Character.toUpperCase(c2);
+                    if (c1 != c2) {
+                        // No overflow because of numeric promotion
+                        return c1 - c2;
+                    }
+                }
+            }
+            return n1 - n2;
+        }
+
+        public int compare(String s1, String s2) {
+            return compareNames(s1, s2);
+        }
+    }
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/util/systemstubs/ThrowingRunnable.java b/pgjdbc/src/test/java/org/postgresql/test/util/systemstubs/ThrowingRunnable.java
new file mode 100644
index 0000000..29292f1
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/util/systemstubs/ThrowingRunnable.java
@@ -0,0 +1,48 @@
+package org.postgresql.test.util.systemstubs;
+
+import java.util.concurrent.Callable;
+import org.postgresql.test.util.systemstubs.exception.WrappedThrowable;
+
+/**
+ * This code may throw an {@link Exception}. Therefore we cannot use
+ * {@link Runnable}.
+ *
+ * @since 1.0.0
+ */
+public interface ThrowingRunnable {
+    /**
+     * Convert a lambda of type runnable to Callable
+     *
+     * @param runnable a runnable that can be converted
+     * @return a {@link Callable}
+     * @since 1.0.0
+     */
+    static Callable<Void> asCallable(ThrowingRunnable runnable) {
+        return runnable.asCallable();
+    }
+
+    /**
+     * Execute the action.
+     *
+     * @throws Exception the action may throw an arbitrary exception.
+     */
+    void run() throws Throwable;
+
+    /**
+     * Convert this to a Callable
+     *
+     * @return a {@link Callable} which executes this
+     */
+    default Callable<Void> asCallable() {
+        return () -> {
+            try {
+                run();
+            } catch (Error | Exception e) {
+                throw e;
+            } catch (Throwable t) {
+                throw new WrappedThrowable(t);
+            }
+            return null;
+        };
+    }
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/util/systemstubs/exception/WrappedThrowable.java b/pgjdbc/src/test/java/org/postgresql/test/util/systemstubs/exception/WrappedThrowable.java
new file mode 100644
index 0000000..163400a
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/util/systemstubs/exception/WrappedThrowable.java
@@ -0,0 +1,10 @@
+package org.postgresql.test.util.systemstubs.exception;
+
/**
 * Wrapper to help pass a throwable out through a {@link java.util.concurrent.Callable},
 * whose {@code call} method may only throw {@link Exception}. The original
 * throwable is available via {@link #getCause()}.
 */
public class WrappedThrowable extends RuntimeException {
    // RuntimeException is Serializable; pin the serial form explicitly.
    private static final long serialVersionUID = 1L;

    /**
     * @param cause the throwable to carry across the {@code Callable} boundary
     */
    public WrappedThrowable(Throwable cause) {
        super(cause);
    }
}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/util/systemstubs/properties/PropertiesUtils.java b/pgjdbc/src/test/java/org/postgresql/test/util/systemstubs/properties/PropertiesUtils.java
new file mode 100644
index 0000000..b59bd75
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/util/systemstubs/properties/PropertiesUtils.java
@@ -0,0 +1,32 @@
+package org.postgresql.test.util.systemstubs.properties;
+
+import java.util.Map;
+import java.util.Properties;
+import static java.util.stream.Collectors.toMap;
+
/**
 * Static helpers for working with {@link Properties} objects in tests.
 */
public class PropertiesUtils {

    private PropertiesUtils() {
        // utility class - not intended to be instantiated
    }

    /**
     * Produce a clone of some properties in a new object.
     * Note: {@code putAll} copies the entries set directly on {@code source},
     * not values reachable only through its defaults chain.
     *
     * @param source the source to clone
     * @return a distinct copy
     */
    public static Properties copyOf(Properties source) {
        Properties copy = new Properties();
        copy.putAll(source);
        return copy;
    }

    /**
     * Convert a properties object to a map.
     *
     * @param properties the source properties
     * @return a {@code Map} with keys and values rendered via {@link String#valueOf}
     */
    public static Map<String, String> toStringMap(Properties properties) {
        return properties.entrySet()
                .stream()
                .collect(toMap(entry -> String.valueOf(entry.getKey()),
                        entry -> String.valueOf(entry.getValue())));
    }
}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/util/systemstubs/properties/SystemProperties.java b/pgjdbc/src/test/java/org/postgresql/test/util/systemstubs/properties/SystemProperties.java
new file mode 100644
index 0000000..4f46a22
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/util/systemstubs/properties/SystemProperties.java
@@ -0,0 +1,23 @@
+package org.postgresql.test.util.systemstubs.properties;
+
+import java.util.Properties;
+
/**
 * Maintain system properties after a test from the ones before the test. Stores the
 * existing properties when started, and restores them when complete. Allows for a list of properties
 * that will be applied to the system to be set before the stubbing is triggered.
 */
public class SystemProperties extends SystemPropertiesImpl<SystemProperties> {

    /**
     * Construct with no initial properties; use {@code set} to stage them
     * before activation or apply them while active.
     */
    public SystemProperties() {
        super();
    }

    /**
     * Construct with a specific set of properties to apply while active.
     *
     * @param properties properties to use
     */
    public SystemProperties(Properties properties) {
        super(properties);
    }

    /**
     * Construct with name/value pairs to apply while active.
     *
     * @param name       name of the first property
     * @param value      value of the first property
     * @param nameValues pairs of names and values for further properties
     */
    public SystemProperties(String name, String value, String... nameValues) {
        super(name, value, nameValues);
    }
}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/util/systemstubs/properties/SystemPropertiesImpl.java b/pgjdbc/src/test/java/org/postgresql/test/util/systemstubs/properties/SystemPropertiesImpl.java
new file mode 100644
index 0000000..53b08a9
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/util/systemstubs/properties/SystemPropertiesImpl.java
@@ -0,0 +1,115 @@
+package org.postgresql.test.util.systemstubs.properties;
+
+import java.util.HashSet;
+import java.util.Properties;
+import java.util.Set;
+import org.postgresql.test.util.systemstubs.resource.NameValuePairSetter;
+import org.postgresql.test.util.systemstubs.resource.SingularTestResource;
+import static java.lang.System.getProperties;
+import static java.lang.System.setProperties;
+
+/**
+ * Maintain system properties after a test from the ones before the test. Stores the
+ * existing properties when started, and restores them when complete. Allows for a list of properties
+ * that will be applied to the system to be set before the stubbing is triggered.
+ */
+public class SystemPropertiesImpl<T extends SystemPropertiesImpl<T>> extends SingularTestResource
+        implements NameValuePairSetter<T> {
+    private Properties originalProperties;
+    private Properties properties;
+
+    private Set<String> propertiesToRemove = new HashSet<>();
+
+    /**
+     * Default constructor with no properties. Use {@link #set} to set properties
+     * either while active or before activation.
+     *
+     * @since 1.0.0
+     */
+    public SystemPropertiesImpl() {
+        this.properties = new Properties();
+    }
+
+    /**
+     * Construct with a specific set of properties.
+     *
+     * @param properties properties to use
+     * @since 1.0.0
+     */
+    public SystemPropertiesImpl(Properties properties) {
+        this.properties = PropertiesUtils.copyOf(properties);
+    }
+
+    /**
+     * Construct with a set of properties to apply when the object is active
+     *
+     * @param name       name of the first property
+     * @param value      value of the first property
+     * @param nameValues pairs of names and values for further properties
+     * @since 1.0.0
+     */
+    public SystemPropertiesImpl(String name, String value, String... nameValues) {
+        this();
+        if (nameValues.length % 2 != 0) {
+            throw new IllegalArgumentException("Must have pairs of values");
+        }
+        properties.setProperty(name, value);
+        for (int i = 0; i < nameValues.length; i += 2) {
+            properties.setProperty(nameValues[i], nameValues[i + 1]);
+        }
+    }
+
+    /**
+     * Set a system property. If active, this will set it with {@link System#setProperty(String, String)}.
+     * If not active, then this will store the property to apply when this object is part of an execution.
+     * It is also possible to use {@link System#setProperty(String, String)} while this object is active,
+     * but when the execution finishes, this object will be unaware of the property set, so will not set
+     * it next time.
+     *
+     * @param name  name of the property
+     * @param value value to set
+     * @return this object for fluent use
+     * @since 1.0.0
+     */
+    @Override
+    @SuppressWarnings("unchecked")
+    public T set(String name, String value) {
+        properties.setProperty(name, value);
+        if (isActive()) {
+            System.setProperty(name, value);
+        }
+        return (T) this;
+    }
+
+    /**
+     * Remove a property - this removes it from system properties if active, and remembers to remove it
+     * while the object is active
+     *
+     * @param name the name of the property to remove
+     * @return <code>this</code> for fluent use
+     * @since 2.1.5
+     */
+    @Override
+    @SuppressWarnings("unchecked")
+    public T remove(String name) {
+        propertiesToRemove.add(name);
+        if (isActive()) {
+            System.getProperties().remove(name);
+        }
+        return (T) this;
+    }
+
+    @Override
+    protected void doSetup() throws Exception {
+        originalProperties = getProperties();
+        Properties copyProperties = PropertiesUtils.copyOf(originalProperties);
+        propertiesToRemove.forEach(copyProperties::remove);
+        copyProperties.putAll(properties);
+        setProperties(copyProperties);
+    }
+
+    @Override
+    protected void doTeardown() throws Exception {
+        setProperties(originalProperties);
+    }
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/util/systemstubs/resource/Executable.java b/pgjdbc/src/test/java/org/postgresql/test/util/systemstubs/resource/Executable.java
new file mode 100644
index 0000000..dbef690
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/util/systemstubs/resource/Executable.java
@@ -0,0 +1,34 @@
+package org.postgresql.test.util.systemstubs.resource;
+
+import java.util.concurrent.Callable;
+import org.postgresql.test.util.systemstubs.ThrowingRunnable;
+
/**
 * The execution interface. Defines the <em>execute-around</em> pattern
 * where an object can set up and tear down some sort of resource in a try-finally block
 * around calling some inner operation, returning its value.
 *
 * @since 1.0.0
 */
@FunctionalInterface
public interface Executable {
    /**
     * Execute this test resource around a callable
     *
     * @param callable the callable to execute
     * @param <T>      the type of object to return
     * @return the result of the operation
     * @throws Exception on any error thrown by the callable
     */
    <T> T execute(Callable<T> callable) throws Exception;

    /**
     * Execute this test resource around a runnable, discarding any result
     *
     * @param runnable the runnable to execute
     * @throws Exception on any error thrown by the runnable
     */
    default void execute(ThrowingRunnable runnable) throws Exception {
        execute(runnable.asCallable());
    }
}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/util/systemstubs/resource/NameValuePairSetter.java b/pgjdbc/src/test/java/org/postgresql/test/util/systemstubs/resource/NameValuePairSetter.java
new file mode 100644
index 0000000..385b701
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/util/systemstubs/resource/NameValuePairSetter.java
@@ -0,0 +1,57 @@
+package org.postgresql.test.util.systemstubs.resource;
+
+import java.util.Map;
+import java.util.Properties;
+
/**
 * Implemented by anything onto which string name/value pairs can be written,
 * for example a set of system properties.
 *
 * @param <T> the concrete implementing type, returned for fluent chaining
 */
public interface NameValuePairSetter<T extends NameValuePairSetter<T>> {
    /**
     * Set a single name/value pair.
     *
     * @param name  the name
     * @param value the value
     * @return <code>this</code> for fluent calling
     */
    T set(String name, String value);

    /**
     * Set several pairs in one call; each argument's {@link Object#toString()}
     * supplies the name or value text.
     *
     * @param nameValuePairs alternating names and values - the count must be even
     * @return <code>this</code> for fluent calling
     */
    @SuppressWarnings("unchecked")
    default T set(Object... nameValuePairs) {
        int count = nameValuePairs.length;
        if (count % 2 != 0) {
            throw new IllegalArgumentException("Must provide an even number of name/value pairs");
        }
        for (int index = 0; index < count; index += 2) {
            set(nameValuePairs[index].toString(), nameValuePairs[index + 1].toString());
        }
        return (T) this;
    }

    /**
     * Set every entry of a map (for example a {@link Properties} object).
     *
     * @param properties the map whose entries should be applied
     * @return <code>this</code> for fluent calling
     */
    @SuppressWarnings("unchecked")
    default T set(Map<Object, Object> properties) {
        for (Map.Entry<Object, Object> entry : properties.entrySet()) {
            set(String.valueOf(entry.getKey()), String.valueOf(entry.getValue()));
        }
        return (T) this;
    }

    /**
     * Remove one of the name value pairs.
     *
     * @param name the name
     * @return <code>this</code> for fluent calling
     */
    T remove(String name);
}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/util/systemstubs/resource/Resources.java b/pgjdbc/src/test/java/org/postgresql/test/util/systemstubs/resource/Resources.java
new file mode 100644
index 0000000..b93c84e
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/util/systemstubs/resource/Resources.java
@@ -0,0 +1,71 @@
+package org.postgresql.test.util.systemstubs.resource;
+
+import java.util.LinkedList;
+import java.util.List;
+import java.util.concurrent.Callable;
+
+/**
+ * Helper functions for test resources
+ */
+public class Resources {
+    /**
+     * Use the execute around idiom with multiple resources
+     *
+     * @param resources the resources to wrap around the test, in the order to set them up
+     * @param <T>       the return type
+     * @return an {@link Executable} with the {@link Executable#execute} methods on it
+     */
+    public static <T> Executable with(TestResource... resources) {
+        return new Executable() {
+            @Override
+            public <T> T execute(Callable<T> callable) throws Exception {
+                return Resources.execute(callable, resources);
+            }
+        };
+    }
+
+    /**
+     * The execute-around idiom. Prepares a resource, runs the resources and then cleans up. The resources
+     * are set up in the order of declaration and tidied in reverse order. Any failure during set up results in
+     * a corresponding teardown operation, just in case, but only for those resources that have been set up so far.
+     *
+     * @param callable  the item to run
+     * @param resources the resources to set up
+     * @throws Exception on error
+     */
+    public static <T> T execute(Callable<T> callable, TestResource... resources) throws Exception {
+        LinkedList<TestResource> resourcesSetUp = new LinkedList<>();
+
+        try {
+            for (TestResource resource : resources) {
+                resourcesSetUp.addFirst(resource);
+                resource.setup();
+            }
+
+            return callable.call();
+        } finally {
+            executeCleanup(resourcesSetUp);
+        }
+    }
+
+    /**
+     * Clean up all of the resources provided, tolerating exceptions in any of them and throwing
+     * at the end if necessary
+     *
+     * @param resourcesSetUp the list of resources in the order to clean them up
+     * @throws Exception on the first teardown error
+     */
+    public static void executeCleanup(List<TestResource> resourcesSetUp) throws Exception {
+        Exception firstExceptionThrownOnTidyUp = null;
+        for (TestResource resource : resourcesSetUp) {
+            try {
+                resource.teardown();
+            } catch (Exception e) {
+                firstExceptionThrownOnTidyUp = firstExceptionThrownOnTidyUp == null ? e : firstExceptionThrownOnTidyUp;
+            }
+        }
+        if (firstExceptionThrownOnTidyUp != null) {
+            throw firstExceptionThrownOnTidyUp;
+        }
+    }
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/util/systemstubs/resource/SingularTestResource.java b/pgjdbc/src/test/java/org/postgresql/test/util/systemstubs/resource/SingularTestResource.java
new file mode 100644
index 0000000..0d7eadc
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/util/systemstubs/resource/SingularTestResource.java
@@ -0,0 +1,52 @@
+package org.postgresql.test.util.systemstubs.resource;
+
+/**
+ * Adds reference counting to the {@link TestResource} interface in case something tries to perform
+ * multiple setup or teardown calls on the same resource. Promises only a single instance of the
+ * set up at a time.
+ *
+ * @since 1.0.0
+ */
+public abstract class SingularTestResource implements TestResource {
+    private int refCount = 0;
+
+    @Override
+    public void setup() throws Exception {
+        refCount++;
+
+        if (refCount == 1) {
+            doSetup();
+        }
+    }
+
+    @Override
+    public void teardown() throws Exception {
+        refCount--;
+
+        if (refCount == 0) {
+            doTeardown();
+        }
+
+        if (refCount < 0) {
+            refCount = 0;
+        }
+    }
+
+    /**
+     * Subclass overrides this to provide actual setup
+     *
+     * @throws Exception on setup error
+     */
+    protected abstract void doSetup() throws Exception;
+
+    /**
+     * Subclass overrides this to provide actual cleanup
+     *
+     * @throws Exception on clean up error
+     */
+    protected abstract void doTeardown() throws Exception;
+
+    protected boolean isActive() {
+        return refCount > 0;
+    }
+}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/util/systemstubs/resource/TestResource.java b/pgjdbc/src/test/java/org/postgresql/test/util/systemstubs/resource/TestResource.java
new file mode 100644
index 0000000..9caee84
--- /dev/null
+++ b/pgjdbc/src/test/java/org/postgresql/test/util/systemstubs/resource/TestResource.java
@@ -0,0 +1,36 @@
+package org.postgresql.test.util.systemstubs.resource;
+
+import java.util.concurrent.Callable;
+
/**
 * A test resource is something that can be set up at the start of a test and
 * torn down at the end.
 */
public interface TestResource extends Executable {
    /**
     * Prepare the resource for testing
     *
     * @throws Exception on error starting
     */
    void setup() throws Exception;

    /**
     * Clean up the resource
     *
     * @throws Exception on error cleaning up
     */
    void teardown() throws Exception;

    /**
     * Execute this test resource around a callable: the resource is set up,
     * the callable invoked, and the resource torn down afterwards (via
     * {@link Resources#execute}).
     *
     * @param callable the callable to execute
     * @param <T>      the type of object to return
     * @return the result of the operation
     * @throws Exception on any error thrown by the callable
     * @since 1.0.0
     */
    default <T> T execute(Callable<T> callable) throws Exception {
        return Resources.execute(callable, this);
    }
}
diff --git a/pgjdbc/src/test/java/org/postgresql/test/xa/XADataSourceTest.java b/pgjdbc/src/test/java/org/postgresql/test/xa/XADataSourceTest.java
index 1ae284d..06c379c 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/xa/XADataSourceTest.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/xa/XADataSourceTest.java
@@ -5,22 +5,6 @@
 
 package org.postgresql.test.xa;
 
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertFalse;
-import static org.junit.jupiter.api.Assertions.assertNotEquals;
-import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.junit.jupiter.api.Assertions.fail;
-import static org.junit.jupiter.api.Assumptions.assumeTrue;
-
-import org.postgresql.test.TestUtil;
-import org.postgresql.test.jdbc2.optional.BaseDataSourceTest;
-import org.postgresql.xa.PGXADataSource;
-
-import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.BeforeAll;
-import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.Test;
-
 import java.sql.Connection;
 import java.sql.ResultSet;
 import java.sql.SQLException;
@@ -28,797 +12,809 @@ import java.sql.Statement;
 import java.sql.Timestamp;
 import java.util.Arrays;
 import java.util.Random;
-
 import javax.sql.XAConnection;
 import javax.sql.XADataSource;
 import javax.transaction.xa.XAException;
 import javax.transaction.xa.XAResource;
 import javax.transaction.xa.Xid;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.postgresql.test.TestUtil;
+import org.postgresql.test.jdbc2.optional.BaseDataSourceTest;
+import org.postgresql.xa.PGXADataSource;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
+import static org.junit.jupiter.api.Assumptions.assumeTrue;
 
 public class XADataSourceTest {
 
-  private XADataSource xaDs;
+    private XADataSource xaDs;
 
-  private Connection dbConn;
-  private boolean connIsSuper;
+    private Connection dbConn;
+    private boolean connIsSuper;
 
-  private XAConnection xaconn;
-  private XAResource xaRes;
-  private Connection conn;
+    private XAConnection xaconn;
+    private XAResource xaRes;
+    private Connection conn;
 
-  public XADataSourceTest() {
-    xaDs = new PGXADataSource();
-    BaseDataSourceTest.setupDataSource((PGXADataSource) xaDs);
-  }
-
-  @BeforeAll
-  static void beforeClass() throws Exception {
-    try (Connection con = TestUtil.openDB()) {
-      assumeTrue(isPreparedTransactionEnabled(con), "max_prepared_transactions should be non-zero for XA tests");
-    }
-  }
-
-  @BeforeEach
-  void setUp() throws Exception {
-    dbConn = TestUtil.openDB();
-
-    // Check if we're operating as a superuser; some tests require it.
-    Statement st = dbConn.createStatement();
-    st.executeQuery("SHOW is_superuser;");
-    ResultSet rs = st.getResultSet();
-    rs.next(); // One row is guaranteed
-    connIsSuper = rs.getBoolean(1); // One col is guaranteed
-    st.close();
-
-    TestUtil.createTable(dbConn, "testxa1", "foo int");
-    TestUtil.createTable(dbConn, "testxa2", "foo int primary key");
-    TestUtil.createTable(dbConn, "testxa3", "foo int references testxa2(foo) deferrable");
-
-    clearAllPrepared();
-
-    xaconn = xaDs.getXAConnection();
-    xaRes = xaconn.getXAResource();
-    conn = xaconn.getConnection();
-  }
-
-  private static boolean isPreparedTransactionEnabled(Connection connection) throws SQLException {
-    Statement stmt = connection.createStatement();
-    ResultSet rs = stmt.executeQuery("SHOW max_prepared_transactions");
-    rs.next();
-    int mpt = rs.getInt(1);
-    rs.close();
-    stmt.close();
-    return mpt > 0;
-  }
-
-  @AfterEach
-  void tearDown() throws SQLException {
-    try {
-      xaconn.close();
-    } catch (Exception ignored) {
+    public XADataSourceTest() {
+        xaDs = new PGXADataSource();
+        BaseDataSourceTest.setupDataSource((PGXADataSource) xaDs);
     }
 
-    clearAllPrepared();
-    TestUtil.dropTable(dbConn, "testxa3");
-    TestUtil.dropTable(dbConn, "testxa2");
-    TestUtil.dropTable(dbConn, "testxa1");
-    TestUtil.closeDB(dbConn);
-
-  }
-
-  private void clearAllPrepared() throws SQLException {
-    Statement st = dbConn.createStatement();
-    try {
-      ResultSet rs = st.executeQuery(
-          "SELECT x.gid, x.owner = current_user "
-              + "FROM pg_prepared_xacts x "
-              + "WHERE x.database = current_database()");
-
-      Statement st2 = dbConn.createStatement();
-      while (rs.next()) {
-        // TODO: This should really use org.junit.Assume once we move to JUnit 4
-        assertTrue(rs.getBoolean(2),
-            "Only prepared xacts owned by current user may be present in db");
-        st2.executeUpdate("ROLLBACK PREPARED '" + rs.getString(1) + "'");
-      }
-      st2.close();
-    } finally {
-      st.close();
-    }
-  }
-
-  static class CustomXid implements Xid {
-    private static Random rand = new Random(System.currentTimeMillis());
-    byte[] gtrid = new byte[Xid.MAXGTRIDSIZE];
-    byte[] bqual = new byte[Xid.MAXBQUALSIZE];
-
-    CustomXid(int i) {
-      rand.nextBytes(gtrid);
-      gtrid[0] = (byte) i;
-      gtrid[1] = (byte) i;
-      gtrid[2] = (byte) i;
-      gtrid[3] = (byte) i;
-      gtrid[4] = (byte) i;
-      bqual[0] = 4;
-      bqual[1] = 5;
-      bqual[2] = 6;
-    }
-
-    @Override
-    public int getFormatId() {
-      return 0;
-    }
-
-    @Override
-    public byte[] getGlobalTransactionId() {
-      return gtrid;
-    }
-
-    @Override
-    public byte[] getBranchQualifier() {
-      return bqual;
-    }
-
-    @Override
-    public boolean equals(Object o) {
-      if (!(o instanceof Xid)) {
-        return false;
-      }
-
-      Xid other = (Xid) o;
-      if (other.getFormatId() != this.getFormatId()) {
-        return false;
-      }
-      if (!Arrays.equals(other.getBranchQualifier(), this.getBranchQualifier())) {
-        return false;
-      }
-      return Arrays.equals(other.getGlobalTransactionId(), this.getGlobalTransactionId());
-    }
-
-    @Override
-    public int hashCode() {
-      final int prime = 31;
-      int result = 1;
-      result = prime * result + Arrays.hashCode(getBranchQualifier());
-      result = prime * result + getFormatId();
-      result = prime * result + Arrays.hashCode(getGlobalTransactionId());
-      return result;
-    }
-  }
-
-  /*
-   * Check that the equals method works for the connection wrapper returned by
-   * PGXAConnection.getConnection().
-   */
-  @Test
-  void wrapperEquals() throws Exception {
-    assertEquals(conn, conn, "Wrappers should be equal");
-    assertNotEquals(null, conn, "Wrapper should be unequal to null");
-    assertNotEquals("dummy string object", conn, "Wrapper should be unequal to unrelated object");
-  }
-
-  @Test
-  void onePhase() throws Exception {
-    Xid xid = new CustomXid(1);
-    xaRes.start(xid, XAResource.TMNOFLAGS);
-    conn.createStatement().executeQuery("SELECT * FROM testxa1");
-    xaRes.end(xid, XAResource.TMSUCCESS);
-    xaRes.commit(xid, true);
-  }
-
-  @Test
-  void twoPhaseCommit() throws Exception {
-    Xid xid = new CustomXid(1);
-    xaRes.start(xid, XAResource.TMNOFLAGS);
-    conn.createStatement().executeQuery("SELECT * FROM testxa1");
-    xaRes.end(xid, XAResource.TMSUCCESS);
-    xaRes.prepare(xid);
-    xaRes.commit(xid, false);
-  }
-
-  @Test
-  void closeBeforeCommit() throws Exception {
-    Xid xid = new CustomXid(5);
-    xaRes.start(xid, XAResource.TMNOFLAGS);
-    assertEquals(1, conn.createStatement().executeUpdate("INSERT INTO testxa1 VALUES (1)"));
-    conn.close();
-    xaRes.end(xid, XAResource.TMSUCCESS);
-    xaRes.commit(xid, true);
-
-    ResultSet rs = dbConn.createStatement().executeQuery("SELECT foo FROM testxa1");
-    assertTrue(rs.next());
-    assertEquals(1, rs.getInt(1));
-  }
-
-  @Test
-  void recover() throws Exception {
-    Xid xid = new CustomXid(12345);
-    xaRes.start(xid, XAResource.TMNOFLAGS);
-    conn.createStatement().executeQuery("SELECT * FROM testxa1");
-    xaRes.end(xid, XAResource.TMSUCCESS);
-    xaRes.prepare(xid);
-
-    {
-      Xid[] recoveredXidArray = xaRes.recover(XAResource.TMSTARTRSCAN);
-
-      boolean recoveredXid = false;
-
-      for (Xid aRecoveredXidArray : recoveredXidArray) {
-        if (xid.equals(aRecoveredXidArray)) {
-          recoveredXid = true;
-          break;
+    @BeforeAll
+    static void beforeClass() throws Exception {
+        try (Connection con = TestUtil.openDB()) {
+            assumeTrue(isPreparedTransactionEnabled(con), "max_prepared_transactions should be non-zero for XA tests");
         }
-      }
-
-      assertTrue(recoveredXid, "Did not recover prepared xid");
-      assertEquals(0, xaRes.recover(XAResource.TMNOFLAGS).length);
     }
 
-    xaRes.rollback(xid);
+    private static boolean isPreparedTransactionEnabled(Connection connection) throws SQLException {
+        Statement stmt = connection.createStatement();
+        ResultSet rs = stmt.executeQuery("SHOW max_prepared_transactions");
+        rs.next();
+        int mpt = rs.getInt(1);
+        rs.close();
+        stmt.close();
+        return mpt > 0;
+    }
 
-    {
-      Xid[] recoveredXidArray = xaRes.recover(XAResource.TMSTARTRSCAN);
+    /**
+     * <p>Get the time the current transaction was started from the server.</p>
+     *
+     * <p>This can be used to check that transaction doesn't get committed/ rolled back inadvertently, by
+     * calling this once before and after the suspected piece of code, and check that they match. It's
+     * a bit iffy, conceivably you might get the same timestamp anyway if the suspected piece of code
+     * runs fast enough, and/or the server clock is very coarse grained. But it'll do for testing
+     * purposes.</p>
+     */
+    private static Timestamp getTransactionTimestamp(Connection conn) throws SQLException {
+        ResultSet rs = conn.createStatement().executeQuery("SELECT now()");
+        rs.next();
+        return rs.getTimestamp(1);
+    }
 
-      boolean recoveredXid = false;
+    @BeforeEach
+    void setUp() throws Exception {
+        dbConn = TestUtil.openDB();
 
-      for (Xid aRecoveredXidArray : recoveredXidArray) {
-        if (xaRes.equals(aRecoveredXidArray)) {
-          recoveredXid = true;
-          break;
+        // Check if we're operating as a superuser; some tests require it.
+        Statement st = dbConn.createStatement();
+        st.executeQuery("SHOW is_superuser;");
+        ResultSet rs = st.getResultSet();
+        rs.next(); // One row is guaranteed
+        connIsSuper = rs.getBoolean(1); // One col is guaranteed
+        st.close();
+
+        TestUtil.createTable(dbConn, "testxa1", "foo int");
+        TestUtil.createTable(dbConn, "testxa2", "foo int primary key");
+        TestUtil.createTable(dbConn, "testxa3", "foo int references testxa2(foo) deferrable");
+
+        clearAllPrepared();
+
+        xaconn = xaDs.getXAConnection();
+        xaRes = xaconn.getXAResource();
+        conn = xaconn.getConnection();
+    }
+
+    @AfterEach
+    void tearDown() throws SQLException {
+        try {
+            xaconn.close();
+        } catch (Exception ignored) {
         }
-      }
 
-      assertFalse(recoveredXid, "Recovered rolled back xid");
+        clearAllPrepared();
+        TestUtil.dropTable(dbConn, "testxa3");
+        TestUtil.dropTable(dbConn, "testxa2");
+        TestUtil.dropTable(dbConn, "testxa1");
+        TestUtil.closeDB(dbConn);
+
     }
-  }
 
-  @Test
-  void rollback() throws XAException {
-    Xid xid = new CustomXid(3);
+    private void clearAllPrepared() throws SQLException {
+        Statement st = dbConn.createStatement();
+        try {
+            ResultSet rs = st.executeQuery(
+                    "SELECT x.gid, x.owner = current_user "
+                            + "FROM pg_prepared_xacts x "
+                            + "WHERE x.database = current_database()");
 
-    xaRes.start(xid, XAResource.TMNOFLAGS);
-    xaRes.end(xid, XAResource.TMSUCCESS);
-    xaRes.prepare(xid);
-    xaRes.rollback(xid);
-  }
-
-  @Test
-  void rollbackWithoutPrepare() throws XAException {
-    Xid xid = new CustomXid(4);
-
-    xaRes.start(xid, XAResource.TMNOFLAGS);
-    xaRes.end(xid, XAResource.TMSUCCESS);
-    xaRes.rollback(xid);
-  }
-
-  @Test
-  void autoCommit() throws Exception {
-    Xid xid = new CustomXid(6);
-
-    // When not in an XA transaction, autocommit should be true
-    // per normal JDBC rules.
-    assertTrue(conn.getAutoCommit());
-
-    // When in an XA transaction, autocommit should be false
-    xaRes.start(xid, XAResource.TMNOFLAGS);
-    assertFalse(conn.getAutoCommit());
-    xaRes.end(xid, XAResource.TMSUCCESS);
-    assertFalse(conn.getAutoCommit());
-    xaRes.commit(xid, true);
-    assertTrue(conn.getAutoCommit());
-
-    xaRes.start(xid, XAResource.TMNOFLAGS);
-    xaRes.end(xid, XAResource.TMSUCCESS);
-    xaRes.prepare(xid);
-    assertTrue(conn.getAutoCommit());
-    xaRes.commit(xid, false);
-    assertTrue(conn.getAutoCommit());
-
-    // Check that autocommit is reset to true after a 1-phase rollback
-    xaRes.start(xid, XAResource.TMNOFLAGS);
-    xaRes.end(xid, XAResource.TMSUCCESS);
-    xaRes.rollback(xid);
-    assertTrue(conn.getAutoCommit());
-
-    // Check that autocommit is reset to true after a 2-phase rollback
-    xaRes.start(xid, XAResource.TMNOFLAGS);
-    xaRes.end(xid, XAResource.TMSUCCESS);
-    xaRes.prepare(xid);
-    xaRes.rollback(xid);
-    assertTrue(conn.getAutoCommit());
-
-    // Check that autoCommit is set correctly after a getConnection-call
-    conn = xaconn.getConnection();
-    assertTrue(conn.getAutoCommit());
-
-    xaRes.start(xid, XAResource.TMNOFLAGS);
-
-    conn.createStatement().executeQuery("SELECT * FROM testxa1");
-
-    Timestamp ts1 = getTransactionTimestamp(conn);
-
-    conn.close();
-    conn = xaconn.getConnection();
-    assertFalse(conn.getAutoCommit());
-
-    Timestamp ts2 = getTransactionTimestamp(conn);
+            Statement st2 = dbConn.createStatement();
+            while (rs.next()) {
+                // TODO: This should really use org.junit.jupiter.api.Assumptions.assumeTrue (JUnit 5)
+                assertTrue(rs.getBoolean(2),
+                        "Only prepared xacts owned by current user may be present in db");
+                st2.executeUpdate("ROLLBACK PREPARED '" + rs.getString(1) + "'");
+            }
+            st2.close();
+        } finally {
+            st.close();
+        }
+    }
 
     /*
-     * Check that we're still in the same transaction. close+getConnection() should not rollback the
-     * XA-transaction implicitly.
+     * Check that the equals method works for the connection wrapper returned by
+     * PGXAConnection.getConnection().
      */
-    assertEquals(ts1, ts2);
-
-    xaRes.end(xid, XAResource.TMSUCCESS);
-    xaRes.prepare(xid);
-    xaRes.rollback(xid);
-    assertTrue(conn.getAutoCommit());
-  }
-
-  /**
-   * <p>Get the time the current transaction was started from the server.</p>
-   *
-   * <p>This can be used to check that transaction doesn't get committed/ rolled back inadvertently, by
-   * calling this once before and after the suspected piece of code, and check that they match. It's
-   * a bit iffy, conceivably you might get the same timestamp anyway if the suspected piece of code
-   * runs fast enough, and/or the server clock is very coarse grained. But it'll do for testing
-   * purposes.</p>
-   */
-  private static Timestamp getTransactionTimestamp(Connection conn) throws SQLException {
-    ResultSet rs = conn.createStatement().executeQuery("SELECT now()");
-    rs.next();
-    return rs.getTimestamp(1);
-  }
-
-  @Test
-  void endThenJoin() throws XAException {
-    Xid xid = new CustomXid(5);
-
-    xaRes.start(xid, XAResource.TMNOFLAGS);
-    xaRes.end(xid, XAResource.TMSUCCESS);
-    xaRes.start(xid, XAResource.TMJOIN);
-    xaRes.end(xid, XAResource.TMSUCCESS);
-    xaRes.commit(xid, true);
-  }
-
-  @Test
-  void restoreOfAutoCommit() throws Exception {
-    conn.setAutoCommit(false);
-
-    Xid xid = new CustomXid(14);
-    xaRes.start(xid, XAResource.TMNOFLAGS);
-    xaRes.end(xid, XAResource.TMSUCCESS);
-    xaRes.commit(xid, true);
-
-    assertFalse(
-        conn.getAutoCommit(),
-        "XaResource should have restored connection autocommit mode after commit or rollback to the initial state.");
-
-    // Test true case
-    conn.setAutoCommit(true);
-
-    xid = new CustomXid(15);
-    xaRes.start(xid, XAResource.TMNOFLAGS);
-    xaRes.end(xid, XAResource.TMSUCCESS);
-    xaRes.commit(xid, true);
-
-    assertTrue(
-        conn.getAutoCommit(),
-        "XaResource should have restored connection autocommit mode after commit or rollback to the initial state.");
-
-  }
-
-  @Test
-  void restoreOfAutoCommitEndThenJoin() throws Exception {
-    // Test with TMJOIN
-    conn.setAutoCommit(true);
-
-    Xid xid = new CustomXid(16);
-    xaRes.start(xid, XAResource.TMNOFLAGS);
-    xaRes.end(xid, XAResource.TMSUCCESS);
-    xaRes.start(xid, XAResource.TMJOIN);
-    xaRes.end(xid, XAResource.TMSUCCESS);
-    xaRes.commit(xid, true);
-
-    assertTrue(
-        conn.getAutoCommit(),
-        "XaResource should have restored connection autocommit mode after start(TMNOFLAGS) end() start(TMJOIN) and then commit or rollback to the initial state.");
-
-  }
-
-  /**
-   * Test how the driver responds to rolling back a transaction that has already been rolled back.
-   * Check the driver reports the xid does not exist. The db knows the fact. ERROR: prepared
-   * transaction with identifier "blah" does not exist
-   */
-  @Test
-  void repeatedRolledBack() throws Exception {
-    Xid xid = new CustomXid(654321);
-    xaRes.start(xid, XAResource.TMNOFLAGS);
-    xaRes.end(xid, XAResource.TMSUCCESS);
-    xaRes.prepare(xid);
-    // tm crash
-    xaRes.recover(XAResource.TMSTARTRSCAN);
-    xaRes.rollback(xid);
-    try {
-      xaRes.rollback(xid);
-      fail("Rollback was successful");
-    } catch (XAException xae) {
-      assertEquals(XAException.XAER_NOTA, xae.errorCode, "Checking the errorCode is XAER_NOTA indicating the " + "xid does not exist.");
-    }
-  }
-
-  /**
-   * Invoking prepare on already prepared {@link Xid} causes {@link XAException} being thrown
-   * with error code {@link XAException#XAER_PROTO}.
-   */
-  @Test
-  void preparingPreparedXid() throws Exception {
-    Xid xid = new CustomXid(1);
-    xaRes.start(xid, XAResource.TMNOFLAGS);
-    xaRes.end(xid, XAResource.TMSUCCESS);
-    xaRes.prepare(xid);
-    try {
-      xaRes.prepare(xid);
-      fail("Prepare is expected to fail with XAER_PROTO as xid was already prepared");
-    } catch (XAException xae) {
-      assertEquals(XAException.XAER_PROTO, xae.errorCode, "Prepare call on already prepared xid " + xid + " expects XAER_PROTO");
-    } finally {
-      xaRes.rollback(xid);
-    }
-  }
-
-  /**
-   * Invoking commit on already committed {@link Xid} causes {@link XAException} being thrown
-   * with error code {@link XAException#XAER_NOTA}.
-   */
-  @Test
-  void committingCommittedXid() throws Exception {
-    Xid xid = new CustomXid(1);
-    xaRes.start(xid, XAResource.TMNOFLAGS);
-    xaRes.end(xid, XAResource.TMSUCCESS);
-    xaRes.prepare(xid);
-    xaRes.commit(xid, false);
-
-    try {
-      xaRes.commit(xid, false);
-      fail("Commit is expected to fail with XAER_NOTA as xid was already committed");
-    } catch (XAException xae) {
-      assertEquals(XAException.XAER_NOTA, xae.errorCode, "Commit call on already committed xid " + xid + " expects XAER_NOTA");
-    }
-  }
-
-  /**
-   * Invoking commit on {@link Xid} committed by different connection.
-   * That different connection could be for example transaction manager recovery.
-   */
-  @Test
-  void commitByDifferentConnection() throws Exception {
-    Xid xid = new CustomXid(1);
-    xaRes.start(xid, XAResource.TMNOFLAGS);
-    xaRes.end(xid, XAResource.TMSUCCESS);
-    xaRes.prepare(xid);
-
-    XADataSource secondDs = null;
-    try {
-      secondDs = new PGXADataSource();
-      BaseDataSourceTest.setupDataSource((PGXADataSource) secondDs);
-      XAResource secondXaRes = secondDs.getXAConnection().getXAResource();
-      secondXaRes.recover(XAResource.TMSTARTRSCAN | XAResource.TMENDRSCAN);
-      secondXaRes.commit(xid, false);
-    } finally {
-      if (secondDs != null) {
-        secondDs.getXAConnection().close();
-      }
+    @Test
+    void wrapperEquals() throws Exception {
+        assertEquals(conn, conn, "Wrappers should be equal");
+        assertNotEquals(null, conn, "Wrapper should be unequal to null");
+        assertNotEquals("dummy string object", conn, "Wrapper should be unequal to unrelated object");
     }
 
-    try {
-      xaRes.commit(xid, false);
-      fail("Commit is expected to fail with XAER_RMERR as somebody else already committed");
-    } catch (XAException xae) {
-      assertEquals(XAException.XAER_RMERR, xae.errorCode, "Commit call on already committed xid " + xid + " expects XAER_RMERR");
-    }
-  }
-
-  /**
-   * Invoking rollback on {@link Xid} rolled-back by different connection.
-   * That different connection could be for example transaction manager recovery.
-   */
-  @Test
-  void rollbackByDifferentConnection() throws Exception {
-    Xid xid = new CustomXid(1);
-    xaRes.start(xid, XAResource.TMNOFLAGS);
-    xaRes.end(xid, XAResource.TMSUCCESS);
-    xaRes.prepare(xid);
-
-    XADataSource secondDs = null;
-    try {
-      secondDs = new PGXADataSource();
-      BaseDataSourceTest.setupDataSource((PGXADataSource) secondDs);
-      XAResource secondXaRes = secondDs.getXAConnection().getXAResource();
-      secondXaRes.recover(XAResource.TMSTARTRSCAN | XAResource.TMENDRSCAN);
-      secondXaRes.rollback(xid);
-    } finally {
-      if (secondDs != null) {
-        secondDs.getXAConnection().close();
-      }
+    @Test
+    void onePhase() throws Exception {
+        Xid xid = new CustomXid(1);
+        xaRes.start(xid, XAResource.TMNOFLAGS);
+        conn.createStatement().executeQuery("SELECT * FROM testxa1");
+        xaRes.end(xid, XAResource.TMSUCCESS);
+        xaRes.commit(xid, true);
     }
 
-    try {
-      xaRes.rollback(xid);
-      fail("Rollback is expected to fail with XAER_RMERR as somebody else already rolled-back");
-    } catch (XAException xae) {
-      assertEquals(XAException.XAER_RMERR, xae.errorCode, "Rollback call on already rolled-back xid " + xid + " expects XAER_RMERR");
+    @Test
+    void twoPhaseCommit() throws Exception {
+        Xid xid = new CustomXid(1);
+        xaRes.start(xid, XAResource.TMNOFLAGS);
+        conn.createStatement().executeQuery("SELECT * FROM testxa1");
+        xaRes.end(xid, XAResource.TMSUCCESS);
+        xaRes.prepare(xid);
+        xaRes.commit(xid, false);
     }
-  }
 
-  /**
-   * One-phase commit of prepared {@link Xid} should throw exception.
-   */
-  @Test
-  void onePhaseCommitOfPrepared() throws Exception {
-    Xid xid = new CustomXid(1);
-    xaRes.start(xid, XAResource.TMNOFLAGS);
-    xaRes.end(xid, XAResource.TMSUCCESS);
-    xaRes.prepare(xid);
+    @Test
+    void closeBeforeCommit() throws Exception {
+        Xid xid = new CustomXid(5);
+        xaRes.start(xid, XAResource.TMNOFLAGS);
+        assertEquals(1, conn.createStatement().executeUpdate("INSERT INTO testxa1 VALUES (1)"));
+        conn.close();
+        xaRes.end(xid, XAResource.TMSUCCESS);
+        xaRes.commit(xid, true);
 
-    try {
-      xaRes.commit(xid, true);
-      fail("One-phase commit is expected to fail with XAER_PROTO when called on prepared xid");
-    } catch (XAException xae) {
-      assertEquals(XAException.XAER_PROTO, xae.errorCode, "One-phase commit of prepared xid " + xid + " expects XAER_PROTO");
+        ResultSet rs = dbConn.createStatement().executeQuery("SELECT foo FROM testxa1");
+        assertTrue(rs.next());
+        assertEquals(1, rs.getInt(1));
     }
-  }
 
-  /**
-   * Invoking one-phase commit on already one-phase committed {@link Xid} causes
-   * {@link XAException} being thrown with error code {@link XAException#XAER_NOTA}.
-   */
-  @Test
-  void onePhaseCommittingCommittedXid() throws Exception {
-    Xid xid = new CustomXid(1);
-    xaRes.start(xid, XAResource.TMNOFLAGS);
-    xaRes.end(xid, XAResource.TMSUCCESS);
-    xaRes.commit(xid, true);
+    @Test
+    void recover() throws Exception {
+        Xid xid = new CustomXid(12345);
+        xaRes.start(xid, XAResource.TMNOFLAGS);
+        conn.createStatement().executeQuery("SELECT * FROM testxa1");
+        xaRes.end(xid, XAResource.TMSUCCESS);
+        xaRes.prepare(xid);
 
-    try {
-      xaRes.commit(xid, true);
-      fail("One-phase commit is expected to fail with XAER_NOTA as xid was already committed");
-    } catch (XAException xae) {
-      assertEquals(XAException.XAER_NOTA, xae.errorCode, "One-phase commit call on already committed xid " + xid + " expects XAER_NOTA");
+        {
+            Xid[] recoveredXidArray = xaRes.recover(XAResource.TMSTARTRSCAN);
+
+            boolean recoveredXid = false;
+
+            for (Xid aRecoveredXidArray : recoveredXidArray) {
+                if (xid.equals(aRecoveredXidArray)) {
+                    recoveredXid = true;
+                    break;
+                }
+            }
+
+            assertTrue(recoveredXid, "Did not recover prepared xid");
+            assertEquals(0, xaRes.recover(XAResource.TMNOFLAGS).length);
+        }
+
+        xaRes.rollback(xid);
+
+        {
+            Xid[] recoveredXidArray = xaRes.recover(XAResource.TMSTARTRSCAN);
+
+            boolean recoveredXid = false;
+
+            for (Xid aRecoveredXidArray : recoveredXidArray) {
+                if (xid.equals(aRecoveredXidArray)) {
+                    recoveredXid = true;
+                    break;
+                }
+            }
+
+            assertFalse(recoveredXid, "Recovered rolled back xid");
+        }
     }
-  }
 
-  /**
-   * When unknown xid is tried to be prepared the expected {@link XAException#errorCode}
-   * is {@link XAException#XAER_NOTA}.
-   */
-  @Test
-  void prepareUnknownXid() throws Exception {
-    Xid xid = new CustomXid(1);
-    try {
-      xaRes.prepare(xid);
-      fail("Prepare is expected to fail with XAER_NOTA as used unknown xid");
-    } catch (XAException xae) {
-      assertEquals(XAException.XAER_NOTA, xae.errorCode, "Prepare call on unknown xid " + xid + " expects XAER_NOTA");
+    @Test
+    void rollback() throws XAException {
+        Xid xid = new CustomXid(3);
+
+        xaRes.start(xid, XAResource.TMNOFLAGS);
+        xaRes.end(xid, XAResource.TMSUCCESS);
+        xaRes.prepare(xid);
+        xaRes.rollback(xid);
     }
-  }
 
-  /**
-   * When unknown xid is tried to be committed the expected {@link XAException#errorCode}
-   * is {@link XAException#XAER_NOTA}.
-   */
-  @Test
-  void commitUnknownXid() throws Exception {
-    Xid xid = new CustomXid(1);
-    Xid unknownXid = new CustomXid(42);
-    xaRes.start(xid, XAResource.TMNOFLAGS);
-    xaRes.end(xid, XAResource.TMSUCCESS);
-    xaRes.prepare(xid);
-    try {
-      xaRes.commit(unknownXid, false);
-      fail("Commit is expected to fail with XAER_NOTA as used unknown xid");
-    } catch (XAException xae) {
-      assertEquals(XAException.XAER_NOTA, xae.errorCode, "Commit call on unknown xid " + unknownXid + " expects XAER_NOTA");
-    } finally {
-      xaRes.rollback(xid);
+    @Test
+    void rollbackWithoutPrepare() throws XAException {
+        Xid xid = new CustomXid(4);
+
+        xaRes.start(xid, XAResource.TMNOFLAGS);
+        xaRes.end(xid, XAResource.TMSUCCESS);
+        xaRes.rollback(xid);
     }
-  }
 
-  /**
-   * When unknown xid is tried to be committed with one-phase commit optimization
-   * the expected {@link XAException#errorCode} is {@link XAException#XAER_NOTA}.
-   */
-  @Test
-  void onePhaseCommitUnknownXid() throws Exception {
-    Xid xid = new CustomXid(1);
-    Xid unknownXid = new CustomXid(42);
-    xaRes.start(xid, XAResource.TMNOFLAGS);
-    xaRes.end(xid, XAResource.TMSUCCESS);
-    try {
-      xaRes.commit(unknownXid, true);
-      fail("One-phase commit is expected to fail with XAER_NOTA as used unknown xid");
-    } catch (XAException xae) {
-      assertEquals(XAException.XAER_NOTA, xae.errorCode, "Commit call on unknown xid " + unknownXid + " expects XAER_NOTA");
-    } finally {
-      xaRes.rollback(xid);
+    @Test
+    void autoCommit() throws Exception {
+        Xid xid = new CustomXid(6);
+
+        // When not in an XA transaction, autocommit should be true
+        // per normal JDBC rules.
+        assertTrue(conn.getAutoCommit());
+
+        // When in an XA transaction, autocommit should be false
+        xaRes.start(xid, XAResource.TMNOFLAGS);
+        assertFalse(conn.getAutoCommit());
+        xaRes.end(xid, XAResource.TMSUCCESS);
+        assertFalse(conn.getAutoCommit());
+        xaRes.commit(xid, true);
+        assertTrue(conn.getAutoCommit());
+
+        xaRes.start(xid, XAResource.TMNOFLAGS);
+        xaRes.end(xid, XAResource.TMSUCCESS);
+        xaRes.prepare(xid);
+        assertTrue(conn.getAutoCommit());
+        xaRes.commit(xid, false);
+        assertTrue(conn.getAutoCommit());
+
+        // Check that autocommit is reset to true after a 1-phase rollback
+        xaRes.start(xid, XAResource.TMNOFLAGS);
+        xaRes.end(xid, XAResource.TMSUCCESS);
+        xaRes.rollback(xid);
+        assertTrue(conn.getAutoCommit());
+
+        // Check that autocommit is reset to true after a 2-phase rollback
+        xaRes.start(xid, XAResource.TMNOFLAGS);
+        xaRes.end(xid, XAResource.TMSUCCESS);
+        xaRes.prepare(xid);
+        xaRes.rollback(xid);
+        assertTrue(conn.getAutoCommit());
+
+        // Check that autoCommit is set correctly after a getConnection-call
+        conn = xaconn.getConnection();
+        assertTrue(conn.getAutoCommit());
+
+        xaRes.start(xid, XAResource.TMNOFLAGS);
+
+        conn.createStatement().executeQuery("SELECT * FROM testxa1");
+
+        Timestamp ts1 = getTransactionTimestamp(conn);
+
+        conn.close();
+        conn = xaconn.getConnection();
+        assertFalse(conn.getAutoCommit());
+
+        Timestamp ts2 = getTransactionTimestamp(conn);
+
+        /*
+         * Check that we're still in the same transaction. close+getConnection() should not rollback the
+         * XA-transaction implicitly.
+         */
+        assertEquals(ts1, ts2);
+
+        xaRes.end(xid, XAResource.TMSUCCESS);
+        xaRes.prepare(xid);
+        xaRes.rollback(xid);
+        assertTrue(conn.getAutoCommit());
     }
-  }
 
-  /**
-   * When unknown xid is tried to be rolled-back the expected {@link XAException#errorCode}
-   * is {@link XAException#XAER_NOTA}.
-   */
-  @Test
-  void rollbackUnknownXid() throws Exception {
-    Xid xid = new CustomXid(1);
-    Xid unknownXid = new CustomXid(42);
-    xaRes.start(xid, XAResource.TMNOFLAGS);
-    xaRes.end(xid, XAResource.TMSUCCESS);
-    xaRes.prepare(xid);
-    try {
-      xaRes.rollback(unknownXid);
-      fail("Rollback is expected to fail as used unknown xid");
-    } catch (XAException xae) {
-      assertEquals(XAException.XAER_NOTA, xae.errorCode, "Commit call on unknown xid " + unknownXid + " expects XAER_NOTA");
-    } finally {
-      xaRes.rollback(xid);
+    @Test
+    void endThenJoin() throws XAException {
+        Xid xid = new CustomXid(5);
+
+        xaRes.start(xid, XAResource.TMNOFLAGS);
+        xaRes.end(xid, XAResource.TMSUCCESS);
+        xaRes.start(xid, XAResource.TMJOIN);
+        xaRes.end(xid, XAResource.TMSUCCESS);
+        xaRes.commit(xid, true);
     }
-  }
 
-  /**
-   * When trying to commit xid which was already removed by arbitrary action of database.
-   * Resource manager can't expect state of the {@link Xid}.
-   */
-  @Test
-  void databaseRemovesPreparedBeforeCommit() throws Exception {
-    Xid xid = new CustomXid(1);
-    xaRes.start(xid, XAResource.TMNOFLAGS);
-    xaRes.end(xid, XAResource.TMSUCCESS);
-    xaRes.prepare(xid);
+    @Test
+    void restoreOfAutoCommit() throws Exception {
+        conn.setAutoCommit(false);
 
-    clearAllPrepared();
+        Xid xid = new CustomXid(14);
+        xaRes.start(xid, XAResource.TMNOFLAGS);
+        xaRes.end(xid, XAResource.TMSUCCESS);
+        xaRes.commit(xid, true);
+
+        assertFalse(
+                conn.getAutoCommit(),
+                "XaResource should have restored connection autocommit mode after commit or rollback to the initial state.");
+
+        // Test true case
+        conn.setAutoCommit(true);
+
+        xid = new CustomXid(15);
+        xaRes.start(xid, XAResource.TMNOFLAGS);
+        xaRes.end(xid, XAResource.TMSUCCESS);
+        xaRes.commit(xid, true);
+
+        assertTrue(
+                conn.getAutoCommit(),
+                "XaResource should have restored connection autocommit mode after commit or rollback to the initial state.");
 
-    try {
-      xaRes.commit(xid, false);
-      fail("Commit is expected to fail as committed xid was removed before");
-    } catch (XAException xae) {
-      assertEquals(XAException.XAER_RMERR, xae.errorCode, "Commit call on xid " + xid + " not known to DB expects XAER_RMERR");
     }
-  }
 
-  /**
-   * When trying to rollback xid which was already removed by arbitrary action of database.
-   * Resource manager can't expect state of the {@link Xid}.
-   */
-  @Test
-  void databaseRemovesPreparedBeforeRollback() throws Exception {
-    Xid xid = new CustomXid(1);
-    xaRes.start(xid, XAResource.TMNOFLAGS);
-    xaRes.end(xid, XAResource.TMSUCCESS);
-    xaRes.prepare(xid);
+    @Test
+    void restoreOfAutoCommitEndThenJoin() throws Exception {
+        // Test with TMJOIN
+        conn.setAutoCommit(true);
 
-    clearAllPrepared();
+        Xid xid = new CustomXid(16);
+        xaRes.start(xid, XAResource.TMNOFLAGS);
+        xaRes.end(xid, XAResource.TMSUCCESS);
+        xaRes.start(xid, XAResource.TMJOIN);
+        xaRes.end(xid, XAResource.TMSUCCESS);
+        xaRes.commit(xid, true);
+
+        assertTrue(
+                conn.getAutoCommit(),
+                "XaResource should have restored connection autocommit mode after start(TMNOFLAGS) end() start(TMJOIN) and then commit or rollback to the initial state.");
 
-    try {
-      xaRes.rollback(xid);
-      fail("Rollback is expected to fail as committed xid was removed before");
-    } catch (XAException xae) {
-      assertEquals(XAException.XAER_RMERR, xae.errorCode, "Rollback call on xid " + xid + " not known to DB expects XAER_RMERR");
     }
-  }
 
-  /**
-   * When trying to commit and connection issue happens then
-   * {@link XAException} error code {@link XAException#XAER_RMFAIL} is expected.
-   */
-  @Test
-  void networkIssueOnCommit() throws Exception {
-    Xid xid = new CustomXid(1);
-    xaRes.start(xid, XAResource.TMNOFLAGS);
-    xaRes.end(xid, XAResource.TMSUCCESS);
-    xaRes.prepare(xid);
-
-    xaconn.close();
-
-    try {
-      xaRes.commit(xid, false);
-      fail("Commit is expected to fail as connection was closed");
-    } catch (XAException xae) {
-      assertEquals(XAException.XAER_RMFAIL, xae.errorCode, "Commit call on closed connection expects XAER_RMFAIL");
+    /**
+     * Test how the driver responds to rolling back a transaction that has already been rolled back.
+     * Check the driver reports the xid does not exist. The db knows the fact. ERROR: prepared
+     * transaction with identifier "blah" does not exist
+     */
+    @Test
+    void repeatedRolledBack() throws Exception {
+        Xid xid = new CustomXid(654321);
+        xaRes.start(xid, XAResource.TMNOFLAGS);
+        xaRes.end(xid, XAResource.TMSUCCESS);
+        xaRes.prepare(xid);
+        // tm crash
+        xaRes.recover(XAResource.TMSTARTRSCAN);
+        xaRes.rollback(xid);
+        try {
+            xaRes.rollback(xid);
+            fail("Rollback was successful");
+        } catch (XAException xae) {
+            assertEquals(XAException.XAER_NOTA, xae.errorCode, "Checking the errorCode is XAER_NOTA indicating the " + "xid does not exist.");
+        }
     }
-  }
 
-  /**
-   * When trying to one-phase commit and connection issue happens then
-   * {@link XAException} error code {@link XAException#XAER_RMFAIL} is expected.
-   */
-  @Test
-  void networkIssueOnOnePhaseCommit() throws Exception {
-    Xid xid = new CustomXid(1);
-    xaRes.start(xid, XAResource.TMNOFLAGS);
-    xaRes.end(xid, XAResource.TMSUCCESS);
-
-    xaconn.close();
-
-    try {
-      xaRes.commit(xid, true);
-      fail("One-phase commit is expected to fail as connection was closed");
-    } catch (XAException xae) {
-      assertEquals(XAException.XAER_RMFAIL, xae.errorCode, "One-phase commit call on closed connection expects XAER_RMFAIL");
+    /**
+     * Invoking prepare on already prepared {@link Xid} causes {@link XAException} being thrown
+     * with error code {@link XAException#XAER_PROTO}.
+     */
+    @Test
+    void preparingPreparedXid() throws Exception {
+        Xid xid = new CustomXid(1);
+        xaRes.start(xid, XAResource.TMNOFLAGS);
+        xaRes.end(xid, XAResource.TMSUCCESS);
+        xaRes.prepare(xid);
+        try {
+            xaRes.prepare(xid);
+            fail("Prepare is expected to fail with XAER_PROTO as xid was already prepared");
+        } catch (XAException xae) {
+            assertEquals(XAException.XAER_PROTO, xae.errorCode, "Prepare call on already prepared xid " + xid + " expects XAER_PROTO");
+        } finally {
+            xaRes.rollback(xid);
+        }
     }
-  }
 
-  /**
-   * When trying to rollback and connection issue happens then
-   * {@link XAException} error code {@link XAException#XAER_RMFAIL} is expected.
-   */
-  @Test
-  void networkIssueOnRollback() throws Exception {
-    Xid xid = new CustomXid(1);
-    xaRes.start(xid, XAResource.TMNOFLAGS);
-    xaRes.end(xid, XAResource.TMSUCCESS);
-    xaRes.prepare(xid);
+    /**
+     * Invoking commit on already committed {@link Xid} causes {@link XAException} being thrown
+     * with error code {@link XAException#XAER_NOTA}.
+     */
+    @Test
+    void committingCommittedXid() throws Exception {
+        Xid xid = new CustomXid(1);
+        xaRes.start(xid, XAResource.TMNOFLAGS);
+        xaRes.end(xid, XAResource.TMSUCCESS);
+        xaRes.prepare(xid);
+        xaRes.commit(xid, false);
 
-    xaconn.close();
-
-    try {
-      xaRes.rollback(xid);
-      fail("Rollback is expected to fail as connection was closed");
-    } catch (XAException xae) {
-      assertEquals(XAException.XAER_RMFAIL, xae.errorCode, "Rollback call on closed connection expects XAER_RMFAIL");
+        try {
+            xaRes.commit(xid, false);
+            fail("Commit is expected to fail with XAER_NOTA as xid was already committed");
+        } catch (XAException xae) {
+            assertEquals(XAException.XAER_NOTA, xae.errorCode, "Commit call on already committed xid " + xid + " expects XAER_NOTA");
+        }
     }
-  }
 
-  /**
-   * When using deferred constraints a constraint violation can occur on prepare. This has to be
-   * mapped to the correct XA Error Code
-   */
-  @Test
-  void mappingOfConstraintViolations() throws Exception {
-    Xid xid = new CustomXid(1);
-    xaRes.start(xid, XAResource.TMNOFLAGS);
-    assertEquals(0, conn.createStatement().executeUpdate("SET CONSTRAINTS ALL DEFERRED"));
-    assertEquals(1, conn.createStatement().executeUpdate("INSERT INTO testxa3 VALUES (4)"));
-    xaRes.end(xid, XAResource.TMSUCCESS);
+    /**
+     * Invoking commit on {@link Xid} committed by different connection.
+     * That different connection could be for example transaction manager recovery.
+     */
+    @Test
+    void commitByDifferentConnection() throws Exception {
+        Xid xid = new CustomXid(1);
+        xaRes.start(xid, XAResource.TMNOFLAGS);
+        xaRes.end(xid, XAResource.TMSUCCESS);
+        xaRes.prepare(xid);
 
-    try {
-      xaRes.prepare(xid);
+        XADataSource secondDs = null;
+        try {
+            secondDs = new PGXADataSource();
+            BaseDataSourceTest.setupDataSource((PGXADataSource) secondDs);
+            XAResource secondXaRes = secondDs.getXAConnection().getXAResource();
+            secondXaRes.recover(XAResource.TMSTARTRSCAN | XAResource.TMENDRSCAN);
+            secondXaRes.commit(xid, false);
+        } finally {
+            if (secondDs != null) {
+                secondDs.getXAConnection().close();
+            }
+        }
 
-      fail("Prepare is expected to fail as an integrity violation occurred");
-    } catch (XAException xae) {
-      assertEquals(XAException.XA_RBINTEGRITY, xae.errorCode, "Prepare call with deferred constraints violations expects XA_RBINTEGRITY");
+        try {
+            xaRes.commit(xid, false);
+            fail("Commit is expected to fail with XAER_RMERR as somebody else already committed");
+        } catch (XAException xae) {
+            assertEquals(XAException.XAER_RMERR, xae.errorCode, "Commit call on already committed xid " + xid + " expects XAER_RMERR");
+        }
     }
-  }
 
-  /*
-   * We don't support transaction interleaving. public void testInterleaving1() throws Exception {
-   * Xid xid1 = new CustomXid(1); Xid xid2 = new CustomXid(2);
-   *
-   * xaRes.start(xid1, XAResource.TMNOFLAGS); conn.createStatement().executeUpdate(
-   * "UPDATE testxa1 SET foo = 'ccc'"); xaRes.end(xid1, XAResource.TMSUCCESS);
-   *
-   * xaRes.start(xid2, XAResource.TMNOFLAGS); conn.createStatement().executeUpdate(
-   * "UPDATE testxa2 SET foo = 'bbb'");
-   *
-   * xaRes.commit(xid1, true);
-   *
-   * xaRes.end(xid2, XAResource.TMSUCCESS);
-   *
-   * xaRes.commit(xid2, true);
-   *
-   * } public void testInterleaving2() throws Exception { Xid xid1 = new CustomXid(1); Xid xid2 =
-   * new CustomXid(2); Xid xid3 = new CustomXid(3);
-   *
-   * xaRes.start(xid1, XAResource.TMNOFLAGS); conn.createStatement().executeUpdate(
-   * "UPDATE testxa1 SET foo = 'aa'"); xaRes.end(xid1, XAResource.TMSUCCESS);
-   *
-   * xaRes.start(xid2, XAResource.TMNOFLAGS); conn.createStatement().executeUpdate(
-   * "UPDATE testxa2 SET foo = 'bb'"); xaRes.end(xid2, XAResource.TMSUCCESS);
-   *
-   * xaRes.start(xid3, XAResource.TMNOFLAGS); conn.createStatement().executeUpdate(
-   * "UPDATE testxa3 SET foo = 'cc'"); xaRes.end(xid3, XAResource.TMSUCCESS);
-   *
-   * xaRes.commit(xid1, true); xaRes.commit(xid2, true); xaRes.commit(xid3, true); }
-   */
+    /**
+     * Invoking rollback on {@link Xid} rolled-back by different connection.
+     * That different connection could be for example transaction manager recovery.
+     */
+    @Test
+    void rollbackByDifferentConnection() throws Exception {
+        Xid xid = new CustomXid(1);
+        xaRes.start(xid, XAResource.TMNOFLAGS);
+        xaRes.end(xid, XAResource.TMSUCCESS);
+        xaRes.prepare(xid);
+
+        XADataSource secondDs = null;
+        try {
+            secondDs = new PGXADataSource();
+            BaseDataSourceTest.setupDataSource((PGXADataSource) secondDs);
+            XAResource secondXaRes = secondDs.getXAConnection().getXAResource();
+            secondXaRes.recover(XAResource.TMSTARTRSCAN | XAResource.TMENDRSCAN);
+            secondXaRes.rollback(xid);
+        } finally {
+            if (secondDs != null) {
+                secondDs.getXAConnection().close();
+            }
+        }
+
+        try {
+            xaRes.rollback(xid);
+            fail("Rollback is expected to fail with XAER_RMERR as somebody else already rolled-back");
+        } catch (XAException xae) {
+            assertEquals(XAException.XAER_RMERR, xae.errorCode, "Rollback call on already rolled-back xid " + xid + " expects XAER_RMERR");
+        }
+    }
+
+    /**
+     * One-phase commit of prepared {@link Xid} should throw exception.
+     */
+    @Test
+    void onePhaseCommitOfPrepared() throws Exception {
+        Xid xid = new CustomXid(1);
+        xaRes.start(xid, XAResource.TMNOFLAGS);
+        xaRes.end(xid, XAResource.TMSUCCESS);
+        xaRes.prepare(xid);
+
+        try {
+            xaRes.commit(xid, true);
+            fail("One-phase commit is expected to fail with XAER_PROTO when called on prepared xid");
+        } catch (XAException xae) {
+            assertEquals(XAException.XAER_PROTO, xae.errorCode, "One-phase commit of prepared xid " + xid + " expects XAER_PROTO");
+        }
+    }
+
+    /**
+     * Invoking one-phase commit on already one-phase committed {@link Xid} causes
+     * {@link XAException} being thrown with error code {@link XAException#XAER_NOTA}.
+     */
+    @Test
+    void onePhaseCommittingCommittedXid() throws Exception {
+        Xid xid = new CustomXid(1);
+        xaRes.start(xid, XAResource.TMNOFLAGS);
+        xaRes.end(xid, XAResource.TMSUCCESS);
+        xaRes.commit(xid, true);
+
+        try {
+            xaRes.commit(xid, true);
+            fail("One-phase commit is expected to fail with XAER_NOTA as xid was already committed");
+        } catch (XAException xae) {
+            assertEquals(XAException.XAER_NOTA, xae.errorCode, "One-phase commit call on already committed xid " + xid + " expects XAER_NOTA");
+        }
+    }
+
+    /**
+     * When unknown xid is tried to be prepared the expected {@link XAException#errorCode}
+     * is {@link XAException#XAER_NOTA}.
+     */
+    @Test
+    void prepareUnknownXid() throws Exception {
+        Xid xid = new CustomXid(1);
+        try {
+            xaRes.prepare(xid);
+            fail("Prepare is expected to fail with XAER_NOTA as used unknown xid");
+        } catch (XAException xae) {
+            assertEquals(XAException.XAER_NOTA, xae.errorCode, "Prepare call on unknown xid " + xid + " expects XAER_NOTA");
+        }
+    }
+
+    /**
+     * When unknown xid is tried to be committed the expected {@link XAException#errorCode}
+     * is {@link XAException#XAER_NOTA}.
+     */
+    @Test
+    void commitUnknownXid() throws Exception {
+        Xid xid = new CustomXid(1);
+        Xid unknownXid = new CustomXid(42);
+        xaRes.start(xid, XAResource.TMNOFLAGS);
+        xaRes.end(xid, XAResource.TMSUCCESS);
+        xaRes.prepare(xid);
+        try {
+            xaRes.commit(unknownXid, false);
+            fail("Commit is expected to fail with XAER_NOTA as used unknown xid");
+        } catch (XAException xae) {
+            assertEquals(XAException.XAER_NOTA, xae.errorCode, "Commit call on unknown xid " + unknownXid + " expects XAER_NOTA");
+        } finally {
+            xaRes.rollback(xid);
+        }
+    }
+
+    /**
+     * When unknown xid is tried to be committed with one-phase commit optimization
+     * the expected {@link XAException#errorCode} is {@link XAException#XAER_NOTA}.
+     */
+    @Test
+    void onePhaseCommitUnknownXid() throws Exception {
+        Xid xid = new CustomXid(1);
+        Xid unknownXid = new CustomXid(42);
+        xaRes.start(xid, XAResource.TMNOFLAGS);
+        xaRes.end(xid, XAResource.TMSUCCESS);
+        try {
+            xaRes.commit(unknownXid, true);
+            fail("One-phase commit is expected to fail with XAER_NOTA as used unknown xid");
+        } catch (XAException xae) {
+            assertEquals(XAException.XAER_NOTA, xae.errorCode, "One-phase commit call on unknown xid " + unknownXid + " expects XAER_NOTA");
+        } finally {
+            xaRes.rollback(xid);
+        }
+    }
+
+    /**
+     * When unknown xid is tried to be rolled-back the expected {@link XAException#errorCode}
+     * is {@link XAException#XAER_NOTA}.
+     */
+    @Test
+    void rollbackUnknownXid() throws Exception {
+        Xid xid = new CustomXid(1);
+        Xid unknownXid = new CustomXid(42);
+        xaRes.start(xid, XAResource.TMNOFLAGS);
+        xaRes.end(xid, XAResource.TMSUCCESS);
+        xaRes.prepare(xid);
+        try {
+            xaRes.rollback(unknownXid);
+            fail("Rollback is expected to fail as used unknown xid");
+        } catch (XAException xae) {
+            assertEquals(XAException.XAER_NOTA, xae.errorCode, "Rollback call on unknown xid " + unknownXid + " expects XAER_NOTA");
+        } finally {
+            xaRes.rollback(xid);
+        }
+    }
+
+    /**
+     * When trying to commit xid which was already removed by arbitrary action of database.
+     * Resource manager can't expect state of the {@link Xid}.
+     */
+    @Test
+    void databaseRemovesPreparedBeforeCommit() throws Exception {
+        Xid xid = new CustomXid(1);
+        xaRes.start(xid, XAResource.TMNOFLAGS);
+        xaRes.end(xid, XAResource.TMSUCCESS);
+        xaRes.prepare(xid);
+
+        clearAllPrepared();
+
+        try {
+            xaRes.commit(xid, false);
+            fail("Commit is expected to fail as prepared xid was removed before");
+        } catch (XAException xae) {
+            assertEquals(XAException.XAER_RMERR, xae.errorCode, "Commit call on xid " + xid + " not known to DB expects XAER_RMERR");
+        }
+    }
+
+    /**
+     * When trying to rollback xid which was already removed by arbitrary action of database.
+     * Resource manager can't expect state of the {@link Xid}.
+     */
+    @Test
+    void databaseRemovesPreparedBeforeRollback() throws Exception {
+        Xid xid = new CustomXid(1);
+        xaRes.start(xid, XAResource.TMNOFLAGS);
+        xaRes.end(xid, XAResource.TMSUCCESS);
+        xaRes.prepare(xid);
+
+        clearAllPrepared();
+
+        try {
+            xaRes.rollback(xid);
+            fail("Rollback is expected to fail as prepared xid was removed before");
+        } catch (XAException xae) {
+            assertEquals(XAException.XAER_RMERR, xae.errorCode, "Rollback call on xid " + xid + " not known to DB expects XAER_RMERR");
+        }
+    }
+
+    /**
+     * When trying to commit and connection issue happens then
+     * {@link XAException} error code {@link XAException#XAER_RMFAIL} is expected.
+     */
+    @Test
+    void networkIssueOnCommit() throws Exception {
+        Xid xid = new CustomXid(1);
+        xaRes.start(xid, XAResource.TMNOFLAGS);
+        xaRes.end(xid, XAResource.TMSUCCESS);
+        xaRes.prepare(xid);
+
+        xaconn.close();
+
+        try {
+            xaRes.commit(xid, false);
+            fail("Commit is expected to fail as connection was closed");
+        } catch (XAException xae) {
+            assertEquals(XAException.XAER_RMFAIL, xae.errorCode, "Commit call on closed connection expects XAER_RMFAIL");
+        }
+    }
+
+    /**
+     * When trying to one-phase commit and connection issue happens then
+     * {@link XAException} error code {@link XAException#XAER_RMFAIL} is expected.
+     */
+    @Test
+    void networkIssueOnOnePhaseCommit() throws Exception {
+        Xid xid = new CustomXid(1);
+        xaRes.start(xid, XAResource.TMNOFLAGS);
+        xaRes.end(xid, XAResource.TMSUCCESS);
+
+        xaconn.close();
+
+        try {
+            xaRes.commit(xid, true);
+            fail("One-phase commit is expected to fail as connection was closed");
+        } catch (XAException xae) {
+            assertEquals(XAException.XAER_RMFAIL, xae.errorCode, "One-phase commit call on closed connection expects XAER_RMFAIL");
+        }
+    }
+
+    /**
+     * When trying to rollback and connection issue happens then
+     * {@link XAException} error code {@link XAException#XAER_RMFAIL} is expected.
+     */
+    @Test
+    void networkIssueOnRollback() throws Exception {
+        Xid xid = new CustomXid(1);
+        xaRes.start(xid, XAResource.TMNOFLAGS);
+        xaRes.end(xid, XAResource.TMSUCCESS);
+        xaRes.prepare(xid);
+
+        xaconn.close();
+
+        try {
+            xaRes.rollback(xid);
+            fail("Rollback is expected to fail as connection was closed");
+        } catch (XAException xae) {
+            assertEquals(XAException.XAER_RMFAIL, xae.errorCode, "Rollback call on closed connection expects XAER_RMFAIL");
+        }
+    }
+
+    /**
+     * When using deferred constraints a constraint violation can occur on prepare. This has to be
+     * mapped to the correct XA Error Code
+     */
+    @Test
+    void mappingOfConstraintViolations() throws Exception {
+        Xid xid = new CustomXid(1);
+        xaRes.start(xid, XAResource.TMNOFLAGS);
+        assertEquals(0, conn.createStatement().executeUpdate("SET CONSTRAINTS ALL DEFERRED"));
+        assertEquals(1, conn.createStatement().executeUpdate("INSERT INTO testxa3 VALUES (4)"));
+        xaRes.end(xid, XAResource.TMSUCCESS);
+
+        try {
+            xaRes.prepare(xid);
+
+            fail("Prepare is expected to fail as an integrity violation occurred");
+        } catch (XAException xae) {
+            assertEquals(XAException.XA_RBINTEGRITY, xae.errorCode, "Prepare call with deferred constraints violations expects XA_RBINTEGRITY");
+        }
+    }
+
+    static class CustomXid implements Xid {
+        private static Random rand = new Random(System.currentTimeMillis());
+        byte[] gtrid = new byte[Xid.MAXGTRIDSIZE];
+        byte[] bqual = new byte[Xid.MAXBQUALSIZE];
+
+        CustomXid(int i) {
+            rand.nextBytes(gtrid);
+            gtrid[0] = (byte) i;
+            gtrid[1] = (byte) i;
+            gtrid[2] = (byte) i;
+            gtrid[3] = (byte) i;
+            gtrid[4] = (byte) i;
+            bqual[0] = 4;
+            bqual[1] = 5;
+            bqual[2] = 6;
+        }
+
+        @Override
+        public int getFormatId() {
+            return 0;
+        }
+
+        @Override
+        public byte[] getGlobalTransactionId() {
+            return gtrid;
+        }
+
+        @Override
+        public byte[] getBranchQualifier() {
+            return bqual;
+        }
+
+        @Override
+        public boolean equals(Object o) {
+            if (!(o instanceof Xid)) {
+                return false;
+            }
+
+            Xid other = (Xid) o;
+            if (other.getFormatId() != this.getFormatId()) {
+                return false;
+            }
+            if (!Arrays.equals(other.getBranchQualifier(), this.getBranchQualifier())) {
+                return false;
+            }
+            return Arrays.equals(other.getGlobalTransactionId(), this.getGlobalTransactionId());
+        }
+
+        @Override
+        public int hashCode() {
+            final int prime = 31;
+            int result = 1;
+            result = prime * result + Arrays.hashCode(getBranchQualifier());
+            result = prime * result + getFormatId();
+            result = prime * result + Arrays.hashCode(getGlobalTransactionId());
+            return result;
+        }
+    }
+
+    /*
+     * We don't support transaction interleaving. public void testInterleaving1() throws Exception {
+     * Xid xid1 = new CustomXid(1); Xid xid2 = new CustomXid(2);
+     *
+     * xaRes.start(xid1, XAResource.TMNOFLAGS); conn.createStatement().executeUpdate(
+     * "UPDATE testxa1 SET foo = 'ccc'"); xaRes.end(xid1, XAResource.TMSUCCESS);
+     *
+     * xaRes.start(xid2, XAResource.TMNOFLAGS); conn.createStatement().executeUpdate(
+     * "UPDATE testxa2 SET foo = 'bbb'");
+     *
+     * xaRes.commit(xid1, true);
+     *
+     * xaRes.end(xid2, XAResource.TMSUCCESS);
+     *
+     * xaRes.commit(xid2, true);
+     *
+     * } public void testInterleaving2() throws Exception { Xid xid1 = new CustomXid(1); Xid xid2 =
+     * new CustomXid(2); Xid xid3 = new CustomXid(3);
+     *
+     * xaRes.start(xid1, XAResource.TMNOFLAGS); conn.createStatement().executeUpdate(
+     * "UPDATE testxa1 SET foo = 'aa'"); xaRes.end(xid1, XAResource.TMSUCCESS);
+     *
+     * xaRes.start(xid2, XAResource.TMNOFLAGS); conn.createStatement().executeUpdate(
+     * "UPDATE testxa2 SET foo = 'bb'"); xaRes.end(xid2, XAResource.TMSUCCESS);
+     *
+     * xaRes.start(xid3, XAResource.TMNOFLAGS); conn.createStatement().executeUpdate(
+     * "UPDATE testxa3 SET foo = 'cc'"); xaRes.end(xid3, XAResource.TMSUCCESS);
+     *
+     * xaRes.commit(xid1, true); xaRes.commit(xid2, true); xaRes.commit(xid3, true); }
+     */
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/test/xa/XATestSuite.java b/pgjdbc/src/test/java/org/postgresql/test/xa/XATestSuite.java
index d4d4cb7..8e9a1a2 100644
--- a/pgjdbc/src/test/java/org/postgresql/test/xa/XATestSuite.java
+++ b/pgjdbc/src/test/java/org/postgresql/test/xa/XATestSuite.java
@@ -10,7 +10,7 @@ import org.junit.runners.Suite;
 
 @RunWith(Suite.class)
 @Suite.SuiteClasses({
-    XADataSourceTest.class,
+        XADataSourceTest.class,
 })
 public class XATestSuite {
 }
diff --git a/pgjdbc/src/test/java/org/postgresql/util/BigDecimalByteConverterTest.java b/pgjdbc/src/test/java/org/postgresql/util/BigDecimalByteConverterTest.java
deleted file mode 100644
index a1af327..0000000
--- a/pgjdbc/src/test/java/org/postgresql/util/BigDecimalByteConverterTest.java
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Copyright (c) 2020, PostgreSQL Global Development Group
- * See the LICENSE file in the project root for more information.
- */
-
-package org.postgresql.util;
-
-import static org.junit.jupiter.api.Assertions.assertEquals;
-
-import org.junit.jupiter.api.Test;
-import org.junit.jupiter.params.ParameterizedTest;
-import org.junit.jupiter.params.provider.MethodSource;
-
-import java.math.BigDecimal;
-import java.math.BigInteger;
-import java.util.ArrayList;
-import java.util.Collection;
-
-/**
- *
- * @author Brett Okken
- */
-public class BigDecimalByteConverterTest {
-  public static Iterable<Object[]> data() {
-    final Collection<Object[]> numbers = new ArrayList<>();
-    numbers.add(new Object[]{new BigDecimal("0.1")});
-    numbers.add(new Object[]{new BigDecimal("0.10")});
-    numbers.add(new Object[]{new BigDecimal("0.01")});
-    numbers.add(new Object[]{new BigDecimal("0.001")});
-    numbers.add(new Object[]{new BigDecimal("0.0001")});
-    numbers.add(new Object[]{new BigDecimal("0.00001")});
-    numbers.add(new Object[]{new BigDecimal("1.0")});
-    numbers.add(new Object[]{new BigDecimal("0.000000000000000000000000000000000000000000000000000")});
-    numbers.add(new Object[]{new BigDecimal("0.100000000000000000000000000000000000000000000009900")});
-    numbers.add(new Object[]{new BigDecimal("-1.0")});
-    numbers.add(new Object[]{new BigDecimal("-1")});
-    numbers.add(new Object[]{new BigDecimal("1.2")});
-    numbers.add(new Object[]{new BigDecimal("-2.05")});
-    numbers.add(new Object[]{new BigDecimal("0.000000000000000000000000000990")});
-    numbers.add(new Object[]{new BigDecimal("-0.000000000000000000000000000990")});
-    numbers.add(new Object[]{new BigDecimal("10.0000000000099")});
-    numbers.add(new Object[]{new BigDecimal(".10000000000000")});
-    numbers.add(new Object[]{new BigDecimal("1.10000000000000")});
-    numbers.add(new Object[]{new BigDecimal("99999.2")});
-    numbers.add(new Object[]{new BigDecimal("99999")});
-    numbers.add(new Object[]{new BigDecimal("-99999.2")});
-    numbers.add(new Object[]{new BigDecimal("-99999")});
-    numbers.add(new Object[]{new BigDecimal("2147483647")});
-    numbers.add(new Object[]{new BigDecimal("-2147483648")});
-    numbers.add(new Object[]{new BigDecimal("2147483648")});
-    numbers.add(new Object[]{new BigDecimal("-2147483649")});
-    numbers.add(new Object[]{new BigDecimal("9223372036854775807")});
-    numbers.add(new Object[]{new BigDecimal("-9223372036854775808")});
-    numbers.add(new Object[]{new BigDecimal("9223372036854775808")});
-    numbers.add(new Object[]{new BigDecimal("-9223372036854775809")});
-    numbers.add(new Object[]{new BigDecimal("10223372036850000000")});
-    numbers.add(new Object[]{new BigDecimal("19223372036854775807")});
-    numbers.add(new Object[]{new BigDecimal("19223372036854775807.300")});
-    numbers.add(new Object[]{new BigDecimal("-19223372036854775807.300")});
-    numbers.add(new Object[]{new BigDecimal(BigInteger.valueOf(1234567890987654321L), -1)});
-    numbers.add(new Object[]{new BigDecimal(BigInteger.valueOf(1234567890987654321L), -5)});
-    numbers.add(new Object[]{new BigDecimal(BigInteger.valueOf(-1234567890987654321L), -3)});
-    numbers.add(new Object[]{new BigDecimal(BigInteger.valueOf(6), -8)});
-    numbers.add(new Object[]{new BigDecimal("30000")});
-    numbers.add(new Object[]{new BigDecimal("40000").setScale(15)});
-    numbers.add(new Object[]{new BigDecimal("20000.000000000000000000")});
-    numbers.add(new Object[]{new BigDecimal("9990000").setScale(8)});
-    numbers.add(new Object[]{new BigDecimal("1000000").setScale(31)});
-    numbers.add(new Object[]{new BigDecimal("10000000000000000000000000000000000000").setScale(14)});
-    numbers.add(new Object[]{new BigDecimal("90000000000000000000000000000000000000")});
-    return numbers;
-  }
-
-  @MethodSource("data")
-  @ParameterizedTest(name = "number = {0,number,#,###.##################################################}")
-  void binary(BigDecimal number) {
-    testBinaryConversion(number);
-  }
-
-  @Test
-  void bigDecimal10_pow_131072_minus_1() {
-    testBinaryConversion(
-        new BigDecimal(BigInteger.TEN.pow(131072).subtract(BigInteger.ONE))
-    );
-  }
-
-  static void testBinaryConversion(BigDecimal number) {
-    final byte[] bytes = ByteConverter.numeric(number);
-    final BigDecimal actual = (BigDecimal) ByteConverter.numeric(bytes);
-    if (number.scale() >= 0) {
-      assertEquals(number, actual);
-    } else {
-      assertEquals(number.toPlainString(), actual.toPlainString());
-    }
-  }
-}
diff --git a/pgjdbc/src/test/java/org/postgresql/util/IntListTest.java b/pgjdbc/src/test/java/org/postgresql/util/IntListTest.java
deleted file mode 100644
index e7d1451..0000000
--- a/pgjdbc/src/test/java/org/postgresql/util/IntListTest.java
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Copyright (c) 2023, PostgreSQL Global Development Group
- * See the LICENSE file in the project root for more information.
- */
-
-package org.postgresql.util;
-
-import static org.junit.jupiter.api.Assertions.assertArrayEquals;
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertSame;
-import static org.junit.jupiter.api.Assertions.assertThrows;
-
-import org.junit.jupiter.api.Test;
-
-/**
- * Tests {@link IntList}.
- */
-class IntListTest {
-
-  @Test
-  void size() {
-    final IntList list = new IntList();
-    assertEquals(0, list.size());
-    list.add(3);
-    assertEquals(1, list.size());
-
-    for (int i = 0; i < 48; i++) {
-      list.add(i);
-    }
-    assertEquals(49, list.size());
-
-    list.clear();
-    assertEquals(0, list.size());
-  }
-
-  @Test
-  void get_empty() {
-    final IntList list = new IntList();
-    assertThrows(ArrayIndexOutOfBoundsException.class, () -> list.get(0));
-  }
-
-  @Test
-  void get_negative() {
-    final IntList list = new IntList();
-    list.add(3);
-    assertThrows(ArrayIndexOutOfBoundsException.class, () -> list.get(-1));
-  }
-
-  @Test
-  void get_tooLarge() {
-    final IntList list = new IntList();
-    list.add(3);
-    assertThrows(ArrayIndexOutOfBoundsException.class, () -> list.get(1));
-  }
-
-  @Test
-  void get() {
-    final IntList list = new IntList();
-    list.add(3);
-    assertEquals(3, list.get(0));
-
-    for (int i = 0; i < 1048; i++) {
-      list.add(i);
-    }
-
-    assertEquals(3, list.get(0));
-
-    for (int i = 0; i < 1048; i++) {
-      assertEquals(i, list.get(i + 1));
-    }
-
-    list.clear();
-    list.add(4);
-    assertEquals(4, list.get(0));
-  }
-
-  @Test
-  void toArray() {
-    int[] emptyArray = new IntList().toArray();
-    IntList list = new IntList();
-    assertSame(emptyArray, list.toArray(), "emptyList.toArray()");
-
-    list.add(45);
-    assertArrayEquals(new int[]{45}, list.toArray());
-
-    list.clear();
-    assertSame(emptyArray, list.toArray(), "emptyList.toArray() after clearing the list");
-
-    final int[] expected = new int[1048];
-    for (int i = 0; i < 1048; i++) {
-      list.add(i);
-      expected[i] = i;
-    }
-    assertArrayEquals(expected, list.toArray());
-  }
-}
diff --git a/pgjdbc/src/test/java/org/postgresql/util/LazyCleanerTest.java b/pgjdbc/src/test/java/org/postgresql/util/LazyCleanerTest.java
deleted file mode 100644
index fb97b36..0000000
--- a/pgjdbc/src/test/java/org/postgresql/util/LazyCleanerTest.java
+++ /dev/null
@@ -1,150 +0,0 @@
-/*
- * Copyright (c) 2023, PostgreSQL Global Development Group
- * See the LICENSE file in the project root for more information.
- */
-
-/* changes were made to move it into the org.postgresql.util package
- *
- * Copyright 2022 Juan Lopes
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.postgresql.util;
-
-import static java.time.Duration.ofSeconds;
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertThrows;
-import static org.junit.jupiter.api.Assertions.assertTrue;
-
-import org.junit.jupiter.api.Test;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-import java.util.UUID;
-
-public class LazyCleanerTest {
-  @Test
-  void phantomCleaner() throws InterruptedException {
-    List<Object> list = new ArrayList<>(Arrays.asList(
-        new Object(), new Object(), new Object()));
-
-    LazyCleaner t = new LazyCleaner(ofSeconds(5), "Cleaner");
-
-    String[] collected = new String[list.size()];
-    List<LazyCleaner.Cleanable<RuntimeException>> cleaners = new ArrayList<>();
-    for (int i = 0; i < list.size(); i++) {
-      final int ii = i;
-      cleaners.add(
-          t.register(
-              list.get(i),
-              leak -> {
-                collected[ii] = leak ? "LEAK" : "NO LEAK";
-                if (ii == 0) {
-                  throw new RuntimeException(
-                      "Exception from cleanup action to verify if the cleaner thread would survive"
-                  );
-                }
-              }
-          )
-      );
-    }
-    assertEquals(
-        list.size(),
-        t.getWatchedCount(),
-        "All objects are strongly-reachable, so getWatchedCount should reflect it"
-    );
-
-    assertTrue(t.isThreadRunning(),
-        "cleanup thread should be running, and it should wait for the leaks");
-
-    cleaners.get(1).clean();
-
-    assertEquals(
-        list.size() - 1,
-        t.getWatchedCount(),
-        "One object has been released properly, so getWatchedCount should reflect it"
-    );
-
-    list.set(0, null);
-    System.gc();
-    System.gc();
-
-    Await.until(
-        "One object was released, and another one has leaked, so getWatchedCount should reflect it",
-        ofSeconds(5),
-        () -> t.getWatchedCount() == list.size() - 2
-    );
-
-    list.clear();
-    System.gc();
-    System.gc();
-
-    Await.until(
-        "The cleanup thread should detect leaks and terminate within 5-10 seconds after GC",
-        ofSeconds(10),
-        () -> !t.isThreadRunning()
-    );
-
-    assertEquals(
-        Arrays.asList("LEAK", "NO LEAK", "LEAK").toString(),
-        Arrays.asList(collected).toString(),
-        "Second object has been released properly, so it should be reported as NO LEAK"
-    );
-  }
-
-  @Test
-  void getThread() throws InterruptedException {
-    String threadName = UUID.randomUUID().toString();
-    LazyCleaner t = new LazyCleaner(ofSeconds(5), threadName);
-    List<Object> list = new ArrayList<>();
-    list.add(new Object());
-    LazyCleaner.Cleanable<IllegalStateException> cleanable =
-        t.register(
-            list.get(0),
-            leak -> {
-              throw new IllegalStateException("test exception from CleaningAction");
-            }
-        );
-    assertTrue(t.isThreadRunning(),
-        "cleanup thread should be running, and it should wait for the leaks");
-    Thread thread = getThreadByName(threadName);
-    thread.interrupt();
-    Await.until(
-        "The cleanup thread should ignore the interrupt since there's one object to monitor",
-        ofSeconds(10),
-        () -> !thread.isInterrupted()
-    );
-    assertThrows(
-        IllegalStateException.class,
-        cleanable::clean,
-        "Exception from cleanable.clean() should be rethrown"
-    );
-    thread.interrupt();
-    Await.until(
-        "The cleanup thread should exit shortly after interrupt as there's no leaks to monitor",
-        ofSeconds(1),
-        () -> !t.isThreadRunning()
-    );
-  }
-
-  public static Thread getThreadByName(String threadName) {
-    for (Thread t : Thread.getAllStackTraces().keySet()) {
-      if (t.getName().equals(threadName)) {
-        return t;
-      }
-    }
-    throw new IllegalStateException("Cleanup thread  " + threadName + " not found");
-  }
-}
diff --git a/pgjdbc/src/test/java/org/postgresql/util/NumberParserTest.java b/pgjdbc/src/test/java/org/postgresql/util/NumberParserTest.java
deleted file mode 100644
index 84a0fbd..0000000
--- a/pgjdbc/src/test/java/org/postgresql/util/NumberParserTest.java
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Copyright (c) 2023, PostgreSQL Global Development Group
- * See the LICENSE file in the project root for more information.
- */
-
-package org.postgresql.util;
-
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.fail;
-
-import org.junit.jupiter.api.Test;
-
-import java.util.ArrayList;
-import java.util.List;
-
-class NumberParserTest {
-  @Test
-  void getFastLong_normalLongs() {
-    List<Long> tests = new ArrayList<>();
-    for (long base : new long[]{0, 42, 65536, -65536, Long.MAX_VALUE}) {
-      for (int diff = -10; diff <= 10; diff++) {
-        tests.add(base + diff);
-      }
-    }
-
-    for (Long test : tests) {
-      assertGetLongResult(Long.toString(test), test);
-    }
-  }
-
-  @Test
-  void getFastLong_discardsFractionalPart() {
-    assertGetLongResult("234.435", 234);
-    assertGetLongResult("-234234.", -234234);
-  }
-
-  @Test
-  void getFastLong_failOnIncorrectStrings() {
-    assertGetLongFail("");
-    assertGetLongFail("-234.12542.");
-    assertGetLongFail(".");
-    assertGetLongFail("-.");
-    assertGetLongFail(Long.toString(Long.MIN_VALUE).substring(1));
-  }
-
-  private void assertGetLongResult(String s, long expected) {
-    try {
-      assertEquals(
-          expected,
-          NumberParser.getFastLong(s.getBytes(), Long.MIN_VALUE, Long.MAX_VALUE),
-          "string \"" + s + "\" parsed well to number " + expected
-      );
-    } catch (NumberFormatException nfe) {
-      fail("failed to parse(NumberFormatException) string \"" + s + "\", expected result " + expected);
-    }
-  }
-
-  private void assertGetLongFail(String s) {
-    try {
-      long ret = NumberParser.getFastLong(s.getBytes(), Long.MIN_VALUE, Long.MAX_VALUE);
-      fail("Expected NumberFormatException on parsing \"" + s + "\", but result: " + ret);
-    } catch (NumberFormatException nfe) {
-      // ok
-    }
-  }
-}
diff --git a/pgjdbc/src/test/java/org/postgresql/util/OSUtilTest.java b/pgjdbc/src/test/java/org/postgresql/util/OSUtilTest.java
deleted file mode 100644
index 6c020e8..0000000
--- a/pgjdbc/src/test/java/org/postgresql/util/OSUtilTest.java
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (c) 2021, PostgreSQL Global Development Group
- * See the LICENSE file in the project root for more information.
- */
-
-package org.postgresql.util;
-
-import static org.junit.jupiter.api.Assertions.assertEquals;
-
-import org.junit.jupiter.api.Test;
-import uk.org.webcompere.systemstubs.environment.EnvironmentVariables;
-import uk.org.webcompere.systemstubs.properties.SystemProperties;
-import uk.org.webcompere.systemstubs.resource.Resources;
-
-import java.io.File;
-
-@StubEnvironmentAndProperties
-class OSUtilTest {
-
-  @Test
-  void getUserConfigRootDirectory() throws Exception {
-    // windows
-    Resources.with(new EnvironmentVariables("APPDATA", "C:\\Users\\realuser\\AppData\\Roaming"),
-        new SystemProperties("os.name", "Windows 10")).execute(() -> {
-          String result = OSUtil.getUserConfigRootDirectory();
-          assertEquals("C:\\Users\\realuser\\AppData\\Roaming" + File.separator + "postgresql", result);
-        }
-    );
-    // linux
-    Resources.with(new SystemProperties("os.name", "Linux", "user.home", "/home/realuser")).execute(() -> {
-          String result = OSUtil.getUserConfigRootDirectory();
-          assertEquals("/home/realuser", result);
-        }
-    );
-  }
-}
diff --git a/pgjdbc/src/test/java/org/postgresql/util/PGPropertyUtilTest.java b/pgjdbc/src/test/java/org/postgresql/util/PGPropertyUtilTest.java
deleted file mode 100644
index b5b7867..0000000
--- a/pgjdbc/src/test/java/org/postgresql/util/PGPropertyUtilTest.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Copyright (c) 2021, PostgreSQL Global Development Group
- * See the LICENSE file in the project root for more information.
- */
-
-package org.postgresql.util;
-
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertFalse;
-import static org.junit.jupiter.api.Assertions.assertTrue;
-
-import org.postgresql.PGProperty;
-
-import org.junit.jupiter.api.Test;
-
-import java.util.Properties;
-
-class PGPropertyUtilTest {
-
-  @Test
-  void propertiesConsistencyCheck() {
-    // PGPORT
-    Properties properties = new Properties();
-    PGProperty.PG_PORT.set(properties, "0");
-    assertFalse(PGPropertyUtil.propertiesConsistencyCheck(properties));
-    PGProperty.PG_PORT.set(properties, "1");
-    assertTrue(PGPropertyUtil.propertiesConsistencyCheck(properties));
-    PGProperty.PG_PORT.set(properties, "5432");
-    assertTrue(PGPropertyUtil.propertiesConsistencyCheck(properties));
-    PGProperty.PG_PORT.set(properties, "65535");
-    assertTrue(PGPropertyUtil.propertiesConsistencyCheck(properties));
-    PGProperty.PG_PORT.set(properties, "65536");
-    assertFalse(PGPropertyUtil.propertiesConsistencyCheck(properties));
-    PGProperty.PG_PORT.set(properties, "abcdef");
-    assertFalse(PGPropertyUtil.propertiesConsistencyCheck(properties));
-    // any other not handled
-    properties = new Properties();
-    properties.setProperty("not-handled-key", "not-handled-value");
-    assertTrue(PGPropertyUtil.propertiesConsistencyCheck(properties));
-  }
-
-  // data for next two test methods
-  private static final String[][] TRANSLATION_TABLE = {
-      {"allowEncodingChanges", "allowEncodingChanges"},
-      {"port", "PGPORT"},
-      {"host", "PGHOST"},
-      {"dbname", "PGDBNAME"},
-  };
-
-  @Test
-  void translatePGServiceToPGProperty() {
-    for (String[] row : TRANSLATION_TABLE) {
-      assertEquals(row[1], PGPropertyUtil.translatePGServiceToPGProperty(row[0]));
-    }
-  }
-
-  @Test
-  void translatePGPropertyToPGService() {
-    for (String[] row : TRANSLATION_TABLE) {
-      assertEquals(row[0], PGPropertyUtil.translatePGPropertyToPGService(row[1]));
-    }
-  }
-}
diff --git a/pgjdbc/src/test/java/org/postgresql/util/PGbyteaTest.java b/pgjdbc/src/test/java/org/postgresql/util/PGbyteaTest.java
deleted file mode 100644
index 3a32f1d..0000000
--- a/pgjdbc/src/test/java/org/postgresql/util/PGbyteaTest.java
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Copyright (c) 2021, PostgreSQL Global Development Group
- * See the LICENSE file in the project root for more information.
- */
-
-package org.postgresql.util;
-
-import static org.junit.jupiter.api.Assertions.assertArrayEquals;
-
-import org.junit.jupiter.api.Test;
-
-import java.sql.SQLException;
-import java.util.Random;
-
-class PGbyteaTest {
-
-  private static final byte[] HEX_DIGITS_U = new byte[]{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B',
-      'C', 'D', 'E', 'F'};
-  private static final byte[] HEX_DIGITS_L = new byte[]{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b',
-      'c', 'd', 'e', 'f'};
-
-  @Test
-  void hexDecode_lower() throws SQLException {
-    final byte[] data = new byte[1023];
-    new Random(7).nextBytes(data);
-    final byte[] encoded = hexEncode(data, HEX_DIGITS_L);
-    final byte[] decoded = PGbytea.toBytes(encoded);
-    assertArrayEquals(data, decoded);
-  }
-
-  @Test
-  void hexDecode_upper() throws SQLException {
-    final byte[] data = new byte[9513];
-    new Random(-8).nextBytes(data);
-    final byte[] encoded = hexEncode(data, HEX_DIGITS_U);
-    final byte[] decoded = PGbytea.toBytes(encoded);
-    assertArrayEquals(data, decoded);
-  }
-
-  private static byte[] hexEncode(byte[] data, byte[] hexDigits) {
-
-    // the string created will have 2 characters for each byte.
-    // and 2 lead characters to indicate hex encoding
-    final byte[] encoded = new byte[2 + (data.length << 1)];
-    encoded[0] = '\\';
-    encoded[1] = 'x';
-    for (int i = 0; i < data.length; i++) {
-      final int idx = (i << 1) + 2;
-      final byte b = data[i];
-      encoded[idx] = hexDigits[(b & 0xF0) >>> 4];
-      encoded[idx + 1] = hexDigits[b & 0x0F];
-    }
-    return encoded;
-  }
-}
diff --git a/pgjdbc/src/test/java/org/postgresql/util/PGtokenizerTest.java b/pgjdbc/src/test/java/org/postgresql/util/PGtokenizerTest.java
deleted file mode 100644
index 26926ff..0000000
--- a/pgjdbc/src/test/java/org/postgresql/util/PGtokenizerTest.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright (c) 2020, PostgreSQL Global Development Group
- * See the LICENSE file in the project root for more information.
- */
-
-package org.postgresql.util;
-
-import static org.junit.jupiter.api.Assertions.assertEquals;
-
-import org.junit.jupiter.api.Test;
-
-class PGtokenizerTest {
-
-  @Test
-  void tokenize() {
-    PGtokenizer pGtokenizer = new PGtokenizer("1,2EC1830300027,1,,", ',');
-    assertEquals(5, pGtokenizer.getSize());
-  }
-
-  @Test
-  void tokenize2() {
-    PGtokenizer pGtokenizer = new PGtokenizer(",,d,\"f(10\",\"(mime,pdf,pdf)\",test,2018-10-11,1010", ',');
-    assertEquals(8, pGtokenizer.getSize());
-  }
-
-  @Test
-  void tokenize3() {
-    PGtokenizer pGtokenizer = new PGtokenizer(",,d,\"f)10\",\"(mime,pdf,pdf)\",test,2018-10-11,1010", ',');
-    assertEquals(8, pGtokenizer.getSize());
-  }
-
-  @Test
-  void tokenize4() {
-    PGtokenizer pGtokenizer = new PGtokenizer(",,d,\"f()10\",\"(mime,pdf,pdf)\",test,2018-10-11,1010", ',');
-    assertEquals(8, pGtokenizer.getSize());
-  }
-
-  @Test
-  void removePara() {
-    String string = PGtokenizer.removePara("(1,2EC1830300027,1,,)");
-    assertEquals("1,2EC1830300027,1,,", string);
-  }
-
-}
diff --git a/pgjdbc/src/test/java/org/postgresql/util/ReaderInputStreamTest.java b/pgjdbc/src/test/java/org/postgresql/util/ReaderInputStreamTest.java
deleted file mode 100644
index cf33a98..0000000
--- a/pgjdbc/src/test/java/org/postgresql/util/ReaderInputStreamTest.java
+++ /dev/null
@@ -1,252 +0,0 @@
-/*
- * Copyright (c) 2016, PostgreSQL Global Development Group
- * See the LICENSE file in the project root for more information.
- */
-
-package org.postgresql.util;
-
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertThrows;
-
-import org.junit.jupiter.api.Test;
-
-import java.io.ByteArrayInputStream;
-import java.io.CharArrayReader;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.InputStreamReader;
-import java.io.Reader;
-import java.io.StringReader;
-import java.nio.charset.MalformedInputException;
-import java.util.Arrays;
-
-class ReaderInputStreamTest {
-  // 132878 = U+2070E - chosen because it is the first supplementary character
-  // in the International Ideographic Core (IICore)
-  // see http://www.i18nguy.com/unicode/supplementary-test.html for further explanation
-
-  // Character.highSurrogate(132878) = 0xd841
-  private static final char LEADING_SURROGATE = 0xd841;
-
-  // Character.lowSurrogate(132878) = 0xdf0e
-  private static final char TRAILING_SURROGATE = 0xdf0e;
-
-  @Test
-  @SuppressWarnings("nullability")
-  void NullReaderTest() {
-    assertThrows(IllegalArgumentException.class, () -> {
-      new ReaderInputStream(null);
-    });
-  }
-
-  @Test
-  void cbufTooSmallReaderTest() {
-    assertThrows(IllegalArgumentException.class, () -> {
-      new ReaderInputStream(new StringReader("abc"), 1);
-    });
-  }
-
-  private static void read(InputStream is, int... expected) throws IOException {
-    byte[] actual = new byte[4];
-    Arrays.fill(actual, (byte) 0x00);
-    int nActual = is.read(actual);
-    int[] actualInts = new int[4];
-    for (int i = 0; i < actual.length; i++) {
-      actualInts[i] = actual[i] & 0xff;
-    }
-    if (expected.length > 0) {
-      // Ensure "expected" has 4 bytes
-      expected = Arrays.copyOf(expected, 4);
-      assertEquals(Arrays.toString(expected), Arrays.toString(actualInts));
-    } else {
-      assertEquals(-1, nActual, "should be end-of-stream");
-      is.close();
-    }
-  }
-
-  @Test
-  void SimpleTest() throws IOException {
-    char[] chars = {'a', 'b', 'c'};
-    Reader reader = new CharArrayReader(chars);
-    InputStream is = new ReaderInputStream(reader);
-    read(is, 0x61, 0x62, 0x63);
-    read(is);
-  }
-
-  @Test
-  void inputSmallerThanCbufsizeTest() throws IOException {
-    char[] chars = {'a'};
-    Reader reader = new CharArrayReader(chars);
-    InputStream is = new ReaderInputStream(reader, 2);
-    read(is, 0x61);
-    read(is);
-  }
-
-  @Test
-  void tooManyReadsTest() throws IOException {
-    char[] chars = {'a'};
-    Reader reader = new CharArrayReader(chars);
-    InputStream is = new ReaderInputStream(reader, 2);
-    read(is, 0x61);
-    assertEquals(-1, is.read(), "should be end-of-stream");
-    assertEquals(-1, is.read(), "should be end-of-stream");
-    assertEquals(-1, is.read(), "should be end-of-stream");
-    is.close();
-  }
-
-  @Test
-  void surrogatePairSpansCharBufBoundaryTest() throws IOException {
-    char[] chars = {'a', LEADING_SURROGATE, TRAILING_SURROGATE};
-    Reader reader = new CharArrayReader(chars);
-    InputStream is = new ReaderInputStream(reader, 2);
-    read(is, 0x61, 0xF0, 0xA0, 0x9C);
-    read(is, 0x8E);
-    read(is);
-  }
-
-  @Test
-  void invalidInputTest() throws IOException {
-    assertThrows(MalformedInputException.class, () -> {
-      char[] chars = {'a', LEADING_SURROGATE, LEADING_SURROGATE};
-      Reader reader = new CharArrayReader(chars);
-      InputStream is = new ReaderInputStream(reader, 2);
-      read(is);
-    });
-  }
-
-  @Test
-  void unmatchedLeadingSurrogateInputTest() throws IOException {
-    assertThrows(MalformedInputException.class, () -> {
-      char[] chars = {LEADING_SURROGATE};
-      Reader reader = new CharArrayReader(chars);
-      InputStream is = new ReaderInputStream(reader, 2);
-      read(is, 0x00);
-    });
-  }
-
-  @Test
-  void unmatchedTrailingSurrogateInputTest() throws IOException {
-    assertThrows(MalformedInputException.class, () -> {
-      char[] chars = {TRAILING_SURROGATE};
-      Reader reader = new CharArrayReader(chars);
-      InputStream is = new ReaderInputStream(reader, 2);
-      read(is);
-    });
-  }
-
-  @Test
-  @SuppressWarnings("nullness")
-  void nullArrayReadTest() throws IOException {
-    assertThrows(NullPointerException.class, () -> {
-      Reader reader = new StringReader("abc");
-      InputStream is = new ReaderInputStream(reader);
-      is.read(null, 0, 4);
-    });
-  }
-
-  @Test
-  void invalidOffsetArrayReadTest() throws IOException {
-    assertThrows(IndexOutOfBoundsException.class, () -> {
-      Reader reader = new StringReader("abc");
-      InputStream is = new ReaderInputStream(reader);
-      byte[] bytes = new byte[4];
-      is.read(bytes, 5, 4);
-    });
-  }
-
-  @Test
-  void negativeOffsetArrayReadTest() throws IOException {
-    assertThrows(IndexOutOfBoundsException.class, () -> {
-      Reader reader = new StringReader("abc");
-      InputStream is = new ReaderInputStream(reader);
-      byte[] bytes = new byte[4];
-      is.read(bytes, -1, 4);
-    });
-  }
-
-  @Test
-  void invalidLengthArrayReadTest() throws IOException {
-    assertThrows(IndexOutOfBoundsException.class, () -> {
-      Reader reader = new StringReader("abc");
-      InputStream is = new ReaderInputStream(reader);
-      byte[] bytes = new byte[4];
-      is.read(bytes, 1, 4);
-    });
-  }
-
-  @Test
-  void negativeLengthArrayReadTest() throws IOException {
-    assertThrows(IndexOutOfBoundsException.class, () -> {
-      Reader reader = new StringReader("abc");
-      InputStream is = new ReaderInputStream(reader);
-      byte[] bytes = new byte[4];
-      is.read(bytes, 1, -2);
-    });
-  }
-
-  @Test
-  void zeroLengthArrayReadTest() throws IOException {
-    Reader reader = new StringReader("abc");
-    InputStream is = new ReaderInputStream(reader);
-    byte[] bytes = new byte[4];
-    assertEquals(0, is.read(bytes, 1, 0), "requested 0 byte read");
-  }
-
-  @Test
-  void singleCharArrayReadTest() throws IOException {
-    Reader reader = new SingleCharPerReadReader(LEADING_SURROGATE, TRAILING_SURROGATE);
-    InputStream is = new ReaderInputStream(reader);
-    read(is, 0xF0, 0xA0, 0x9C, 0x8E);
-    read(is);
-  }
-
-  @Test
-  void malformedSingleCharArrayReadTest() throws IOException {
-    assertThrows(MalformedInputException.class, () -> {
-      Reader reader = new SingleCharPerReadReader(LEADING_SURROGATE, LEADING_SURROGATE);
-      InputStream is = new ReaderInputStream(reader);
-      read(is, 0xF0, 0xA0, 0x9C, 0x8E);
-    });
-  }
-
-  @Test
-  void readsEqualToBlockSizeTest() throws Exception {
-    final int blockSize = 8 * 1024;
-    final int dataSize = blockSize + 57;
-    final byte[] data = new byte[dataSize];
-    final byte[] buffer = new byte[blockSize];
-
-    InputStreamReader isr = new InputStreamReader(new ByteArrayInputStream(data), "UTF-8");
-    ReaderInputStream r = new ReaderInputStream(isr, blockSize);
-
-    int total = 0;
-
-    total += r.read(buffer, 0, blockSize);
-    total += r.read(buffer, 0, blockSize);
-
-    assertEquals(dataSize, total, "Data not read completely: missing " + (dataSize - total) + " bytes");
-  }
-
-  private static class SingleCharPerReadReader extends Reader {
-    private final char[] data;
-    private int i;
-
-    private SingleCharPerReadReader(char... data) {
-      this.data = data;
-    }
-
-    @Override
-    public int read(char[] cbuf, int off, int len) throws IOException {
-      if (i < data.length) {
-        cbuf[off] = data[i++];
-        return 1;
-      }
-
-      return -1;
-    }
-
-    @Override
-    public void close() throws IOException {
-    }
-  }
-}
diff --git a/pgjdbc/src/test/java/org/postgresql/util/TestLogHandler.java b/pgjdbc/src/test/java/org/postgresql/util/TestLogHandler.java
deleted file mode 100644
index 30850a1..0000000
--- a/pgjdbc/src/test/java/org/postgresql/util/TestLogHandler.java
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright (c) 2020, PostgreSQL Global Development Group
- * See the LICENSE file in the project root for more information.
- */
-
-package org.postgresql.util;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Queue;
-import java.util.concurrent.ConcurrentLinkedQueue;
-import java.util.logging.Handler;
-import java.util.logging.LogRecord;
-import java.util.regex.Pattern;
-
-public class TestLogHandler extends Handler {
-  public Queue<LogRecord> records = new ConcurrentLinkedQueue<>();
-
-  @Override
-  public void publish(LogRecord record) {
-    records.add(record);
-  }
-
-  @Override
-  public void flush() {
-  }
-
-  @Override
-  public void close() throws SecurityException {
-  }
-
-  public List<LogRecord> getRecordsMatching(Pattern messagePattern) {
-    List<LogRecord> matches = new ArrayList<>();
-    for (LogRecord r: this.records) {
-      String message = r.getMessage();
-      if (message != null && messagePattern.matcher(message).find()) {
-        matches.add(r);
-      }
-    }
-    return matches;
-  }
-}
diff --git a/pgjdbc/src/test/java/org/postgresql/util/UnusualBigDecimalByteConverterTest.java b/pgjdbc/src/test/java/org/postgresql/util/UnusualBigDecimalByteConverterTest.java
deleted file mode 100644
index 1cf056e..0000000
--- a/pgjdbc/src/test/java/org/postgresql/util/UnusualBigDecimalByteConverterTest.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright (c) 2020, PostgreSQL Global Development Group
- * See the LICENSE file in the project root for more information.
- */
-
-package org.postgresql.util;
-
-import static org.junit.jupiter.api.Assertions.assertEquals;
-
-import org.junit.jupiter.api.Test;
-
-import java.math.BigDecimal;
-
-/**
- * Tests unusual binary representations of numeric values.
- * @author Brett Okken
- */
-class UnusualBigDecimalByteConverterTest {
-
-  /**
-   * Typically a number < 1 would have sections of leading '0' values represented in weight
-   * rather than including as short values.
-   */
-  @Test
-  void test_4_leading_0() {
-    //len 2
-    //weight -1
-    //scale 5
-    final byte[] data = new byte[]{0, 2, -1, -1, 0, 0, 0, 5, 0, 0, 23, 112};
-    final BigDecimal actual = (BigDecimal) ByteConverter.numeric(data);
-    assertEquals(new BigDecimal("0.00006"), actual);
-  }
-}
diff --git a/pgjdbc/src/test/resources/test-file.xml b/pgjdbc/src/test/resources/test-file.xml
index 01f6fd4..757d66a 100644
--- a/pgjdbc/src/test/resources/test-file.xml
+++ b/pgjdbc/src/test/resources/test-file.xml
@@ -1,6 +1,6 @@
 <?xml version="1.0"?>
 <test>
-  <file>
-    <name value="test"/>
-  </file>
+    <file>
+        <name value="test"/>
+    </file>
 </test>
diff --git a/scram-client/src/test/java/com/ongres/scram/client/test/RfcExampleSha1.java b/scram-client/src/test/java/com/ongres/scram/client/test/RfcExampleSha1.java
new file mode 100644
index 0000000..eb9bd02
--- /dev/null
+++ b/scram-client/src/test/java/com/ongres/scram/client/test/RfcExampleSha1.java
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017 OnGres, Inc.
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+package com.ongres.scram.client.test;
+
+/**
+ * Constants for examples of the RFC for SHA-1 tests.
+ */
+public class RfcExampleSha1 {
+  public static final String USER = "user";
+  public static final String PASSWORD = "pencil";
+  public static final String CLIENT_NONCE = "fyko+d2lbbFgONRv9qkxdawL";
+  public static final String CLIENT_FIRST_MESSAGE_WITHOUT_GS2_HEADER =
+      "n=" + USER + ",r=" + CLIENT_NONCE;
+  public static final String CLIENT_FIRST_MESSAGE =
+      "n,," + CLIENT_FIRST_MESSAGE_WITHOUT_GS2_HEADER;
+  public static final String SERVER_SALT = "QSXCR+Q6sek8bf92";
+  public static final int SERVER_ITERATIONS = 4096;
+  public static final String SERVER_NONCE = "3rfcNHYJY1ZVvWVs7j";
+  public static final String FULL_NONCE = CLIENT_NONCE + SERVER_NONCE;
+  public static final String SERVER_FIRST_MESSAGE = "r=" + FULL_NONCE + ",s=" + SERVER_SALT
+      + ",i=" + SERVER_ITERATIONS;
+  public static final String GS2_HEADER_BASE64 = "biws";
+  public static final String CLIENT_FINAL_MESSAGE_WITHOUT_PROOF = "c=" + GS2_HEADER_BASE64
+      + ",r=" + FULL_NONCE;
+  public static final String AUTH_MESSAGE = CLIENT_FIRST_MESSAGE_WITHOUT_GS2_HEADER + ","
+      + SERVER_FIRST_MESSAGE + ","
+      + CLIENT_FINAL_MESSAGE_WITHOUT_PROOF;
+  public static final String CLIENT_FINAL_MESSAGE_PROOF = "v0X8v3Bz2T0CJGbJQyF0X+HI4Ts=";
+  public static final String CLIENT_FINAL_MESSAGE = CLIENT_FINAL_MESSAGE_WITHOUT_PROOF
+      + ",p=" + CLIENT_FINAL_MESSAGE_PROOF;
+  public static final String SERVER_FINAL_MESSAGE = "v=rmF9pqV8S7suAoZWja4dJRkFsKQ=";
+}
diff --git a/scram-client/src/test/java/com/ongres/scram/client/ScramClientTest.java b/scram-client/src/test/java/com/ongres/scram/client/test/ScramClientTest.java
similarity index 96%
rename from scram-client/src/test/java/com/ongres/scram/client/ScramClientTest.java
rename to scram-client/src/test/java/com/ongres/scram/client/test/ScramClientTest.java
index 2c92e17..e7044f1 100644
--- a/scram-client/src/test/java/com/ongres/scram/client/ScramClientTest.java
+++ b/scram-client/src/test/java/com/ongres/scram/client/test/ScramClientTest.java
@@ -21,23 +21,23 @@
  */
 
 
-package com.ongres.scram.client;
-
-
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
+package com.ongres.scram.client.test;
 
+import com.ongres.scram.client.NonceSupplier;
+import com.ongres.scram.client.ScramClient;
 import java.util.Arrays;
 
-import org.junit.Test;
-
 import com.ongres.scram.common.ScramMechanisms;
 import com.ongres.scram.common.stringprep.StringPreparations;
 import com.ongres.scram.common.util.CryptoUtil;
+import org.junit.jupiter.api.Test;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
 
 
 public class ScramClientTest {
+
     @Test
     public void getValid() {
         ScramClient client1 = ScramClient
diff --git a/scram-client/src/test/java/com/ongres/scram/client/ScramSessionTest.java b/scram-client/src/test/java/com/ongres/scram/client/test/ScramSessionTest.java
similarity index 75%
rename from scram-client/src/test/java/com/ongres/scram/client/ScramSessionTest.java
rename to scram-client/src/test/java/com/ongres/scram/client/test/ScramSessionTest.java
index 40b670d..ccfc062 100644
--- a/scram-client/src/test/java/com/ongres/scram/client/ScramSessionTest.java
+++ b/scram-client/src/test/java/com/ongres/scram/client/test/ScramSessionTest.java
@@ -19,32 +19,35 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  */
+package com.ongres.scram.client.test;
 
-
-package com.ongres.scram.client;
-
-
+import com.ongres.scram.client.ScramClient;
+import com.ongres.scram.client.ScramSession;
 import com.ongres.scram.common.exception.ScramInvalidServerSignatureException;
 import com.ongres.scram.common.exception.ScramParseException;
 import com.ongres.scram.common.exception.ScramServerErrorException;
 import com.ongres.scram.common.stringprep.StringPreparations;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
-import static com.ongres.scram.common.RfcExampleSha1.*;
-import static org.junit.Assert.*;
+import static com.ongres.scram.client.test.RfcExampleSha1.CLIENT_FINAL_MESSAGE;
+import static com.ongres.scram.client.test.RfcExampleSha1.CLIENT_FIRST_MESSAGE;
+import static com.ongres.scram.client.test.RfcExampleSha1.CLIENT_NONCE;
+import static com.ongres.scram.client.test.RfcExampleSha1.PASSWORD;
+import static com.ongres.scram.client.test.RfcExampleSha1.SERVER_FINAL_MESSAGE;
+import static com.ongres.scram.client.test.RfcExampleSha1.SERVER_FIRST_MESSAGE;
+import static com.ongres.scram.client.test.RfcExampleSha1.SERVER_ITERATIONS;
+import static com.ongres.scram.client.test.RfcExampleSha1.SERVER_SALT;
+import static com.ongres.scram.client.test.RfcExampleSha1.USER;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 public class ScramSessionTest {
+
     private final ScramClient scramClient = ScramClient
             .channelBinding(ScramClient.ChannelBinding.NO)
             .stringPreparation(StringPreparations.NO_PREPARATION)
             .selectMechanismBasedOnServerAdvertised("SCRAM-SHA-1")
             .nonceSupplier
-            (new NonceSupplier() {
-                @Override
-                public String get() {
-                    return CLIENT_NONCE;
-                }
-            })
+            (() -> CLIENT_NONCE)
             .setup();
 
     @Test
@@ -58,10 +61,8 @@ public class ScramSessionTest {
         );
         assertEquals(SERVER_SALT, serverFirstProcessor.getSalt());
         assertEquals(SERVER_ITERATIONS, serverFirstProcessor.getIteration());
-
         ScramSession.ClientFinalProcessor clientFinalProcessor = serverFirstProcessor.clientFinalProcessor(PASSWORD);
         assertEquals(CLIENT_FINAL_MESSAGE, clientFinalProcessor.clientFinalMessage());
-
         clientFinalProcessor.receiveServerFinalMessage(SERVER_FINAL_MESSAGE);
     }
 }
diff --git a/scram-common/src/test/java/com/ongres/scram/common/stringprep/SaslPrepTest.java b/scram-common/src/test/java/com/ongres/scram/common/stringprep/SaslPrepTest.java
deleted file mode 100644
index 4d32f86..0000000
--- a/scram-common/src/test/java/com/ongres/scram/common/stringprep/SaslPrepTest.java
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Copyright 2019, OnGres.
- *
- * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
- * following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
- * following disclaimer in the documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
- * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
- * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-package com.ongres.scram.common.stringprep;
-
-import com.ongres.saslprep.SaslPrep;
-import com.ongres.stringprep.StringPrep;
-import java.io.IOException;
-
-import org.junit.Assert;
-import org.junit.Test;
-
-public class SaslPrepTest {
-
-    @Test
-    public void rfc4013Examples() throws IOException {
-        // Taken from https://tools.ietf.org/html/rfc4013#section-3
-        Assert.assertEquals("IX", SaslPrep.saslPrep("I\u00ADX", true));
-        Assert.assertEquals("user", SaslPrep.saslPrep("user", true));
-        Assert.assertEquals("USER", SaslPrep.saslPrep("USER", true));
-        Assert.assertEquals("a", SaslPrep.saslPrep("\u00AA", true));
-        Assert.assertEquals("IX", SaslPrep.saslPrep("\u2168", true));
-        try {
-            SaslPrep.saslPrep("\u0007", true);
-            Assert.fail("Should throw IllegalArgumentException");
-        } catch (IllegalArgumentException e) {
-            Assert.assertEquals("Prohibited character ", e.getMessage());
-        }
-        try {
-            SaslPrep.saslPrep("\u0627\u0031", true);
-            Assert.fail("Should thow IllegalArgumentException");
-        } catch (IllegalArgumentException e) {
-            Assert.assertEquals("The string contains any RandALCat character but a RandALCat character "
-                    + "is not the first and the last characters", e.getMessage());
-        }
-    }
-
-    @Test
-    public void unassigned() throws IOException {
-        int unassignedCodepoint;
-        for (unassignedCodepoint = Character.MAX_CODE_POINT;
-             unassignedCodepoint >= Character.MIN_CODE_POINT;
-             unassignedCodepoint--) {
-            if (!Character.isDefined(unassignedCodepoint) && 
-                    !StringPrep.prohibitionAsciiControl(unassignedCodepoint) &&
-                    !StringPrep.prohibitionAsciiSpace(unassignedCodepoint) &&
-                    !StringPrep.prohibitionChangeDisplayProperties(unassignedCodepoint) &&
-                    !StringPrep.prohibitionInappropriateCanonicalRepresentation(unassignedCodepoint) &&
-                    !StringPrep.prohibitionInappropriatePlainText(unassignedCodepoint) &&
-                    !StringPrep.prohibitionNonAsciiControl(unassignedCodepoint) &&
-                    !StringPrep.prohibitionNonAsciiSpace(unassignedCodepoint) &&
-                    !StringPrep.prohibitionNonCharacterCodePoints(unassignedCodepoint) &&
-                    !StringPrep.prohibitionPrivateUse(unassignedCodepoint) &&
-                    !StringPrep.prohibitionSurrogateCodes(unassignedCodepoint) &&
-                    !StringPrep.prohibitionTaggingCharacters(unassignedCodepoint)) {
-                break;
-            }
-        }
-        String withUnassignedChar = "abc"+new String(Character.toChars(unassignedCodepoint));
-        //Assert.assertEquals(withUnassignedChar, saslPrepQuery(withUnassignedChar));
-        try {
-            SaslPrep.saslPrep(withUnassignedChar, true);
-            Assert.fail("Should thow IllegalArgumentException");
-        } catch (IllegalArgumentException e) {
-            Assert.assertEquals("Prohibited character 󯿽", e.getMessage());
-        }
-    }
-}
\ No newline at end of file
diff --git a/scram-common/src/test/java/com/ongres/scram/common/RfcExampleSha1.java b/scram-common/src/test/java/com/ongres/scram/common/test/RfcExampleSha1.java
similarity index 98%
rename from scram-common/src/test/java/com/ongres/scram/common/RfcExampleSha1.java
rename to scram-common/src/test/java/com/ongres/scram/common/test/RfcExampleSha1.java
index c0c3f97..1629e75 100644
--- a/scram-common/src/test/java/com/ongres/scram/common/RfcExampleSha1.java
+++ b/scram-common/src/test/java/com/ongres/scram/common/test/RfcExampleSha1.java
@@ -21,7 +21,7 @@
  */
 
 
-package com.ongres.scram.common;
+package com.ongres.scram.common.test;
 
 
 public class RfcExampleSha1 {
diff --git a/scram-common/src/test/java/com/ongres/scram/common/RfcExampleSha256.java b/scram-common/src/test/java/com/ongres/scram/common/test/RfcExampleSha256.java
similarity index 98%
rename from scram-common/src/test/java/com/ongres/scram/common/RfcExampleSha256.java
rename to scram-common/src/test/java/com/ongres/scram/common/test/RfcExampleSha256.java
index be05cd3..0c88238 100644
--- a/scram-common/src/test/java/com/ongres/scram/common/RfcExampleSha256.java
+++ b/scram-common/src/test/java/com/ongres/scram/common/test/RfcExampleSha256.java
@@ -21,7 +21,7 @@
  */
 
 
-package com.ongres.scram.common;
+package com.ongres.scram.common.test;
 
 
 public class RfcExampleSha256 {
diff --git a/scram-common/src/test/java/com/ongres/scram/common/ScramAttributeValueTest.java b/scram-common/src/test/java/com/ongres/scram/common/test/ScramAttributeValueTest.java
similarity index 87%
rename from scram-common/src/test/java/com/ongres/scram/common/ScramAttributeValueTest.java
rename to scram-common/src/test/java/com/ongres/scram/common/test/ScramAttributeValueTest.java
index 1046396..a894f76 100644
--- a/scram-common/src/test/java/com/ongres/scram/common/ScramAttributeValueTest.java
+++ b/scram-common/src/test/java/com/ongres/scram/common/test/ScramAttributeValueTest.java
@@ -21,17 +21,20 @@
  */
 
 
-package com.ongres.scram.common;
+package com.ongres.scram.common.test;
 
 
+import com.ongres.scram.common.ScramAttributeValue;
 import com.ongres.scram.common.exception.ScramParseException;
 import com.ongres.scram.common.message.ServerFinalMessage;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
-import static com.ongres.scram.common.RfcExampleSha1.*;
+import static com.ongres.scram.common.test.RfcExampleSha1.*;
 import static com.ongres.scram.common.ScramAttributes.CLIENT_PROOF;
 import static com.ongres.scram.common.ScramAttributes.USERNAME;
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.fail;
 
 
 public class ScramAttributeValueTest {
@@ -60,7 +63,7 @@ public class ScramAttributeValueTest {
             }
         }
 
-        assertEquals("Not every illegal value thrown ScramParseException", values.length, n);
+        assertEquals(values.length, n, "Not every illegal value thrown ScramParseException");
     }
 
     @Test
@@ -78,7 +81,7 @@ public class ScramAttributeValueTest {
             }
         }
 
-        assertEquals("Not every illegal value thrown ScramParseException", values.length, n);
+        assertEquals(values.length, n, "Not every illegal value thrown ScramParseException");
     }
 
     @Test
diff --git a/scram-common/src/test/java/com/ongres/scram/common/ScramFunctionsTest.java b/scram-common/src/test/java/com/ongres/scram/common/test/ScramFunctionsTest.java
similarity index 90%
rename from scram-common/src/test/java/com/ongres/scram/common/ScramFunctionsTest.java
rename to scram-common/src/test/java/com/ongres/scram/common/test/ScramFunctionsTest.java
index 275f00a..cd8996c 100644
--- a/scram-common/src/test/java/com/ongres/scram/common/ScramFunctionsTest.java
+++ b/scram-common/src/test/java/com/ongres/scram/common/test/ScramFunctionsTest.java
@@ -21,21 +21,21 @@
  */
 
 
-package com.ongres.scram.common;
+package com.ongres.scram.common.test;
 
 
+import com.ongres.scram.common.ScramFunctions;
+import com.ongres.scram.common.ScramMechanisms;
 import com.ongres.scram.common.bouncycastle.base64.Base64;
 import com.ongres.scram.common.stringprep.StringPreparations;
-import org.junit.Test;
 
 import java.io.UnsupportedEncodingException;
 import java.nio.charset.StandardCharsets;
-
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
+import org.junit.jupiter.api.Test;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 
 public class ScramFunctionsTest {
     private void assertBytesEqualsBase64(String expected, byte[] actual) {
@@ -164,11 +164,11 @@ public class ScramFunctionsTest {
     }
     
     private byte[] generateClientSignature() {
-        return ScramFunctions.clientSignature(ScramMechanisms.SCRAM_SHA_1, generateStoredKey(), com.ongres.scram.common.RfcExampleSha1.AUTH_MESSAGE);
+        return ScramFunctions.clientSignature(ScramMechanisms.SCRAM_SHA_1, generateStoredKey(), RfcExampleSha1.AUTH_MESSAGE);
     }
     
     private byte[] generateClientSignatureSha256() {
-        return ScramFunctions.clientSignature(ScramMechanisms.SCRAM_SHA_256, generateStoredKeySha256(), com.ongres.scram.common.RfcExampleSha256.AUTH_MESSAGE);
+        return ScramFunctions.clientSignature(ScramMechanisms.SCRAM_SHA_256, generateStoredKeySha256(), RfcExampleSha256.AUTH_MESSAGE);
     }
     
     @Test
@@ -200,11 +200,11 @@ public class ScramFunctionsTest {
     }
     
     private byte[] generateServerSignature() {
-        return ScramFunctions.serverSignature(ScramMechanisms.SCRAM_SHA_1, generateServerKey(), com.ongres.scram.common.RfcExampleSha1.AUTH_MESSAGE);
+        return ScramFunctions.serverSignature(ScramMechanisms.SCRAM_SHA_1, generateServerKey(), RfcExampleSha1.AUTH_MESSAGE);
     }
     
     private byte[] generateServerSignatureSha256() {
-        return ScramFunctions.serverSignature(ScramMechanisms.SCRAM_SHA_256, generateServerKeySha256(), com.ongres.scram.common.RfcExampleSha256.AUTH_MESSAGE);
+        return ScramFunctions.serverSignature(ScramMechanisms.SCRAM_SHA_256, generateServerKeySha256(), RfcExampleSha256.AUTH_MESSAGE);
     }
     
     @Test
@@ -221,7 +221,7 @@ public class ScramFunctionsTest {
     public void verifyClientProof() {
         assertTrue(
                 ScramFunctions.verifyClientProof(
-                        ScramMechanisms.SCRAM_SHA_1, generateClientProof(), generateStoredKey(), com.ongres.scram.common.RfcExampleSha1.AUTH_MESSAGE
+                        ScramMechanisms.SCRAM_SHA_1, generateClientProof(), generateStoredKey(), RfcExampleSha1.AUTH_MESSAGE
                 )
         );
     }
@@ -230,7 +230,7 @@ public class ScramFunctionsTest {
     public void verifyClientProofSha256() {
         assertTrue(
                 ScramFunctions.verifyClientProof(
-                        ScramMechanisms.SCRAM_SHA_256, generateClientProofSha256(), generateStoredKeySha256(), com.ongres.scram.common.RfcExampleSha256.AUTH_MESSAGE
+                        ScramMechanisms.SCRAM_SHA_256, generateClientProofSha256(), generateStoredKeySha256(), RfcExampleSha256.AUTH_MESSAGE
                 )
         );
     }
@@ -239,7 +239,7 @@ public class ScramFunctionsTest {
     public void verifyServerSignature() {
         assertTrue(
                 ScramFunctions.verifyServerSignature(
-                        ScramMechanisms.SCRAM_SHA_1, generateServerKey(), com.ongres.scram.common.RfcExampleSha1.AUTH_MESSAGE, generateServerSignature()
+                        ScramMechanisms.SCRAM_SHA_1, generateServerKey(), RfcExampleSha1.AUTH_MESSAGE, generateServerSignature()
                 )
         );
     }
@@ -248,7 +248,7 @@ public class ScramFunctionsTest {
     public void verifyServerSignatureSha256() {
         assertTrue(
                 ScramFunctions.verifyServerSignature(
-                        ScramMechanisms.SCRAM_SHA_256, generateServerKeySha256(), com.ongres.scram.common.RfcExampleSha256.AUTH_MESSAGE, generateServerSignatureSha256()
+                        ScramMechanisms.SCRAM_SHA_256, generateServerKeySha256(), RfcExampleSha256.AUTH_MESSAGE, generateServerSignatureSha256()
                 )
         );
     }
diff --git a/scram-common/src/test/java/com/ongres/scram/common/ScramMechanismsTest.java b/scram-common/src/test/java/com/ongres/scram/common/test/ScramMechanismsTest.java
similarity index 92%
rename from scram-common/src/test/java/com/ongres/scram/common/ScramMechanismsTest.java
rename to scram-common/src/test/java/com/ongres/scram/common/test/ScramMechanismsTest.java
index 8271604..64f1018 100644
--- a/scram-common/src/test/java/com/ongres/scram/common/ScramMechanismsTest.java
+++ b/scram-common/src/test/java/com/ongres/scram/common/test/ScramMechanismsTest.java
@@ -21,13 +21,12 @@
  */
 
 
-package com.ongres.scram.common;
+package com.ongres.scram.common.test;
 
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-
-import org.junit.Test;
+import com.ongres.scram.common.ScramMechanisms;
+import org.junit.jupiter.api.Test;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
 
 
 public class ScramMechanismsTest {
@@ -36,7 +35,7 @@ public class ScramMechanismsTest {
         byte[] digest;
         for(ScramMechanisms scramMechanism : ScramMechanisms.values()) {
             digest = scramMechanism.digest(new byte[0]);
-            assertNotNull("got a null digest", digest);
+            assertNotNull(digest, "got a null digest");
         }
     }
 
@@ -45,7 +44,7 @@ public class ScramMechanismsTest {
         byte[] hmac;
         for(ScramMechanisms scramMechanism : ScramMechanisms.values()) {
             hmac = scramMechanism.hmac(new byte[] { 0 }, new byte[0]);
-            assertNotNull("got a null HMAC", hmac);
+            assertNotNull(hmac, "got a null HMAC");
         }
     }
     
diff --git a/scram-common/src/test/java/com/ongres/scram/common/ScramStringFormattingTest.java b/scram-common/src/test/java/com/ongres/scram/common/test/ScramStringFormattingTest.java
similarity index 90%
rename from scram-common/src/test/java/com/ongres/scram/common/ScramStringFormattingTest.java
rename to scram-common/src/test/java/com/ongres/scram/common/test/ScramStringFormattingTest.java
index 9b15d3b..a3e35d7 100644
--- a/scram-common/src/test/java/com/ongres/scram/common/ScramStringFormattingTest.java
+++ b/scram-common/src/test/java/com/ongres/scram/common/test/ScramStringFormattingTest.java
@@ -21,13 +21,13 @@
  */
 
 
-package com.ongres.scram.common;
+package com.ongres.scram.common.test;
 
 
-import org.junit.Test;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import com.ongres.scram.common.ScramStringFormatting;
+import org.junit.jupiter.api.Test;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 
 public class ScramStringFormattingTest {
@@ -79,6 +79,6 @@ public class ScramStringFormattingTest {
             }
         }
 
-        assertTrue("Not all values produced IllegalArgumentException", n == INVALID_SASL_NAMES.length);
+        assertTrue(n == INVALID_SASL_NAMES.length, "Not all values produced IllegalArgumentException");
     }
 }
diff --git a/scram-common/src/test/java/com/ongres/scram/common/gssapi/Gs2AttributeValueTest.java b/scram-common/src/test/java/com/ongres/scram/common/test/gssapi/Gs2AttributeValueTest.java
similarity index 81%
rename from scram-common/src/test/java/com/ongres/scram/common/gssapi/Gs2AttributeValueTest.java
rename to scram-common/src/test/java/com/ongres/scram/common/test/gssapi/Gs2AttributeValueTest.java
index 2bd6ecf..02d13bc 100644
--- a/scram-common/src/test/java/com/ongres/scram/common/gssapi/Gs2AttributeValueTest.java
+++ b/scram-common/src/test/java/com/ongres/scram/common/test/gssapi/Gs2AttributeValueTest.java
@@ -20,14 +20,15 @@
  *
  */
 
+package com.ongres.scram.common.test.gssapi;
 
-package com.ongres.scram.common.gssapi;
-
-
-import org.junit.Test;
-
-import static org.junit.Assert.*;
-
+import com.ongres.scram.common.gssapi.Gs2AttributeValue;
+import com.ongres.scram.common.gssapi.Gs2Attributes;
+import org.junit.jupiter.api.Test;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.fail;
 
 public class Gs2AttributeValueTest {
     @Test
@@ -55,8 +56,7 @@ public class Gs2AttributeValueTest {
                 n++;
             }
         }
-
-        assertEquals("Not every illegal value thrown IllegalArgumentException", values.length, n);
+        assertEquals(values.length, n, "Not every illegal value thrown IllegalArgumentException");
     }
 
     @Test
@@ -71,8 +71,7 @@ public class Gs2AttributeValueTest {
                 n++;
             }
         }
-
-        assertEquals("Not every illegal value thrown IllegalArgumentException", values.length, n);
+        assertEquals(values.length, n, "Not every illegal value thrown IllegalArgumentException");
     }
 
     @Test
diff --git a/scram-common/src/test/java/com/ongres/scram/common/gssapi/Gs2HeaderTest.java b/scram-common/src/test/java/com/ongres/scram/common/test/gssapi/Gs2HeaderTest.java
similarity index 88%
rename from scram-common/src/test/java/com/ongres/scram/common/gssapi/Gs2HeaderTest.java
rename to scram-common/src/test/java/com/ongres/scram/common/test/gssapi/Gs2HeaderTest.java
index a195ea8..24a8a70 100644
--- a/scram-common/src/test/java/com/ongres/scram/common/gssapi/Gs2HeaderTest.java
+++ b/scram-common/src/test/java/com/ongres/scram/common/test/gssapi/Gs2HeaderTest.java
@@ -21,13 +21,13 @@
  */
 
 
-package com.ongres.scram.common.gssapi;
+package com.ongres.scram.common.test.gssapi;
 
 
-import org.junit.Test;
-
-import static org.junit.Assert.assertEquals;
-
+import com.ongres.scram.common.gssapi.Gs2CbindFlag;
+import com.ongres.scram.common.gssapi.Gs2Header;
+import org.junit.jupiter.api.Test;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 public class Gs2HeaderTest {
     private static final String[] VALID_GS2HEADER_STRINGS = new String[] {
@@ -52,22 +52,22 @@ public class Gs2HeaderTest {
         }
     }
 
-    @Test(expected = IllegalArgumentException.class)
+    @Test // (expected = IllegalArgumentException.class)
     public void constructorInvalid1() {
         new Gs2Header(Gs2CbindFlag.CHANNEL_BINDING_REQUIRED, null);
     }
 
-    @Test(expected = IllegalArgumentException.class)
+    @Test // (expected = IllegalArgumentException.class)
     public void constructorInvalid2() {
         new Gs2Header(Gs2CbindFlag.CLIENT_NOT, "blah");
     }
 
-    @Test(expected = IllegalArgumentException.class)
+    @Test // (expected = IllegalArgumentException.class)
     public void constructorInvalid3() {
         new Gs2Header(Gs2CbindFlag.CLIENT_YES_SERVER_NOT, "blah");
     }
 
-    @Test(expected = IllegalArgumentException.class)
+    @Test // (expected = IllegalArgumentException.class)
     public void constructorInvalid4() {
         new Gs2Header(Gs2CbindFlag.CHANNEL_BINDING_REQUIRED, null, "b");
     }
diff --git a/scram-common/src/test/java/com/ongres/scram/common/message/ClientFinalMessageTest.java b/scram-common/src/test/java/com/ongres/scram/common/test/message/ClientFinalMessageTest.java
similarity index 87%
rename from scram-common/src/test/java/com/ongres/scram/common/message/ClientFinalMessageTest.java
rename to scram-common/src/test/java/com/ongres/scram/common/test/message/ClientFinalMessageTest.java
index 10271ec..d79b52f 100644
--- a/scram-common/src/test/java/com/ongres/scram/common/message/ClientFinalMessageTest.java
+++ b/scram-common/src/test/java/com/ongres/scram/common/test/message/ClientFinalMessageTest.java
@@ -21,18 +21,18 @@
  */
 
 
-package com.ongres.scram.common.message;
+package com.ongres.scram.common.test.message;
 
 
-import com.ongres.scram.common.RfcExampleSha1;
+import com.ongres.scram.common.message.ClientFinalMessage;
+import com.ongres.scram.common.test.RfcExampleSha1;
 import com.ongres.scram.common.gssapi.Gs2CbindFlag;
 import com.ongres.scram.common.gssapi.Gs2Header;
-import org.junit.Test;
-
-import static org.junit.Assert.assertEquals;
-
+import org.junit.jupiter.api.Test;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 public class ClientFinalMessageTest {
+
     @Test
     public void writeToWithoutProofValid() {
         StringBuffer sb = ClientFinalMessage.writeToWithoutProof(
diff --git a/scram-common/src/test/java/com/ongres/scram/common/message/ClientFirstMessageTest.java b/scram-common/src/test/java/com/ongres/scram/common/test/message/ClientFirstMessageTest.java
similarity index 89%
rename from scram-common/src/test/java/com/ongres/scram/common/message/ClientFirstMessageTest.java
rename to scram-common/src/test/java/com/ongres/scram/common/test/message/ClientFirstMessageTest.java
index 7df7c01..70bc6b3 100644
--- a/scram-common/src/test/java/com/ongres/scram/common/message/ClientFirstMessageTest.java
+++ b/scram-common/src/test/java/com/ongres/scram/common/test/message/ClientFirstMessageTest.java
@@ -21,44 +21,45 @@
  */
 
 
-package com.ongres.scram.common.message;
-
+package com.ongres.scram.common.test.message;
 
 import com.ongres.scram.common.exception.ScramParseException;
 import com.ongres.scram.common.gssapi.Gs2CbindFlag;
-import org.junit.Test;
-
-import static com.ongres.scram.common.RfcExampleSha1.CLIENT_NONCE;
-import static org.junit.Assert.*;
+import com.ongres.scram.common.message.ClientFirstMessage;
+import org.junit.jupiter.api.Test;
 
+import static com.ongres.scram.common.test.RfcExampleSha1.CLIENT_NONCE;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 public class ClientFirstMessageTest  {
 
-    @Test(expected = IllegalArgumentException.class)
+    @Test // (expected = IllegalArgumentException.class)
     public void constructorTestInvalid1() {
         assertNotNull(new ClientFirstMessage(null, "a", CLIENT_NONCE));
     }
 
-    @Test(expected = IllegalArgumentException.class)
+    @Test // (expected = IllegalArgumentException.class)
     public void constructorTestInvalid2() {
         assertNotNull(
                 new ClientFirstMessage(Gs2CbindFlag.CLIENT_NOT, null, "cbind", "a", CLIENT_NONCE)
         );
     }
 
-    @Test(expected = IllegalArgumentException.class)
+    @Test //(expected = IllegalArgumentException.class)
     public void constructorTestInvalid3() {
         assertNotNull(
             new ClientFirstMessage(Gs2CbindFlag.CLIENT_YES_SERVER_NOT, null, "cbind", "a", CLIENT_NONCE)
        );
     }
 
-    @Test(expected = IllegalArgumentException.class)
+    @Test // (expected = IllegalArgumentException.class)
     public void constructorTestInvalid4() {
         assertNotNull(new ClientFirstMessage(Gs2CbindFlag.CHANNEL_BINDING_REQUIRED, null, null, "a", CLIENT_NONCE));
     }
 
-    @Test(expected = IllegalArgumentException.class)
+    @Test // (expected = IllegalArgumentException.class)
     public void constructorTestInvalid5() {
         assertNotNull(new ClientFirstMessage(Gs2CbindFlag.CLIENT_NOT, "authzid", null, null, CLIENT_NONCE));
     }
diff --git a/scram-common/src/test/java/com/ongres/scram/common/message/ServerFinalMessageTest.java b/scram-common/src/test/java/com/ongres/scram/common/test/message/ServerFinalMessageTest.java
similarity index 89%
rename from scram-common/src/test/java/com/ongres/scram/common/message/ServerFinalMessageTest.java
rename to scram-common/src/test/java/com/ongres/scram/common/test/message/ServerFinalMessageTest.java
index a476b4a..218575f 100644
--- a/scram-common/src/test/java/com/ongres/scram/common/message/ServerFinalMessageTest.java
+++ b/scram-common/src/test/java/com/ongres/scram/common/test/message/ServerFinalMessageTest.java
@@ -21,7 +21,7 @@
  */
 
 
-package com.ongres.scram.common.message;
+package com.ongres.scram.common.test.message;
 
 
 import com.ongres.scram.common.ScramAttributes;
@@ -29,13 +29,14 @@ import com.ongres.scram.common.ScramFunctions;
 import com.ongres.scram.common.ScramMechanisms;
 import com.ongres.scram.common.bouncycastle.base64.Base64;
 import com.ongres.scram.common.exception.ScramParseException;
+import com.ongres.scram.common.message.ServerFinalMessage;
 import com.ongres.scram.common.stringprep.StringPreparations;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
-import static com.ongres.scram.common.RfcExampleSha1.*;
-import static junit.framework.TestCase.assertFalse;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static com.ongres.scram.common.test.RfcExampleSha1.*;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 
 public class ServerFinalMessageTest {
diff --git a/scram-common/src/test/java/com/ongres/scram/common/message/ServerFirstMessageTest.java b/scram-common/src/test/java/com/ongres/scram/common/test/message/ServerFirstMessageTest.java
similarity index 85%
rename from scram-common/src/test/java/com/ongres/scram/common/message/ServerFirstMessageTest.java
rename to scram-common/src/test/java/com/ongres/scram/common/test/message/ServerFirstMessageTest.java
index 1967777..b318217 100644
--- a/scram-common/src/test/java/com/ongres/scram/common/message/ServerFirstMessageTest.java
+++ b/scram-common/src/test/java/com/ongres/scram/common/test/message/ServerFirstMessageTest.java
@@ -21,15 +21,16 @@
  */
 
 
-package com.ongres.scram.common.message;
+package com.ongres.scram.common.test.message;
 
 
 import com.ongres.scram.common.exception.ScramParseException;
-import org.junit.Test;
+import com.ongres.scram.common.message.ServerFirstMessage;
+import org.junit.jupiter.api.Test;
 
-import static com.ongres.scram.common.RfcExampleSha1.CLIENT_NONCE;
-import static com.ongres.scram.common.RfcExampleSha1.SERVER_FIRST_MESSAGE;
-import static org.junit.Assert.assertEquals;
+import static com.ongres.scram.common.test.RfcExampleSha1.CLIENT_NONCE;
+import static com.ongres.scram.common.test.RfcExampleSha1.SERVER_FIRST_MESSAGE;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 
 public class ServerFirstMessageTest {
diff --git a/scram-common/src/test/java/com/ongres/scram/common/stringprep/StringPreparationTest.java b/scram-common/src/test/java/com/ongres/scram/common/test/stringprep/StringPreparationTest.java
similarity index 93%
rename from scram-common/src/test/java/com/ongres/scram/common/stringprep/StringPreparationTest.java
rename to scram-common/src/test/java/com/ongres/scram/common/test/stringprep/StringPreparationTest.java
index ed4c432..90d9b90 100644
--- a/scram-common/src/test/java/com/ongres/scram/common/stringprep/StringPreparationTest.java
+++ b/scram-common/src/test/java/com/ongres/scram/common/test/stringprep/StringPreparationTest.java
@@ -20,15 +20,16 @@
  *
  */
 
-package com.ongres.scram.common.stringprep;
+package com.ongres.scram.common.test.stringprep;
 
 
-import org.junit.Test;
+import com.ongres.scram.common.stringprep.StringPreparation;
+import com.ongres.scram.common.stringprep.StringPreparations;
 
 import java.util.Random;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import org.junit.jupiter.api.Test;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 
 public class StringPreparationTest {
@@ -49,8 +50,8 @@ public class StringPreparationTest {
         }
 
         assertTrue(
-                "IllegalArgumentException not thrown for either null or empty input",
-                n == nullEmpty.length * StringPreparations.values().length
+                n == nullEmpty.length * StringPreparations.values().length,
+                "IllegalArgumentException not thrown for either null or empty input"
         );
     }
 
@@ -120,9 +121,9 @@ public class StringPreparationTest {
         for(StringPreparation stringPreparation : StringPreparations.values()) {
             for(String s : values) {
                 assertEquals(
-                        "'" + s + "' is a printable ASCII string, should not be changed by normalize()",
                         s,
-                        stringPreparation.normalize(s)
+                        stringPreparation.normalize(s),
+                        "'" + s + "' is a printable ASCII string, should not be changed by normalize()"
                 );
             }
         }
@@ -140,8 +141,8 @@ public class StringPreparationTest {
         }
 
         assertTrue(
-                "IllegalArgumentException not thrown for either null or empty output after normalization",
-                n == ONLY_NON_PRINTABLE_STRINGS.length
+                n == ONLY_NON_PRINTABLE_STRINGS.length,
+                "IllegalArgumentException not thrown for either null or empty output after normalization"
         );
     }
 
diff --git a/scram-common/src/test/java/com/ongres/scram/common/util/AbstractCharAttributeValueTest.java b/scram-common/src/test/java/com/ongres/scram/common/test/util/AbstractCharAttributeValueTest.java
similarity index 90%
rename from scram-common/src/test/java/com/ongres/scram/common/util/AbstractCharAttributeValueTest.java
rename to scram-common/src/test/java/com/ongres/scram/common/test/util/AbstractCharAttributeValueTest.java
index b5b6204..ae06f84 100644
--- a/scram-common/src/test/java/com/ongres/scram/common/util/AbstractCharAttributeValueTest.java
+++ b/scram-common/src/test/java/com/ongres/scram/common/test/util/AbstractCharAttributeValueTest.java
@@ -21,13 +21,15 @@
  */
 
 
-package com.ongres.scram.common.util;
+package com.ongres.scram.common.test.util;
 
 
-import org.junit.Test;
-
-import static org.junit.Assert.*;
-
+import com.ongres.scram.common.util.AbstractCharAttributeValue;
+import com.ongres.scram.common.util.CharAttribute;
+import org.junit.jupiter.api.Test;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.fail;
 
 public class AbstractCharAttributeValueTest {
     private class MockCharAttribute implements CharAttribute {
diff --git a/scram-common/src/test/java/com/ongres/scram/common/util/Base64Test.java b/scram-common/src/test/java/com/ongres/scram/common/test/util/Base64Test.java
similarity index 58%
rename from scram-common/src/test/java/com/ongres/scram/common/util/Base64Test.java
rename to scram-common/src/test/java/com/ongres/scram/common/test/util/Base64Test.java
index 3431061..bcfdbf2 100644
--- a/scram-common/src/test/java/com/ongres/scram/common/util/Base64Test.java
+++ b/scram-common/src/test/java/com/ongres/scram/common/test/util/Base64Test.java
@@ -20,32 +20,31 @@
  *
  */
 
-package com.ongres.scram.common.util;
+package com.ongres.scram.common.test.util;
 
 import java.nio.charset.StandardCharsets;
 
-import org.junit.Assert;
-import org.junit.Test;
-
 import com.ongres.scram.common.bouncycastle.base64.Base64;
+import org.junit.jupiter.api.Test;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 public class Base64Test {
 
   @Test
   public void rfcTest() {
-    Assert.assertEquals("", new String(Base64.decode(""), StandardCharsets.UTF_8));
-    Assert.assertEquals("f", new String(Base64.decode("Zg=="), StandardCharsets.UTF_8));
-    Assert.assertEquals("fo", new String(Base64.decode("Zm8="), StandardCharsets.UTF_8));
-    Assert.assertEquals("foo", new String(Base64.decode("Zm9v"), StandardCharsets.UTF_8));
-    Assert.assertEquals("foob", new String(Base64.decode("Zm9vYg=="), StandardCharsets.UTF_8));
-    Assert.assertEquals("fooba", new String(Base64.decode("Zm9vYmE="), StandardCharsets.UTF_8));
-    Assert.assertEquals("foobar", new String(Base64.decode("Zm9vYmFy"), StandardCharsets.UTF_8));
-    Assert.assertEquals("", Base64.toBase64String("".getBytes(StandardCharsets.UTF_8)));
-    Assert.assertEquals("Zg==", Base64.toBase64String("f".getBytes(StandardCharsets.UTF_8)));
-    Assert.assertEquals("Zm8=", Base64.toBase64String("fo".getBytes(StandardCharsets.UTF_8)));
-    Assert.assertEquals("Zm9v", Base64.toBase64String("foo".getBytes(StandardCharsets.UTF_8)));
-    Assert.assertEquals("Zm9vYg==", Base64.toBase64String("foob".getBytes(StandardCharsets.UTF_8)));
-    Assert.assertEquals("Zm9vYmE=", Base64.toBase64String("fooba".getBytes(StandardCharsets.UTF_8)));
-    Assert.assertEquals("Zm9vYmFy", Base64.toBase64String("foobar".getBytes(StandardCharsets.UTF_8)));
+    assertEquals("", new String(Base64.decode(""), StandardCharsets.UTF_8));
+    assertEquals("f", new String(Base64.decode("Zg=="), StandardCharsets.UTF_8));
+    assertEquals("fo", new String(Base64.decode("Zm8="), StandardCharsets.UTF_8));
+    assertEquals("foo", new String(Base64.decode("Zm9v"), StandardCharsets.UTF_8));
+    assertEquals("foob", new String(Base64.decode("Zm9vYg=="), StandardCharsets.UTF_8));
+    assertEquals("fooba", new String(Base64.decode("Zm9vYmE="), StandardCharsets.UTF_8));
+    assertEquals("foobar", new String(Base64.decode("Zm9vYmFy"), StandardCharsets.UTF_8));
+    assertEquals("", Base64.toBase64String("".getBytes(StandardCharsets.UTF_8)));
+    assertEquals("Zg==", Base64.toBase64String("f".getBytes(StandardCharsets.UTF_8)));
+    assertEquals("Zm8=", Base64.toBase64String("fo".getBytes(StandardCharsets.UTF_8)));
+    assertEquals("Zm9v", Base64.toBase64String("foo".getBytes(StandardCharsets.UTF_8)));
+    assertEquals("Zm9vYg==", Base64.toBase64String("foob".getBytes(StandardCharsets.UTF_8)));
+    assertEquals("Zm9vYmE=", Base64.toBase64String("fooba".getBytes(StandardCharsets.UTF_8)));
+    assertEquals("Zm9vYmFy", Base64.toBase64String("foobar".getBytes(StandardCharsets.UTF_8)));
   }
 }
diff --git a/scram-common/src/test/java/com/ongres/scram/common/util/CryptoUtilTest.java b/scram-common/src/test/java/com/ongres/scram/common/test/util/CryptoUtilTest.java
similarity index 88%
rename from scram-common/src/test/java/com/ongres/scram/common/util/CryptoUtilTest.java
rename to scram-common/src/test/java/com/ongres/scram/common/test/util/CryptoUtilTest.java
index b21b0d0..645291a 100644
--- a/scram-common/src/test/java/com/ongres/scram/common/util/CryptoUtilTest.java
+++ b/scram-common/src/test/java/com/ongres/scram/common/test/util/CryptoUtilTest.java
@@ -21,26 +21,24 @@
  */
 
 
-package com.ongres.scram.common.util;
-
-
-import org.junit.Test;
+package com.ongres.scram.common.test.util;
 
+import com.ongres.scram.common.util.CryptoUtil;
 import java.security.SecureRandom;
 import java.util.Random;
-
-import static org.junit.Assert.fail;
+import org.junit.jupiter.api.Test;
+import static org.junit.jupiter.api.Assertions.fail;
 
 
 public class CryptoUtilTest {
     private static final SecureRandom SECURE_RANDOM = new SecureRandom();
 
-    @Test(expected = IllegalArgumentException.class)
+    @Test //(expected = IllegalArgumentException.class)
     public void nonceInvalidSize1() {
         CryptoUtil.nonce(0, SECURE_RANDOM);
     }
 
-    @Test(expected = IllegalArgumentException.class)
+    @Test //(expected = IllegalArgumentException.class)
     public void nonceInvalidSize2() {
         CryptoUtil.nonce(-1, SECURE_RANDOM);
     }
diff --git a/scram-common/src/test/java/com/ongres/scram/common/util/StringWritableCsvTest.java b/scram-common/src/test/java/com/ongres/scram/common/test/util/StringWritableCsvTest.java
similarity index 93%
rename from scram-common/src/test/java/com/ongres/scram/common/util/StringWritableCsvTest.java
rename to scram-common/src/test/java/com/ongres/scram/common/test/util/StringWritableCsvTest.java
index ed5c051..f64dde0 100644
--- a/scram-common/src/test/java/com/ongres/scram/common/util/StringWritableCsvTest.java
+++ b/scram-common/src/test/java/com/ongres/scram/common/test/util/StringWritableCsvTest.java
@@ -21,19 +21,19 @@
  */
 
 
-package com.ongres.scram.common.util;
+package com.ongres.scram.common.test.util;
 
 
 import com.ongres.scram.common.ScramAttributes;
 import com.ongres.scram.common.ScramAttributeValue;
 import com.ongres.scram.common.gssapi.Gs2AttributeValue;
 import com.ongres.scram.common.gssapi.Gs2Attributes;
-import org.junit.Test;
-
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
+import com.ongres.scram.common.util.CharAttributeValue;
+import com.ongres.scram.common.util.StringWritableCsv;
+import org.junit.jupiter.api.Test;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 public class StringWritableCsvTest {
     private static final String[] ONE_ARG_VALUES = new String[] { "c=channel", "i=4096", "a=authzid", "n" };
diff --git a/scram-common/src/test/java/com/ongres/scram/common/util/UsAsciiUtilsTest.java b/scram-common/src/test/java/com/ongres/scram/common/test/util/UsAsciiUtilsTest.java
similarity index 89%
rename from scram-common/src/test/java/com/ongres/scram/common/util/UsAsciiUtilsTest.java
rename to scram-common/src/test/java/com/ongres/scram/common/test/util/UsAsciiUtilsTest.java
index 1c7b6a9..eed0bb1 100644
--- a/scram-common/src/test/java/com/ongres/scram/common/util/UsAsciiUtilsTest.java
+++ b/scram-common/src/test/java/com/ongres/scram/common/test/util/UsAsciiUtilsTest.java
@@ -21,16 +21,15 @@
  */
 
 
-package com.ongres.scram.common.util;
-
-
-import org.junit.Test;
+package com.ongres.scram.common.test.util;
 
+import com.ongres.scram.common.util.UsAsciiUtils;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
-
-import static org.junit.Assert.*;
+import org.junit.jupiter.api.Test;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.fail;
 
 
 public class UsAsciiUtilsTest {
@@ -56,11 +55,7 @@ public class UsAsciiUtilsTest {
                 n++;
             }
         }
-
-        assertTrue(
-                "String(s) with non-ASCII characters not throwing IllegalArgumentException",
-                n == nonASCIIStrings.length
-        );
+        assertEquals(n, nonASCIIStrings.length, "String(s) with non-ASCII characters not throwing IllegalArgumentException");
     }
 
     @Test
diff --git a/settings.gradle b/settings.gradle
index a7f7bd3..2f814d1 100644
--- a/settings.gradle
+++ b/settings.gradle
@@ -15,15 +15,23 @@ pluginManagement {
 dependencyResolutionManagement {
     versionCatalogs {
         libs {
-            version('gradle', '8.5')
+            version('gradle', '8.7')
         }
         testLibs {
             version('junit', '5.10.2')
+            version('testcontainers', '1.19.8')
             library('junit-jupiter-api', 'org.junit.jupiter', 'junit-jupiter-api').versionRef('junit')
             library('junit-jupiter-params', 'org.junit.jupiter', 'junit-jupiter-params').versionRef('junit')
             library('junit-jupiter-engine', 'org.junit.jupiter', 'junit-jupiter-engine').versionRef('junit')
             library('junit-jupiter-platform-launcher', 'org.junit.platform', 'junit-platform-launcher').version('1.10.1')
+            library('junit-runner', 'org.junit.platform', 'junit-platform-runner').version('1.10.3')
             library('hamcrest', 'org.hamcrest', 'hamcrest-library').version('2.2')
+            library('bytebuddy', 'net.bytebuddy', 'byte-buddy').version('1.14.17')
+            library('bytebuddy-agent', 'net.bytebuddy', 'byte-buddy-agent').version('1.14.17')
+            library('classloader-leak-test', 'se.jiderhamn', 'classloader-leak-test-framework').version('1.1.2')
+            library('testcontainers', 'org.testcontainers', 'testcontainers').versionRef('testcontainers')
+            library('testcontainers-junit-jupiter', 'org.testcontainers', 'junit-jupiter').versionRef('testcontainers')
+            library('testcontainers-postgresql', 'org.testcontainers', 'postgresql').versionRef('testcontainers')
         }
     }
 }